@@ -49,6 +49,7 @@ pub(crate) struct Allocation {
pub(crate) process: Arc<Process>,
allocation_info: Option<AllocationInfo>,
free_on_drop: bool,
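+ /// Set if the oneway spam detector flagged this allocation when it was reserved.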
+ pub(crate) oneway_spam_detected: bool,
}
impl Allocation {
@@ -58,6 +59,7 @@ pub(crate) fn new(
size: usize,
ptr: usize,
pages: Arc<Vec<Pages<0>>>,
+ oneway_spam_detected: bool,
) -> Self {
Self {
process,
@@ -65,6 +67,7 @@ pub(crate) fn new(
size,
ptr,
pages,
+ oneway_spam_detected,
allocation_info: None,
free_on_drop: true,
}
@@ -24,6 +24,7 @@ macro_rules! pub_no_prefix {
BR_NOOP,
BR_SPAWN_LOOPER,
BR_TRANSACTION_COMPLETE,
+ BR_ONEWAY_SPAM_SUSPECT,
BR_OK,
BR_ERROR,
BR_INCREFS,
@@ -92,6 +92,8 @@ pub(crate) struct ProcessInner {
pub(crate) sync_recv: bool,
/// Process received async transactions since last frozen.
pub(crate) async_recv: bool,
+ /// Whether oneway spam detection is enabled for this process.
+ oneway_spam_detection_enabled: bool,
}
impl ProcessInner {
@@ -113,6 +115,7 @@ fn new() -> Self {
is_frozen: false,
sync_recv: false,
async_recv: false,
+ oneway_spam_detection_enabled: false,
}
}
@@ -658,17 +661,21 @@ pub(crate) fn buffer_alloc(
self: &Arc<Self>,
size: usize,
is_oneway: bool,
+ from_pid: i32,
) -> BinderResult<Allocation> {
let alloc = range_alloc::ReserveNewBox::try_new()?;
let mut inner = self.inner.lock();
let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;
- let offset = mapping.alloc.reserve_new(size, is_oneway, alloc)?;
+ let offset = mapping
+ .alloc
+ .reserve_new(size, is_oneway, from_pid, alloc)?;
Ok(Allocation::new(
self.clone(),
offset,
size,
mapping.address + offset,
mapping.pages.clone(),
+ mapping.alloc.oneway_spam_detected,
))
}
@@ -677,7 +684,14 @@ pub(crate) fn buffer_get(self: &Arc<Self>, ptr: usize) -> Option<Allocation> {
let mapping = inner.mapping.as_mut()?;
let offset = ptr.checked_sub(mapping.address)?;
let (size, odata) = mapping.alloc.reserve_existing(offset).ok()?;
- let mut alloc = Allocation::new(self.clone(), offset, size, ptr, mapping.pages.clone());
+ let mut alloc = Allocation::new(
+ self.clone(),
+ offset,
+ size,
+ ptr,
+ mapping.pages.clone(),
+ mapping.alloc.oneway_spam_detected,
+ );
if let Some(data) = odata {
alloc.set_info(data);
}
@@ -762,6 +776,14 @@ fn set_max_threads(&self, max: u32) {
self.inner.lock().max_threads = max;
}
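+ /// Enables or disables oneway spam detection for this process
+ /// (set from the `BINDER_ENABLE_ONEWAY_SPAM_DETECTION` ioctl below).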
+ fn set_oneway_spam_detection_enabled(&self, enabled: u32) {
+ self.inner.lock().oneway_spam_detection_enabled = enabled != 0;
+ }
+
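+ /// Returns whether oneway spam detection is enabled for this process.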
+ pub(crate) fn is_oneway_spam_detection_enabled(&self) -> bool {
+ self.inner.lock().oneway_spam_detection_enabled
+ }
+
fn get_node_debug_info(&self, data: UserSlicePtr) -> Result {
let (mut reader, mut writer) = data.reader_writer();
@@ -948,9 +970,17 @@ fn deferred_release(self: Arc<Self>) {
if let Some(mut mapping) = omapping {
let address = mapping.address;
let pages = mapping.pages.clone();
+ let oneway_spam_detected = mapping.alloc.oneway_spam_detected;
mapping.alloc.take_for_each(|offset, size, odata| {
let ptr = offset + address;
- let mut alloc = Allocation::new(self.clone(), offset, size, ptr, pages.clone());
+ let mut alloc = Allocation::new(
+ self.clone(),
+ offset,
+ size,
+ ptr,
+ pages.clone(),
+ oneway_spam_detected,
+ );
if let Some(data) = odata {
alloc.set_info(data);
}
@@ -1144,6 +1174,9 @@ fn write(
bindings::BINDER_SET_CONTEXT_MGR_EXT => {
this.set_as_manager(Some(reader.read()?), &thread)?
}
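+ // Any nonzero argument enables oneway spam detection; zero disables it.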
+ bindings::BINDER_ENABLE_ONEWAY_SPAM_DETECTION => {
+ this.set_oneway_spam_detection_enabled(reader.read()?)
+ }
bindings::BINDER_FREEZE => ioctl_freeze(reader)?,
_ => return Err(EINVAL),
}
@@ -3,6 +3,7 @@
use kernel::{
prelude::*,
rbtree::{RBTree, RBTreeNode, RBTreeNodeReservation},
+ task::Pid,
};
/// Keeps track of allocations in a process' mmap.
@@ -13,7 +14,9 @@
pub(crate) struct RangeAllocator<T> {
tree: RBTree<usize, Descriptor<T>>,
free_tree: RBTree<FreeKey, ()>,
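+ /// The total size of the range being managed (the process' mmap'ed buffer).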
+ size: usize,
free_oneway_space: usize,
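+ /// Set by `reserve_new` when the oneway spam heuristics trip; read back by callers.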
+ pub(crate) oneway_spam_detected: bool,
}
impl<T> RangeAllocator<T> {
@@ -26,6 +29,8 @@ pub(crate) fn new(size: usize) -> Result<Self> {
free_oneway_space: size / 2,
tree,
free_tree,
+ oneway_spam_detected: false,
+ size,
})
}
@@ -40,6 +45,7 @@ pub(crate) fn reserve_new(
&mut self,
size: usize,
is_oneway: bool,
+ pid: Pid,
alloc: ReserveNewBox<T>,
) -> Result<usize> {
// Compute new value of free_oneway_space, which is set only on success.
@@ -52,6 +58,15 @@ pub(crate) fn reserve_new(
self.free_oneway_space
};
+ // Start detecting spammers once we have less than 20%
+ // of async space left (which is less than 10% of total
+ // buffer size).
+ //
+ // (This will short-circuit, so `low_oneway_space` is
+ // only called when necessary.)
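+ //
+ // For example, with a 4 MiB mapping, the async space is 2 MiB and
+ // detection starts once less than ~410 KiB of it would remain.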
+ self.oneway_spam_detected =
+ is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
+
let (found_size, found_off, tree_node, free_tree_node) = match self.find_best_match(size) {
None => {
pr_warn!("ENOSPC from range_alloc.reserve_new - size: {}", size);
@@ -65,7 +80,7 @@ pub(crate) fn reserve_new(
let new_desc = Descriptor::new(found_offset + size, found_size - size);
let (tree_node, free_tree_node, desc_node_res) = alloc.initialize(new_desc);
- desc.state = Some(DescriptorState::new(is_oneway, desc_node_res));
+ desc.state = Some(DescriptorState::new(is_oneway, pid, desc_node_res));
desc.size = size;
(found_size, found_offset, tree_node, free_tree_node)
@@ -224,6 +239,30 @@ pub(crate) fn take_for_each<F: Fn(usize, usize, Option<T>)>(&mut self, callback:
}
}
}
+
+ /// Checks whether the number and total size of buffers allocated by the
+ /// calling process exceed the spam thresholds.
+ ///
+ /// The idea is that once we cross the threshold, whoever is responsible
+ /// for the low async space is likely to try to send another async transaction,
+ /// and at some point we'll catch them in the act. This is more efficient
+ /// than keeping a map per pid.
+ fn low_oneway_space(&self, calling_pid: Pid) -> bool {
+ let mut total_alloc_size = 0;
+ let mut num_buffers = 0;
+ for (_, desc) in self.tree.iter() {
+ if let Some(state) = &desc.state {
+ if state.is_oneway() && state.pid() == calling_pid {
+ total_alloc_size += desc.size;
+ num_buffers += 1;
+ }
+ }
+ }
+
+ // Warn if this pid has more than 50 transactions, or more than 50% of
+ // async space (which is 25% of total buffer size). Oneway spam is only
+ // detected when the threshold is exceeded.
+ num_buffers > 50 || total_alloc_size > self.size / 4
+ }
}
struct Descriptor<T> {
@@ -257,16 +296,32 @@ enum DescriptorState<T> {
}
impl<T> DescriptorState<T> {
- fn new(is_oneway: bool, free_res: FreeNodeRes) -> Self {
+ fn new(is_oneway: bool, pid: Pid, free_res: FreeNodeRes) -> Self {
DescriptorState::Reserved(Reservation {
is_oneway,
+ pid,
free_res,
})
}
+
+ fn pid(&self) -> Pid {
+ match self {
+ DescriptorState::Reserved(inner) => inner.pid,
+ DescriptorState::Allocated(inner) => inner.pid,
+ }
+ }
+
+ fn is_oneway(&self) -> bool {
+ match self {
+ DescriptorState::Reserved(inner) => inner.is_oneway,
+ DescriptorState::Allocated(inner) => inner.is_oneway,
+ }
+ }
}
struct Reservation {
is_oneway: bool,
+ pid: Pid,
free_res: FreeNodeRes,
}
@@ -275,6 +330,7 @@ fn allocate<T>(self, data: Option<T>) -> Allocation<T> {
Allocation {
data,
is_oneway: self.is_oneway,
+ pid: self.pid,
free_res: self.free_res,
}
}
@@ -282,6 +338,7 @@ fn allocate<T>(self, data: Option<T>) -> Allocation<T> {
struct Allocation<T> {
is_oneway: bool,
+ pid: Pid,
free_res: FreeNodeRes,
data: Option<T>,
}
@@ -291,6 +348,7 @@ fn deallocate(self) -> (Reservation, Option<T>) {
(
Reservation {
is_oneway: self.is_oneway,
+ pid: self.pid,
free_res: self.free_res,
},
self.data,
@@ -107,7 +107,6 @@ fn new(val: impl PinInit<T>) -> impl PinInit<Self> {
})
}
- #[allow(dead_code)]
fn arc_try_new(val: T) -> Result<DLArc<T>, alloc::alloc::AllocError> {
ListArc::pin_init(pin_init!(Self {
links <- ListLinksSelfPtr::new(),
@@ -909,7 +909,7 @@ pub(crate) fn copy_transaction_data(
size_of::<usize>(),
);
let secctx_off = adata_size + aoffsets_size + abuffers_size;
- let mut alloc = match to_process.buffer_alloc(len, is_oneway) {
+ let mut alloc = match to_process.buffer_alloc(len, is_oneway, self.process.task.pid()) {
Ok(alloc) => alloc,
Err(err) => {
pr_warn!(
@@ -1191,8 +1191,15 @@ fn oneway_transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> B
let handle = unsafe { tr.transaction_data.target.handle };
let node_ref = self.process.get_transaction_node(handle)?;
security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
- let list_completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
let transaction = Transaction::new(node_ref, None, self, tr)?;
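+ // If detection is enabled and this transaction's allocation tripped the
+ // detector, warn the sender instead of sending a plain completion.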
+ let code = if self.process.is_oneway_spam_detection_enabled()
+ && transaction.oneway_spam_detected
+ {
+ BR_ONEWAY_SPAM_SUSPECT
+ } else {
+ BR_TRANSACTION_COMPLETE
+ };
+ let list_completion = DTRWrap::arc_try_new(DeliverCode::new(code))?;
let completion = list_completion.clone_arc();
self.inner.lock().push_work(list_completion);
match transaction.submit() {
@@ -38,6 +38,7 @@ pub(crate) struct Transaction {
data_address: usize,
sender_euid: Kuid,
txn_security_ctx_off: Option<usize>,
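+ /// True if the allocation for this transaction tripped the oneway spam detector.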
+ pub(crate) oneway_spam_detected: bool,
}
kernel::list::impl_list_arc_safe! {
@@ -70,6 +71,7 @@ pub(crate) fn new(
return Err(err);
}
};
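+ // Capture the detector's verdict before `alloc` is stored in the transaction.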
+ let oneway_spam_detected = alloc.oneway_spam_detected;
if trd.flags & TF_ONE_WAY != 0 {
if stack_next.is_some() {
pr_warn!("Oneway transaction should not be in a transaction stack.");
@@ -98,6 +100,7 @@ pub(crate) fn new(
allocation <- kernel::new_spinlock!(Some(alloc), "Transaction::new"),
is_outstanding: AtomicBool::new(false),
txn_security_ctx_off,
+ oneway_spam_detected,
}))?)
}
@@ -115,6 +118,7 @@ pub(crate) fn new_reply(
return Err(err);
}
};
+ let oneway_spam_detected = alloc.oneway_spam_detected;
if trd.flags & TF_CLEAR_BUF != 0 {
alloc.set_info_clear_on_drop();
}
@@ -132,6 +136,7 @@ pub(crate) fn new_reply(
allocation <- kernel::new_spinlock!(Some(alloc), "Transaction::new"),
is_outstanding: AtomicBool::new(false),
txn_security_ctx_off: None,
+ oneway_spam_detected,
}))?)
}
@@ -81,7 +81,7 @@ unsafe impl Send for Task {}
unsafe impl Sync for Task {}
/// The type of process identifiers (PIDs).
-type Pid = bindings::pid_t;
+pub type Pid = bindings::pid_t;
/// The type of user identifiers (UIDs).
#[derive(Copy, Clone)]