[14/21] binder: do not add pages to LRU in release path

Message ID 20231102185934.773885-15-cmllamas@google.com
State New
Series binder: convert alloc->mutex to spinlock

Commit Message

Carlos Llamas Nov. 2, 2023, 6:59 p.m. UTC
  In binder_alloc_deferred_release() pages are added to the LRU list via
binder_free_buf_locked(). However, this is pointless because these pages
are kfree'd immediately afterwards. Add an option to skip the LRU list.

Signed-off-by: Carlos Llamas <cmllamas@google.com>
---
 drivers/android/binder_alloc.c | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)
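
For readers skimming the diff below: the patch threads a free_pages flag
through binder_free_buf_locked() so the teardown path can skip placing
pages on the shrinker's LRU list, since those pages are destroyed
immediately afterwards anyway. The following minimal userspace C sketch
illustrates the general skip-the-LRU-on-teardown pattern only; struct
page, lru_head and cache_free_page are illustrative stand-ins, not the
actual binder_alloc.c code (which uses the kernel's list_lru machinery).

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a cached page; not the kernel's struct page. */
struct page {
	struct page *next;	/* links the page on the LRU freelist */
	void *data;
};

static struct page *lru_head;	/* stands in for the shrinker's LRU list */

/*
 * Free one page. On the normal path (add_to_lru == true) the page is
 * parked on the LRU so it can be reused or reclaimed lazily. On the
 * teardown path (add_to_lru == false) the LRU step is skipped because
 * the page is released for good right away -- the same idea as the
 * free_pages flag added to binder_free_buf_locked() in this patch.
 */
static void cache_free_page(struct page *p, bool add_to_lru)
{
	if (add_to_lru) {
		p->next = lru_head;
		lru_head = p;
		return;
	}
	free(p->data);
	free(p);
}

int main(void)
{
	struct page *p = malloc(sizeof(*p));

	p->data = malloc(4096);

	/* Normal free: the page lands on the LRU for possible reuse. */
	cache_free_page(p, true);

	/* Teardown: drain the LRU and release everything for real. */
	while (lru_head) {
		struct page *q = lru_head;

		lru_head = q->next;
		free(q->data);
		free(q);
	}
	printf("all pages released\n");
	return 0;
}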
  

Comments

Alice Ryhl Nov. 7, 2023, 9:08 a.m. UTC | #1
Carlos Llamas <cmllamas@google.com> writes:
> In binder_alloc_deferred_release() pages are added to the LRU list via
> binder_free_buf_locked(). However, this is pointless because these pages
> are kfree'd immediately afterwards. Add an option to skip the LRU list.

They aren't freed with kfree, but with __free_page.
 
> Signed-off-by: Carlos Llamas <cmllamas@google.com>

The change itself looks correct, so I'll give my tag for this:

Reviewed-by: Alice Ryhl <aliceryhl@google.com>

But I'm wondering whether process cleanup really is performance-intensive
enough to justify the added complexity of this?

Alice
  
Carlos Llamas Dec. 1, 2023, 7:15 a.m. UTC | #2
On Tue, Nov 07, 2023 at 09:08:29AM +0000, Alice Ryhl wrote:
> Carlos Llamas <cmllamas@google.com> writes:
> > In binder_alloc_deferred_release() pages are added to the LRU list via
> > binder_free_buf_locked(). However, this is pointless because these pages
> > are kfree'd immediately afterwards. Add an option to skip the LRU list.
> 
> They aren't freed with kfree, but with __free_page.
>  
> > Signed-off-by: Carlos Llamas <cmllamas@google.com>
> 
> The change itself looks correct, so I'll give my tag for this:
> 
> Reviewed-by: Alice Ryhl <aliceryhl@google.com>
> 
> But I'm wondering whether process cleanup really is performance-intensive
> enough to justify the added complexity of this?

So, this was needed in an earlier version of the patchset and I was
hoping that it would also help with an issue reported here:
https://lore.kernel.org/all/ZSHmtLqtNZRAtaZ0@google.com/

However, I do agree that it is unnecessary at this stage, so I've decided
to drop it from v2.

Thanks,
--
Carlos Llamas
  

Patch

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index cc627c106a01..d2a38dee12db 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -595,16 +595,16 @@ static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
 }
 
 static void binder_delete_free_buffer(struct binder_alloc *alloc,
-				      struct binder_buffer *buffer)
+				      struct binder_buffer *buffer,
+				      bool free_pages)
 {
 	struct binder_buffer *prev, *next = NULL;
-	bool to_free = true;
 
 	BUG_ON(alloc->buffers.next == &buffer->entry);
 	prev = binder_buffer_prev(buffer);
 	BUG_ON(!prev->free);
 	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
-		to_free = false;
+		free_pages = false;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				   "%d: merge free, buffer %lx share page with %lx\n",
 				   alloc->pid, buffer->user_data,
@@ -614,7 +614,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
 		next = binder_buffer_next(buffer);
 		if (buffer_start_page(next) == buffer_start_page(buffer)) {
-			to_free = false;
+			free_pages = false;
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 					   "%d: merge free, buffer %lx share page with %lx\n",
 					   alloc->pid,
@@ -627,10 +627,10 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				   "%d: merge free, buffer start %lx is page aligned\n",
 				   alloc->pid, buffer->user_data);
-		to_free = false;
+		free_pages = false;
 	}
 
-	if (to_free) {
+	if (free_pages) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				   "%d: merge free, buffer %lx do not share page with %lx or %lx\n",
 				   alloc->pid, buffer->user_data,
@@ -644,7 +644,8 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
 }
 
 static void binder_free_buf_locked(struct binder_alloc *alloc,
-				   struct binder_buffer *buffer)
+				   struct binder_buffer *buffer,
+				   bool free_pages)
 {
 	size_t size, buffer_size;
 
@@ -672,8 +673,9 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 			      alloc->pid, size, alloc->free_async_space);
 	}
 
-	binder_free_page_range(alloc, PAGE_ALIGN(buffer->user_data),
-			       (buffer->user_data + buffer_size) & PAGE_MASK);
+	if (free_pages)
+		binder_free_page_range(alloc, PAGE_ALIGN(buffer->user_data),
+				       (buffer->user_data + buffer_size) & PAGE_MASK);
 
 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
 	buffer->free = 1;
@@ -682,14 +684,14 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 
 		if (next->free) {
 			rb_erase(&next->rb_node, &alloc->free_buffers);
-			binder_delete_free_buffer(alloc, next);
+			binder_delete_free_buffer(alloc, next, free_pages);
 		}
 	}
 	if (alloc->buffers.next != &buffer->entry) {
 		struct binder_buffer *prev = binder_buffer_prev(buffer);
 
 		if (prev->free) {
-			binder_delete_free_buffer(alloc, buffer);
+			binder_delete_free_buffer(alloc, buffer, free_pages);
 			rb_erase(&prev->rb_node, &alloc->free_buffers);
 			buffer = prev;
 		}
@@ -722,7 +724,7 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
 		buffer->clear_on_free = false;
 	}
 	mutex_lock(&alloc->mutex);
-	binder_free_buf_locked(alloc, buffer);
+	binder_free_buf_locked(alloc, buffer, true);
 	mutex_unlock(&alloc->mutex);
 }
 
@@ -829,7 +831,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 			binder_alloc_clear_buf(alloc, buffer);
 			buffer->clear_on_free = false;
 		}
-		binder_free_buf_locked(alloc, buffer);
+		binder_free_buf_locked(alloc, buffer, false);
 		buffers++;
 	}