binder: use per-vma lock in page reclaiming
Use per-vma locking in the shrinker's callback when reclaiming pages, similar to the page installation logic. This minimizes contention with unrelated vmas, improving performance. The mmap_sem is still acquired if the per-vma lock cannot be obtained. Cc: Suren Baghdasaryan <surenb@google.com> Suggested-by: Liam R. Howlett <Liam.Howlett@oracle.com> Reviewed-by: Suren Baghdasaryan <surenb@google.com> Signed-off-by: Carlos Llamas <cmllamas@google.com> Link: https://lore.kernel.org/r/20241210143114.661252-10-cmllamas@google.com Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
978ce3ed70
commit
95bc2d4a90
1 changed file with 22 additions and 7 deletions
|
@ -1143,19 +1143,28 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
|
|||
struct vm_area_struct *vma;
|
||||
struct page *page_to_free;
|
||||
unsigned long page_addr;
|
||||
int mm_locked = 0;
|
||||
size_t index;
|
||||
|
||||
if (!mmget_not_zero(mm))
|
||||
goto err_mmget;
|
||||
if (!mmap_read_trylock(mm))
|
||||
goto err_mmap_read_lock_failed;
|
||||
if (!mutex_trylock(&alloc->mutex))
|
||||
goto err_get_alloc_mutex_failed;
|
||||
|
||||
index = mdata->page_index;
|
||||
page_addr = alloc->vm_start + index * PAGE_SIZE;
|
||||
|
||||
/* attempt per-vma lock first */
|
||||
vma = lock_vma_under_rcu(mm, page_addr);
|
||||
if (!vma) {
|
||||
/* fall back to mmap_lock */
|
||||
if (!mmap_read_trylock(mm))
|
||||
goto err_mmap_read_lock_failed;
|
||||
mm_locked = 1;
|
||||
vma = vma_lookup(mm, page_addr);
|
||||
}
|
||||
|
||||
if (!mutex_trylock(&alloc->mutex))
|
||||
goto err_get_alloc_mutex_failed;
|
||||
|
||||
/*
|
||||
* Since a binder_alloc can only be mapped once, we ensure
|
||||
* the vma corresponds to this mapping by checking whether
|
||||
|
@ -1183,7 +1192,10 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
|
|||
}
|
||||
|
||||
mutex_unlock(&alloc->mutex);
|
||||
if (mm_locked)
|
||||
mmap_read_unlock(mm);
|
||||
else
|
||||
vma_end_read(vma);
|
||||
mmput_async(mm);
|
||||
binder_free_page(page_to_free);
|
||||
|
||||
|
@ -1192,7 +1204,10 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
|
|||
err_invalid_vma:
|
||||
mutex_unlock(&alloc->mutex);
|
||||
err_get_alloc_mutex_failed:
|
||||
if (mm_locked)
|
||||
mmap_read_unlock(mm);
|
||||
else
|
||||
vma_end_read(vma);
|
||||
err_mmap_read_lock_failed:
|
||||
mmput_async(mm);
|
||||
err_mmget:
|
||||
|
|
Loading…
Add table
Reference in a new issue