Revert "android: binder: stop saving a pointer to the VMA"
This reverts commit a43cfc87ca ("android: binder: stop saving a pointer
to the VMA").

That patch fixed an issue reported by syzkaller in [1]. However, it
turned out to be only a band-aid in binder. The root cause, as bisected
by syzkaller, was fixed by commit 5789151e48 ("mm/mmap: undo ->mmap()
when mas_preallocate() fails"), so binder no longer needs the
workaround. Reverting it restores lockless access to alloc->vma in the
specific cases where the mmap_lock is not required, which avoids the
contention that caused a performance regression.

[1] https://lore.kernel.org/all/0000000000004a0dbe05e1d749e0@google.com

[cmllamas: resolved conflicts with the rework of alloc->mm and the
removal of binder_alloc_set_vma(); also fixed the comment section]

Fixes: a43cfc87ca ("android: binder: stop saving a pointer to the VMA")
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20230502201220.1756319-2-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b15655b12d
commit c0fd210178

3 changed files with 12 additions and 11 deletions
drivers/android/binder_alloc.c
@@ -213,7 +213,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 
 	if (mm) {
 		mmap_read_lock(mm);
-		vma = vma_lookup(mm, alloc->vma_addr);
+		vma = alloc->vma;
 	}
 
 	if (!vma && need_mm) {
@@ -314,9 +314,11 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
 {
 	struct vm_area_struct *vma = NULL;
 
-	if (alloc->vma_addr)
-		vma = vma_lookup(alloc->mm, alloc->vma_addr);
-
+	if (alloc->vma) {
+		/* Look at description in binder_alloc_set_vma */
+		smp_rmb();
+		vma = alloc->vma;
+	}
 	return vma;
 }
 
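The restored comment above points at binder_alloc_set_vma(), which no
longer exists in this tree (see the backport note in the commit
message). In the code that this revert brings back, the write side it
describes looked roughly like the sketch below; treat it as a
reconstruction for illustration, not the exact removed helper:

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
					struct vm_area_struct *vma)
{
	/*
	 * If a reader sees alloc->vma != NULL, the allocator's buffer
	 * data structures are guaranteed to be fully set up; pairs
	 * with the smp_rmb() in binder_alloc_get_vma().
	 */
	if (vma)
		smp_mb();
	alloc->vma = vma;
}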
@@ -775,7 +777,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	buffer->free = 1;
 	binder_insert_free_buffer(alloc, buffer);
 	alloc->free_async_space = alloc->buffer_size / 2;
-	alloc->vma_addr = vma->vm_start;
+	alloc->vma = vma;
 
 	return 0;
 
@@ -805,8 +807,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 
 	buffers = 0;
 	mutex_lock(&alloc->mutex);
-	BUG_ON(alloc->vma_addr &&
-	       vma_lookup(alloc->mm, alloc->vma_addr));
+	BUG_ON(alloc->vma);
 
 	while ((n = rb_first(&alloc->allocated_buffers))) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -958,7 +959,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-	alloc->vma_addr = 0;
+	alloc->vma = 0;
 }
 
 /**
drivers/android/binder_alloc.h
@@ -75,7 +75,7 @@ struct binder_lru_page {
 /**
  * struct binder_alloc - per-binder proc state for binder allocator
  * @mutex:              protects binder_alloc fields
- * @vma_addr:           vm_area_struct->vm_start passed to mmap_handler
+ * @vma:                vm_area_struct passed to mmap_handler
  *                      (invariant after mmap)
  * @mm:                 copy of task->mm (invariant after open)
  * @buffer:             base of per-proc address space mapped via mmap
@@ -99,7 +99,7 @@ struct binder_lru_page {
  */
 struct binder_alloc {
 	struct mutex mutex;
-	unsigned long vma_addr;
+	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 	void __user *buffer;
 	struct list_head buffers;
drivers/android/binder_alloc_selftest.c
@@ -287,7 +287,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
 	if (!binder_selftest_run)
 		return;
 	mutex_lock(&binder_selftest_lock);
-	if (!binder_selftest_run || !alloc->vma_addr)
+	if (!binder_selftest_run || !alloc->vma)
 		goto done;
 	pr_info("STARTED\n");
 	binder_selftest_alloc_offset(alloc, end_offset, 0);