mm/vmalloc.c: allow vread() to read out vm_map_ram areas
Currently, vread() can read out vmalloc areas that are associated with a vm_struct. It does not work for areas created by the vm_map_ram() interface, because they have no associated vm_struct; in vread(), those areas are simply skipped.

Here, add a new function vmap_ram_vread() to read out vm_map_ram areas. An area created directly with the vm_map_ram() interface can be handled like any other normal vmap area, via aligned_vread(). Areas that are further subdivided and managed through a vmap_block, however, must be carefully read out as page-aligned small regions, with the holes between regions zero-filled.

Link: https://lkml.kernel.org/r/20230206084020.174506-4-bhe@redhat.com
Reported-by: Stephen Brennan <stephen.s.brennan@oracle.com>
Signed-off-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
Tested-by: Stephen Brennan <stephen.s.brennan@oracle.com>
Cc: Dan Carpenter <error27@gmail.com>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 869176a096
commit 06c8994626

1 changed file with 81 additions and 7 deletions

mm/vmalloc.c
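For context, the distinction the commit message draws can be sketched as follows (an illustrative, module-style fragment, not part of the patch; demo_map() and its simplified error handling are invented for the example): vmalloc() registers a struct vm_struct that vread() can look up, while vm_map_ram() builds its mapping with only a vmap_area, plus a vmap_block for small allocations, which vread() used to skip.

```c
/* Illustrative sketch only, not from the patch; assumes a kernel
 * module context and simplified error handling. */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void demo_map(struct page **pages, unsigned int npages)
{
	void *p;

	/* vmalloc() areas carry a vm_struct, so vread() always found them. */
	p = vmalloc(npages * PAGE_SIZE);
	vfree(p);

	/* vm_map_ram() areas have no vm_struct; before this patch,
	 * vread() skipped them and callers saw them as holes. */
	p = vm_map_ram(pages, npages, NUMA_NO_NODE);
	if (p)
		vm_unmap_ram(p, npages);
}
```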
@@ -3463,6 +3463,68 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
 	return copied;
 }
 
+static void vmap_ram_vread(char *buf, char *addr, int count, unsigned long flags)
+{
+	char *start;
+	struct vmap_block *vb;
+	unsigned long offset;
+	unsigned int rs, re, n;
+
+	/*
+	 * If it's area created by vm_map_ram() interface directly, but
+	 * not further subdividing and delegating management to vmap_block,
+	 * handle it here.
+	 */
+	if (!(flags & VMAP_BLOCK)) {
+		aligned_vread(buf, addr, count);
+		return;
+	}
+
+	/*
+	 * Area is split into regions and tracked with vmap_block, read out
+	 * each region and zero fill the hole between regions.
+	 */
+	vb = xa_load(&vmap_blocks, addr_to_vb_idx((unsigned long)addr));
+	if (!vb)
+		goto finished;
+
+	spin_lock(&vb->lock);
+	if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
+		spin_unlock(&vb->lock);
+		goto finished;
+	}
+	for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
+		if (!count)
+			break;
+		start = vmap_block_vaddr(vb->va->va_start, rs);
+		while (addr < start) {
+			if (count == 0)
+				goto unlock;
+			*buf = '\0';
+			buf++;
+			addr++;
+			count--;
+		}
+		/* it could start reading from the middle of a used region */
+		offset = offset_in_page(addr);
+		n = ((re - rs + 1) << PAGE_SHIFT) - offset;
+		if (n > count)
+			n = count;
+		aligned_vread(buf, start+offset, n);
+
+		buf += n;
+		addr += n;
+		count -= n;
+	}
+unlock:
+	spin_unlock(&vb->lock);
+
+finished:
+	/* zero-fill the left dirty or free regions */
+	if (count)
+		memset(buf, 0, count);
+}
+
 /**
  * vread() - read vmalloc area in a safe way.
  * @buf: buffer for reading data
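The VMAP_BLOCK branch above is the subtle part: only the ranges set in vb->used_map may be copied, and the gaps between them must read back as zeroes so the destination buffer stays contiguous. A rough standalone C model of that walk follows (hypothetical; a plain array stands in for the used_map bitmap, memcpy() for aligned_vread(), and it steps page by page instead of by bit-range):

```c
#include <stdio.h>
#include <string.h>

#define NPAGES 8
#define PAGESZ 16                     /* toy "page" size */

/* toy used_map: pages 1-2 and 5 hold live data */
static const int used[NPAGES] = { 0, 1, 1, 0, 0, 1, 0, 0 };

/* Copy used pages from src into buf and zero-fill the holes,
 * mimicking what vmap_ram_vread() does for a vmap_block. */
static void block_read(char *buf, const char *src, int count)
{
	for (int pg = 0; pg < NPAGES && count > 0; pg++) {
		int n = count < PAGESZ ? count : PAGESZ;

		if (used[pg])
			memcpy(buf, src + pg * PAGESZ, n);
		else
			memset(buf, 0, n);    /* hole between regions */
		buf += n;
		count -= n;
	}
	if (count > 0)                        /* past the last page */
		memset(buf, 0, count);
}

int main(void)
{
	char src[NPAGES * PAGESZ], dst[NPAGES * PAGESZ];

	memset(src, 'A', sizeof(src));
	block_read(dst, src, sizeof(dst));
	for (int pg = 0; pg < NPAGES; pg++)
		printf("page %d: %s\n", pg, dst[pg * PAGESZ] ? "data" : "zero");
	return 0;
}
```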
@@ -3493,7 +3555,7 @@ long vread(char *buf, char *addr, unsigned long count)
 	struct vm_struct *vm;
 	char *vaddr, *buf_start = buf;
 	unsigned long buflen = count;
-	unsigned long n;
+	unsigned long n, size, flags;
 
 	addr = kasan_reset_tag(addr);
 
@@ -3514,12 +3576,21 @@ long vread(char *buf, char *addr, unsigned long count)
 		if (!count)
 			break;
 
-		if (!va->vm)
+		vm = va->vm;
+		flags = va->flags & VMAP_FLAGS_MASK;
+		/*
+		 * VMAP_BLOCK indicates a sub-type of vm_map_ram area, need
+		 * be set together with VMAP_RAM.
+		 */
+		WARN_ON(flags == VMAP_BLOCK);
+
+		if (!vm && !flags)
 			continue;
 
-		vm = va->vm;
-		vaddr = (char *) vm->addr;
-		if (addr >= vaddr + get_vm_area_size(vm))
+		vaddr = (char *) va->va_start;
+		size = vm ? get_vm_area_size(vm) : va_size(va);
+
+		if (addr >= vaddr + size)
 			continue;
 		while (addr < vaddr) {
 			if (count == 0)
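The flags logic here leans on the encoding introduced by the parent commit 869176a096, which marks vm_map_ram areas with flag bits on the vmap_area: VMAP_BLOCK is only ever set together with VMAP_RAM, which is why a bare VMAP_BLOCK triggers the WARN_ON() above. For reference, the definitions in mm/vmalloc.c look like this (values as used in this series; comments paraphrased):

```c
#define VMAP_RAM        0x1  /* area was created by vm_map_ram() */
#define VMAP_BLOCK      0x2  /* sub-type: managed via a vmap_block */
#define VMAP_FLAGS_MASK 0x3
```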
@@ -3529,10 +3600,13 @@ long vread(char *buf, char *addr, unsigned long count)
 			addr++;
 			count--;
 		}
-		n = vaddr + get_vm_area_size(vm) - addr;
+		n = vaddr + size - addr;
 		if (n > count)
 			n = count;
-		if (!(vm->flags & VM_IOREMAP))
+
+		if (flags & VMAP_RAM)
+			vmap_ram_vread(buf, addr, n, flags);
+		else if (!(vm->flags & VM_IOREMAP))
 			aligned_vread(buf, addr, n);
 		else /* IOREMAP area is treated as memory hole */
 			memset(buf, 0, n);
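One place the change is visible is /proc/kcore, whose read path copies vmalloc-range data with vread(); after this patch, debuggers that read kernel memory through it see vm_map_ram data instead of skipped ranges. A minimal, hypothetical userspace probe (it only confirms the interface is readable, not the vmalloc contents):

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char ehdr[64];
	int fd = open("/proc/kcore", O_RDONLY);   /* typically needs root */

	if (fd < 0) {
		perror("open /proc/kcore");
		return 1;
	}
	/* /proc/kcore is an ELF core image; reads of its vmalloc
	 * segments go through vread() in the kernel. */
	if (read(fd, ehdr, sizeof(ehdr)) == (ssize_t)sizeof(ehdr) &&
	    ehdr[0] == 0x7f && ehdr[1] == 'E' && ehdr[2] == 'L' && ehdr[3] == 'F')
		puts("/proc/kcore: valid ELF core header");
	close(fd);
	return 0;
}
```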