x86/arch_prctl/vdso: Add ARCH_MAP_VDSO_*
Add an API to change the vdso blob type with arch_prctl(). As this is
useful only for the needs of CRIU, expose the interface under
CONFIG_CHECKPOINT_RESTORE.

Signed-off-by: Dmitry Safonov <dsafonov@virtuozzo.com>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: 0x7f454c46@gmail.com
Cc: oleg@redhat.com
Cc: linux-mm@kvack.org
Cc: gorcunov@openvz.org
Cc: xemul@virtuozzo.com
Link: http://lkml.kernel.org/r/20160905133308.28234-4-dsafonov@virtuozzo.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 576ebfefd3
commit 2eefd87896

6 changed files with 78 additions and 10 deletions
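For illustration, a minimal userspace sketch of the new interface (not part of the patch): the mapping hint address and the fallback define are assumptions, the codes only exist on kernels built with CONFIG_CHECKPOINT_RESTORE, and there is no glibc wrapper for these arch_prctl() requests, so the raw syscall is used. On success the call returns the size of the mapped vdso blob; it fails with EEXIST if the task already has a vdso/vvar mapping (see map_vdso_once() below).

/*
 * Minimal sketch, not from this patch. The hint address is hypothetical and
 * ARCH_MAP_VDSO_32 mirrors the value added to asm/prctl.h by this change.
 */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef ARCH_MAP_VDSO_32
# define ARCH_MAP_VDSO_32	0x2002	/* exported only under CONFIG_CHECKPOINT_RESTORE */
#endif

int main(void)
{
	unsigned long addr = 0x70000000UL;	/* hypothetical mapping hint */
	long ret;

	/* No glibc wrapper for these codes; issue the raw syscall. */
	ret = syscall(SYS_arch_prctl, ARCH_MAP_VDSO_32, addr);
	if (ret < 0) {
		/* EEXIST means the task already has a vdso/vvar mapping. */
		perror("arch_prctl(ARCH_MAP_VDSO_32)");
		return 1;
	}

	/* On success the return value is the size of the mapped vdso blob. */
	printf("vdso (%ld bytes) mapped near %#lx\n", ret, addr);
	return 0;
}

Note that because the new constants are guarded by CONFIG_CHECKPOINT_RESTORE in the uapi header, a caller may need to provide its own definition, as the fallback above does.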
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -176,6 +176,16 @@ static int vvar_fault(const struct vm_special_mapping *sm,
 	return VM_FAULT_SIGBUS;
 }
 
+static const struct vm_special_mapping vdso_mapping = {
+	.name = "[vdso]",
+	.fault = vdso_fault,
+	.mremap = vdso_mremap,
+};
+static const struct vm_special_mapping vvar_mapping = {
+	.name = "[vvar]",
+	.fault = vvar_fault,
+};
+
 /*
  * Add vdso and vvar mappings to current process.
  * @image - blob to map
@@ -188,16 +198,6 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
 	unsigned long text_start;
 	int ret = 0;
 
-	static const struct vm_special_mapping vdso_mapping = {
-		.name = "[vdso]",
-		.fault = vdso_fault,
-		.mremap = vdso_mremap,
-	};
-	static const struct vm_special_mapping vvar_mapping = {
-		.name = "[vvar]",
-		.fault = vvar_fault,
-	};
-
 	if (down_write_killable(&mm->mmap_sem))
 		return -EINTR;
 
@@ -256,6 +256,31 @@ static int map_vdso_randomized(const struct vdso_image *image)
 	return map_vdso(image, addr);
 }
 
+int map_vdso_once(const struct vdso_image *image, unsigned long addr)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+
+	down_write(&mm->mmap_sem);
+	/*
+	 * Check if the vdso blob is already mapped - fail to prevent
+	 * userspace from abusing install_special_mapping(), which may
+	 * not do accounting and rlimit right.
+	 * We could search the VMA near context.vdso, but it's a slowpath,
+	 * so let's explicitly check all VMAs to be completely sure.
+	 */
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (vma_is_special_mapping(vma, &vdso_mapping) ||
+				vma_is_special_mapping(vma, &vvar_mapping)) {
+			up_write(&mm->mmap_sem);
+			return -EEXIST;
+		}
+	}
+	up_write(&mm->mmap_sem);
+
+	return map_vdso(image, addr);
+}
+
 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
 static int load_vdso32(void)
 {
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -41,6 +41,8 @@ extern const struct vdso_image vdso_image_32;
 
 extern void __init init_vdso_image(const struct vdso_image *image);
 
+extern int map_vdso_once(const struct vdso_image *image, unsigned long addr);
+
 #endif /* __ASSEMBLER__ */
 
 #endif /* _ASM_X86_VDSO_H */
--- a/arch/x86/include/uapi/asm/prctl.h
+++ b/arch/x86/include/uapi/asm/prctl.h
@@ -6,4 +6,10 @@
 #define ARCH_GET_FS 0x1003
 #define ARCH_GET_GS 0x1004
 
+#ifdef CONFIG_CHECKPOINT_RESTORE
+# define ARCH_MAP_VDSO_X32	0x2001
+# define ARCH_MAP_VDSO_32	0x2002
+# define ARCH_MAP_VDSO_64	0x2003
+#endif
+
 #endif /* _ASM_X86_PRCTL_H */
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -49,6 +49,7 @@
 #include <asm/debugreg.h>
 #include <asm/switch_to.h>
 #include <asm/xen/hypervisor.h>
+#include <asm/vdso.h>
 
 asmlinkage extern void ret_from_fork(void);
 
@@ -524,6 +525,17 @@ void set_personality_ia32(bool x32)
 }
 EXPORT_SYMBOL_GPL(set_personality_ia32);
 
+static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
+{
+	int ret;
+
+	ret = map_vdso_once(image, addr);
+	if (ret)
+		return ret;
+
+	return (long)image->size;
+}
+
 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 {
 	int ret = 0;
@@ -577,6 +589,19 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		break;
 	}
 
+#ifdef CONFIG_CHECKPOINT_RESTORE
+# ifdef CONFIG_X86_X32
+	case ARCH_MAP_VDSO_X32:
+		return prctl_map_vdso(&vdso_image_x32, addr);
+# endif
+# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
+	case ARCH_MAP_VDSO_32:
+		return prctl_map_vdso(&vdso_image_32, addr);
+# endif
+	case ARCH_MAP_VDSO_64:
+		return prctl_map_vdso(&vdso_image_64, addr);
+#endif
+
 	default:
 		ret = -EINVAL;
 		break;
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2019,6 +2019,8 @@ extern struct file *get_task_exe_file(struct task_struct *task);
 extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
 
+extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
+				   const struct vm_special_mapping *sm);
 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
 				   unsigned long addr, unsigned long len,
 				   unsigned long flags,
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3063,6 +3063,14 @@ out:
 	return ERR_PTR(ret);
 }
 
+bool vma_is_special_mapping(const struct vm_area_struct *vma,
+	const struct vm_special_mapping *sm)
+{
+	return vma->vm_private_data == sm &&
+		(vma->vm_ops == &special_mapping_vmops ||
+		 vma->vm_ops == &legacy_special_mapping_vmops);
+}
+
 /*
  * Called with mm->mmap_sem held for writing.
  * Insert a new vma covering the given region, with the given flags.