KVM: x86/mmu: Ensure NX huge page recovery thread is alive before waking
When waking a VM's NX huge page recovery thread, ensure the thread is
actually alive before trying to wake it. Now that the thread is spawned
on-demand during KVM_RUN, a VM without a recovery thread is reachable via
the related module params.
BUG: kernel NULL pointer dereference, address: 0000000000000040
#PF: supervisor read access in kernel mode
#PF: error_code(0x0000) - not-present page
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
RIP: 0010:vhost_task_wake+0x5/0x10
Call Trace:
<TASK>
set_nx_huge_pages+0xcc/0x1e0 [kvm]
param_attr_store+0x8a/0xd0
module_attr_store+0x1a/0x30
kernfs_fop_write_iter+0x12f/0x1e0
vfs_write+0x233/0x3e0
ksys_write+0x60/0xd0
do_syscall_64+0x5b/0x160
entry_SYSCALL_64_after_hwframe+0x4b/0x53
RIP: 0033:0x7f3b52710104
</TASK>
Modules linked in: kvm_intel kvm
CR2: 0000000000000040
Fixes: 931656b9e2 ("kvm: defer huge page recovery vhost task to later")
Cc: stable@vger.kernel.org
Cc: Keith Busch <kbusch@kernel.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-ID: <20250124234623.3609069-1-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
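
For context, the oops above can be reached without ever running a vCPU. The
following is a hypothetical userspace repro sketch, not taken from the commit:
it assumes root, a pre-fix kernel, and omits all error handling. It creates a
bare VM (which joins vm_list) and then toggles the nx_huge_pages module param,
driving set_nx_huge_pages() to wake a recovery thread that was never spawned.

  #include <fcntl.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/kvm.h>

  int main(void)
  {
  	int kvm_fd = open("/dev/kvm", O_RDWR);
  	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);	/* VM joins vm_list */

  	/* No vCPU ever runs, so the NX recovery vhost task is never created. */
  	int param = open("/sys/module/kvm/parameters/nx_huge_pages", O_WRONLY);

  	/*
  	 * set_nx_huge_pages() only walks vm_list when the value actually
  	 * changes; writing both values guarantees at least one transition.
  	 * On a pre-fix kernel, one of these writes oopses in
  	 * vhost_task_wake(NULL).
  	 */
  	write(param, "on", 2);
  	write(param, "off", 3);

  	close(param);
  	close(vm_fd);
  	close(kvm_fd);
  	return 0;
  }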
commit 43fb96ae78
parent 6f61269495

1 changed file with 26 additions and 7 deletions
@@ -7120,6 +7120,19 @@ static void mmu_destroy_caches(void)
 	kmem_cache_destroy(mmu_page_header_cache);
 }
 
+static void kvm_wake_nx_recovery_thread(struct kvm *kvm)
+{
+	/*
+	 * The NX recovery thread is spawned on-demand at the first KVM_RUN and
+	 * may not be valid even though the VM is globally visible. Do nothing,
+	 * as such a VM can't have any possible NX huge pages.
+	 */
+	struct vhost_task *nx_thread = READ_ONCE(kvm->arch.nx_huge_page_recovery_thread);
+
+	if (nx_thread)
+		vhost_task_wake(nx_thread);
+}
+
 static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
 {
 	if (nx_hugepage_mitigation_hard_disabled)
@@ -7180,7 +7193,7 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 			kvm_mmu_zap_all_fast(kvm);
 			mutex_unlock(&kvm->slots_lock);
 
-			vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
+			kvm_wake_nx_recovery_thread(kvm);
 		}
 		mutex_unlock(&kvm_lock);
 	}
@@ -7315,7 +7328,7 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel
 	mutex_lock(&kvm_lock);
 
 	list_for_each_entry(kvm, &vm_list, vm_list)
-		vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
+		kvm_wake_nx_recovery_thread(kvm);
 
 	mutex_unlock(&kvm_lock);
 }
@@ -7451,14 +7464,20 @@ static void kvm_mmu_start_lpage_recovery(struct once *once)
 {
 	struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
 	struct kvm *kvm = container_of(ka, struct kvm, arch);
+	struct vhost_task *nx_thread;
 
 	kvm->arch.nx_huge_page_last = get_jiffies_64();
-	kvm->arch.nx_huge_page_recovery_thread = vhost_task_create(
-		kvm_nx_huge_page_recovery_worker, kvm_nx_huge_page_recovery_worker_kill,
-		kvm, "kvm-nx-lpage-recovery");
+	nx_thread = vhost_task_create(kvm_nx_huge_page_recovery_worker,
+				      kvm_nx_huge_page_recovery_worker_kill,
+				      kvm, "kvm-nx-lpage-recovery");
 
-	if (kvm->arch.nx_huge_page_recovery_thread)
-		vhost_task_start(kvm->arch.nx_huge_page_recovery_thread);
+	if (!nx_thread)
+		return;
+
+	vhost_task_start(nx_thread);
+
+	/* Make the task visible only once it is fully started. */
+	WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
 }
 
 int kvm_mmu_post_init_vm(struct kvm *kvm)
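
The fix's ordering contract is: fully start the vhost task, only then publish
the pointer with WRITE_ONCE(), and make every waker READ_ONCE() the pointer
and tolerate NULL. The same publish/consume pattern can be shown outside the
kernel; below is a hypothetical userspace analogue using C11 atomics in place
of WRITE_ONCE()/READ_ONCE(), with all names invented for illustration.

  #include <stdatomic.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct worker {
  	int started;
  };

  static _Atomic(struct worker *) recovery_worker;	/* NULL until published */

  static void start_recovery_worker(void)
  {
  	struct worker *w = malloc(sizeof(*w));

  	if (!w)
  		return;			/* creation can fail; pointer stays NULL */

  	w->started = 1;			/* fully initialize first ... */
  	atomic_store_explicit(&recovery_worker, w, memory_order_release);
  }					/* ... then publish */

  static void wake_recovery_worker(void)
  {
  	struct worker *w = atomic_load_explicit(&recovery_worker,
  						memory_order_acquire);

  	if (w)				/* not spawned yet: nothing to wake */
  		printf("waking worker (started=%d)\n", w->started);
  }

  int main(void)
  {
  	wake_recovery_worker();		/* safe no-op: no worker yet */
  	start_recovery_worker();
  	wake_recovery_worker();		/* now wakes a fully started worker */
  	return 0;
  }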