Introduce a static branch that is set during boot if the OS happens to be a KVM guest. Subsequent checks to see if we are on KVM will rely on this static branch. This static branch will be used in vcpu_is_preempted() in a subsequent patch.

Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Acked-by: Waiman Long <longman@redhat.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20201202050456.164005-4-srikar@linux.vnet.ibm.com
25 lines · 572 B · C
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
/*
|
|
* Copyright (C) 2020 IBM Corporation
|
|
*/
|
|
|
|
#ifndef _ASM_POWERPC_KVM_GUEST_H_
|
|
#define _ASM_POWERPC_KVM_GUEST_H_
|
|
|
|
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_GUEST)
|
|
#include <linux/jump_label.h>
|
|
|
|
DECLARE_STATIC_KEY_FALSE(kvm_guest);
|
|
|
|
static inline bool is_kvm_guest(void)
|
|
{
|
|
return static_branch_unlikely(&kvm_guest);
|
|
}
|
|
|
|
bool check_kvm_guest(void);
|
|
#else
|
|
static inline bool is_kvm_guest(void) { return false; }
|
|
static inline bool check_kvm_guest(void) { return false; }
|
|
#endif
|
|
|
|
#endif /* _ASM_POWERPC_KVM_GUEST_H_ */
|