
Merge branch kvm-arm64/pkvm-memshare-declutter into kvmarm-master/next

* kvm-arm64/pkvm-memshare-declutter:
  : .
  : pKVM memory transition simplifications, courtesy of Quentin Perret.
  :
  : From the cover letter:
  : "Since its early days, pKVM has formalized memory 'transitions' (shares
  : and donations) using 'struct pkvm_mem_transition' and a bunch of helpers
  : to manipulate it. The intention was for all transitions to use this
  : machinery to ensure we're checking things consistently. However, as
  : development progressed, it became clear that the rigidity of this model
  : made it really difficult to use in some use cases, which ended up
  : side-stepping it entirely. That is the case for the
  : hyp_{un}pin_shared_mem() and host_{un}share_guest() paths upstream which
  : use lower level helpers directly, as well as for several other pKVM
  : features that should land upstream in the future (ex: when a guest
  : relinquishes a page during ballooning, when annotating a page that is
  : being DMA'd to, ...). On top of this, the pkvm_mem_transition machinery
  : requires a lot of boilerplate which makes the code hard to read, but
  : also adds layers of indirection that no compiler seems to see through,
  : hence leading to suboptimal generated code.
  :
  : Given all the above, this series removes the pkvm_mem_transition
  : machinery from mem_protect.c, and converts all its users to use
  : __*_{check,set}_page_state_range() low-level helpers directly."
  : .
  KVM: arm64: Drop pkvm_mem_transition for host/hyp donations
  KVM: arm64: Drop pkvm_mem_transition for host/hyp sharing
  KVM: arm64: Drop pkvm_mem_transition for FF-A
  KVM: arm64: Only apply PMCR_EL0.P to the guest range of counters
  KVM: arm64: nv: Reload PMU events upon MDCR_EL2.HPME change
  KVM: arm64: Use KVM_REQ_RELOAD_PMU to handle PMCR_EL0.E change
  KVM: arm64: Add unified helper for reprogramming counters by mask
  KVM: arm64: Always check the state from hyp_ack_unshare()
  KVM: arm64: Fix set_id_regs selftest for ASIDBITS becoming unwritable

Signed-off-by: Marc Zyngier <maz@kernel.org>
Marc Zyngier, 2025-01-17 11:05:18 +00:00
commit 5e68d2eeac
5 changed files with 139 additions and 634 deletions
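
The pattern that replaces the machinery is easiest to see in the FF-A paths below: check the initiator's page state under the component lock, optionally check the completer's state (in several paths only when CONFIG_NVHE_EL2_DEBUG is enabled), then apply the new state. A minimal sketch of that shape — example_host_share_range() is illustrative only and not a function added by this merge; the helpers it calls are the ones the diff converts to:

static int example_host_share_range(u64 phys, u64 size)
{
        int ret;

        host_lock_component();

        /* 1. Verify the pages are in the expected starting state. */
        ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
        /*
         * 2. Completer-side checks and mappings would go here when the
         *    completer has a page-table of its own (see
         *    __pkvm_host_share_hyp() below).
         */
        /* 3. Apply the new state. */
        if (!ret)
                ret = __host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);

        host_unlock_component();

        return ret;
}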

arch/arm64/kvm/hyp/nvhe/mem_protect.c

@@ -583,39 +583,6 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
 	BUG_ON(ret && ret != -EAGAIN);
 }
 
-struct pkvm_mem_transition {
-	u64 nr_pages;
-
-	struct {
-		enum pkvm_component_id id;
-		/* Address in the initiator's address space */
-		u64 addr;
-
-		union {
-			struct {
-				/* Address in the completer's address space */
-				u64 completer_addr;
-			} host;
-			struct {
-				u64 completer_addr;
-			} hyp;
-		};
-	} initiator;
-
-	struct {
-		enum pkvm_component_id id;
-	} completer;
-};
-
-struct pkvm_mem_share {
-	const struct pkvm_mem_transition tx;
-	const enum kvm_pgtable_prot completer_prot;
-};
-
-struct pkvm_mem_donation {
-	const struct pkvm_mem_transition tx;
-};
-
 struct check_walk_data {
 	enum pkvm_page_state desired;
 	enum pkvm_page_state (*get_page_state)(kvm_pte_t pte, u64 addr);
@@ -675,86 +642,6 @@ static int __host_set_page_state_range(u64 addr, u64 size,
 	return 0;
 }
 
-static int host_request_owned_transition(u64 *completer_addr,
-					 const struct pkvm_mem_transition *tx)
-{
-	u64 size = tx->nr_pages * PAGE_SIZE;
-	u64 addr = tx->initiator.addr;
-
-	*completer_addr = tx->initiator.host.completer_addr;
-	return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
-}
-
-static int host_request_unshare(u64 *completer_addr,
-				const struct pkvm_mem_transition *tx)
-{
-	u64 size = tx->nr_pages * PAGE_SIZE;
-	u64 addr = tx->initiator.addr;
-
-	*completer_addr = tx->initiator.host.completer_addr;
-	return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
-}
-
-static int host_initiate_share(u64 *completer_addr,
-			       const struct pkvm_mem_transition *tx)
-{
-	u64 size = tx->nr_pages * PAGE_SIZE;
-	u64 addr = tx->initiator.addr;
-
-	*completer_addr = tx->initiator.host.completer_addr;
-	return __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
-}
-
-static int host_initiate_unshare(u64 *completer_addr,
-				 const struct pkvm_mem_transition *tx)
-{
-	u64 size = tx->nr_pages * PAGE_SIZE;
-	u64 addr = tx->initiator.addr;
-
-	*completer_addr = tx->initiator.host.completer_addr;
-	return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
-}
-
-static int host_initiate_donation(u64 *completer_addr,
-				  const struct pkvm_mem_transition *tx)
-{
-	u8 owner_id = tx->completer.id;
-	u64 size = tx->nr_pages * PAGE_SIZE;
-
-	*completer_addr = tx->initiator.host.completer_addr;
-	return host_stage2_set_owner_locked(tx->initiator.addr, size, owner_id);
-}
-
-static bool __host_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
-{
-	return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
-		 tx->initiator.id != PKVM_ID_HYP);
-}
-
-static int __host_ack_transition(u64 addr, const struct pkvm_mem_transition *tx,
-				 enum pkvm_page_state state)
-{
-	u64 size = tx->nr_pages * PAGE_SIZE;
-
-	if (__host_ack_skip_pgtable_check(tx))
-		return 0;
-
-	return __host_check_page_state_range(addr, size, state);
-}
-
-static int host_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
-{
-	return __host_ack_transition(addr, tx, PKVM_NOPAGE);
-}
-
-static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx)
-{
-	u64 size = tx->nr_pages * PAGE_SIZE;
-	u8 host_id = tx->completer.id;
-
-	return host_stage2_set_owner_locked(addr, size, host_id);
-}
-
 static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr)
 {
 	if (!kvm_pte_valid(pte))
@@ -775,98 +662,6 @@ static int __hyp_check_page_state_range(u64 addr, u64 size,
 	return check_page_state_range(&pkvm_pgtable, addr, size, &d);
 }
 
-static int hyp_request_donation(u64 *completer_addr,
-				const struct pkvm_mem_transition *tx)
-{
-	u64 size = tx->nr_pages * PAGE_SIZE;
-	u64 addr = tx->initiator.addr;
-
-	*completer_addr = tx->initiator.hyp.completer_addr;
-	return __hyp_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
-}
-
-static int hyp_initiate_donation(u64 *completer_addr,
-				 const struct pkvm_mem_transition *tx)
-{
-	u64 size = tx->nr_pages * PAGE_SIZE;
-	int ret;
-
-	*completer_addr = tx->initiator.hyp.completer_addr;
-	ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, tx->initiator.addr, size);
-	return (ret != size) ? -EFAULT : 0;
-}
-
-static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
-{
-	return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
-		 tx->initiator.id != PKVM_ID_HOST);
-}
-
-static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
-			 enum kvm_pgtable_prot perms)
-{
-	u64 size = tx->nr_pages * PAGE_SIZE;
-
-	if (perms != PAGE_HYP)
-		return -EPERM;
-
-	if (__hyp_ack_skip_pgtable_check(tx))
-		return 0;
-
-	return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
-}
-
-static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
-{
-	u64 size = tx->nr_pages * PAGE_SIZE;
-
-	if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
-		return -EBUSY;
-
-	if (__hyp_ack_skip_pgtable_check(tx))
-		return 0;
-
-	return __hyp_check_page_state_range(addr, size,
-					    PKVM_PAGE_SHARED_BORROWED);
-}
-
-static int hyp_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
-{
-	u64 size = tx->nr_pages * PAGE_SIZE;
-
-	if (__hyp_ack_skip_pgtable_check(tx))
-		return 0;
-
-	return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
-}
-
-static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
-			      enum kvm_pgtable_prot perms)
-{
-	void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
-	enum kvm_pgtable_prot prot;
-
-	prot = pkvm_mkstate(perms, PKVM_PAGE_SHARED_BORROWED);
-	return pkvm_create_mappings_locked(start, end, prot);
-}
-
-static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
-{
-	u64 size = tx->nr_pages * PAGE_SIZE;
-	int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size);
-
-	return (ret != size) ? -EFAULT : 0;
-}
-
-static int hyp_complete_donation(u64 addr,
-				 const struct pkvm_mem_transition *tx)
-{
-	void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
-	enum kvm_pgtable_prot prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
-
-	return pkvm_create_mappings_locked(start, end, prot);
-}
-
 static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
 {
 	if (!kvm_pte_valid(pte))
@@ -888,295 +683,31 @@ static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr,
 	return check_page_state_range(&vm->pgt, addr, size, &d);
 }
 
-static int check_share(struct pkvm_mem_share *share)
-{
-	const struct pkvm_mem_transition *tx = &share->tx;
-	u64 completer_addr;
-	int ret;
-
-	switch (tx->initiator.id) {
-	case PKVM_ID_HOST:
-		ret = host_request_owned_transition(&completer_addr, tx);
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	if (ret)
-		return ret;
-
-	switch (tx->completer.id) {
-	case PKVM_ID_HYP:
-		ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
-		break;
-	case PKVM_ID_FFA:
-		/*
-		 * We only check the host; the secure side will check the other
-		 * end when we forward the FFA call.
-		 */
-		ret = 0;
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-static int __do_share(struct pkvm_mem_share *share)
-{
-	const struct pkvm_mem_transition *tx = &share->tx;
-	u64 completer_addr;
-	int ret;
-
-	switch (tx->initiator.id) {
-	case PKVM_ID_HOST:
-		ret = host_initiate_share(&completer_addr, tx);
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	if (ret)
-		return ret;
-
-	switch (tx->completer.id) {
-	case PKVM_ID_HYP:
-		ret = hyp_complete_share(completer_addr, tx, share->completer_prot);
-		break;
-	case PKVM_ID_FFA:
-		/*
-		 * We're not responsible for any secure page-tables, so there's
-		 * nothing to do here.
-		 */
-		ret = 0;
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-/*
- * do_share():
- *
- * The page owner grants access to another component with a given set
- * of permissions.
- *
- * Initiator: OWNED => SHARED_OWNED
- * Completer: NOPAGE => SHARED_BORROWED
- */
-static int do_share(struct pkvm_mem_share *share)
-{
-	int ret;
-
-	ret = check_share(share);
-	if (ret)
-		return ret;
-
-	return WARN_ON(__do_share(share));
-}
-
-static int check_unshare(struct pkvm_mem_share *share)
-{
-	const struct pkvm_mem_transition *tx = &share->tx;
-	u64 completer_addr;
-	int ret;
-
-	switch (tx->initiator.id) {
-	case PKVM_ID_HOST:
-		ret = host_request_unshare(&completer_addr, tx);
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	if (ret)
-		return ret;
-
-	switch (tx->completer.id) {
-	case PKVM_ID_HYP:
-		ret = hyp_ack_unshare(completer_addr, tx);
-		break;
-	case PKVM_ID_FFA:
-		/* See check_share() */
-		ret = 0;
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-static int __do_unshare(struct pkvm_mem_share *share)
-{
-	const struct pkvm_mem_transition *tx = &share->tx;
-	u64 completer_addr;
-	int ret;
-
-	switch (tx->initiator.id) {
-	case PKVM_ID_HOST:
-		ret = host_initiate_unshare(&completer_addr, tx);
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	if (ret)
-		return ret;
-
-	switch (tx->completer.id) {
-	case PKVM_ID_HYP:
-		ret = hyp_complete_unshare(completer_addr, tx);
-		break;
-	case PKVM_ID_FFA:
-		/* See __do_share() */
-		ret = 0;
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-/*
- * do_unshare():
- *
- * The page owner revokes access from another component for a range of
- * pages which were previously shared using do_share().
- *
- * Initiator: SHARED_OWNED => OWNED
- * Completer: SHARED_BORROWED => NOPAGE
- */
-static int do_unshare(struct pkvm_mem_share *share)
-{
-	int ret;
-
-	ret = check_unshare(share);
-	if (ret)
-		return ret;
-
-	return WARN_ON(__do_unshare(share));
-}
-
-static int check_donation(struct pkvm_mem_donation *donation)
-{
-	const struct pkvm_mem_transition *tx = &donation->tx;
-	u64 completer_addr;
-	int ret;
-
-	switch (tx->initiator.id) {
-	case PKVM_ID_HOST:
-		ret = host_request_owned_transition(&completer_addr, tx);
-		break;
-	case PKVM_ID_HYP:
-		ret = hyp_request_donation(&completer_addr, tx);
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	if (ret)
-		return ret;
-
-	switch (tx->completer.id) {
-	case PKVM_ID_HOST:
-		ret = host_ack_donation(completer_addr, tx);
-		break;
-	case PKVM_ID_HYP:
-		ret = hyp_ack_donation(completer_addr, tx);
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-static int __do_donate(struct pkvm_mem_donation *donation)
-{
-	const struct pkvm_mem_transition *tx = &donation->tx;
-	u64 completer_addr;
-	int ret;
-
-	switch (tx->initiator.id) {
-	case PKVM_ID_HOST:
-		ret = host_initiate_donation(&completer_addr, tx);
-		break;
-	case PKVM_ID_HYP:
-		ret = hyp_initiate_donation(&completer_addr, tx);
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	if (ret)
-		return ret;
-
-	switch (tx->completer.id) {
-	case PKVM_ID_HOST:
-		ret = host_complete_donation(completer_addr, tx);
-		break;
-	case PKVM_ID_HYP:
-		ret = hyp_complete_donation(completer_addr, tx);
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-/*
- * do_donate():
- *
- * The page owner transfers ownership to another component, losing access
- * as a consequence.
- *
- * Initiator: OWNED => NOPAGE
- * Completer: NOPAGE => OWNED
- */
-static int do_donate(struct pkvm_mem_donation *donation)
-{
-	int ret;
-
-	ret = check_donation(donation);
-	if (ret)
-		return ret;
-
-	return WARN_ON(__do_donate(donation));
-}
-
 int __pkvm_host_share_hyp(u64 pfn)
 {
+	u64 phys = hyp_pfn_to_phys(pfn);
+	void *virt = __hyp_va(phys);
+	enum kvm_pgtable_prot prot;
+	u64 size = PAGE_SIZE;
 	int ret;
-	u64 host_addr = hyp_pfn_to_phys(pfn);
-	u64 hyp_addr = (u64)__hyp_va(host_addr);
-	struct pkvm_mem_share share = {
-		.tx = {
-			.nr_pages = 1,
-			.initiator = {
-				.id = PKVM_ID_HOST,
-				.addr = host_addr,
-				.host = {
-					.completer_addr = hyp_addr,
-				},
-			},
-			.completer = {
-				.id = PKVM_ID_HYP,
-			},
-		},
-		.completer_prot = PAGE_HYP,
-	};
 
 	host_lock_component();
 	hyp_lock_component();
 
-	ret = do_share(&share);
+	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
+	if (ret)
+		goto unlock;
+	if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
+		ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
+		if (ret)
+			goto unlock;
+	}
 
+	prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
+	WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
+	WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED));
+
+unlock:
 	hyp_unlock_component();
 	host_unlock_component();
@@ -1185,31 +716,29 @@ int __pkvm_host_share_hyp(u64 pfn)
 int __pkvm_host_unshare_hyp(u64 pfn)
 {
+	u64 phys = hyp_pfn_to_phys(pfn);
+	u64 virt = (u64)__hyp_va(phys);
+	u64 size = PAGE_SIZE;
 	int ret;
-	u64 host_addr = hyp_pfn_to_phys(pfn);
-	u64 hyp_addr = (u64)__hyp_va(host_addr);
-	struct pkvm_mem_share share = {
-		.tx = {
-			.nr_pages = 1,
-			.initiator = {
-				.id = PKVM_ID_HOST,
-				.addr = host_addr,
-				.host = {
-					.completer_addr = hyp_addr,
-				},
-			},
-			.completer = {
-				.id = PKVM_ID_HYP,
-			},
-		},
-		.completer_prot = PAGE_HYP,
-	};
 
 	host_lock_component();
 	hyp_lock_component();
 
-	ret = do_unshare(&share);
+	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
+	if (ret)
+		goto unlock;
+	ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_SHARED_BORROWED);
+	if (ret)
+		goto unlock;
+	if (hyp_page_count((void *)virt)) {
+		ret = -EBUSY;
+		goto unlock;
+	}
 
+	WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
+	WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_OWNED));
+
+unlock:
 	hyp_unlock_component();
 	host_unlock_component();
@@ -1218,30 +747,29 @@ int __pkvm_host_unshare_hyp(u64 pfn)
 int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
 {
+	u64 phys = hyp_pfn_to_phys(pfn);
+	u64 size = PAGE_SIZE * nr_pages;
+	void *virt = __hyp_va(phys);
+	enum kvm_pgtable_prot prot;
 	int ret;
-	u64 host_addr = hyp_pfn_to_phys(pfn);
-	u64 hyp_addr = (u64)__hyp_va(host_addr);
-	struct pkvm_mem_donation donation = {
-		.tx = {
-			.nr_pages = nr_pages,
-			.initiator = {
-				.id = PKVM_ID_HOST,
-				.addr = host_addr,
-				.host = {
-					.completer_addr = hyp_addr,
-				},
-			},
-			.completer = {
-				.id = PKVM_ID_HYP,
-			},
-		},
-	};
 
 	host_lock_component();
 	hyp_lock_component();
 
-	ret = do_donate(&donation);
+	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
+	if (ret)
+		goto unlock;
+	if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
+		ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
+		if (ret)
+			goto unlock;
+	}
 
+	prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
+	WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
+	WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HYP));
+
+unlock:
 	hyp_unlock_component();
 	host_unlock_component();
@@ -1250,30 +778,27 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
 int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
 {
+	u64 phys = hyp_pfn_to_phys(pfn);
+	u64 size = PAGE_SIZE * nr_pages;
+	u64 virt = (u64)__hyp_va(phys);
 	int ret;
-	u64 host_addr = hyp_pfn_to_phys(pfn);
-	u64 hyp_addr = (u64)__hyp_va(host_addr);
-	struct pkvm_mem_donation donation = {
-		.tx = {
-			.nr_pages = nr_pages,
-			.initiator = {
-				.id = PKVM_ID_HYP,
-				.addr = hyp_addr,
-				.hyp = {
-					.completer_addr = host_addr,
-				},
-			},
-			.completer = {
-				.id = PKVM_ID_HOST,
-			},
-		},
-	};
 
 	host_lock_component();
 	hyp_lock_component();
 
-	ret = do_donate(&donation);
+	ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_OWNED);
+	if (ret)
+		goto unlock;
+	if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
+		ret = __host_check_page_state_range(phys, size, PKVM_NOPAGE);
+		if (ret)
+			goto unlock;
+	}
 
+	WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
+	WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HOST));
+
+unlock:
 	hyp_unlock_component();
 	host_unlock_component();
@@ -1327,22 +852,14 @@ void hyp_unpin_shared_mem(void *from, void *to)
 int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
 {
+	u64 phys = hyp_pfn_to_phys(pfn);
+	u64 size = PAGE_SIZE * nr_pages;
 	int ret;
-	struct pkvm_mem_share share = {
-		.tx = {
-			.nr_pages = nr_pages,
-			.initiator = {
-				.id = PKVM_ID_HOST,
-				.addr = hyp_pfn_to_phys(pfn),
-			},
-			.completer = {
-				.id = PKVM_ID_FFA,
-			},
-		},
-	};
 
 	host_lock_component();
-	ret = do_share(&share);
+	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
+	if (!ret)
+		ret = __host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
 	host_unlock_component();
 
 	return ret;
@@ -1350,22 +867,14 @@ int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
 int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
 {
+	u64 phys = hyp_pfn_to_phys(pfn);
+	u64 size = PAGE_SIZE * nr_pages;
 	int ret;
-	struct pkvm_mem_share share = {
-		.tx = {
-			.nr_pages = nr_pages,
-			.initiator = {
-				.id = PKVM_ID_HOST,
-				.addr = hyp_pfn_to_phys(pfn),
-			},
-			.completer = {
-				.id = PKVM_ID_FFA,
-			},
-		},
-	};
 
 	host_lock_component();
-	ret = do_unshare(&share);
+	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
+	if (!ret)
+		ret = __host_set_page_state_range(phys, size, PKVM_PAGE_OWNED);
 	host_unlock_component();
 
 	return ret;

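For reference, the page-state transitions the rewritten paths above implement are unchanged; they are the ones documented in the do_share()/do_unshare()/do_donate() comments this commit deletes:

/*
 * share:    initiator: OWNED           => SHARED_OWNED
 *           completer: NOPAGE          => SHARED_BORROWED
 *
 * unshare:  initiator: SHARED_OWNED    => OWNED
 *           completer: SHARED_BORROWED => NOPAGE
 *
 * donate:   initiator: OWNED           => NOPAGE
 *           completer: NOPAGE          => OWNED
 */
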
arch/arm64/kvm/pmu-emul.c

@@ -24,6 +24,7 @@ static DEFINE_MUTEX(arm_pmus_lock);
 
 static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
+static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);
 
 static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
 {
@@ -327,48 +328,25 @@ u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
 	return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
 }
 
-/**
- * kvm_pmu_enable_counter_mask - enable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENSET register
- *
- * Call perf_event_enable to start counting the perf event
- */
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc)
 {
-	int i;
-
-	if (!kvm_vcpu_has_pmu(vcpu))
+	if (!pmc->perf_event) {
+		kvm_pmu_create_perf_event(pmc);
 		return;
-
-	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
-		return;
-
-	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
-		struct kvm_pmc *pmc;
-
-		if (!(val & BIT(i)))
-			continue;
-
-		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
-
-		if (!pmc->perf_event) {
-			kvm_pmu_create_perf_event(pmc);
-		} else {
-			perf_event_enable(pmc->perf_event);
-			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
-				kvm_debug("fail to enable perf event\n");
-		}
 	}
+
+	perf_event_enable(pmc->perf_event);
+	if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
+		kvm_debug("fail to enable perf event\n");
 }
 
-/**
- * kvm_pmu_disable_counter_mask - disable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENCLR register
- *
- * Call perf_event_disable to stop counting the perf event
- */
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc)
 {
+	if (pmc->perf_event)
+		perf_event_disable(pmc->perf_event);
+}
+
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+{
 	int i;
@@ -376,16 +354,18 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 		return;
 
 	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
-		struct kvm_pmc *pmc;
+		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
 
 		if (!(val & BIT(i)))
 			continue;
 
-		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
-
-		if (pmc->perf_event)
-			perf_event_disable(pmc->perf_event);
+		if (kvm_pmu_counter_is_enabled(pmc))
+			kvm_pmc_enable_perf_event(pmc);
+		else
+			kvm_pmc_disable_perf_event(pmc);
 	}
+
+	kvm_vcpu_pmu_restore_guest(vcpu);
 }
/*
@@ -626,27 +606,28 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 	if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
 		val &= ~ARMV8_PMU_PMCR_LP;
 
+	/* Request a reload of the PMU to enable/disable affected counters */
+	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) ^ val) & ARMV8_PMU_PMCR_E)
+		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
 	/* The reset bits don't indicate any state, and shouldn't be saved. */
 	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
 
-	if (val & ARMV8_PMU_PMCR_E) {
-		kvm_pmu_enable_counter_mask(vcpu,
-		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
-	} else {
-		kvm_pmu_disable_counter_mask(vcpu,
-		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
-	}
-
 	if (val & ARMV8_PMU_PMCR_C)
 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
 
 	if (val & ARMV8_PMU_PMCR_P) {
-		unsigned long mask = kvm_pmu_accessible_counter_mask(vcpu);
-		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
+		/*
+		 * Unlike other PMU sysregs, the controls in PMCR_EL0 always apply
+		 * to the 'guest' range of counters and never the 'hyp' range.
+		 */
+		unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu) &
+				     ~kvm_pmu_hyp_counter_mask(vcpu) &
+				     ~BIT(ARMV8_PMU_CYCLE_IDX);
+
 		for_each_set_bit(i, &mask, 32)
 			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
 	}
-
-	kvm_vcpu_pmu_restore_guest(vcpu);
 }
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
@@ -910,11 +891,11 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
 {
 	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
 
-	kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
-
 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
 	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
 	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+
+	kvm_pmu_reprogram_counter_mask(vcpu, mask);
 }
 
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)

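The deferred KVM_REQ_RELOAD_PMU request made in kvm_pmu_handle_pmcr() above is serviced on the next guest entry through the standard KVM vcpu-request mechanism. Roughly — this is a paraphrase of the request handling in arch/arm64/kvm/arm.c, not part of this diff:

static void example_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
        if (kvm_request_pending(vcpu)) {
                /* ... other requests ... */
                if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
                        kvm_vcpu_reload_pmu(vcpu); /* re-evaluates enablement via the mask */
        }
}

Deferring the reprogramming this way means a PMCR_EL0.E flip is handled once, outside the sysreg trap path, instead of eagerly walking the counters on every write.
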
arch/arm64/kvm/sys_regs.c

@@ -1093,16 +1093,14 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	mask = kvm_pmu_accessible_counter_mask(vcpu);
 	if (p->is_write) {
 		val = p->regval & mask;
-		if (r->Op2 & 0x1) {
+		if (r->Op2 & 0x1)
 			/* accessing PMCNTENSET_EL0 */
 			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
-			kvm_pmu_enable_counter_mask(vcpu, val);
-			kvm_vcpu_pmu_restore_guest(vcpu);
-		} else {
+		else
 			/* accessing PMCNTENCLR_EL0 */
 			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
-			kvm_pmu_disable_counter_mask(vcpu, val);
-		}
+
+		kvm_pmu_reprogram_counter_mask(vcpu, val);
 	} else {
 		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 	}
@@ -2460,6 +2458,26 @@ static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
 	return __el2_visibility(vcpu, rd, s1pie_visibility);
 }
 
+static bool access_mdcr(struct kvm_vcpu *vcpu,
+			struct sys_reg_params *p,
+			const struct sys_reg_desc *r)
+{
+	u64 old = __vcpu_sys_reg(vcpu, MDCR_EL2);
+
+	if (!access_rw(vcpu, p, r))
+		return false;
+
+	/*
+	 * Request a reload of the PMU to enable/disable the counters affected
+	 * by HPME.
+	 */
+	if ((old ^ __vcpu_sys_reg(vcpu, MDCR_EL2)) & MDCR_EL2_HPME)
+		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
+	return true;
+}
+
 /*
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -2999,7 +3017,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
 	EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
 	EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
-	EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
+	EL2_REG(MDCR_EL2, access_mdcr, reset_val, 0),
 	EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
 	EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
 	EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0),

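The 'guest' vs 'hyp' counter split that access_mdcr() and the PMCR comment rely on comes from MDCR_EL2.HPMN: with nested virtualization, event counters [0, HPMN) belong to the guest and [HPMN, N) are reserved for the L1 hypervisor, whose enablement HPME controls. A sketch of the arithmetic — illustrative only, paraphrasing what the in-tree kvm_pmu_hyp_counter_mask() helper computes, and assuming hpmn <= nr_counters:

static u64 example_hyp_counter_mask(u64 nr_counters, u64 hpmn)
{
        /* Counters in [hpmn, nr_counters) are reserved for the L1 hypervisor. */
        if (hpmn >= nr_counters)
                return 0;

        return GENMASK(nr_counters - 1, hpmn);
}

The mask PMCR_EL0.P is filtered against in kvm_pmu_handle_pmcr() is then the implemented mask with this hyp range and the cycle counter bit cleared.
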
include/kvm/arm_pmu.h

@@ -53,8 +53,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
 void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
@@ -127,8 +126,7 @@ static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
 static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
-static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
-static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
 static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)

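Keeping the !CONFIG_HW_PERF_EVENTS stub in lock-step with the real declaration is what lets callers stay free of #ifdefs; for example (a hypothetical caller, for illustration only):

static void example_write_pmcntenset(struct kvm_vcpu *vcpu, u64 val)
{
        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
        /* Compiles to nothing when PMU support is configured out. */
        kvm_pmu_reprogram_counter_mask(vcpu, val);
}
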
set_id_regs.c (KVM selftests)

@@ -152,7 +152,6 @@ static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = {
 	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGENDEL0, 0),
 	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, SNSMEM, 0),
 	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGEND, 0),
-	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ASIDBITS, 0),
 	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, PARANGE, 0),
 	REG_FTR_END,
 };