An extra count on ebb_state.stats.pmc_count[PMC_INDEX(pmc)] is being performed when count_pmc() is used to reset PMCs on a few selftests. This extra pmc_count can occasionally invalidate results, such as the ones from cycles_test shown hereafter. The ebb_check_count() failed with an above-the-upper-limit error due to the extra value on ebb_state.stats.pmc_count. Furthermore, this extra count is also indicated by extra PMC1 trace_log on the output of the cycle test (as well as on pmc56_overflow_test): ========== ... [21]: counter = 8 [22]: register SPRN_MMCR0 = 0x0000000080000080 [23]: register SPRN_PMC1 = 0x0000000080000004 [24]: counter = 9 [25]: register SPRN_MMCR0 = 0x0000000080000080 [26]: register SPRN_PMC1 = 0x0000000080000004 [27]: counter = 10 [28]: register SPRN_MMCR0 = 0x0000000080000080 [29]: register SPRN_PMC1 = 0x0000000080000004 >> [30]: register SPRN_PMC1 = 0x000000004000051e PMC1 count (0x280000546) above upper limit 0x2800003e8 (+0x15e) [FAIL] Test FAILED on line 52 failure: cycles ========== Signed-off-by: Desnes A. Nunes do Rosario <desnesn@linux.ibm.com> Tested-by: Sachin Sant <sachinp@linux.vnet.ibm.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://lore.kernel.org/r/20200626164737.21943-1-desnesn@linux.ibm.com
102 lines
2.3 KiB
C
102 lines
2.3 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2014, Michael Ellerman, IBM Corp.
 */

#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#include "ebb.h"

/*
 * Test that tries to trigger CPU_FTR_PMAO_BUG, a hardware defect where an
 * exception triggers but we context switch before it is delivered, and so
 * lose the exception.
 */

static int test_body(void)
|
|
{
|
|
int i, orig_period, max_period;
|
|
struct event event;
|
|
|
|
SKIP_IF(!ebb_is_supported());
|
|
|
|
/* We use PMC4 to make sure the kernel switches all counters correctly */
|
|
event_init_named(&event, 0x40002, "instructions");
|
|
event_leader_ebb_init(&event);
|
|
|
|
event.attr.exclude_kernel = 1;
|
|
event.attr.exclude_hv = 1;
|
|
event.attr.exclude_idle = 1;
|
|
|
|
FAIL_IF(event_open(&event));
|
|
|
|
ebb_enable_pmc_counting(4);
|
|
setup_ebb_handler(standard_ebb_callee);
|
|
ebb_global_enable();
|
|
FAIL_IF(ebb_event_enable(&event));
|
|
|
|
/*
|
|
* We want a low sample period, but we also want to get out of the EBB
|
|
* handler without tripping up again.
|
|
*
|
|
* This value picked after much experimentation.
|
|
*/
|
|
orig_period = max_period = sample_period = 400;
|
|
|
|
mtspr(SPRN_PMC4, pmc_sample_period(sample_period));
|
|
|
|
while (ebb_state.stats.ebb_count < 1000000) {
|
|
/*
|
|
* We are trying to get the EBB exception to race exactly with
|
|
* us entering the kernel to do the syscall. We then need the
|
|
* kernel to decide our timeslice is up and context switch to
|
|
* the other thread. When we come back our EBB will have been
|
|
* lost and we'll spin in this while loop forever.
|
|
*/
|
|
|
|
for (i = 0; i < 100000; i++)
|
|
sched_yield();
|
|
|
|
/* Change the sample period slightly to try and hit the race */
|
|
if (sample_period >= (orig_period + 200))
|
|
sample_period = orig_period;
|
|
else
|
|
sample_period++;
|
|
|
|
if (sample_period > max_period)
|
|
max_period = sample_period;
|
|
}
|
|
|
|
ebb_freeze_pmcs();
|
|
ebb_global_disable();
|
|
|
|
mtspr(SPRN_PMC4, 0xdead);
|
|
|
|
dump_summary_ebb_state();
|
|
dump_ebb_hw_state();
|
|
|
|
event_close(&event);
|
|
|
|
FAIL_IF(ebb_state.stats.ebb_count == 0);
|
|
|
|
/* We vary our sample period so we need extra fudge here */
|
|
FAIL_IF(!ebb_check_count(4, orig_period, 2 * (max_period - orig_period)));
|
|
|
|
return 0;
|
|
}
|
|
|
|
static int lost_exception(void)
|
|
{
|
|
return eat_cpu(test_body);
|
|
}
|
|
|
|
int main(void)
|
|
{
|
|
test_harness_set_timeout(300);
|
|
return test_harness(lost_exception, "lost_exception");
|
|
}
|