riscv: report misaligned accesses emulation to hwprobe
hwprobe provides a way to report if misaligned accesses are emulated. In order to correctly populate that feature, we can check if it actually traps when doing a misaligned access. This can be checked using an exception table entry which will actually be used when a misaligned access is done from kernel mode. Signed-off-by: Clément Léger <cleger@rivosinc.com> Link: https://lore.kernel.org/r/20231004151405.521596-8-cleger@rivosinc.com Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
This commit is contained in:
parent
90b11b470b
commit
71c54b3d16
4 changed files with 79 additions and 1 deletions
|
@ -32,4 +32,22 @@ extern struct riscv_isainfo hart_isa[NR_CPUS];
|
||||||
|
|
||||||
void check_unaligned_access(int cpu);
|
void check_unaligned_access(int cpu);
|
||||||
|
|
||||||
|
#ifdef CONFIG_RISCV_MISALIGNED
/* True once boot probing found every present CPU emulating misaligned accesses. */
bool unaligned_ctl_available(void);
/* Probe whether misaligned accesses on @cpu trap and are emulated by the kernel. */
bool check_unaligned_access_emulated(int cpu);
/* Run after all boot CPUs were probed; latches the unaligned_ctl flag. */
void unaligned_emulation_finish(void);
#else
/* CONFIG_RISCV_MISALIGNED=n: no emulation handling, so report "not available". */
static inline bool unaligned_ctl_available(void)
{
	return false;
}

static inline bool check_unaligned_access_emulated(int cpu)
{
	return false;
}

static inline void unaligned_emulation_finish(void) {}
#endif
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -568,6 +568,9 @@ void check_unaligned_access(int cpu)
|
||||||
void *src;
|
void *src;
|
||||||
long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
|
long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
|
||||||
|
|
||||||
|
if (check_unaligned_access_emulated(cpu))
|
||||||
|
return;
|
||||||
|
|
||||||
page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
|
page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
|
||||||
if (!page) {
|
if (!page) {
|
||||||
pr_warn("Can't alloc pages to measure memcpy performance");
|
pr_warn("Can't alloc pages to measure memcpy performance");
|
||||||
|
@ -648,6 +651,7 @@ out:
|
||||||
/*
 * Boot-time initcall: probe misaligned access handling on the boot CPU,
 * then decide (via unaligned_emulation_finish()) whether PR_UNALIGN
 * control can be offered. Must run after the probe so the per-cpu
 * misaligned_access_speed value is populated first.
 */
static int __init check_unaligned_access_boot_cpu(void)
{
	check_unaligned_access(0);
	unaligned_emulation_finish();
	return 0;
}
|
||||||
|
|
||||||
|
|
|
@ -245,8 +245,8 @@ asmlinkage __visible void smp_callin(void)
|
||||||
riscv_ipi_enable();
|
riscv_ipi_enable();
|
||||||
|
|
||||||
numa_add_cpu(curr_cpuid);
|
numa_add_cpu(curr_cpuid);
|
||||||
set_cpu_online(curr_cpuid, 1);
|
|
||||||
check_unaligned_access(curr_cpuid);
|
check_unaligned_access(curr_cpuid);
|
||||||
|
set_cpu_online(curr_cpuid, 1);
|
||||||
|
|
||||||
if (has_vector()) {
|
if (has_vector()) {
|
||||||
if (riscv_v_setup_vsize())
|
if (riscv_v_setup_vsize())
|
||||||
|
|
|
@ -14,6 +14,8 @@
|
||||||
#include <asm/ptrace.h>
|
#include <asm/ptrace.h>
|
||||||
#include <asm/csr.h>
|
#include <asm/csr.h>
|
||||||
#include <asm/entry-common.h>
|
#include <asm/entry-common.h>
|
||||||
|
#include <asm/hwprobe.h>
|
||||||
|
#include <asm/cpufeature.h>
|
||||||
|
|
||||||
#define INSN_MATCH_LB 0x3
|
#define INSN_MATCH_LB 0x3
|
||||||
#define INSN_MASK_LB 0x707f
|
#define INSN_MASK_LB 0x707f
|
||||||
|
@ -396,6 +398,8 @@ union reg_data {
|
||||||
u64 data_u64;
|
u64 data_u64;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static bool unaligned_ctl __read_mostly;
|
||||||
|
|
||||||
/* sysctl hooks */
|
/* sysctl hooks */
|
||||||
int unaligned_enabled __read_mostly = 1; /* Enabled by default */
|
int unaligned_enabled __read_mostly = 1; /* Enabled by default */
|
||||||
|
|
||||||
|
@ -409,6 +413,8 @@ int handle_misaligned_load(struct pt_regs *regs)
|
||||||
|
|
||||||
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
|
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
|
||||||
|
|
||||||
|
*this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_EMULATED;
|
||||||
|
|
||||||
if (!unaligned_enabled)
|
if (!unaligned_enabled)
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
|
@ -585,3 +591,53 @@ int handle_misaligned_store(struct pt_regs *regs)
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * Detect whether misaligned accesses on @cpu are emulated, i.e. whether
 * they trap and the kernel misaligned-access handler fixes them up.
 *
 * The per-cpu speed slot is reset to UNKNOWN, then a deliberately
 * misaligned load is issued; if the trap path runs, it stamps the slot
 * with RISCV_HWPROBE_MISALIGNED_EMULATED (see handle_misaligned_load()).
 * Returns true when emulation was observed.
 */
bool check_unaligned_access_emulated(int cpu)
{
	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
	unsigned long tmp_var, tmp_val;
	bool misaligned_emu_detected;

	/* Clear first so an EMULATED value below can only come from the trap. */
	*mas_ptr = RISCV_HWPROBE_MISALIGNED_UNKNOWN;

	/* Load at offset 1 is guaranteed misaligned for any natural width. */
	__asm__ __volatile__ (
		" "REG_L" %[tmp], 1(%[ptr])\n"
		: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");

	misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_EMULATED);
	/*
	 * If unaligned_ctl is already set, this means that we detected that all
	 * CPUS uses emulated misaligned access at boot time. If that changed
	 * when hotplugging the new cpu, this is something we don't handle.
	 */
	if (unlikely(unaligned_ctl && !misaligned_emu_detected)) {
		pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
		/* Non-homogeneous hotplugged CPU: park it forever rather than run tasks. */
		while (true)
			cpu_relax();
	}

	return misaligned_emu_detected;
}
|
||||||
|
|
||||||
|
/*
 * Finalize boot-time misaligned-access probing.
 *
 * PR_UNALIGN controls are only supportable when every present CPU
 * emulates misaligned accesses, since a task requesting such control
 * may be scheduled on any CPU. Latch unaligned_ctl only in that case;
 * bail out as soon as one CPU disagrees.
 */
void __init unaligned_emulation_finish(void)
{
	int i;

	for_each_present_cpu(i) {
		if (per_cpu(misaligned_access_speed, i) != RISCV_HWPROBE_MISALIGNED_EMULATED)
			return;
	}

	unaligned_ctl = true;
}
|
||||||
|
|
||||||
|
/*
 * Report whether PR_UNALIGN control is usable: true only after
 * unaligned_emulation_finish() found every present CPU emulating
 * misaligned accesses.
 */
bool unaligned_ctl_available(void)
{
	return unaligned_ctl;
}
|
||||||
|
|
Loading…
Add table
Reference in a new issue