LoongArch fixes for v6.14-rc3
-----BEGIN PGP SIGNATURE-----
iQJKBAABCAA0FiEEzOlt8mkP+tbeiYy5AoYrw/LiJnoFAmet+IIWHGNoZW5odWFj
YWlAa2VybmVsLm9yZwAKCRAChivD8uImesjHD/9NL2GwZTldtC+ryWb2Og7xcKGs
f7lSUrUaQypHP8NTxduP/M/tmLrmzbj5LYdoG2zXHWykOwJo2Z1C8N0q1uB6nwqW
PNAx+sS2NjRwUCXIsTfCK2/+NKmuHzRJTyEvSS8W4ott3QAgPa5vHpyCDqqr5rJQ
UiWgMxbxA/fQKGr5CEsoF3U1w/iJgBCbVMzcY6OAHmO1/8Pf29XN3yUvdiNDqadH
bR7nDpLn6uZQn4w16A/ZlFh2k0EGFsVcYC1W5e2x15ud1rU76Eg5DAP9GMIm3EXt
8SeyvaR0jTrIyZeJliF2tn60x4SG94ZDlyNGcOq94StDEPDxyVTs+D6KDiQfLwx6
9zp9igQR/hpTGhPzwD5dtUyJbgfn+Sln8w6c8ygrzfKwopA0GXlkrBLmnPCU1lCG
FfbKhNycRH4VrQsAqfO47876T9Bba+vgNkyMOgfkFBr7EmKHCDncCK4EAB9xpXfu
2zE5pc3Yl6I7EHSk/KtKhJ3kgNri5nK/ubJiEKAR+0jep2H5JxMCSJoJMsmFw3Vt
d0O495PDIEu6s2ULMyfp5MFrXTDkTiC2ghw2b7+UojScCn9A9+WjARBy/NtH83R5
3j+S+4KbV5aserl7AkOTc87aNXdyZ62b9vzYqzJVaHJxavnTn2DXOGjFBhQn0oRl
HhYoJAn9kcK4I74Rvw==
=O6uZ
-----END PGP SIGNATURE-----

Merge tag 'loongarch-fixes-6.14-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch fixes from Huacai Chen:
 "Fix bugs about idle, kernel_page_present(), IP checksum and KVM, plus
  some trivial cleanups"

* tag 'loongarch-fixes-6.14-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  LoongArch: KVM: Set host with kernel mode when switch to VM mode
  LoongArch: KVM: Remove duplicated cache attribute setting
  LoongArch: KVM: Fix typo issue about GCFG feature detection
  LoongArch: csum: Fix OoB access in IP checksum code for negative lengths
  LoongArch: Remove the deprecated notifier hook mechanism
  LoongArch: Use str_yes_no() helper function for /proc/cpuinfo
  LoongArch: Fix kernel_page_present() for KPRANGE/XKPRANGE
  LoongArch: Fix idle VS timer enqueue
commit ab68d7eb7b
11 changed files with 30 additions and 73 deletions
arch/loongarch/include/asm/cpu-info.h
@@ -76,27 +76,6 @@ extern const char *__cpu_full_name[];
 #define cpu_family_string()	__cpu_family[raw_smp_processor_id()]
 #define cpu_full_name_string()	__cpu_full_name[raw_smp_processor_id()]
 
-struct seq_file;
-struct notifier_block;
-
-extern int register_proc_cpuinfo_notifier(struct notifier_block *nb);
-extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v);
-
-#define proc_cpuinfo_notifier(fn, pri)				\
-({								\
-	static struct notifier_block fn##_nb = {		\
-		.notifier_call = fn,				\
-		.priority = pri					\
-	};							\
-								\
-	register_proc_cpuinfo_notifier(&fn##_nb);		\
-})
-
-struct proc_cpuinfo_notifier_args {
-	struct seq_file *m;
-	unsigned long n;
-};
-
 static inline bool cpus_are_siblings(int cpua, int cpub)
 {
 	struct cpuinfo_loongarch *infoa = &cpu_data[cpua];
arch/loongarch/include/asm/smp.h
@@ -77,6 +77,8 @@ extern int __cpu_logical_map[NR_CPUS];
 #define SMP_IRQ_WORK		BIT(ACTION_IRQ_WORK)
 #define SMP_CLEAR_VECTOR	BIT(ACTION_CLEAR_VECTOR)
 
+struct seq_file;
+
 struct secondary_data {
 	unsigned long stack;
 	unsigned long thread_info;
arch/loongarch/kernel/genex.S
@@ -18,16 +18,19 @@
 
 	.align	5
 SYM_FUNC_START(__arch_cpu_idle)
-	/* start of rollback region */
-	LONG_L	t0, tp, TI_FLAGS
-	nop
-	andi	t0, t0, _TIF_NEED_RESCHED
-	bnez	t0, 1f
-	nop
-	nop
-	nop
+	/* start of idle interrupt region */
+	ori	t0, zero, CSR_CRMD_IE
+	/* idle instruction needs irq enabled */
+	csrxchg	t0, t0, LOONGARCH_CSR_CRMD
+	/*
+	 * If an interrupt lands here; between enabling interrupts above and
+	 * going idle on the next instruction, we must *NOT* go idle since the
+	 * interrupt could have set TIF_NEED_RESCHED or caused an timer to need
+	 * reprogramming. Fall through -- see handle_vint() below -- and have
+	 * the idle loop take care of things.
+	 */
 	idle	0
-	/* end of rollback region */
+	/* end of idle interrupt region */
 1:	jr	ra
 SYM_FUNC_END(__arch_cpu_idle)
 
@@ -35,11 +38,10 @@ SYM_CODE_START(handle_vint)
 	UNWIND_HINT_UNDEFINED
 	BACKUP_T0T1
 	SAVE_ALL
-	la_abs	t1, __arch_cpu_idle
+	la_abs	t1, 1b
 	LONG_L	t0, sp, PT_ERA
-	/* 32 byte rollback region */
-	ori	t0, t0, 0x1f
-	xori	t0, t0, 0x1f
+	/* 3 instructions idle interrupt region */
+	ori	t0, t0, 0b1100
 	bne	t0, t1, 1f
 	LONG_S	t0, sp, PT_ERA
 1:	move	a0, sp
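The comment added above describes a classic lost-wakeup window: interrupts are enabled immediately before the idle instruction, and an interrupt arriving in that gap may set TIF_NEED_RESCHED or queue a timer, so the CPU must not go to sleep afterwards. handle_vint closes the window by stepping the saved PT_ERA forward to the instruction after idle. As a rough user-space analogue only (signals stand in for interrupts, and a blocked-signal/sigsuspend pattern replaces the return-address rewrite; all names below are invented for illustration), the same check-then-sleep race and one standard way to close it look like this:

/* Toy analogue of the check-then-sleep race described in the genex.S comment.
 * SIGALRM plays the role of the interrupt; need_resched is the condition the
 * "interrupt" sets. Illustrative only, not the kernel's mechanism. */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t need_resched;

static void on_alarm(int sig)
{
	(void)sig;
	need_resched = 1;		/* the "interrupt" asks for a reschedule */
}

int main(void)
{
	struct sigaction sa = { 0 };
	sigset_t block, old;

	sa.sa_handler = on_alarm;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGALRM, &sa, NULL);

	/*
	 * Racy shape (an unguarded "check, then sleep"):
	 *
	 *	if (!need_resched)
	 *		pause();	<- a SIGALRM delivered between the check
	 *				   and pause() is handled, then pause()
	 *				   sleeps anyway: the wakeup is lost.
	 *
	 * Race-free shape: keep the signal blocked across the check and let
	 * sigsuspend() unblock it and sleep atomically.
	 */
	sigemptyset(&block);
	sigaddset(&block, SIGALRM);
	sigprocmask(SIG_BLOCK, &block, &old);

	alarm(1);			/* arrange a "timer interrupt" */
	if (!need_resched)
		sigsuspend(&old);	/* atomically unblock SIGALRM and wait */

	sigprocmask(SIG_SETMASK, &old, NULL);
	printf("woke up, need_resched=%d\n", (int)need_resched);
	return 0;
}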
arch/loongarch/kernel/idle.c
@@ -11,7 +11,6 @@
 
 void __cpuidle arch_cpu_idle(void)
 {
-	raw_local_irq_enable();
-	__arch_cpu_idle(); /* idle instruction needs irq enabled */
+	__arch_cpu_idle();
 	raw_local_irq_disable();
 }
arch/loongarch/kernel/proc.c
@@ -13,28 +13,12 @@
 #include <asm/processor.h>
 #include <asm/time.h>
 
-/*
- * No lock; only written during early bootup by CPU 0.
- */
-static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
-
-int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
-{
-	return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
-}
-
-int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
-{
-	return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
-}
-
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
 	unsigned long n = (unsigned long) v - 1;
 	unsigned int isa = cpu_data[n].isa_level;
 	unsigned int version = cpu_data[n].processor_id & 0xff;
 	unsigned int fp_version = cpu_data[n].fpu_vers;
-	struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
 
 #ifdef CONFIG_SMP
 	if (!cpu_online(n))
@@ -91,20 +75,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	if (cpu_has_lbt_mips)	seq_printf(m, " lbt_mips");
 	seq_printf(m, "\n");
 
-	seq_printf(m, "Hardware Watchpoint\t: %s",
-		      cpu_has_watch ? "yes, " : "no\n");
+	seq_printf(m, "Hardware Watchpoint\t: %s", str_yes_no(cpu_has_watch));
 	if (cpu_has_watch) {
-		seq_printf(m, "iwatch count: %d, dwatch count: %d\n",
+		seq_printf(m, ", iwatch count: %d, dwatch count: %d",
 		      cpu_data[n].watch_ireg_count, cpu_data[n].watch_dreg_count);
 	}
 
-	proc_cpuinfo_notifier_args.m = m;
-	proc_cpuinfo_notifier_args.n = n;
-
-	raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
-				&proc_cpuinfo_notifier_args);
-
-	seq_printf(m, "\n");
+	seq_printf(m, "\n\n");
 
 	return 0;
 }
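For reference, str_yes_no() used above is the small helper from include/linux/string_choices.h; it simply maps a boolean to the strings "yes" and "no", so the Hardware Watchpoint line keeps its previous wording while the manual ternary and its trailing-newline trickery go away:

/* As defined in include/linux/string_choices.h (reproduced for context): */
static inline const char *str_yes_no(bool v)
{
	return v ? "yes" : "no";
}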
arch/loongarch/kernel/reset.c
@@ -33,7 +33,7 @@ void machine_halt(void)
 	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
 
 	while (true) {
-		__arch_cpu_idle();
+		__asm__ __volatile__("idle 0" : : : "memory");
 	}
 }
 
@@ -53,7 +53,7 @@ void machine_power_off(void)
 #endif
 
 	while (true) {
-		__arch_cpu_idle();
+		__asm__ __volatile__("idle 0" : : : "memory");
 	}
 }
 
@@ -74,6 +74,6 @@ void machine_restart(char *command)
 	acpi_reboot();
 
 	while (true) {
-		__arch_cpu_idle();
+		__asm__ __volatile__("idle 0" : : : "memory");
 	}
 }
arch/loongarch/kvm/main.c
@@ -303,9 +303,9 @@ int kvm_arch_enable_virtualization_cpu(void)
 	 * TOE=0: Trap on Exception.
 	 * TIT=0: Trap on Timer.
 	 */
-	if (env & CSR_GCFG_GCIP_ALL)
+	if (env & CSR_GCFG_GCIP_SECURE)
 		gcfg |= CSR_GCFG_GCI_SECURE;
-	if (env & CSR_GCFG_MATC_ROOT)
+	if (env & CSR_GCFG_MATP_ROOT)
 		gcfg |= CSR_GCFG_MATC_ROOT;
 
 	write_csr_gcfg(gcfg);
arch/loongarch/kvm/switch.S
@@ -85,7 +85,7 @@
 	 * Guest CRMD comes from separate GCSR_CRMD register
 	 */
 	ori	t0, zero, CSR_PRMD_PIE
-	csrxchg	t0, t0, LOONGARCH_CSR_PRMD
+	csrwr	t0, LOONGARCH_CSR_PRMD
 
 	/* Set PVM bit to setup ertn to guest context */
 	ori	t0, zero, CSR_GSTAT_PVM
arch/loongarch/kvm/vcpu.c
@@ -1548,9 +1548,6 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	/* Restore timer state regardless */
 	kvm_restore_timer(vcpu);
-
-	/* Control guest page CCA attribute */
-	change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
 
 	/* Restore hardware PMU CSRs */
arch/loongarch/lib/csum.c
@@ -25,7 +25,7 @@ unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
 	const u64 *ptr;
 	u64 data, sum64 = 0;
 
-	if (unlikely(len == 0))
+	if (unlikely(len <= 0))
 		return 0;
 
 	offset = (unsigned long)buff & 7;
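The one-character change above matters because do_csum() takes a signed length: a negative value sails past the old "len == 0" check, and once the length is mixed into unsigned arithmetic it behaves like an enormous positive count. A minimal user-space sketch of that failure mode (toy_sum and the buffer are invented for illustration; this is not the kernel routine):

#include <stdio.h>

/* Toy checksum with the old-style guard: only len == 0 is rejected. */
static unsigned int toy_sum(const unsigned char *buf, int len)
{
	unsigned int sum = 0;

	if (len == 0)			/* misses len < 0 */
		return 0;

	/* Once len is converted for the loop bound, -1 becomes ~0UL and the
	 * loop reads far past the end of buf: the out-of-bounds access. */
	for (unsigned long i = 0; i < (unsigned long)len; i++)
		sum += buf[i];

	return sum;
}

int main(void)
{
	unsigned char data[4] = { 1, 2, 3, 4 };

	printf("len=4: %u\n", toy_sum(data, 4));	/* fine: prints 10 */
	/* toy_sum(data, -1) would walk off the buffer; with the fixed guard
	 * "if (len <= 0) return 0;" it returns 0 instead. */
	return 0;
}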
arch/loongarch/mm/pageattr.c
@@ -3,6 +3,7 @@
  * Copyright (C) 2024 Loongson Technology Corporation Limited
  */
 
+#include <linux/memblock.h>
 #include <linux/pagewalk.h>
 #include <linux/pgtable.h>
 #include <asm/set_memory.h>
@@ -167,7 +168,7 @@ bool kernel_page_present(struct page *page)
 	unsigned long addr = (unsigned long)page_address(page);
 
 	if (addr < vm_map_base)
-		return true;
+		return memblock_is_memory(__pa(addr));
 
 	pgd = pgd_offset_k(addr);
 	if (pgd_none(pgdp_get(pgd)))
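Addresses below vm_map_base fall in the direct-mapped KPRANGE/XKPRANGE windows, where there is no page table entry to inspect, so the fix answers "is this page present?" by asking whether the backing physical address is actual RAM. memblock_is_memory() is essentially a range-membership test over the firmware-provided memory map; a self-contained sketch of that idea (the ranges and helper names below are invented, not the memblock API):

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; };	/* physical range [start, end) */

/* Pretend firmware memory map; the real one is maintained by memblock. */
static const struct range ram[] = {
	{ 0x00200000UL, 0x10000000UL },
	{ 0x90000000UL, 0xc0000000UL },
};

static bool is_ram(unsigned long pa)
{
	for (unsigned int i = 0; i < sizeof(ram) / sizeof(ram[0]); i++)
		if (pa >= ram[i].start && pa < ram[i].end)
			return true;
	return false;
}

int main(void)
{
	/* A direct-mapped virtual address is "present" only if its physical
	 * address lands inside one of these ranges. */
	printf("0x01000000 -> %d\n", is_ram(0x01000000UL));	/* 1 */
	printf("0x80000000 -> %d\n", is_ram(0x80000000UL));	/* 0 */
	return 0;
}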