linux/arch/x86/boot/compressed/idt_64.c
Jun'ichi Nomura 78a509fba9 x86/boot: Ignore NMIs during very early boot
When there are two racing NMIs on x86, the first NMI invokes the NMI handler
and the second NMI is latched until an IRET is executed.

If panic on NMI and panic kexec are enabled, the first NMI triggers a
panic and starts booting the next kernel via kexec. Note that the second
NMI is still latched. During the early boot of the next kernel, once
an IRET is executed as the result of a page fault, the second NMI is
unlatched and invokes the NMI handler.

However, the NMI handler is not set up at this early stage of boot, which
results in a boot failure.

Avoid such problems by setting up a NOP handler for early NMIs.

[ mingo: Refined the changelog. ]

Signed-off-by: Jun'ichi Nomura <junichi.nomura@nec.com>
Signed-off-by: Derek Barbosa <debarbos@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Peter Zijlstra <peterz@infradead.org>
2023-11-30 09:55:40 +01:00
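
The NOP handler itself is not part of idt_64.c; this file only installs an IDT entry
for it (see the boot_nmi_trap entry in load_stage2_idt() below). As a rough sketch of
the companion C handler, where the do_boot_nmi_trap() name and the pt_regs-based
signature are assumptions for illustration rather than quotes from this patch:

/*
 * Sketch only: an NMI handler that deliberately does nothing, so a latched
 * NMI delivered during very early boot is consumed instead of faulting into
 * a not-yet-initialized handler.
 */
void do_boot_nmi_trap(struct pt_regs *regs, unsigned long error_code)
{
	/* Empty body: ignore NMIs until the real handler is installed. */
}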

// SPDX-License-Identifier: GPL-2.0-only
#include <asm/trap_pf.h>
#include <asm/segment.h>
#include <asm/trapnr.h>
#include "misc.h"

/* Build a 64-bit IDT gate for @vector pointing at @handler and install it */
static void set_idt_entry(int vector, void (*handler)(void))
{
	unsigned long address = (unsigned long)handler;
	gate_desc entry;

	memset(&entry, 0, sizeof(entry));

	entry.offset_low    = (u16)(address & 0xffff);
	entry.segment       = __KERNEL_CS;
	entry.bits.type     = GATE_TRAP;
	entry.bits.p        = 1;
	entry.offset_middle = (u16)((address >> 16) & 0xffff);
	entry.offset_high   = (u32)(address >> 32);

	memcpy(&boot_idt[vector], &entry, sizeof(entry));
}

/* Have this here so we don't need to include <asm/desc.h> */
static void load_boot_idt(const struct desc_ptr *dtr)
{
	asm volatile("lidt %0" :: "m" (*dtr));
}

/* Set up the IDT before the kernel jumps to .Lrelocated */
void load_stage1_idt(void)
{
	boot_idt_desc.address = (unsigned long)boot_idt;

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
		set_idt_entry(X86_TRAP_VC, boot_stage1_vc);

	load_boot_idt(&boot_idt_desc);
}

/*
 * Set up the IDT after the kernel has jumped to .Lrelocated.
 *
 * initialize_identity_maps() needs a #PF handler to be set up
 * in order to be able to fault in identity-mapping ranges; see
 * do_boot_page_fault().
 *
 * This #PF handler setup needs to happen in load_stage2_idt(), where the
 * IDT is loaded and the #VC IDT entry gets set up too.
 *
 * In order to be able to handle #VCs, one needs a GHCB, which
 * gets set up with an already established page table; that is done in
 * initialize_identity_maps(). And there is the catch-22: the boot #VC
 * handler do_boot_stage2_vc() needs to call early_setup_ghcb() itself
 * (and, especially, set_page_decrypted()) because the SEV-ES setup code
 * cannot initialize a GHCB as there is no #PF handler yet...
 */
void load_stage2_idt(void)
{
	boot_idt_desc.address = (unsigned long)boot_idt;

	set_idt_entry(X86_TRAP_PF, boot_page_fault);
	set_idt_entry(X86_TRAP_NMI, boot_nmi_trap);

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Clear the second stage #VC handler in case guest types
	 * needing #VC have not been detected.
	 */
	if (sev_status & BIT(1))
		set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
	else
		set_idt_entry(X86_TRAP_VC, NULL);
#endif

	load_boot_idt(&boot_idt_desc);
}

void cleanup_exception_handling(void)
{
	/*
	 * Flush GHCB from cache and map it encrypted again when running as
	 * SEV-ES guest.
	 */
	sev_es_shutdown_ghcb();

	/* Set a null-idt, disabling #PF and #VC handling */
	boot_idt_desc.size    = 0;
	boot_idt_desc.address = 0;

	load_boot_idt(&boot_idt_desc);
}
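
The boot_idt array, boot_idt_desc, and the boot_* entry points used above are not
declared in this file; it only includes "misc.h". As a hedged sketch of the external
declarations this file relies on, assuming they live in the decompressor's misc.h and
that the entry points are assembly stubs (exact names and locations are assumptions,
not quotes from that header):

/* Sketch only: declarations assumed to back the symbols used in idt_64.c. */
extern gate_desc boot_idt[BOOT_IDT_ENTRIES];	/* the boot-time IDT itself */
extern struct desc_ptr boot_idt_desc;		/* descriptor handed to lidt */
void boot_page_fault(void);			/* #PF entry point */
void boot_nmi_trap(void);			/* NOP NMI entry point */
void boot_stage1_vc(void);			/* early #VC entry point */
void boot_stage2_vc(void);			/* stage-2 #VC entry point */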