x86/microcode/AMD: Rip out static buffers
Load straight from the containers (initrd or builtin, for example).
There's no need to cache the patch per node. This even simplifies the
code a bit with the opportunity for more cleanups later.

Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Tested-by: John Allen <john.allen@amd.com>
Link: https://lore.kernel.org/r/20230720202813.3269888-1-john.allen@amd.com
parent 6eaae19807
commit 05e91e7211
3 changed files with 31 additions and 70 deletions
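For orientation, here is a minimal sketch of the early-load path as it reads after this change, assembled from the added lines in the diff below. The helpers find_blobs_in_containers() and early_apply_microcode() are the existing ones in the AMD microcode driver; lines outside the visible hunk context are omitted, so this is a sketch rather than a verbatim copy of the file:

/* Sketch: apply the patch straight from the (builtin or initrd) container,
 * with no per-node static buffer in between. */
static void apply_ucode_from_containers(unsigned int cpuid_1_eax)
{
        struct cpio_data cp = { };

        /* Locate the matching container blob ... */
        find_blobs_in_containers(cpuid_1_eax, &cp);
        if (!(cp.data && cp.size))
                return;

        /* ... and apply the microcode patch directly from it. */
        early_apply_microcode(cpuid_1_eax, cp.data, cp.size);
}

/* Single early-loading entry point now used by both the BSP and the APs. */
void load_ucode_amd_early(unsigned int cpuid_1_eax)
{
        return apply_ucode_from_containers(cpuid_1_eax);
}

Late reload (hotplug/resume) correspondingly switches from the amd_ucode_patch[] copy to the generic per-family patch cache via find_patch(), as the new reload_ucode_amd() hunk below shows.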

arch/x86/include/asm/microcode_amd.h
@@ -44,13 +44,11 @@ struct microcode_amd {
 #define PATCH_MAX_SIZE (3 * PAGE_SIZE)
 
 #ifdef CONFIG_MICROCODE_AMD
-extern void __init load_ucode_amd_bsp(unsigned int family);
-extern void load_ucode_amd_ap(unsigned int family);
+extern void load_ucode_amd_early(unsigned int cpuid_1_eax);
 extern int __init save_microcode_in_initrd_amd(unsigned int family);
 void reload_ucode_amd(unsigned int cpu);
 #else
-static inline void __init load_ucode_amd_bsp(unsigned int family) {}
-static inline void load_ucode_amd_ap(unsigned int family) {}
+static inline void load_ucode_amd_early(unsigned int cpuid_1_eax) {}
 static inline int __init
 save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
 static inline void reload_ucode_amd(unsigned int cpu) {}

arch/x86/kernel/cpu/microcode/amd.c
@@ -56,9 +56,6 @@ struct cont_desc {
 
 static u32 ucode_new_rev;
 
-/* One blob per node. */
-static u8 amd_ucode_patch[MAX_NUMNODES][PATCH_MAX_SIZE];
-
 /*
  * Microcode patch container file is prepended to the initrd in cpio
  * format. See Documentation/arch/x86/microcode.rst
@@ -415,20 +412,17 @@ static int __apply_microcode_amd(struct microcode_amd *mc)
  *
  * Returns true if container found (sets @desc), false otherwise.
  */
-static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size, bool save_patch)
+static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
 {
         struct cont_desc desc = { 0 };
-        u8 (*patch)[PATCH_MAX_SIZE];
         struct microcode_amd *mc;
         u32 rev, dummy, *new_rev;
         bool ret = false;
 
 #ifdef CONFIG_X86_32
         new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
-        patch   = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
 #else
         new_rev = &ucode_new_rev;
-        patch   = &amd_ucode_patch[0];
 #endif
 
         desc.cpuid_1_eax = cpuid_1_eax;
@@ -452,9 +446,6 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size, boo
         if (!__apply_microcode_amd(mc)) {
                 *new_rev = mc->hdr.patch_id;
                 ret      = true;
-
-                if (save_patch)
-                        memcpy(patch, mc, min_t(u32, desc.psize, PATCH_MAX_SIZE));
         }
 
         return ret;
@@ -507,7 +498,7 @@ static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data
                 *ret = cp;
 }
 
-void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
+static void apply_ucode_from_containers(unsigned int cpuid_1_eax)
 {
         struct cpio_data cp = { };
 
@@ -515,42 +506,12 @@ void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
         if (!(cp.data && cp.size))
                 return;
 
-        early_apply_microcode(cpuid_1_eax, cp.data, cp.size, true);
+        early_apply_microcode(cpuid_1_eax, cp.data, cp.size);
 }
 
-void load_ucode_amd_ap(unsigned int cpuid_1_eax)
+void load_ucode_amd_early(unsigned int cpuid_1_eax)
 {
-        struct microcode_amd *mc;
-        struct cpio_data cp;
-        u32 *new_rev, rev, dummy;
-
-        if (IS_ENABLED(CONFIG_X86_32)) {
-                mc      = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
-                new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
-        } else {
-                mc      = (struct microcode_amd *)amd_ucode_patch;
-                new_rev = &ucode_new_rev;
-        }
-
-        native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
-        /*
-         * Check whether a new patch has been saved already. Also, allow application of
-         * the same revision in order to pick up SMT-thread-specific configuration even
-         * if the sibling SMT thread already has an up-to-date revision.
-         */
-        if (*new_rev && rev <= mc->hdr.patch_id) {
-                if (!__apply_microcode_amd(mc)) {
-                        *new_rev = mc->hdr.patch_id;
-                        return;
-                }
-        }
-
-        find_blobs_in_containers(cpuid_1_eax, &cp);
-        if (!(cp.data && cp.size))
-                return;
-
-        early_apply_microcode(cpuid_1_eax, cp.data, cp.size, false);
+        return apply_ucode_from_containers(cpuid_1_eax);
 }
 
 static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
@@ -578,23 +539,6 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
         return 0;
 }
 
-void reload_ucode_amd(unsigned int cpu)
-{
-        u32 rev, dummy __always_unused;
-        struct microcode_amd *mc;
-
-        mc = (struct microcode_amd *)amd_ucode_patch[cpu_to_node(cpu)];
-
-        rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
-        if (rev < mc->hdr.patch_id) {
-                if (!__apply_microcode_amd(mc)) {
-                        ucode_new_rev = mc->hdr.patch_id;
-                        pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
-                }
-        }
-}
-
 /*
  * a small, trivial cache of per-family ucode patches
  */
@@ -655,6 +599,28 @@ static struct ucode_patch *find_patch(unsigned int cpu)
         return cache_find_patch(equiv_id);
 }
 
+void reload_ucode_amd(unsigned int cpu)
+{
+        u32 rev, dummy __always_unused;
+        struct microcode_amd *mc;
+        struct ucode_patch *p;
+
+        p = find_patch(cpu);
+        if (!p)
+                return;
+
+        mc = p->data;
+
+        rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+
+        if (rev < mc->hdr.patch_id) {
+                if (!__apply_microcode_amd(mc)) {
+                        ucode_new_rev = mc->hdr.patch_id;
+                        pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
+                }
+        }
+}
+
 static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
 {
         struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -875,9 +841,6 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
                         continue;
 
                 ret = UCODE_NEW;
-
-                memset(&amd_ucode_patch[nid], 0, PATCH_MAX_SIZE);
-                memcpy(&amd_ucode_patch[nid], p->data, min_t(u32, p->size, PATCH_MAX_SIZE));
         }
 
         return ret;

arch/x86/kernel/cpu/microcode/core.c
@@ -172,7 +172,7 @@ void __init load_ucode_bsp(void)
         if (intel)
                 load_ucode_intel_bsp();
         else
-                load_ucode_amd_bsp(cpuid_1_eax);
+                load_ucode_amd_early(cpuid_1_eax);
 }
 
 static bool check_loader_disabled_ap(void)
@@ -200,7 +200,7 @@ void load_ucode_ap(void)
                 break;
         case X86_VENDOR_AMD:
                 if (x86_family(cpuid_1_eax) >= 0x10)
-                        load_ucode_amd_ap(cpuid_1_eax);
+                        load_ucode_amd_early(cpuid_1_eax);
                 break;
         default:
                 break;