linux/arch/x86/kernel/cpu/mtrr/cyrix.c
Juergen Gross 0340906952 x86/mtrr: Replace vendor tests in MTRR code
Modern CPUs all share the same MTRR interface implemented via
generic_mtrr_ops.

At several places in the MTRR code this generic interface is deduced
via is_cpu(INTEL) tests, which only work because X86_VENDOR_INTEL
happens to be 0 (the is_cpu() macro tests mtrr_if->vendor, which isn't
explicitly set in generic_mtrr_ops).

Test the generic CPU feature X86_FEATURE_MTRR instead.
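
A minimal sketch of the resulting pattern (the call site is
illustrative, not the literal diff; depending on context the feature
test is spelled cpu_feature_enabled() or boot_cpu_has()):

	/* before: deduce the generic interface from the vendor */
	if (is_cpu(INTEL))
		get_mtrr_state();

	/* after: test the architectural MTRR feature directly */
	if (cpu_feature_enabled(X86_FEATURE_MTRR))
		get_mtrr_state();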

The only other place where the .vendor member of struct mtrr_ops is
used is set_num_var_ranges(), where the number of MTRR registers is
determined depending on the vendor. This can easily be changed by
replacing .vendor with the static number of MTRR registers.
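
Roughly, as a sketch rather than the literal hunk (the new member name
matches the cyrix_mtrr_ops initializer at the bottom of this file):

	/* before: derive the register count from the vendor id */
	else if (is_cpu(AMD))
		config = 2;
	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
		config = 8;

	/* after: each struct mtrr_ops states its own register count */
	else
		config = mtrr_if->var_regs;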

It should be noted that the test "is_cpu(HYGON)" never returned true,
as there is no struct mtrr_ops with that vendor information.

[ bp: Use mtrr_enabled() before doing mtrr_if-> accesses, esp. in
  mtrr_trim_uncached_memory() which gets called independently from
  whether mtrr_if is set or not. ]
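
That guard is essentially (sketch of the call-site shape, not the
exact hunk):

	if (!mtrr_enabled())
		return 0;
	/* mtrr_if is known to be non-NULL from here on */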

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230502120931.20719-7-jgross@suse.com
2023-06-01 15:04:32 +02:00


// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/processor-cyrix.h>
#include <asm/processor-flags.h>
#include <asm/mtrr.h>
#include <asm/msr.h>

#include "mtrr.h"

static void
cyrix_get_arr(unsigned int reg, unsigned long *base,
	      unsigned long *size, mtrr_type *type)
{
	unsigned char arr, ccr3, rcr, shift;
	unsigned long flags;

	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

	local_irq_save(flags);

	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
	((unsigned char *)base)[3] = getCx86(arr);
	((unsigned char *)base)[2] = getCx86(arr + 1);
	((unsigned char *)base)[1] = getCx86(arr + 2);
	rcr = getCx86(CX86_RCR_BASE + reg);
	setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */

	local_irq_restore(flags);

	shift = ((unsigned char *)base)[1] & 0x0f;
	*base >>= PAGE_SHIFT;

	/*
	 * Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
	 * Note: shift==0xf means 4G, this is unsupported.
	 */
	if (shift)
		*size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
	else
		*size = 0;

	/* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
	if (reg < 7) {
		switch (rcr) {
		case 1:
			*type = MTRR_TYPE_UNCACHABLE;
			break;
		case 8:
			*type = MTRR_TYPE_WRBACK;
			break;
		case 9:
			*type = MTRR_TYPE_WRCOMB;
			break;
		case 24:
		default:
			*type = MTRR_TYPE_WRTHROUGH;
			break;
		}
	} else {
		switch (rcr) {
		case 0:
			*type = MTRR_TYPE_UNCACHABLE;
			break;
		case 8:
			*type = MTRR_TYPE_WRCOMB;
			break;
		case 9:
			*type = MTRR_TYPE_WRBACK;
			break;
		case 25:
		default:
			*type = MTRR_TYPE_WRTHROUGH;
			break;
		}
	}
}

/*
 * cyrix_get_free_region - get a free ARR.
 *
 * @base: the starting (base) address of the region.
 * @size: the size (in bytes) of the region.
 *
 * Returns: the index of the region on success, else -ENOSPC on error.
 */
static int
cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	unsigned long lbase, lsize;
	mtrr_type ltype;
	int i;

	switch (replace_reg) {
	case 7:
		if (size < 0x40)
			break;
		fallthrough;
	case 6:
	case 5:
	case 4:
		return replace_reg;
	case 3:
	case 2:
	case 1:
	case 0:
		return replace_reg;
	}

	/* If we are to set up a region >32M then look at ARR7 immediately */
	if (size > 0x2000) {
		cyrix_get_arr(7, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return 7;
		/* Else try ARR0-ARR6 first */
	} else {
		for (i = 0; i < 7; i++) {
			cyrix_get_arr(i, &lbase, &lsize, &ltype);
			if (lsize == 0)
				return i;
		}
		/*
		 * ARR0-ARR6 isn't free
		 * try ARR7 but its size must be at least 256K
		 */
		cyrix_get_arr(i, &lbase, &lsize, &ltype);
		if ((lsize == 0) && (size >= 0x40))
			return i;
	}

	return -ENOSPC;
}

static u32 cr4, ccr3;

static void prepare_set(void)
{
	u32 cr0;

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		cr4 = __read_cr4();
		__write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/*
	 * Disable and flush caches.
	 * Note that wbinvd flushes the TLBs as a side-effect
	 */
	cr0 = read_cr0() | X86_CR0_CD;
	wbinvd();
	write_cr0(cr0);
	wbinvd();

	/* Cyrix ARRs - everything else was excluded at the top */
	ccr3 = getCx86(CX86_CCR3);

	/* Cyrix ARRs - enable MAPEN */
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
}

static void post_set(void)
{
	/* Flush caches and TLBs */
	wbinvd();

	/* Cyrix ARRs - everything else was excluded at the top */
	setCx86(CX86_CCR3, ccr3);

	/* Enable caches */
	write_cr0(read_cr0() & ~X86_CR0_CD);

	/* Restore value of CR4 */
	if (boot_cpu_has(X86_FEATURE_PGE))
		__write_cr4(cr4);
}

static void cyrix_set_arr(unsigned int reg, unsigned long base,
			  unsigned long size, mtrr_type type)
{
	unsigned char arr, arr_type, arr_size;

	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

	/* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
	if (reg >= 7)
		size >>= 6;

	size &= 0x7fff;		/* make sure arr_size <= 14 */
	for (arr_size = 0; size; arr_size++, size >>= 1)
		;

	if (reg < 7) {
		switch (type) {
		case MTRR_TYPE_UNCACHABLE:
			arr_type = 1;
			break;
		case MTRR_TYPE_WRCOMB:
			arr_type = 9;
			break;
		case MTRR_TYPE_WRTHROUGH:
			arr_type = 24;
			break;
		default:
			arr_type = 8;
			break;
		}
	} else {
		switch (type) {
		case MTRR_TYPE_UNCACHABLE:
			arr_type = 0;
			break;
		case MTRR_TYPE_WRCOMB:
			arr_type = 8;
			break;
		case MTRR_TYPE_WRTHROUGH:
			arr_type = 25;
			break;
		default:
			arr_type = 9;
			break;
		}
	}

	prepare_set();

	base <<= PAGE_SHIFT;
	setCx86(arr + 0, ((unsigned char *)&base)[3]);
	setCx86(arr + 1, ((unsigned char *)&base)[2]);
	setCx86(arr + 2, (((unsigned char *)&base)[1]) | arr_size);
	setCx86(CX86_RCR_BASE + reg, arr_type);

	post_set();
}

const struct mtrr_ops cyrix_mtrr_ops = {
	.var_regs            = 8,
	.set                 = cyrix_set_arr,
	.get                 = cyrix_get_arr,
	.get_free_region     = cyrix_get_free_region,
	.validate_add_page   = generic_validate_add_page,
	.have_wrcomb         = positive_have_wrcomb,
};
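
Usage note (not part of cyrix.c): the ops table above only takes effect
once the MTRR setup code in mtrr.c selects it for a Cyrix CPU. A sketch
of that selection, assuming the vendor switch in mtrr_bp_init() still
keys off X86_FEATURE_CYRIX_ARR (the exact shape of that code may
differ):

	case X86_VENDOR_CYRIX:
		if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR))
			mtrr_if = &cyrix_mtrr_ops;
		break;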