
xtensa/mm/highmem: Switch to generic kmap atomic

There is no reason to have the same code in every architecture.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Chris Zankel <chris@zankel.net>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Link: https://lore.kernel.org/r/20201103095858.311016780@linutronix.de
Author: Thomas Gleixner
Date:   2020-11-03 10:27:29 +01:00
parent 3293efa978
commit 629ed3f7da
4 changed files with 18 additions and 45 deletions
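The conversion removes xtensa's private kmap_atomic implementation and lets the generic kmap_local code in mm/highmem.c do the mapping, calling back into the architecture only where xtensa's aliasing D-cache needs a say. The sketch below shows roughly where the arch hooks sit in the generic map path; kmap_local_idx_push() and arch_kmap_local_map_idx() are real names from the generic code, but the body is heavily simplified (no debug checks, no pte caching), so treat it as an illustration rather than the actual mm/highmem.c implementation:

    /*
     * Simplified sketch of the generic kmap_local map path; not the
     * real mm/highmem.c code. It only shows where the arch hooks sit.
     */
    void *kmap_local_pfn_sketch(unsigned long pfn, pgprot_t prot)
    {
    	unsigned long vaddr;
    	int idx;

    	/* arch_kmap_local_map_idx() lets xtensa pick a cache-color
    	 * aware fixmap slot; the default is purely type + cpu based. */
    	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
    	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
    	set_pte(virt_to_kpte(vaddr), pfn_pte(pfn, prot));
    	return (void *)vaddr;
    }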

arch/xtensa/Kconfig
@@ -666,6 +666,7 @@ endchoice
 config HIGHMEM
 	bool "High Memory Support"
 	depends on MMU
+	select KMAP_LOCAL
 	help
 	  Linux can use the full amount of RAM in the system by
 	  default. However, the default MMUv2 setup only maps the

arch/xtensa/include/asm/fixmap.h
@@ -16,7 +16,7 @@
 #ifdef CONFIG_HIGHMEM
 #include <linux/threads.h>
 #include <linux/pgtable.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
 #endif
 
 /*
@@ -39,7 +39,7 @@ enum fixed_addresses {
 	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_BEGIN,
 	FIX_KMAP_END = FIX_KMAP_BEGIN +
-		(KM_TYPE_NR * NR_CPUS * DCACHE_N_COLORS) - 1,
+		(KM_MAX_IDX * NR_CPUS * DCACHE_N_COLORS) - 1,
 #endif
 	__end_of_fixed_addresses
 };
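The FIX_KMAP window has to reserve one pte slot per kmap nesting level, per CPU, per D-cache color, which is why the range is sized as KM_MAX_IDX * NR_CPUS * DCACHE_N_COLORS. For a feel of the numbers, the values below are purely illustrative, not xtensa defaults:

    /*
     * Illustrative slot budget only; the actual KM_MAX_IDX, NR_CPUS
     * and DCACHE_N_COLORS values depend on the kernel configuration.
     * With KM_MAX_IDX = 16, NR_CPUS = 2, DCACHE_N_COLORS = 4:
     *   16 * 2 * 4 = 128 reserved fixmap ptes, i.e.
     *   FIX_KMAP_END == FIX_KMAP_BEGIN + 127
     */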

arch/xtensa/include/asm/highmem.h
@@ -16,7 +16,6 @@
 #include <linux/pgtable.h>
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
-#include <asm/kmap_types.h>
 
 #define PKMAP_BASE		((FIXADDR_START - \
 				  (LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
@@ -68,6 +67,15 @@ static inline void flush_cache_kmaps(void)
 	flush_cache_all();
 }
 
+enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn);
+#define arch_kmap_local_map_idx		kmap_local_map_idx
+
+enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr);
+#define arch_kmap_local_unmap_idx	kmap_local_unmap_idx
+
+#define arch_kmap_local_post_unmap(vaddr)	\
+	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
+
 void kmap_init(void);
 
 #endif
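xtensa overrides the index hooks because its D-cache can alias: the temporary mapping must land in a fixmap slot whose virtual address has the same cache color as the underlying page, and the unmap side must recompute that same slot from the virtual address alone; arch_kmap_local_post_unmap() then flushes the stale TLB entry. The arithmetic mirrors kmap_idx() in arch/xtensa/mm/highmem.c; the numbers below are hypothetical:

    /*
     * Hypothetical worked example of the slot math in kmap_idx():
     * with KM_MAX_IDX = 16 and DCACHE_N_COLORS = 4, on CPU 1, at
     * nesting level (type) 2, for a page of cache color 3:
     *   idx = (2 + 16 * 1) * 4 + 3 = 75
     * The resulting fixmap address shares color 3 with the page,
     * so the temporary mapping creates no D-cache alias.
     */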

arch/xtensa/mm/highmem.c
@@ -12,8 +12,6 @@
 #include <linux/highmem.h>
 #include <asm/tlbflush.h>
 
-static pte_t *kmap_pte;
-
 #if DCACHE_WAY_SIZE > PAGE_SIZE
 unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS];
 wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS];
@@ -33,59 +31,25 @@ static inline void kmap_waitqueues_init(void)
 static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
 {
-	return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS +
+	return (type + KM_MAX_IDX * smp_processor_id()) * DCACHE_N_COLORS +
 		color;
 }
 
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn)
 {
-	enum fixed_addresses idx;
-	unsigned long vaddr;
-
-	idx = kmap_idx(kmap_atomic_idx_push(),
-		       DCACHE_ALIAS(page_to_phys(page)));
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(kmap_pte + idx)));
-#endif
-	set_pte(kmap_pte + idx, mk_pte(page, prot));
-
-	return (void *)vaddr;
+	return kmap_idx(type, DCACHE_ALIAS(pfn << PAGE_SHIFT));
 }
-EXPORT_SYMBOL(kmap_atomic_high_prot);
 
-void kunmap_atomic_high(void *kvaddr)
+enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr)
 {
-	if (kvaddr >= (void *)FIXADDR_START &&
-	    kvaddr < (void *)FIXADDR_TOP) {
-		int idx = kmap_idx(kmap_atomic_idx(),
-				   DCACHE_ALIAS((unsigned long)kvaddr));
-
-		/*
-		 * Force other mappings to Oops if they'll try to access this
-		 * pte without first remap it. Keeping stale mappings around
-		 * is a bad idea also, in case the page changes cacheability
-		 * attributes or becomes a protected page in a hypervisor.
-		 */
-		pte_clear(&init_mm, kvaddr, kmap_pte + idx);
-		local_flush_tlb_kernel_range((unsigned long)kvaddr,
-					     (unsigned long)kvaddr + PAGE_SIZE);
-
-		kmap_atomic_idx_pop();
-	}
+	return kmap_idx(type, DCACHE_ALIAS(addr));
 }
-EXPORT_SYMBOL(kunmap_atomic_high);
 
 void __init kmap_init(void)
 {
-	unsigned long kmap_vstart;
-
 	/* Check if this memory layout is broken because PKMAP overlaps
	 * page table.
	 */
 	BUILD_BUG_ON(PKMAP_BASE < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
-	/* cache the first kmap pte */
-	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
-	kmap_pte = virt_to_kpte(kmap_vstart);
 	kmap_waitqueues_init();
 }
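Nothing changes for callers: the kmap_atomic()/kunmap_atomic() wrappers now resolve to the generic kmap_local implementation, so existing users keep working unmodified. A typical caller (generic, not xtensa-specific) looks like this:

    /*
     * Example caller; unaffected by this commit, since only the
     * backing implementation moved into generic code.
     */
    static void copy_to_highmem_page(struct page *page, const void *src)
    {
    	void *vaddr = kmap_atomic(page);

    	memcpy(vaddr, src, PAGE_SIZE);
    	kunmap_atomic(vaddr);
    }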