The pcpu setup when using the page allocator sets up a new vmalloc mapping very early in the boot process, so early that it cannot use the flush_cache_vmap() function, which may depend on structures not yet initialized (for example, on riscv we currently send an IPI to flush the other CPUs' TLBs).

But on some architectures we must call flush_cache_vmap(): for example, on riscv some uarchs can cache invalid TLB entries, so we need to flush the newly established mapping to avoid taking an exception.

So fix this by introducing a new function, flush_cache_vmap_early(), which is called right after setting the new page table entry and before accessing this new mapping. This new function implements a local TLB flush on riscv and is a no-op for other architectures (same as today).

Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
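For context, a minimal sketch of what the new hook amounts to on the two sides described above. The exact placement and the riscv helper name (local_flush_tlb_kernel_range()) are assumptions for illustration, not taken from the listing below:

/*
 * Sketch 1: generic fallback -- architectures that do not need a flush
 * simply get a no-op, like the csky definition further down.
 */
#ifndef flush_cache_vmap_early
#define flush_cache_vmap_early(start, end)	do { } while (0)
#endif

/*
 * Sketch 2: riscv override -- a CPU-local TLB flush, assuming a
 * local_flush_tlb_kernel_range() helper is available (no IPIs, so it
 * is usable this early in boot).
 */
#define flush_cache_vmap_early(start, end)	local_flush_tlb_kernel_range(start, end)

/*
 * Sketch 3: intended call pattern in the early pcpu page-table setup --
 * set the new PTE, flush locally, only then touch the new mapping.
 * (addr/ptep/pfn are illustrative names.)
 */
set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL));
flush_cache_vmap_early(addr, addr + PAGE_SIZE);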
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

/*
 * The cache doesn't need to be flushed when TLB entries change when
 * the cache is mapped to physical memory, not virtual memory
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)

#define PG_dcache_clean		PG_arch_1

static inline void flush_dcache_folio(struct folio *folio)
{
	if (test_bit(PG_dcache_clean, &folio->flags))
		clear_bit(PG_dcache_clean, &folio->flags);
}
#define flush_dcache_folio flush_dcache_folio

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#define flush_icache_range(start, end)		cache_wbinv_range(start, end)

void flush_icache_mm_range(struct mm_struct *mm,
			   unsigned long start, unsigned long end);
void flush_icache_deferred(struct mm_struct *mm);

#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vmap_early(start, end)	do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
	if (vma->vm_flags & VM_EXEC) { \
		dcache_wb_range((unsigned long)dst, \
				(unsigned long)dst + len); \
		flush_icache_mm_range(current->mm, \
				(unsigned long)dst, \
				(unsigned long)dst + len); \
	} \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif /* __ABI_CSKY_CACHEFLUSH_H */