Let's add a helper that lets us batch-process multiple consecutive PTEs. Note that the loop will get optimized out on all architectures except on powerpc. We have to add an early define of __tlb_remove_tlb_entry() on ppc to make the compiler happy (and avoid making tlb_remove_tlb_entries() a macro). [arnd@kernel.org: change __tlb_remove_tlb_entry() to an inline function] Link: https://lkml.kernel.org/r/20240221154549.2026073-1-arnd@kernel.org Link: https://lkml.kernel.org/r/20240214204435.167852-8-david@redhat.com Signed-off-by: David Hildenbrand <david@redhat.com> Signed-off-by: Arnd Bergmann <arnd@arndb.de> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Hocko <mhocko@suse.com> Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Will Deacon <will@kernel.org> Cc: Yin Fengwei <fengwei.yin@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
93 lines · 2.3 KiB · C
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
/*
|
|
* TLB shootdown specifics for powerpc
|
|
*
|
|
* Copyright (C) 2002 Anton Blanchard, IBM Corp.
|
|
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
|
|
*/
|
|
#ifndef _ASM_POWERPC_TLB_H
|
|
#define _ASM_POWERPC_TLB_H
|
|
#ifdef __KERNEL__
|
|
|
|
#ifndef __powerpc64__
|
|
#include <linux/pgtable.h>
|
|
#endif
|
|
#ifndef __powerpc64__
|
|
#include <asm/page.h>
|
|
#include <asm/mmu.h>
|
|
#endif
|
|
|
|
#include <linux/pagemap.h>
|
|
|
|
/*
 * Forward-declare __tlb_remove_tlb_entry() before pulling in
 * <asm-generic/tlb.h>: the generic tlb_remove_tlb_entries() helper needs
 * the name to exist early, and an inline function (rather than a macro)
 * keeps it type-checked.  The definition follows below, once the generic
 * header has completed struct mmu_gather.
 */
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address);
#define __tlb_remove_tlb_entry __tlb_remove_tlb_entry
|
|
|
|
/*
 * Defining the name to itself tells <asm-generic/tlb.h> that powerpc
 * supplies its own tlb_flush() instead of the generic fallback.
 */
#define tlb_flush tlb_flush
extern void tlb_flush(struct mmu_gather *tlb);

/*
 * book3s:
 * Hash does not use the linux page-tables, so we can avoid
 * the TLB invalidate for page-table freeing, Radix otoh does use the
 * page-tables and needs the TLBI.
 *
 * nohash:
 * We still do TLB invalidate in the __pte_free_tlb routine before we
 * add the page table pages to mmu gather table batch.
 */
#define tlb_needs_table_invalidate() radix_enabled()
|
|
|
|
/* Get the generic bits... */
|
|
#include <asm-generic/tlb.h>
|
|
|
|
/*
 * Per-PTE teardown hook called by the mmu_gather machinery.  On 32-bit
 * book3s, a PTE that has a hash-table slot (_PAGE_HASHPTE set) must have
 * that slot flushed before the mapping goes away; on every other
 * configuration this is a no-op.
 */
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
#ifdef CONFIG_PPC_BOOK3S_32
	if (!(pte_val(*ptep) & _PAGE_HASHPTE))
		return;

	flush_hash_entry(tlb->mm, ptep, address);
#endif
}
|
|
|
|
#ifdef CONFIG_SMP
|
|
/*
 * Nonzero when every CPU this mm has been active on is a hardware
 * sibling of the current CPU, i.e. mm_cpumask is contained in the
 * current core's sibling mask.
 */
static inline int mm_is_core_local(struct mm_struct *mm)
{
	const struct cpumask *siblings;

	siblings = topology_sibling_cpumask(smp_processor_id());
	return cpumask_subset(mm_cpumask(mm), siblings);
}
|
|
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
|
static inline int mm_is_thread_local(struct mm_struct *mm)
|
|
{
|
|
if (atomic_read(&mm->context.active_cpus) > 1)
|
|
return false;
|
|
return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
|
|
}
|
|
#else /* CONFIG_PPC_BOOK3S_64 */
|
|
/*
 * Nonzero when the mm's cpumask contains exactly the current CPU —
 * no active-CPU counter exists outside book3s-64, so compare masks.
 */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	const struct cpumask *self = cpumask_of(smp_processor_id());

	return cpumask_equal(mm_cpumask(mm), self);
}
|
|
#endif /* !CONFIG_PPC_BOOK3S_64 */
|
|
|
|
#else /* CONFIG_SMP */
|
|
/* Uniprocessor: the mm can only ever have run on this (single) CPU. */
static inline int mm_is_core_local(struct mm_struct *mm)
{
	return 1;
}
|
|
|
|
/* Uniprocessor: trivially local for the same reason as above. */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	return 1;
}
|
|
#endif
|
|
|
|
#define arch_supports_page_table_move arch_supports_page_table_move
/* Page-table move optimizations are enabled only under the Radix MMU. */
static inline bool arch_supports_page_table_move(void)
{
	return radix_enabled();
}
|
|
|
|
#endif /* __KERNEL__ */
|
|
#endif /* __ASM_POWERPC_TLB_H */
|