FEAT_LPA2 impacts tlb invalidation in 2 ways: Firstly, the TTL field in the non-range tlbi instructions can now validly take a 0 value as a level hint for the 4KB granule (this is due to the extra level of translation) - previously TTL=0b0100 meant no hint and was treated as 0b0000. Secondly, the BADDR field of the range-based tlbi instructions is specified in 64KB units when LPA2 is in use (TCR.DS=1), whereas it is in page units otherwise. Changes are required for tlbi to continue to operate correctly when LPA2 is in use. Solve the first problem by always adding the level hint if the level is between [0, 3] (previously anything other than 0 was hinted, which breaks in the new level -1 case from kvm). When running on non-LPA2 HW, 0 is still safe to hint as the HW will fall back to non-hinted. While we are at it, we replace the notion of 0 being the non-hinted sentinel with a macro, TLBI_TTL_UNKNOWN. This means callers won't need updating if/when translation depth increases in future. The second issue is more complex: when LPA2 is in use, use the non-range tlbi instructions to forward align to a 64KB boundary first, then use range-based tlbi from there on, until we have either invalidated all pages or we have a single page remaining. If the latter, that is done with non-range tlbi. We determine whether LPA2 is in use based on lpa2_is_enabled() (for kernel calls) or kvm_lpa2_is_enabled() (for kvm calls). Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Reviewed-by: Oliver Upton <oliver.upton@linux.dev> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com> Signed-off-by: Marc Zyngier <maz@kernel.org> Link: https://lore.kernel.org/r/20231127111737.1897081-4-ryan.roberts@arm.com
111 lines
2.6 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
/*
|
|
* Based on arch/arm/include/asm/tlb.h
|
|
*
|
|
* Copyright (C) 2002 Russell King
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
*/
|
|
#ifndef __ASM_TLB_H
|
|
#define __ASM_TLB_H
|
|
|
|
#include <linux/pagemap.h>
|
|
#include <linux/swap.h>
|
|
|
|
/*
 * Free a page-table page that was queued on the mmu_gather for deferred
 * freeing, dropping any swap-cache reference along with the page itself.
 */
static inline void __tlb_remove_table(void *_table)
{
	struct page *page = _table;

	free_page_and_swap_cache(page);
}
|
|
|
|
/*
 * Forward-declare the arch-specific tlb_flush() and define the override
 * macro before including <asm-generic/tlb.h>, so the generic header uses
 * this implementation instead of providing its own default.
 */
#define tlb_flush tlb_flush
static void tlb_flush(struct mmu_gather *tlb);
|
|
|
#include <asm-generic/tlb.h>
|
|
|
|
/*
|
|
* get the tlbi levels in arm64. Default value is TLBI_TTL_UNKNOWN if more than
|
|
* one of cleared_* is set or neither is set - this elides the level hinting to
|
|
* the hardware.
|
|
*/
|
|
static inline int tlb_get_level(struct mmu_gather *tlb)
|
|
{
|
|
/* The TTL field is only valid for the leaf entry. */
|
|
if (tlb->freed_tables)
|
|
return TLBI_TTL_UNKNOWN;
|
|
|
|
if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
|
|
tlb->cleared_puds ||
|
|
tlb->cleared_p4ds))
|
|
return 3;
|
|
|
|
if (tlb->cleared_pmds && !(tlb->cleared_ptes ||
|
|
tlb->cleared_puds ||
|
|
tlb->cleared_p4ds))
|
|
return 2;
|
|
|
|
if (tlb->cleared_puds && !(tlb->cleared_ptes ||
|
|
tlb->cleared_pmds ||
|
|
tlb->cleared_p4ds))
|
|
return 1;
|
|
|
|
if (tlb->cleared_p4ds && !(tlb->cleared_ptes ||
|
|
tlb->cleared_pmds ||
|
|
tlb->cleared_puds))
|
|
return 0;
|
|
|
|
return TLBI_TTL_UNKNOWN;
|
|
}
|
|
|
|
static inline void tlb_flush(struct mmu_gather *tlb)
|
|
{
|
|
struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
|
|
bool last_level = !tlb->freed_tables;
|
|
unsigned long stride = tlb_get_unmap_size(tlb);
|
|
int tlb_level = tlb_get_level(tlb);
|
|
|
|
/*
|
|
* If we're tearing down the address space then we only care about
|
|
* invalidating the walk-cache, since the ASID allocator won't
|
|
* reallocate our ASID without invalidating the entire TLB.
|
|
*/
|
|
if (tlb->fullmm) {
|
|
if (!last_level)
|
|
flush_tlb_mm(tlb->mm);
|
|
return;
|
|
}
|
|
|
|
__flush_tlb_range(&vma, tlb->start, tlb->end, stride,
|
|
last_level, tlb_level);
|
|
}
|
|
|
|
/*
 * Tear down a PTE-level table page: run its destructor, then hand it to the
 * mmu_gather for deferred (TLB-safe) freeing.
 */
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	struct ptdesc *pt = page_ptdesc(pte);

	pagetable_pte_dtor(pt);
	tlb_remove_ptdesc(tlb, pt);
}
|
|
|
|
#if CONFIG_PGTABLE_LEVELS > 2
/*
 * Tear down a PMD-level table page: run its destructor, then hand it to the
 * mmu_gather for deferred (TLB-safe) freeing.
 */
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
	struct ptdesc *pt = virt_to_ptdesc(pmdp);

	pagetable_pmd_dtor(pt);
	tlb_remove_ptdesc(tlb, pt);
}
#endif
|
|
|
|
#if CONFIG_PGTABLE_LEVELS > 3
/*
 * Tear down a PUD-level table page: run its destructor, then hand it to the
 * mmu_gather for deferred (TLB-safe) freeing.
 */
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
				  unsigned long addr)
{
	struct ptdesc *pt = virt_to_ptdesc(pudp);

	pagetable_pud_dtor(pt);
	tlb_remove_ptdesc(tlb, pt);
}
#endif
|
|
|
|
#endif
|