This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports the
standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT,
which looks up a private and static protection_map[] array. Subsequently
all the __SXXX and __PXXX macros can be dropped, as they are no longer
needed.

Link: https://lkml.kernel.org/r/20220711070600.2378316-10-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brian Cain <bcain@quicinc.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
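For reference, the per-architecture pattern reads roughly like the sketch
below, placed in an arch C file (such as arch/loongarch/mm/cache.c in this
series). The two entries shown are illustrative, built from the protection
bits defined in the header that follows; the real table covers all sixteen
VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combinations, and DECLARE_VM_GET_PAGE_PROT
expands to a vm_get_page_prot() that simply indexes the array with those
vm_flags bits:

	/* Illustrative sketch, not the authoritative table for this arch. */
	static pgprot_t protection_map[16] __ro_after_init = {
		[VM_NONE] = __pgprot(_CACHE_CC | _PAGE_USER | _PAGE_PROTNONE |
				     _PAGE_NO_EXEC | _PAGE_NO_READ),
		[VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER |
				     _PAGE_PRESENT | _PAGE_NO_EXEC),
		/*
		 * Entries for the remaining VM_WRITE/VM_EXEC/VM_SHARED
		 * combinations are constructed the same way.
		 */
	};

	DECLARE_VM_GET_PAGE_PROT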
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_PGTABLE_BITS_H
#define _ASM_PGTABLE_BITS_H

/* Page table bits */
#define _PAGE_VALID_SHIFT	0
#define _PAGE_ACCESSED_SHIFT	0  /* Reuse Valid for Accessed */
#define _PAGE_DIRTY_SHIFT	1
#define _PAGE_PLV_SHIFT		2  /* 2~3, two bits */
#define _CACHE_SHIFT		4  /* 4~5, two bits */
#define _PAGE_GLOBAL_SHIFT	6
#define _PAGE_HUGE_SHIFT	6  /* HUGE is a PMD bit */
#define _PAGE_PRESENT_SHIFT	7
#define _PAGE_WRITE_SHIFT	8
#define _PAGE_MODIFIED_SHIFT	9
#define _PAGE_PROTNONE_SHIFT	10
#define _PAGE_SPECIAL_SHIFT	11
#define _PAGE_HGLOBAL_SHIFT	12 /* HGlobal is a PMD bit */
#define _PAGE_PFN_SHIFT		12
#define _PAGE_PFN_END_SHIFT	48
#define _PAGE_NO_READ_SHIFT	61
#define _PAGE_NO_EXEC_SHIFT	62
#define _PAGE_RPLV_SHIFT	63

/* Used by software */
#define _PAGE_PRESENT		(_ULCAST_(1) << _PAGE_PRESENT_SHIFT)
#define _PAGE_WRITE		(_ULCAST_(1) << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED		(_ULCAST_(1) << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED		(_ULCAST_(1) << _PAGE_MODIFIED_SHIFT)
#define _PAGE_PROTNONE		(_ULCAST_(1) << _PAGE_PROTNONE_SHIFT)
#define _PAGE_SPECIAL		(_ULCAST_(1) << _PAGE_SPECIAL_SHIFT)

/* Used by TLB hardware (placed in EntryLo*) */
#define _PAGE_VALID		(_ULCAST_(1) << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY		(_ULCAST_(1) << _PAGE_DIRTY_SHIFT)
#define _PAGE_PLV		(_ULCAST_(3) << _PAGE_PLV_SHIFT)
#define _PAGE_GLOBAL		(_ULCAST_(1) << _PAGE_GLOBAL_SHIFT)
#define _PAGE_HUGE		(_ULCAST_(1) << _PAGE_HUGE_SHIFT)
#define _PAGE_HGLOBAL		(_ULCAST_(1) << _PAGE_HGLOBAL_SHIFT)
#define _PAGE_NO_READ		(_ULCAST_(1) << _PAGE_NO_READ_SHIFT)
#define _PAGE_NO_EXEC		(_ULCAST_(1) << _PAGE_NO_EXEC_SHIFT)
#define _PAGE_RPLV		(_ULCAST_(1) << _PAGE_RPLV_SHIFT)
#define _CACHE_MASK		(_ULCAST_(3) << _CACHE_SHIFT)
#define _PFN_SHIFT		(PAGE_SHIFT - 12 + _PAGE_PFN_SHIFT)

#define _PAGE_USER	(PLV_USER << _PAGE_PLV_SHIFT)
#define _PAGE_KERN	(PLV_KERN << _PAGE_PLV_SHIFT)

#define _PFN_MASK (~((_ULCAST_(1) << (_PFN_SHIFT)) - 1) & \
		  ((_ULCAST_(1) << (_PAGE_PFN_END_SHIFT)) - 1))

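/*
 * Example: _PFN_SHIFT reduces to PAGE_SHIFT (PAGE_SHIFT - 12 + 12), so
 * with the default 16 KiB pages (PAGE_SHIFT == 14) _PFN_MASK selects the
 * PFN field in PTE bits [14, 48); with 4 KiB pages it would select
 * bits [12, 48).
 */
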
/*
 * Cache attributes
 */
#ifndef _CACHE_SUC
#define _CACHE_SUC	(0<<_CACHE_SHIFT) /* Strong-ordered UnCached */
#endif
#ifndef _CACHE_CC
#define _CACHE_CC	(1<<_CACHE_SHIFT) /* Coherent Cached */
#endif
#ifndef _CACHE_WUC
#define _CACHE_WUC	(2<<_CACHE_SHIFT) /* Weak-ordered UnCached */
#endif

#define __READABLE	(_PAGE_VALID)
#define __WRITEABLE	(_PAGE_DIRTY | _PAGE_WRITE)

#define _PAGE_CHG_MASK	(_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
#define _HPAGE_CHG_MASK	(_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)

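/*
 * The *_CHG_MASK sets name the PTE/PMD bits that pte_modify() and
 * pmd_modify() are expected to preserve when applying a new protection:
 * the PFN, the cache attribute, the privilege level, and the
 * software-tracked Modified/Special state.
 */
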
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_NO_READ | \
				 _PAGE_USER | _CACHE_CC)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
				 _PAGE_USER | _CACHE_CC)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _CACHE_CC)

#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _PAGE_KERN | _CACHE_CC)
#define PAGE_KERNEL_SUC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _PAGE_KERN | _CACHE_SUC)
#define PAGE_KERNEL_WUC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC)

#ifndef __ASSEMBLY__

#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_SUC;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_WUC;

	return __pgprot(prot);
}
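
/*
 * Usage sketch (hypothetical, not part of this header): these overrides
 * replace the cache-attribute field of an existing pgprot, and are
 * typically reached through generic helpers rather than called directly,
 * e.g. a driver mapping write-combined MMIO:
 *
 *	void __iomem *regs = ioremap_wc(phys_addr, size);
 */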

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_BITS_H */