linux/arch/um/include/asm/pgtable.h
Linus Torvalds, commit 831c1926ee:

Merge tag 'uml-for-linus-6.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/uml/linux

Pull UML updates from Richard Weinberger:

 - Lots of cleanups, mostly from Benjamin Berg and Tiwei Bie

 - Removal of unused code

 - Fix for sparse warnings

 - Cleanup around stub_exe()

* tag 'uml-for-linus-6.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/uml/linux: (68 commits)
  hostfs: Fix the NULL vs IS_ERR() bug for __filemap_get_folio()
  um: move thread info into task
  um: Always dump trace for specified task in show_stack
  um: vector: Do not use drvdata in release
  um: net: Do not use drvdata in release
  um: ubd: Do not use drvdata in release
  um: ubd: Initialize ubd's disk pointer in ubd_add
  um: virtio_uml: query the number of vqs if supported
  um: virtio_uml: fix call_fd IRQ allocation
  um: virtio_uml: send SET_MEM_TABLE message with the exact size
  um: remove broken double fault detection
  um: remove duplicate UM_NSEC_PER_SEC definition
  um: remove file sync for stub data
  um: always include kconfig.h and compiler-version.h
  um: set DONTDUMP and DONTFORK flags on KASAN shadow memory
  um: fix sparse warnings in signal code
  um: fix sparse warnings from regset refactor
  um: Remove double zero check
  um: fix stub exe build with CONFIG_GCOV
  um: Use os_set_pdeathsig helper in winch thread/process
  ...
2024-11-30 10:34:54 -08:00


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 */
#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H
#include <asm/fixmap.h>
#define _PAGE_PRESENT 0x001
#define _PAGE_NEEDSYNC 0x002
#define _PAGE_RW 0x020
#define _PAGE_USER 0x040
#define _PAGE_ACCESSED 0x080
#define _PAGE_DIRTY 0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
                                pte_present gives true */
/* We borrow bit 10 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE 0x400
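
/*
 * Example (editor's illustration): a PROT_NONE mapping is encoded as
 * _PAGE_PROTNONE | _PAGE_ACCESSED = 0x090, with _PAGE_PRESENT clear.
 * pte_present() below tests (_PAGE_PRESENT | _PAGE_PROTNONE), so such a
 * PTE still counts as present, while pte_read()/pte_write() reject it
 * because _PAGE_PROTNONE is set.
 */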
#if CONFIG_PGTABLE_LEVELS == 4
#include <asm/pgtable-4level.h>
#elif CONFIG_PGTABLE_LEVELS == 2
#include <asm/pgtable-2level.h>
#else
#error "Unsupported number of page table levels"
#endif
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET (__va_space)
#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#define MODULES_VADDR VMALLOC_START
#define MODULES_END VMALLOC_END
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
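
/*
 * Worked example with assumed values: if end_iomem were 0x6043a000 and
 * VMALLOC_OFFSET (__va_space) were 0x800000 (8MB), then
 * VMALLOC_START = (0x6043a000 + 0x800000) & ~0x7fffff = 0x60800000,
 * i.e. end_iomem rounded up to the next 8MB boundary, which leaves the
 * intended "hole" between physical memory and the vmalloc area.
 */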
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
/*
 * The i386 can't do page protection for execute, and considers that the
 * same as a read. Also, write permissions imply read permissions. This is
 * the closest we can get..
 */
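
/*
 * Consequence of the bits chosen above (editor's illustration): PAGE_COPY
 * and PAGE_READONLY share the same encoding, both lacking _PAGE_RW, so a
 * write to a COW page faults and the fault handler copies it; and since
 * there is no separate execute bit, pte_exec() below simply mirrors
 * pte_read().
 */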
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
#define pte_clear(mm, addr, xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEEDSYNC))
#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEEDSYNC))
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEEDSYNC; } while (0)
#define pmd_needsync(x) (pmd_val(x) & _PAGE_NEEDSYNC)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEEDSYNC)
#define pud_needsync(x) (pud_val(x) & _PAGE_NEEDSYNC)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEEDSYNC)
#define p4d_needsync(x) (p4d_val(x) & _PAGE_NEEDSYNC)
#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEEDSYNC)
#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
        return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
        return((pte_get_bits(pte, _PAGE_USER)) &&
               !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_exec(pte_t pte)
{
        return((pte_get_bits(pte, _PAGE_USER)) &&
               !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_write(pte_t pte)
{
        return((pte_get_bits(pte, _PAGE_RW)) &&
               !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_dirty(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_needsync(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_NEEDSYNC);
}
/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_DIRTY);
        return(pte);
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_ACCESSED);
        return(pte);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_RW);
        return pte;
}

static inline pte_t pte_mkread(pte_t pte)
{
        pte_set_bits(pte, _PAGE_USER);
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_set_bits(pte, _PAGE_DIRTY);
        return(pte);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_set_bits(pte, _PAGE_ACCESSED);
        return(pte);
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
        pte_set_bits(pte, _PAGE_RW);
        return pte;
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_NEEDSYNC);
        return pte;
}

static inline pte_t pte_mkneedsync(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEEDSYNC);
        return(pte);
}
static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
        pte_copy(*pteptr, pteval);

        /* If it's a swap entry, it needs to be marked _PAGE_NEEDSYNC so
         * update_pte_range knows to unmap it.
         */
        *pteptr = pte_mkneedsync(*pteptr);
}
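
/*
 * Lifecycle sketch (editor's summary of the surrounding design): every
 * set_pte() leaves _PAGE_NEEDSYNC set; the lazy flush path later walks
 * the range recorded by um_tlb_mark_sync() below, propagates each
 * mapping to the host process, and clears the flag again (cf.
 * pte_mkuptodate() above).
 */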
#define PFN_PTE_SHIFT PAGE_SHIFT
static inline void um_tlb_mark_sync(struct mm_struct *mm, unsigned long start,
                                    unsigned long end)
{
        if (!mm->context.sync_tlb_range_to) {
                mm->context.sync_tlb_range_from = start;
                mm->context.sync_tlb_range_to = end;
        } else {
                if (start < mm->context.sync_tlb_range_from)
                        mm->context.sync_tlb_range_from = start;
                if (end > mm->context.sync_tlb_range_to)
                        mm->context.sync_tlb_range_to = end;
        }
}
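
/*
 * Worked example (hypothetical addresses): marking [0x1000, 0x3000) on a
 * context with no pending range records exactly that range; marking
 * [0x2000, 0x5000) afterwards widens it to [0x1000, 0x5000). The flush
 * code therefore syncs one superset range instead of many small ones.
 */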
#define set_ptes set_ptes
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
                            pte_t *ptep, pte_t pte, int nr)
{
        /* Basically the default implementation */
        size_t length = nr * PAGE_SIZE;

        for (;;) {
                set_pte(ptep, pte);
                if (--nr == 0)
                        break;
                ptep++;
                /* advance to the next pfn for each subsequent entry */
                pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
        }

        um_tlb_mark_sync(mm, addr, addr + length);
}
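
/*
 * Example (hypothetical values): with nr = 3 and a pte pointing at
 * pfn 0x100, the loop writes pfns 0x100, 0x101 and 0x102 into three
 * consecutive ptes (each iteration adds 1UL << PFN_PTE_SHIFT, i.e. one
 * pfn), then marks [addr, addr + 3 * PAGE_SIZE) for host sync in one go.
 */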
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
        return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEEDSYNC);
}
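
/*
 * _PAGE_NEEDSYNC is masked out above because it only tracks pending host
 * synchronisation, not the mapping itself: two ptes that differ only in
 * that bit still describe the same page and protection.
 */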
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
        ({ pte_t pte;                                   \
                                                        \
        pte_set_val(pte, page_to_phys(page), (pgprot)); \
        pte; })
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
        return pte;
}
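
/*
 * Example (editor's illustration): on an mprotect() from PAGE_READONLY to
 * PAGE_SHARED, _PAGE_CHG_MASK keeps the pfn plus the accessed/dirty bits
 * and only the protection bits are replaced, so referenced/dirty state
 * survives the permission change.
 */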
/*
 * The pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * This macro returns the kernel virtual address of the page table page
 * that the given pmd entry points to.
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
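
/*
 * Usage sketch (assuming the generic pte_index() helper from
 * include/linux/pgtable.h): the pte controlling address addr within a
 * pmd entry lives at
 *   (pte_t *)pmd_page_vaddr(pmd) + pte_index(addr)
 * which is how the generic page table walkers locate entries.
 */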
struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
#define update_mmu_cache(vma,address,ptep) do {} while (0)
#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do {} while (0)
/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ----------------> E < type -> 0 0 0 1 0
 *
 * E is the exclusive marker that is not stored in swap entries.
 * _PAGE_NEEDSYNC (bit 1) is always set to 1 in set_pte().
 */
#define __swp_type(x) (((x).val >> 5) & 0x1f)
#define __swp_offset(x) ((x).val >> 11)

#define __swp_entry(type, offset) \
        ((swp_entry_t) { (((type) & 0x1f) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
        ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
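
/*
 * Worked example (hypothetical values): __swp_entry(3, 0x1234) yields
 * val = (3 << 5) | (0x1234 << 11) = 0x91a060; decoding gives
 * __swp_type = (0x91a060 >> 5) & 0x1f = 3 and
 * __swp_offset = 0x91a060 >> 11 = 0x1234. Setting the exclusive marker
 * (bit 10, 0x400) disturbs neither field.
 */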
static inline int pte_swp_exclusive(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
        pte_set_bits(pte, _PAGE_SWP_EXCLUSIVE);
        return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_SWP_EXCLUSIVE);
        return pte;
}
#endif /* __UM_PGTABLE_H */