mm: remove pte_*map_nested()
Since we no longer need to provide KM_type, the whole pte_*map_nested()
API is now redundant, remove it.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3e4d3af501
commit ece0e2b640
31 changed files with 22 additions and 99 deletions
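As context for the hunks below, here is a minimal sketch of the calling pattern this change relies on. It is illustrative only and not code from this patch: the helper copy_one_pte_sketch() is hypothetical, while pte_offset_map(), pte_unmap() and set_pte_at() are the existing kernel interfaces the patch keeps. With the stack-based kmap_atomic() from the parent commit, a second PTE page can be mapped while another is already mapped without reserving a dedicated KM_PTE1 slot, so the *_nested variants no longer serve a purpose.

/*
 * Illustrative sketch (hypothetical helper, not part of this patch):
 * mapping two PTE pages at the same time once the *_nested API is gone.
 */
static void copy_one_pte_sketch(struct mm_struct *mm, pmd_t *dst_pmd,
				pmd_t *src_pmd, unsigned long addr)
{
	pte_t *dst_pte = pte_offset_map(dst_pmd, addr);
	/* previously this second mapping used pte_offset_map_nested() */
	pte_t *src_pte = pte_offset_map(src_pmd, addr);

	set_pte_at(mm, addr, dst_pte, *src_pte);

	pte_unmap(src_pte);	/* previously pte_unmap_nested() */
	pte_unmap(dst_pte);
}

The per-file hunks follow.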
@@ -318,9 +318,7 @@ extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
 }
 
 #define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir),(addr))
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 extern pgd_t swapper_pg_dir[1024];
 
@@ -263,17 +263,15 @@ extern struct page *empty_zero_page;
 #define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
 #define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr))
 
-#define pte_offset_map(dir,addr) (__pte_map(dir, KM_PTE0) + __pte_index(addr))
-#define pte_offset_map_nested(dir,addr) (__pte_map(dir, KM_PTE1) + __pte_index(addr))
-#define pte_unmap(pte) __pte_unmap(pte, KM_PTE0)
-#define pte_unmap_nested(pte) __pte_unmap(pte, KM_PTE1)
+#define pte_offset_map(dir,addr) (__pte_map(dir) + __pte_index(addr))
+#define pte_unmap(pte) __pte_unmap(pte)
 
 #ifndef CONFIG_HIGHPTE
-#define __pte_map(dir,km) pmd_page_vaddr(*(dir))
-#define __pte_unmap(pte,km) do { } while (0)
+#define __pte_map(dir) pmd_page_vaddr(*(dir))
+#define __pte_unmap(pte) do { } while (0)
 #else
-#define __pte_map(dir,km) ((pte_t *)kmap_atomic(pmd_page(*(dir)), km) + PTRS_PER_PTE)
-#define __pte_unmap(pte,km) kunmap_atomic((pte - PTRS_PER_PTE), km)
+#define __pte_map(dir) ((pte_t *)kmap_atomic(pmd_page(*(dir))) + PTRS_PER_PTE)
+#define __pte_unmap(pte) kunmap_atomic((pte - PTRS_PER_PTE))
 #endif
 
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
@@ -89,13 +89,13 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	 * open-code the spin-locking.
 	 */
 	ptl = pte_lockptr(vma->vm_mm, pmd);
-	pte = pte_offset_map_nested(pmd, address);
+	pte = pte_offset_map(pmd, address);
 	spin_lock(ptl);
 
 	ret = do_adjust_pte(vma, address, pfn, pte);
 
 	spin_unlock(ptl);
-	pte_unmap_nested(pte);
+	pte_unmap(pte);
 
 	return ret;
 }
@@ -57,9 +57,9 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 			goto no_pte;
 
 		init_pmd = pmd_offset(init_pgd, 0);
-		init_pte = pte_offset_map_nested(init_pmd, 0);
+		init_pte = pte_offset_map(init_pmd, 0);
 		set_pte_ext(new_pte, *init_pte, 0);
-		pte_unmap_nested(init_pte);
+		pte_unmap(init_pte);
 		pte_unmap(new_pte);
 	}
 
@@ -319,9 +319,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_offset_kernel(dir, address) \
 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct * vma,
@@ -248,10 +248,8 @@ static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long addre
 	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 #define pte_offset_map(dir, address) \
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #define pte_pfn(x) ((unsigned long)(__va((x).pte)) >> PAGE_SHIFT)
 #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
@@ -451,17 +451,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address) \
-	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
-	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
+	((pte_t *)kmap_atomic(pmd_page(*(dir))) + pte_index(address))
+#define pte_unmap(pte) kunmap_atomic(pte)
 #else
 #define pte_offset_map(dir, address) \
 	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #endif
 
 /*
@@ -406,9 +406,7 @@ pgd_offset (const struct mm_struct *mm, unsigned long address)
 #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir,addr) ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
-#define pte_offset_map_nested(dir,addr) pte_offset_map(dir, addr)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 /* atomic versions of the some PTE manipulations: */
 
@@ -332,9 +332,7 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
 	((pte_t *)pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address) \
 	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 /* Encode and de-code a swap entry */
 #define __swp_type(x) (((x).val >> 2) & 0x1f)
@@ -221,9 +221,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address)
 }
 
 #define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address)
 #define pte_unmap(pte) ((void)0)
-#define pte_unmap_nested(pte) ((void)0)
 
 /*
  * Allocate and free page tables. The xxx_kernel() versions are
@@ -219,9 +219,7 @@ static inline pte_t pgoff_to_pte(unsigned off)
 #define pte_offset_kernel(pmd, address) ((pte_t *) __pmd_page(*pmd) + pte_index(address))
 /* FIXME: should we bother with kmap() here? */
 #define pte_offset_map(pmd, address) ((pte_t *)kmap(pmd_page(*pmd)) + pte_index(address))
-#define pte_offset_map_nested(pmd, address) pte_offset_map(pmd, address)
 #define pte_unmap(pte) kunmap(pte)
-#define pte_unmap_nested(pte) kunmap(pte)
 
 /* Macros to (de)construct the fake PTEs representing swap pages. */
 #define __swp_type(x) ((x).val & 0x7F)
@@ -504,12 +504,9 @@ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
 #define pte_offset_kernel(dir, addr) \
 	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir, addr) \
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
-#define pte_offset_map_nested(dir, addr) \
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
+	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
 
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+#define pte_unmap(pte) kunmap_atomic(pte)
 
 /* Encode and decode a nonlinear file mapping entry */
 #define PTE_FILE_MAX_BITS 29
@@ -154,10 +154,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 
 #define pte_offset_map(dir, address) \
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) \
-	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
 
 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
@@ -257,10 +257,7 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
 	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 #define pte_offset_map(dir, address) \
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) \
-	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
 
 /*
  * Initialize a new pgd / pmd table with invalid pointers.
@@ -457,9 +457,7 @@ static inline int set_kernel_exec(unsigned long vaddr, int enable)
 
 #define pte_offset_map(dir, address) \
 	((pte_t *) page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 #define pte_unmap(pte) do {} while (0)
-#define pte_unmap_nested(pte) do {} while (0)
 
 /*
  * The MN10300 has external MMU info in the form of a TLB: this is adapted from
@@ -397,9 +397,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_offset_kernel(pmd, address) \
 	((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 #define pte_unmap(pte) do { } while (0)
 #define pte_unmap_nested(pte) do { } while (0)
@@ -308,12 +308,8 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 #define pte_offset_kernel(dir, addr) \
 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir, addr) \
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
-#define pte_offset_map_nested(dir, addr) \
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
-
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+#define pte_unmap(pte) kunmap_atomic(pte)
 
 /*
  * Encode and decode a swap entry.
@@ -193,9 +193,7 @@
 	(((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 
 #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
 #define pte_unmap(pte) do { } while(0)
-#define pte_unmap_nested(pte) do { } while(0)
 
 /* to find an entry in a kernel page-table-directory */
 /* This now only contains the vmalloc pages */
@@ -1094,9 +1094,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
 #define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 /*
  * 31 bit swap entry format:
@@ -88,10 +88,7 @@ static inline void pmd_clear(pmd_t *pmdp)
 
 #define pte_offset_map(dir, address) \
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) \
-	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
 
 /*
  * Bits 9(_PAGE_PRESENT) and 10(_PAGE_FILE)are taken,
@@ -429,10 +429,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_offset_kernel(dir, address) \
 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
-
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 #ifdef CONFIG_X2TLB
 #define pte_ERROR(e) \
@@ -84,9 +84,7 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
 	((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
 
 #define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel(dir, addr)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 #ifndef __ASSEMBLY__
 #define IOBASE_VADDR 0xff000000
@@ -304,10 +304,7 @@ BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
  * and sun4c is guaranteed to have no highmem anyway.
  */
 #define pte_offset_map(d, a) pte_offset_kernel(d,a)
-#define pte_offset_map_nested(d, a) pte_offset_kernel(d,a)
-
 #define pte_unmap(pte) do{}while(0)
-#define pte_unmap_nested(pte) do{}while(0)
 
 /* Certain architectures need to do special things when pte's
  * within a page table are directly modified. Thus, the following
@@ -652,9 +652,7 @@ static inline int pte_special(pte_t pte)
 	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 #define pte_offset_kernel pte_index
 #define pte_offset_map pte_index
-#define pte_offset_map_nested pte_index
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 /* Actual page table PTE updates. */
 extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig);
@@ -347,15 +347,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
 #define pte_offset_map(dir, address) \
 	_pte_offset_map(dir, address, KM_PTE0)
-#define pte_offset_map_nested(dir, address) \
-	_pte_offset_map(dir, address, KM_PTE1)
 #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
 #else
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #endif
 
 /* Clear a non-executable kernel PTE and flush it from the TLB. */
@@ -338,9 +338,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address) \
 	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 struct mm_struct;
 extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
@@ -49,24 +49,14 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 #endif
 
 #if defined(CONFIG_HIGHPTE)
-#define __KM_PTE \
-	(in_nmi() ? KM_NMI_PTE : \
-	 in_irq() ? KM_IRQ_PTE : \
-	 KM_PTE0)
 #define pte_offset_map(dir, address) \
-	((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) + \
+	((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
 	 pte_index((address)))
-#define pte_offset_map_nested(dir, address) \
-	((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) + \
-	 pte_index((address)))
-#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
-#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
+#define pte_unmap(pte) kunmap_atomic((pte))
 #else
 #define pte_offset_map(dir, address) \
 	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
-#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #endif
 
 /* Clear a kernel PTE and flush it from the TLB */
@@ -127,9 +127,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
 
 /* x86-64 always has all page tables mapped. */
 #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
 #define pte_unmap(pte) ((void)(pte))/* NOP */
-#define pte_unmap_nested(pte) ((void)(pte)) /* NOP */
 
 #define update_mmu_cache(vma, address, ptep) do { } while (0)
 
@@ -324,10 +324,7 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 #define pte_offset_kernel(dir,addr) \
 	((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir),(addr))
-
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 
 /*
@@ -736,7 +736,7 @@ again:
 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
 	if (!dst_pte)
 		return -ENOMEM;
-	src_pte = pte_offset_map_nested(src_pmd, addr);
+	src_pte = pte_offset_map(src_pmd, addr);
 	src_ptl = pte_lockptr(src_mm, src_pmd);
 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
 	orig_src_pte = src_pte;
@@ -767,7 +767,7 @@ again:
 
 	arch_leave_lazy_mmu_mode();
 	spin_unlock(src_ptl);
-	pte_unmap_nested(orig_src_pte);
+	pte_unmap(orig_src_pte);
 	add_mm_rss_vec(dst_mm, rss);
 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
 	cond_resched();
@@ -101,7 +101,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	 * pte locks because exclusive mmap_sem prevents deadlock.
 	 */
 	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
-	new_pte = pte_offset_map_nested(new_pmd, new_addr);
+	new_pte = pte_offset_map(new_pmd, new_addr);
 	new_ptl = pte_lockptr(mm, new_pmd);
 	if (new_ptl != old_ptl)
 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -119,7 +119,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	arch_leave_lazy_mmu_mode();
 	if (new_ptl != old_ptl)
 		spin_unlock(new_ptl);
-	pte_unmap_nested(new_pte - 1);
+	pte_unmap(new_pte - 1);
 	pte_unmap_unlock(old_pte - 1, old_ptl);
 	if (mapping)
 		spin_unlock(&mapping->i_mmap_lock);