arm64: memory: rename VA_START to PAGE_END
Prior to commit:
14c127c957
("arm64: mm: Flip kernel VA space")
... VA_START described the start of the TTBR1 address space for a given
VA size described by VA_BITS, where all kernel mappings began.
Since that commit, VA_START described a portion midway through the
address space, where the linear map ends and other kernel mappings
begin.
To avoid confusion, let's rename VA_START to PAGE_END, making it clear
that it's not the start of the TTBR1 address space and implying that
it's related to PAGE_OFFSET. Comments and other mnemonics are updated
accordingly, along with a typo fix in the description of VMEMMAP_SIZE.
There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Steve Capper <steve.capper@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
This commit is contained in:
parent
233947ef16
commit
77ad4ce693
6 changed files with 19 additions and 19 deletions
|
@@ -28,20 +28,20 @@
|
||||||
* a struct page array
|
* a struct page array
|
||||||
*
|
*
|
||||||
* If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE
|
* If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE
|
||||||
* neads to cover the memory region from the beginning of the 52-bit
|
* needs to cover the memory region from the beginning of the 52-bit
|
||||||
* PAGE_OFFSET all the way to VA_START for 48-bit. This allows us to
|
* PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to
|
||||||
* keep a constant PAGE_OFFSET and "fallback" to using the higher end
|
* keep a constant PAGE_OFFSET and "fallback" to using the higher end
|
||||||
* of the VMEMMAP where 52-bit support is not available in hardware.
|
* of the VMEMMAP where 52-bit support is not available in hardware.
|
||||||
*/
|
*/
|
||||||
#define VMEMMAP_SIZE ((_VA_START(VA_BITS_MIN) - PAGE_OFFSET) \
|
#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \
|
||||||
>> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT))
|
>> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT))
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* PAGE_OFFSET - the virtual address of the start of the linear map (top
|
* PAGE_OFFSET - the virtual address of the start of the linear map, at the
|
||||||
* (VA_BITS - 1))
|
* start of the TTBR1 address space.
|
||||||
* KIMAGE_VADDR - the virtual address of the start of the kernel image
|
* PAGE_END - the end of the linear map, where all other kernel mappings begin.
|
||||||
|
* KIMAGE_VADDR - the virtual address of the start of the kernel image.
|
||||||
* VA_BITS - the maximum number of bits for virtual addresses.
|
* VA_BITS - the maximum number of bits for virtual addresses.
|
||||||
* VA_START - the first kernel virtual address.
|
|
||||||
*/
|
*/
|
||||||
#define VA_BITS (CONFIG_ARM64_VA_BITS)
|
#define VA_BITS (CONFIG_ARM64_VA_BITS)
|
||||||
#define _PAGE_OFFSET(va) (-(UL(1) << (va)))
|
#define _PAGE_OFFSET(va) (-(UL(1) << (va)))
|
||||||
|
@@ -64,7 +64,7 @@
|
||||||
#define VA_BITS_MIN (VA_BITS)
|
#define VA_BITS_MIN (VA_BITS)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#define _VA_START(va) (-(UL(1) << ((va) - 1)))
|
#define _PAGE_END(va) (-(UL(1) << ((va) - 1)))
|
||||||
|
|
||||||
#define KERNEL_START _text
|
#define KERNEL_START _text
|
||||||
#define KERNEL_END _end
|
#define KERNEL_END _end
|
||||||
|
@@ -87,7 +87,7 @@
|
||||||
#define KASAN_THREAD_SHIFT 1
|
#define KASAN_THREAD_SHIFT 1
|
||||||
#else
|
#else
|
||||||
#define KASAN_THREAD_SHIFT 0
|
#define KASAN_THREAD_SHIFT 0
|
||||||
#define KASAN_SHADOW_END (_VA_START(VA_BITS_MIN))
|
#define KASAN_SHADOW_END (_PAGE_END(VA_BITS_MIN))
|
||||||
#endif /* CONFIG_KASAN */
|
#endif /* CONFIG_KASAN */
|
||||||
|
|
||||||
#define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT)
|
#define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT)
|
||||||
|
@@ -173,7 +173,7 @@
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
#ifndef __ASSEMBLY__
|
||||||
extern u64 vabits_actual;
|
extern u64 vabits_actual;
|
||||||
#define VA_START (_VA_START(vabits_actual))
|
#define PAGE_END (_PAGE_END(vabits_actual))
|
||||||
|
|
||||||
#include <linux/bitops.h>
|
#include <linux/bitops.h>
|
||||||
#include <linux/mmdebug.h>
|
#include <linux/mmdebug.h>
|
||||||
|
|
|
@@ -856,8 +856,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
|
||||||
|
|
||||||
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
|
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
|
||||||
|
|
||||||
#define kc_vaddr_to_offset(v) ((v) & ~VA_START)
|
#define kc_vaddr_to_offset(v) ((v) & ~PAGE_END)
|
||||||
#define kc_offset_to_vaddr(o) ((o) | VA_START)
|
#define kc_offset_to_vaddr(o) ((o) | PAGE_END)
|
||||||
|
|
||||||
#ifdef CONFIG_ARM64_PA_BITS_52
|
#ifdef CONFIG_ARM64_PA_BITS_52
|
||||||
#define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
|
#define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
|
||||||
|
|
|
@@ -496,7 +496,7 @@ int swsusp_arch_resume(void)
|
||||||
rc = -ENOMEM;
|
rc = -ENOMEM;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, VA_START);
|
rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, PAGE_END);
|
||||||
if (rc)
|
if (rc)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
|
|
|
@@ -28,7 +28,7 @@
|
||||||
|
|
||||||
enum address_markers_idx {
|
enum address_markers_idx {
|
||||||
PAGE_OFFSET_NR = 0,
|
PAGE_OFFSET_NR = 0,
|
||||||
VA_START_NR,
|
PAGE_END_NR,
|
||||||
#ifdef CONFIG_KASAN
|
#ifdef CONFIG_KASAN
|
||||||
KASAN_START_NR,
|
KASAN_START_NR,
|
||||||
#endif
|
#endif
|
||||||
|
@@ -36,7 +36,7 @@ enum address_markers_idx {
|
||||||
|
|
||||||
static struct addr_marker address_markers[] = {
|
static struct addr_marker address_markers[] = {
|
||||||
{ PAGE_OFFSET, "Linear Mapping start" },
|
{ PAGE_OFFSET, "Linear Mapping start" },
|
||||||
{ 0 /* VA_START */, "Linear Mapping end" },
|
{ 0 /* PAGE_END */, "Linear Mapping end" },
|
||||||
#ifdef CONFIG_KASAN
|
#ifdef CONFIG_KASAN
|
||||||
{ 0 /* KASAN_SHADOW_START */, "Kasan shadow start" },
|
{ 0 /* KASAN_SHADOW_START */, "Kasan shadow start" },
|
||||||
{ KASAN_SHADOW_END, "Kasan shadow end" },
|
{ KASAN_SHADOW_END, "Kasan shadow end" },
|
||||||
|
@@ -411,7 +411,7 @@ void ptdump_check_wx(void)
|
||||||
|
|
||||||
static int ptdump_init(void)
|
static int ptdump_init(void)
|
||||||
{
|
{
|
||||||
address_markers[VA_START_NR].start_address = VA_START;
|
address_markers[PAGE_END_NR].start_address = PAGE_END;
|
||||||
#ifdef CONFIG_KASAN
|
#ifdef CONFIG_KASAN
|
||||||
address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START;
|
address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START;
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@@ -226,7 +226,7 @@ void __init kasan_init(void)
|
||||||
kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
|
kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
|
||||||
early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
|
early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
|
||||||
|
|
||||||
kasan_populate_early_shadow(kasan_mem_to_shadow((void *) VA_START),
|
kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
|
||||||
(void *)mod_shadow_start);
|
(void *)mod_shadow_start);
|
||||||
kasan_populate_early_shadow((void *)kimg_shadow_end,
|
kasan_populate_early_shadow((void *)kimg_shadow_end,
|
||||||
(void *)KASAN_SHADOW_END);
|
(void *)KASAN_SHADOW_END);
|
||||||
|
|
|
@@ -399,7 +399,7 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
|
||||||
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
|
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
|
||||||
phys_addr_t size, pgprot_t prot)
|
phys_addr_t size, pgprot_t prot)
|
||||||
{
|
{
|
||||||
if ((virt >= VA_START) && (virt < VMALLOC_START)) {
|
if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
|
||||||
pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
|
pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
|
||||||
&phys, virt);
|
&phys, virt);
|
||||||
return;
|
return;
|
||||||
|
@@ -426,7 +426,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
|
||||||
static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
|
static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
|
||||||
phys_addr_t size, pgprot_t prot)
|
phys_addr_t size, pgprot_t prot)
|
||||||
{
|
{
|
||||||
if ((virt >= VA_START) && (virt < VMALLOC_START)) {
|
if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
|
||||||
pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
|
pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
|
||||||
&phys, virt);
|
&phys, virt);
|
||||||
return;
|
return;
|
||||||
|
|
Loading…
Add table
Reference in a new issue