linux/arch/arm/include/asm/mmu.h
Ard Biesheuvel d31e23aff0 ARM: mm: make vmalloc_seq handling SMP safe
Rework the vmalloc_seq handling so it can be used safely under SMP: we
started using it to ensure that vmap'ed stacks are guaranteed to be
mapped by the active mm before switching to a task, and for that we
need changes to the page tables to be visible to other CPUs as soon as
they observe a change in the sequence count.
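
A minimal sketch of the ordering contract this implies (illustrative
only; publish_vmalloc_update() and sync_vmalloc_mappings() are
hypothetical names, not kernel API): the writer updates the page
tables first and then bumps the counter with a release store, so a
reader that observes the new count via an acquire read is guaranteed
to see the new mappings as well.

	#include <linux/atomic.h>
	#include <linux/mm_types.h>

	static atomic_t kernel_vmalloc_seq;	/* stands in for init_mm's counter */

	/* Writer: page-table updates must be visible before the new count. */
	static void publish_vmalloc_update(void)
	{
		/* ... install the new vmalloc page-table entries ... */
		atomic_inc_return_release(&kernel_vmalloc_seq);
	}

	/* Reader: the acquire read pairs with the release increment above. */
	static void sync_vmalloc_mappings(struct mm_struct *mm)
	{
		int seq = atomic_read_acquire(&kernel_vmalloc_seq);

		if (seq != atomic_read(&mm->context.vmalloc_seq)) {
			/* ... copy the updated entries into mm's pgd ... */
			atomic_set_release(&mm->context.vmalloc_seq, seq);
		}
	}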

Since LPAE needs none of this, fold a check against it into the
vmalloc_seq counter check after breaking it out into a separate static
inline helper.
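
A plausible shape for that helper, going by the description above (a
sketch under assumptions; the actual definition lives outside this
header):

	static inline void check_vmalloc_seq(struct mm_struct *mm)
	{
		/*
		 * Under LPAE the kernel mappings live in a shared
		 * translation table (TTBR1), so no per-mm resync is
		 * needed and the whole check compiles away.
		 */
		if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
		    unlikely(atomic_read(&mm->context.vmalloc_seq) !=
			     atomic_read(&init_mm.context.vmalloc_seq)))
			__check_vmalloc_seq(mm);
	}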

Given that vmap'ed stacks are now also supported on !SMP configurations,
let's drop the WARN(), which could now fire spuriously.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
2022-01-25 09:53:52 +01:00

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARM_MMU_H
#define __ARM_MMU_H

#ifdef CONFIG_MMU

typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
	atomic64_t	id;
#else
	int		switch_pending;
#endif
	atomic_t	vmalloc_seq;
	unsigned long	sigpage;
#ifdef CONFIG_VDSO
	unsigned long	vdso;
#endif
#ifdef CONFIG_BINFMT_ELF_FDPIC
	unsigned long	exec_fdpic_loadmap;
	unsigned long	interp_fdpic_loadmap;
#endif
} mm_context_t;

#ifdef CONFIG_CPU_HAS_ASID
#define ASID_BITS	8
#define ASID_MASK	((~0ULL) << ASID_BITS)
#define ASID(mm)	((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
#else
#define ASID(mm)	(0)
#endif

#else /* !CONFIG_MMU */

/*
 * From nommu.h:
 *  Copyright (C) 2002, David McCullough <davidm@snapgear.com>
 *  modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
 */
typedef struct {
	unsigned long	end_brk;
#ifdef CONFIG_BINFMT_ELF_FDPIC
	unsigned long	exec_fdpic_loadmap;
	unsigned long	interp_fdpic_loadmap;
#endif
} mm_context_t;

#endif /* !CONFIG_MMU */

#endif /* __ARM_MMU_H */
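
For a sense of what the ASID macros above compute: context.id packs an
8-bit hardware ASID into the low bits, with the allocator's generation
count in the bits above. A small standalone example of the masking
arithmetic (not code from the tree):

	#include <stdio.h>

	int main(void)
	{
		/* generation 3 in the upper bits, hardware ASID 0x2a below */
		unsigned long long id = (3ULL << 8) | 0x2a;
		unsigned long long asid_mask = (~0ULL) << 8;	/* ASID_MASK */
		unsigned int asid = (unsigned int)(id & ~asid_mask);

		printf("asid = 0x%x\n", asid);	/* prints: asid = 0x2a */
		return 0;
	}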