x86/mm: Rework lazy TLB to track the actual loaded mm
Lazy TLB state is currently managed in a rather baroque manner.
AFAICT, there are three possible states:

 - Non-lazy.  This means that we're running a user thread or a
   kernel thread that has called use_mm().  current->mm ==
   current->active_mm == cpu_tlbstate.active_mm and
   cpu_tlbstate.state == TLBSTATE_OK.

 - Lazy with user mm.  We're running a kernel thread without an mm
   and we're borrowing an mm_struct.  We have current->mm == NULL,
   current->active_mm == cpu_tlbstate.active_mm, and
   cpu_tlbstate.state != TLBSTATE_OK (i.e. TLBSTATE_LAZY or 0).
   The current cpu is set in mm_cpumask(current->active_mm).
   CR3 points to current->active_mm->pgd.  The TLB is up to date.

 - Lazy with init_mm.  This happens when we call leave_mm().  We
   have current->mm == NULL and current->active_mm ==
   cpu_tlbstate.active_mm, but that mm is only relevant insofar as
   the scheduler is tracking it for refcounting.  cpu_tlbstate.state
   != TLBSTATE_OK.  The current cpu is clear in
   mm_cpumask(current->active_mm).  CR3 points to swapper_pg_dir,
   i.e. init_mm->pgd.

This patch simplifies the situation.  Other than perf, x86 stops
caring about current->active_mm at all.  We have
cpu_tlbstate.loaded_mm pointing to the mm that CR3 references.  The
TLB is always up to date for that mm.  leave_mm() just switches us
to init_mm.  There are no longer any special cases for mm_cpumask,
and switch_mm() switches mms without worrying about laziness.

After this patch, cpu_tlbstate.state serves only to tell the TLB
flush code whether it may switch to init_mm instead of doing a
normal flush.

This makes fairly extensive changes to xen_exit_mmap(), which used
to look a bit like black magic.

Perf is unchanged.  With or without this change, perf may behave a
bit erratically if it tries to read user memory in kernel thread
context.  We should build on this patch to teach perf to never look
at user memory when cpu_tlbstate.loaded_mm != current->mm.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
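As a minimal sketch of the invariant this introduces (not part of the patch; the helper name is hypothetical, and using __pa() on init_mm's pgd is a simplification of __pa_symbol()), the rule "CR3 matches cpu_tlbstate.loaded_mm whenever interrupts are on" could be asserted like so:

	static inline void assert_loaded_mm_matches_cr3(void)
	{
		/* cpu_tlbstate.loaded_mm must be the mm whose pgd CR3 holds. */
		struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

		WARN_ON(read_cr3() != __pa(loaded_mm->pgd));
	}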
parent ce4a4e565f
commit 3d28ebceaf
6 changed files with 148 additions and 145 deletions
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2101,8 +2101,7 @@ static int x86_pmu_event_init(struct perf_event *event)
 
 static void refresh_pce(void *ignored)
 {
-	if (current->active_mm)
-		load_mm_cr4(current->active_mm);
+	load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm));
 }
 
 static void x86_pmu_event_mapped(struct perf_event *event)
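The follow-up suggested in the commit message -- teaching perf to never look at user memory when the loaded mm isn't current->mm -- could start from a guard like the sketch below. This is not part of the patch; the helper name and its eventual call site in perf's user-memory paths are assumptions.

	/* True iff user pointers are meaningful under the current CR3. */
	static bool perf_user_access_allowed(void)
	{
		return this_cpu_read(cpu_tlbstate.loaded_mm) == current->mm;
	}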
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -66,7 +66,13 @@ static inline void invpcid_flush_all_nonglobals(void)
 #endif
 
 struct tlb_state {
-	struct mm_struct *active_mm;
+	/*
+	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
+	 * are on.  This means that it may not match current->active_mm,
+	 * which will contain the previous user mm when we're in lazy TLB
+	 * mode even if we've already switched back to swapper_pg_dir.
+	 */
+	struct mm_struct *loaded_mm;
 	int state;
 
 	/*
@@ -256,7 +262,9 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 static inline void reset_lazy_tlbstate(void)
 {
 	this_cpu_write(cpu_tlbstate.state, 0);
-	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
+	this_cpu_write(cpu_tlbstate.loaded_mm, &init_mm);
+
+	WARN_ON(read_cr3() != __pa_symbol(swapper_pg_dir));
 }
 
 static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
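After this change, cpu_tlbstate.state only tells the flush code whether it may drop to init_mm rather than perform a real flush. A simplified sketch of that decision, assuming the flush_tlb_func_* structure in arch/x86/mm/tlb.c of this era (the function name is illustrative, not upstream code):

	static void flush_or_drop_to_init_mm(void)
	{
		if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) {
			/* Lazy: stop using the old mm instead of flushing it. */
			leave_mm(smp_processor_id());
			return;
		}

		local_flush_tlb();	/* flush the loaded mm for real */
	}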
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -22,14 +22,15 @@
 #include <asm/syscalls.h>
 
 /* context.lock is held for us, so we don't need any locking. */
-static void flush_ldt(void *current_mm)
+static void flush_ldt(void *__mm)
 {
+	struct mm_struct *mm = __mm;
 	mm_context_t *pc;
 
-	if (current->active_mm != current_mm)
+	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
 		return;
 
-	pc = &current->active_mm->context;
+	pc = &mm->context;
 	set_ldt(pc->ldt->entries, pc->ldt->size);
 }
 
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -811,7 +811,7 @@ void __init zone_sizes_init(void)
 }
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
-	.active_mm = &init_mm,
+	.loaded_mm = &init_mm,
 	.state = 0,
 	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
 };
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -28,26 +28,25 @@
  *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */
 
-/*
- * We cannot call mmdrop() because we are in interrupt context,
- * instead update mm->cpu_vm_mask.
- */
 void leave_mm(int cpu)
 {
-	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
+	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+
+	/*
+	 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
+	 * If so, our callers still expect us to flush the TLB, but there
+	 * aren't any user TLB entries in init_mm to worry about.
+	 *
+	 * This needs to happen before any other sanity checks due to
+	 * intel_idle's shenanigans.
+	 */
+	if (loaded_mm == &init_mm)
+		return;
+
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		BUG();
-	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
-		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
-		load_cr3(swapper_pg_dir);
-		/*
-		 * This gets called in the idle path where RCU
-		 * functions differently.  Tracing normally
-		 * uses RCU, so we have to call the tracepoint
-		 * specially here.
-		 */
-		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
-	}
+
+	switch_mm(NULL, &init_mm, NULL);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
 
@@ -65,8 +64,30 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			struct task_struct *tsk)
 {
 	unsigned cpu = smp_processor_id();
+	struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
 
-	if (likely(prev != next)) {
+	/*
+	 * NB: The scheduler will call us with prev == next when
+	 * switching from lazy TLB mode to normal mode if active_mm
+	 * isn't changing.  When this happens, there is no guarantee
+	 * that CR3 (and hence cpu_tlbstate.loaded_mm) matches next.
+	 *
+	 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
+	 */
+
+	this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+
+	if (real_prev == next) {
+		/*
+		 * There's nothing to do: we always keep the per-mm control
+		 * regs in sync with cpu_tlbstate.loaded_mm.  Just
+		 * sanity-check mm_cpumask.
+		 */
+		if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(next))))
+			cpumask_set_cpu(cpu, mm_cpumask(next));
+		return;
+	}
+
 	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
 		/*
 		 * If our current stack is in vmalloc space and isn't
@@ -81,9 +102,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
 	}
 
-	this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-	this_cpu_write(cpu_tlbstate.active_mm, next);
+	this_cpu_write(cpu_tlbstate.loaded_mm, next);
 
+	WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
 	cpumask_set_cpu(cpu, mm_cpumask(next));
 
 	/*
@@ -112,14 +133,20 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	 * and neither LOCK nor MFENCE orders them.
 	 * Fortunately, load_cr3() is serializing and gives the
 	 * ordering guarantee we need.
-	 *
 	 */
 	load_cr3(next->pgd);
 
-	trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+	/*
+	 * This gets called via leave_mm() in the idle path where RCU
+	 * functions differently.  Tracing normally uses RCU, so we have to
+	 * call the tracepoint specially here.
+	 */
+	trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 
 	/* Stop flush ipis for the previous mm */
-	cpumask_clear_cpu(cpu, mm_cpumask(prev));
+	WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
+		     real_prev != &init_mm);
+	cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
 
 	/* Load per-mm CR4 state */
 	load_mm_cr4(next);
@@ -137,36 +164,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	 * exists.  That means that next->context.ldt !=
 	 * prev->context.ldt, because mms never share an LDT.
 	 */
-	if (unlikely(prev->context.ldt != next->context.ldt))
+	if (unlikely(real_prev->context.ldt != next->context.ldt))
 		load_mm_ldt(next);
 #endif
-	} else {
-		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
-
-		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
-			/*
-			 * On established mms, the mm_cpumask is only changed
-			 * from irq context, from ptep_clear_flush() while in
-			 * lazy tlb mode, and here. Irqs are blocked during
-			 * schedule, protecting us from simultaneous changes.
-			 */
-			cpumask_set_cpu(cpu, mm_cpumask(next));
-
-			/*
-			 * We were in lazy tlb mode and leave_mm disabled
-			 * tlb flush IPI delivery. We must reload CR3
-			 * to make sure to use no freed page tables.
-			 *
-			 * As above, load_cr3() is serializing and orders TLB
-			 * fills with respect to the mm_cpumask write.
-			 */
-			load_cr3(next->pgd);
-			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
-			load_mm_cr4(next);
-			load_mm_ldt(next);
-		}
-	}
 }
 
 /*
@@ -246,7 +246,7 @@ static void flush_tlb_func_remote(void *info)
 
 	inc_irq_stat(irq_tlb_count);
 
-	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.active_mm))
+	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
 		return;
 
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@@ -314,7 +314,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 		info.end = TLB_FLUSH_ALL;
 	}
 
-	if (mm == current->active_mm)
+	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm))
 		flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
 	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
 		flush_tlb_others(mm_cpumask(mm), &info);
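Note the design choice in the tlb.c changes above: lazy teardown now reuses the ordinary mm-switch path, so mm_cpumask and the per-mm CR4/LDT state are maintained by exactly one code path. Illustrative usage (a sketch; leave_mm() itself handles the loaded_mm == init_mm case):

	leave_mm(smp_processor_id());	/* CR3 now points at init_mm->pgd */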
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -975,37 +975,32 @@ static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 	spin_unlock(&mm->page_table_lock);
 }
 
-#ifdef CONFIG_SMP
-/* Another cpu may still have their %cr3 pointing at the pagetable, so
-   we need to repoint it somewhere else before we can unpin it. */
-static void drop_other_mm_ref(void *info)
+static void drop_mm_ref_this_cpu(void *info)
 {
 	struct mm_struct *mm = info;
-	struct mm_struct *active_mm;
-
-	active_mm = this_cpu_read(cpu_tlbstate.active_mm);
 
-	if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
+	if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
 		leave_mm(smp_processor_id());
 
-	/* If this cpu still has a stale cr3 reference, then make sure
-	   it has been flushed. */
+	/*
+	 * If this cpu still has a stale cr3 reference, then make sure
+	 * it has been flushed.
+	 */
 	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
-		load_cr3(swapper_pg_dir);
+		xen_mc_flush();
 }
 
+#ifdef CONFIG_SMP
+/*
+ * Another cpu may still have their %cr3 pointing at the pagetable, so
+ * we need to repoint it somewhere else before we can unpin it.
+ */
 static void xen_drop_mm_ref(struct mm_struct *mm)
 {
 	cpumask_var_t mask;
 	unsigned cpu;
 
-	if (current->active_mm == mm) {
-		if (current->mm == mm)
-			load_cr3(swapper_pg_dir);
-		else
-			leave_mm(smp_processor_id());
-	}
+	drop_mm_ref_this_cpu(mm);
 
 	/* Get the "official" set of cpus referring to our pagetable. */
 	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
@@ -1013,31 +1008,31 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
 			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
 			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
 				continue;
-			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
+			smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
 		}
 		return;
 	}
 	cpumask_copy(mask, mm_cpumask(mm));
 
-	/* It's possible that a vcpu may have a stale reference to our
-	   cr3, because its in lazy mode, and it hasn't yet flushed
-	   its set of pending hypercalls yet.  In this case, we can
-	   look at its actual current cr3 value, and force it to flush
-	   if needed. */
+	/*
+	 * It's possible that a vcpu may have a stale reference to our
+	 * cr3, because its in lazy mode, and it hasn't yet flushed
+	 * its set of pending hypercalls yet.  In this case, we can
+	 * look at its actual current cr3 value, and force it to flush
+	 * if needed.
+	 */
 	for_each_online_cpu(cpu) {
 		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
 			cpumask_set_cpu(cpu, mask);
 	}
 
-	if (!cpumask_empty(mask))
-		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
+	smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1);
 	free_cpumask_var(mask);
 }
 #else
 static void xen_drop_mm_ref(struct mm_struct *mm)
 {
-	if (current->active_mm == mm)
-		load_cr3(swapper_pg_dir);
+	drop_mm_ref_this_cpu(mm);
 }
 #endif