Conceptually, we want the memory mappings to always be up to date and
represent whatever is in the TLB. To ensure that, we need to sync them
over in the userspace case, and for the kernel we need to process the
mappings.

The kernel will call flush_tlb_* if page table entries that were valid
before become invalid. Unfortunately, this is not the case if entries
are added.

As such, change both flush_tlb_* and set_ptes to track the memory range
that has to be synchronized. For the kernel, we need to execute a
flush_tlb_kern_* immediately, but we can wait for the first page fault
in the case of set_ptes. For userspace, in contrast, we only store that
a range of memory needs to be synced and do so whenever we switch to
that process.

Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
Link: https://patch.msgid.link/20240703134536.1161108-13-benjamin@sipsolutions.net
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
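To make the mechanism concrete, here is a minimal sketch of the producer
side, assuming a helper along the lines of um_tlb_mark_sync() (the name
and exact merge policy are illustrative; the sync_tlb_range_from/to
fields come from the header below). Both flush_tlb_* and set_ptes would
funnel into something like this, widening a single pending range rather
than recording every individual update:

/*
 * Sketch: record that [start, end) must be synced to the underlying
 * process before it runs again.  All pending ranges are merged into
 * one conservative span.
 */
static inline void um_tlb_mark_sync(struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	if (!mm->context.sync_tlb_range_to) {
		/* No range pending yet, start a new one */
		mm->context.sync_tlb_range_from = start;
		mm->context.sync_tlb_range_to = end;
	} else {
		/* Widen the existing range to cover the new one */
		if (start < mm->context.sync_tlb_range_from)
			mm->context.sync_tlb_range_from = start;
		if (end > mm->context.sync_tlb_range_to)
			mm->context.sync_tlb_range_to = end;
	}
}

Using sync_tlb_range_to == 0 as the "nothing pending" marker is cheap
and unambiguous here, since the exclusive end of a real range is never
address zero.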
arch/um/include/asm/mmu.h:
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#ifndef __ARCH_UM_MMU_H
#define __ARCH_UM_MMU_H

#include <mm_id.h>

typedef struct mm_context {
	struct mm_id id;

	/* Address range in need of a TLB sync */
	unsigned long sync_tlb_range_from;
	unsigned long sync_tlb_range_to;
} mm_context_t;

#endif
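On the consumer side, the deferred sync described in the commit message
runs when switching to the process (or, for set_ptes on the kernel side,
at the first page fault). A minimal sketch, assuming a worker
um_tlb_sync() that walks the page tables over the pending range and
applies the updates to the underlying process:

/*
 * Sketch: apply any deferred page table updates before the process
 * runs again.  Clearing sync_tlb_range_to marks the range as consumed.
 */
static inline void um_tlb_sync_pending(struct mm_struct *mm)
{
	if (mm->context.sync_tlb_range_to) {
		um_tlb_sync(mm);	/* walk and apply the range */
		mm->context.sync_tlb_range_from = 0;
		mm->context.sync_tlb_range_to = 0;
	}
}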