
KVM: s390: remove the last user of page->index

Shadow page tables use page->index to keep the g2 address of the guest
page table being shadowed.

Instead of keeping the information in page->index, split the address
and smear it over the 16-bit softbit (ST2) areas of 4 PGSTEs.

This removes the last s390 user of page->index.
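
For illustration, here is a minimal user-space sketch of the
split-and-reassemble scheme. The mask value is taken from the
pgtable.h hunk below; the helper names and the round-trip test in
main() are hypothetical, not part of the patch:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PGSTE_ST2_MASK 0x0000ffff00000000UL /* from the diff below */

/* Mirrors gmap_pgste_set_pgt_addr(): clear the four ST2 fields, then
 * smear the 64-bit address across them, 16 bits per PGSTE. */
static void set_pgt_addr(uint64_t *pgstes, uint64_t pgt_addr)
{
	int i;

	for (i = 0; i < 4; i++)
		pgstes[i] &= ~PGSTE_ST2_MASK;
	pgstes[0] |= (pgt_addr >> 16) & PGSTE_ST2_MASK;
	pgstes[1] |= pgt_addr & PGSTE_ST2_MASK;
	pgstes[2] |= (pgt_addr << 16) & PGSTE_ST2_MASK;
	pgstes[3] |= (pgt_addr << 32) & PGSTE_ST2_MASK;
}

/* Mirrors gmap_pgste_get_pgt_addr(): undo the shifts to reassemble. */
static uint64_t get_pgt_addr(const uint64_t *pgstes)
{
	uint64_t res;

	res = (pgstes[0] & PGSTE_ST2_MASK) << 16;	/* bits 63..48 */
	res |= pgstes[1] & PGSTE_ST2_MASK;		/* bits 47..32 */
	res |= (pgstes[2] & PGSTE_ST2_MASK) >> 16;	/* bits 31..16 */
	res |= (pgstes[3] & PGSTE_ST2_MASK) >> 32;	/* bits 15..0  */
	return res;
}

int main(void)
{
	uint64_t pgstes[4] = { 0 };
	uint64_t addr = 0x0123456789abc000UL;	/* any 64-bit value round-trips */

	set_pgt_addr(pgstes, addr);
	assert(get_pgt_addr(pgstes) == addr);
	printf("round-trip ok: %#" PRIx64 "\n", get_pgt_addr(pgstes));
	return 0;
}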

Reviewed-by: Steffen Eiden <seiden@linux.ibm.com>
Reviewed-by: Christoph Schlameuss <schlameuss@linux.ibm.com>
Link: https://lore.kernel.org/r/20250123144627.312456-16-imbrenda@linux.ibm.com
Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Message-ID: <20250123144627.312456-16-imbrenda@linux.ibm.com>
Author: Claudio Imbrenda <imbrenda@linux.ibm.com>
Date:   2025-01-23 15:46:27 +01:00
Commit: 84b7387692
Parent: 1f4389931e
3 changed files with 39 additions and 4 deletions

arch/s390/include/asm/pgtable.h

@@ -420,6 +420,7 @@ void setup_protection_map(void);
 #define PGSTE_HC_BIT 0x0020000000000000UL
 #define PGSTE_GR_BIT 0x0004000000000000UL
 #define PGSTE_GC_BIT 0x0002000000000000UL
+#define PGSTE_ST2_MASK 0x0000ffff00000000UL
 #define PGSTE_UC_BIT 0x0000000000008000UL /* user dirty (migration) */
 #define PGSTE_IN_BIT 0x0000000000004000UL /* IPTE notify bit */
 #define PGSTE_VSIE_BIT 0x0000000000002000UL /* ref'd in a shadow table */
@@ -2007,4 +2008,18 @@ extern void s390_reset_cmma(struct mm_struct *mm);
 #define pmd_pgtable(pmd) \
 	((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
 
+static inline unsigned long gmap_pgste_get_pgt_addr(unsigned long *pgt)
+{
+	unsigned long *pgstes, res;
+
+	pgstes = pgt + _PAGE_ENTRIES;
+
+	res = (pgstes[0] & PGSTE_ST2_MASK) << 16;
+	res |= pgstes[1] & PGSTE_ST2_MASK;
+	res |= (pgstes[2] & PGSTE_ST2_MASK) >> 16;
+	res |= (pgstes[3] & PGSTE_ST2_MASK) >> 32;
+
+	return res;
+}
+
 #endif /* _S390_PAGE_H */
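
A note on the decode helper above: shadow page tables are allocated as
full pages (pte+pgste), so pgt + _PAGE_ENTRIES lands on the first
PGSTE (_PAGE_ENTRIES being 256 on s390 is taken from the s390 headers,
not from this diff). The slice placement implied by the shifts,
summarized as a comment block:

/*
 * Slice placement in gmap_pgste_get_pgt_addr(), where the ST2 field
 * is bits 32..47 of each 64-bit PGSTE (per PGSTE_ST2_MASK above):
 *
 *   pgstes[0] ST2  <->  pgt address bits 63..48  (decode: << 16)
 *   pgstes[1] ST2  <->  pgt address bits 47..32  (decode: no shift)
 *   pgstes[2] ST2  <->  pgt address bits 31..16  (decode: >> 16)
 *   pgstes[3] ST2  <->  pgt address bits 15..0   (decode: >> 32)
 */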

arch/s390/kvm/gaccess.c

@@ -1409,6 +1409,7 @@ shadow_pgt:
 static int shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, unsigned long *pgt,
 			     int *dat_protection, int *fake)
 {
+	unsigned long pt_index;
 	unsigned long *table;
 	struct page *page;
 	int rc;
@@ -1418,9 +1419,10 @@ static int shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, unsigned long
 	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
 		/* Shadow page tables are full pages (pte+pgste) */
 		page = pfn_to_page(*table >> PAGE_SHIFT);
-		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
+		pt_index = gmap_pgste_get_pgt_addr(page_to_virt(page));
+		*pgt = pt_index & ~GMAP_SHADOW_FAKE_TABLE;
 		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
-		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
+		*fake = !!(pt_index & GMAP_SHADOW_FAKE_TABLE);
 		rc = 0;
 	} else {
 		rc = -EAGAIN;
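
shadow_pgt_lookup() can recover both the g2 origin and the fake flag
from a single value because GMAP_SHADOW_FAKE_TABLE is a low tag bit
that an aligned table origin can never set (the bit value and the
alignment are assumptions from the s390 headers, not shown in this
diff). A hedged, self-contained sketch of the tagging idiom, with
hypothetical names:

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in: table origins are at least 2K-aligned in the
 * kernel, so bit 0 is free to carry the "fake table" flag. */
#define FAKE_TABLE_TAG 1UL

static uint64_t tag(uint64_t origin, int fake)
{
	assert((origin & FAKE_TABLE_TAG) == 0); /* alignment keeps bit 0 clear */
	return fake ? origin | FAKE_TABLE_TAG : origin;
}

int main(void)
{
	uint64_t v = tag(0x2000, 1);
	uint64_t origin = v & ~FAKE_TABLE_TAG; /* like *pgt = pt_index & ~GMAP_SHADOW_FAKE_TABLE */
	int fake = !!(v & FAKE_TABLE_TAG);     /* like *fake = !!(pt_index & GMAP_SHADOW_FAKE_TABLE) */

	assert(origin == 0x2000 && fake == 1);
	return 0;
}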

arch/s390/mm/gmap.c

@@ -1733,6 +1733,23 @@ out_free:
 }
 EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
 
+static void gmap_pgste_set_pgt_addr(struct ptdesc *ptdesc, unsigned long pgt_addr)
+{
+	unsigned long *pgstes = page_to_virt(ptdesc_page(ptdesc));
+
+	pgstes += _PAGE_ENTRIES;
+
+	pgstes[0] &= ~PGSTE_ST2_MASK;
+	pgstes[1] &= ~PGSTE_ST2_MASK;
+	pgstes[2] &= ~PGSTE_ST2_MASK;
+	pgstes[3] &= ~PGSTE_ST2_MASK;
+
+	pgstes[0] |= (pgt_addr >> 16) & PGSTE_ST2_MASK;
+	pgstes[1] |= pgt_addr & PGSTE_ST2_MASK;
+	pgstes[2] |= (pgt_addr << 16) & PGSTE_ST2_MASK;
+	pgstes[3] |= (pgt_addr << 32) & PGSTE_ST2_MASK;
+}
+
 /**
  * gmap_shadow_pgt - instantiate a shadow page table
  * @sg: pointer to the shadow guest address space structure
@@ -1760,9 +1777,10 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
 	ptdesc = page_table_alloc_pgste(sg->mm);
 	if (!ptdesc)
 		return -ENOMEM;
-	ptdesc->pt_index = pgt & _SEGMENT_ENTRY_ORIGIN;
+	origin = pgt & _SEGMENT_ENTRY_ORIGIN;
 	if (fake)
-		ptdesc->pt_index |= GMAP_SHADOW_FAKE_TABLE;
+		origin |= GMAP_SHADOW_FAKE_TABLE;
+	gmap_pgste_set_pgt_addr(ptdesc, origin);
 	s_pgt = page_to_phys(ptdesc_page(ptdesc));
 	/* Install shadow page table */
 	spin_lock(&sg->guest_table_lock);