
xen: branch for v6.14-rc3

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRTLbB6QfY48x44uB6AXGG7T9hjvgUCZ672+AAKCRCAXGG7T9hj
 vqnNAP99iQfUC5je/UYE4k9ku0oRD9+d65G5YCmnLv5egUOuTgD/UT+N0LYaahS7
 5hRh61sicj57dsrN4kA4U4TpVvoFsAY=
 =CUIl
 -----END PGP SIGNATURE-----

Merge tag 'for-linus-6.14-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:
 "Three fixes to xen-swiotlb driver:

   - two fixes for issues coming up due to another fix in 6.12

   - addition of an __init annotation"

* tag 'for-linus-6.14-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  Xen/swiotlb: mark xen_swiotlb_fixup() __init
  x86/xen: allow larger contiguous memory regions in PV guests
  xen/swiotlb: relax alignment requirements
Linus Torvalds, 2025-02-14 08:15:17 -08:00, commit fd31a1bea3
2 changed files with 75 additions and 18 deletions

arch/x86/xen/mmu_pv.c

@@ -111,6 +111,51 @@ static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
  */
 static DEFINE_SPINLOCK(xen_reservation_lock);
 
+/* Protected by xen_reservation_lock. */
+#define MIN_CONTIG_ORDER 9 /* 2MB */
+static unsigned int discontig_frames_order = MIN_CONTIG_ORDER;
+static unsigned long discontig_frames_early[1UL << MIN_CONTIG_ORDER] __initdata;
+static unsigned long *discontig_frames __refdata = discontig_frames_early;
+static bool discontig_frames_dyn;
+
+static int alloc_discontig_frames(unsigned int order)
+{
+	unsigned long *new_array, *old_array;
+	unsigned int old_order;
+	unsigned long flags;
+
+	BUG_ON(order < MIN_CONTIG_ORDER);
+	BUILD_BUG_ON(sizeof(discontig_frames_early) != PAGE_SIZE);
+
+	new_array = (unsigned long *)__get_free_pages(GFP_KERNEL,
+						      order - MIN_CONTIG_ORDER);
+	if (!new_array)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&xen_reservation_lock, flags);
+
+	old_order = discontig_frames_order;
+
+	if (order > discontig_frames_order || !discontig_frames_dyn) {
+		if (!discontig_frames_dyn)
+			old_array = NULL;
+		else
+			old_array = discontig_frames;
+
+		discontig_frames = new_array;
+		discontig_frames_order = order;
+		discontig_frames_dyn = true;
+	} else {
+		old_array = new_array;
+	}
+
+	spin_unlock_irqrestore(&xen_reservation_lock, flags);
+
+	free_pages((unsigned long)old_array, old_order - MIN_CONTIG_ORDER);
+
+	return 0;
+}
+
 /*
  * Note about cr3 (pagetable base) values:
  *
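
The constants here reward a quick sanity check: order 9 means 2^9 = 512 page frames, which at 4 KiB per page covers the 2 MB named in the comment, and an array of 512 unsigned longs on x86-64 occupies exactly 512 * 8 = 4096 bytes, one page, which is what the BUILD_BUG_ON asserts. A standalone userspace sketch of that arithmetic, assuming x86-64's 4 KiB pages and 8-byte unsigned long:

/* Sanity-check of the MIN_CONTIG_ORDER arithmetic above; a userspace
 * sketch assuming x86-64 (4 KiB pages, 8-byte unsigned long). */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define MIN_CONTIG_ORDER	9	/* 2^9 pages = 2 MiB */

int main(void)
{
	unsigned long frames = 1UL << MIN_CONTIG_ORDER;

	/* Mirrors the BUILD_BUG_ON: the early array fills exactly one page. */
	assert(frames * sizeof(unsigned long) == PAGE_SIZE);

	printf("frames tracked: %lu\n", frames);			/* 512 */
	printf("array size:     %lu bytes\n", frames * sizeof(unsigned long));
	printf("region covered: %lu MiB\n", (frames << PAGE_SHIFT) >> 20);
	return 0;
}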
@@ -814,6 +859,9 @@ static void __init xen_after_bootmem(void)
 	SetPagePinned(virt_to_page(level3_user_vsyscall));
 #endif
 	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
+
+	if (alloc_discontig_frames(MIN_CONTIG_ORDER))
+		BUG();
 }
 
 static void xen_unpin_page(struct mm_struct *mm, struct page *page,
@@ -2203,10 +2251,6 @@ void __init xen_init_mmu_ops(void)
 	memset(dummy_mapping, 0xff, PAGE_SIZE);
 }
 
-/* Protected by xen_reservation_lock. */
-#define MAX_CONTIG_ORDER 9 /* 2MB */
-static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
-
 #define VOID_PTE (mfn_pte(0, __pgprot(0)))
 
 static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
			       unsigned long *in_frames,
@@ -2323,18 +2367,25 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
 				 unsigned int address_bits,
 				 dma_addr_t *dma_handle)
 {
-	unsigned long *in_frames = discontig_frames, out_frame;
+	unsigned long *in_frames, out_frame;
 	unsigned long  flags;
 	int            success;
 	unsigned long vstart = (unsigned long)phys_to_virt(pstart);
 
-	if (unlikely(order > MAX_CONTIG_ORDER))
-		return -ENOMEM;
+	if (unlikely(order > discontig_frames_order)) {
+		if (!discontig_frames_dyn)
+			return -ENOMEM;
+
+		if (alloc_discontig_frames(order))
+			return -ENOMEM;
+	}
 
 	memset((void *) vstart, 0, PAGE_SIZE << order);
 
 	spin_lock_irqsave(&xen_reservation_lock, flags);
 
+	in_frames = discontig_frames;
+
 	/* 1. Zap current PTEs, remembering MFNs. */
 	xen_zap_pfn_range(vstart, order, in_frames, NULL);
@@ -2358,12 +2409,12 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
 
 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
 {
-	unsigned long *out_frames = discontig_frames, in_frame;
+	unsigned long *out_frames, in_frame;
 	unsigned long  flags;
 	int success;
 	unsigned long vstart;
 
-	if (unlikely(order > MAX_CONTIG_ORDER))
+	if (unlikely(order > discontig_frames_order))
 		return;
 
 	vstart = (unsigned long)phys_to_virt(pstart);
@@ -2371,6 +2422,8 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
 	spin_lock_irqsave(&xen_reservation_lock, flags);
 
+	out_frames = discontig_frames;
+
 	/* 1. Find start MFN of contiguous extent. */
 	in_frame = virt_to_mfn((void *)vstart);
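
Taken together, these hunks implement a classic grow-only swap: alloc_discontig_frames() allocates the candidate array before taking xen_reservation_lock (page allocation can sleep; the spinlock section must not), installs it under the lock only if it is still an upgrade over what is published, and frees whichever array lost after dropping the lock. The callers, in turn, read discontig_frames only after acquiring the lock, since a concurrent grow may have swapped the pointer. A minimal userspace analogue of that discipline, with a pthread mutex standing in for the spinlock; all names here are illustrative, not kernel APIs:

/* Userspace analogue of the allocate-outside/swap-under/free-outside
 * pattern in alloc_discontig_frames(). Illustrative names only. */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long *frames;		/* protected by lock */
static unsigned int frames_order;	/* protected by lock */

static int grow_frames(unsigned int order)
{
	/* Allocate before locking: allocation may block, while the
	 * critical section must stay short and non-sleeping. */
	unsigned long *new_array = malloc(sizeof(unsigned long) << order);
	unsigned long *old_array;

	if (!new_array)
		return -1;

	pthread_mutex_lock(&lock);
	if (order > frames_order) {
		old_array = frames;	/* retire the smaller array */
		frames = new_array;
		frames_order = order;
	} else {
		old_array = new_array;	/* a concurrent grower won the race */
	}
	pthread_mutex_unlock(&lock);

	free(old_array);		/* free(NULL) is a harmless no-op */
	return 0;
}

int main(void)
{
	return grow_frames(9) || grow_frames(10);
}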

drivers/xen/swiotlb-xen.c

@@ -74,19 +74,21 @@ static inline phys_addr_t xen_dma_to_phys(struct device *dev,
 	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
 }
 
+static inline bool range_requires_alignment(phys_addr_t p, size_t size)
+{
+	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
+	phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;
+
+	return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
+}
+
 static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 {
 	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
 	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
-	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
 
 	next_bfn = pfn_to_bfn(xen_pfn);
 
-	/* If buffer is physically aligned, ensure DMA alignment. */
-	if (IS_ALIGNED(p, algn) &&
-	    !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
-		return 1;
-
 	for (i = 1; i < nr_pages; i++)
 		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
 			return 1;
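
This hunk pulls the alignment test out of range_straddles_page_boundary() into its own predicate, so the allocation and free paths can apply it independently of the page-contiguity scan. The predicate asks: if the buffer's physical address is naturally aligned for its size, is the corresponding bus address aligned as well? A plain-C sketch of that arithmetic, with pfn_to_bfn() stubbed as the identity and Xen's page size assumed equal to the CPU's (true on x86); in a real PV guest the translation consults the P2M, which is where misalignment can appear:

/* Sketch of the range_requires_alignment() arithmetic in plain C.
 * pfn_to_bfn() is stubbed as identity; real PV guests translate guest
 * frames to machine frames, which can break the promised alignment. */
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/* Smallest order (power-of-two pages) that covers size bytes. */
static unsigned int get_order(size_t size)
{
	unsigned int order = 0;

	while (((size_t)1 << (order + PAGE_SHIFT)) < size)
		order++;
	return order;
}

static uint64_t pfn_to_bfn(uint64_t pfn) { return pfn; }	/* stub */

static bool range_requires_alignment(uint64_t p, size_t size)
{
	uint64_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
	uint64_t bus_addr = pfn_to_bfn(p >> PAGE_SHIFT) << PAGE_SHIFT;

	/* Physically aligned but bus-misaligned: needs a contiguous region. */
	return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
}

int main(void)
{
	/* 16 KiB buffer at a 16 KiB-aligned address: with the identity
	 * stub the bus side is aligned too, so this prints 0. */
	printf("%d\n", range_requires_alignment(0x4000, 0x4000));
	return 0;
}

With the identity stub the predicate never fires; it returns true only when frame remapping breaks the alignment that the physical address promised.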
@@ -111,7 +113,7 @@ static struct io_tlb_pool *xen_swiotlb_find_pool(struct device *dev,
 }
 
 #ifdef CONFIG_X86
-int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
+int __init xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 {
 	int rc;
 	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
@@ -156,7 +158,8 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
 	*dma_handle = xen_phys_to_dma(dev, phys);
 	if (*dma_handle + size - 1 > dma_mask ||
-	    range_straddles_page_boundary(phys, size)) {
+	    range_straddles_page_boundary(phys, size) ||
+	    range_requires_alignment(phys, size)) {
 		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
 				dma_handle) != 0)
 			goto out_free_pages;
@@ -182,7 +185,8 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
 	size = ALIGN(size, XEN_PAGE_SIZE);
 
 	if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
-	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
+	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size) ||
+			 range_requires_alignment(phys, size)))
 		return;
 
 	if (TestClearPageXenRemapped(virt_to_page(vaddr)))