author     Linus Torvalds <torvalds@linux-foundation.org>  2024-07-19 10:20:26 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-07-19 10:20:26 -0700
commit     afd81d914f6fb3e74a46bf5d0dd0b028591ea22e (patch)
tree       d442b5de397de72b365d263e6a9c577fcb7aa0a6 /drivers/xen
parent     ebcfbf02abfbecc144440ff797419cc95cb047fe (diff)
parent     b69bdba5a37eb6224039e9572e0e98fc3a931fee (diff)
Merge tag 'dma-mapping-6.11-2024-07-19' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - reduce duplicate swiotlb pool lookups (Michael Kelley)

 - minor small fixes (Yicong Yang, Yang Li)

* tag 'dma-mapping-6.11-2024-07-19' of git://git.infradead.org/users/hch/dma-mapping:
  swiotlb: fix kernel-doc description for swiotlb_del_transient
  swiotlb: reduce swiotlb pool lookups
  dma-mapping: benchmark: Don't starve others when doing the test
Diffstat (limited to 'drivers/xen')
-rw-r--r--  drivers/xen/swiotlb-xen.c | 31
1 file changed, 20 insertions(+), 11 deletions(-)
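The core of the swiotlb change is visible in the diff below: instead of asking a yes/no "is this a bounce buffer?" question and then letting the swiotlb core repeat the pool lookup internally, the caller obtains the struct io_tlb_pool once and passes it to the __-prefixed helpers. A minimal caller-side sketch of that pattern follows, using only the signatures that appear in this diff; the example_unmap() wrapper itself is hypothetical and not part of the kernel.

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/swiotlb.h>

/*
 * Hypothetical wrapper sketching the single-lookup pattern applied in
 * this diff: swiotlb_find_pool() is called once and its result is reused
 * by __swiotlb_tbl_unmap_single(), rather than calling is_swiotlb_buffer()
 * and having swiotlb_tbl_unmap_single() look the pool up a second time.
 */
static void example_unmap(struct device *dev, phys_addr_t paddr,
			  size_t size, enum dma_data_direction dir,
			  unsigned long attrs)
{
	struct io_tlb_pool *pool = swiotlb_find_pool(dev, paddr);

	/* The callers in this diff treat a NULL pool as "not a bounce buffer". */
	if (pool)
		__swiotlb_tbl_unmap_single(dev, paddr, size, dir, attrs, pool);
}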
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 6579ae3f6dac..35155258a7e2 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -88,7 +88,8 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 	return 0;
 }
 
-static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
+static struct io_tlb_pool *xen_swiotlb_find_pool(struct device *dev,
+		dma_addr_t dma_addr)
 {
 	unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
 	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
@@ -99,8 +100,8 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 	 * in our domain. Therefore _only_ check address within our domain.
 	 */
 	if (pfn_valid(PFN_DOWN(paddr)))
-		return is_swiotlb_buffer(dev, paddr);
-	return 0;
+		return swiotlb_find_pool(dev, paddr);
+	return NULL;
 }
 
 #ifdef CONFIG_X86
@@ -227,8 +228,9 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 * Ensure that the address returned is DMA'ble
 	 */
 	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
-		swiotlb_tbl_unmap_single(dev, map, size, dir,
-				attrs | DMA_ATTR_SKIP_CPU_SYNC);
+		__swiotlb_tbl_unmap_single(dev, map, size, dir,
+				attrs | DMA_ATTR_SKIP_CPU_SYNC,
+				swiotlb_find_pool(dev, map));
 		return DMA_MAPPING_ERROR;
 	}
 
@@ -254,6 +256,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
+	struct io_tlb_pool *pool;
 
 	BUG_ON(dir == DMA_NONE);
 
@@ -265,8 +268,10 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 	}
 
 	/* NOTE: We use dev_addr here, not paddr! */
-	if (is_xen_swiotlb_buffer(hwdev, dev_addr))
-		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
+	pool = xen_swiotlb_find_pool(hwdev, dev_addr);
+	if (pool)
+		__swiotlb_tbl_unmap_single(hwdev, paddr, size, dir,
+					   attrs, pool);
 }
 
 static void
@@ -274,6 +279,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
 		size_t size, enum dma_data_direction dir)
 {
 	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
+	struct io_tlb_pool *pool;
 
 	if (!dev_is_dma_coherent(dev)) {
 		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
@@ -282,8 +288,9 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
 			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
 	}
 
-	if (is_xen_swiotlb_buffer(dev, dma_addr))
-		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
+	pool = xen_swiotlb_find_pool(dev, dma_addr);
+	if (pool)
+		__swiotlb_sync_single_for_cpu(dev, paddr, size, dir, pool);
 }
 
 static void
@@ -291,9 +298,11 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
 		size_t size, enum dma_data_direction dir)
 {
 	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
+	struct io_tlb_pool *pool;
 
-	if (is_xen_swiotlb_buffer(dev, dma_addr))
-		swiotlb_sync_single_for_device(dev, paddr, size, dir);
+	pool = xen_swiotlb_find_pool(dev, dma_addr);
+	if (pool)
+		__swiotlb_sync_single_for_device(dev, paddr, size, dir, pool);
 
 	if (!dev_is_dma_coherent(dev)) {
 		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))