author	Robin Murphy <robin.murphy@arm.com>	2016-04-07 18:42:06 +0100
committer	Joerg Roedel <jroedel@suse.de>	2016-05-09 15:33:29 +0200
commit	d16e0faab911cc0e100a1e8e93635b432566608e (patch)
tree	c91fd568f308365f22d42a0ab8cc07e6396c678f
parent	53c92d793395fdab9edbd2f79b084bb6b2e6ae79 (diff)
iommu: Allow selecting page sizes per domain
Many IOMMUs support multiple page table formats, meaning that any given
domain may only support a subset of the hardware page sizes presented in
iommu_ops->pgsize_bitmap.

There are also certain use-cases where the creator of a domain may want
to control which page sizes are used, for example to force the use of
hugepage mappings to reduce pagetable walk depth.

To this end, add a per-domain pgsize_bitmap to represent the subset of
page sizes actually in use, to make it possible for domains with
different requirements to coexist.

Signed-off-by: Will Deacon <will.deacon@arm.com>
[rm: hijacked and rebased original patch with new commit message]
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
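As a rough sketch of the use-case above (not part of this patch): once the
per-domain field exists, a domain creator could force hugepage-only mappings
by masking the bitmap after allocation. The 2MB cut-off and the helper below
are illustrative assumptions, not existing kernel API.

#include <linux/iommu.h>
#include <linux/sizes.h>

/*
 * Illustrative only: allocate a domain and clear every page size below
 * 2MB from its pgsize_bitmap, so iommu_map() can only use hugepages.
 * iommu_domain_alloc()/iommu_domain_free() are existing API; the SZ_2M
 * threshold and this helper are assumptions for the example.
 */
static struct iommu_domain *alloc_hugepage_domain(struct bus_type *bus)
{
	struct iommu_domain *domain = iommu_domain_alloc(bus);

	if (!domain)
		return NULL;

	/* Start from the full hardware bitmap copied in at alloc time */
	domain->pgsize_bitmap &= ~(SZ_2M - 1);

	/* Bail out if the hardware offers no page size of 2MB or larger */
	if (!domain->pgsize_bitmap) {
		iommu_domain_free(domain);
		return NULL;
	}

	return domain;
}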
-rw-r--r--	drivers/iommu/dma-iommu.c	 2
-rw-r--r--	drivers/iommu/iommu.c	22
-rw-r--r--	drivers/iommu/mtk_iommu.c	 2
-rw-r--r--	drivers/vfio/vfio_iommu_type1.c	 2
-rw-r--r--	include/linux/iommu.h	 3
5 files changed, 17 insertions(+), 14 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 886cb3a78326..99432999b52f 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -94,7 +94,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size
return -ENODEV;
/* Use the smallest supported page size for IOVA granularity */
- order = __ffs(domain->ops->pgsize_bitmap);
+ order = __ffs(domain->pgsize_bitmap);
base_pfn = max_t(unsigned long, 1, base >> order);
end_pfn = (base + size - 1) >> order;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index b9df1411c894..ab4d014e3687 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -337,9 +337,9 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
if (!domain || domain->type != IOMMU_DOMAIN_DMA)
return 0;
- BUG_ON(!domain->ops->pgsize_bitmap);
+ BUG_ON(!domain->pgsize_bitmap);
- pg_size = 1UL << __ffs(domain->ops->pgsize_bitmap);
+ pg_size = 1UL << __ffs(domain->pgsize_bitmap);
INIT_LIST_HEAD(&mappings);
iommu_get_dm_regions(dev, &mappings);
@@ -1073,6 +1073,8 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
domain->ops = bus->iommu_ops;
domain->type = type;
+ /* Assume all sizes by default; the driver may override this later */
+ domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
return domain;
}
@@ -1297,7 +1299,7 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
pgsize = (1UL << (pgsize_idx + 1)) - 1;
/* throw away page sizes not supported by the hardware */
- pgsize &= domain->ops->pgsize_bitmap;
+ pgsize &= domain->pgsize_bitmap;
/* make sure we're still sane */
BUG_ON(!pgsize);
@@ -1319,14 +1321,14 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
int ret = 0;
if (unlikely(domain->ops->map == NULL ||
- domain->ops->pgsize_bitmap == 0UL))
+ domain->pgsize_bitmap == 0UL))
return -ENODEV;
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
return -EINVAL;
/* find out the minimum page size supported */
- min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+ min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
/*
* both the virtual address and the physical one, as well as
@@ -1373,14 +1375,14 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
unsigned long orig_iova = iova;
if (unlikely(domain->ops->unmap == NULL ||
- domain->ops->pgsize_bitmap == 0UL))
+ domain->pgsize_bitmap == 0UL))
return -ENODEV;
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
return -EINVAL;
/* find out the minimum page size supported */
- min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+ min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
/*
* The virtual address, as well as the size of the mapping, must be
@@ -1426,10 +1428,10 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
unsigned int i, min_pagesz;
int ret;
- if (unlikely(domain->ops->pgsize_bitmap == 0UL))
+ if (unlikely(domain->pgsize_bitmap == 0UL))
return 0;
- min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+ min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
for_each_sg(sg, s, nents, i) {
phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
@@ -1510,7 +1512,7 @@ int iommu_domain_get_attr(struct iommu_domain *domain,
break;
case DOMAIN_ATTR_PAGING:
paging = data;
- *paging = (domain->ops->pgsize_bitmap != 0UL);
+ *paging = (domain->pgsize_bitmap != 0UL);
break;
case DOMAIN_ATTR_WINDOWS:
count = data;
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 929a66a81b2b..e6b25276cfec 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -264,7 +264,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
}
/* Update our support page sizes bitmap */
- mtk_iommu_ops.pgsize_bitmap = dom->cfg.pgsize_bitmap;
+ dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
data->base + REG_MMU_PT_BASE_ADDR);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 75b24e93cedb..15a65823aad9 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -407,7 +407,7 @@ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
mutex_lock(&iommu->lock);
list_for_each_entry(domain, &iommu->domain_list, next)
- bitmap &= domain->domain->ops->pgsize_bitmap;
+ bitmap &= domain->domain->pgsize_bitmap;
mutex_unlock(&iommu->lock);
/*
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 8a2570443b80..7811294bc0f7 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -78,6 +78,7 @@ struct iommu_domain_geometry {
struct iommu_domain {
unsigned type;
const struct iommu_ops *ops;
+ unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
iommu_fault_handler_t handler;
void *handler_token;
struct iommu_domain_geometry geometry;
@@ -155,7 +156,7 @@ struct iommu_dm_region {
* @domain_set_windows: Set the number of windows for a domain
* @domain_get_windows: Return the number of windows for a domain
* @of_xlate: add OF master IDs to iommu grouping
- * @pgsize_bitmap: bitmap of supported page sizes
+ * @pgsize_bitmap: bitmap of all possible supported page sizes
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);