path: root/drivers/iommu/iommu.c
author    Jason Gunthorpe <jgg@nvidia.com>    2023-09-13 10:43:55 -0300
committer Joerg Roedel <jroedel@suse.de>      2023-09-25 11:41:03 +0200
commit    4601cd2d7c4c82c4bafc822e1ff630a709eff206
tree      39ebfc3cdac50fadcfb5a486cd25c55fe37369a4    /drivers/iommu/iommu.c
parent    8359cf39acba72606736f453f54432ac9dc28523
iommu: Add ops->domain_alloc_paging()
This callback requests the driver to create only a __IOMMU_DOMAIN_PAGING domain, so it saves a few lines in a lot of drivers needlessly checking the type.

More critically, this allows us to sweep out all the IOMMU_DOMAIN_UNMANAGED and IOMMU_DOMAIN_DMA checks from a lot of the drivers, simplifying what is going on in the code and ultimately removing the now-unused special cases in drivers where they did not support IOMMU_DOMAIN_DMA.

domain_alloc_paging() should return a struct iommu_domain that is functionally compatible with ARM_DMA_USE_IOMMU, dma-iommu.c and iommufd.

Be forwards looking and pass in a 'struct device *' argument. We can provide this when allocating the default_domain. No drivers will look at this.

Tested-by: Steven Price <steven.price@arm.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/22-v8-81230027b2fa+9d-iommu_all_defdom_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
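For illustration, here is a rough sketch of what the driver-side conversion looks like. The "mydrv" names and the mydrv_domain structure are hypothetical and error handling is trimmed; only the iommu_ops member names and the IOMMU_DOMAIN_* constants come from the kernel API touched by this patch.

/* Hypothetical before: domain_alloc() had to filter the requested type. */
static struct iommu_domain *mydrv_domain_alloc(unsigned int type)
{
	struct mydrv_domain *dom;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;
	/* ... allocate page tables, set up dom->domain ... */
	return &dom->domain;
}

/*
 * Hypothetical after: the core only calls this for __IOMMU_DOMAIN_PAGING
 * requests, so the type checks disappear. dev may be NULL on some paths
 * (e.g. iommu_domain_alloc(bus) below), and per the commit message no
 * driver is expected to look at it yet.
 */
static struct iommu_domain *mydrv_domain_alloc_paging(struct device *dev)
{
	struct mydrv_domain *dom;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;
	/* ... allocate page tables, set up dom->domain ... */
	return &dom->domain;
}

static const struct iommu_ops mydrv_ops = {
	.domain_alloc_paging	= mydrv_domain_alloc_paging,
	/* other ops unchanged */
};

As the dispatch added below shows, the core still falls back to the old domain_alloc() when a driver has not been converted, so both styles can coexist during the transition.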
Diffstat (limited to 'drivers/iommu/iommu.c')
-rw-r--r--  drivers/iommu/iommu.c | 17
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 38856d542afc..fe033043be46 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2041,6 +2041,7 @@ void iommu_set_fault_handler(struct iommu_domain *domain,
 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
 
 static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
+						 struct device *dev,
 						 unsigned int type)
 {
 	struct iommu_domain *domain;
@@ -2048,8 +2049,13 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
 	if (alloc_type == IOMMU_DOMAIN_IDENTITY && ops->identity_domain)
 		return ops->identity_domain;
+	else if (type & __IOMMU_DOMAIN_PAGING && ops->domain_alloc_paging)
+		domain = ops->domain_alloc_paging(dev);
+	else if (ops->domain_alloc)
+		domain = ops->domain_alloc(alloc_type);
+	else
+		return NULL;
 
-	domain = ops->domain_alloc(alloc_type);
 	if (!domain)
 		return NULL;
@@ -2074,14 +2080,19 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
 static struct iommu_domain *
 __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type)
 {
-	return __iommu_domain_alloc(group_iommu_ops(group), type);
+	struct device *dev =
+		list_first_entry(&group->devices, struct group_device, list)
+			->dev;
+
+	return __iommu_domain_alloc(group_iommu_ops(group), dev, type);
 }
 
 struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
 {
 	if (bus == NULL || bus->iommu_ops == NULL)
 		return NULL;
-	return __iommu_domain_alloc(bus->iommu_ops, IOMMU_DOMAIN_UNMANAGED);
+	return __iommu_domain_alloc(bus->iommu_ops, NULL,
+				    IOMMU_DOMAIN_UNMANAGED);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_alloc);