author    David Woodhouse <David.Woodhouse@intel.com>    2009-05-10 17:16:06 +0100
committer David Woodhouse <David.Woodhouse@intel.com>    2009-05-10 19:49:52 +0100
commit    4c25a2c1b90bf785fc2e2f0f0c74a80b3e070d39
tree      2784fbbf4d6782db300b92870d2bf6111ef26627 /drivers/pci/intel-iommu.c
parent    fa3b6dcd5298db2e7b63c17795c9e5570d3df8d9
intel-iommu: Clean up handling of "caching mode" vs. context flushing.
It really doesn't make a lot of sense to have some of the logic to handle
caching vs. non-caching mode duplicated in qi_flush_context() and
__iommu_flush_context(), while the return value indicates whether the caller
should take other action which depends on the same thing. Especially since
qi_flush_context() thought it was returning something entirely different
anyway.

This patch makes qi_flush_context() and __iommu_flush_context() both return
void, removes the 'non_present_entry_flush' argument and makes the only call
site which _set_ that argument to 1 do the right thing.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
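For background, "caching mode" is the CM bit (bit 7) of the VT-d capability
register: when it is set, the hardware may cache not-present context entries
(tagged with domain 0), so even a non-present-to-present transition needs an
explicit invalidation. A minimal sketch of the test the patch keys off,
matching the cap_caching_mode() macro from the VT-d header of this era (shown
for reference only, not part of the diff below):

/* CM set: hardware may cache not-present entries, using domain id 0 */
#define cap_caching_mode(c)	(((c) >> 7) & 1)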
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--	drivers/pci/intel-iommu.c	52
1 file changed, 21 insertions, 31 deletions
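Since this view is limited to drivers/pci/intel-iommu.c, the matching
qi_flush_context() change lands in drivers/pci/dmar.c and is not shown below.
Condensed, the two flush routines end up with the following shape (a sketch of
the resulting declarations, not a hunk from this diff):

static void __iommu_flush_context(struct intel_iommu *iommu, u16 did,
                                  u16 source_id, u8 function_mask, u64 type);
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
                      u8 fm, u64 type);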
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index d6f4ee50924c..9f5d9151edc9 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -857,26 +857,13 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
}
/* return value determines if we need a write buffer flush */
-static int __iommu_flush_context(struct intel_iommu *iommu,
- u16 did, u16 source_id, u8 function_mask, u64 type,
- int non_present_entry_flush)
+static void __iommu_flush_context(struct intel_iommu *iommu,
+ u16 did, u16 source_id, u8 function_mask,
+ u64 type)
{
u64 val = 0;
unsigned long flag;
- /*
- * In the non-present entry flush case, if hardware doesn't cache
- * non-present entry we do nothing and if hardware cache non-present
- * entry, we flush entries of domain 0 (the domain id is used to cache
- * any non-present entries)
- */
- if (non_present_entry_flush) {
- if (!cap_caching_mode(iommu->cap))
- return 1;
- else
- did = 0;
- }
-
switch (type) {
case DMA_CCMD_GLOBAL_INVL:
val = DMA_CCMD_GLOBAL_INVL;
@@ -901,9 +888,6 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
dmar_readq, (!(val & DMA_CCMD_ICC)), val);
spin_unlock_irqrestore(&iommu->register_lock, flag);
-
- /* flush context entry will implicitly flush write buffer */
- return 0;
}
/* return value determines if we need a write buffer flush */
@@ -1428,14 +1412,21 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
context_set_present(context);
domain_flush_cache(domain, context, sizeof(*context));
- /* it's a non-present to present mapping */
- if (iommu->flush.flush_context(iommu, id,
- (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL, 1))
- iommu_flush_write_buffer(iommu);
- else
+ /*
+ * It's a non-present to present mapping. If the hardware doesn't cache
+ * non-present entries, we only need to flush the write-buffer. If it
+ * _does_ cache non-present entries, then it does so in the special
+ * domain #0, which we have to flush:
+ */
+ if (cap_caching_mode(iommu->cap)) {
+ iommu->flush.flush_context(iommu, 0,
+ (((u16)bus) << 8) | devfn,
+ DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
-
+ } else {
+ iommu_flush_write_buffer(iommu);
+ }
spin_unlock_irqrestore(&iommu->lock, flags);
spin_lock_irqsave(&domain->iommu_lock, flags);
@@ -1566,7 +1557,7 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
clear_context_table(iommu, bus, devfn);
iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL, 0);
+ DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH, 0);
}
@@ -2104,8 +2095,7 @@ static int __init init_dmars(void)
iommu_set_root_entry(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
- 0);
+ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
0);
iommu_disable_protect_mem_regions(iommu);
@@ -2721,7 +2711,7 @@ static int init_iommu_hw(void)
iommu_set_root_entry(iommu);
iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL, 0);
+ DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH, 0);
iommu_disable_protect_mem_regions(iommu);
@@ -2738,7 +2728,7 @@ static void iommu_flush_all(void)
for_each_active_iommu(iommu, drhd) {
iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL, 0);
+ DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH, 0);
}