author    David Woodhouse <David.Woodhouse@intel.com>  2009-07-02 11:21:16 +0100
committer David Woodhouse <David.Woodhouse@intel.com>  2009-07-02 11:27:13 +0100
commit    75e6bf9638992dfc0fec9c3ca10444c8e0d6a638 (patch)
tree      bdbabd91d77edb3d623292ea97b17d88d053816e /drivers/pci/intel-iommu.c
parent    7766a3fb905f0b078b05f5d6a6be8df4c64b9f51 (diff)
intel-iommu: Introduce first_pte_in_page() to simplify PTE-setting loops
On Wed, 2009-07-01 at 16:59 -0700, Linus Torvalds wrote:
> I also _really_ hate how you do
>
>     (unsigned long)pte >> VTD_PAGE_SHIFT ==
>     (unsigned long)first_pte >> VTD_PAGE_SHIFT

Kill this, in favour of just looking to see if the incremented pte
pointer has 'wrapped' onto the next page. Which means we have to check
it _after_ incrementing it, not before.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
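For readers outside the kernel tree, here is a minimal user-space sketch of the idiom the patch adopts. It assumes 4KiB page-table pages of 512 eight-byte PTEs (the VT-d layout); struct dma_pte is stubbed down to a bare value and aligned_alloc() stands in for the kernel's page-table allocator, so this is illustrative, not kernel code.

    /* User-space sketch of the wrap-detection idiom: clear every PTE in
     * one page-table page, stopping when the incremented pointer crosses
     * onto the next page. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define VTD_PAGE_SHIFT 12
    #define VTD_PAGE_MASK  (((uint64_t)-1) << VTD_PAGE_SHIFT)

    struct dma_pte { uint64_t val; };   /* stub of the kernel struct */

    /* True when pte points at the first entry of a page-table page,
     * i.e. no address bits are set below the page boundary. */
    static inline int first_pte_in_page(struct dma_pte *pte)
    {
            return !((unsigned long)pte & ~VTD_PAGE_MASK);
    }

    int main(void)
    {
            /* One page-aligned table, standing in for what the IOMMU's
             * page-table walk would hand back. */
            struct dma_pte *first_pte = aligned_alloc(1UL << VTD_PAGE_SHIFT,
                                                      1UL << VTD_PAGE_SHIFT);
            struct dma_pte *pte = first_pte;
            int cleared = 0;

            if (!first_pte)
                    return 1;

            /* Mirrors the shape of the reworked inner loop (the range
             * bound on start_pfn is omitted here): the body runs at
             * least once, and the wrap test fires only after pte++. */
            do {
                    pte->val = 0;   /* stands in for dma_clear_pte() */
                    pte++;
                    cleared++;
            } while (!first_pte_in_page(pte));

            printf("cleared %d ptes before wrapping\n", cleared); /* 512 */
            free(first_pte);
            return 0;
    }

The do/while is safe because pte and first_pte begin on the same page, so the first iteration never needs a guard; after that, detecting the page boundary is a single mask-and-test instead of two shifts and a compare.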
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--  drivers/pci/intel-iommu.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 2c1b2babfdc5..dcf0295a9b60 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -240,6 +240,11 @@ static inline bool dma_pte_present(struct dma_pte *pte)
return (pte->val & 3) != 0;
}
+static inline int first_pte_in_page(struct dma_pte *pte)
+{
+ return !((unsigned long)pte & ~VTD_PAGE_MASK);
+}
+
/*
* This domain is a statically identity mapping domain.
 * 1. This domain creates a static 1:1 mapping to all usable memory.
@@ -780,13 +785,12 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
start_pfn = align_to_level(start_pfn + 1, 2);
continue;
}
- while (start_pfn <= last_pfn &&
- (unsigned long)pte >> VTD_PAGE_SHIFT ==
- (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
+ do {
dma_clear_pte(pte);
start_pfn++;
pte++;
- }
+ } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
+
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
}
@@ -821,14 +825,14 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
tmp = align_to_level(tmp + 1, level + 1);
continue;
}
- while (tmp + level_size(level) - 1 <= last_pfn &&
- (unsigned long)pte >> VTD_PAGE_SHIFT ==
- (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
+ do {
free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
dma_clear_pte(pte);
pte++;
tmp += level_size(level);
- }
+ } while (!first_pte_in_page(pte) &&
+ tmp + level_size(level) - 1 <= last_pfn);
+
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
@@ -1694,9 +1698,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
WARN_ON(1);
}
pte++;
- if (!nr_pages ||
- (unsigned long)pte >> VTD_PAGE_SHIFT !=
- (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
+ if (!nr_pages || first_pte_in_page(pte)) {
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
pte = NULL;
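To see concretely why the test must come after the increment, consider the last PTE of a page-table page. The self-contained fragment below repeats the stub definitions from the sketch above; the fixed addresses are made up purely for illustration and are never dereferenced.

    /* Why the check follows pte++: the last 8-byte PTE of a 4KiB page
     * sits at offset 0xff8, so the pointer only reveals the boundary
     * once it has been incremented onto the next page. */
    #include <assert.h>
    #include <stdint.h>

    #define VTD_PAGE_SHIFT 12
    #define VTD_PAGE_MASK  (((uint64_t)-1) << VTD_PAGE_SHIFT)

    struct dma_pte { uint64_t val; };

    static inline int first_pte_in_page(struct dma_pte *pte)
    {
            return !((unsigned long)pte & ~VTD_PAGE_MASK);
    }

    int main(void)
    {
            struct dma_pte *pte = (struct dma_pte *)0x10ff8UL; /* last PTE */

            assert(!first_pte_in_page(pte)); /* pre-increment: same page */
            pte++;                           /* steps to 0x11000 */
            assert(first_pte_in_page(pte));  /* post-increment: wrapped */
            return 0;
    }

This is why the patch converts the pre-test while loops into do/while loops: the old shift-and-compare condition could be evaluated before touching an entry, but the wrap test is only meaningful after the pointer has advanced.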