Diffstat (limited to 'drivers/iommu/io-pgtable-arm.c')
-rw-r--r-- | drivers/iommu/io-pgtable-arm.c | 227
1 file changed, 147 insertions(+), 80 deletions(-)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 6b9bb58a414f..7632c80edea6 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -223,6 +223,34 @@ static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data)
 	return ptes_per_table - (i & (ptes_per_table - 1));
 }
 
+/*
+ * Check if concatenated PGDs are mandatory according to Arm DDI0487 (K.a)
+ * 1) R_DXBSH: For 16KB, and 48-bit input size, use level 1 instead of 0.
+ * 2) R_SRKBC: After de-ciphering the table for PA size and valid initial lookup
+ *    a) 40 bits PA size with 4K: use level 1 instead of level 0 (2 tables for ias = oas)
+ *    b) 40 bits PA size with 16K: use level 2 instead of level 1 (16 tables for ias = oas)
+ *    c) 42 bits PA size with 4K: use level 1 instead of level 0 (8 tables for ias = oas)
+ *    d) 48 bits PA size with 16K: use level 1 instead of level 0 (2 tables for ias = oas)
+ */
+static inline bool arm_lpae_concat_mandatory(struct io_pgtable_cfg *cfg,
+					     struct arm_lpae_io_pgtable *data)
+{
+	unsigned int ias = cfg->ias;
+	unsigned int oas = cfg->oas;
+
+	/* Covers 1 and 2.d */
+	if ((ARM_LPAE_GRANULE(data) == SZ_16K) && (data->start_level == 0))
+		return (oas == 48) || (ias == 48);
+
+	/* Covers 2.a and 2.c */
+	if ((ARM_LPAE_GRANULE(data) == SZ_4K) && (data->start_level == 0))
+		return (oas == 40) || (oas == 42);
+
+	/* Case 2.b */
+	return (ARM_LPAE_GRANULE(data) == SZ_16K) &&
+	       (data->start_level == 1) && (oas == 40);
+}
+
 static bool selftest_running = false;
 
 static dma_addr_t __arm_lpae_dma_addr(void *pages)
@@ -676,85 +704,107 @@ static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iov
 			      data->start_level, ptep);
 }
 
+struct io_pgtable_walk_data {
+	struct io_pgtable		*iop;
+	void				*data;
+	int (*visit)(struct io_pgtable_walk_data *walk_data, int lvl,
+		     arm_lpae_iopte *ptep, size_t size);
+	unsigned long			flags;
+	u64				addr;
+	const u64			end;
+};
+
+static int __arm_lpae_iopte_walk(struct arm_lpae_io_pgtable *data,
+				 struct io_pgtable_walk_data *walk_data,
+				 arm_lpae_iopte *ptep,
+				 int lvl);
+
+struct iova_to_phys_data {
+	arm_lpae_iopte pte;
+	int lvl;
+};
+
+static int visit_iova_to_phys(struct io_pgtable_walk_data *walk_data, int lvl,
+			      arm_lpae_iopte *ptep, size_t size)
+{
+	struct iova_to_phys_data *data = walk_data->data;
+	data->pte = *ptep;
+	data->lvl = lvl;
+	return 0;
+}
+
 static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
 					 unsigned long iova)
 {
 	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
-	arm_lpae_iopte pte, *ptep = data->pgd;
-	int lvl = data->start_level;
-
-	do {
-		/* Valid IOPTE pointer? */
-		if (!ptep)
-			return 0;
-
-		/* Grab the IOPTE we're interested in */
-		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
-		pte = READ_ONCE(*ptep);
-
-		/* Valid entry? */
-		if (!pte)
-			return 0;
+	struct iova_to_phys_data d;
+	struct io_pgtable_walk_data walk_data = {
+		.data = &d,
+		.visit = visit_iova_to_phys,
+		.addr = iova,
+		.end = iova + 1,
+	};
+	int ret;
 
-		/* Leaf entry? */
-		if (iopte_leaf(pte, lvl, data->iop.fmt))
-			goto found_translation;
+	ret = __arm_lpae_iopte_walk(data, &walk_data, data->pgd, data->start_level);
+	if (ret)
+		return 0;
 
-		/* Take it to the next level */
-		ptep = iopte_deref(pte, data);
-	} while (++lvl < ARM_LPAE_MAX_LEVELS);
+	iova &= (ARM_LPAE_BLOCK_SIZE(d.lvl, data) - 1);
+	return iopte_to_paddr(d.pte, data) | iova;
+}
 
-	/* Ran out of page tables to walk */
+static int visit_pgtable_walk(struct io_pgtable_walk_data *walk_data, int lvl,
+			      arm_lpae_iopte *ptep, size_t size)
+{
+	struct arm_lpae_io_pgtable_walk_data *data = walk_data->data;
+	data->ptes[lvl] = *ptep;
 	return 0;
-
-found_translation:
-	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
-	return iopte_to_paddr(pte, data) | iova;
 }
 
-struct io_pgtable_walk_data {
-	struct iommu_dirty_bitmap	*dirty;
-	unsigned long			flags;
-	u64				addr;
-	const u64			end;
-};
+static int arm_lpae_pgtable_walk(struct io_pgtable_ops *ops, unsigned long iova,
+				 void *wd)
+{
+	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+	struct io_pgtable_walk_data walk_data = {
+		.data = wd,
+		.visit = visit_pgtable_walk,
+		.addr = iova,
+		.end = iova + 1,
+	};
 
-static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
-				       struct io_pgtable_walk_data *walk_data,
-				       arm_lpae_iopte *ptep,
-				       int lvl);
+	return __arm_lpae_iopte_walk(data, &walk_data, data->pgd, data->start_level);
+}
 
-static int io_pgtable_visit_dirty(struct arm_lpae_io_pgtable *data,
-				  struct io_pgtable_walk_data *walk_data,
-				  arm_lpae_iopte *ptep, int lvl)
+static int io_pgtable_visit(struct arm_lpae_io_pgtable *data,
			    struct io_pgtable_walk_data *walk_data,
			    arm_lpae_iopte *ptep, int lvl)
 {
 	struct io_pgtable *iop = &data->iop;
 	arm_lpae_iopte pte = READ_ONCE(*ptep);
 
-	if (iopte_leaf(pte, lvl, iop->fmt)) {
-		size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+	size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+	int ret = walk_data->visit(walk_data, lvl, ptep, size);
+	if (ret)
+		return ret;
 
-		if (iopte_writeable_dirty(pte)) {
-			iommu_dirty_bitmap_record(walk_data->dirty,
-						  walk_data->addr, size);
-			if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
-				iopte_set_writeable_clean(ptep);
-		}
+	if (iopte_leaf(pte, lvl, iop->fmt)) {
 		walk_data->addr += size;
 		return 0;
 	}
 
-	if (WARN_ON(!iopte_table(pte, lvl)))
+	if (!iopte_table(pte, lvl)) {
 		return -EINVAL;
+	}
 
 	ptep = iopte_deref(pte, data);
-	return __arm_lpae_iopte_walk_dirty(data, walk_data, ptep, lvl + 1);
+	return __arm_lpae_iopte_walk(data, walk_data, ptep, lvl + 1);
 }
 
-static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
-				       struct io_pgtable_walk_data *walk_data,
-				       arm_lpae_iopte *ptep,
-				       int lvl)
+static int __arm_lpae_iopte_walk(struct arm_lpae_io_pgtable *data,
+				 struct io_pgtable_walk_data *walk_data,
+				 arm_lpae_iopte *ptep,
+				 int lvl)
 {
 	u32 idx;
 	int max_entries, ret;
@@ -769,7 +819,7 @@ static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
 	for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data);
 	     (idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) {
-		ret = io_pgtable_visit_dirty(data, walk_data, ptep + idx, lvl);
+		ret = io_pgtable_visit(data, walk_data, ptep + idx, lvl);
 		if (ret)
 			return ret;
 	}
@@ -777,6 +827,23 @@ static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
 	return 0;
 }
 
+static int visit_dirty(struct io_pgtable_walk_data *walk_data, int lvl,
+		       arm_lpae_iopte *ptep, size_t size)
+{
+	struct iommu_dirty_bitmap *dirty = walk_data->data;
+
+	if (!iopte_leaf(*ptep, lvl, walk_data->iop->fmt))
+		return 0;
+
+	if (iopte_writeable_dirty(*ptep)) {
+		iommu_dirty_bitmap_record(dirty, walk_data->addr, size);
+		if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
+			iopte_set_writeable_clean(ptep);
+	}
+
+	return 0;
+}
+
 static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
 					 unsigned long iova, size_t size,
 					 unsigned long flags,
@@ -785,7 +852,9 @@ static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
 	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 	struct io_pgtable_walk_data walk_data = {
-		.dirty = dirty,
+		.iop = &data->iop,
+		.data = dirty,
+		.visit = visit_dirty,
 		.flags = flags,
 		.addr = iova,
 		.end = iova + size,
@@ -800,7 +869,7 @@ static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
 	if (data->iop.fmt != ARM_64_LPAE_S1)
 		return -EINVAL;
 
-	return __arm_lpae_iopte_walk_dirty(data, &walk_data, ptep, lvl);
+	return __arm_lpae_iopte_walk(data, &walk_data, ptep, lvl);
 }
 
 static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
@@ -882,6 +951,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
 		.unmap_pages	= arm_lpae_unmap_pages,
 		.iova_to_phys	= arm_lpae_iova_to_phys,
 		.read_and_clear_dirty = arm_lpae_read_and_clear_dirty,
+		.pgtable_walk	= arm_lpae_pgtable_walk,
 	};
 
 	return data;
@@ -1006,18 +1076,12 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 	if (!data)
 		return NULL;
 
-	/*
-	 * Concatenate PGDs at level 1 if possible in order to reduce
-	 * the depth of the stage-2 walk.
-	 */
-	if (data->start_level == 0) {
-		unsigned long pgd_pages;
-
-		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
-		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
-			data->pgd_bits += data->bits_per_level;
-			data->start_level++;
-		}
+	if (arm_lpae_concat_mandatory(cfg, data)) {
+		if (WARN_ON((ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte)) >
+			    ARM_LPAE_S2_MAX_CONCAT_PAGES))
+			return NULL;
+		data->pgd_bits += data->bits_per_level;
+		data->start_level++;
 	}
 
 	/* VTCR */
@@ -1364,15 +1428,14 @@ static int __init arm_lpae_do_selftests(void)
 		SZ_64K | SZ_512M,
 	};
 
-	static const unsigned int ias[] __initconst = {
+	static const unsigned int address_size[] __initconst = {
 		32, 36, 40, 42, 44, 48,
 	};
 
-	int i, j, pass = 0, fail = 0;
+	int i, j, k, pass = 0, fail = 0;
 	struct device dev;
 	struct io_pgtable_cfg cfg = {
 		.tlb = &dummy_tlb_ops,
-		.oas = 48,
 		.coherent_walk = true,
 		.iommu_dev = &dev,
 	};
@@ -1381,15 +1444,19 @@ static int __init arm_lpae_do_selftests(void)
 	set_dev_node(&dev, NUMA_NO_NODE);
 
 	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
-		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
-			cfg.pgsize_bitmap = pgsize[i];
-			cfg.ias = ias[j];
-			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
-				pgsize[i], ias[j]);
-			if (arm_lpae_run_tests(&cfg))
-				fail++;
-			else
-				pass++;
+		for (j = 0; j < ARRAY_SIZE(address_size); ++j) {
+			/* Don't use ias > oas as it is not valid for stage-2. */
+			for (k = 0; k <= j; ++k) {
+				cfg.pgsize_bitmap = pgsize[i];
+				cfg.ias = address_size[k];
+				cfg.oas = address_size[j];
+				pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u OAS %u\n",
+					pgsize[i], cfg.ias, cfg.oas);
+				if (arm_lpae_run_tests(&cfg))
+					fail++;
+				else
+					pass++;
+			}
 		}
 	}
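The table counts quoted in the arm_lpae_concat_mandatory() comment (2, 16, 8 and 2 concatenated tables) follow from the stage-2 translation geometry: with 8-byte descriptors, a granule of 2^g bytes resolves g - 3 input bits per level, and whatever input-address bits remain once the start level is bumped must be covered by concatenated PGDs. The standalone sketch below reproduces that arithmetic; it is plain userspace C, not kernel code, and every name in it (concat_info, granule_shift, and so on) is local to the example rather than taken from io-pgtable-arm.c.

#include <stdio.h>

#define MAX_LEVELS 4	/* mirrors ARM_LPAE_MAX_LEVELS in the driver */

/*
 * Illustrative sketch only: for a given granule (as a shift) and input
 * size, print the start-level bump and the resulting number of
 * concatenated stage-2 PGDs.
 */
static void concat_info(unsigned int granule_shift, unsigned int ias)
{
	/* 8-byte PTEs: one table holds 2^(granule_shift - 3) entries */
	unsigned int bits_per_level = granule_shift - 3;
	/* Levels needed to resolve the IA bits above the page offset */
	unsigned int levels = (ias - granule_shift + bits_per_level - 1) /
			      bits_per_level;
	unsigned int start_level = MAX_LEVELS - levels;
	/* IA bits left for the PGD after folding one level away */
	unsigned int pgd_bits = ias - granule_shift -
				bits_per_level * (levels - 1);

	printf("granule %2uK, ias %u: level %u -> %u, %u concatenated tables\n",
	       1u << (granule_shift - 10), ias,
	       start_level, start_level + 1, 1u << pgd_bits);
}

int main(void)
{
	concat_info(12, 40);	/* case 2.a: expect 2 tables  */
	concat_info(14, 40);	/* case 2.b: expect 16 tables */
	concat_info(12, 42);	/* case 2.c: expect 8 tables  */
	concat_info(14, 48);	/* cases 1 and 2.d: expect 2  */
	return 0;
}

All four mandatory cases stay at or below ARM_LPAE_S2_MAX_CONCAT_PAGES (16 in the driver), which is why the WARN_ON added to arm_64_lpae_alloc_pgtable_s2() should never fire for them.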