Diffstat (limited to 'drivers/iommu/amd/iommu.c')
-rw-r--r--  drivers/iommu/amd/iommu.c | 203
1 file changed, 49 insertions(+), 154 deletions(-)
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index a69a8b573e40..3ac42bbdefc6 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -290,15 +290,6 @@ static bool pci_iommuv2_capable(struct pci_dev *pdev)
 	return true;
 }
 
-static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
-{
-	struct iommu_dev_data *dev_data;
-
-	dev_data = dev_iommu_priv_get(&pdev->dev);
-
-	return dev_data->errata & (1 << erratum) ? true : false;
-}
-
 /*
  * This function checks if the driver got a valid device from the caller to
  * avoid dereferencing invalid pointers.
@@ -861,33 +852,58 @@ static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
 	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
 }
 
-static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
-				  size_t size, u16 domid, int pde)
+/*
+ * Builds an invalidation address which is suitable for one page or multiple
+ * pages. Sets the size bit (S) as needed if more than one page is flushed.
+ */
+static inline u64 build_inv_address(u64 address, size_t size)
 {
-	u64 pages;
-	bool s;
+	u64 pages, end, msb_diff;
 
 	pages = iommu_num_pages(address, size, PAGE_SIZE);
-	s     = false;
 
-	if (pages > 1) {
+	if (pages == 1)
+		return address & PAGE_MASK;
+
+	end = address + size - 1;
+
+	/*
+	 * msb_diff would hold the index of the most significant bit that
+	 * flipped between the start and end.
+	 */
+	msb_diff = fls64(end ^ address) - 1;
+
+	/*
+	 * Bits 63:52 are sign extended. If for some reason bit 51 is different
+	 * between the start and the end, invalidate everything.
+	 */
+	if (unlikely(msb_diff > 51)) {
+		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+	} else {
 		/*
-		 * If we have to flush more than one page, flush all
-		 * TLB entries for this domain
+		 * The msb-bit must be clear on the address. Just set all the
+		 * lower bits.
 		 */
-		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
-		s = true;
+		address |= (1ull << msb_diff) - 1;
 	}
 
+	/* Clear bits 11:0 */
 	address &= PAGE_MASK;
 
+	/* Set the size bit - we flush more than one 4kb page */
+	return address | CMD_INV_IOMMU_PAGES_SIZE_MASK;
+}
+
+static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
+				  size_t size, u16 domid, int pde)
+{
+	u64 inv_address = build_inv_address(address, size);
+
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->data[1] |= domid;
-	cmd->data[2]  = lower_32_bits(address);
-	cmd->data[3]  = upper_32_bits(address);
+	cmd->data[2]  = lower_32_bits(inv_address);
+	cmd->data[3]  = upper_32_bits(inv_address);
 	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
-	if (s) /* size bit - we flush more than one 4kb page */
-		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
 		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 }
@@ -895,32 +911,15 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
 static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
 				  u64 address, size_t size)
 {
-	u64 pages;
-	bool s;
-
-	pages = iommu_num_pages(address, size, PAGE_SIZE);
-	s     = false;
-
-	if (pages > 1) {
-		/*
-		 * If we have to flush more than one page, flush all
-		 * TLB entries for this domain
-		 */
-		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
-		s = true;
-	}
-
-	address &= PAGE_MASK;
+	u64 inv_address = build_inv_address(address, size);
 
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->data[0]  = devid;
 	cmd->data[0] |= (qdep & 0xff) << 24;
 	cmd->data[1]  = devid;
-	cmd->data[2]  = lower_32_bits(address);
-	cmd->data[3]  = upper_32_bits(address);
+	cmd->data[2]  = lower_32_bits(inv_address);
+	cmd->data[3]  = upper_32_bits(inv_address);
 	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
-	if (s)
-		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 }
 
 static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
@@ -1531,33 +1530,9 @@ static void pdev_iommuv2_disable(struct pci_dev *pdev)
 	pci_disable_pasid(pdev);
 }
 
-/* FIXME: Change generic reset-function to do the same */
-static int pri_reset_while_enabled(struct pci_dev *pdev)
-{
-	u16 control;
-	int pos;
-
-	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
-	if (!pos)
-		return -EINVAL;
-
-	pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
-	control |= PCI_PRI_CTRL_RESET;
-	pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
-
-	return 0;
-}
-
 static int pdev_iommuv2_enable(struct pci_dev *pdev)
 {
-	bool reset_enable;
-	int reqs, ret;
-
-	/* FIXME: Hardcode number of outstanding requests for now */
-	reqs = 32;
-	if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
-		reqs = 1;
-	reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
+	int ret;
 
 	/* Only allow access to user-accessible pages */
 	ret = pci_enable_pasid(pdev, 0);
@@ -1570,16 +1545,11 @@ static int pdev_iommuv2_enable(struct pci_dev *pdev)
 		goto out_err;
 
 	/* Enable PRI */
-	ret = pci_enable_pri(pdev, reqs);
+	/* FIXME: Hardcode number of outstanding requests for now */
+	ret = pci_enable_pri(pdev, 32);
 	if (ret)
 		goto out_err;
 
-	if (reset_enable) {
-		ret = pri_reset_while_enabled(pdev);
-		if (ret)
-			goto out_err;
-	}
-
 	ret = pci_enable_ats(pdev, PAGE_SHIFT);
 	if (ret)
 		goto out_err;
@@ -1715,9 +1685,6 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
 		return ERR_PTR(-ENODEV);
 
 	devid = get_device_id(dev);
-	if (devid < 0)
-		return ERR_PTR(devid);
-
 	iommu = amd_iommu_rlookup_table[devid];
 
 	if (dev_iommu_priv_get(dev))
@@ -1747,6 +1714,8 @@ static void amd_iommu_probe_finalize(struct device *dev)
 	domain = iommu_get_domain_for_dev(dev);
 	if (domain->type == IOMMU_DOMAIN_DMA)
 		iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
+	else
+		set_dma_ops(dev, NULL);
 }
 
 static void amd_iommu_release_device(struct device *dev)
@@ -1771,26 +1740,6 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
 	return acpihid_device_group(dev);
 }
 
-static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
-				     enum iommu_attr attr, void *data)
-{
-	switch (domain->type) {
-	case IOMMU_DOMAIN_UNMANAGED:
-		return -ENODEV;
-	case IOMMU_DOMAIN_DMA:
-		switch (attr) {
-		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-			*(int *)data = !amd_iommu_unmap_flush;
-			return 0;
-		default:
-			return -ENODEV;
-		}
-		break;
-	default:
-		return -EINVAL;
-	}
-}
-
 /*****************************************************************************
  *
  * The next functions belong to the dma_ops mapping/unmapping code.
@@ -1855,7 +1804,7 @@ int __init amd_iommu_init_dma_ops(void)
 		pr_info("IO/TLB flush on unmap enabled\n");
 	else
 		pr_info("Lazy IO/TLB flushing enabled\n");
-
+	iommu_set_dma_strict(amd_iommu_unmap_flush);
 	return 0;
 }
@@ -2019,16 +1968,12 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
 				    struct device *dev)
 {
 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+	int devid = get_device_id(dev);
 	struct amd_iommu *iommu;
-	int devid;
 
 	if (!check_device(dev))
 		return;
 
-	devid = get_device_id(dev);
-	if (devid < 0)
-		return;
-
 	if (dev_data->domain != NULL)
 		detach_device(dev);
@@ -2257,7 +2202,6 @@ const struct iommu_ops amd_iommu_ops = {
 	.release_device = amd_iommu_release_device,
 	.probe_finalize = amd_iommu_probe_finalize,
 	.device_group = amd_iommu_device_group,
-	.domain_get_attr = amd_iommu_domain_get_attr,
 	.get_resv_regions = amd_iommu_get_resv_regions,
 	.put_resv_regions = generic_iommu_put_resv_regions,
 	.is_attach_deferred = amd_iommu_is_attach_deferred,
@@ -2310,9 +2254,6 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
 	unsigned long flags;
 	int levels, ret;
 
-	if (pasids <= 0 || pasids > (PASID_MASK + 1))
-		return -EINVAL;
-
 	/* Number of GCR3 table levels required */
 	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
 		levels += 1;
@@ -2563,52 +2504,6 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
 }
 EXPORT_SYMBOL(amd_iommu_complete_ppr);
 
-struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
-{
-	struct protection_domain *pdomain;
-	struct iommu_dev_data *dev_data;
-	struct device *dev = &pdev->dev;
-	struct iommu_domain *io_domain;
-
-	if (!check_device(dev))
-		return NULL;
-
-	dev_data = dev_iommu_priv_get(&pdev->dev);
-	pdomain = dev_data->domain;
-	io_domain = iommu_get_domain_for_dev(dev);
-
-	if (pdomain == NULL && dev_data->defer_attach) {
-		dev_data->defer_attach = false;
-		pdomain = to_pdomain(io_domain);
-		attach_device(dev, pdomain);
-	}
-
-	if (pdomain == NULL)
-		return NULL;
-
-	if (io_domain->type != IOMMU_DOMAIN_DMA)
-		return NULL;
-
-	/* Only return IOMMUv2 domains */
-	if (!(pdomain->flags & PD_IOMMUV2_MASK))
-		return NULL;
-
-	return &pdomain->domain;
-}
-EXPORT_SYMBOL(amd_iommu_get_v2_domain);
-
-void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
-{
-	struct iommu_dev_data *dev_data;
-
-	if (!amd_iommu_v2_supported())
-		return;
-
-	dev_data = dev_iommu_priv_get(&pdev->dev);
-	dev_data->errata |= (1 << erratum);
-}
-EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
-
 int amd_iommu_device_info(struct pci_dev *pdev,
 			  struct amd_iommu_device_info *info)
 {
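
The range encoding that the new build_inv_address() produces can be checked outside the kernel. The following is a minimal userspace sketch of the logic added above, not the driver code itself: the PAGE_* and CMD_INV_* constant values are assumptions mirroring the driver's definitions, fls64() is emulated with __builtin_clzll(), and iommu_num_pages() is reduced to the 4KiB-page case.

	/*
	 * inv_addr_demo.c - standalone sketch of the build_inv_address()
	 * encoding. Constant values and helpers are assumptions that mirror
	 * the driver, not kernel headers. Build: cc -o demo inv_addr_demo.c
	 */
	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01ULL			/* S bit, assumed */
	#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL	/* assumed */

	/* Kernel-style fls64(): 1-based index of the most significant set bit. */
	static uint64_t fls64(uint64_t x)
	{
		return x ? 64 - __builtin_clzll(x) : 0;
	}

	/* Number of 4KiB pages touched by [address, address + size). */
	static uint64_t num_pages(uint64_t address, size_t size)
	{
		return (size + (address & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	}

	static uint64_t build_inv_address(uint64_t address, size_t size)
	{
		uint64_t end, msb_diff;

		if (num_pages(address, size) == 1)
			return address & PAGE_MASK;	/* one page: S bit stays clear */

		end = address + size - 1;
		msb_diff = fls64(end ^ address) - 1;	/* highest bit that differs */

		if (msb_diff > 51)			/* range crosses bit 51: flush all */
			address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		else
			address |= (1ULL << msb_diff) - 1;	/* set all lower bits */

		address &= PAGE_MASK;			/* clear bits 11:0 */
		return address | CMD_INV_IOMMU_PAGES_SIZE_MASK;	/* set the S bit */
	}

	int main(void)
	{
		/* Five pages at 0x10000: expect 0x13001 (32KiB range, S bit set). */
		printf("%#llx\n", (unsigned long long)build_inv_address(0x10000, 0x5000));
		return 0;
	}

Worked through by hand for address 0x10000 and size 0x5000 (five 4KiB pages): end is 0x14fff, the highest bit that differs between start and end is bit 14, so bits 13:0 are set, bits 11:0 are cleared again, and the S bit is added, giving 0x13001. The IOMMU reads the first zero bit above bit 11 (here bit 14) as the range size, so this single command invalidates the 32KiB window 0x10000-0x17fff, a power-of-two superset of the requested 20KiB, instead of flushing the whole domain as the old pages > 1 path did.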