author:    Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>  2022-10-25 19:20:03 +0300
committer: Juergen Gross <jgross@suse.com>  2022-12-05 12:59:49 +0100
commit:    035e3a4321f73c352b6408ec2153fa5bc3feb459
tree:      b4316270fadbeb589707275cbd607dda71a70e1d
parent:    76dcd734eca23168cb008912c0f69ff408905235
xen/virtio: Optimize the setup of "xen-grant-dma" devices
This is needed to avoid having to parse the same device-tree several times for a given device.

For this to work we need to install the xen_virtio_restricted_mem_acc callback in Arm's xen_guest_init(), which is the same callback that x86's PV and HVM modes already use, and remove the manual assignment in xen_setup_dma_ops(). We also need to split the code that initializes backend_domid into a separate function.

Prior to this patch we parsed the device-tree three times:
1. xen_setup_dma_ops()->...->xen_is_dt_grant_dma_device()
2. xen_setup_dma_ops()->...->xen_dt_grant_init_backend_domid()
3. xen_virtio_mem_acc()->...->xen_is_dt_grant_dma_device()

With this patch we parse the device-tree only once, in
xen_virtio_restricted_mem_acc()->...->xen_dt_grant_init_backend_domid()

Other benefits are:
- No divergence from x86 when setting up Xen grant DMA ops
- Several global functions can be dropped

Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Reviewed-by: Xenia Ragiadakou <burzalodowa@gmail.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Link: https://lore.kernel.org/r/20221025162004.8501-2-olekstysh@gmail.com
Signed-off-by: Juergen Gross <jgross@suse.com>
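The Arm-side registration that the commit message refers to lives in arch/arm/xen/enlighten.c and is therefore outside the drivers/xen path filter of the diff below. The following is only a rough sketch of that part of the change, assuming the virtio_set_mem_acc_cb() hook that x86 already uses and the usual header locations; neither the exact surrounding code nor the headers are shown in this filtered diff.

/* Sketch only: arch/arm/xen/enlighten.c is not part of the filtered diff. */
#include <linux/virtio_config.h>
#include <xen/xen.h>

static int __init xen_guest_init(void)
{
	/* ... existing Xen guest detection and setup ... */

	/*
	 * Register the same restricted-memory-access callback that x86's
	 * PV and HVM modes already use; it replaces the Arm-specific
	 * xen_virtio_mem_acc() wrapper removed by this patch.
	 */
	virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);

	/* ... remaining initialization ... */
	return 0;
}

With the callback installed there, the manual assignment in xen_setup_dma_ops() is no longer needed, which is why xen_is_grant_dma_device() can be dropped and xen_grant_setup_dma_ops() can become static in the diff below.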
Diffstat (limited to 'drivers/xen')
-rw-r--r--  drivers/xen/grant-dma-ops.c | 77
1 file changed, 28 insertions(+), 49 deletions(-)
diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
index daa525df7bdc..1e797a043980 100644
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -292,50 +292,20 @@ static const struct dma_map_ops xen_grant_dma_ops = {
.dma_supported = xen_grant_dma_supported,
};
-static bool xen_is_dt_grant_dma_device(struct device *dev)
-{
- struct device_node *iommu_np;
- bool has_iommu;
-
- iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
- has_iommu = iommu_np &&
- of_device_is_compatible(iommu_np, "xen,grant-dma");
- of_node_put(iommu_np);
-
- return has_iommu;
-}
-
-bool xen_is_grant_dma_device(struct device *dev)
-{
- /* XXX Handle only DT devices for now */
- if (dev->of_node)
- return xen_is_dt_grant_dma_device(dev);
-
- return false;
-}
-
-bool xen_virtio_mem_acc(struct virtio_device *dev)
-{
- if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain())
- return true;
-
- return xen_is_grant_dma_device(dev->dev.parent);
-}
-
static int xen_dt_grant_init_backend_domid(struct device *dev,
- struct xen_grant_dma_data *data)
+ domid_t *backend_domid)
{
struct of_phandle_args iommu_spec;
if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
0, &iommu_spec)) {
- dev_err(dev, "Cannot parse iommus property\n");
+ dev_dbg(dev, "Cannot parse iommus property\n");
return -ESRCH;
}
if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
iommu_spec.args_count != 1) {
- dev_err(dev, "Incompatible IOMMU node\n");
+ dev_dbg(dev, "Incompatible IOMMU node\n");
of_node_put(iommu_spec.np);
return -ESRCH;
}
@@ -346,12 +316,28 @@ static int xen_dt_grant_init_backend_domid(struct device *dev,
* The endpoint ID here means the ID of the domain where the
* corresponding backend is running
*/
- data->backend_domid = iommu_spec.args[0];
+ *backend_domid = iommu_spec.args[0];
return 0;
}
-void xen_grant_setup_dma_ops(struct device *dev)
+static int xen_grant_init_backend_domid(struct device *dev,
+ domid_t *backend_domid)
+{
+ int ret = -ENODEV;
+
+ if (dev->of_node) {
+ ret = xen_dt_grant_init_backend_domid(dev, backend_domid);
+ } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) {
+ dev_info(dev, "Using dom0 as backend\n");
+ *backend_domid = 0;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void xen_grant_setup_dma_ops(struct device *dev, domid_t backend_domid)
{
struct xen_grant_dma_data *data;
@@ -365,16 +351,7 @@ void xen_grant_setup_dma_ops(struct device *dev)
if (!data)
goto err;
- if (dev->of_node) {
- if (xen_dt_grant_init_backend_domid(dev, data))
- goto err;
- } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
- dev_info(dev, "Using dom0 as backend\n");
- data->backend_domid = 0;
- } else {
- /* XXX ACPI device unsupported for now */
- goto err;
- }
+ data->backend_domid = backend_domid;
if (store_xen_grant_dma_data(dev, data)) {
dev_err(dev, "Cannot store Xen grant DMA data\n");
@@ -392,12 +369,14 @@ err:
bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
{
- bool ret = xen_virtio_mem_acc(dev);
+ domid_t backend_domid;
- if (ret)
- xen_grant_setup_dma_ops(dev->dev.parent);
+ if (!xen_grant_init_backend_domid(dev->dev.parent, &backend_domid)) {
+ xen_grant_setup_dma_ops(dev->dev.parent, backend_domid);
+ return true;
+ }
- return ret;
+ return false;
}
MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
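After this patch the entry point that the rest of the kernel uses from grant-dma-ops.c is xen_virtio_restricted_mem_acc(); the helpers changed above are now static. Below is a minimal sketch of how a caller would exercise the new single-parse path. The calling context is hypothetical (in practice the virtio core invokes the callback registered via virtio_set_mem_acc_cb() while validating device features), and the header location of the declaration is an assumption.

#include <linux/virtio.h>
#include <xen/xen.h>

/* Hypothetical wrapper, for illustration only. */
static bool example_needs_restricted_mem_access(struct virtio_device *vdev)
{
	/*
	 * One call now does all the work:
	 *  - DT devices: parse "iommus" once and read the backend domid
	 *    from the "xen,grant-dma" specifier;
	 *  - otherwise: fall back to dom0 when running as a PV guest or
	 *    when CONFIG_XEN_VIRTIO_FORCE_GRANT is enabled;
	 *  - on success: install xen_grant_dma_ops on the parent device.
	 */
	return xen_virtio_restricted_mem_acc(vdev);
}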