author     Linus Torvalds <torvalds@linux-foundation.org>  2024-05-21 11:15:56 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-05-21 11:15:56 -0700
commit     d4e034b4c43f289a4d96144e209f47f453aae4f8 (patch)
tree       555f254a39a09326a25b1a51a0bdbe3bbadca123 /drivers/dma
parent     34dcc466103e1de5376db85b043bdde19fa0c0ff (diff)
parent     28059ddbee0eb92730931a652e16a994499a7858 (diff)
Merge tag 'dmaengine-6.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine
Pull dmaengine updates from Vinod Koul:
"New HW support:
- Freescale i.MX8ULP edma support in edma driver
- StarFive JH8100 DMA support in Synopsys axi-dmac driver
Updates:
- Tracing support for freescale edma driver, updates to dpaa2 driver
- Remove unused QCom hidma DT support
- Support for i2c dma in imx-sdma
- Maintainers update for idxd and edma drivers"
* tag 'dmaengine-6.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (42 commits)
MAINTAINERS: Update role for IDXD driver
dmaengine: fsl-edma: use _Generic to handle difference type
dmaengine: fsl-edma: add trace event support
dmaengine: idxd: Avoid unnecessary destruction of file_ida
dmaengine: xilinx: xdma: fix module autoloading
dt-bindings: dma: fsl-edma: allow 'power-domains' property
dt-bindings: dma: fsl-edma: remove 'clocks' from required
dmaengine: fsl-dpaa2-qdma: Fix kernel-doc check warning
dmaengine: imx-sdma: Add i2c dma support
dmaengine: imx-sdma: utilize compiler to calculate ADDRS_ARRAY_SIZE_V<n>
dt-bindings: fsl-imx-sdma: Add I2C peripheral types ID
dt-bindings: fsl-dma: fsl-edma: clean up unused "fsl,imx8qm-adma" compatible string
dmaengine: fsl-edma: clean up unused "fsl,imx8qm-adma" compatible string
dt-bindings: dma: Drop unused QCom hidma binding
dmaengine: qcom: Drop hidma DT support
dmaengine: pl08x: Use kcalloc() instead of kzalloc()
dmaengine: fsl-dpaa2-qdma: Update DPDMAI interfaces to version 3
dmaengine: fsl-edma: fix miss mutex unlock at an error return path
dmaengine: pch_dma: remove unused function chan2parent
dmaengine: fsl-dpaa2-qdma: Add dpdmai_cmd_open
...
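Among the cleanups pulled in above, the fsl-edma commits replace sizeof()-based switch macros with C11 _Generic selection, so the accessor of the right register width is picked from a field's declared type at compile time. Below is a minimal userspace sketch of that technique; the struct, field names, and read helpers are hypothetical stand-ins rather than the kernel's __iomem/__le accessors.

```c
/*
 * Sketch only: hypothetical stand-ins for the kernel's edma_read_tcdreg()
 * machinery, to show how _Generic dispatches on a field's declared type.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t read64(const void *p) { puts("64-bit read"); return *(const uint64_t *)p; }
static uint32_t read32(const void *p) { puts("32-bit read"); return *(const uint32_t *)p; }
static uint16_t read16(const void *p) { puts("16-bit read"); return *(const uint16_t *)p; }

struct hw_tcd {
	uint64_t saddr;   /* 64-bit source address register */
	uint32_t nbytes;  /* 32-bit byte-count register */
	uint16_t csr;     /* 16-bit control/status register */
};

/*
 * One macro covers every register width: the branch matching the field's
 * declared type is selected at compile time, no runtime sizeof() switch.
 */
#define tcd_read(tcd, field)                          \
	_Generic((tcd)->field,                        \
		 uint64_t: read64(&(tcd)->field),     \
		 uint32_t: read32(&(tcd)->field),     \
		 uint16_t: read16(&(tcd)->field))

int main(void)
{
	struct hw_tcd tcd = { .saddr = 0x1000, .nbytes = 64, .csr = 0x7 };

	printf("saddr  = 0x%llx\n", (unsigned long long)tcd_read(&tcd, saddr));
	printf("nbytes = %u\n",     tcd_read(&tcd, nbytes));
	printf("csr    = 0x%x\n",   tcd_read(&tcd, csr));
	return 0;
}
```

The same idea drives the reworked edma_read_tcdreg_c(), edma_write_tcdreg_c() and fsl_edma_le_to_cpu() macros visible in the fsl-edma-common.h hunk of the diff below.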
Diffstat (limited to 'drivers/dma')
 drivers/dma/Makefile                            |   6
 drivers/dma/amba-pl08x.c                        |   4
 drivers/dma/dma-axi-dmac.c                      |  78
 drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c  |  38
 drivers/dma/dw-axi-dmac/dw-axi-dmac.h           |   3
 drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c         |  14
 drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h         |   5
 drivers/dma/fsl-dpaa2-qdma/dpdmai.c             | 113
 drivers/dma/fsl-dpaa2-qdma/dpdmai.h             |  61
 drivers/dma/fsl-edma-common.c                   |  25
 drivers/dma/fsl-edma-common.h                   | 110
 drivers/dma/fsl-edma-main.c                     |  50
 drivers/dma/fsl-edma-trace.c                    |   4
 drivers/dma/fsl-edma-trace.h                    | 132
 drivers/dma/idma64.c                            |   4
 drivers/dma/idxd/cdev.c                         |  18
 drivers/dma/imx-sdma.c                          |  97
 drivers/dma/mcf-edma-main.c                     |   4
 drivers/dma/pch_dma.c                           |   5
 drivers/dma/qcom/hidma.c                        |  11
 drivers/dma/qcom/hidma_mgmt.c                   | 109
 drivers/dma/virt-dma.h                          |  10
 drivers/dma/xilinx/xdma.c                       |   1
 drivers/dma/xilinx/xilinx_dpdma.c               |  10
 24 files changed, 499 insertions(+), 413 deletions(-)
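The dma-axi-dmac portion of the diff below converts probe() to fully device-managed resources, which is why axi_dmac_remove() goes away. The probe sketch that follows is a hedged illustration of the ordering this relies on, using a hypothetical my_dmac driver: devres releases run in reverse (LIFO) order, so an action registered after dmaenginem_async_device_register() runs before the DMA device is unregistered.

```c
/*
 * Minimal sketch, not the axi-dmac driver itself: my_dmac and my_probe()
 * are hypothetical; the devm_ and dmaenginem_ helpers are real managed
 * kernel APIs.
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_dmac {
	struct dma_device dma_dev;
	struct tasklet_struct task;
};

static void my_tasklet_kill(void *data)
{
	tasklet_kill(data);
}

static int my_probe(struct platform_device *pdev)
{
	struct my_dmac *dmac;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	/* ... hardware setup, channel and capability setup elided ... */

	/* Managed registration: undone automatically on unbind or failure. */
	ret = dmaenginem_async_device_register(&dmac->dma_dev);
	if (ret)
		return ret;

	/*
	 * Devres actions are released in reverse (LIFO) order, so this
	 * action, added after the managed registration, runs before the
	 * DMA device is unregistered, which is the ordering the comment
	 * in the axi-dmac hunk below asks for.
	 */
	return devm_add_action_or_reset(&pdev->dev, my_tasklet_kill,
					&dmac->task);
}

static struct platform_driver my_driver = {
	.probe = my_probe,
	/* No .remove needed: every resource above is device-managed. */
	.driver = { .name = "my-dmac-sketch" },
};
module_platform_driver(my_driver);

MODULE_LICENSE("GPL");
```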
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index dfd40d14e408..802ca916f05f 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -31,10 +31,12 @@ obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/ obj-$(CONFIG_DW_DMAC_CORE) += dw/ obj-$(CONFIG_DW_EDMA) += dw-edma/ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o +fsl-edma-trace-$(CONFIG_TRACING) := fsl-edma-trace.o +CFLAGS_fsl-edma-trace.o := -I$(src) obj-$(CONFIG_FSL_DMA) += fsldma.o -fsl-edma-objs := fsl-edma-main.o fsl-edma-common.o +fsl-edma-objs := fsl-edma-main.o fsl-edma-common.o ${fsl-edma-trace-y} obj-$(CONFIG_FSL_EDMA) += fsl-edma.o -mcf-edma-objs := mcf-edma-main.o fsl-edma-common.o +mcf-edma-objs := mcf-edma-main.o fsl-edma-common.o ${fsl-edma-trace-y} obj-$(CONFIG_MCF_EDMA) += mcf-edma.o obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o obj-$(CONFIG_FSL_RAID) += fsl_raid.o diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index fbf048f432bf..73a5cfb4da8a 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -2855,8 +2855,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) } /* Initialize physical channels */ - pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)), - GFP_KERNEL); + pl08x->phy_chans = kcalloc(vd->channels, sizeof(*pl08x->phy_chans), + GFP_KERNEL); if (!pl08x->phy_chans) { ret = -ENOMEM; goto out_no_phychans; diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 4e339c04fc1e..bdb752f11869 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -1002,6 +1002,16 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version) return 0; } +static void axi_dmac_tasklet_kill(void *task) +{ + tasklet_kill(task); +} + +static void axi_dmac_free_dma_controller(void *of_node) +{ + of_dma_controller_free(of_node); +} + static int axi_dmac_probe(struct platform_device *pdev) { struct dma_device *dma_dev; @@ -1025,14 +1035,10 @@ static int axi_dmac_probe(struct platform_device *pdev) if (IS_ERR(dmac->base)) return PTR_ERR(dmac->base); - dmac->clk = devm_clk_get(&pdev->dev, NULL); + dmac->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(dmac->clk)) return PTR_ERR(dmac->clk); - ret = clk_prepare_enable(dmac->clk); - if (ret < 0) - return ret; - version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION); if (version >= ADI_AXI_PCORE_VER(4, 3, 'a')) @@ -1041,7 +1047,7 @@ static int axi_dmac_probe(struct platform_device *pdev) ret = axi_dmac_parse_dt(&pdev->dev, dmac); if (ret < 0) - goto err_clk_disable; + return ret; INIT_LIST_HEAD(&dmac->chan.active_descs); @@ -1072,7 +1078,7 @@ static int axi_dmac_probe(struct platform_device *pdev) ret = axi_dmac_detect_caps(dmac, version); if (ret) - goto err_clk_disable; + return ret; dma_dev->copy_align = (dmac->chan.address_align_mask + 1); @@ -1088,57 +1094,42 @@ static int axi_dmac_probe(struct platform_device *pdev) !AXI_DMAC_DST_COHERENT_GET(ret)) { dev_err(dmac->dma_dev.dev, "Coherent DMA not supported in hardware"); - ret = -EINVAL; - goto err_clk_disable; + return -EINVAL; } } - ret = dma_async_device_register(dma_dev); + ret = dmaenginem_async_device_register(dma_dev); + if (ret) + return ret; + + /* + * Put the action in here so it get's done before unregistering the DMA + * device. 
+ */ + ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_tasklet_kill, + &dmac->chan.vchan.task); if (ret) - goto err_clk_disable; + return ret; ret = of_dma_controller_register(pdev->dev.of_node, of_dma_xlate_by_chan_id, dma_dev); if (ret) - goto err_unregister_device; + return ret; - ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED, - dev_name(&pdev->dev), dmac); + ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_free_dma_controller, + pdev->dev.of_node); if (ret) - goto err_unregister_of; + return ret; - platform_set_drvdata(pdev, dmac); + ret = devm_request_irq(&pdev->dev, dmac->irq, axi_dmac_interrupt_handler, + IRQF_SHARED, dev_name(&pdev->dev), dmac); + if (ret) + return ret; regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config); - if (IS_ERR(regmap)) { - ret = PTR_ERR(regmap); - goto err_free_irq; - } - - return 0; - -err_free_irq: - free_irq(dmac->irq, dmac); -err_unregister_of: - of_dma_controller_free(pdev->dev.of_node); -err_unregister_device: - dma_async_device_unregister(&dmac->dma_dev); -err_clk_disable: - clk_disable_unprepare(dmac->clk); - - return ret; -} - -static void axi_dmac_remove(struct platform_device *pdev) -{ - struct axi_dmac *dmac = platform_get_drvdata(pdev); - of_dma_controller_free(pdev->dev.of_node); - free_irq(dmac->irq, dmac); - tasklet_kill(&dmac->chan.vchan.task); - dma_async_device_unregister(&dmac->dma_dev); - clk_disable_unprepare(dmac->clk); + return PTR_ERR_OR_ZERO(regmap); } static const struct of_device_id axi_dmac_of_match_table[] = { @@ -1153,7 +1144,6 @@ static struct platform_driver axi_dmac_driver = { .of_match_table = axi_dmac_of_match_table, }, .probe = axi_dmac_probe, - .remove_new = axi_dmac_remove, }; module_platform_driver(axi_dmac_driver); diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index a86a81ff0caa..fffafa86d964 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -302,6 +302,7 @@ static struct axi_dma_desc *axi_desc_alloc(u32 num) kfree(desc); return NULL; } + desc->nr_hw_descs = num; return desc; } @@ -328,7 +329,7 @@ static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan, static void axi_desc_put(struct axi_dma_desc *desc) { struct axi_dma_chan *chan = desc->chan; - int count = atomic_read(&chan->descs_allocated); + int count = desc->nr_hw_descs; struct axi_dma_hw_desc *hw_desc; int descs_put; @@ -1139,9 +1140,6 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan) /* Remove the completed descriptor from issued list before completing */ list_del(&vd->node); vchan_cookie_complete(vd); - - /* Submit queued descriptors after processing the completed ones */ - axi_chan_start_first_queued(chan); } out: @@ -1445,6 +1443,24 @@ static int parse_device_properties(struct axi_dma_chip *chip) return 0; } +static int axi_req_irqs(struct platform_device *pdev, struct axi_dma_chip *chip) +{ + int irq_count = platform_irq_count(pdev); + int ret; + + for (int i = 0; i < irq_count; i++) { + chip->irq[i] = platform_get_irq(pdev, i); + if (chip->irq[i] < 0) + return chip->irq[i]; + ret = devm_request_irq(chip->dev, chip->irq[i], dw_axi_dma_interrupt, + IRQF_SHARED, KBUILD_MODNAME, chip); + if (ret < 0) + return ret; + } + + return 0; +} + static int dw_probe(struct platform_device *pdev) { struct axi_dma_chip *chip; @@ -1471,10 +1487,6 @@ static int dw_probe(struct platform_device *pdev) chip->dev = &pdev->dev; chip->dw->hdata = hdata; - 
chip->irq = platform_get_irq(pdev, 0); - if (chip->irq < 0) - return chip->irq; - chip->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(chip->regs)) return PTR_ERR(chip->regs); @@ -1515,8 +1527,7 @@ static int dw_probe(struct platform_device *pdev) if (!dw->chan) return -ENOMEM; - ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt, - IRQF_SHARED, KBUILD_MODNAME, chip); + ret = axi_req_irqs(pdev, chip); if (ret) return ret; @@ -1629,7 +1640,9 @@ static void dw_remove(struct platform_device *pdev) pm_runtime_disable(chip->dev); axi_dma_suspend(chip); - devm_free_irq(chip->dev, chip->irq, chip); + for (i = 0; i < DMAC_MAX_CHANNELS; i++) + if (chip->irq[i] > 0) + devm_free_irq(chip->dev, chip->irq[i], chip); of_dma_controller_free(chip->dev->of_node); @@ -1653,6 +1666,9 @@ static const struct of_device_id dw_dma_of_id_table[] = { }, { .compatible = "starfive,jh7110-axi-dma", .data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2), + }, { + .compatible = "starfive,jh8100-axi-dma", + .data = (void *)AXI_DMA_FLAG_HAS_RESETS, }, {} }; diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h index 454904d99654..b842e6a8d90d 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h @@ -65,7 +65,7 @@ struct dw_axi_dma { struct axi_dma_chip { struct device *dev; - int irq; + int irq[DMAC_MAX_CHANNELS]; void __iomem *regs; void __iomem *apb_regs; struct clk *core_clk; @@ -104,6 +104,7 @@ struct axi_dma_desc { u32 completed_blocks; u32 length; u32 period_len; + u32 nr_hw_descs; }; struct axi_dma_chan_config { diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c index 5a8061a307cd..36384d019263 100644 --- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c +++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c @@ -362,7 +362,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev) for (i = 0; i < priv->num_pairs; i++) { err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, - i, &priv->rx_queue_attr[i]); + i, 0, &priv->rx_queue_attr[i]); if (err) { dev_err(dev, "dpdmai_get_rx_queue() failed\n"); goto exit; @@ -370,13 +370,13 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev) ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid; err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, - i, &priv->tx_fqid[i]); + i, 0, &priv->tx_queue_attr[i]); if (err) { dev_err(dev, "dpdmai_get_tx_queue() failed\n"); goto exit; } - ppriv->req_fqid = priv->tx_fqid[i]; - ppriv->prio = i; + ppriv->req_fqid = priv->tx_queue_attr[i].fqid; + ppriv->prio = DPAA2_QDMA_DEFAULT_PRIORITY; ppriv->priv = priv; ppriv++; } @@ -542,7 +542,7 @@ static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv) rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id; rx_queue_cfg.dest_cfg.priority = ppriv->prio; err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, - rx_queue_cfg.dest_cfg.priority, + rx_queue_cfg.dest_cfg.priority, 0, &rx_queue_cfg); if (err) { dev_err(dev, "dpdmai_set_rx_queue() failed\n"); @@ -642,7 +642,7 @@ static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma) for (i = 0; i < dpaa2_qdma->n_chans; i++) { dpaa2_chan = &dpaa2_qdma->chans[i]; dpaa2_chan->qdma = dpaa2_qdma; - dpaa2_chan->fqid = priv->tx_fqid[i % num]; + dpaa2_chan->fqid = priv->tx_queue_attr[i % num].fqid; dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc; vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev); spin_lock_init(&dpaa2_chan->queue_lock); @@ -802,7 +802,7 @@ 
static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev) dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle); dpaa2_dpdmai_dpio_unbind(priv); dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle); - dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle); + dpdmai_destroy(priv->mc_io, 0, priv->dpqdma_id, ls_dev->mc_handle); } static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = { diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h index 03e2f4e0baca..2c80077cb7c0 100644 --- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h +++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h @@ -6,6 +6,7 @@ #define DPAA2_QDMA_STORE_SIZE 16 #define NUM_CH 8 +#define DPAA2_QDMA_DEFAULT_PRIORITY 0 struct dpaa2_qdma_sd_d { u32 rsv:32; @@ -122,8 +123,8 @@ struct dpaa2_qdma_priv { struct dpaa2_qdma_engine *dpaa2_qdma; struct dpaa2_qdma_priv_per_prio *ppriv; - struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM]; - u32 tx_fqid[DPDMAI_PRIO_NUM]; + struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_MAX_QUEUE_NUM]; + struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_MAX_QUEUE_NUM]; }; struct dpaa2_qdma_priv_per_prio { diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c index 878662aaa1c2..36897b41ee7e 100644 --- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c +++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c @@ -1,47 +1,52 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright 2019 NXP +#include <linux/bitfield.h> #include <linux/module.h> #include <linux/types.h> #include <linux/io.h> #include <linux/fsl/mc.h> #include "dpdmai.h" +#define DEST_TYPE_MASK 0xF + struct dpdmai_rsp_get_attributes { __le32 id; u8 num_of_priorities; - u8 pad0[3]; + u8 num_of_queues; + u8 pad0[2]; __le16 major; __le16 minor; }; struct dpdmai_cmd_queue { __le32 dest_id; - u8 priority; - u8 queue; + u8 dest_priority; + union { + u8 queue; + u8 pri; + }; u8 dest_type; - u8 pad; + u8 queue_idx; __le64 user_ctx; union { __le32 options; __le32 fqid; }; -}; +} __packed; struct dpdmai_rsp_get_tx_queue { __le64 pad; __le32 fqid; }; -#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \ - ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg)) +struct dpdmai_cmd_open { + __le32 dpdmai_id; +} __packed; -/* cmd, param, offset, width, type, arg_name */ -#define DPDMAI_CMD_CREATE(cmd, cfg) \ -do { \ - MC_CMD_OP(cmd, 0, 8, 8, u8, (cfg)->priorities[0]);\ - MC_CMD_OP(cmd, 0, 16, 8, u8, (cfg)->priorities[1]);\ -} while (0) +struct dpdmai_cmd_destroy { + __le32 dpdmai_id; +} __packed; static inline u64 mc_enc(int lsoffset, int width, u64 val) { @@ -68,16 +73,16 @@ static inline u64 mc_enc(int lsoffset, int width, u64 val) int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpdmai_id, u16 *token) { + struct dpdmai_cmd_open *cmd_params; struct fsl_mc_command cmd = { 0 }; - __le64 *cmd_dpdmai_id; int err; /* prepare command */ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN, cmd_flags, 0); - cmd_dpdmai_id = cmd.params; - *cmd_dpdmai_id = cpu_to_le32(dpdmai_id); + cmd_params = (struct dpdmai_cmd_open *)&cmd.params; + cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id); /* send command to mc*/ err = mc_send_command(mc_io, &cmd); @@ -116,65 +121,26 @@ int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) EXPORT_SYMBOL_GPL(dpdmai_close); /** - * dpdmai_create() - Create the DPDMAI object - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @cfg: Configuration structure - * @token: Returned token; use in subsequent API calls - * - 
* Create the DPDMAI object, allocate required resources and - * perform required initialization. - * - * The object can be created either by declaring it in the - * DPL file, or by calling this function. - * - * This function returns a unique authentication token, - * associated with the specific object ID and the specific MC - * portal; this token must be used in all subsequent calls to - * this specific object. For objects that are created using the - * DPL file, call dpdmai_open() function to get an authentication - * token first. - * - * Return: '0' on Success; Error code otherwise. - */ -int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags, - const struct dpdmai_cfg *cfg, u16 *token) -{ - struct fsl_mc_command cmd = { 0 }; - int err; - - /* prepare command */ - cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE, - cmd_flags, 0); - DPDMAI_CMD_CREATE(cmd, cfg); - - /* send command to mc*/ - err = mc_send_command(mc_io, &cmd); - if (err) - return err; - - /* retrieve response parameters */ - *token = mc_cmd_hdr_read_token(&cmd); - - return 0; -} - -/** * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources. * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dpdmai_id: The object id; it must be a valid id within the container that created this object; * @token: Token of DPDMAI object * * Return: '0' on Success; error code otherwise. */ -int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) +int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u32 dpdmai_id, u16 token) { + struct dpdmai_cmd_destroy *cmd_params; struct fsl_mc_command cmd = { 0 }; /* prepare command */ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY, cmd_flags, token); + cmd_params = (struct dpdmai_cmd_destroy *)&cmd.params; + cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id); + /* send command to mc*/ return mc_send_command(mc_io, &cmd); } @@ -274,6 +240,7 @@ int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, attr->version.major = le16_to_cpu(rsp_params->major); attr->version.minor = le16_to_cpu(rsp_params->minor); attr->num_of_priorities = rsp_params->num_of_priorities; + attr->num_of_queues = rsp_params->num_of_queues; return 0; } @@ -284,13 +251,14 @@ EXPORT_SYMBOL_GPL(dpdmai_get_attributes); * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPDMAI object + * @queue_idx: DMA queue index * @priority: Select the queue relative to number of * priorities configured at DPDMAI creation * @cfg: Rx queue configuration * * Return: '0' on Success; Error code otherwise. 
*/ -int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 queue_idx, u8 priority, const struct dpdmai_rx_queue_cfg *cfg) { struct dpdmai_cmd_queue *cmd_params; @@ -302,11 +270,12 @@ int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, cmd_params = (struct dpdmai_cmd_queue *)cmd.params; cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); - cmd_params->priority = cfg->dest_cfg.priority; - cmd_params->queue = priority; + cmd_params->dest_priority = cfg->dest_cfg.priority; + cmd_params->pri = priority; cmd_params->dest_type = cfg->dest_cfg.dest_type; cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx); cmd_params->options = cpu_to_le32(cfg->options); + cmd_params->queue_idx = queue_idx; /* send command to mc*/ return mc_send_command(mc_io, &cmd); @@ -318,13 +287,14 @@ EXPORT_SYMBOL_GPL(dpdmai_set_rx_queue); * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPDMAI object + * @queue_idx: DMA Queue index * @priority: Select the queue relative to number of * priorities configured at DPDMAI creation * @attr: Returned Rx queue attributes * * Return: '0' on Success; Error code otherwise. */ -int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 queue_idx, u8 priority, struct dpdmai_rx_queue_attr *attr) { struct dpdmai_cmd_queue *cmd_params; @@ -337,6 +307,7 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, cmd_params = (struct dpdmai_cmd_queue *)cmd.params; cmd_params->queue = priority; + cmd_params->queue_idx = queue_idx; /* send command to mc*/ err = mc_send_command(mc_io, &cmd); @@ -345,8 +316,8 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, /* retrieve response parameters */ attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id); - attr->dest_cfg.priority = cmd_params->priority; - attr->dest_cfg.dest_type = cmd_params->dest_type; + attr->dest_cfg.priority = cmd_params->dest_priority; + attr->dest_cfg.dest_type = FIELD_GET(DEST_TYPE_MASK, cmd_params->dest_type); attr->user_ctx = le64_to_cpu(cmd_params->user_ctx); attr->fqid = le32_to_cpu(cmd_params->fqid); @@ -359,14 +330,15 @@ EXPORT_SYMBOL_GPL(dpdmai_get_rx_queue); * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPDMAI object + * @queue_idx: DMA queue index * @priority: Select the queue relative to number of * priorities configured at DPDMAI creation - * @fqid: Returned Tx queue + * @attr: Returned DMA Tx queue attributes * * Return: '0' on Success; Error code otherwise. 
*/ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, - u16 token, u8 priority, u32 *fqid) + u16 token, u8 queue_idx, u8 priority, struct dpdmai_tx_queue_attr *attr) { struct dpdmai_rsp_get_tx_queue *rsp_params; struct dpdmai_cmd_queue *cmd_params; @@ -379,6 +351,7 @@ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, cmd_params = (struct dpdmai_cmd_queue *)cmd.params; cmd_params->queue = priority; + cmd_params->queue_idx = queue_idx; /* send command to mc*/ err = mc_send_command(mc_io, &cmd); @@ -388,7 +361,7 @@ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, /* retrieve response parameters */ rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params; - *fqid = le32_to_cpu(rsp_params->fqid); + attr->fqid = le32_to_cpu(rsp_params->fqid); return 0; } diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.h b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h index b13b9bf0c003..3fe7d8327366 100644 --- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.h +++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h @@ -5,14 +5,19 @@ #define __FSL_DPDMAI_H /* DPDMAI Version */ -#define DPDMAI_VER_MAJOR 2 -#define DPDMAI_VER_MINOR 2 +#define DPDMAI_VER_MAJOR 3 +#define DPDMAI_VER_MINOR 3 -#define DPDMAI_CMD_BASE_VERSION 0 +#define DPDMAI_CMD_BASE_VERSION 1 #define DPDMAI_CMD_ID_OFFSET 4 -#define DPDMAI_CMDID_FORMAT(x) (((x) << DPDMAI_CMD_ID_OFFSET) | \ - DPDMAI_CMD_BASE_VERSION) +/* + * Maximum number of Tx/Rx queues per DPDMAI object + */ +#define DPDMAI_MAX_QUEUE_NUM 8 + +#define DPDMAI_CMDID_FORMAT_V(x, v) (((x) << DPDMAI_CMD_ID_OFFSET) | (v)) +#define DPDMAI_CMDID_FORMAT(x) DPDMAI_CMDID_FORMAT_V(x, DPDMAI_CMD_BASE_VERSION) /* Command IDs */ #define DPDMAI_CMDID_CLOSE DPDMAI_CMDID_FORMAT(0x800) @@ -26,18 +31,9 @@ #define DPDMAI_CMDID_RESET DPDMAI_CMDID_FORMAT(0x005) #define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMDID_FORMAT(0x006) -#define DPDMAI_CMDID_SET_IRQ DPDMAI_CMDID_FORMAT(0x010) -#define DPDMAI_CMDID_GET_IRQ DPDMAI_CMDID_FORMAT(0x011) -#define DPDMAI_CMDID_SET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x012) -#define DPDMAI_CMDID_GET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x013) -#define DPDMAI_CMDID_SET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x014) -#define DPDMAI_CMDID_GET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x015) -#define DPDMAI_CMDID_GET_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x016) -#define DPDMAI_CMDID_CLEAR_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x017) - -#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A0) -#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A1) -#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMDID_FORMAT(0x1A2) +#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A0, 2) +#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A1, 2) +#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A2, 2) #define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */ #define MC_CMD_HDR_TOKEN_S 16 /* Token field size */ @@ -49,30 +45,32 @@ * Contains initialization APIs and runtime control APIs for DPDMAI */ -/** +/* * Maximum number of Tx/Rx priorities per DPDMAI object */ #define DPDMAI_PRIO_NUM 2 /* DPDMAI queue modification options */ -/** +/* * Select to modify the user's context associated with the queue */ #define DPDMAI_QUEUE_OPT_USER_CTX 0x1 -/** +/* * Select to modify the queue's destination */ #define DPDMAI_QUEUE_OPT_DEST 0x2 /** * struct dpdmai_cfg - Structure representing DPDMAI configuration + * @num_queues: Number of the DMA queues * @priorities: Priorities for the DMA hardware processing; valid priorities are * configured with values 1-8; the entry following last valid entry * should be configured with 0 */ 
struct dpdmai_cfg { + u8 num_queues; u8 priorities[DPDMAI_PRIO_NUM]; }; @@ -80,20 +78,19 @@ struct dpdmai_cfg { * struct dpdmai_attr - Structure representing DPDMAI attributes * @id: DPDMAI object ID * @version: DPDMAI version + * @version.major: DPDMAI major version + * @version.minor: DPDMAI minor version * @num_of_priorities: number of priorities + * @num_of_queues: number of the DMA queues */ struct dpdmai_attr { int id; - /** - * struct version - DPDMAI version - * @major: DPDMAI major version - * @minor: DPDMAI minor version - */ struct { u16 major; u16 minor; } version; u8 num_of_priorities; + u8 num_of_queues; }; /** @@ -158,22 +155,24 @@ struct dpdmai_rx_queue_attr { u32 fqid; }; +struct dpdmai_tx_queue_attr { + u32 fqid; +}; + int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpdmai_id, u16 *token); int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); -int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); -int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags, - const struct dpdmai_cfg *cfg, u16 *token); +int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u32 dpdmai_id, u16 token); int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, struct dpdmai_attr *attr); int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, - u8 priority, const struct dpdmai_rx_queue_cfg *cfg); + u8 queue_idx, u8 priority, const struct dpdmai_rx_queue_cfg *cfg); int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, - u8 priority, struct dpdmai_rx_queue_attr *attr); + u8 queue_idx, u8 priority, struct dpdmai_rx_queue_attr *attr); int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, - u16 token, u8 priority, u32 *fqid); + u16 token, u8 queue_idx, u8 priority, struct dpdmai_tx_queue_attr *attr); #endif /* __FSL_DPDMAI_H */ diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index b18faa7cfedb..3af430787315 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -3,6 +3,8 @@ // Copyright (c) 2013-2014 Freescale Semiconductor, Inc // Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it> +#include <linux/cleanup.h> +#include <linux/clk.h> #include <linux/dmapool.h> #include <linux/module.h> #include <linux/slab.h> @@ -74,18 +76,10 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan) flags = fsl_edma_drvflags(fsl_chan); val = edma_readl_chreg(fsl_chan, ch_sbr); - /* Remote/local swapped wrongly on iMX8 QM Audio edma */ - if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) { - if (!fsl_chan->is_rxchan) - val |= EDMA_V3_CH_SBR_RD; - else - val |= EDMA_V3_CH_SBR_WR; - } else { - if (fsl_chan->is_rxchan) - val |= EDMA_V3_CH_SBR_RD; - else - val |= EDMA_V3_CH_SBR_WR; - } + if (fsl_chan->is_rxchan) + val |= EDMA_V3_CH_SBR_RD; + else + val |= EDMA_V3_CH_SBR_WR; if (fsl_chan->is_remote) val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR); @@ -546,6 +540,8 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan, csr |= EDMA_TCD_CSR_START; fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr); + + trace_edma_fill_tcd(fsl_chan, tcd); } static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan, @@ -810,6 +806,9 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan) { struct fsl_edma_chan *fsl_chan = 
to_fsl_edma_chan(chan); + if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK) + clk_prepare_enable(fsl_chan->clk); + fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev, fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ? sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd), @@ -838,6 +837,8 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan) fsl_chan->tcd_pool = NULL; fsl_chan->is_sw = false; fsl_chan->srcid = 0; + if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK) + clk_disable_unprepare(fsl_chan->clk); } void fsl_edma_cleanup_vchan(struct dma_device *dmadev) diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index 7bf0aba471a8..ac66222c1604 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h @@ -151,7 +151,6 @@ struct fsl_edma_chan { enum dma_status status; enum fsl_edma_pm_state pm_state; bool idle; - u32 slave_id; struct fsl_edma_engine *edma; struct fsl_edma_desc *edesc; struct dma_slave_config cfg; @@ -195,8 +194,6 @@ struct fsl_edma_desc { #define FSL_EDMA_DRV_HAS_PD BIT(5) #define FSL_EDMA_DRV_HAS_CHCLK BIT(6) #define FSL_EDMA_DRV_HAS_CHMUX BIT(7) -/* imx8 QM audio edma remote local swapped */ -#define FSL_EDMA_DRV_QUIRK_SWAPPED BIT(8) /* control and status register is in tcd address space, edma3 reg layout */ #define FSL_EDMA_DRV_SPLIT_REG BIT(9) #define FSL_EDMA_DRV_BUS_8BYTE BIT(10) @@ -238,7 +235,6 @@ struct fsl_edma_engine { void __iomem *muxbase[DMAMUX_NR]; struct clk *muxclk[DMAMUX_NR]; struct clk *dmaclk; - struct clk *chclk; struct mutex fsl_edma_mutex; const struct fsl_edma_drvdata *drvdata; u32 n_chans; @@ -250,13 +246,17 @@ struct fsl_edma_engine { struct fsl_edma_chan chans[] __counted_by(n_chans); }; +static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan) +{ + return fsl_chan->edma->drvdata->flags; +} + #define edma_read_tcdreg_c(chan, _tcd, __name) \ -(sizeof((_tcd)->__name) == sizeof(u64) ? \ - edma_readq(chan->edma, &(_tcd)->__name) : \ - ((sizeof((_tcd)->__name) == sizeof(u32)) ? \ - edma_readl(chan->edma, &(_tcd)->__name) : \ - edma_readw(chan->edma, &(_tcd)->__name) \ - )) +_Generic(((_tcd)->__name), \ + __iomem __le64 : edma_readq(chan->edma, &(_tcd)->__name), \ + __iomem __le32 : edma_readl(chan->edma, &(_tcd)->__name), \ + __iomem __le16 : edma_readw(chan->edma, &(_tcd)->__name) \ + ) #define edma_read_tcdreg(chan, __name) \ ((fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) ? 
\ @@ -264,23 +264,13 @@ struct fsl_edma_engine { edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd __iomem *)chan->tcd), __name) \ ) -#define edma_write_tcdreg_c(chan, _tcd, _val, __name) \ -do { \ - switch (sizeof(_tcd->__name)) { \ - case sizeof(u64): \ - edma_writeq(chan->edma, (u64 __force)_val, &_tcd->__name); \ - break; \ - case sizeof(u32): \ - edma_writel(chan->edma, (u32 __force)_val, &_tcd->__name); \ - break; \ - case sizeof(u16): \ - edma_writew(chan->edma, (u16 __force)_val, &_tcd->__name); \ - break; \ - case sizeof(u8): \ - edma_writeb(chan->edma, (u8 __force)_val, &_tcd->__name); \ - break; \ - } \ -} while (0) +#define edma_write_tcdreg_c(chan, _tcd, _val, __name) \ +_Generic((_tcd->__name), \ + __iomem __le64 : edma_writeq(chan->edma, (u64 __force)(_val), &_tcd->__name), \ + __iomem __le32 : edma_writel(chan->edma, (u32 __force)(_val), &_tcd->__name), \ + __iomem __le16 : edma_writew(chan->edma, (u16 __force)(_val), &_tcd->__name), \ + __iomem u8 : edma_writeb(chan->edma, _val, &_tcd->__name) \ + ) #define edma_write_tcdreg(chan, val, __name) \ do { \ @@ -321,9 +311,11 @@ do { \ (((struct fsl_edma_hw_tcd *)_tcd)->_field)) #define fsl_edma_le_to_cpu(x) \ -(sizeof(x) == sizeof(u64) ? le64_to_cpu((__force __le64)(x)) : \ - (sizeof(x) == sizeof(u32) ? le32_to_cpu((__force __le32)(x)) : \ - le16_to_cpu((__force __le16)(x)))) +_Generic((x), \ + __le64 : le64_to_cpu((x)), \ + __le32 : le32_to_cpu((x)), \ + __le16 : le16_to_cpu((x)) \ +) #define fsl_edma_get_tcd_to_cpu(_chan, _tcd, _field) \ (fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? \ @@ -331,19 +323,11 @@ do { \ fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd *)_tcd)->_field)) #define fsl_edma_set_tcd_to_le_c(_tcd, _val, _field) \ -do { \ - switch (sizeof((_tcd)->_field)) { \ - case sizeof(u64): \ - *(__force __le64 *)(&((_tcd)->_field)) = cpu_to_le64(_val); \ - break; \ - case sizeof(u32): \ - *(__force __le32 *)(&((_tcd)->_field)) = cpu_to_le32(_val); \ - break; \ - case sizeof(u16): \ - *(__force __le16 *)(&((_tcd)->_field)) = cpu_to_le16(_val); \ - break; \ - } \ -} while (0) +_Generic(((_tcd)->_field), \ + __le64 : (_tcd)->_field = cpu_to_le64(_val), \ + __le32 : (_tcd)->_field = cpu_to_le32(_val), \ + __le16 : (_tcd)->_field = cpu_to_le16(_val) \ +) #define fsl_edma_set_tcd_to_le(_chan, _tcd, _val, _field) \ do { \ @@ -353,6 +337,9 @@ do { \ fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd *)_tcd, _val, _field); \ } while (0) +/* Need after struct defination */ +#include "fsl-edma-trace.h" + /* * R/W functions for big- or little-endian registers: * The eDMA controller's endian is independent of the CPU core's endian. 
@@ -371,23 +358,38 @@ static inline u64 edma_readq(struct fsl_edma_engine *edma, void __iomem *addr) h = ioread32(addr + 4); } + trace_edma_readl(edma, addr, l); + trace_edma_readl(edma, addr + 4, h); + return (h << 32) | l; } static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr) { + u32 val; + if (edma->big_endian) - return ioread32be(addr); + val = ioread32be(addr); else - return ioread32(addr); + val = ioread32(addr); + + trace_edma_readl(edma, addr, val); + + return val; } static inline u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr) { + u16 val; + if (edma->big_endian) - return ioread16be(addr); + val = ioread16be(addr); else - return ioread16(addr); + val = ioread16(addr); + + trace_edma_readw(edma, addr, val); + + return val; } static inline void edma_writeb(struct fsl_edma_engine *edma, @@ -398,6 +400,8 @@ static inline void edma_writeb(struct fsl_edma_engine *edma, iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3)); else iowrite8(val, addr); + + trace_edma_writeb(edma, addr, val); } static inline void edma_writew(struct fsl_edma_engine *edma, @@ -408,6 +412,8 @@ static inline void edma_writew(struct fsl_edma_engine *edma, iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2)); else iowrite16(val, addr); + + trace_edma_writew(edma, addr, val); } static inline void edma_writel(struct fsl_edma_engine *edma, @@ -417,6 +423,8 @@ static inline void edma_writel(struct fsl_edma_engine *edma, iowrite32be(val, addr); else iowrite32(val, addr); + + trace_edma_writel(edma, addr, val); } static inline void edma_writeq(struct fsl_edma_engine *edma, @@ -429,6 +437,9 @@ static inline void edma_writeq(struct fsl_edma_engine *edma, iowrite32(val & 0xFFFFFFFF, addr); iowrite32(val >> 32, addr + 4); } + + trace_edma_writel(edma, addr, val & 0xFFFFFFFF); + trace_edma_writel(edma, addr + 4, val >> 32); } static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan) @@ -436,11 +447,6 @@ static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan) return container_of(chan, struct fsl_edma_chan, vchan.chan); } -static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan) -{ - return fsl_chan->edma->drvdata->flags; -} - static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd) { return container_of(vd, struct fsl_edma_desc, vdesc); diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c index 402f0058a180..391e4f13dfeb 100644 --- a/drivers/dma/fsl-edma-main.c +++ b/drivers/dma/fsl-edma-main.c @@ -105,7 +105,8 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, if (dma_spec->args_count != 2) return NULL; - mutex_lock(&fsl_edma->fsl_edma_mutex); + guard(mutex)(&fsl_edma->fsl_edma_mutex); + list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) { if (chan->client_count) continue; @@ -114,15 +115,20 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, if (chan) { chan->device->privatecnt++; fsl_chan = to_fsl_edma_chan(chan); - fsl_chan->slave_id = dma_spec->args[1]; - fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, + fsl_chan->srcid = dma_spec->args[1]; + + if (!fsl_chan->srcid) { + dev_err(&fsl_chan->pdev->dev, "Invalidate srcid %d\n", + fsl_chan->srcid); + return NULL; + } + + fsl_edma_chan_mux(fsl_chan, fsl_chan->srcid, true); - mutex_unlock(&fsl_edma->fsl_edma_mutex); return chan; } } } - mutex_unlock(&fsl_edma->fsl_edma_mutex); return NULL; } @@ -342,10 +348,13 @@ static struct fsl_edma_drvdata imx8qm_data = 
{ .setup_irq = fsl_edma3_irq_init, }; -static struct fsl_edma_drvdata imx8qm_audio_data = { - .flags = FSL_EDMA_DRV_QUIRK_SWAPPED | FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3, +static struct fsl_edma_drvdata imx8ulp_data = { + .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_CHCLK | FSL_EDMA_DRV_HAS_DMACLK | + FSL_EDMA_DRV_EDMA3, .chreg_space_sz = 0x10000, .chreg_off = 0x10000, + .mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux), + .mux_skip = 0x10000, .setup_irq = fsl_edma3_irq_init, }; @@ -380,7 +389,7 @@ static const struct of_device_id fsl_edma_dt_ids[] = { { .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data}, { .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data}, { .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data}, - { .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data}, + { .compatible = "fsl,imx8ulp-edma", .data = &imx8ulp_data}, { .compatible = "fsl,imx93-edma3", .data = &imx93_data3}, { .compatible = "fsl,imx93-edma4", .data = &imx93_data4}, { .compatible = "fsl,imx95-edma5", .data = &imx95_data5}, @@ -434,6 +443,7 @@ static int fsl_edma_probe(struct platform_device *pdev) struct fsl_edma_engine *fsl_edma; const struct fsl_edma_drvdata *drvdata = NULL; u32 chan_mask[2] = {0, 0}; + char clk_name[36]; struct edma_regs *regs; int chans; int ret, i; @@ -476,14 +486,6 @@ static int fsl_edma_probe(struct platform_device *pdev) } } - if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) { - fsl_edma->chclk = devm_clk_get_enabled(&pdev->dev, "mp"); - if (IS_ERR(fsl_edma->chclk)) { - dev_err(&pdev->dev, "Missing MP block clock.\n"); - return PTR_ERR(fsl_edma->chclk); - } - } - ret = of_property_read_variable_u32_array(np, "dma-channel-mask", chan_mask, 1, 2); if (ret > 0) { @@ -540,7 +542,7 @@ static int fsl_edma_probe(struct platform_device *pdev) fsl_chan->edma = fsl_edma; fsl_chan->pm_state = RUNNING; - fsl_chan->slave_id = 0; + fsl_chan->srcid = 0; fsl_chan->idle = true; fsl_chan->dma_dir = DMA_NONE; fsl_chan->vchan.desc_free = fsl_edma_free_desc; @@ -551,11 +553,21 @@ static int fsl_edma_probe(struct platform_device *pdev) + i * drvdata->chreg_space_sz + drvdata->chreg_off + len; fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip; + if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) { + snprintf(clk_name, sizeof(clk_name), "ch%02d", i); + fsl_chan->clk = devm_clk_get_enabled(&pdev->dev, + (const char *)clk_name); + + if (IS_ERR(fsl_chan->clk)) + return PTR_ERR(fsl_chan->clk); + } fsl_chan->pdev = pdev; vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev); edma_write_tcdreg(fsl_chan, cpu_to_le32(0), csr); fsl_edma_chan_mux(fsl_chan, 0, false); + if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) + clk_disable_unprepare(fsl_chan->clk); } ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma); @@ -682,8 +694,8 @@ static int fsl_edma_resume_early(struct device *dev) continue; fsl_chan->pm_state = RUNNING; edma_write_tcdreg(fsl_chan, 0, csr); - if (fsl_chan->slave_id != 0) - fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true); + if (fsl_chan->srcid != 0) + fsl_edma_chan_mux(fsl_chan, fsl_chan->srcid, true); } if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)) diff --git a/drivers/dma/fsl-edma-trace.c b/drivers/dma/fsl-edma-trace.c new file mode 100644 index 000000000000..28300ad80bb7 --- /dev/null +++ b/drivers/dma/fsl-edma-trace.c @@ -0,0 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define CREATE_TRACE_POINTS +#include "fsl-edma-common.h" diff --git a/drivers/dma/fsl-edma-trace.h b/drivers/dma/fsl-edma-trace.h new file 
mode 100644 index 000000000000..d3541301a247 --- /dev/null +++ b/drivers/dma/fsl-edma-trace.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2023 NXP. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM fsl_edma + +#if !defined(__LINUX_FSL_EDMA_TRACE) || defined(TRACE_HEADER_MULTI_READ) +#define __LINUX_FSL_EDMA_TRACE + +#include <linux/types.h> +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(edma_log_io, + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), + TP_ARGS(edma, addr, value), + TP_STRUCT__entry( + __field(struct fsl_edma_engine *, edma) + __field(void __iomem *, addr) + __field(u32, value) + ), + TP_fast_assign( + __entry->edma = edma; + __entry->addr = addr; + __entry->value = value; + ), + TP_printk("offset %08x: value %08x", + (u32)(__entry->addr - __entry->edma->membase), __entry->value) +); + +DEFINE_EVENT(edma_log_io, edma_readl, + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), + TP_ARGS(edma, addr, value) +); + +DEFINE_EVENT(edma_log_io, edma_writel, + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), + TP_ARGS(edma, addr, value) +); + +DEFINE_EVENT(edma_log_io, edma_readw, + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), + TP_ARGS(edma, addr, value) +); + +DEFINE_EVENT(edma_log_io, edma_writew, + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), + TP_ARGS(edma, addr, value) +); + +DEFINE_EVENT(edma_log_io, edma_readb, + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), + TP_ARGS(edma, addr, value) +); + +DEFINE_EVENT(edma_log_io, edma_writeb, + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), + TP_ARGS(edma, addr, value) +); + +DECLARE_EVENT_CLASS(edma_log_tcd, + TP_PROTO(struct fsl_edma_chan *chan, void *tcd), + TP_ARGS(chan, tcd), + TP_STRUCT__entry( + __field(u64, saddr) + __field(u16, soff) + __field(u16, attr) + __field(u32, nbytes) + __field(u64, slast) + __field(u64, daddr) + __field(u16, doff) + __field(u16, citer) + __field(u64, dlast_sga) + __field(u16, csr) + __field(u16, biter) + + ), + TP_fast_assign( + __entry->saddr = fsl_edma_get_tcd_to_cpu(chan, tcd, saddr), + __entry->soff = fsl_edma_get_tcd_to_cpu(chan, tcd, soff), + __entry->attr = fsl_edma_get_tcd_to_cpu(chan, tcd, attr), + __entry->nbytes = fsl_edma_get_tcd_to_cpu(chan, tcd, nbytes), + __entry->slast = fsl_edma_get_tcd_to_cpu(chan, tcd, slast), + __entry->daddr = fsl_edma_get_tcd_to_cpu(chan, tcd, daddr), + __entry->doff = fsl_edma_get_tcd_to_cpu(chan, tcd, doff), + __entry->citer = fsl_edma_get_tcd_to_cpu(chan, tcd, citer), + __entry->dlast_sga = fsl_edma_get_tcd_to_cpu(chan, tcd, dlast_sga), + __entry->csr = fsl_edma_get_tcd_to_cpu(chan, tcd, csr), + __entry->biter = fsl_edma_get_tcd_to_cpu(chan, tcd, biter); + ), + TP_printk("\n==== TCD =====\n" + " saddr: 0x%016llx\n" + " soff: 0x%04x\n" + " attr: 0x%04x\n" + " nbytes: 0x%08x\n" + " slast: 0x%016llx\n" + " daddr: 0x%016llx\n" + " doff: 0x%04x\n" + " citer: 0x%04x\n" + " dlast: 0x%016llx\n" + " csr: 0x%04x\n" + " biter: 0x%04x\n", + __entry->saddr, + __entry->soff, + __entry->attr, + __entry->nbytes, + __entry->slast, + __entry->daddr, + __entry->doff, + __entry->citer, + __entry->dlast_sga, + __entry->csr, + __entry->biter) +); + +DEFINE_EVENT(edma_log_tcd, edma_fill_tcd, + TP_PROTO(struct fsl_edma_chan *chan, void *tcd), + TP_ARGS(chan, tcd) +); + +#endif + +/* this part must be outside header guard */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+ +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE fsl-edma-trace + +#include <trace/define_trace.h> diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 1398814d8fbb..e3505e56784b 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -598,7 +598,9 @@ static int idma64_probe(struct idma64_chip *chip) idma64->dma.dev = chip->sysdev; - dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK); + ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK); + if (ret) + return ret; ret = dma_async_device_register(&idma64->dma); if (ret) diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index 39935071174a..57f1bf2ab20b 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -577,7 +577,6 @@ void idxd_wq_del_cdev(struct idxd_wq *wq) struct idxd_cdev *idxd_cdev; idxd_cdev = wq->idxd_cdev; - ida_destroy(&file_ida); wq->idxd_cdev = NULL; cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev)); put_device(cdev_dev(idxd_cdev)); @@ -593,6 +592,14 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) if (idxd->state != IDXD_DEV_ENABLED) return -ENXIO; + mutex_lock(&wq->wq_lock); + + if (!idxd_wq_driver_name_match(wq, dev)) { + idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; + rc = -ENODEV; + goto wq_err; + } + /* * User type WQ is enabled only when SVA is enabled for two reasons: * - If no IOMMU or IOMMU Passthrough without SVA, userspace @@ -608,14 +615,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) dev_dbg(&idxd->pdev->dev, "User type WQ cannot be enabled without SVA.\n"); - return -EOPNOTSUPP; - } - - mutex_lock(&wq->wq_lock); - - if (!idxd_wq_driver_name_match(wq, dev)) { - idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; - rc = -ENODEV; + rc = -EOPNOTSUPP; goto wq_err; } diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 9b42f5e96b1e..003e1580b902 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -24,6 +24,7 @@ #include <linux/semaphore.h> #include <linux/spinlock.h> #include <linux/device.h> +#include <linux/genalloc.h> #include <linux/dma-mapping.h> #include <linux/firmware.h> #include <linux/slab.h> @@ -137,7 +138,11 @@ * 0: Source on AIPS * 12 Destination Bit(DP) 1: Destination on SPBA * 0: Destination on AIPS - * 13-15 --------- MUST BE 0 + * 13 Source FIFO 1: Source is dual FIFO + * 0: Source is single FIFO + * 14 Destination FIFO 1: Destination is dual FIFO + * 0: Destination is single FIFO + * 15 --------- MUST BE 0 * 16-23 Higher WML HWML * 24-27 N Total number of samples after * which Pad adding/Swallowing @@ -168,6 +173,8 @@ #define SDMA_WATERMARK_LEVEL_SPDIF BIT(10) #define SDMA_WATERMARK_LEVEL_SP BIT(11) #define SDMA_WATERMARK_LEVEL_DP BIT(12) +#define SDMA_WATERMARK_LEVEL_SD BIT(13) +#define SDMA_WATERMARK_LEVEL_DD BIT(14) #define SDMA_WATERMARK_LEVEL_HWML (0xFF << 16) #define SDMA_WATERMARK_LEVEL_LWE BIT(28) #define SDMA_WATERMARK_LEVEL_HWE BIT(29) @@ -175,6 +182,7 @@ #define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) #define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \ @@ -232,20 +240,23 @@ struct sdma_script_start_addrs { s32 utra_addr; s32 ram_code_start_addr; /* End of v1 array */ - s32 mcu_2_ssish_addr; + union { s32 v1_end; s32 mcu_2_ssish_addr; }; s32 ssish_2_mcu_addr; s32 hdmi_dma_addr; /* End of v2 array */ - s32 zcanfd_2_mcu_addr; + union { s32 v2_end; s32 zcanfd_2_mcu_addr; }; s32 zqspi_2_mcu_addr; s32 mcu_2_ecspi_addr; s32 
mcu_2_sai_addr; s32 sai_2_mcu_addr; s32 uart_2_mcu_rom_addr; s32 uartsh_2_mcu_rom_addr; + s32 i2c_2_mcu_addr; + s32 mcu_2_i2c_addr; /* End of v3 array */ - s32 mcu_2_zqspi_addr; + union { s32 v3_end; s32 mcu_2_zqspi_addr; }; /* End of v4 array */ + s32 v4_end[0]; }; /* @@ -531,6 +542,7 @@ struct sdma_engine { /* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/ bool clk_ratio; bool fw_loaded; + struct gen_pool *iram_pool; }; static int sdma_config_write(struct dma_chan *chan, @@ -1072,6 +1084,11 @@ static int sdma_get_pc(struct sdma_channel *sdmac, per_2_emi = sdma->script_addrs->sai_2_mcu_addr; emi_2_per = sdma->script_addrs->mcu_2_sai_addr; break; + case IMX_DMATYPE_I2C: + per_2_emi = sdma->script_addrs->i2c_2_mcu_addr; + emi_2_per = sdma->script_addrs->mcu_2_i2c_addr; + sdmac->is_ram_script = true; + break; case IMX_DMATYPE_HDMI: emi_2_per = sdma->script_addrs->hdmi_dma_addr; sdmac->is_ram_script = true; @@ -1255,6 +1272,16 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac) sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP; sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT; + + /* + * Limitation: The p2p script support dual fifos in maximum, + * So when fifo number is larger than 1, force enable dual + * fifos. + */ + if (sdmac->n_fifos_src > 1) + sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SD; + if (sdmac->n_fifos_dst > 1) + sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DD; } static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac) @@ -1358,8 +1385,14 @@ static int sdma_request_channel0(struct sdma_engine *sdma) { int ret = -EBUSY; - sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys, - GFP_NOWAIT); + if (sdma->iram_pool) + sdma->bd0 = gen_pool_dma_alloc(sdma->iram_pool, + sizeof(struct sdma_buffer_descriptor), + &sdma->bd0_phys); + else + sdma->bd0 = dma_alloc_coherent(sdma->dev, + sizeof(struct sdma_buffer_descriptor), + &sdma->bd0_phys, GFP_NOWAIT); if (!sdma->bd0) { ret = -ENOMEM; goto out; @@ -1379,10 +1412,14 @@ out: static int sdma_alloc_bd(struct sdma_desc *desc) { u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); + struct sdma_engine *sdma = desc->sdmac->sdma; int ret = 0; - desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size, - &desc->bd_phys, GFP_NOWAIT); + if (sdma->iram_pool) + desc->bd = gen_pool_dma_alloc(sdma->iram_pool, bd_size, &desc->bd_phys); + else + desc->bd = dma_alloc_coherent(sdma->dev, bd_size, &desc->bd_phys, GFP_NOWAIT); + if (!desc->bd) { ret = -ENOMEM; goto out; @@ -1394,9 +1431,12 @@ out: static void sdma_free_bd(struct sdma_desc *desc) { u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); + struct sdma_engine *sdma = desc->sdmac->sdma; - dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd, - desc->bd_phys); + if (sdma->iram_pool) + gen_pool_free(sdma->iram_pool, (unsigned long)desc->bd, bd_size); + else + dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd, desc->bd_phys); } static void sdma_desc_free(struct virt_dma_desc *vd) @@ -1643,6 +1683,9 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( if (count & 3 || sg->dma_address & 3) goto err_bd_out; break; + case DMA_SLAVE_BUSWIDTH_3_BYTES: + bd->mode.command = 3; + break; case DMA_SLAVE_BUSWIDTH_2_BYTES: bd->mode.command = 2; if (count & 1 || sg->dma_address & 1) @@ -1880,10 +1923,17 @@ static void sdma_issue_pending(struct dma_chan *chan) spin_unlock_irqrestore(&sdmac->vc.lock, flags); } -#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 -#define 
SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38 -#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 45 -#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 46 +#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 \ +(offsetof(struct sdma_script_start_addrs, v1_end) / sizeof(s32)) + +#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 \ +(offsetof(struct sdma_script_start_addrs, v2_end) / sizeof(s32)) + +#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 \ +(offsetof(struct sdma_script_start_addrs, v3_end) / sizeof(s32)) + +#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 \ +(offsetof(struct sdma_script_start_addrs, v4_end) / sizeof(s32)) static void sdma_add_scripts(struct sdma_engine *sdma, const struct sdma_script_start_addrs *addr) @@ -2068,6 +2118,7 @@ static int sdma_init(struct sdma_engine *sdma) { int i, ret; dma_addr_t ccb_phys; + int ccbsize; ret = clk_enable(sdma->clk_ipg); if (ret) @@ -2083,10 +2134,14 @@ static int sdma_init(struct sdma_engine *sdma) /* Be sure SDMA has not started yet */ writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); - sdma->channel_control = dma_alloc_coherent(sdma->dev, - MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) + - sizeof(struct sdma_context_data), - &ccb_phys, GFP_KERNEL); + ccbsize = MAX_DMA_CHANNELS * (sizeof(struct sdma_channel_control) + + sizeof(struct sdma_context_data)); + + if (sdma->iram_pool) + sdma->channel_control = gen_pool_dma_alloc(sdma->iram_pool, ccbsize, &ccb_phys); + else + sdma->channel_control = dma_alloc_coherent(sdma->dev, ccbsize, &ccb_phys, + GFP_KERNEL); if (!sdma->channel_control) { ret = -ENOMEM; @@ -2272,6 +2327,12 @@ static int sdma_probe(struct platform_device *pdev) vchan_init(&sdmac->vc, &sdma->dma_device); } + if (np) { + sdma->iram_pool = of_gen_pool_get(np, "iram", 0); + if (sdma->iram_pool) + dev_info(&pdev->dev, "alloc bd from iram.\n"); + } + ret = sdma_init(sdma); if (ret) goto err_init; diff --git a/drivers/dma/mcf-edma-main.c b/drivers/dma/mcf-edma-main.c index dba631783876..78c606f6d002 100644 --- a/drivers/dma/mcf-edma-main.c +++ b/drivers/dma/mcf-edma-main.c @@ -195,7 +195,7 @@ static int mcf_edma_probe(struct platform_device *pdev) struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i]; mcf_chan->edma = mcf_edma; - mcf_chan->slave_id = i; + mcf_chan->srcid = i; mcf_chan->idle = true; mcf_chan->dma_dir = DMA_NONE; mcf_chan->vchan.desc_free = fsl_edma_free_desc; @@ -277,7 +277,7 @@ bool mcf_edma_filter_fn(struct dma_chan *chan, void *param) if (chan->device->dev->driver == &mcf_edma_driver.driver) { struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan); - return (mcf_chan->slave_id == (uintptr_t)param); + return (mcf_chan->srcid == (uintptr_t)param); } return false; diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index c359decc07a3..6b2793b07694 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -155,11 +155,6 @@ static inline struct device *chan2dev(struct dma_chan *chan) return &chan->dev->device; } -static inline struct device *chan2parent(struct dma_chan *chan) -{ - return chan->dev->device.parent; -} - static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan) { diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 202ac95227cb..721b4ac0857a 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -50,7 +50,6 @@ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> -#include <linux/of_dma.h> #include <linux/property.h> #include <linux/delay.h> #include <linux/acpi.h> @@ -947,22 +946,12 @@ static const struct acpi_device_id hidma_acpi_ids[] = { 
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids); #endif -static const struct of_device_id hidma_match[] = { - {.compatible = "qcom,hidma-1.0",}, - {.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),}, - {.compatible = "qcom,hidma-1.2", - .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),}, - {}, -}; -MODULE_DEVICE_TABLE(of, hidma_match); - static struct platform_driver hidma_driver = { .probe = hidma_probe, .remove_new = hidma_remove, .shutdown = hidma_shutdown, .driver = { .name = "hidma", - .of_match_table = hidma_match, .acpi_match_table = ACPI_PTR(hidma_acpi_ids), }, }; diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c index 1d675f31252b..bb883e138ebf 100644 --- a/drivers/dma/qcom/hidma_mgmt.c +++ b/drivers/dma/qcom/hidma_mgmt.c @@ -7,12 +7,7 @@ #include <linux/dmaengine.h> #include <linux/acpi.h> -#include <linux/of.h> #include <linux/property.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> -#include <linux/of_platform.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/module.h> #include <linux/uaccess.h> @@ -327,115 +322,13 @@ static const struct acpi_device_id hidma_mgmt_acpi_ids[] = { MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids); #endif -static const struct of_device_id hidma_mgmt_match[] = { - {.compatible = "qcom,hidma-mgmt-1.0",}, - {}, -}; -MODULE_DEVICE_TABLE(of, hidma_mgmt_match); - static struct platform_driver hidma_mgmt_driver = { .probe = hidma_mgmt_probe, .driver = { .name = "hidma-mgmt", - .of_match_table = hidma_mgmt_match, .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids), }, }; -#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) -static int object_counter; - -static int __init hidma_mgmt_of_populate_channels(struct device_node *np) -{ - struct platform_device *pdev_parent = of_find_device_by_node(np); - struct platform_device_info pdevinfo; - struct device_node *child; - struct resource *res; - int ret = 0; - - /* allocate a resource array */ - res = kcalloc(3, sizeof(*res), GFP_KERNEL); - if (!res) - return -ENOMEM; - - for_each_available_child_of_node(np, child) { - struct platform_device *new_pdev; - - ret = of_address_to_resource(child, 0, &res[0]); - if (!ret) - goto out; - - ret = of_address_to_resource(child, 1, &res[1]); - if (!ret) - goto out; - - ret = of_irq_to_resource(child, 0, &res[2]); - if (ret <= 0) - goto out; - - memset(&pdevinfo, 0, sizeof(pdevinfo)); - pdevinfo.fwnode = &child->fwnode; - pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL; - pdevinfo.name = child->name; - pdevinfo.id = object_counter++; - pdevinfo.res = res; - pdevinfo.num_res = 3; - pdevinfo.data = NULL; - pdevinfo.size_data = 0; - pdevinfo.dma_mask = DMA_BIT_MASK(64); - new_pdev = platform_device_register_full(&pdevinfo); - if (IS_ERR(new_pdev)) { - ret = PTR_ERR(new_pdev); - goto out; - } - new_pdev->dev.of_node = child; - of_dma_configure(&new_pdev->dev, child, true); - /* - * It is assumed that calling of_msi_configure is safe on - * platforms with or without MSI support. 
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index e9f5250fbe4d..59d9eabc8b67 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -81,6 +81,8 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
  */
 static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
 {
+	lockdep_assert_held(&vc->lock);
+
 	list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
 	return !list_empty(&vc->desc_issued);
 }
@@ -96,6 +98,8 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
 	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
 	dma_cookie_t cookie;
 
+	lockdep_assert_held(&vc->lock);
+
 	cookie = vd->tx.cookie;
 	dma_cookie_complete(&vd->tx);
 	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
@@ -146,6 +150,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
 {
 	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
 
+	lockdep_assert_held(&vc->lock);
+
 	list_add_tail(&vd->node, &vc->desc_terminated);
 
 	if (vc->cyclic == vd)
@@ -160,6 +166,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
  */
 static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 {
+	lockdep_assert_held(&vc->lock);
+
 	return list_first_entry_or_null(&vc->desc_issued,
 					struct virt_dma_desc, node);
 }
@@ -177,6 +185,8 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 					     struct list_head *head)
 {
+	lockdep_assert_held(&vc->lock);
+
 	list_splice_tail_init(&vc->desc_allocated, head);
 	list_splice_tail_init(&vc->desc_submitted, head);
 	list_splice_tail_init(&vc->desc_issued, head);
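The virt-dma.h hunks above only add lockdep annotations; the calling convention is unchanged, since callers of these helpers are already expected to hold vc->lock. A typical caller is sketched below with an invented driver name ("foo") so the new lockdep_assert_held() checks are visible in context; this is a sketch of common dmaengine driver practice, not code from the patch.

/* Sketch of a typical issue_pending callback (hypothetical "foo" driver). */
#include "virt-dma.h"		/* drivers/dma/virt-dma.h, path assumed */

static void foo_dma_start_transfer(struct virt_dma_chan *vc)
{
	/* hypothetical: pop the next descriptor and program the hardware */
}

static void foo_dma_issue_pending(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	/* vchan_issue_pending() now asserts that vc->lock is held */
	if (vchan_issue_pending(vc))
		foo_dma_start_transfer(vc);
	spin_unlock_irqrestore(&vc->lock, flags);
}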
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 313b217388fe..e143a7330816 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -1307,6 +1307,7 @@ static const struct platform_device_id xdma_id_table[] = {
 	{ "xdma", 0},
 	{ },
 };
+MODULE_DEVICE_TABLE(platform, xdma_id_table);
 
 static struct platform_driver xdma_driver = {
 	.driver = {
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index eb0637d90342..36bd4825d389 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -1043,9 +1043,8 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
 static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
 {
 	struct xilinx_dpdma_tx_desc *active;
-	unsigned long flags;
 
-	spin_lock_irqsave(&chan->lock, flags);
+	spin_lock(&chan->lock);
 
 	xilinx_dpdma_debugfs_desc_done_irq(chan);
 
@@ -1057,7 +1056,7 @@ static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
 			"chan%u: DONE IRQ with no active descriptor!\n",
 			chan->id);
 
-	spin_unlock_irqrestore(&chan->lock, flags);
+	spin_unlock(&chan->lock);
 }
 
 /**
@@ -1072,10 +1071,9 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
 {
 	struct xilinx_dpdma_tx_desc *pending;
 	struct xilinx_dpdma_sw_desc *sw_desc;
-	unsigned long flags;
 	u32 desc_id;
 
-	spin_lock_irqsave(&chan->lock, flags);
+	spin_lock(&chan->lock);
 
 	pending = chan->desc.pending;
 	if (!chan->running || !pending)
@@ -1108,7 +1106,7 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
 	spin_unlock(&chan->vchan.lock);
 
 out:
-	spin_unlock_irqrestore(&chan->lock, flags);
+	spin_unlock(&chan->lock);
 }
 
 /**
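As context for the one-line xdma.c fix above: without a MODULE_DEVICE_TABLE() entry for the platform ID table, no modalias is emitted into the module, so udev/modprobe cannot autoload it when a matching platform device appears. The minimal shape of such a table is sketched below with placeholder names; this is an illustration, not code from the xdma driver.

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_dma_probe(struct platform_device *pdev)
{
	return 0;	/* hypothetical probe */
}

static const struct platform_device_id foo_id_table[] = {
	{ "foo-dma", 0 },
	{ /* sentinel */ },
};
/* Emits the "platform:foo-dma" modalias so modprobe can find this module. */
MODULE_DEVICE_TABLE(platform, foo_id_table);

static struct platform_driver foo_driver = {
	.probe = foo_dma_probe,
	.driver = {
		.name = "foo-dma",
	},
	.id_table = foo_id_table,
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");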