author	Damian Hobson-Garcia <dhobsong@igel.co.jp>	2012-09-25 15:09:11 +0900
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2012-10-24 15:39:08 -0700
commit	0a0c3b5a24bd802b1ebbf99e0b01296647b8199b (patch)
tree	62dc10826157e8f3c1ed311ee175c2ab96b8a3b2 /drivers/uio/uio_dmem_genirq.c
parent	664df38b3c74656261d4227b4dd380cfa453f78f (diff)
Add new uio device for dynamic memory allocation
This device extends the uio_pdrv_genirq driver to provide limited dynamic memory allocation for UIO devices. This allows UIO devices to use CMA and IOMMU allocated memory regions.

This driver is based on the uio_pdrv_genirq driver and provides the same generic interrupt handling capabilities. Like uio_pdrv_genirq, a fixed number of memory regions, defined in the platform device's .resources field, are exported to userspace. This driver adds the ability to export additional regions whose number and size are known at boot time, but whose memory is not allocated until the uio device file is opened for the first time. When the device file is closed, the allocated memory block is freed.

Physical (DMA) addresses for the dynamic regions are provided to userspace via /sys/class/uio/uioX/maps/mapY/addr in the same way as static addresses are, while the uio device file is open. When no processes are holding the device file open, the address returned to userspace is DMA_ERROR_CODE.

Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
Signed-off-by: "Hans J. Koch" <hjk@hansjkoch.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
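For illustration, a minimal userspace sketch of how a process might consume one of the dynamic regions described above: opening the uio device file triggers the allocation, the DMA address is read back from /sys/class/uio/uioX/maps/mapY/addr, and the region is mapped via mmap() on the device file (UIO selects map Y through an offset of Y times the page size). The device node /dev/uio0, map index 1, and 1 MiB size used below are assumptions for illustration, not part of this patch.

/*
 * Hypothetical userspace example (not part of this patch): uio0, with
 * map1 assumed to be the first dynamic region, sized 1 MiB.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	unsigned long dma_addr;
	const size_t size = 1024 * 1024;	/* must match the configured region size */
	int fd, sysfd;
	void *region;

	/* Opening the uio device triggers the driver's open() hook, which
	 * allocates the dynamic regions with dma_alloc_coherent(). */
	fd = open("/dev/uio0", O_RDWR);
	if (fd < 0) { perror("open /dev/uio0"); return 1; }

	/* The DMA address is only valid while the device file is held open;
	 * otherwise the attribute reads back DMA_ERROR_CODE. */
	sysfd = open("/sys/class/uio/uio0/maps/map1/addr", O_RDONLY);
	if (sysfd < 0) { perror("open addr"); return 1; }
	n = read(sysfd, buf, sizeof(buf) - 1);
	close(sysfd);
	buf[n > 0 ? n : 0] = '\0';
	dma_addr = strtoul(buf, NULL, 0);
	printf("dynamic region DMA address: 0x%lx\n", dma_addr);

	/* UIO exposes map N at an mmap offset of N * page size. */
	region = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		      fd, 1 * sysconf(_SC_PAGESIZE));
	if (region == MAP_FAILED) { perror("mmap"); return 1; }

	/* ... hand dma_addr to the device and use the mapping ... */

	munmap(region, size);
	close(fd);	/* dropping the last reference frees the region */
	return 0;
}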
Diffstat (limited to 'drivers/uio/uio_dmem_genirq.c')
-rw-r--r--	drivers/uio/uio_dmem_genirq.c	354
1 files changed, 354 insertions, 0 deletions
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
new file mode 100644
index 000000000000..4d4dd008c8b9
--- /dev/null
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -0,0 +1,354 @@
+/*
+ * drivers/uio/uio_dmem_genirq.c
+ *
+ * Userspace I/O platform driver with generic IRQ handling code.
+ *
+ * Copyright (C) 2012 Damian Hobson-Garcia
+ *
+ * Based on uio_pdrv_genirq.c by Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/uio_driver.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_data/uio_dmem_genirq.h>
+#include <linux/stringify.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
+#define DRIVER_NAME "uio_dmem_genirq"
+
+struct uio_dmem_genirq_platdata {
+ struct uio_info *uioinfo;
+ spinlock_t lock;
+ unsigned long flags;
+ struct platform_device *pdev;
+ unsigned int dmem_region_start;
+ unsigned int num_dmem_regions;
+ struct mutex alloc_lock;
+ unsigned int refcnt;
+};
+
+static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
+{
+ struct uio_dmem_genirq_platdata *priv = info->priv;
+ struct uio_mem *uiomem;
+ int ret = 0;
+
+ uiomem = &priv->uioinfo->mem[priv->dmem_region_start];
+
+ mutex_lock(&priv->alloc_lock);
+ while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
+ void *addr;
+ if (!uiomem->size)
+ break;
+
+ addr = dma_alloc_coherent(&priv->pdev->dev, uiomem->size,
+ (dma_addr_t *)&uiomem->addr, GFP_KERNEL);
+ if (!addr) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ uiomem->internal_addr = addr;
+ ++uiomem;
+ }
+ priv->refcnt++;
+
+ mutex_unlock(&priv->alloc_lock);
+ /* Wait until the Runtime PM code has woken up the device */
+ pm_runtime_get_sync(&priv->pdev->dev);
+ return ret;
+}
+
+static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
+{
+ struct uio_dmem_genirq_platdata *priv = info->priv;
+ struct uio_mem *uiomem;
+
+ /* Tell the Runtime PM code that the device has become idle */
+ pm_runtime_put_sync(&priv->pdev->dev);
+
+ uiomem = &priv->uioinfo->mem[priv->dmem_region_start];
+
+ mutex_lock(&priv->alloc_lock);
+
+ priv->refcnt--;
+ while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
+ if (!uiomem->size)
+ break;
+
+ dma_free_coherent(&priv->pdev->dev, uiomem->size,
+ uiomem->internal_addr, uiomem->addr);
+ uiomem->addr = DMA_ERROR_CODE;
+ ++uiomem;
+ }
+
+ mutex_unlock(&priv->alloc_lock);
+ return 0;
+}
+
+static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info)
+{
+ struct uio_dmem_genirq_platdata *priv = dev_info->priv;
+
+ /* Just disable the interrupt in the interrupt controller, and
+ * remember the state so we can allow user space to enable it later.
+ */
+
+ if (!test_and_set_bit(0, &priv->flags))
+ disable_irq_nosync(irq);
+
+ return IRQ_HANDLED;
+}
+
+static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
+{
+ struct uio_dmem_genirq_platdata *priv = dev_info->priv;
+ unsigned long flags;
+
+ /* Allow user space to enable and disable the interrupt
+ * in the interrupt controller, but keep track of the
+ * state to prevent per-irq depth damage.
+ *
+ * Serialize this operation to support multiple tasks.
+ */
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (irq_on) {
+ if (test_and_clear_bit(0, &priv->flags))
+ enable_irq(dev_info->irq);
+ } else {
+ if (!test_and_set_bit(0, &priv->flags))
+ disable_irq(dev_info->irq);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int uio_dmem_genirq_probe(struct platform_device *pdev)
+{
+ struct uio_dmem_genirq_pdata *pdata = pdev->dev.platform_data;
+ struct uio_info *uioinfo = &pdata->uioinfo;
+ struct uio_dmem_genirq_platdata *priv;
+ struct uio_mem *uiomem;
+ int ret = -EINVAL;
+ int i;
+
+ if (!uioinfo) {
+ int irq;
+
+ /* alloc uioinfo for one device */
+ uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL);
+ if (!uioinfo) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "unable to kmalloc\n");
+ goto bad2;
+ }
+ uioinfo->name = pdev->dev.of_node->name;
+ uioinfo->version = "devicetree";
+
+ /* Multiple IRQs are not supported */
+ irq = platform_get_irq(pdev, 0);
+ if (irq == -ENXIO)
+ uioinfo->irq = UIO_IRQ_NONE;
+ else
+ uioinfo->irq = irq;
+ }
+
+ if (!uioinfo || !uioinfo->name || !uioinfo->version) {
+ dev_err(&pdev->dev, "missing platform_data\n");
+ goto bad0;
+ }
+
+ if (uioinfo->handler || uioinfo->irqcontrol ||
+ uioinfo->irq_flags & IRQF_SHARED) {
+ dev_err(&pdev->dev, "interrupt configuration error\n");
+ goto bad0;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "unable to kmalloc\n");
+ goto bad0;
+ }
+
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+
+ priv->uioinfo = uioinfo;
+ spin_lock_init(&priv->lock);
+ priv->flags = 0; /* interrupt is enabled to begin with */
+ priv->pdev = pdev;
+ mutex_init(&priv->alloc_lock);
+
+ if (!uioinfo->irq) {
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ\n");
+ goto bad0;
+ }
+ uioinfo->irq = ret;
+ }
+ uiomem = &uioinfo->mem[0];
+
+ for (i = 0; i < pdev->num_resources; ++i) {
+ struct resource *r = &pdev->resource[i];
+
+ if (r->flags != IORESOURCE_MEM)
+ continue;
+
+ if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
+ dev_warn(&pdev->dev, "device has more than "
+ __stringify(MAX_UIO_MAPS)
+ " I/O memory resources.\n");
+ break;
+ }
+
+ uiomem->memtype = UIO_MEM_PHYS;
+ uiomem->addr = r->start;
+ uiomem->size = resource_size(r);
+ ++uiomem;
+ }
+
+ priv->dmem_region_start = i;
+ priv->num_dmem_regions = pdata->num_dynamic_regions;
+
+ for (i = 0; i < pdata->num_dynamic_regions; ++i) {
+ if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
+ dev_warn(&pdev->dev, "device has more than "
+ __stringify(MAX_UIO_MAPS)
+ " dynamic and fixed memory regions.\n");
+ break;
+ }
+ uiomem->memtype = UIO_MEM_PHYS;
+ uiomem->addr = DMA_ERROR_CODE;
+ uiomem->size = pdata->dynamic_region_sizes[i];
+ ++uiomem;
+ }
+
+ while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
+ uiomem->size = 0;
+ ++uiomem;
+ }
+
+ /* This driver requires no hardware specific kernel code to handle
+ * interrupts. Instead, the interrupt handler simply disables the
+ * interrupt in the interrupt controller. User space is responsible
+ * for performing hardware specific acknowledge and re-enabling of
+ * the interrupt in the interrupt controller.
+ *
+ * Interrupt sharing is not supported.
+ */
+
+ uioinfo->handler = uio_dmem_genirq_handler;
+ uioinfo->irqcontrol = uio_dmem_genirq_irqcontrol;
+ uioinfo->open = uio_dmem_genirq_open;
+ uioinfo->release = uio_dmem_genirq_release;
+ uioinfo->priv = priv;
+
+ /* Enable Runtime PM for this device:
+ * The device starts in suspended state to allow the hardware to be
+ * turned off by default. The Runtime PM bus code should power on the
+ * hardware and enable clocks at open().
+ */
+ pm_runtime_enable(&pdev->dev);
+
+ ret = uio_register_device(&pdev->dev, priv->uioinfo);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register uio device\n");
+ goto bad1;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ return 0;
+ bad1:
+ kfree(priv);
+ pm_runtime_disable(&pdev->dev);
+ bad0:
+ /* kfree uioinfo for OF */
+ if (pdev->dev.of_node)
+ kfree(uioinfo);
+ bad2:
+ return ret;
+}
+
+static int uio_dmem_genirq_remove(struct platform_device *pdev)
+{
+ struct uio_dmem_genirq_platdata *priv = platform_get_drvdata(pdev);
+
+ uio_unregister_device(priv->uioinfo);
+ pm_runtime_disable(&pdev->dev);
+
+ priv->uioinfo->handler = NULL;
+ priv->uioinfo->irqcontrol = NULL;
+
+ /* kfree uioinfo for OF */
+ if (pdev->dev.of_node)
+ kfree(priv->uioinfo);
+
+ kfree(priv);
+ return 0;
+}
+
+static int uio_dmem_genirq_runtime_nop(struct device *dev)
+{
+ /* Runtime PM callback shared between ->runtime_suspend()
+ * and ->runtime_resume(). Simply returns success.
+ *
+ * In this driver pm_runtime_get_sync() and pm_runtime_put_sync()
+ * are used at open() and release() time. This allows the
+ * Runtime PM code to turn off power to the device while the
+ * device is unused, ie before open() and after release().
+ *
+ * This Runtime PM callback does not need to save or restore
+ * any registers since user space is responsible for hardware
+ * register reinitialization after open().
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops uio_dmem_genirq_dev_pm_ops = {
+ .runtime_suspend = uio_dmem_genirq_runtime_nop,
+ .runtime_resume = uio_dmem_genirq_runtime_nop,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id uio_of_genirq_match[] = {
+ { /* empty for now */ },
+};
+MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
+#else
+# define uio_of_genirq_match NULL
+#endif
+
+static struct platform_driver uio_dmem_genirq = {
+ .probe = uio_dmem_genirq_probe,
+ .remove = uio_dmem_genirq_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &uio_dmem_genirq_dev_pm_ops,
+ .of_match_table = uio_of_genirq_match,
+ },
+};
+
+module_platform_driver(uio_dmem_genirq);
+
+MODULE_AUTHOR("Damian Hobson-Garcia");
+MODULE_DESCRIPTION("Userspace I/O platform driver with dynamic memory.");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
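For reference, a hedged sketch of how board code might register a platform device against this driver, using the uio_dmem_genirq_pdata fields referenced in probe() above (uioinfo, dynamic_region_sizes, num_dynamic_regions). The platform device name matches DRIVER_NAME; the IRQ number, region sizes, and "example" identifiers are illustrative assumptions.

/* Illustrative board-file snippet, not part of this patch. */
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/platform_data/uio_dmem_genirq.h>

/* Sizes of the dynamic regions, allocated when the uio file is opened. */
static unsigned int example_dmem_sizes[] = {
	1 * 1024 * 1024,	/* 1 MiB */
	4 * 1024 * 1024,	/* 4 MiB */
};

static struct uio_dmem_genirq_pdata example_pdata = {
	.uioinfo = {
		.name = "example-dmem",
		.version = "0",
		/* .irq left at 0: probe() falls back to platform_get_irq() */
	},
	.dynamic_region_sizes = example_dmem_sizes,
	.num_dynamic_regions = ARRAY_SIZE(example_dmem_sizes),
};

static struct resource example_uio_resources[] = {
	{
		.start = 42,	/* assumed interrupt number */
		.end   = 42,
		.flags = IORESOURCE_IRQ,
	},
	/* Any fixed MMIO regions would be added here as IORESOURCE_MEM. */
};

static struct platform_device example_uio_device = {
	.name		= "uio_dmem_genirq",
	.id		= -1,
	.resource	= example_uio_resources,
	.num_resources	= ARRAY_SIZE(example_uio_resources),
	.dev = {
		.platform_data = &example_pdata,
	},
};

/* Registered from board init code, e.g. platform_device_register(&example_uio_device); */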