Diffstat (limited to 'drivers/iio/buffer/industrialio-buffer-dmaengine.c')
-rw-r--r-- | drivers/iio/buffer/industrialio-buffer-dmaengine.c | 144
1 file changed, 100 insertions(+), 44 deletions(-)
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 614e1c4189a9..e9d9a7d39fe1 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -206,39 +206,29 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
 
 /**
  * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
- * @dev: DMA channel consumer device
- * @channel: DMA channel name, typically "rx".
+ * @chan: DMA channel.
  *
  * This allocates a new IIO buffer which internally uses the DMAengine framework
- * to perform its transfers. The parent device will be used to request the DMA
- * channel.
+ * to perform its transfers.
  *
  * Once done using the buffer iio_dmaengine_buffer_free() should be used to
  * release it.
  */
-static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
-	const char *channel)
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct dma_chan *chan)
 {
 	struct dmaengine_buffer *dmaengine_buffer;
 	unsigned int width, src_width, dest_width;
 	struct dma_slave_caps caps;
-	struct dma_chan *chan;
 	int ret;
 
+	ret = dma_get_slave_caps(chan, &caps);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
 	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
 	if (!dmaengine_buffer)
 		return ERR_PTR(-ENOMEM);
 
-	chan = dma_request_chan(dev, channel);
-	if (IS_ERR(chan)) {
-		ret = PTR_ERR(chan);
-		goto err_free;
-	}
-
-	ret = dma_get_slave_caps(chan, &caps);
-	if (ret < 0)
-		goto err_release;
-
 	/* Needs to be aligned to the maximum of the minimums */
 	if (caps.src_addr_widths)
 		src_width = __ffs(caps.src_addr_widths);
@@ -262,12 +252,6 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
 	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;
 
 	return &dmaengine_buffer->queue.buffer;
-
-err_release:
-	dma_release_channel(chan);
-err_free:
-	kfree(dmaengine_buffer);
-	return ERR_PTR(ret);
 }
 
 /**
@@ -276,17 +260,57 @@ err_free:
  *
  * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
  */
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
+static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
 {
 	struct dmaengine_buffer *dmaengine_buffer =
 		iio_buffer_to_dmaengine_buffer(buffer);
 
 	iio_dma_buffer_exit(&dmaengine_buffer->queue);
-	dma_release_channel(dmaengine_buffer->chan);
-
 	iio_buffer_put(buffer);
 }
-EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, "IIO_DMAENGINE_BUFFER");
+
+/**
+ * iio_dmaengine_buffer_teardown() - Releases DMA channel and frees buffer
+ * @buffer: Buffer to free
+ *
+ * Releases the DMA channel and frees the buffer previously setup with
+ * iio_dmaengine_buffer_setup_ext().
+ */
+void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer)
+{
+	struct dmaengine_buffer *dmaengine_buffer =
+		iio_buffer_to_dmaengine_buffer(buffer);
+	struct dma_chan *chan = dmaengine_buffer->chan;
+
+	iio_dmaengine_buffer_free(buffer);
+	dma_release_channel(chan);
+}
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_teardown, "IIO_DMAENGINE_BUFFER");
+
+static struct iio_buffer
+*__iio_dmaengine_buffer_setup_ext(struct iio_dev *indio_dev,
+				  struct dma_chan *chan,
+				  enum iio_buffer_direction dir)
+{
+	struct iio_buffer *buffer;
+	int ret;
+
+	buffer = iio_dmaengine_buffer_alloc(chan);
+	if (IS_ERR(buffer))
+		return ERR_CAST(buffer);
+
+	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+
+	buffer->direction = dir;
+
+	ret = iio_device_attach_buffer(indio_dev, buffer);
+	if (ret) {
+		iio_dmaengine_buffer_free(buffer);
+		return ERR_PTR(ret);
+	}
+
+	return buffer;
+}
 
 /**
  * iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
@@ -300,7 +324,7 @@ EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, "IIO_DMAENGINE_BUFFER");
  * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
  * IIO device.
  *
- * Once done using the buffer iio_dmaengine_buffer_free() should be used to
+ * Once done using the buffer iio_dmaengine_buffer_teardown() should be used to
  * release it.
  */
 struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
@@ -308,30 +332,24 @@ struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
 						  const char *channel,
 						  enum iio_buffer_direction dir)
 {
+	struct dma_chan *chan;
 	struct iio_buffer *buffer;
-	int ret;
-
-	buffer = iio_dmaengine_buffer_alloc(dev, channel);
-	if (IS_ERR(buffer))
-		return ERR_CAST(buffer);
-
-	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 
-	buffer->direction = dir;
+	chan = dma_request_chan(dev, channel);
+	if (IS_ERR(chan))
+		return ERR_CAST(chan);
 
-	ret = iio_device_attach_buffer(indio_dev, buffer);
-	if (ret) {
-		iio_dmaengine_buffer_free(buffer);
-		return ERR_PTR(ret);
-	}
+	buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir);
+	if (IS_ERR(buffer))
+		dma_release_channel(chan);
 
 	return buffer;
 }
 EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");
 
-static void __devm_iio_dmaengine_buffer_free(void *buffer)
+static void devm_iio_dmaengine_buffer_teardown(void *buffer)
 {
-	iio_dmaengine_buffer_free(buffer);
+	iio_dmaengine_buffer_teardown(buffer);
 }
 
 /**
@@ -357,11 +375,49 @@ int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
 	if (IS_ERR(buffer))
 		return PTR_ERR(buffer);
 
-	return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
+	return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_teardown,
 					buffer);
 }
 EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");
 
+static void devm_iio_dmaengine_buffer_free(void *buffer)
+{
+	iio_dmaengine_buffer_free(buffer);
+}
+
+/**
+ * devm_iio_dmaengine_buffer_setup_with_handle() - Setup a DMA buffer for an
+ *						   IIO device
+ * @dev: Device for devm ownership
+ * @indio_dev: IIO device to which to attach this buffer.
+ * @chan: DMA channel
+ * @dir: Direction of buffer (in or out)
+ *
+ * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
+ * and attaches it to an IIO device with iio_device_attach_buffer().
+ * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
+ * IIO device.
+ *
+ * This is the same as devm_iio_dmaengine_buffer_setup_ext() except that the
+ * caller manages requesting and releasing the DMA channel handle.
+ */
+int devm_iio_dmaengine_buffer_setup_with_handle(struct device *dev,
+						struct iio_dev *indio_dev,
+						struct dma_chan *chan,
+						enum iio_buffer_direction dir)
+{
+	struct iio_buffer *buffer;
+
+	buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir);
+	if (IS_ERR(buffer))
+		return PTR_ERR(buffer);
+
+	return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_free,
+					buffer);
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_with_handle,
+		     "IIO_DMAENGINE_BUFFER");
+
 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
 MODULE_DESCRIPTION("DMA buffer for the IIO framework");
 MODULE_LICENSE("GPL");
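A note on the caller-visible effect of this patch: iio_dmaengine_buffer_free() becomes static, so out-of-tree or non-devm users of iio_dmaengine_buffer_setup_ext() must now pair it with the new iio_dmaengine_buffer_teardown(), which both frees the buffer and releases the channel the setup call requested. A minimal sketch of that pairing; the my_adc_* names and the "rx" channel string are illustrative assumptions, not from this patch:

#include <linux/err.h>
#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/iio.h>

/* Hypothetical driver hook: attach an RX DMA buffer at init time. */
static int my_adc_attach_buffer(struct device *dev, struct iio_dev *indio_dev,
				struct iio_buffer **out)
{
	struct iio_buffer *buffer;

	/* Still requests the DMA channel internally, as before. */
	buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, "rx",
						IIO_BUFFER_DIRECTION_IN);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	*out = buffer;
	return 0;
}

/* Hypothetical driver hook: tear down at remove time. */
static void my_adc_detach_buffer(struct iio_buffer *buffer)
{
	/* Frees the buffer and releases the DMA channel it holds. */
	iio_dmaengine_buffer_teardown(buffer);
}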
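And a sketch of the new caller-managed-channel path. Per the kernel-doc above, devm_iio_dmaengine_buffer_setup_with_handle() neither requests nor releases the channel, so the caller must do both; here the release is wired up as a devm action. Again, the my_adc_* names and the "rx" string are assumptions for illustration:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/iio.h>

static void my_adc_release_chan(void *chan)
{
	dma_release_channel(chan);
}

static int my_adc_probe_buffer(struct device *dev, struct iio_dev *indio_dev)
{
	struct dma_chan *chan;
	int ret;

	/* The caller owns the channel handle... */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ...so it must also arrange for the channel's release. */
	ret = devm_add_action_or_reset(dev, my_adc_release_chan, chan);
	if (ret)
		return ret;

	/* The devm action registered inside only frees the buffer. */
	return devm_iio_dmaengine_buffer_setup_with_handle(dev, indio_dev, chan,
							   IIO_BUFFER_DIRECTION_IN);
}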