Diffstat (limited to 'drivers/ntb/ntb_transport.c')
 drivers/ntb/ntb_transport.c | 33 ++++++++++++++++++++++++---------
 1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 77e55debeed6..a22ea4a4b202 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -314,7 +314,7 @@ static void ntb_transport_bus_remove(struct device *dev)
put_device(dev);
}
-static struct bus_type ntb_transport_bus = {
+static const struct bus_type ntb_transport_bus = {
.name = "ntb_transport",
.match = ntb_transport_bus_match,
.probe = ntb_transport_bus_probe,
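For context, the driver core registration helpers have accepted a const pointer for some time, so the constified definition above registers unchanged. A minimal sketch, assuming a kernel where bus_register() takes a const struct bus_type * (the bus name and init function are placeholders):

static const struct bus_type example_bus = {
	.name = "example_bus",
};

static int __init example_bus_init(void)
{
	/* bus_register() accepts a const struct bus_type * here */
	return bus_register(&example_bus);
}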
@@ -377,6 +377,8 @@ EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);
* @device_name: Name of NTB client device
*
* Register an NTB client device with the NTB transport layer
+ *
+ * Returns: %0 on success or -errno code on error
*/
int ntb_transport_register_client_dev(char *device_name)
{
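As a usage note, here is a hedged sketch of a client consuming the newly documented return convention (the device name "example_client" and the init function are hypothetical):

static int __init example_client_init(void)
{
	int rc;

	/* returns 0 on success or a negative errno on failure */
	rc = ntb_transport_register_client_dev("example_client");
	if (rc)
		return rc;

	return 0;
}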
@@ -807,16 +809,29 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
}
static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw,
- struct device *dma_dev, size_t align)
+ struct device *ntb_dev, size_t align)
{
dma_addr_t dma_addr;
void *alloc_addr, *virt_addr;
int rc;
- alloc_addr = dma_alloc_coherent(dma_dev, mw->alloc_size,
- &dma_addr, GFP_KERNEL);
+ /*
+ * The buffer here is allocated against the NTB device. The reason to
+ * use a dma_alloc_*() call is to allocate a large, IOVA-contiguous
+ * buffer backing the NTB BAR for the remote host to write to. During
+ * receive processing, data is copied out of the receive buffer into a
+ * kernel skbuff. When a DMA device is used, dma_map_page() is called
+ * on the kvaddr of the receive buffer (from dma_alloc_*()) and the
+ * buffer is remapped against the DMA device. This looks like a double
+ * DMA mapping, but the first mapping is against the NTB device and
+ * the second against the DMA device. DMA_ATTR_FORCE_CONTIGUOUS is
+ * necessary so that the later dma_map_page() does not fail.
+ */
+ alloc_addr = dma_alloc_attrs(ntb_dev, mw->alloc_size,
+ &dma_addr, GFP_KERNEL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
if (!alloc_addr) {
- dev_err(dma_dev, "Unable to alloc MW buff of size %zu\n",
+ dev_err(ntb_dev, "Unable to alloc MW buff of size %zu\n",
mw->alloc_size);
return -ENOMEM;
}
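To make the double-mapping comment concrete, here is a hedged sketch of the second mapping as it would occur later during receive processing (the helper name, "dma_dev", "buf", and "len" are illustrative, not the driver's actual rx path):

/*
 * Remap a chunk of the MW buffer (allocated above with
 * dma_alloc_attrs() against the NTB device) for the DMA engine so it
 * can read received data out of it.
 */
static int example_map_rx_chunk(struct device *dma_dev, void *buf,
				size_t len, dma_addr_t *addr)
{
	/*
	 * virt_to_page() is only safe because DMA_ATTR_FORCE_CONTIGUOUS
	 * guaranteed a physically contiguous allocation.
	 */
	*addr = dma_map_page(dma_dev, virt_to_page(buf),
			     offset_in_page(buf), len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, *addr))
		return -EIO;	/* caller falls back to a CPU copy */

	return 0;
}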
@@ -845,7 +860,7 @@ static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw,
return 0;
err:
- dma_free_coherent(dma_dev, mw->alloc_size, alloc_addr, dma_addr);
+ dma_free_coherent(ntb_dev, mw->alloc_size, alloc_addr, dma_addr);
return rc;
}
@@ -1966,9 +1981,9 @@ static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
/**
* ntb_transport_create_queue - Create a new NTB transport layer queue
- * @rx_handler: receive callback function
- * @tx_handler: transmit callback function
- * @event_handler: event callback function
+ * @data: pointer for callback data
+ * @client_dev: &struct device pointer
+ * @handlers: pointer to various ntb queue (callback) handlers
*
* Create a new NTB transport layer queue and provide the queue with a callback
* routine for both transmit and receive. The receive callback routine will be
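For reference, a hedged sketch of a client wiring up the three documented parameters; the handler signatures follow struct ntb_queue_handlers in include/linux/ntb_transport.h, and the callback bodies are placeholders:

static void example_rx(struct ntb_transport_qp *qp, void *qp_data,
		       void *data, int len)
{
	/* consume the received buffer */
}

static void example_tx(struct ntb_transport_qp *qp, void *qp_data,
		       void *data, int len)
{
	/* reclaim the transmitted buffer */
}

static void example_event(void *data, int status)
{
	/* react to link state events */
}

static const struct ntb_queue_handlers example_handlers = {
	.rx_handler = example_rx,
	.tx_handler = example_tx,
	.event_handler = example_event,
};

/* qp = ntb_transport_create_queue(priv, client_dev, &example_handlers); */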