| author | Christoph Hellwig <hch@lst.de> | 2018-10-09 16:57:18 +0200 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2018-10-10 22:38:50 -0700 |
| commit | 1335d6fd65da11e9576c0a5dc6b8d65aadbfc9b6 (patch) | |
| tree | 4d77841efdc1ae3a9bcee0a39eb42418b4b9c275 /drivers/atm/fore200e.c | |
| parent | f3fadcb5644aa94c8197f1699798d95baa5d59a8 (diff) | |
fore200e: devirtualize dma alloc calls
There is no need for an indirection before calling the dma alloc
routines now that we store a struct device in struct fore200e.
Also remove the pointless GFP_ATOMIC for the sbus case, and fix up
the error handling by removing the 0 dma_addr test - some iommus
can return 0 as a perfectly valid bus address.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
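For readers less familiar with the pattern, here is a minimal sketch of what "devirtualizing" means in this context. It is plain userspace C with made-up names (struct bus_ops, generic_chunk_alloc), not driver code: once every bus variant ends up in the same dma_alloc_coherent()-backed implementation, the function-pointer indirection through the per-bus ops table no longer buys anything, and a direct call is simpler and easier for the compiler to inline.

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's types; the names are invented. */
struct chunk {
	void  *alloc_addr;
	size_t alloc_size;
};

struct bus_ops {
	/* "before": each bus type supplies its allocator via a function pointer */
	int (*chunk_alloc)(struct chunk *chunk, size_t size);
};

/* Both the "PCI" and "SBUS" variants do essentially the same thing, so the
 * indirection only hides that fact.  malloc() stands in for dma_alloc_coherent(). */
static int generic_chunk_alloc(struct chunk *chunk, size_t size)
{
	chunk->alloc_size = size;
	chunk->alloc_addr = malloc(size);
	/* test only the returned pointer; a handle of 0 could be perfectly valid */
	return chunk->alloc_addr ? 0 : -1;
}

static const struct bus_ops pci_ops  = { .chunk_alloc = generic_chunk_alloc };
static const struct bus_ops sbus_ops = { .chunk_alloc = generic_chunk_alloc };

int main(void)
{
	struct chunk c;

	/* before: indirect calls through the per-bus ops tables */
	if (pci_ops.chunk_alloc(&c, 64) == 0) {
		puts("pci (indirect) ok");
		free(c.alloc_addr);
	}
	if (sbus_ops.chunk_alloc(&c, 64) == 0) {
		puts("sbus (indirect) ok");
		free(c.alloc_addr);
	}

	/* after: both call sites use the one shared implementation directly */
	if (generic_chunk_alloc(&c, 64) == 0) {
		puts("direct ok");
		free(c.alloc_addr);
	}
	return 0;
}
```

This is the shape of the change in the diff below: a single fore200e_dma_chunk_alloc()/fore200e_dma_chunk_free() pair replaces the PCA and SBA copies, and the .dma_chunk_alloc/.dma_chunk_free entries disappear from both bus ops tables.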
Diffstat (limited to 'drivers/atm/fore200e.c')
-rw-r--r-- | drivers/atm/fore200e.c | 128 |
1 file changed, 45 insertions, 83 deletions
```diff
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index beeb71088560..86be269500a9 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -209,6 +209,34 @@ fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
     kfree(chunk->alloc_addr);
 }
 
+/*
+ * Allocate a DMA consistent chunk of memory intended to act as a communication
+ * mechanism (to hold descriptors, status, queues, etc.) shared by the driver
+ * and the adapter.
+ */
+static int
+fore200e_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
+		int size, int nbr, int alignment)
+{
+	/* returned chunks are page-aligned */
+	chunk->alloc_size = size * nbr;
+	chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size,
+					       &chunk->dma_addr, GFP_KERNEL);
+	if (!chunk->alloc_addr)
+		return -ENOMEM;
+	chunk->align_addr = chunk->alloc_addr;
+	return 0;
+}
+
+/*
+ * Free a DMA consistent chunk of memory.
+ */
+static void
+fore200e_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
+{
+	dma_free_coherent(fore200e->dev, chunk->alloc_size, chunk->alloc_addr,
+			chunk->dma_addr);
+}
 
 static void
 fore200e_spin(int msecs)
@@ -301,10 +329,10 @@ fore200e_uninit_bs_queue(struct fore200e* fore200e)
 	    struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
 
 	    if (status->alloc_addr)
-		fore200e->bus->dma_chunk_free(fore200e, status);
+		fore200e_dma_chunk_free(fore200e, status);
 
 	    if (rbd_block->alloc_addr)
-		fore200e->bus->dma_chunk_free(fore200e, rbd_block);
+		fore200e_dma_chunk_free(fore200e, rbd_block);
 	}
     }
 }
@@ -370,17 +398,17 @@ fore200e_shutdown(struct fore200e* fore200e)
 
 	/* fall through */
     case FORE200E_STATE_INIT_RXQ:
-	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
-	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
+	fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status);
+	fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
 
 	/* fall through */
     case FORE200E_STATE_INIT_TXQ:
-	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
-	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
+	fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status);
+	fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
 
 	/* fall through */
     case FORE200E_STATE_INIT_CMDQ:
-	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
+	fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
 
 	/* fall through */
     case FORE200E_STATE_INITIALIZE:
@@ -427,41 +455,6 @@ static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
     writel(cpu_to_le32(val), addr);
 }
 
-/* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
-   (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
-
-static int
-fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
-			     int size, int nbr, int alignment)
-{
-    /* returned chunks are page-aligned */
-    chunk->alloc_size = size * nbr;
-    chunk->alloc_addr = dma_alloc_coherent(fore200e->dev,
-					   chunk->alloc_size,
-					   &chunk->dma_addr,
-					   GFP_KERNEL);
-
-    if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
-	return -ENOMEM;
-
-    chunk->align_addr = chunk->alloc_addr;
-
-    return 0;
-}
-
-
-/* free a DMA consistent chunk of memory */
-
-static void
-fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
-{
-    dma_free_coherent(fore200e->dev,
-		      chunk->alloc_size,
-		      chunk->alloc_addr,
-		      chunk->dma_addr);
-}
-
-
 static int
 fore200e_pca_irq_check(struct fore200e* fore200e)
 {
@@ -631,8 +624,6 @@ static const struct fore200e_bus fore200e_pci_ops = {
 	.status_alignment = 32,
 	.read = fore200e_pca_read,
 	.write = fore200e_pca_write,
-	.dma_chunk_alloc = fore200e_pca_dma_chunk_alloc,
-	.dma_chunk_free = fore200e_pca_dma_chunk_free,
 	.configure = fore200e_pca_configure,
 	.map = fore200e_pca_map,
 	.reset = fore200e_pca_reset,
@@ -656,33 +647,6 @@ static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
 	sbus_writel(val, addr);
 }
 
-/* Allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
- * (to hold descriptors, status, queues, etc.) shared by the driver and the adapter.
- */
-static int fore200e_sba_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
-					int size, int nbr, int alignment)
-{
-	chunk->alloc_size = size * nbr;
-
-	/* returned chunks are page-aligned */
-	chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size,
-					       &chunk->dma_addr, GFP_ATOMIC);
-
-	if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
-		return -ENOMEM;
-
-	chunk->align_addr = chunk->alloc_addr;
-
-	return 0;
-}
-
-/* free a DVMA consistent chunk of memory */
-static void fore200e_sba_dma_chunk_free(struct fore200e *fore200e, struct chunk *chunk)
-{
-	dma_free_coherent(fore200e->dev, chunk->alloc_size,
-			  chunk->alloc_addr, chunk->dma_addr);
-}
-
 static void fore200e_sba_irq_enable(struct fore200e *fore200e)
 {
 	u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
@@ -796,8 +760,6 @@ static const struct fore200e_bus fore200e_sbus_ops = {
 	.status_alignment = 32,
 	.read = fore200e_sba_read,
 	.write = fore200e_sba_write,
-	.dma_chunk_alloc = fore200e_sba_dma_chunk_alloc,
-	.dma_chunk_free = fore200e_sba_dma_chunk_free,
 	.configure = fore200e_sba_configure,
 	.map = fore200e_sba_map,
 	.reset = fore200e_sba_reset,
@@ -2111,7 +2073,7 @@ static int fore200e_init_bs_queue(struct fore200e *fore200e)
 	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
 
 	    /* allocate and align the array of status words */
-	    if (fore200e->bus->dma_chunk_alloc(fore200e,
+	    if (fore200e_dma_chunk_alloc(fore200e,
 					       &bsq->status,
 					       sizeof(enum status),
 					       QUEUE_SIZE_BS,
@@ -2120,13 +2082,13 @@ static int fore200e_init_bs_queue(struct fore200e *fore200e)
 	    }
 
 	    /* allocate and align the array of receive buffer descriptors */
-	    if (fore200e->bus->dma_chunk_alloc(fore200e,
+	    if (fore200e_dma_chunk_alloc(fore200e,
 					       &bsq->rbd_block,
 					       sizeof(struct rbd_block),
 					       QUEUE_SIZE_BS,
 					       fore200e->bus->descr_alignment) < 0) {
 
-		fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
+		fore200e_dma_chunk_free(fore200e, &bsq->status);
 		return -ENOMEM;
 	    }
 
@@ -2167,7 +2129,7 @@ static int fore200e_init_rx_queue(struct fore200e *fore200e)
     DPRINTK(2, "receive queue is being initialized\n");
 
     /* allocate and align the array of status words */
-    if (fore200e->bus->dma_chunk_alloc(fore200e,
+    if (fore200e_dma_chunk_alloc(fore200e,
 				       &rxq->status,
 				       sizeof(enum status),
 				       QUEUE_SIZE_RX,
@@ -2176,13 +2138,13 @@
     }
 
     /* allocate and align the array of receive PDU descriptors */
-    if (fore200e->bus->dma_chunk_alloc(fore200e,
+    if (fore200e_dma_chunk_alloc(fore200e,
 				       &rxq->rpd,
 				       sizeof(struct rpd),
 				       QUEUE_SIZE_RX,
 				       fore200e->bus->descr_alignment) < 0) {
 
-	fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
+	fore200e_dma_chunk_free(fore200e, &rxq->status);
 	return -ENOMEM;
     }
 
@@ -2226,7 +2188,7 @@ static int fore200e_init_tx_queue(struct fore200e *fore200e)
     DPRINTK(2, "transmit queue is being initialized\n");
 
     /* allocate and align the array of status words */
-    if (fore200e->bus->dma_chunk_alloc(fore200e,
+    if (fore200e_dma_chunk_alloc(fore200e,
 				       &txq->status,
 				       sizeof(enum status),
 				       QUEUE_SIZE_TX,
@@ -2235,13 +2197,13 @@
     }
 
     /* allocate and align the array of transmit PDU descriptors */
-    if (fore200e->bus->dma_chunk_alloc(fore200e,
+    if (fore200e_dma_chunk_alloc(fore200e,
 				       &txq->tpd,
 				       sizeof(struct tpd),
 				       QUEUE_SIZE_TX,
 				       fore200e->bus->descr_alignment) < 0) {
 
-	fore200e->bus->dma_chunk_free(fore200e, &txq->status);
+	fore200e_dma_chunk_free(fore200e, &txq->status);
 	return -ENOMEM;
     }
 
@@ -2288,7 +2250,7 @@ static int fore200e_init_cmd_queue(struct fore200e *fore200e)
     DPRINTK(2, "command queue is being initialized\n");
 
     /* allocate and align the array of status words */
-    if (fore200e->bus->dma_chunk_alloc(fore200e,
+    if (fore200e_dma_chunk_alloc(fore200e,
 				       &cmdq->status,
 				       sizeof(enum status),
 				       QUEUE_SIZE_CMD,
```
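The error-handling part of the commit message corresponds to the removed `(chunk->dma_addr == 0)` tests above: a bus address of zero is not an error indicator, since some IOMMUs hand out 0 as a perfectly valid mapping, so the only reliable failure signal from dma_alloc_coherent() is a NULL virtual address. The toy allocator below (plain C with invented names, not kernel code) illustrates the same point: handle value 0 is legal, and failure has to be reported out of band rather than inferred from the handle.

```c
#include <stdio.h>
#include <stdint.h>

/* Toy "bus address" allocator: hands out handles starting at 0, so a zero
 * handle is perfectly valid.  Failure is signalled by the return value,
 * never by the handle itself. */
static int toy_alloc(uint64_t *handle_out)
{
	static uint64_t next_handle;	/* the first allocation gets handle 0 */
	static int remaining = 2;	/* pretend resources run out after two */

	if (remaining-- <= 0)
		return -1;		/* the only reliable failure signal */
	*handle_out = next_handle++;
	return 0;
}

int main(void)
{
	uint64_t h;

	for (int i = 0; i < 3; i++) {
		if (toy_alloc(&h) != 0) {	/* right: test the return value */
			printf("allocation %d failed\n", i);
			continue;
		}
		/* wrong: "if (h == 0) fail" would reject this valid allocation */
		printf("allocation %d got handle %llu\n", i,
		       (unsigned long long)h);
	}
	return 0;
}
```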