author	Dmitry Kravkov <dmitry@broadcom.com>	2011-05-04 23:50:33 +0000
committer	David S. Miller <davem@davemloft.net>	2011-05-05 10:44:34 -0700
commit	b3b83c3f3c640b239f1f1dfc49c0ecafbc074fdb (patch)
tree	46efa0dc1b979290770004f9ea6c89089685490b /drivers/net/bnx2x/bnx2x_cmn.c
parent	426b92415074d6d268c7aac4561efecd3785d0b1 (diff)
bnx2x: improve memory handling, low memory recovery flows
Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
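
In short, the patch moves RX ring allocation into per-queue helpers (bnx2x_alloc_fp_mem_at()/bnx2x_free_fp_mem_at()) so that a memory shortage can be handled per queue instead of failing the whole load. A condensed sketch of the resulting policy, reusing the names from the diff below (illustrative only, not the driver code itself; error handling trimmed):

	static int sketch_alloc_queue(struct bnx2x *bp, int index)
	{
		struct bnx2x_fastpath *fp = &bp->fp[index];
		int min_size = fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
						 MIN_RX_SIZE_TPA;
		int requested = bp->rx_ring_size ? bp->rx_ring_size :
						   MAX_RX_AVAIL / bp->num_queues;
		int got;

		/* never request less than the FW minimum */
		requested = max_t(int, min_size, requested);
		got = bnx2x_alloc_rx_bds(fp, requested);

		if (got >= min_size)
			return 0;	/* run the queue with a smaller ring */

		/* below the FW minimum: release the queue's memory entirely */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;		/* caller shrinks bp->num_queues */
	}

When an RSS queue is dropped this way, bnx2x_alloc_fp_mem() shifts the FCoE fastpath down with bnx2x_move_fp() so the non-eth clients stay adjacent to the last ethernet queue.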
Diffstat (limited to 'drivers/net/bnx2x/bnx2x_cmn.c')
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c  | 402
1 file changed, 324 insertions(+), 78 deletions(-)
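
For context, the BNX2X_ALLOC/BNX2X_PCI_ALLOC (and matching BNX2X_FREE/BNX2X_PCI_FREE) helpers used throughout the diff are assumed to follow the driver's existing pattern of jumping to a local alloc_mem_err label on failure, which is why bnx2x_alloc_fp_mem_at() below carries no per-call error checks. Roughly (a sketch, not the exact definitions from the bnx2x headers):

	#define BNX2X_ALLOC(x, size)					\
		do {							\
			x = kzalloc(size, GFP_KERNEL);			\
			if (x == NULL)					\
				goto alloc_mem_err;			\
		} while (0)

	#define BNX2X_PCI_ALLOC(x, y, size)				\
		do {							\
			x = dma_alloc_coherent(&bp->pdev->dev, size,	\
					       y, GFP_KERNEL);		\
			if (x == NULL)					\
				goto alloc_mem_err;			\
			memset(x, 0, size);				\
		} while (0)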
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 8853ae2a042e..218a7ad7cdd4 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -27,6 +27,49 @@
static int bnx2x_setup_irqs(struct bnx2x *bp);
+/**
+ * bnx2x_bz_fp - zero content of the fastpath structure.
+ *
+ * @bp: driver handle
+ * @index: fastpath index to be zeroed
+ *
+ * Makes sure the contents of bp->fp[index].napi are kept
+ * intact.
+ */
+static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
+{
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+ struct napi_struct orig_napi = fp->napi;
+ /* bzero bnx2x_fastpath contents */
+ memset(fp, 0, sizeof(*fp));
+
+ /* Restore the NAPI object as it has been already initialized */
+ fp->napi = orig_napi;
+}
+
+/**
+ * bnx2x_move_fp - move content of the fastpath structure.
+ *
+ * @bp: driver handle
+ * @from: source FP index
+ * @to: destination FP index
+ *
+ * Makes sure the contents of bp->fp[to].napi are kept
+ * intact.
+ */
+static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
+{
+ struct bnx2x_fastpath *from_fp = &bp->fp[from];
+ struct bnx2x_fastpath *to_fp = &bp->fp[to];
+ struct napi_struct orig_napi = to_fp->napi;
+ /* Move bnx2x_fastpath contents */
+ memcpy(to_fp, from_fp, sizeof(*to_fp));
+ to_fp->index = to;
+
+ /* Restore the NAPI object as it has been already initialized */
+ to_fp->napi = orig_napi;
+}
+
/* free skb in the packet ring at pos idx
* return idx of last bd freed
*/
@@ -881,55 +924,6 @@ void __bnx2x_link_report(struct bnx2x *bp)
}
}
-/* Returns the number of actually allocated BDs */
-static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
- int rx_ring_size)
-{
- struct bnx2x *bp = fp->bp;
- u16 ring_prod, cqe_ring_prod;
- int i;
-
- fp->rx_comp_cons = 0;
- cqe_ring_prod = ring_prod = 0;
- for (i = 0; i < rx_ring_size; i++) {
- if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
- BNX2X_ERR("was only able to allocate "
- "%d rx skbs on queue[%d]\n", i, fp->index);
- fp->eth_q_stats.rx_skb_alloc_failed++;
- break;
- }
- ring_prod = NEXT_RX_IDX(ring_prod);
- cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
- WARN_ON(ring_prod <= i);
- }
-
- fp->rx_bd_prod = ring_prod;
- /* Limit the CQE producer by the CQE ring size */
- fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
- cqe_ring_prod);
- fp->rx_pkt = fp->rx_calls = 0;
-
- return i;
-}
-
-static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
-{
- struct bnx2x *bp = fp->bp;
- int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
- MAX_RX_AVAIL/bp->num_queues;
-
- rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
-
- bnx2x_alloc_rx_bds(fp, rx_ring_size);
-
- /* Warning!
- * this will generate an interrupt (to the TSTORM)
- * must only be done after chip is initialized
- */
- bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
- fp->rx_sge_prod);
-}
-
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
@@ -938,6 +932,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
u16 ring_prod;
int i, j;
+ /* Allocate TPA resources */
for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
@@ -945,6 +940,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
"mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
if (!fp->disable_tpa) {
+ /* Fill the per-aggregation pool */
for (i = 0; i < max_agg_queues; i++) {
fp->tpa_pool[i].skb =
netdev_alloc_skb(bp->dev, fp->rx_buf_size);
@@ -999,13 +995,13 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
fp->rx_bd_cons = 0;
- bnx2x_set_next_page_rx_bd(fp);
-
- /* CQ ring */
- bnx2x_set_next_page_rx_cq(fp);
-
- /* Allocate BDs and initialize BD ring */
- bnx2x_alloc_rx_bd_ring(fp);
+ /* Activate BD ring */
+ /* Warning!
+ * this will generate an interrupt (to the TSTORM)
+ * must only be done after chip is initialized
+ */
+ bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
+ fp->rx_sge_prod);
if (j != 0)
continue;
@@ -1039,27 +1035,40 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
}
}
+static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x *bp = fp->bp;
+ int i;
+
+ /* ring wasn't allocated */
+ if (fp->rx_buf_ring == NULL)
+ return;
+
+ for (i = 0; i < NUM_RX_BD; i++) {
+ struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
+ struct sk_buff *skb = rx_buf->skb;
+
+ if (skb == NULL)
+ continue;
+
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ fp->rx_buf_size, DMA_FROM_DEVICE);
+
+ rx_buf->skb = NULL;
+ dev_kfree_skb(skb);
+ }
+}
+
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
- int i, j;
+ int j;
for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
- for (i = 0; i < NUM_RX_BD; i++) {
- struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
- struct sk_buff *skb = rx_buf->skb;
+ bnx2x_free_rx_bds(fp);
- if (skb == NULL)
- continue;
-
- dma_unmap_single(&bp->pdev->dev,
- dma_unmap_addr(rx_buf, mapping),
- fp->rx_buf_size, DMA_FROM_DEVICE);
-
- rx_buf->skb = NULL;
- dev_kfree_skb(skb);
- }
if (!fp->disable_tpa)
bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
ETH_MAX_AGGREGATION_QUEUES_E1 :
@@ -1435,26 +1444,37 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* must be called before memory allocation and HW init */
bnx2x_ilt_set_info(bp);
+ /* zero fastpath structures preserving invariants like napi which are
+ * allocated only once
+ */
+ for_each_queue(bp, i)
+ bnx2x_bz_fp(bp, i);
+
/* Set the receive queues buffer size */
bnx2x_set_rx_buf_size(bp);
+ for_each_queue(bp, i)
+ bnx2x_fp(bp, i, disable_tpa) =
+ ((bp->flags & TPA_ENABLE_FLAG) == 0);
+
+#ifdef BCM_CNIC
+ /* We don't want TPA on FCoE L2 ring */
+ bnx2x_fcoe(bp, disable_tpa) = 1;
+#endif
+
if (bnx2x_alloc_mem(bp))
return -ENOMEM;
+ /* Since bnx2x_alloc_mem() may update bp->num_queues,
+ * bnx2x_set_real_num_queues() must always come after it.
+ */
rc = bnx2x_set_real_num_queues(bp);
if (rc) {
BNX2X_ERR("Unable to set real_num_queues\n");
goto load_error0;
}
- for_each_queue(bp, i)
- bnx2x_fp(bp, i, disable_tpa) =
- ((bp->flags & TPA_ENABLE_FLAG) == 0);
-
-#ifdef BCM_CNIC
- /* We don't want TPA on FCoE L2 ring */
- bnx2x_fcoe(bp, disable_tpa) = 1;
-#endif
bnx2x_napi_enable(bp);
/* Send LOAD_REQUEST command to MCP
@@ -2480,6 +2500,232 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
return 0;
}
+static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
+{
+ union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
+ struct bnx2x_fastpath *fp = &bp->fp[fp_index];
+
+ /* Common */
+#ifdef BCM_CNIC
+ if (IS_FCOE_IDX(fp_index)) {
+ memset(sb, 0, sizeof(union host_hc_status_block));
+ fp->status_blk_mapping = 0;
+
+ } else {
+#endif
+ /* status blocks */
+ if (CHIP_IS_E2(bp))
+ BNX2X_PCI_FREE(sb->e2_sb,
+ bnx2x_fp(bp, fp_index,
+ status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_FREE(sb->e1x_sb,
+ bnx2x_fp(bp, fp_index,
+ status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+ }
+#endif
+ /* Rx */
+ if (!skip_rx_queue(bp, fp_index)) {
+ bnx2x_free_rx_bds(fp);
+
+ /* fastpath rx rings: rx_buf rx_desc rx_comp */
+ BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
+ bnx2x_fp(bp, fp_index, rx_desc_mapping),
+ sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
+ bnx2x_fp(bp, fp_index, rx_comp_mapping),
+ sizeof(struct eth_fast_path_rx_cqe) *
+ NUM_RCQ_BD);
+
+ /* SGE ring */
+ BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
+ bnx2x_fp(bp, fp_index, rx_sge_mapping),
+ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ }
+
+ /* Tx */
+ if (!skip_tx_queue(bp, fp_index)) {
+ /* fastpath tx rings: tx_buf tx_desc */
+ BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
+ bnx2x_fp(bp, fp_index, tx_desc_mapping),
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ }
+ /* end of fastpath */
+}
+
+void bnx2x_free_fp_mem(struct bnx2x *bp)
+{
+ int i;
+ for_each_queue(bp, i)
+ bnx2x_free_fp_mem_at(bp, i);
+}
+
+static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
+{
+ union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
+ if (CHIP_IS_E2(bp)) {
+ bnx2x_fp(bp, index, sb_index_values) =
+ (__le16 *)status_blk.e2_sb->sb.index_values;
+ bnx2x_fp(bp, index, sb_running_index) =
+ (__le16 *)status_blk.e2_sb->sb.running_index;
+ } else {
+ bnx2x_fp(bp, index, sb_index_values) =
+ (__le16 *)status_blk.e1x_sb->sb.index_values;
+ bnx2x_fp(bp, index, sb_running_index) =
+ (__le16 *)status_blk.e1x_sb->sb.running_index;
+ }
+}
+
+static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
+{
+ union host_hc_status_block *sb;
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+ int ring_size = 0;
+
+ /* if rx_ring_size specified - use it */
+ int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
+ MAX_RX_AVAIL/bp->num_queues;
+
+ /* allocate at least number of buffers required by FW */
+ rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+ MIN_RX_SIZE_TPA,
+ rx_ring_size);
+
+ bnx2x_fp(bp, index, bp) = bp;
+ bnx2x_fp(bp, index, index) = index;
+
+ /* Common */
+ sb = &bnx2x_fp(bp, index, status_blk);
+#ifdef BCM_CNIC
+ if (!IS_FCOE_IDX(index)) {
+#endif
+ /* status blocks */
+ if (CHIP_IS_E2(bp))
+ BNX2X_PCI_ALLOC(sb->e2_sb,
+ &bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_ALLOC(sb->e1x_sb,
+ &bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+ }
+#endif
+ set_sb_shortcuts(bp, index);
+
+ /* Tx */
+ if (!skip_tx_queue(bp, index)) {
+ /* fastpath tx rings: tx_buf tx_desc */
+ BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
+ sizeof(struct sw_tx_bd) * NUM_TX_BD);
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
+ &bnx2x_fp(bp, index, tx_desc_mapping),
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ }
+
+ /* Rx */
+ if (!skip_rx_queue(bp, index)) {
+ /* fastpath rx rings: rx_buf rx_desc rx_comp */
+ BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
+ sizeof(struct sw_rx_bd) * NUM_RX_BD);
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
+ &bnx2x_fp(bp, index, rx_desc_mapping),
+ sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
+ &bnx2x_fp(bp, index, rx_comp_mapping),
+ sizeof(struct eth_fast_path_rx_cqe) *
+ NUM_RCQ_BD);
+
+ /* SGE ring */
+ BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
+ sizeof(struct sw_rx_page) * NUM_RX_SGE);
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
+ &bnx2x_fp(bp, index, rx_sge_mapping),
+ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ /* RX BD ring */
+ bnx2x_set_next_page_rx_bd(fp);
+
+ /* CQ ring */
+ bnx2x_set_next_page_rx_cq(fp);
+
+ /* BDs */
+ ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
+ if (ring_size < rx_ring_size)
+ goto alloc_mem_err;
+ }
+
+ return 0;
+
+/* handles low memory cases */
+alloc_mem_err:
+ BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
+ index, ring_size);
+ /* FW will drop all packets if the queue is not big enough.
+ * In these cases we disable the queue.
+ * Min size is different for TPA and non-TPA queues.
+ */
+ if (ring_size < (fp->disable_tpa ?
+ MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
+ /* release memory allocated for this queue */
+ bnx2x_free_fp_mem_at(bp, index);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int bnx2x_alloc_fp_mem(struct bnx2x *bp)
+{
+ int i;
+
+ /**
+ * 1. Allocate FP for leading - fatal if error
+ * 2. {CNIC} Allocate FCoE FP - fatal if error
+ * 3. Allocate RSS - fix number of queues if error
+ */
+
+ /* leading */
+ if (bnx2x_alloc_fp_mem_at(bp, 0))
+ return -ENOMEM;
+#ifdef BCM_CNIC
+ /* FCoE */
+ if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
+ return -ENOMEM;
+#endif
+ /* RSS */
+ for_each_nondefault_eth_queue(bp, i)
+ if (bnx2x_alloc_fp_mem_at(bp, i))
+ break;
+
+ /* handle memory failures */
+ if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
+ int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
+
+ WARN_ON(delta < 0);
+#ifdef BCM_CNIC
+ /**
+ * move non eth FPs next to last eth FP
+ * must be done in that order
+ * FCOE_IDX < FWD_IDX < OOO_IDX
+ */
+
+ /* move FCoE fp */
+ bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
+#endif
+ bp->num_queues -= delta;
+ BNX2X_ERR("Adjusted num of queues from %d to %d\n",
+ bp->num_queues + delta, bp->num_queues);
+ }
+
+ return 0;
+}
static int bnx2x_setup_irqs(struct bnx2x *bp)
{