commit     b0d44c0dbbd52effb731b1c0af9afd56215c48de
tree       3237c0087d91a5390aed05689b9f610ba16fa116 /drivers/net/cxgb3
parent     9537a48ed4b9e4b738943d6da0a0fd4278adf905
parent     7c730ccdc1188b97f5c8cb690906242c7ed75c22
author     Ingo Molnar <mingo@elte.hu>  2009-03-28 23:05:50 +0100
committer  Ingo Molnar <mingo@elte.hu>  2009-03-28 23:05:50 +0100
Merge branch 'linus' into core/iommu
Conflicts:
arch/x86/Kconfig
Diffstat (limited to 'drivers/net/cxgb3')
 drivers/net/cxgb3/adapter.h       |  25
 drivers/net/cxgb3/ael1002.c       |   3
 drivers/net/cxgb3/common.h        |  10
 drivers/net/cxgb3/cxgb3_main.c    | 266
 drivers/net/cxgb3/cxgb3_offload.c |  12
 drivers/net/cxgb3/cxgb3_offload.h |   7
 drivers/net/cxgb3/regs.h          |  21
 drivers/net/cxgb3/sge.c           | 459
 drivers/net/cxgb3/t3_hw.c         | 244
 drivers/net/cxgb3/version.h       |   4
 drivers/net/cxgb3/xgmac.c         |  85
 11 files changed, 796 insertions(+), 340 deletions(-)
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index a89d8cc51205..714df2b675e6 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -42,7 +42,6 @@
 #include <linux/cache.h>
 #include <linux/mutex.h>
 #include <linux/bitops.h>
-#include <linux/inet_lro.h>
 #include "t3cdev.h"
 #include <asm/io.h>
@@ -69,6 +68,8 @@ struct port_info {
 	struct net_device_stats netstats;
 	int activity;
 	__be32 iscsi_ipv4addr;
+
+	int link_fault; /* link fault was detected */
 };
 
 enum {				/* adapter flags */
@@ -84,6 +85,8 @@ struct fl_pg_chunk {
 	struct page *page;
 	void *va;
 	unsigned int offset;
+	u64 *p_cnt;
+	DECLARE_PCI_UNMAP_ADDR(mapping);
 };
 
 struct rx_desc;
@@ -92,6 +95,7 @@ struct rx_sw_desc;
 struct sge_fl {			/* SGE per free-buffer list state */
 	unsigned int buf_size;	/* size of each Rx buffer */
 	unsigned int credits;	/* # of available Rx buffers */
+	unsigned int pend_cred;	/* new buffers since last FL DB ring */
 	unsigned int size;	/* capacity of free list */
 	unsigned int cidx;	/* consumer index */
 	unsigned int pidx;	/* producer index */
@@ -99,6 +103,7 @@ struct sge_fl {			/* SGE per free-buffer list state */
 	struct fl_pg_chunk pg_chunk;/* page chunk cache */
 	unsigned int use_pages;	/* whether FL uses pages or sk_buffs */
 	unsigned int order;	/* order of page allocations */
+	unsigned int alloc_size;	/* size of allocated buffer */
 	struct rx_desc *desc;	/* address of HW Rx descriptor ring */
 	struct rx_sw_desc *sdesc;	/* address of SW Rx descriptor ring */
 	dma_addr_t phys_addr;	/* physical address of HW ring start */
@@ -178,15 +183,11 @@ enum {				/* per port SGE statistics */
 	SGE_PSTAT_TX_CSUM,	/* # of TX checksum offloads */
 	SGE_PSTAT_VLANEX,	/* # of VLAN tag extractions */
 	SGE_PSTAT_VLANINS,	/* # of VLAN tag insertions */
-	SGE_PSTAT_LRO_AGGR,	/* # of page chunks added to LRO sessions */
-	SGE_PSTAT_LRO_FLUSHED,	/* # of flushed LRO sessions */
-	SGE_PSTAT_LRO_NO_DESC,	/* # of overflown LRO sessions */
 
 	SGE_PSTAT_MAX		/* must be last */
 };
 
-#define T3_MAX_LRO_SES 8
-#define T3_MAX_LRO_MAX_PKTS 64
+struct napi_gro_fraginfo;
 
 struct sge_qset {		/* an SGE queue set */
 	struct adapter *adap;
@@ -194,17 +195,14 @@ struct sge_qset {		/* an SGE queue set */
 	struct sge_rspq rspq;
 	struct sge_fl fl[SGE_RXQ_PER_SET];
 	struct sge_txq txq[SGE_TXQ_PER_SET];
-	struct net_lro_mgr lro_mgr;
-	struct net_lro_desc lro_desc[T3_MAX_LRO_SES];
-	struct skb_frag_struct *lro_frag_tbl;
-	int lro_nfrags;
+	struct napi_gro_fraginfo lro_frag_tbl;
 	int lro_enabled;
-	int lro_frag_len;
 	void *lro_va;
 	struct net_device *netdev;
 	struct netdev_queue *tx_q;	/* associated netdev TX queue */
 	unsigned long txq_stopped;	/* which Tx queues are stopped */
 	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
+	struct timer_list rx_reclaim_timer;	/* reclaims RX buffers */
 	unsigned long port_stats[SGE_PSTAT_MAX];
 } ____cacheline_aligned;
 
@@ -230,6 +228,7 @@ struct adapter {
 	unsigned int slow_intr_mask;
 	unsigned long irq_stats[IRQ_NUM_STATS];
 
+	int msix_nvectors;
 	struct {
 		unsigned short vec;
 		char desc[22];
@@ -247,6 +246,7 @@ struct adapter {
 	struct delayed_work adap_check_task;
 	struct work_struct ext_intr_handler_task;
 	struct work_struct fatal_error_handler_task;
+	struct work_struct link_fault_handler_task;
 
 	struct dentry *debugfs_root;
 
@@ -289,9 +289,12 @@ void t3_os_ext_intr_handler(struct adapter *adapter);
 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
			int speed, int duplex, int fc);
 void t3_os_phymod_changed(struct adapter *adap, int port_id);
+void t3_os_link_fault(struct adapter *adapter, int port_id, int state);
+void t3_os_link_fault_handler(struct adapter *adapter, int port_id);
 
 void t3_sge_start(struct adapter *adap);
 void t3_sge_stop(struct adapter *adap);
+void t3_start_sge_timers(struct adapter *adap);
 void t3_stop_sge_timers(struct adapter *adap);
 void t3_free_sge_resources(struct adapter *adap);
 void t3_sge_err_intr_handler(struct adapter *adapter);
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
index 5c3c05da4d96..e1b22490ff59 100644
--- a/drivers/net/cxgb3/ael1002.c
+++ b/drivers/net/cxgb3/ael1002.c
@@ -1005,7 +1005,8 @@ static int ael2005_reset(struct cphy *phy, int wait)
 		{ 0, 0, 0, 0 }
 	};
 
-	int err, lasi_ctrl;
+	int err;
+	unsigned int lasi_ctrl;
 
 	err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, &lasi_ctrl);
 	if (err)
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index db4f4f575b6a..e508dc32f3ec 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -191,7 +191,8 @@ struct mdio_ops {
 };
 
 struct adapter_info {
-	unsigned char nports;	/* # of ports */
+	unsigned char nports0;	/* # of ports on channel 0 */
+	unsigned char nports1;	/* # of ports on channel 1 */
 	unsigned char phy_base_addr;	/* MDIO PHY base address */
 	unsigned int gpio_out;	/* GPIO output settings */
 	unsigned char gpio_intr[MAX_NPORTS];	/* GPIO PHY IRQ pins */
@@ -280,6 +281,7 @@ struct mac_stats {
 
 	unsigned long num_toggled;	/* # times toggled TxEn due to stuck TX */
 	unsigned long num_resets;	/* # times reset due to stuck TX */
+	unsigned long link_faults;	/* # detected link faults */
 };
 
 struct tp_mib_stats {
@@ -421,6 +423,7 @@ struct adapter_params {
 	unsigned short b_wnd[NCCTRL_WIN];
 
 	unsigned int nports;	/* # of ethernet ports */
+	unsigned int chan_map;	/* bitmap of in-use Tx channels */
 	unsigned int stats_update_period;	/* MAC stats accumulation period */
 	unsigned int linkpoll_period;	/* link poll period in 0.1s */
 	unsigned int rev;	/* chip revision */
@@ -701,6 +704,8 @@ int t3_phy_lasi_intr_handler(struct cphy *phy);
 void t3_intr_enable(struct adapter *adapter);
 void t3_intr_disable(struct adapter *adapter);
 void t3_intr_clear(struct adapter *adapter);
+void t3_xgm_intr_enable(struct adapter *adapter, int idx);
+void t3_xgm_intr_disable(struct adapter *adapter, int idx);
 void t3_port_intr_enable(struct adapter *adapter, int idx);
 void t3_port_intr_disable(struct adapter *adapter, int idx);
 void t3_port_intr_clear(struct adapter *adapter, int idx);
@@ -708,6 +713,7 @@ int t3_slow_intr_handler(struct adapter *adapter);
 int t3_phy_intr_handler(struct adapter *adapter);
 
 void t3_link_changed(struct adapter *adapter, int port_id);
+void t3_link_fault(struct adapter *adapter, int port_id);
 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
 const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
 int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data);
@@ -744,6 +750,8 @@ int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
 
 int t3_mac_reset(struct cmac *mac);
 void t3b_pcs_reset(struct cmac *mac);
+void t3_mac_disable_exact_filters(struct cmac *mac);
+void t3_mac_enable_exact_filters(struct cmac *mac);
 int t3_mac_enable(struct cmac *mac, int which);
 int t3_mac_disable(struct cmac *mac, int which);
 int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
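(Editorial aside, not part of the patch.) The t3_os_link_fault_handler() and link_fault declarations above wire link-fault recovery into the driver's usual IRQ-to-workqueue pattern: the interrupt path only records the fault and queues work, and a process-context worker does the slow MAC/PHY recovery. A minimal sketch of that pattern, with hypothetical names:

#include <linux/workqueue.h>

struct demo_port {                      /* hypothetical stand-in for port_info */
	int link_fault;                 /* set from the interrupt path */
	struct work_struct fault_work;  /* runs later in process context */
};

static void demo_fault_work(struct work_struct *work)
{
	struct demo_port *p = container_of(work, struct demo_port, fault_work);

	if (p->link_fault) {
		/* slow path: gate Rx traffic, poke MAC/PHY, may sleep */
	}
}

/* called from the interrupt handler: cheap, never sleeps */
static void demo_fault_seen(struct demo_port *p)
{
	p->link_fault = 1;
	schedule_work(&p->fault_work);  /* INIT_WORK() done at probe time */
}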
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index bab8a934c33d..2c2aaa741450 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -170,6 +170,40 @@ static void link_report(struct net_device *dev)
 	}
 }
 
+void t3_os_link_fault(struct adapter *adap, int port_id, int state)
+{
+	struct net_device *dev = adap->port[port_id];
+	struct port_info *pi = netdev_priv(dev);
+
+	if (state == netif_carrier_ok(dev))
+		return;
+
+	if (state) {
+		struct cmac *mac = &pi->mac;
+
+		netif_carrier_on(dev);
+
+		/* Clear local faults */
+		t3_xgm_intr_disable(adap, pi->port_id);
+		t3_read_reg(adap, A_XGM_INT_STATUS + pi->mac.offset);
+		t3_write_reg(adap, A_XGM_INT_CAUSE + pi->mac.offset,
+			     F_XGM_INT);
+
+		t3_set_reg_field(adap, A_XGM_INT_ENABLE + pi->mac.offset,
+				 F_XGM_INT, F_XGM_INT);
+		t3_xgm_intr_enable(adap, pi->port_id);
+
+		t3_mac_enable(mac, MAC_DIRECTION_TX);
+	} else
+		netif_carrier_off(dev);
+
+	link_report(dev);
+}
+
 /**
  *	t3_os_link_changed - handle link status changes
  *	@adapter: the adapter associated with the link change
@@ -197,10 +231,34 @@ void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
 	if (link_stat != netif_carrier_ok(dev)) {
 		if (link_stat) {
 			t3_mac_enable(mac, MAC_DIRECTION_RX);
+
+			/* Clear local faults */
+			t3_xgm_intr_disable(adapter, pi->port_id);
+			t3_read_reg(adapter, A_XGM_INT_STATUS +
+				    pi->mac.offset);
+			t3_write_reg(adapter,
+				     A_XGM_INT_CAUSE + pi->mac.offset,
+				     F_XGM_INT);
+
+			t3_set_reg_field(adapter,
+					 A_XGM_INT_ENABLE + pi->mac.offset,
+					 F_XGM_INT, F_XGM_INT);
+			t3_xgm_intr_enable(adapter, pi->port_id);
+
 			netif_carrier_on(dev);
 		} else {
 			netif_carrier_off(dev);
-			pi->phy.ops->power_down(&pi->phy, 1);
+
+			t3_xgm_intr_disable(adapter, pi->port_id);
+			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
+			t3_set_reg_field(adapter,
+					 A_XGM_INT_ENABLE + pi->mac.offset,
+					 F_XGM_INT, 0);
+
+			if (is_10G(adapter))
+				pi->phy.ops->power_down(&pi->phy, 1);
+
+			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
 			t3_mac_disable(mac, MAC_DIRECTION_RX);
 			t3_link_start(&pi->phy, mac, &pi->link_config);
 		}
@@ -339,7 +397,7 @@ static void free_irq_resources(struct adapter *adapter)
 		free_irq(adapter->msix_info[0].vec, adapter);
 
 		for_each_port(adapter, i)
-		    n += adap2pinfo(adapter, i)->nqsets;
+			n += adap2pinfo(adapter, i)->nqsets;
 
 		for (i = 0; i < n; ++i)
 			free_irq(adapter->msix_info[i + 1].vec,
@@ -509,19 +567,9 @@ static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
 {
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adapter = pi->adapter;
-	int i, lro_on = 1;
 
 	adapter->params.sge.qset[qset_idx].lro = !!val;
 	adapter->sge.qs[qset_idx].lro_enabled = !!val;
-
-	/* let ethtool report LRO on only if all queues are LRO enabled */
-	for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i)
-		lro_on &= adapter->params.sge.qset[i].lro;
-
-	if (lro_on)
-		dev->features |= NETIF_F_LRO;
-	else
-		dev->features &= ~NETIF_F_LRO;
 }
 
 /**
@@ -554,7 +602,6 @@ static int setup_sge_qsets(struct adapter *adap)
 				&adap->params.sge.qset[qset_idx], ntxq, dev,
 				netdev_get_tx_queue(dev, j));
 			if (err) {
-				t3_stop_sge_timers(adap);
 				t3_free_sge_resources(adap);
 				return err;
 			}
@@ -998,6 +1045,8 @@ static int cxgb_up(struct adapter *adap)
 		setup_rss(adap);
 		if (!(adap->flags & NAPI_INIT))
 			init_napi(adap);
+
+		t3_start_sge_timers(adap);
 		adap->flags |= FULL_INIT_DONE;
 	}
 
@@ -1183,6 +1232,10 @@ static int cxgb_close(struct net_device *dev)
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adapter = pi->adapter;
 
+	/* Stop link fault interrupts */
+	t3_xgm_intr_disable(adapter, pi->port_id);
+	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
+
 	t3_port_intr_disable(adapter, pi->port_id);
 	netif_tx_stop_all_queues(dev);
 	pi->phy.ops->power_down(&pi->phy, 1);
@@ -1309,6 +1362,7 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
 
 	"CheckTXEnToggled   ",
 	"CheckResets        ",
+	"LinkFaults         ",
 };
 
 static int get_sset_count(struct net_device *dev, int sset)
@@ -1434,13 +1488,15 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
-	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
-	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
-	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
+	*data++ = 0;
+	*data++ = 0;
+	*data++ = 0;
 	*data++ = s->rx_cong_drops;
 
 	*data++ = s->num_toggled;
 	*data++ = s->num_resets;
+
+	*data++ = s->link_faults;
 }
 
 static inline void reg_block_dump(struct adapter *ap, void *buf,
@@ -1576,7 +1632,6 @@ static int speed_duplex_to_caps(int speed, int duplex)
 
 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
-	int cap;
 	struct port_info *p = netdev_priv(dev);
 	struct link_config *lc = &p->link_config;
 
@@ -1586,7 +1641,7 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	 * being requested.
 	 */
 	if (cmd->autoneg == AUTONEG_DISABLE) {
-		cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
+		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
 		if (lc->supported & cap)
 			return 0;
 	}
@@ -1827,28 +1882,6 @@ static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	memset(&wol->sopass, 0, sizeof(wol->sopass));
 }
 
-static int cxgb3_set_flags(struct net_device *dev, u32 data)
-{
-	struct port_info *pi = netdev_priv(dev);
-	int i;
-
-	if (data & ETH_FLAG_LRO) {
-		if (!(pi->rx_offload & T3_RX_CSUM))
-			return -EINVAL;
-
-		pi->rx_offload |= T3_LRO;
-		for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
-			set_qset_lro(dev, i, 1);
-
-	} else {
-		pi->rx_offload &= ~T3_LRO;
-		for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
-			set_qset_lro(dev, i, 0);
-	}
-
-	return 0;
-}
-
 static const struct ethtool_ops cxgb_ethtool_ops = {
 	.get_settings = get_settings,
 	.set_settings = set_settings,
@@ -1878,8 +1911,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
 	.get_regs = get_regs,
 	.get_wol = get_wol,
 	.set_tso = ethtool_op_set_tso,
-	.get_flags = ethtool_op_get_flags,
-	.set_flags = cxgb3_set_flags,
 };
 
 static int in_range(int val, int lo, int hi)
@@ -2460,8 +2491,20 @@ static void check_link_status(struct adapter *adapter)
 		struct net_device *dev = adapter->port[i];
 		struct port_info *p = netdev_priv(dev);
 
-		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
+		spin_lock_irq(&adapter->work_lock);
+		if (p->link_fault) {
+			spin_unlock_irq(&adapter->work_lock);
+			continue;
+		}
+		spin_unlock_irq(&adapter->work_lock);
+
+		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
+			t3_xgm_intr_disable(adapter, i);
+			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
+
 			t3_link_changed(adapter, i);
+			t3_xgm_intr_enable(adapter, i);
+		}
 	}
 }
 
@@ -2506,6 +2549,8 @@ static void t3_adap_check_task(struct work_struct *work)
 	struct adapter *adapter = container_of(work, struct adapter,
 					       adap_check_task.work);
 	const struct adapter_params *p = &adapter->params;
+	int port;
+	unsigned int v, status, reset;
 
 	adapter->check_task_cnt++;
 
@@ -2524,6 +2569,54 @@ static void t3_adap_check_task(struct work_struct *work)
 	if (p->rev == T3_REV_B2)
 		check_t3b2_mac(adapter);
 
+	/*
+	 * Scan the XGMAC's to check for various conditions which we want to
+	 * monitor in a periodic polling manner rather than via an interrupt
+	 * condition.  This is used for conditions which would otherwise flood
+	 * the system with interrupts and we only really need to know that the
+	 * conditions are "happening" ...  For each condition we count the
+	 * detection of the condition and reset it for the next polling loop.
+	 */
+	for_each_port(adapter, port) {
+		struct cmac *mac = &adap2pinfo(adapter, port)->mac;
+		u32 cause;
+
+		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
+		reset = 0;
+		if (cause & F_RXFIFO_OVERFLOW) {
+			mac->stats.rx_fifo_ovfl++;
+			reset |= F_RXFIFO_OVERFLOW;
+		}
+
+		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
+	}
+
+	/*
+	 * We do the same as above for FL_EMPTY interrupts.
+	 */
+	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
+	reset = 0;
+
+	if (status & F_FLEMPTY) {
+		struct sge_qset *qs = &adapter->sge.qs[0];
+		int i = 0;
+
+		reset |= F_FLEMPTY;
+
+		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
+		    0xffff;
+
+		while (v) {
+			qs->fl[i].empty += (v & 1);
+			if (i)
+				qs++;
+			i ^= 1;
+			v >>= 1;
+		}
+	}
+
+	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
+
 	/* Schedule the next check update if any port is active. */
 	spin_lock_irq(&adapter->work_lock);
 	if (adapter->open_device_map & PORT_MASK)
@@ -2538,9 +2631,23 @@ static void ext_intr_task(struct work_struct *work)
 {
 	struct adapter *adapter = container_of(work, struct adapter,
 					       ext_intr_handler_task);
+	int i;
 
+	/* Disable link fault interrupts */
+	for_each_port(adapter, i) {
+		struct net_device *dev = adapter->port[i];
+		struct port_info *p = netdev_priv(dev);
+
+		t3_xgm_intr_disable(adapter, i);
+		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
+	}
+
+	/* Re-enable link fault interrupts */
 	t3_phy_intr_handler(adapter);
 
+	for_each_port(adapter, i)
+		t3_xgm_intr_enable(adapter, i);
+
 	/* Now reenable external interrupts */
 	spin_lock_irq(&adapter->work_lock);
 	if (adapter->slow_intr_mask) {
@@ -2573,10 +2680,42 @@ void t3_os_ext_intr_handler(struct adapter *adapter)
 	spin_unlock(&adapter->work_lock);
 }
 
+static void link_fault_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       link_fault_handler_task);
+	int i;
+
+	for_each_port(adapter, i) {
+		struct net_device *netdev = adapter->port[i];
+		struct port_info *pi = netdev_priv(netdev);
+
+		if (pi->link_fault)
+			t3_link_fault(adapter, i);
+	}
+}
+
+void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
+{
+	struct net_device *netdev = adapter->port[port_id];
+	struct port_info *pi = netdev_priv(netdev);
+
+	spin_lock(&adapter->work_lock);
+	pi->link_fault = 1;
+	queue_work(cxgb3_wq, &adapter->link_fault_handler_task);
+	spin_unlock(&adapter->work_lock);
+}
+
 static int t3_adapter_error(struct adapter *adapter, int reset)
 {
 	int i, ret = 0;
 
+	if (is_offload(adapter) &&
+	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
+		cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
+		offload_close(&adapter->tdev);
+	}
+
 	/* Stop all ports */
 	for_each_port(adapter, i) {
 		struct net_device *netdev = adapter->port[i];
@@ -2585,10 +2724,6 @@ static int t3_adapter_error(struct adapter *adapter, int reset)
 			cxgb_close(netdev);
 	}
 
-	if (is_offload(adapter) &&
-	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
-		offload_close(&adapter->tdev);
-
 	/* Stop SGE timers */
 	t3_stop_sge_timers(adapter);
 
@@ -2640,6 +2775,9 @@ static void t3_resume_ports(struct adapter *adapter)
 			}
 		}
 	}
+
+	if (is_offload(adapter) && !ofld_disable)
+		cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
 }
 
 /*
@@ -2684,7 +2822,6 @@ void t3_fatal_err(struct adapter *adapter)
 		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
 			 fw_status[0], fw_status[1],
 			 fw_status[2], fw_status[3]);
-
 }
 
 /**
@@ -2734,6 +2871,9 @@ static void t3_io_resume(struct pci_dev *pdev)
 {
 	struct adapter *adapter = pci_get_drvdata(pdev);
 
+	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
+		 t3_read_reg(adapter, A_PCIE_PEX_ERR));
+
 	t3_resume_ports(adapter);
 }
 
@@ -2753,7 +2893,7 @@ static void set_nqsets(struct adapter *adap)
 	int i, j = 0;
 	int num_cpus = num_online_cpus();
 	int hwports = adap->params.nports;
-	int nqsets = SGE_QSETS;
+	int nqsets = adap->msix_nvectors - 1;
 
 	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
 		if (hwports == 2 &&
@@ -2782,18 +2922,25 @@ static void set_nqsets(struct adapter *adap)
 static int __devinit cxgb_enable_msix(struct adapter *adap)
 {
 	struct msix_entry entries[SGE_QSETS + 1];
+	int vectors;
 	int i, err;
 
-	for (i = 0; i < ARRAY_SIZE(entries); ++i)
+	vectors = ARRAY_SIZE(entries);
+	for (i = 0; i < vectors; ++i)
 		entries[i].entry = i;
 
-	err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
+	while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
+		vectors = err;
+
+	if (!err && vectors < (adap->params.nports + 1))
+		err = -1;
+
 	if (!err) {
-		for (i = 0; i < ARRAY_SIZE(entries); ++i)
+		for (i = 0; i < vectors; ++i)
 			adap->msix_info[i].vec = entries[i].vector;
-	} else if (err > 0)
-		dev_info(&adap->pdev->dev,
-			 "only %d MSI-X vectors left, not using MSI-X\n", err);
+		adap->msix_nvectors = vectors;
+	}
+
 	return err;
 }
 
@@ -2859,7 +3006,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 	static int version_printed;
 
 	int i, err, pci_using_dac = 0;
-	unsigned long mmio_start, mmio_len;
+	resource_size_t mmio_start, mmio_len;
 	const struct adapter_info *ai;
 	struct adapter *adapter = NULL;
 	struct port_info *pi;
@@ -2935,10 +3082,11 @@ static int __devinit init_one(struct pci_dev *pdev,
 
 	INIT_LIST_HEAD(&adapter->adapter_list);
 	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
+	INIT_WORK(&adapter->link_fault_handler_task, link_fault_task);
 	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
 	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
 
-	for (i = 0; i < ai->nports; ++i) {
+	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
 		struct net_device *netdev;
 
 		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
@@ -2961,7 +3109,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 		netdev->mem_end = mmio_start + mmio_len - 1;
 		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
 		netdev->features |= NETIF_F_LLTX;
-		netdev->features |= NETIF_F_LRO;
+		netdev->features |= NETIF_F_GRO;
 		if (pci_using_dac)
 			netdev->features |= NETIF_F_HIGHDMA;
 
@@ -3028,7 +3176,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 
 out_free_dev:
 	iounmap(adapter->regs);
-	for (i = ai->nports - 1; i >= 0; --i)
+	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
 		if (adapter->port[i])
 			free_netdev(adapter->port[i]);
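(Editorial aside, not part of the patch.) The reworked cxgb_enable_msix() above leans on the old pci_enable_msix() contract: a positive return value means "not enough vectors, but this many are available", so the driver retries with the offered count until it succeeds or fails hard. A sketch of that negotiate-down idiom under the same assumptions (hypothetical helper; a real caller would pci_disable_msix() on the error paths):

static int demo_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
			    int want, int need)
{
	int err;

	/* keep retrying with whatever count the system says it can grant */
	while ((err = pci_enable_msix(pdev, entries, want)) > 0)
		want = err;

	if (err)
		return err;	/* negative errno: hard failure */
	if (want < need)
		return -ENOSPC;	/* granted too few to be useful */

	return want;		/* number of vectors actually enabled */
}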
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 2d7f69aff1d9..620d80be6aac 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -153,6 +153,18 @@ void cxgb3_remove_clients(struct t3cdev *tdev)
 	mutex_unlock(&cxgb3_db_lock);
 }
 
+void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error)
+{
+	struct cxgb3_client *client;
+
+	mutex_lock(&cxgb3_db_lock);
+	list_for_each_entry(client, &client_list, client_list) {
+		if (client->err_handler)
+			client->err_handler(tdev, status, error);
+	}
+	mutex_unlock(&cxgb3_db_lock);
+}
+
 static struct net_device *get_iff_from_mac(struct adapter *adapter,
 					   const unsigned char *mac,
 					   unsigned int vlan)
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index d514e5019dfc..a8e8e5fcdf84 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -64,10 +64,16 @@ void cxgb3_register_client(struct cxgb3_client *client);
 void cxgb3_unregister_client(struct cxgb3_client *client);
 void cxgb3_add_clients(struct t3cdev *tdev);
 void cxgb3_remove_clients(struct t3cdev *tdev);
+void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error);
 
 typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
 				      struct sk_buff *skb, void *ctx);
 
+enum {
+	OFFLOAD_STATUS_UP,
+	OFFLOAD_STATUS_DOWN
+};
+
 struct cxgb3_client {
 	char *name;
 	void (*add) (struct t3cdev *);
@@ -76,6 +82,7 @@ struct cxgb3_client {
 	int (*redirect)(void *ctx, struct dst_entry *old,
 			struct dst_entry *new, struct l2t_entry *l2t);
 	struct list_head client_list;
+	void (*err_handler)(struct t3cdev *tdev, u32 status, u32 error);
 };
 
 /*
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index a035d5c24442..1b5327b5a965 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -170,6 +170,10 @@
 
 #define S_RSPQ0DISABLED 8
 
+#define S_FL0EMPTY 16
+#define V_FL0EMPTY(x) ((x) << S_FL0EMPTY)
+#define F_FL0EMPTY V_FL0EMPTY(1U)
+
 #define A_SG_EGR_RCQ_DRB_THRSH 0x54
 
 #define S_HIRCQDRBTHRSH 16
@@ -258,6 +262,10 @@
 #define V_RSPQCREDITOVERFOW(x) ((x) << S_RSPQCREDITOVERFOW)
 #define F_RSPQCREDITOVERFOW V_RSPQCREDITOVERFOW(1U)
 
+#define S_FLEMPTY 1
+#define V_FLEMPTY(x) ((x) << S_FLEMPTY)
+#define F_FLEMPTY V_FLEMPTY(1U)
+
 #define A_SG_INT_ENABLE 0x60
 
 #define A_SG_CMDQ_CREDIT_TH 0x64
@@ -2207,6 +2215,15 @@
 
 #define A_XGM_RX_EXACT_MATCH_LOW_8 0x854
 
+#define A_XGM_INT_STATUS 0x86c
+
+#define S_LINKFAULTCHANGE 9
+#define V_LINKFAULTCHANGE(x) ((x) << S_LINKFAULTCHANGE)
+#define F_LINKFAULTCHANGE V_LINKFAULTCHANGE(1U)
+
+#define A_XGM_XGM_INT_ENABLE 0x874
+#define A_XGM_XGM_INT_DISABLE 0x878
+
 #define A_XGM_STAT_CTRL 0x880
 
 #define S_CLRSTATS 2
@@ -2405,6 +2422,10 @@
 #define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
 #define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U)
 
+#define S_XGM_INT 0
+#define V_XGM_INT(x) ((x) << S_XGM_INT)
+#define F_XGM_INT V_XGM_INT(1U)
+
 #define A_XGM_INT_CAUSE 0x8d8
 
 #define A_XGM_XAUI_ACT_CTRL 0x8dc
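(Editorial aside, not part of the patch.) The register additions above follow the file's S_/V_/F_ convention: for a field FOO, S_FOO is its bit position, V_FOO(x) positions a value in the field, and F_FOO is the one-bit mask V_FOO(1U). A tiny illustration with a made-up field name:

#define S_DEMOFIELD    9
#define V_DEMOFIELD(x) ((x) << S_DEMOFIELD)
#define F_DEMOFIELD    V_DEMOFIELD(1U)

/* test the bit in a cause register, and mark it for a write-1-to-clear */
static inline int demo_handle(u32 cause, u32 *clear)
{
	if (cause & F_DEMOFIELD) {
		*clear |= F_DEMOFIELD;
		return 1;
	}
	return 0;
}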
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index d31791f60292..26d3587f3399 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -50,6 +50,7 @@
 #define SGE_RX_COPY_THRES  256
 #define SGE_RX_PULL_LEN    128
 
+#define SGE_PG_RSVD SMP_CACHE_BYTES
 /*
  * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
  * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
@@ -57,16 +58,25 @@
  */
 #define FL0_PG_CHUNK_SIZE  2048
 #define FL0_PG_ORDER 0
+#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
 #define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
 #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
+#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
 
 #define SGE_RX_DROP_THRES 16
+#define RX_RECLAIM_PERIOD (HZ/4)
 
 /*
+ * Max number of Rx buffers we replenish at a time.
+ */
+#define MAX_RX_REFILL 16U
+
+/*
  * Period of the Tx buffer reclaim timer.  This timer does not need to run
  * frequently as Tx buffers are usually reclaimed by new Tx packets.
  */
 #define TX_RECLAIM_PERIOD (HZ / 4)
+#define TX_RECLAIM_TIMER_CHUNK 64U
+#define TX_RECLAIM_CHUNK 16U
 
 /* WR size in bytes */
 #define WR_LEN (WR_FLITS * 8)
@@ -304,21 +314,25 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
  *	reclaim_completed_tx - reclaims completed Tx descriptors
  *	@adapter: the adapter
  *	@q: the Tx queue to reclaim completed descriptors from
+ *	@chunk: maximum number of descriptors to reclaim
  *
  *	Reclaims Tx descriptors that the SGE has indicated it has processed,
  *	and frees the associated buffers if possible.  Called with the Tx
  *	queue's lock held.
  */
-static inline void reclaim_completed_tx(struct adapter *adapter,
-					struct sge_txq *q)
+static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
+						struct sge_txq *q,
+						unsigned int chunk)
 {
 	unsigned int reclaim = q->processed - q->cleaned;
 
+	reclaim = min(chunk, reclaim);
 	if (reclaim) {
 		free_tx_desc(adapter, q, reclaim);
 		q->cleaned += reclaim;
 		q->in_use -= reclaim;
 	}
+	return q->processed - q->cleaned;
 }
 
 /**
@@ -334,6 +348,26 @@ static inline int should_restart_tx(const struct sge_txq *q)
 	return q->in_use - r < (q->size >> 1);
 }
 
+static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
+			  struct rx_sw_desc *d)
+{
+	if (q->use_pages && d->pg_chunk.page) {
+		(*d->pg_chunk.p_cnt)--;
+		if (!*d->pg_chunk.p_cnt)
+			pci_unmap_page(pdev,
+				       pci_unmap_addr(&d->pg_chunk, mapping),
+				       q->alloc_size, PCI_DMA_FROMDEVICE);
+
+		put_page(d->pg_chunk.page);
+		d->pg_chunk.page = NULL;
+	} else {
+		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
+				 q->buf_size, PCI_DMA_FROMDEVICE);
+		kfree_skb(d->skb);
+		d->skb = NULL;
+	}
+}
+
 /**
  *	free_rx_bufs - free the Rx buffers on an SGE free list
  *	@pdev: the PCI device associated with the adapter
@@ -349,16 +383,8 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
 	while (q->credits--) {
 		struct rx_sw_desc *d = &q->sdesc[cidx];
 
-		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
-				 q->buf_size, PCI_DMA_FROMDEVICE);
-		if (q->use_pages) {
-			if (d->pg_chunk.page)
-				put_page(d->pg_chunk.page);
-			d->pg_chunk.page = NULL;
-		} else {
-			kfree_skb(d->skb);
-			d->skb = NULL;
-		}
+
+		clear_rx_desc(pdev, q, d);
 		if (++cidx == q->size)
 			cidx = 0;
 	}
@@ -401,18 +427,39 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
 	return 0;
 }
 
-static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
+static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
+				   unsigned int gen)
+{
+	d->addr_lo = cpu_to_be32(mapping);
+	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
+	wmb();
+	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
+	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+	return 0;
+}
+
+static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
+			  struct rx_sw_desc *sd, gfp_t gfp,
 			  unsigned int order)
 {
 	if (!q->pg_chunk.page) {
+		dma_addr_t mapping;
+
 		q->pg_chunk.page = alloc_pages(gfp, order);
 		if (unlikely(!q->pg_chunk.page))
 			return -ENOMEM;
 		q->pg_chunk.va = page_address(q->pg_chunk.page);
+		q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
+				    SGE_PG_RSVD;
 		q->pg_chunk.offset = 0;
+		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
+				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
+		pci_unmap_addr_set(&q->pg_chunk, mapping, mapping);
 	}
 	sd->pg_chunk = q->pg_chunk;
 
+	prefetch(sd->pg_chunk.p_cnt);
+
 	q->pg_chunk.offset += q->buf_size;
 	if (q->pg_chunk.offset == (PAGE_SIZE << order))
 		q->pg_chunk.page = NULL;
@@ -420,9 +467,23 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
 		q->pg_chunk.va += q->buf_size;
 		get_page(q->pg_chunk.page);
 	}
+
+	if (sd->pg_chunk.offset == 0)
+		*sd->pg_chunk.p_cnt = 1;
+	else
+		*sd->pg_chunk.p_cnt += 1;
+
 	return 0;
 }
 
+static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
+{
+	if (q->pend_cred >= q->credits / 4) {
+		q->pend_cred = 0;
+		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+	}
+}
+
 /**
  *	refill_fl - refill an SGE free-buffer list
  *	@adapter: the adapter
@@ -436,38 +497,43 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
  */
 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
 {
-	void *buf_start;
-
 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
 	struct rx_desc *d = &q->desc[q->pidx];
 	unsigned int count = 0;
 
 	while (n--) {
+		dma_addr_t mapping;
 		int err;
 
 		if (q->use_pages) {
-			if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) {
+			if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
+						    q->order))) {
 nomem:				q->alloc_failed++;
 				break;
 			}
-			buf_start = sd->pg_chunk.va;
+			mapping = pci_unmap_addr(&sd->pg_chunk, mapping) +
+				  sd->pg_chunk.offset;
+			pci_unmap_addr_set(sd, dma_addr, mapping);
+
+			add_one_rx_chunk(mapping, d, q->gen);
+			pci_dma_sync_single_for_device(adap->pdev, mapping,
+						q->buf_size - SGE_PG_RSVD,
+						PCI_DMA_FROMDEVICE);
 		} else {
-			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+			void *buf_start;
 
+			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
 			if (!skb)
 				goto nomem;
 
 			sd->skb = skb;
 			buf_start = skb->data;
-		}
-
-		err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
-				     adap->pdev);
-		if (unlikely(err)) {
-			if (!q->use_pages) {
-				kfree_skb(sd->skb);
-				sd->skb = NULL;
+			err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
+					     q->gen, adap->pdev);
+			if (unlikely(err)) {
+				clear_rx_desc(adap->pdev, q, sd);
+				break;
 			}
-			break;
 		}
 
 		d++;
@@ -478,19 +544,19 @@ nomem:				q->alloc_failed++;
 			sd = q->sdesc;
 			d = q->desc;
 		}
-		q->credits++;
 		count++;
 	}
-	wmb();
-	if (likely(count))
-		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+
+	q->credits += count;
+	q->pend_cred += count;
+	ring_fl_db(adap, q);
 
 	return count;
 }
 
 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
 {
-	refill_fl(adap, fl, min(16U, fl->size - fl->credits),
+	refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
 		  GFP_ATOMIC | __GFP_COMP);
 }
 
@@ -515,13 +581,15 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
 	wmb();
 	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
 	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
-	q->credits++;
 
 	if (++q->pidx == q->size) {
 		q->pidx = 0;
 		q->gen ^= 1;
 	}
-	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+
+	q->credits++;
+	q->pend_cred++;
+	ring_fl_db(adap, q);
 }
 
 /**
@@ -585,8 +653,8 @@ static void t3_reset_qset(struct sge_qset *q)
 	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
 	q->txq_stopped = 0;
 	q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
-	kfree(q->lro_frag_tbl);
-	q->lro_nfrags = q->lro_frag_len = 0;
+	q->rx_reclaim_timer.function = NULL;
+	q->lro_frag_tbl.nr_frags = q->lro_frag_tbl.len = 0;
 }
 
 
@@ -733,7 +801,9 @@ recycle:
 		return skb;
 	}
 
-	if (unlikely(fl->credits < drop_thres))
+	if (unlikely(fl->credits < drop_thres) &&
+	    refill_fl(adap, fl, min(MAX_RX_REFILL,
+				    fl->size - fl->credits - 1),
+		      GFP_ATOMIC | __GFP_COMP) == 0)
 		goto recycle;
 
 use_orig_buf:
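(Editorial aside, not part of the patch.) The p_cnt counter introduced in the hunks above lives in the page itself, in the SGE_PG_RSVD bytes reserved at the end of each allocation; all chunks carved from one page share that use count, so the PCI mapping is torn down only when the last chunk is consumed. Schematically (hypothetical helper):

static void demo_put_chunk(struct pci_dev *pdev, u64 *p_cnt,
			   dma_addr_t page_mapping, size_t alloc_size)
{
	/* last chunk of the page gone: now it is safe to unmap it all */
	if (--(*p_cnt) == 0)
		pci_unmap_page(pdev, page_mapping, alloc_size,
			       PCI_DMA_FROMDEVICE);
}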
@@ -770,19 +840,19 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
 	struct sk_buff *newskb, *skb;
 	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
 
-	newskb = skb = q->pg_skb;
+	dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);
 
+	newskb = skb = q->pg_skb;
 	if (!skb && (len <= SGE_RX_COPY_THRES)) {
 		newskb = alloc_skb(len, GFP_ATOMIC);
 		if (likely(newskb != NULL)) {
 			__skb_put(newskb, len);
-			pci_dma_sync_single_for_cpu(adap->pdev,
-					    pci_unmap_addr(sd, dma_addr), len,
+			pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
 					    PCI_DMA_FROMDEVICE);
 			memcpy(newskb->data, sd->pg_chunk.va, len);
-			pci_dma_sync_single_for_device(adap->pdev,
-					    pci_unmap_addr(sd, dma_addr), len,
-					    PCI_DMA_FROMDEVICE);
+			pci_dma_sync_single_for_device(adap->pdev, dma_addr,
+						       len,
+						       PCI_DMA_FROMDEVICE);
 		} else if (!drop_thres)
 			return NULL;
 recycle:
@@ -795,16 +865,25 @@ recycle:
 	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
 		goto recycle;
 
+	prefetch(sd->pg_chunk.p_cnt);
+
 	if (!skb)
 		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
+
 	if (unlikely(!newskb)) {
 		if (!drop_thres)
 			return NULL;
 		goto recycle;
 	}
 
-	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
-			 fl->buf_size, PCI_DMA_FROMDEVICE);
+	pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
+				    PCI_DMA_FROMDEVICE);
+	(*sd->pg_chunk.p_cnt)--;
+	if (!*sd->pg_chunk.p_cnt)
+		pci_unmap_page(adap->pdev,
+			       pci_unmap_addr(&sd->pg_chunk, mapping),
+			       fl->alloc_size,
+			       PCI_DMA_FROMDEVICE);
 	if (!skb) {
 		__skb_put(newskb, SGE_RX_PULL_LEN);
 		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
@@ -813,14 +892,15 @@ recycle:
 			   len - SGE_RX_PULL_LEN);
 		newskb->len = len;
 		newskb->data_len = len - SGE_RX_PULL_LEN;
+		newskb->truesize += newskb->data_len;
 	} else {
 		skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
 				   sd->pg_chunk.page,
 				   sd->pg_chunk.offset, len);
 		newskb->len += len;
 		newskb->data_len += len;
+		newskb->truesize += len;
 	}
-	newskb->truesize += newskb->data_len;
 
 	fl->credits--;
 	/*
@@ -1063,7 +1143,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
 	struct tx_desc *d = &q->desc[pidx];
 	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
 
-	cpl->len = htonl(skb->len | 0x80000000);
+	cpl->len = htonl(skb->len);
 	cntrl = V_TXPKT_INTF(pi->port_id);
 
 	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
@@ -1161,7 +1241,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	txq = netdev_get_tx_queue(dev, qidx);
 
 	spin_lock(&q->lock);
-	reclaim_completed_tx(adap, q);
+	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 
 	credits = q->size - q->in_use;
 	ndesc = calc_tx_descs(skb);
@@ -1570,7 +1650,7 @@ static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
 	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
 
 	spin_lock(&q->lock);
-again:	reclaim_completed_tx(adap, q);
+again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 
 	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
 	if (unlikely(ret)) {
@@ -1612,7 +1692,7 @@ static void restart_offloadq(unsigned long data)
 	struct adapter *adap = pi->adapter;
 
 	spin_lock(&q->lock);
-again:	reclaim_completed_tx(adap, q);
+again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 
 	while ((skb = skb_peek(&q->sendq)) != NULL) {
 		unsigned int gen, pidx;
@@ -1932,12 +2012,13 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 	skb_pull(skb, sizeof(*p) + pad);
 	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
 	pi = netdev_priv(skb->dev);
-	if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && p->csum == htons(0xffff) &&
-	    !p->fragment) {
+	if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid &&
+	    p->csum == htons(0xffff) && !p->fragment) {
 		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	} else
 		skb->ip_summed = CHECKSUM_NONE;
+	skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
 
 	if (unlikely(p->vlan_valid)) {
 		struct vlan_group *grp = pi->vlan_grp;
@@ -1945,10 +2026,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 		qs->port_stats[SGE_PSTAT_VLANEX]++;
 		if (likely(grp))
 			if (lro)
-				lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb,
-							     grp,
-							     ntohs(p->vlan),
-							     p);
+				vlan_gro_receive(&qs->napi, grp,
+						 ntohs(p->vlan), skb);
 			else {
 				if (unlikely(pi->iscsi_ipv4addr &&
 				    is_arp(skb))) {
@@ -1965,7 +2044,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 			dev_kfree_skb_any(skb);
 	} else if (rq->polling) {
 		if (lro)
-			lro_receive_skb(&qs->lro_mgr, skb, p);
+			napi_gro_receive(&qs->napi, skb);
 		else {
 			if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
 				cxgb3_arp_process(adap, skb);
@@ -1981,59 +2060,6 @@ static inline int is_eth_tcp(u32 rss)
 }
 
 /**
- *	lro_frame_ok - check if an ingress packet is eligible for LRO
- *	@p: the CPL header of the packet
- *
- *	Returns true if a received packet is eligible for LRO.
- *	The following conditions must be true:
- *	- packet is TCP/IP Ethernet II (checked elsewhere)
- *	- not an IP fragment
- *	- no IP options
- *	- TCP/IP checksums are correct
- *	- the packet is for this host
- */
-static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
-{
-	const struct ethhdr *eh = (struct ethhdr *)(p + 1);
-	const struct iphdr *ih = (struct iphdr *)(eh + 1);
-
-	return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) &&
-		eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
-}
-
-static int t3_get_lro_header(void **eh,  void **iph, void **tcph,
-			     u64 *hdr_flags, void *priv)
-{
-	const struct cpl_rx_pkt *cpl = priv;
-
-	if (!lro_frame_ok(cpl))
-		return -1;
-
-	*eh = (struct ethhdr *)(cpl + 1);
-	*iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
-	*tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
-
-	*hdr_flags = LRO_IPV4 | LRO_TCP;
-	return 0;
-}
-
-static int t3_get_skb_header(struct sk_buff *skb,
-			      void **iph, void **tcph, u64 *hdr_flags,
-			      void *priv)
-{
-	void *eh;
-
-	return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv);
-}
-
-static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh,
-			      void **iph, void **tcph, u64 *hdr_flags,
-			      void *priv)
-{
-	return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv);
-}
-
-/**
  *	lro_add_page - add a page chunk to an LRO session
  *	@adap: the adapter
  *	@qs: the associated queue set
@@ -2049,8 +2075,9 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 {
 	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
 	struct cpl_rx_pkt *cpl;
-	struct skb_frag_struct *rx_frag = qs->lro_frag_tbl;
-	int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len;
+	struct skb_frag_struct *rx_frag = qs->lro_frag_tbl.frags;
+	int nr_frags = qs->lro_frag_tbl.nr_frags;
+	int frag_len = qs->lro_frag_tbl.len;
 	int offset = 0;
 
 	if (!nr_frags) {
@@ -2061,21 +2088,33 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 	fl->credits--;
 
 	len -= offset;
-	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
-			 fl->buf_size, PCI_DMA_FROMDEVICE);
+	pci_dma_sync_single_for_cpu(adap->pdev,
+				    pci_unmap_addr(sd, dma_addr),
+				    fl->buf_size - SGE_PG_RSVD,
+				    PCI_DMA_FROMDEVICE);
+
+	(*sd->pg_chunk.p_cnt)--;
+	if (!*sd->pg_chunk.p_cnt)
+		pci_unmap_page(adap->pdev,
+			       pci_unmap_addr(&sd->pg_chunk, mapping),
+			       fl->alloc_size,
+			       PCI_DMA_FROMDEVICE);
+
+	prefetch(qs->lro_va);
 
 	rx_frag += nr_frags;
 	rx_frag->page = sd->pg_chunk.page;
 	rx_frag->page_offset = sd->pg_chunk.offset + offset;
 	rx_frag->size = len;
 
 	frag_len += len;
-	qs->lro_nfrags++;
-	qs->lro_frag_len = frag_len;
+	qs->lro_frag_tbl.nr_frags++;
+	qs->lro_frag_tbl.len = frag_len;
+
 
 	if (!complete)
 		return;
 
-	qs->lro_nfrags = qs->lro_frag_len = 0;
+	qs->lro_frag_tbl.ip_summed = CHECKSUM_UNNECESSARY;
 	cpl = qs->lro_va;
 
 	if (unlikely(cpl->vlan_valid)) {
@@ -2084,36 +2123,15 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 		struct vlan_group *grp = pi->vlan_grp;
 
 		if (likely(grp != NULL)) {
-			lro_vlan_hwaccel_receive_frags(&qs->lro_mgr,
-						       qs->lro_frag_tbl,
-						       frag_len, frag_len,
-						       grp, ntohs(cpl->vlan),
-						       cpl, 0);
-			return;
+			vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan),
+				       &qs->lro_frag_tbl);
+			goto out;
 		}
 	}
-	lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl,
-			  frag_len, frag_len, cpl, 0);
-}
+	napi_gro_frags(&qs->napi, &qs->lro_frag_tbl);
 
-/**
- *	init_lro_mgr - initialize a LRO manager object
- *	@lro_mgr: the LRO manager object
- */
-static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
-{
-	lro_mgr->dev = qs->netdev;
-	lro_mgr->features = LRO_F_NAPI;
-	lro_mgr->frag_align_pad = NET_IP_ALIGN;
-	lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
-	lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
-	lro_mgr->max_desc = T3_MAX_LRO_SES;
-	lro_mgr->lro_arr = qs->lro_desc;
-	lro_mgr->get_frag_header = t3_get_frag_header;
-	lro_mgr->get_skb_header = t3_get_skb_header;
-	lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS;
-	if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
-		lro_mgr->max_aggr = MAX_SKB_FRAGS;
+out:
+	qs->lro_frag_tbl.nr_frags = qs->lro_frag_tbl.len = 0;
 }
 
 /**
@@ -2282,6 +2300,8 @@ no_mem:
 		if (fl->use_pages) {
 			void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
 
+			prefetch(&qs->lro_frag_tbl);
+
 			prefetch(addr);
 #if L1_CACHE_BYTES < 128
 			prefetch(addr + L1_CACHE_BYTES);
@@ -2356,10 +2376,6 @@ next_fl:
 	}
 
 	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
-	lro_flush_all(&qs->lro_mgr);
-	qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated;
-	qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed;
-	qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc;
 
 	if (sleeping)
 		check_ring_db(adap, qs, sleeping);
@@ -2775,7 +2791,8 @@ irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
  */
 void t3_sge_err_intr_handler(struct adapter *adapter)
 {
-	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
+	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
+				 ~F_FLEMPTY;
 
 	if (status & SGE_PARERR)
 		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
@@ -2805,13 +2822,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
 }
 
 /**
- *	sge_timer_cb - perform periodic maintenance of an SGE qset
+ *	sge_timer_tx - perform periodic maintenance of an SGE qset
  *	@data: the SGE queue set to maintain
 *
 *	Runs periodically from a timer to perform maintenance of an SGE queue
 *	set.  It performs two tasks:
 *
- *	a) Cleans up any completed Tx descriptors that may still be pending.
+ *	Cleans up any completed Tx descriptors that may still be pending.
 *	Normal descriptor cleanup happens when new packets are added to a Tx
 *	queue so this timer is relatively infrequent and does any cleanup only
 *	if the Tx queue has not seen any new packets in a while.  We make a
@@ -2821,51 +2838,87 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
 *	up).  Since control queues use immediate data exclusively we don't
 *	bother cleaning them up here.
 *
- *	b) Replenishes Rx queues that have run out due to memory shortage.
- *	Normally new Rx buffers are added when existing ones are consumed but
- *	when out of memory a queue can become empty.  We try to add only a few
- *	buffers here, the queue will be replenished fully as these new buffers
- *	are used up if memory shortage has subsided.
 */
-static void sge_timer_cb(unsigned long data)
+static void sge_timer_tx(unsigned long data)
 {
-	spinlock_t *lock;
 	struct sge_qset *qs = (struct sge_qset *)data;
-	struct adapter *adap = qs->adap;
+	struct port_info *pi = netdev_priv(qs->netdev);
+	struct adapter *adap = pi->adapter;
+	unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
+	unsigned long next_period;
 
 	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
-		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
+		tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
+						    TX_RECLAIM_TIMER_CHUNK);
 		spin_unlock(&qs->txq[TXQ_ETH].lock);
 	}
 	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
-		reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
+		tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
+						     TX_RECLAIM_TIMER_CHUNK);
 		spin_unlock(&qs->txq[TXQ_OFLD].lock);
 	}
-	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
-					    &adap->sge.qs[0].rspq.lock;
-	if (spin_trylock_irq(lock)) {
-		if (!napi_is_scheduled(&qs->napi)) {
-			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
-
-			if (qs->fl[0].credits < qs->fl[0].size)
-				__refill_fl(adap, &qs->fl[0]);
-			if (qs->fl[1].credits < qs->fl[1].size)
-				__refill_fl(adap, &qs->fl[1]);
-
-			if (status & (1 << qs->rspq.cntxt_id)) {
-				qs->rspq.starved++;
-				if (qs->rspq.credits) {
-					refill_rspq(adap, &qs->rspq, 1);
-					qs->rspq.credits--;
-					qs->rspq.restarted++;
-					t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
-						     1 << qs->rspq.cntxt_id);
-				}
+
+	next_period = TX_RECLAIM_PERIOD >>
+		      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
+		       TX_RECLAIM_TIMER_CHUNK);
+	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
+}
+
+/*
+ * sge_timer_rx - perform periodic maintenance of an SGE qset
+ * @data: the SGE queue set to maintain
+ *
+ * a) Replenishes Rx queues that have run out due to memory shortage.
+ * Normally new Rx buffers are added when existing ones are consumed but
+ * when out of memory a queue can become empty.  We try to add only a few
+ * buffers here, the queue will be replenished fully as these new buffers
+ * are used up if memory shortage has subsided.
+ *
+ * b) Return coalesced response queue credits in case a response queue is
+ * starved.
+ *
+ */
+static void sge_timer_rx(unsigned long data)
+{
+	spinlock_t *lock;
+	struct sge_qset *qs = (struct sge_qset *)data;
+	struct port_info *pi = netdev_priv(qs->netdev);
+	struct adapter *adap = pi->adapter;
+	u32 status;
+
+	lock = adap->params.rev > 0 ?
+	       &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
+
+	if (!spin_trylock_irq(lock))
+		goto out;
+
+	if (napi_is_scheduled(&qs->napi))
+		goto unlock;
+
+	if (adap->params.rev < 4) {
+		status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
+
+		if (status & (1 << qs->rspq.cntxt_id)) {
+			qs->rspq.starved++;
+			if (qs->rspq.credits) {
+				qs->rspq.credits--;
+				refill_rspq(adap, &qs->rspq, 1);
+				qs->rspq.restarted++;
+				t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
+					     1 << qs->rspq.cntxt_id);
 			}
 		}
-		spin_unlock_irq(lock);
 	}
-	mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+
+	if (qs->fl[0].credits < qs->fl[0].size)
+		__refill_fl(adap, &qs->fl[0]);
+	if (qs->fl[1].credits < qs->fl[1].size)
+		__refill_fl(adap, &qs->fl[1]);
+
+unlock:
+	spin_unlock_irq(lock);
+out:
+	mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
 }
 
 /**
@@ -2906,10 +2959,10 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 {
 	int i, avail, ret = -ENOMEM;
 	struct sge_qset *q = &adapter->sge.qs[id];
-	struct net_lro_mgr *lro_mgr = &q->lro_mgr;
 
 	init_qset_cntxt(q, id);
-	setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q);
+	setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
+	setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);
 
 	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
 				   sizeof(struct rx_desc),
@@ -2985,25 +3038,23 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
 	q->fl[0].order = FL0_PG_ORDER;
 	q->fl[1].order = FL1_PG_ORDER;
+	q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
+	q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
 
-	q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1,
-				  sizeof(struct skb_frag_struct),
-				  GFP_KERNEL);
-	q->lro_nfrags = q->lro_frag_len = 0;
 	spin_lock_irq(&adapter->sge.reg_lock);
 
 	/* FL threshold comparison uses < */
 	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
 				   q->rspq.phys_addr, q->rspq.size,
-				   q->fl[0].buf_size, 1, 0);
+				   q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
 	if (ret)
 		goto err_unlock;
 
 	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
 		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
 					  q->fl[i].phys_addr, q->fl[i].size,
-					  q->fl[i].buf_size, p->cong_thres, 1,
-					  0);
+					  q->fl[i].buf_size - SGE_PG_RSVD,
+					  p->cong_thres, 1, 0);
 		if (ret)
 			goto err_unlock;
 	}
@@ -3041,8 +3092,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 	q->tx_q = netdevq;
 	t3_update_qset_coalesce(q, p);
 
-	init_lro_mgr(q, lro_mgr);
-
 	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
 			  GFP_KERNEL | __GFP_COMP);
 	if (!avail) {
@@ -3063,7 +3112,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
 		     V_NEWTIMER(q->rspq.holdoff_tmr));
 
-	mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
 	return 0;
 
 err_unlock:
@@ -3074,6 +3122,27 @@ err:
 }
 
 /**
+ *	t3_start_sge_timers - start SGE timer call backs
+ *	@adap: the adapter
+ *
+ *	Starts each SGE queue set's timer call back
+ */
+void t3_start_sge_timers(struct adapter *adap)
+{
+	int i;
+
+	for (i = 0; i < SGE_QSETS; ++i) {
+		struct sge_qset *q = &adap->sge.qs[i];
+
+		if (q->tx_reclaim_timer.function)
+			mod_timer(&q->tx_reclaim_timer,
+				  jiffies + TX_RECLAIM_PERIOD);
+
+		if (q->rx_reclaim_timer.function)
+			mod_timer(&q->rx_reclaim_timer,
+				  jiffies + RX_RECLAIM_PERIOD);
+	}
+}
+
+/**
 *	t3_stop_sge_timers - stop SGE timer call backs
 *	@adap: the adapter
 *
@@ -3088,6 +3157,8 @@ void t3_stop_sge_timers(struct adapter *adap)
 
 		if (q->tx_reclaim_timer.function)
 			del_timer_sync(&q->tx_reclaim_timer);
+		if (q->rx_reclaim_timer.function)
+			del_timer_sync(&q->rx_reclaim_timer);
 	}
 }
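(Editorial aside, not part of the patch.) The sge.c changes replace the inet_lro session manager with the then-new GRO entry points: lro_add_page() now only accumulates page fragments into a struct napi_gro_fraginfo and hands the completed packet to napi_gro_frags() or vlan_gro_frags(). The shape of that path, reduced to essentials (illustrative only; the function name is hypothetical, the field usage matches lro_add_page() above):

static void demo_rx_page_chunk(struct napi_struct *napi,
			       struct napi_gro_fraginfo *gl,
			       struct page *page, unsigned int offset,
			       unsigned int len, int complete)
{
	skb_frag_t *f = &gl->frags[gl->nr_frags++];

	f->page = page;		/* one free-list chunk of the packet */
	f->page_offset = offset;
	f->size = len;
	gl->len += len;

	if (!complete)		/* more chunks of this packet to come */
		return;

	gl->ip_summed = CHECKSUM_UNNECESSARY;	/* HW verified checksums */
	napi_gro_frags(napi, gl);		/* GRO may now coalesce */
	gl->nr_frags = gl->len = 0;		/* table reused next packet */
}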
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index ac2a974dfe37..31ed31a3428b 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -493,20 +493,20 @@ int t3_phy_lasi_intr_handler(struct cphy *phy)
 }
 
 static const struct adapter_info t3_adap_info[] = {
-	{2, 0,
+	{1, 1, 0,
 	 F_GPIO2_OEN | F_GPIO4_OEN |
 	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
 	 &mi1_mdio_ops, "Chelsio PE9000"},
-	{2, 0,
+	{1, 1, 0,
 	 F_GPIO2_OEN | F_GPIO4_OEN |
 	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
 	 &mi1_mdio_ops, "Chelsio T302"},
-	{1, 0,
+	{1, 0, 0,
 	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
 	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
 	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
 	 &mi1_mdio_ext_ops, "Chelsio T310"},
-	{2, 0,
+	{1, 1, 0,
 	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
 	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
 	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
@@ -514,7 +514,7 @@ static const struct adapter_info t3_adap_info[] = {
 	 &mi1_mdio_ext_ops, "Chelsio T320"},
 	{},
 	{},
-	{1, 0,
+	{1, 0, 0,
 	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
 	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
 	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
@@ -1153,6 +1153,38 @@ int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
 	return ret;
 }
 
+static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
+			       u32 *rx_hash_high, u32 *rx_hash_low)
+{
+	/* stop Rx unicast traffic */
+	t3_mac_disable_exact_filters(mac);
+
+	/* stop broadcast, multicast, promiscuous mode traffic */
+	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
+	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
+			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
+			 F_DISBCAST);
+
+	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
+	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);
+
+	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
+	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);
+
+	/* Leave time to drain max RX fifo */
+	msleep(1);
+}
+
+static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
+			       u32 rx_hash_high, u32 rx_hash_low)
+{
+	t3_mac_enable_exact_filters(mac);
+	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
+			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
+			 rx_cfg);
+	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
+	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
+}
+
 /**
 *	t3_link_changed - handle interface link changes
 *
@@ -1170,9 +1202,32 @@ void t3_link_changed(struct adapter *adapter, int port_id)
 	struct cphy *phy = &pi->phy;
 	struct cmac *mac = &pi->mac;
 	struct link_config *lc = &pi->link_config;
+	int force_link_down = 0;
 
 	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
 
+	if (!lc->link_ok && link_ok) {
+		u32 rx_cfg, rx_hash_high, rx_hash_low;
+		u32 status;
+
+		t3_xgm_intr_enable(adapter, port_id);
+		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
+		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
+		t3_mac_enable(mac, MAC_DIRECTION_RX);
+
+		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
+		if (status & F_LINKFAULTCHANGE) {
+			mac->stats.link_faults++;
+			force_link_down = 1;
+		}
+		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
+
+		if (force_link_down) {
+			t3_os_link_fault_handler(adapter, port_id);
+			return;
+		}
+	}
+
 	if (lc->requested_fc & PAUSE_AUTONEG)
 		fc &= lc->requested_fc;
 	else
@@ -1202,6 +1257,57 @@ void t3_link_changed(struct adapter *adapter, int port_id)
 	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
 }
 
+void t3_link_fault(struct adapter *adapter, int port_id)
+{
+	struct port_info *pi = adap2pinfo(adapter, port_id);
+	struct cmac *mac = &pi->mac;
+	struct cphy *phy = &pi->phy;
+	struct link_config *lc = &pi->link_config;
+	int link_ok, speed, duplex, fc, link_fault;
+	u32 rx_cfg, rx_hash_high, rx_hash_low;
+
+	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
+
+	if (adapter->params.rev > 0 && uses_xaui(adapter))
+		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
+
+	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
+	t3_mac_enable(mac, MAC_DIRECTION_RX);
+
+	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
+
+	link_fault = t3_read_reg(adapter,
+				 A_XGM_INT_STATUS + mac->offset);
+	link_fault &= F_LINKFAULTCHANGE;
+
+	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
+
+	if (link_fault) {
+		lc->link_ok = 0;
+		lc->speed = SPEED_INVALID;
+		lc->duplex = DUPLEX_INVALID;
+
+		t3_os_link_fault(adapter, port_id, 0);
+
+		/* Account link faults only when the phy reports a link up */
+		if (link_ok)
+			mac->stats.link_faults++;
+
+		msleep(1000);
+		t3_os_link_fault_handler(adapter, port_id);
+	} else {
+		if (link_ok)
+			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
+				     F_TXACTENABLE | F_RXEN);
+
+		pi->link_fault = 0;
+		lc->link_ok = (unsigned char)link_ok;
+		lc->speed = speed < 0 ? SPEED_INVALID : speed;
+		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
+		t3_os_link_fault(adapter, port_id, link_ok);
+	}
+}
+
 /**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
@@ -1323,7 +1429,7 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
 		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
-		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
+		       F_TXFIFO_UNDERRUN)
 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
 			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
 			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
@@ -1360,11 +1466,11 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
 		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
 		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
 		       V_MCAPARERRENB(M_MCAPARERRENB))
+#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
 		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
 		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
 		      F_MPS0 | F_CPL_SWITCH)
-
 /*
 * Interrupt handler for the PCIX1 module.
 */
@@ -1695,7 +1801,14 @@ static void mc7_intr_handler(struct mc7 *mc7)
 static int mac_intr_handler(struct adapter *adap, unsigned int idx)
 {
 	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
-	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
+	/*
+	 * We mask out interrupt causes for which we're not taking interrupts.
+	 * This allows us to use polling logic to monitor some of the other
+	 * conditions when taking interrupts would impose too much load on the
+	 * system.
+	 */
+	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
+		    ~F_RXFIFO_OVERFLOW;
 
 	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
 		mac->stats.tx_fifo_parity_err++;
@@ -1715,10 +1828,20 @@ static int mac_intr_handler(struct adapter *adap, unsigned int idx)
 		mac->stats.xaui_pcs_ctc_err++;
 	if (cause & F_XAUIPCSALIGNCHANGE)
 		mac->stats.xaui_pcs_align_change++;
+	if (cause & F_XGM_INT) {
+		t3_set_reg_field(adap,
+				 A_XGM_INT_ENABLE + mac->offset,
+				 F_XGM_INT, 0);
+		mac->stats.link_faults++;
+
+		t3_os_link_fault_handler(adap, idx);
+	}
 
 	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
+
 	if (cause & XGM_INTR_FATAL)
 		t3_fatal_err(adap);
+
 	return cause != 0;
 }
 
@@ -1924,6 +2047,22 @@ void t3_intr_clear(struct adapter *adapter)
 	t3_read_reg(adapter, A_PL_INT_CAUSE0);          /* flush */
 }
 
+void t3_xgm_intr_enable(struct adapter *adapter, int idx)
+{
+	struct port_info *pi = adap2pinfo(adapter, idx);
+
+	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
+		     XGM_EXTRA_INTR_MASK);
+}
+
+void t3_xgm_intr_disable(struct adapter *adapter, int idx)
+{
+	struct port_info *pi = adap2pinfo(adapter, idx);
+
+	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
+		     0x7ff);
+}
+
 /**
  *	t3_port_intr_enable - enable port-specific interrupts
  *	@adapter: associated adapter
@@ -1989,16 +2128,40 @@ void t3_port_intr_clear(struct adapter *adapter, int idx)
 static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
 				unsigned int type)
 {
-	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
-	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
-	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
-	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
+	if (type == F_RESPONSEQ) {
+		/*
+		 * Can't write the Response Queue Context bits for
+		 * Interrupt Armed or the Reserve bits after the chip
+		 * has been initialized out of reset.  Writing to these
+		 * bits can confuse the hardware.
+		 */
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
+	} else {
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
+	}
 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
 }
 
+/**
+ *	clear_sge_ctxt - completely clear an SGE context
+ *	@adapter: the adapter
+ *	@id: the context id
+ *	@type: the context type
+ *
+ *	Completely clear an SGE context.  Used predominantly at post-reset
+ *	initialization.  Note in particular that we don't skip writing to any
+ *	"sensitive bits" in the contexts the way that t3_sge_write_context()
+ *	does ...
+ */
 static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
 			  unsigned int type)
 {
@@ -2006,7 +2169,14 @@ static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
 	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
 	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
 	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
-	return t3_sge_write_context(adap, id, type);
+	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
+	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
+	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
+	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
+	t3_write_reg(adap, A_SG_CONTEXT_CMD,
+		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
+	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
 }
 
 /**
@@ -2590,10 +2760,10 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
 		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
 	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
 		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
-		     V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
+		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
 	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
 		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
-		     V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
+		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
 		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
 	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
 			 F_IPV6ENABLE | F_NICMODE);
@@ -3057,20 +3227,22 @@ int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
 }
 
 /*
- * Perform the bits of HW initialization that are dependent on the number
- * of available ports.
+ * Perform the bits of HW initialization that are dependent on the Tx
+ * channels being used.
  */
-static void init_hw_for_avail_ports(struct adapter *adap, int nports)
+static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
 {
 	int i;
 
-	if (nports == 1) {
+	if (chan_map != 3) {                                 /* one channel */
 		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
 		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
-		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
-			     F_PORT0ACTIVE | F_ENFORCEPKT);
-		t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
-	} else {
+		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
+			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
+					      F_TPTXPORT1EN | F_PORT1ACTIVE));
+		t3_write_reg(adap, A_PM1_TX_CFG,
+			     chan_map == 1 ? 0xffffffff : 0);
+	} else {                                            /* two channels */
 		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
 		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
 		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
@@ -3378,7 +3550,7 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
 	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
 	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
 	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
-	init_hw_for_avail_ports(adapter, adapter->params.nports);
+	chan_init_hw(adapter, adapter->params.chan_map);
 	t3_sge_init(adapter, &adapter->params.sge);
 
 	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
@@ -3615,9 +3787,18 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
 	get_pci_mode(adapter, &adapter->params.pci);
 
 	adapter->params.info = ai;
-	adapter->params.nports = ai->nports;
+	adapter->params.nports = ai->nports0 + ai->nports1;
+	adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
 	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
-	adapter->params.linkpoll_period = 0;
+	/*
+	 * We used to only run the "adapter check task" once a second if
+	 * we had PHYs which didn't support interrupts (we would check
+	 * their link status once a second).  Now we check other conditions
+	 * in that routine which could potentially impose a very high
+	 * interrupt load on the system.  As such, we now always scan the
+	 * adapter state once a second ...
+	 */
+	adapter->params.linkpoll_period = 10;
 	adapter->params.stats_update_period = is_10G(adapter) ?
 	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
 	adapter->params.pci.vpd_cap_addr =
@@ -3638,7 +3819,7 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
 	mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
 	mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
 
-	p->nchan = ai->nports;
+	p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
 	p->pmrx_size = t3_mc7_size(&adapter->pmrx);
 	p->pmtx_size = t3_mc7_size(&adapter->pmtx);
 	p->cm_size = t3_mc7_size(&adapter->cm);
@@ -3707,7 +3888,14 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
 		       ETH_ALEN);
 		init_link_config(&p->link_config, p->phy.caps);
 		p->phy.ops->power_down(&p->phy, 1);
-		if (!(p->phy.caps & SUPPORTED_IRQ))
+
+		/*
+		 * If the PHY doesn't support interrupts for link status
+		 * changes, schedule a scan of the adapter links at least
+		 * once a second.
+		 */
+		if (!(p->phy.caps & SUPPORTED_IRQ) &&
+		    adapter->params.linkpoll_period > 10)
 			adapter->params.linkpoll_period = 10;
 	}
 
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index b1b25c37aa10..7bf963ec5548 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -35,10 +35,10 @@
 #define DRV_DESC "Chelsio T3 Network Driver"
 #define DRV_NAME "cxgb3"
 /* Driver version */
-#define DRV_VERSION "1.1.1-ko"
+#define DRV_VERSION "1.1.2-ko"
 
 /* Firmware version */
 #define FW_VERSION_MAJOR 7
-#define FW_VERSION_MINOR 0
+#define FW_VERSION_MINOR 1
 #define FW_VERSION_MICRO 0
 #endif				/* __CHELSIO_VERSION_H */
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
index 9d7786937aad..f87f9435049f 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -150,7 +150,8 @@ int t3_mac_reset(struct cmac *mac)
 static int t3b2_mac_reset(struct cmac *mac)
 {
 	struct adapter *adap = mac->adapter;
-	unsigned int oft = mac->offset;
+	unsigned int oft = mac->offset, store;
+	int idx = macidx(mac);
 	u32 val;
 
 	if (!macidx(mac))
@@ -158,14 +159,28 @@
 	else
 		t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);
 
+	/* Stop NIC traffic to reduce the number of TXTOGGLES */
+	t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 0);
+	/* Ensure TX drains */
+	t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 0);
+
 	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
 	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);    /* flush */
 
+	/* Store A_TP_TX_DROP_CFG_CH0 */
+	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+	store = t3_read_reg(adap, A_TP_TX_DROP_CFG_CH0 + idx);
+
+	msleep(10);
+
+	/* Change DROP_CFG to 0xc0000011 */
+	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+	t3_write_reg(adap, A_TP_PIO_DATA, 0xc0000011);
+
+	/* Check for xgm Rx fifo empty */
+	/* Increased loop count to 1000 from 5 cover 1G and 100Mbps case */
 	if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
-			    0x80000000, 1, 5, 2)) {
+			    0x80000000, 1, 1000, 2)) {
 		CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
 		       macidx(mac));
 		return -1;
@@ -191,11 +206,21 @@
 			 F_DISPAUSEFRAMES | F_EN1536BFRAMES |
 			 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);
 
-	if (!macidx(mac))
+	/* Restore the DROP_CFG */
+	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+	t3_write_reg(adap, A_TP_PIO_DATA, store);
+
+	if (!idx)
 		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
 	else
 		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);
 
+	/* re-enable nic traffic */
+	t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1);
+
+	/* Set: re-enable NIC traffic */
+	t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1);
+
 	return 0;
 }
 
@@ -236,7 +261,7 @@ int t3_mac_set_num_ucast(struct cmac *mac, int n)
 	return 0;
 }
 
-static void disable_exact_filters(struct cmac *mac)
+void t3_mac_disable_exact_filters(struct cmac *mac)
 {
 	unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1;
 
@@ -247,7 +272,7 @@ static void disable_exact_filters(struct cmac *mac)
 	t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1);	/* flush */
 }
 
-static void enable_exact_filters(struct cmac *mac)
+void t3_mac_enable_exact_filters(struct cmac *mac)
 {
 	unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1;
 
@@ -332,18 +357,9 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
 		return -EINVAL;
 	t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
 
-	/*
-	 * Adjust the PAUSE frame watermarks.  We always set the LWM, and the
-	 * HWM only if flow-control is enabled.
-	 */
-	hwm = max_t(unsigned int, MAC_RXFIFO_SIZE - 3 * mtu,
-		    MAC_RXFIFO_SIZE * 38 / 100);
-	hwm = min(hwm, MAC_RXFIFO_SIZE - 8192);
-	lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
-
 	if (adap->params.rev >= T3_REV_B2 &&
 	    (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
-		disable_exact_filters(mac);
+		t3_mac_disable_exact_filters(mac);
 		v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset);
 		t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset,
 				 F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST);
@@ -355,14 +371,14 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
 		if (t3_wait_op_done(adap, reg + mac->offset,
 				    F_RXFIFO_EMPTY, 1, 20, 5)) {
 			t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
-			enable_exact_filters(mac);
+			t3_mac_enable_exact_filters(mac);
 			return -EIO;
 		}
 		t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
 				 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
 				 V_RXMAXPKTSIZE(mtu));
 		t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
-		enable_exact_filters(mac);
+		t3_mac_enable_exact_filters(mac);
 	} else
 		t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
 				 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
@@ -452,9 +468,12 @@ int t3_mac_enable(struct cmac *mac, int which)
 
 	if (which & MAC_DIRECTION_TX) {
 		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
-		t3_write_reg(adap, A_TP_PIO_DATA, 0xc0ede401);
+		t3_write_reg(adap, A_TP_PIO_DATA,
+			     adap->params.rev == T3_REV_C ?
+			     0xc4ffff01 : 0xc0ede401);
 		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
-		t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
+		t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx,
+				 adap->params.rev == T3_REV_C ? 0 : 1 << idx);
 
 		t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
 
@@ -510,15 +529,12 @@ int t3b2_mac_watchdog_task(struct cmac *mac)
 	struct adapter *adap = mac->adapter;
 	struct mac_stats *s = &mac->stats;
 	unsigned int tx_tcnt, tx_xcnt;
-	unsigned int tx_mcnt = s->tx_frames;
-	unsigned int rx_mcnt = s->rx_frames;
-	unsigned int rx_xcnt;
+	u64 tx_mcnt = s->tx_frames;
 	int status;
 
 	status = 0;
 	tx_xcnt = 1;		/* By default tx_xcnt is making progress */
 	tx_tcnt = mac->tx_tcnt;	/* If tx_mcnt is progressing ignore tx_tcnt */
-	rx_xcnt = 1;		/* By default rx_xcnt is making progress */
 	if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) {
 		tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
 						A_XGM_TX_SPI4_SOP_EOP_CNT +
@@ -529,11 +545,11 @@ int t3b2_mac_watchdog_task(struct cmac *mac)
 			tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
 						      A_TP_PIO_DATA)));
 		} else {
-			goto rxcheck;
+			goto out;
 		}
 	} else {
 		mac->toggle_cnt = 0;
-		goto rxcheck;
+		goto out;
 	}
 
 	if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) {
@@ -546,23 +562,6 @@ int t3b2_mac_watchdog_task(struct cmac *mac)
 		}
 	} else {
 		mac->toggle_cnt = 0;
-		goto rxcheck;
-	}
-
-rxcheck:
-	if (rx_mcnt != mac->rx_mcnt) {
-		rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
-						A_XGM_RX_SPI4_SOP_EOP_CNT +
-						mac->offset))) +
-			  (s->rx_fifo_ovfl -
-			   mac->rx_ocnt);
-		mac->rx_ocnt = s->rx_fifo_ovfl;
-	} else
-		goto out;
-
-	if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 &&
-	    mac->rx_xcnt == 0) {
-		status = 2;
 		goto out;
 	}
 
@@ -570,8 +569,6 @@ out:
 	mac->tx_tcnt = tx_tcnt;
 	mac->tx_xcnt = tx_xcnt;
 	mac->tx_mcnt = s->tx_frames;
-	mac->rx_xcnt = rx_xcnt;
-	mac->rx_mcnt = s->rx_frames;
 	mac->rx_pause = s->rx_pause;
 	if (status == 1) {
 		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
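
The core of the link-fault changes in this patch is a fixed sequence: gate all RX traffic, give the RX FIFO time to drain, sample A_XGM_INT_STATUS for F_LINKFAULTCHANGE, then restore the saved RX configuration. The stand-alone C model below distills that order of operations as a sketch; the register file, the read_reg()/write_reg() helpers, and the bit position chosen for F_LINKFAULTCHANGE are all stand-ins invented for illustration, not driver API (in the driver these are t3_read_reg()/t3_write_reg() on the XGMAC register block).

/*
 * Model of the sequence in t3_gate_rx_traffic()/t3_open_rx_traffic()
 * as used by t3_link_changed().  Illustrative stubs only.
 */
#include <stdio.h>
#include <stdint.h>

enum { RX_CFG, RX_HASH_HIGH, RX_HASH_LOW, INT_STATUS, NREGS };

#define F_LINKFAULTCHANGE (1u << 9)	/* placeholder bit position */

static uint32_t regs[NREGS];		/* fake register file */

static uint32_t read_reg(int r)          { return regs[r]; }
static void write_reg(int r, uint32_t v) { regs[r] = v; }

/* Save the RX configuration, then block ucast/mcast/bcast traffic. */
static void gate_rx_traffic(uint32_t *cfg, uint32_t *hi, uint32_t *lo)
{
	*cfg = read_reg(RX_CFG);
	write_reg(RX_CFG, 0);	/* driver also disables exact-match filters */
	*hi = read_reg(RX_HASH_HIGH);
	write_reg(RX_HASH_HIGH, 0);
	*lo = read_reg(RX_HASH_LOW);
	write_reg(RX_HASH_LOW, 0);
	/* the driver msleep(1)s here to let the RX FIFO drain */
}

/* Restore the RX state saved by gate_rx_traffic(). */
static void open_rx_traffic(uint32_t cfg, uint32_t hi, uint32_t lo)
{
	write_reg(RX_CFG, cfg);
	write_reg(RX_HASH_HIGH, hi);
	write_reg(RX_HASH_LOW, lo);
}

int main(void)
{
	uint32_t cfg, hi, lo;
	int force_link_down = 0;

	regs[INT_STATUS] = F_LINKFAULTCHANGE;	/* simulate a latched fault */

	gate_rx_traffic(&cfg, &hi, &lo);
	if (read_reg(INT_STATUS) & F_LINKFAULTCHANGE)
		force_link_down = 1;	/* driver counts a link fault here */
	open_rx_traffic(cfg, hi, lo);

	printf("force_link_down = %d\n", force_link_down);
	return 0;
}

With traffic gated, a still-set fault bit can only mean a genuine link fault rather than FIFO pressure, which is why the driver forces the link down and reruns the state machine in that case.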
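The replacement of init_hw_for_avail_ports() with chan_init_hw() rests on a two-bit channel map computed in t3_prep_adapter(): bit 0 is set when Tx channel 0 has ports, bit 1 when channel 1 does, so chan_map == 3 means both channels are active. A minimal sketch of that encoding follows; the port counts are made-up values standing in for the ai->nports0/ai->nports1 fields.

#include <stdio.h>

int main(void)
{
	/* example per-channel port counts; real values come from adapter_info */
	unsigned int nports0 = 1, nports1 = 1;

	/* bit 0: channel 0 in use, bit 1: channel 1 in use (as in the patch) */
	unsigned int chan_map = !!nports0 | (!!nports1 << 1);

	unsigned int nports = nports0 + nports1;
	unsigned int nchan = (chan_map == 3) ? 2 : 1;

	/* chan_map != 3 means a single channel: 1 selects ch0, 2 selects ch1 */
	printf("nports=%u chan_map=%u nchan=%u single_channel=%s\n",
	       nports, chan_map, nchan, chan_map != 3 ? "yes" : "no");
	return 0;
}

Encoding which channel is in use, not just how many, is what lets chan_init_hw() pick F_TPTXPORT0EN/F_PORT0ACTIVE versus F_TPTXPORT1EN/F_PORT1ACTIVE in the single-channel case.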