Diffstat (limited to 'drivers/net/ethernet/intel/ice')
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice.h              |  35
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_adminq_cmd.h   |  23
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_common.c       |  44
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_controlq.c     | 223
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_ethtool.c      |  26
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_hw_autogen.h   |   3
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h    |  79
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_lib.c          | 152
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_main.c         |  94
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_sched.c        | 122
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_sched.h        |   3
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_sriov.c        |   2
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_switch.c       |  55
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_txrx.c         |  46
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_type.h         |   4
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c  |  34
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h  |   2
17 files changed, 536 insertions, 411 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index b8548370f1c7..a385575600f6 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -52,7 +52,6 @@ extern const char ice_drv_ver[];
 #define ICE_MBXQ_LEN		64
 #define ICE_MIN_MSIX		2
 #define ICE_NO_VSI		0xffff
-#define ICE_MAX_VSI_ALLOC	130
 #define ICE_MAX_TXQS		2048
 #define ICE_MAX_RXQS		2048
 #define ICE_VSI_MAP_CONTIG	0
@@ -97,14 +96,14 @@ extern const char ice_drv_ver[];
 #define ice_for_each_vsi(pf, i) \
	for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)

-/* Macros for each tx/rx ring in a VSI */
+/* Macros for each Tx/Rx ring in a VSI */
 #define ice_for_each_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_txq; (i)++)

 #define ice_for_each_rxq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)

-/* Macros for each allocated tx/rx ring whether used or not in a VSI */
+/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
 #define ice_for_each_alloc_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)

@@ -113,7 +112,9 @@ extern const char ice_drv_ver[];
 struct ice_tc_info {
	u16 qoffset;
-	u16 qcount;
+	u16 qcount_tx;
+	u16 qcount_rx;
+	u8 netdev_tc;
 };

 struct ice_tc_cfg {
@@ -149,10 +150,10 @@ enum ice_state {
	__ICE_RESET_FAILED,		/* set by reset/rebuild */
	/* When checking for the PF to be in a nominal operating state, the
	 * bits that are grouped at the beginning of the list need to be
-	 * checked.  Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
-	 * be checked.  If you need to add a bit into consideration for nominal
+	 * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
+	 * be checked. If you need to add a bit into consideration for nominal
	 * operating state, it must be added before
-	 * __ICE_STATE_NOMINAL_CHECK_BITS.  Do not move this entry's position
+	 * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
	 * without appropriate consideration.
	 */
	__ICE_STATE_NOMINAL_CHECK_BITS,
@@ -182,8 +183,8 @@ struct ice_vsi {
	struct ice_sw *vsw;		 /* switch this VSI is on */
	struct ice_pf *back;		 /* back pointer to PF */
	struct ice_port_info *port_info; /* back pointer to port_info */
-	struct ice_ring **rx_rings;	 /* rx ring array */
-	struct ice_ring **tx_rings;	 /* tx ring array */
+	struct ice_ring **rx_rings;	 /* Rx ring array */
+	struct ice_ring **tx_rings;	 /* Tx ring array */
	struct ice_q_vector **q_vectors; /* q_vector array */

	irqreturn_t (*irq_handler)(int irq, void *data);
@@ -200,8 +201,8 @@ struct ice_vsi {
	int sw_base_vector;		/* Irq base for OS reserved vectors */
	int hw_base_vector;		/* HW (absolute) index of a vector */
	enum ice_vsi_type type;
-	u16 vsi_num;			 /* HW (absolute) index of this VSI */
-	u16 idx;			 /* software index in pf->vsi[] */
+	u16 vsi_num;			/* HW (absolute) index of this VSI */
+	u16 idx;			/* software index in pf->vsi[] */

	/* Interrupt thresholds */
	u16 work_lmt;
@@ -254,8 +255,8 @@ struct ice_q_vector {
	struct ice_ring_container tx;
	struct irq_affinity_notify affinity_notify;
	u16 v_idx;			/* index in the vsi->q_vector array. */
-	u8 num_ring_tx;			/* total number of tx rings in vector */
-	u8 num_ring_rx;			/* total number of rx rings in vector */
+	u8 num_ring_tx;			/* total number of Tx rings in vector */
+	u8 num_ring_rx;			/* total number of Rx rings in vector */
	char name[ICE_INT_NAME_STR_LEN];
	/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
	 * value to the device
@@ -307,10 +308,10 @@ struct ice_pf {
	u32 hw_oicr_idx;	/* Other interrupt cause vector HW index */
	u32 num_avail_hw_msix;	/* remaining HW MSIX vectors left unclaimed */
	u32 num_lan_msix;	/* Total MSIX vectors for base driver */
-	u16 num_lan_tx;		/* num lan tx queues setup */
-	u16 num_lan_rx;		/* num lan rx queues setup */
-	u16 q_left_tx;		/* remaining num tx queues left unclaimed */
-	u16 q_left_rx;		/* remaining num rx queues left unclaimed */
+	u16 num_lan_tx;		/* num lan Tx queues setup */
+	u16 num_lan_rx;		/* num lan Rx queues setup */
+	u16 q_left_tx;		/* remaining num Tx queues left unclaimed */
+	u16 q_left_rx;		/* remaining num Rx queues left unclaimed */
	u16 next_vsi;		/* Next free slot in pf->vsi[] - 0-based! */
	u16 num_alloc_vsi;
	u16 corer_count;	/* Core reset count */
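The enum ice_state comment above depends on bit position relative to the __ICE_STATE_NOMINAL_CHECK_BITS sentinel: only bits declared before it gate "nominal" operation. A rough standalone sketch of that convention (not driver code; the enum values and the is_nominal() helper are invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirror of the layout described above: flags before the sentinel are
     * part of the nominal-state check, flags after it are ignored.
     */
    enum state_bit {
        STATE_DOWN,
        STATE_NEEDS_RESTART,
        STATE_RESET_FAILED,
        STATE_NOMINAL_CHECK_BITS,   /* sentinel, not a real flag */
        STATE_ADMINQ_EVENT_PENDING, /* past the sentinel: not checked */
    };

    /* hypothetical helper: nominal if no bit below the sentinel is set */
    static bool is_nominal(unsigned long state)
    {
        unsigned long check_mask = (1UL << STATE_NOMINAL_CHECK_BITS) - 1;

        return !(state & check_mask);
    }

    int main(void)
    {
        unsigned long state = 1UL << STATE_ADMINQ_EVENT_PENDING;

        printf("nominal: %d\n", is_nominal(state)); /* 1: bit is past sentinel */
        state |= 1UL << STATE_RESET_FAILED;
        printf("nominal: %d\n", is_nominal(state)); /* 0 */
        return 0;
    }

This is why the comment insists new "consideration" bits must be added before the sentinel: the mask is derived from its position.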
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 6653555f55dd..fcdcd80b18e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -5,7 +5,7 @@
 #define _ICE_ADMINQ_CMD_H_

 /* This header file defines the Admin Queue commands, error codes and
- * descriptor format.  It is shared between Firmware and Software.
+ * descriptor format. It is shared between Firmware and Software.
  */

 #define ICE_MAX_VSI 768
@@ -87,6 +87,7 @@ struct ice_aqc_list_caps {
 /* Device/Function buffer entry, repeated per reported capability */
 struct ice_aqc_list_caps_elem {
	__le16 cap;
+#define ICE_AQC_CAPS_VALID_FUNCTIONS		0x0005
 #define ICE_AQC_CAPS_SRIOV			0x0012
 #define ICE_AQC_CAPS_VF				0x0013
 #define ICE_AQC_CAPS_VSI			0x0017
@@ -462,7 +463,7 @@ struct ice_aqc_sw_rules {
 };

 /* Add/Update/Get/Remove lookup Rx/Tx command/response entry
- * This structures describes the lookup rules and associated actions.  "index"
+ * This structures describes the lookup rules and associated actions. "index"
  * is returned as part of a response to a successful Add command, and can be
  * used to identify the rule for Update/Get/Remove commands.
  */
@@ -1065,10 +1066,10 @@ struct ice_aqc_nvm {
 #define ICE_AQC_NVM_LAST_CMD		BIT(0)
 #define ICE_AQC_NVM_PCIR_REQ		BIT(0)	/* Used by NVM Update reply */
 #define ICE_AQC_NVM_PRESERVATION_S	1
-#define ICE_AQC_NVM_PRESERVATION_M	(3 << CSR_AQ_NVM_PRESERVATION_S)
-#define ICE_AQC_NVM_NO_PRESERVATION	(0 << CSR_AQ_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_PRESERVATION_M	(3 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_NO_PRESERVATION	(0 << ICE_AQC_NVM_PRESERVATION_S)
 #define ICE_AQC_NVM_PRESERVE_ALL	BIT(1)
-#define ICE_AQC_NVM_PRESERVE_SELECTED	(3 << CSR_AQ_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_PRESERVE_SELECTED	(3 << ICE_AQC_NVM_PRESERVATION_S)
 #define ICE_AQC_NVM_FLASH_ONLY		BIT(7)
	__le16 module_typeid;
	__le16 length;
@@ -1110,7 +1111,7 @@ struct ice_aqc_get_set_rss_keys {
 };

 /* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */
-struct ice_aqc_get_set_rss_lut { 
+struct ice_aqc_get_set_rss_lut {
 #define ICE_AQC_GSET_RSS_LUT_VSI_VALID	BIT(15)
 #define ICE_AQC_GSET_RSS_LUT_VSI_ID_S	0
 #define ICE_AQC_GSET_RSS_LUT_VSI_ID_M	(0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
@@ -1314,10 +1315,10 @@ struct ice_aqc_get_clear_fw_log {
  * @params: command-specific parameters
  *
  * Descriptor format for commands the driver posts on the Admin Transmit Queue
- * (ATQ).  The firmware writes back onto the command descriptor and returns
- * the result of the command.  Asynchronous events that are not an immediate
+ * (ATQ). The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
  * result of the command are written to the Admin Receive Queue (ARQ) using
- * the same descriptor format.  Descriptors are in little-endian notation with
+ * the same descriptor format. Descriptors are in little-endian notation with
  * 32-bit words.
  */
 struct ice_aq_desc {
@@ -1379,10 +1380,10 @@ struct ice_aq_desc {

 /* error codes */
 enum ice_aq_err {
-	ICE_AQ_RC_OK		= 0,  /* success */
+	ICE_AQ_RC_OK		= 0,  /* Success */
	ICE_AQ_RC_ENOMEM	= 9,  /* Out of memory */
	ICE_AQ_RC_EBUSY		= 12, /* Device or resource busy */
-	ICE_AQ_RC_EEXIST	= 13, /* object already exists */
+	ICE_AQ_RC_EEXIST	= 13, /* Object already exists */
	ICE_AQ_RC_ENOSPC	= 16, /* No space left or allocation failure */
 };
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 554fd707a6d6..4c1d35da940d 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -405,9 +405,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

-	ice_init_def_sw_recp(hw);
-
-	return 0;
+	return ice_init_def_sw_recp(hw);
 }

 /**
@@ -715,7 +713,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)

	hw->evb_veb = true;

-	/* Query the allocated resources for tx scheduler */
+	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
@@ -958,7 +956,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
  * ice_copy_rxq_ctx_to_hw
  * @hw: pointer to the hardware structure
  * @ice_rxq_ctx: pointer to the rxq context
- * @rxq_index: the index of the rx queue
+ * @rxq_index: the index of the Rx queue
  *
  * Copies rxq context from dense structure to hw register space
  */
@@ -1014,7 +1012,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
  * ice_write_rxq_ctx
  * @hw: pointer to the hardware structure
  * @rlan_ctx: pointer to the rxq context
- * @rxq_index: the index of the rx queue
+ * @rxq_index: the index of the Rx queue
  *
  * Converts rxq context from sparse to dense structure and then writes
  * it to hw register space
@@ -1387,6 +1385,27 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
 }

 /**
+ * ice_get_guar_num_vsi - determine number of guar VSI for a PF
+ * @hw: pointer to the hw structure
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of VSI per PF.
+ */
+static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
+{
+	u8 funcs;
+
+#define ICE_CAPS_VALID_FUNCS_M	0xFF
+	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
+			 ICE_CAPS_VALID_FUNCS_M);
+
+	if (!funcs)
+		return 0;
+
+	return ICE_MAX_VSI / funcs;
+}
+
+/**
  * ice_parse_caps - parse function/device capabilities
  * @hw: pointer to the hw struct
  * @buf: pointer to a buffer containing function/device capability records
@@ -1428,6 +1447,12 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
+		case ICE_AQC_CAPS_VALID_FUNCTIONS:
+			caps->valid_functions = number;
+			ice_debug(hw, ICE_DBG_INIT,
+				  "HW caps: Valid Functions = %d\n",
+				  caps->valid_functions);
+			break;
		case ICE_AQC_CAPS_SRIOV:
			caps->sr_iov_1_1 = (number == 1);
			ice_debug(hw, ICE_DBG_INIT,
@@ -1457,10 +1482,10 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
					  "HW caps: Dev.VSI cnt = %d\n",
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
-				func_p->guaranteed_num_vsi = number;
+				func_p->guar_num_vsi = ice_get_guar_num_vsi(hw);
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Func.VSI cnt = %d\n",
-					  func_p->guaranteed_num_vsi);
+					  number);
			}
			break;
		case ICE_AQC_CAPS_RSS:
@@ -1688,8 +1713,7 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
  * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
  * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
  */
-static u16
-ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
+static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
 {
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
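ice_get_guar_num_vsi() above divides ICE_MAX_VSI evenly among the PFs set in the valid-functions bitmap, replacing the old fixed ICE_MAX_VSI_ALLOC cap. A standalone sketch of the same arithmetic (hweight8() is re-implemented with __builtin_popcount() for illustration; the example bitmaps are made up):

    #include <stdio.h>

    #define ICE_MAX_VSI            768
    #define ICE_CAPS_VALID_FUNCS_M 0xFF

    /* userspace stand-in for the kernel's hweight8() */
    static unsigned int hweight8(unsigned int v)
    {
        return (unsigned int)__builtin_popcount(v & 0xFF);
    }

    static unsigned int get_guar_num_vsi(unsigned int valid_functions)
    {
        unsigned int funcs = hweight8(valid_functions & ICE_CAPS_VALID_FUNCS_M);

        if (!funcs)
            return 0;
        return ICE_MAX_VSI / funcs;
    }

    int main(void)
    {
        /* a device reporting 8 active PFs: 768 / 8 = 96 VSIs per PF */
        printf("%u\n", get_guar_num_vsi(0xFF));
        /* 2 active PFs: 768 / 2 = 384 VSIs per PF */
        printf("%u\n", get_guar_num_vsi(0x03));
        return 0;
    }

The number-of-functions bitmap comes from the new ICE_AQC_CAPS_VALID_FUNCTIONS capability parsed a few hunks below.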
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 84c967294eaf..2bf5e11f559a 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -3,6 +3,26 @@

 #include "ice_common.h"

+#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
+do {								\
+	(qinfo)->sq.head = prefix##_ATQH;			\
+	(qinfo)->sq.tail = prefix##_ATQT;			\
+	(qinfo)->sq.len = prefix##_ATQLEN;			\
+	(qinfo)->sq.bah = prefix##_ATQBAH;			\
+	(qinfo)->sq.bal = prefix##_ATQBAL;			\
+	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
+	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
+	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
+	(qinfo)->rq.head = prefix##_ARQH;			\
+	(qinfo)->rq.tail = prefix##_ARQT;			\
+	(qinfo)->rq.len = prefix##_ARQLEN;			\
+	(qinfo)->rq.bah = prefix##_ARQBAH;			\
+	(qinfo)->rq.bal = prefix##_ARQBAL;			\
+	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
+	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
+	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
+} while (0)
+
 /**
  * ice_adminq_init_regs - Initialize AdminQ registers
  * @hw: pointer to the hardware structure
@@ -13,23 +33,7 @@ static void ice_adminq_init_regs(struct ice_hw *hw)
 {
	struct ice_ctl_q_info *cq = &hw->adminq;

-	cq->sq.head = PF_FW_ATQH;
-	cq->sq.tail = PF_FW_ATQT;
-	cq->sq.len = PF_FW_ATQLEN;
-	cq->sq.bah = PF_FW_ATQBAH;
-	cq->sq.bal = PF_FW_ATQBAL;
-	cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M;
-	cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
-	cq->sq.head_mask = PF_FW_ATQH_ATQH_M;
-
-	cq->rq.head = PF_FW_ARQH;
-	cq->rq.tail = PF_FW_ARQT;
-	cq->rq.len = PF_FW_ARQLEN;
-	cq->rq.bah = PF_FW_ARQBAH;
-	cq->rq.bal = PF_FW_ARQBAL;
-	cq->rq.len_mask = PF_FW_ARQLEN_ARQLEN_M;
-	cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
-	cq->rq.head_mask = PF_FW_ARQH_ARQH_M;
+	ICE_CQ_INIT_REGS(cq, PF_FW);
 }

 /**
@@ -42,24 +46,7 @@ static void ice_mailbox_init_regs(struct ice_hw *hw)
 {
	struct ice_ctl_q_info *cq = &hw->mailboxq;

-	/* set head and tail registers in our local struct */
-	cq->sq.head = PF_MBX_ATQH;
-	cq->sq.tail = PF_MBX_ATQT;
-	cq->sq.len = PF_MBX_ATQLEN;
-	cq->sq.bah = PF_MBX_ATQBAH;
-	cq->sq.bal = PF_MBX_ATQBAL;
-	cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M;
-	cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M;
-	cq->sq.head_mask = PF_MBX_ATQH_ATQH_M;
-
-	cq->rq.head = PF_MBX_ARQH;
-	cq->rq.tail = PF_MBX_ARQT;
-	cq->rq.len = PF_MBX_ARQLEN;
-	cq->rq.bah = PF_MBX_ARQBAH;
-	cq->rq.bal = PF_MBX_ARQBAL;
-	cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M;
-	cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M;
-	cq->rq.head_mask = PF_MBX_ARQH_ARQH_M;
+	ICE_CQ_INIT_REGS(cq, PF_MBX);
 }

 /**
@@ -131,37 +118,20 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 }

 /**
- * ice_free_ctrlq_sq_ring - Free Control Transmit Queue (ATQ) rings
+ * ice_free_cq_ring - Free control queue ring
  * @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
+ * @ring: pointer to the specific control queue ring
  *
- * This assumes the posted send buffers have already been cleaned
+ * This assumes the posted buffers have already been cleaned
  * and de-allocated
  */
-static void ice_free_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
 {
-	dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
-			   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
-	cq->sq.desc_buf.va = NULL;
-	cq->sq.desc_buf.pa = 0;
-	cq->sq.desc_buf.size = 0;
-}
-
-/**
- * ice_free_ctrlq_rq_ring - Free Control Receive Queue (ARQ) rings
- * @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
- *
- * This assumes the posted receive buffers have already been cleaned
- * and de-allocated
- */
-static void ice_free_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
-{
-	dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.desc_buf.size,
-			   cq->rq.desc_buf.va, cq->rq.desc_buf.pa);
-	cq->rq.desc_buf.va = NULL;
-	cq->rq.desc_buf.pa = 0;
-	cq->rq.desc_buf.size = 0;
+	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
+			   ring->desc_buf.va, ring->desc_buf.pa);
+	ring->desc_buf.va = NULL;
+	ring->desc_buf.pa = 0;
+	ring->desc_buf.size = 0;
 }

 /**
@@ -280,54 +250,23 @@ unwind_alloc_sq_bufs:
	return ICE_ERR_NO_MEMORY;
 }

-/**
- * ice_free_rq_bufs - Free ARQ buffer info elements
- * @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
- */
-static void ice_free_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
-{
-	int i;
-
-	/* free descriptors */
-	for (i = 0; i < cq->num_rq_entries; i++) {
-		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
-				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
-		cq->rq.r.rq_bi[i].va = NULL;
-		cq->rq.r.rq_bi[i].pa = 0;
-		cq->rq.r.rq_bi[i].size = 0;
-	}
-
-	/* free the dma header */
-	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
-}
-
-/**
- * ice_free_sq_bufs - Free ATQ buffer info elements
- * @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
- */
-static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static enum ice_status
+ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
 {
-	int i;
+	/* Clear Head and Tail */
+	wr32(hw, ring->head, 0);
+	wr32(hw, ring->tail, 0);

-	/* only unmap if the address is non-NULL */
-	for (i = 0; i < cq->num_sq_entries; i++)
-		if (cq->sq.r.sq_bi[i].pa) {
-			dmam_free_coherent(ice_hw_to_dev(hw),
-					   cq->sq.r.sq_bi[i].size,
-					   cq->sq.r.sq_bi[i].va,
-					   cq->sq.r.sq_bi[i].pa);
-			cq->sq.r.sq_bi[i].va = NULL;
-			cq->sq.r.sq_bi[i].pa = 0;
-			cq->sq.r.sq_bi[i].size = 0;
-		}
+	/* set starting point */
+	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
+	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
+	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));

-	/* free the buffer info list */
-	devm_kfree(ice_hw_to_dev(hw), cq->sq.cmd_buf);
+	/* Check one register to verify that config was applied */
+	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
+		return ICE_ERR_AQ_ERROR;

-	/* free the dma header */
-	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
+	return 0;
 }

 /**
@@ -340,23 +279,7 @@ static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 static enum ice_status
 ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 {
-	u32 reg = 0;
-
-	/* Clear Head and Tail */
-	wr32(hw, cq->sq.head, 0);
-	wr32(hw, cq->sq.tail, 0);
-
-	/* set starting point */
-	wr32(hw, cq->sq.len, (cq->num_sq_entries | cq->sq.len_ena_mask));
-	wr32(hw, cq->sq.bal, lower_32_bits(cq->sq.desc_buf.pa));
-	wr32(hw, cq->sq.bah, upper_32_bits(cq->sq.desc_buf.pa));
-
-	/* Check one register to verify that config was applied */
-	reg = rd32(hw, cq->sq.bal);
-	if (reg != lower_32_bits(cq->sq.desc_buf.pa))
-		return ICE_ERR_AQ_ERROR;
-
-	return 0;
+	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
 }

 /**
@@ -369,25 +292,15 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 static enum ice_status
 ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 {
-	u32 reg = 0;
-
-	/* Clear Head and Tail */
-	wr32(hw, cq->rq.head, 0);
-	wr32(hw, cq->rq.tail, 0);
+	enum ice_status status;

-	/* set starting point */
-	wr32(hw, cq->rq.len, (cq->num_rq_entries | cq->rq.len_ena_mask));
-	wr32(hw, cq->rq.bal, lower_32_bits(cq->rq.desc_buf.pa));
-	wr32(hw, cq->rq.bah, upper_32_bits(cq->rq.desc_buf.pa));
+	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
+	if (status)
+		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

-	/* Check one register to verify that config was applied */
-	reg = rd32(hw, cq->rq.bal);
-	if (reg != lower_32_bits(cq->rq.desc_buf.pa))
-		return ICE_ERR_AQ_ERROR;
-
	return 0;
 }

@@ -444,7 +357,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
	goto init_ctrlq_exit;

 init_ctrlq_free_rings:
-	ice_free_ctrlq_sq_ring(hw, cq);
+	ice_free_cq_ring(hw, &cq->sq);

 init_ctrlq_exit:
	return ret_code;
@@ -503,12 +416,33 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
	goto init_ctrlq_exit;

 init_ctrlq_free_rings:
-	ice_free_ctrlq_rq_ring(hw, cq);
+	ice_free_cq_ring(hw, &cq->rq);

 init_ctrlq_exit:
	return ret_code;
 }

+#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
+do {									\
+	int i;								\
+	/* free descriptors */						\
+	for (i = 0; i < (qi)->num_##ring##_entries; i++)		\
+		if ((qi)->ring.r.ring##_bi[i].pa) {			\
+			dmam_free_coherent(ice_hw_to_dev(hw),		\
+					   (qi)->ring.r.ring##_bi[i].size,\
+					   (qi)->ring.r.ring##_bi[i].va,\
+					   (qi)->ring.r.ring##_bi[i].pa);\
+			(qi)->ring.r.ring##_bi[i].va = NULL;		\
+			(qi)->ring.r.ring##_bi[i].pa = 0;		\
+			(qi)->ring.r.ring##_bi[i].size = 0;		\
+		}							\
+	/* free the buffer info list */					\
+	if ((qi)->ring.cmd_buf)						\
+		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
+	/* free dma head */						\
+	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
+} while (0)
+
 /**
  * ice_shutdown_sq - shutdown the Control ATQ
  * @hw: pointer to the hardware structure
@@ -538,8 +472,8 @@ ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
-	ice_free_sq_bufs(hw, cq);
-	ice_free_ctrlq_sq_ring(hw, cq);
+	ICE_FREE_CQ_BUFS(hw, cq, sq);
+	ice_free_cq_ring(hw, &cq->sq);

 shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
@@ -606,8 +540,8 @@ ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
-	ice_free_rq_bufs(hw, cq);
-	ice_free_ctrlq_rq_ring(hw, cq);
+	ICE_FREE_CQ_BUFS(hw, cq, rq);
+	ice_free_cq_ring(hw, &cq->rq);

 shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
@@ -657,7 +591,6 @@ init_ctrlq_free_rq:
  * - cq->num_rq_entries
  * - cq->rq_buf_size
  * - cq->sq_buf_size
- *
  */
 static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 {
@@ -841,7 +774,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
  * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
  * @cd: pointer to command details structure
  *
- * This is the main send command routine for the ATQ.  It runs the q,
+ * This is the main send command routine for the ATQ. It runs the queue,
  * cleans the queue, etc.
  */
 enum ice_status
@@ -1035,7 +968,7 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
  * @pending: number of events that could be left to process
  *
  * This function cleans one Admin Receive Queue element and returns
- * the contents through e.  It can also return how many events are
+ * the contents through e. It can also return how many events are
  * left to process through 'pending'.
  */
 enum ice_status
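ICE_CQ_INIT_REGS above uses token pasting (##) so a single macro body can program either the PF_FW_* (AdminQ) or PF_MBX_* (mailbox) register set, removing two near-identical init functions. A trimmed-down, compilable sketch of the same trick (the struct layout and register values are stand-ins, not the real ice definitions):

    #include <stdio.h>

    /* dummy stand-ins for the real register offsets in ice_hw_autogen.h */
    #define PF_FW_ATQH  0x00080300
    #define PF_FW_ARQH  0x00080380
    #define PF_MBX_ATQH 0x00022300
    #define PF_MBX_ARQH 0x00022380

    struct cq_ring { unsigned int head; };
    struct cq_info { struct cq_ring sq, rq; };

    /* trimmed-down version of ICE_CQ_INIT_REGS: ## pastes the prefix onto
     * each register name at preprocessing time, so one body serves both
     * the firmware AdminQ (PF_FW) and the mailbox queue (PF_MBX)
     */
    #define CQ_INIT_REGS(qinfo, prefix)         \
    do {                                        \
        (qinfo)->sq.head = prefix##_ATQH;       \
        (qinfo)->rq.head = prefix##_ARQH;       \
    } while (0)

    int main(void)
    {
        struct cq_info adminq, mailboxq;

        CQ_INIT_REGS(&adminq, PF_FW);
        CQ_INIT_REGS(&mailboxq, PF_MBX);
        printf("adminq sq head:  0x%08x\n", adminq.sq.head);
        printf("mailbox sq head: 0x%08x\n", mailboxq.sq.head);
        return 0;
    }

The ICE_FREE_CQ_BUFS macro a few hunks below applies the same pasting idea to the ring name (sq/rq) instead of the register prefix.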
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 648acdb4c644..3b6e387f5440 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -62,7 +62,7 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
  * The PF_STATs are appended to the netdev stats only when ethtool -S
  * is queried on the base PF netdev.
  */
-static struct ice_stats ice_gstrings_pf_stats[] = {
+static const struct ice_stats ice_gstrings_pf_stats[] = {
	ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
	ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
	ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
@@ -104,7 +104,7 @@ static struct ice_stats ice_gstrings_pf_stats[] = {
	ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
 };

-static u32 ice_regs_dump_list[] = {
+static const u32 ice_regs_dump_list[] = {
	PFGEN_STATE,
	PRTGEN_STATUS,
	QRX_CTRL(0),
@@ -260,10 +260,10 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
	 * a private ethtool flag). This is due to the nature of the
	 * ethtool stats API.
	 *
-	 * User space programs such as ethtool must make 3 separate
+	 * Userspace programs such as ethtool must make 3 separate
	 * ioctl requests, one for size, one for the strings, and
	 * finally one for the stats. Since these cross into
-	 * user space, changes to the number or size could result in
+	 * userspace, changes to the number or size could result in
	 * undefined memory access or incorrect string<->value
	 * correlations for statistics.
	 *
@@ -1392,17 +1392,17 @@ static int ice_nway_reset(struct net_device *netdev)
 {
	/* restart autonegotiation */
	struct ice_netdev_priv *np = netdev_priv(netdev);
-	struct ice_link_status *hw_link_info;
	struct ice_vsi *vsi = np->vsi;
	struct ice_port_info *pi;
	enum ice_status status;
-	bool link_up;

	pi = vsi->port_info;
-	hw_link_info = &pi->phy.link_info;
-	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
+	/* If VSI state is up, then restart autoneg with link up */
+	if (!test_bit(__ICE_DOWN, vsi->back->state))
+		status = ice_aq_set_link_restart_an(pi, true, NULL);
+	else
+		status = ice_aq_set_link_restart_an(pi, false, NULL);

-	status = ice_aq_set_link_restart_an(pi, link_up, NULL);
	if (status) {
		netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
			    status, pi->hw->adminq.sq_last_status);
@@ -1441,7 +1441,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
 /**
  * ice_set_pauseparam - Set Flow Control parameter
  * @netdev: network interface device structure
- * @pause: return tx/rx flow control status
+ * @pause: return Tx/Rx flow control status
  */
 static int
 ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
@@ -1543,7 +1543,7 @@ static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
 }

 /**
- * ice_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
  * @netdev: network interface device structure
  *
  * Returns the table size.
@@ -1556,7 +1556,7 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
 /**
- * ice_get_rxfh - get the rx flow hash indirection table
+ * ice_get_rxfh - get the Rx flow hash indirection table
  * @netdev: network interface device structure
  * @indir: indirection table
  * @key: hash key
@@ -1603,7 +1603,7 @@ out:
 }

 /**
- * ice_set_rxfh - set the rx flow hash indirection table
+ * ice_set_rxfh - set the Rx flow hash indirection table
  * @netdev: network interface device structure
  * @indir: indirection table
  * @key: hash key
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 596b9fb1c510..5507928c8fbe 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -7,6 +7,9 @@
 #define _ICE_HW_AUTOGEN_H_

 #define QTX_COMM_DBELL(_DBQM)		(0x002C0000 + ((_DBQM) * 4))
+#define QTX_COMM_HEAD(_DBQM)		(0x000E0000 + ((_DBQM) * 4))
+#define QTX_COMM_HEAD_HEAD_S		0
+#define QTX_COMM_HEAD_HEAD_M		ICE_M(0x1FFF, 0)
 #define PF_FW_ARQBAH			0x00080180
 #define PF_FW_ARQBAL			0x00080080
 #define PF_FW_ARQH			0x00080380
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 7d2a66739e3f..bb51dd7defb5 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -6,11 +6,11 @@

 union ice_32byte_rx_desc {
	struct {
-		__le64 pkt_addr; /* Packet buffer address */
-		__le64 hdr_addr; /* Header buffer address */
+		__le64  pkt_addr; /* Packet buffer address */
+		__le64  hdr_addr; /* Header buffer address */
			/* bit 0 of hdr_addr is DD bit */
-		__le64 rsvd1;
-		__le64 rsvd2;
+		__le64  rsvd1;
+		__le64  rsvd2;
	} read;
	struct {
		struct {
@@ -105,11 +105,11 @@ enum ice_rx_ptype_payload_layer {
  */
 union ice_32b_rx_flex_desc {
	struct {
-		__le64 pkt_addr; /* Packet buffer address */
-		__le64 hdr_addr; /* Header buffer address */
-			 /* bit 0 of hdr_addr is DD bit */
-		__le64 rsvd1;
-		__le64 rsvd2;
+		__le64 pkt_addr;	/* Packet buffer address */
+		__le64 hdr_addr;	/* Header buffer address */
+					/* bit 0 of hdr_addr is DD bit */
+		__le64 rsvd1;
+		__le64 rsvd2;
	} read;
	struct {
		/* Qword 0 */
@@ -256,6 +256,9 @@ enum ice_rx_flex_desc_status_error_0_bits {

 #define ICE_RXQ_CTX_SIZE_DWORDS		8
 #define ICE_RXQ_CTX_SZ			(ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
+#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS	22
+#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS	5
+#define GLTCLAN_CQ_CNTX(i, CQ)		(GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))

 /* RLAN Rx queue context data
  *
@@ -274,18 +277,18 @@ struct ice_rlan_ctx {
	u16 dbuf; /* bigger than needed, see above for reason */
 #define ICE_RLAN_CTX_HBUF_S 6
	u16 hbuf; /* bigger than needed, see above for reason */
-	u8 dtype;
-	u8 dsize;
-	u8 crcstrip;
-	u8 l2tsel;
-	u8 hsplit_0;
-	u8 hsplit_1;
-	u8 showiv;
+	u8  dtype;
+	u8  dsize;
+	u8  crcstrip;
+	u8  l2tsel;
+	u8  hsplit_0;
+	u8  hsplit_1;
+	u8  showiv;
	u32 rxmax; /* bigger than needed, see above for reason */
-	u8 tphrdesc_ena;
-	u8 tphwdesc_ena;
-	u8 tphdata_ena;
-	u8 tphhead_ena;
+	u8  tphrdesc_ena;
+	u8  tphwdesc_ena;
+	u8  tphdata_ena;
+	u8  tphhead_ena;
	u16 lrxqthresh; /* bigger than needed, see above for reason */
 };

@@ -413,35 +416,35 @@ enum ice_tx_ctx_desc_cmd_bits {
 struct ice_tlan_ctx {
 #define ICE_TLAN_CTX_BASE_S	7
	u64 base;		/* base is defined in 128-byte units */
-	u8 port_num;
+	u8  port_num;
	u16 cgd_num;		/* bigger than needed, see above for reason */
-	u8 pf_num;
+	u8  pf_num;
	u16 vmvf_num;
-	u8 vmvf_type;
+	u8  vmvf_type;
 #define ICE_TLAN_CTX_VMVF_TYPE_VF	0
 #define ICE_TLAN_CTX_VMVF_TYPE_VMQ	1
 #define ICE_TLAN_CTX_VMVF_TYPE_PF	2
	u16 src_vsi;
-	u8 tsyn_ena;
-	u8 alt_vlan;
+	u8  tsyn_ena;
+	u8  alt_vlan;
	u16 cpuid;		/* bigger than needed, see above for reason */
-	u8 wb_mode;
-	u8 tphrd_desc;
-	u8 tphrd;
-	u8 tphwr_desc;
+	u8  wb_mode;
+	u8  tphrd_desc;
+	u8  tphrd;
+	u8  tphwr_desc;
	u16 cmpq_id;
	u16 qnum_in_func;
-	u8 itr_notification_mode;
-	u8 adjust_prof_id;
+	u8  itr_notification_mode;
+	u8  adjust_prof_id;
	u32 qlen;		/* bigger than needed, see above for reason */
-	u8 quanta_prof_idx;
-	u8 tso_ena;
+	u8  quanta_prof_idx;
+	u8  tso_ena;
	u16 tso_qnum;
-	u8 legacy_int;
-	u8 drop_ena;
-	u8 cache_prof_idx;
-	u8 pkt_shaper_prof_idx;
-	u8 int_q_state;	/* width not needed - internal do not write */
+	u8  legacy_int;
+	u8  drop_ena;
+	u8  cache_prof_idx;
+	u8  pkt_shaper_prof_idx;
+	u8  int_q_state;	/* width not needed - internal do not write */
 };

 /* macro to make the table lines short */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 1041fa2a7767..29b1dcfd4331 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -20,7 +20,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
	u16 pf_q;
	int err;

-	/* what is RX queue number in global space of 2K Rx queues */
+	/* what is Rx queue number in global space of 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
@@ -174,15 +174,15 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
 {
	int i;

-	for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
+	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
		u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));

		if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
			break;

-		usleep_range(10, 20);
+		usleep_range(20, 40);
	}

-	if (i >= ICE_Q_WAIT_RETRY_LIMIT)
+	if (i >= ICE_Q_WAIT_MAX_RETRY)
		return -ETIMEDOUT;

	return 0;
@@ -774,11 +774,13 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
  */
 static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 {
-	u16 offset = 0, qmap = 0, numq_tc;
-	u16 pow = 0, max_rss = 0, qcount;
+	u16 offset = 0, qmap = 0, tx_count = 0;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
+	u16 tx_numq_tc, rx_numq_tc;
+	u16 pow = 0, max_rss = 0;
	bool ena_tc0 = false;
+	u8 netdev_tc = 0;
	int i;

	/* at least TC0 should be enabled by default */
@@ -794,7 +796,12 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
		vsi->tc_cfg.ena_tc |= 1;
	}

-	numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+	rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+	if (!rx_numq_tc)
+		rx_numq_tc = 1;
+	tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
+	if (!tx_numq_tc)
+		tx_numq_tc = 1;

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
@@ -808,7 +815,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */

-	qcount = numq_tc;
+	qcount_rx = rx_numq_tc;
+
	/* qcount will change if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
		if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
@@ -816,37 +824,41 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
				max_rss = ICE_MAX_LG_RSS_QS;
			else
				max_rss = ICE_MAX_SMALL_RSS_QS;
-			qcount = min_t(int, numq_tc, max_rss);
-			qcount = min_t(int, qcount, vsi->rss_size);
+			qcount_rx = min_t(int, rx_numq_tc, max_rss);
+			qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
		}
	}

	/* find the (rounded up) power-of-2 of qcount */
-	pow = order_base_2(qcount);
+	pow = order_base_2(qcount_rx);

	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
-			vsi->tc_cfg.tc_info[i].qcount = 1;
+			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
+			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
+			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
-		vsi->tc_cfg.tc_info[i].qcount = qcount;
+		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
+		vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
+		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;

		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
			ICE_AQ_VSI_TC_Q_OFFSET_M) |
			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
			 ICE_AQ_VSI_TC_Q_NUM_M);
-		offset += qcount;
+		offset += qcount_rx;
+		tx_count += tx_numq_tc;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}
-
-	vsi->num_txq = qcount_tx;
	vsi->num_rxq = offset;
+	vsi->num_txq = tx_count;

	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
		dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
@@ -1000,7 +1012,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
  * @vsi: the VSI being configured
  * @v_idx: index of the vector in the VSI struct
  *
- * We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
  */
 static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
 {
@@ -1039,7 +1051,7 @@ out:
  * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
  * @vsi: the VSI being configured
  *
- * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * We allocate one q_vector per queue interrupt. If allocation fails we
  * return -ENOMEM.
  */
 static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
@@ -1188,7 +1200,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
	struct ice_pf *pf = vsi->back;
	int i;

-	/* Allocate tx_rings */
+	/* Allocate Tx rings */
	for (i = 0; i < vsi->alloc_txq; i++) {
		struct ice_ring *ring;
@@ -1207,7 +1219,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
		vsi->tx_rings[i] = ring;
	}

-	/* Allocate rx_rings */
+	/* Allocate Rx rings */
	for (i = 0; i < vsi->alloc_rxq; i++) {
		struct ice_ring *ring;
@@ -1611,55 +1623,62 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
	struct ice_aqc_add_tx_qgrp *qg_buf;
	struct ice_aqc_add_txqs_perq *txq;
	struct ice_pf *pf = vsi->back;
+	u8 num_q_grps, q_idx = 0;
	enum ice_status status;
	u16 buf_len, i, pf_q;
	int err = 0, tc = 0;
-	u8 num_q_grps;

	buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
	qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

-	if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
-		err = -EINVAL;
-		goto err_cfg_txqs;
-	}
	qg_buf->num_txqs = 1;
	num_q_grps = 1;

-	/* set up and configure the Tx queues */
-	ice_for_each_txq(vsi, i) {
-		struct ice_tlan_ctx tlan_ctx = { 0 };
+	/* set up and configure the Tx queues for each enabled TC */
+	for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
+		if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
+			break;

-		pf_q = vsi->txq_map[i];
-		ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
-		/* copy context contents into the qg_buf */
-		qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
-		ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
-			    ice_tlan_ctx_info);
+		for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
+			struct ice_tlan_ctx tlan_ctx = { 0 };
+
+			pf_q = vsi->txq_map[q_idx];
+			ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx,
+					 pf_q);
+			/* copy context contents into the qg_buf */
+			qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+			ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+				    ice_tlan_ctx_info);
+
+			/* init queue specific tail reg. It is referred as
+			 * transmit comm scheduler queue doorbell.
+			 */
+			vsi->tx_rings[q_idx]->tail =
+				pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+			status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
+						 num_q_grps, qg_buf, buf_len,
+						 NULL);
+			if (status) {
+				dev_err(&vsi->back->pdev->dev,
+					"Failed to set LAN Tx queue context, error: %d\n",
+					status);
+				err = -ENODEV;
+				goto err_cfg_txqs;
+			}

-		/* init queue specific tail reg. It is referred as transmit
-		 * comm scheduler queue doorbell.
-		 */
-		vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
-		status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
-					 num_q_grps, qg_buf, buf_len, NULL);
-		if (status) {
-			dev_err(&vsi->back->pdev->dev,
-				"Failed to set LAN Tx queue context, error: %d\n",
-				status);
-			err = -ENODEV;
-			goto err_cfg_txqs;
-		}
+			/* Add Tx Queue TEID into the VSI Tx ring from the
+			 * response. This will complete configuring and
+			 * enabling the queue.
+			 */
+			txq = &qg_buf->txqs[0];
+			if (pf_q == le16_to_cpu(txq->txq_id))
+				vsi->tx_rings[q_idx]->txq_teid =
+					le32_to_cpu(txq->q_teid);

-		/* Add Tx Queue TEID into the VSI Tx ring from the response
-		 * This will complete configuring and enabling the queue.
-		 */
-		txq = &qg_buf->txqs[0];
-		if (pf_q == le16_to_cpu(txq->txq_id))
-			vsi->tx_rings[i]->txq_teid =
-				le32_to_cpu(txq->q_teid);
+			q_idx++;
+		}
	}
 err_cfg_txqs:
	devm_kfree(&pf->pdev->dev, qg_buf);
@@ -1908,7 +1927,8 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
	ice_for_each_txq(vsi, i) {
		u16 v_idx;

-		if (!vsi->tx_rings || !vsi->tx_rings[i]) {
+		if (!vsi->tx_rings || !vsi->tx_rings[i] ||
+		    !vsi->tx_rings[i]->q_vector) {
			err = -EINVAL;
			goto err_out;
		}
@@ -2056,6 +2076,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
	/* set RSS capabilities */
	ice_vsi_set_rss_params(vsi);

+	/* set tc configuration */
+	ice_vsi_set_tc_cfg(vsi);
+
	/* create the VSI */
	ret = ice_vsi_init(vsi);
	if (ret)
@@ -2113,17 +2136,13 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
		pf->q_left_rx -= vsi->alloc_rxq;
		break;
	default:
-		/* if VSI type is not recognized, clean up the resources and
-		 * exit
-		 */
+		/* clean up the resources and exit */
		goto unroll_vsi_init;
	}

-	ice_vsi_set_tc_cfg(vsi);
-
	/* configure VSI nodes based on number of queues and TC's */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
-		max_txqs[i] = vsi->num_txq;
+		max_txqs[i] = pf->num_lan_tx;

	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
			      max_txqs);
@@ -2314,7 +2333,7 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
	int start = res->search_hint;
	int end = start;

-	if ((start + needed) > res->num_entries) 
+	if ((start + needed) > res->num_entries)
		return -ENOMEM;

	id |= ICE_RES_VALID_BIT;
@@ -2491,6 +2510,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
	}

	ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
	ice_vsi_delete(vsi);
	ice_vsi_free_q_vectors(vsi);
	ice_vsi_clear_rings(vsi);
@@ -2518,11 +2538,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
 int ice_vsi_rebuild(struct ice_vsi *vsi)
 {
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+	struct ice_pf *pf;
	int ret, i;

	if (!vsi)
		return -EINVAL;

+	pf = vsi->back;
+	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
	ice_vsi_free_q_vectors(vsi);
	ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
	ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
@@ -2532,6 +2555,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
	ice_vsi_free_arrays(vsi, false);
	ice_dev_onetime_setup(&vsi->back->hw);
	ice_vsi_set_num_qs(vsi);
+	ice_vsi_set_tc_cfg(vsi);

	/* Initialize VSI struct elements and create VSI in FW */
	ret = ice_vsi_init(vsi);
@@ -2578,11 +2602,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
		break;
	}

-	ice_vsi_set_tc_cfg(vsi);
-
	/* configure VSI nodes based on number of queues and TC's */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
-		max_txqs[i] = vsi->num_txq;
+		max_txqs[i] = pf->num_lan_tx;

	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
			      max_txqs);
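ice_vsi_setup_q_map() above packs each TC's queue offset and a rounded-up power-of-2 queue count into the per-TC qmap word. A standalone sketch of that computation (the shift positions are modeled on the ICE_AQ_VSI_TC_Q_OFFSET_S/ICE_AQ_VSI_TC_Q_NUM_S defines but simplified, and the queue counts are made up):

    #include <stdio.h>

    /* userspace stand-in for the kernel's order_base_2(): smallest
     * exponent e such that 2^e >= n
     */
    static unsigned int order_base_2(unsigned int n)
    {
        unsigned int order = 0;

        while ((1U << order) < n)
            order++;
        return order;
    }

    #define Q_OFFSET_S 0   /* illustrative, mirrors ICE_AQ_VSI_TC_Q_OFFSET_S */
    #define Q_NUM_S    11  /* illustrative, mirrors ICE_AQ_VSI_TC_Q_NUM_S */

    int main(void)
    {
        /* e.g. 2 TCs over 12 Rx queues: 6 per TC, pow = 3 since 2^3 >= 6 */
        unsigned int numtc = 2, qcount_rx = 12 / numtc;
        unsigned int pow = order_base_2(qcount_rx);
        unsigned int offset = 0, tc;

        for (tc = 0; tc < numtc; tc++) {
            unsigned int qmap = (offset << Q_OFFSET_S) | (pow << Q_NUM_S);

            printf("TC%u: offset=%u qcount=%u qmap=0x%04x\n",
                   tc, offset, qcount_rx, qmap);
            offset += qcount_rx;
        }
        return 0;
    }

The hunk's real change is splitting the single qcount into independent Rx and Tx counts (qcount_rx/tx_numq_tc), so Tx queues per TC no longer have to track the RSS-limited Rx count.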
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 333312a1d595..8725569d11f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -349,6 +349,9 @@ ice_prepare_for_reset(struct ice_pf *pf)
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf);

+	if (hw->port_info)
+		ice_sched_clear_port(hw->port_info);
+
	ice_shutdown_all_ctrlq(hw);

	set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
@@ -405,7 +408,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
-	 * type and __ICE_RESET_OICR_RECV.  So, if the latter bit is set
+	 * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
@@ -1379,7 +1382,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
  * @pf: board private structure
  *
  * This sets up the handler for MSIX 0, which is used to manage the
- * non-queue interrupts, e.g. AdminQ and errors.  This is not used
+ * non-queue interrupts, e.g. AdminQ and errors. This is not used
  * when in MSI or Legacy interrupt mode.
  */
 static int ice_req_irq_msix_misc(struct ice_pf *pf)
@@ -1783,7 +1786,7 @@ static void ice_determine_q_usage(struct ice_pf *pf)

	pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());

-	/* only 1 rx queue unless RSS is enabled */
+	/* only 1 Rx queue unless RSS is enabled */
	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		pf->num_lan_rx = 1;
	else
@@ -2091,8 +2094,7 @@ static int ice_probe(struct pci_dev *pdev,

	ice_determine_q_usage(pf);

-	pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
-				  hw->func_caps.guaranteed_num_vsi);
+	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
	if (!pf->num_alloc_vsi) {
		err = -EIO;
		goto err_init_pf_unroll;
@@ -2544,7 +2546,6 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
		if (err)
			return err;
	}
-
	err = ice_vsi_cfg_txqs(vsi);
	if (!err)
		err = ice_vsi_cfg_rxqs(vsi);
@@ -2563,8 +2564,12 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
	if (!vsi->netdev)
		return;

-	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-		napi_enable(&vsi->q_vectors[q_idx]->napi);
+	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+		if (q_vector->rx.ring || q_vector->tx.ring)
+			napi_enable(&q_vector->napi);
+	}
 }

 /**
@@ -2931,8 +2936,12 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
	if (!vsi->netdev)
		return;

-	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-		napi_disable(&vsi->q_vectors[q_idx]->napi);
+	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+		if (q_vector->rx.ring || q_vector->tx.ring)
+			napi_disable(&q_vector->napi);
+	}
 }

 /**
@@ -3138,8 +3147,9 @@ static void ice_vsi_release_all(struct ice_pf *pf)
 /**
  * ice_dis_vsi - pause a VSI
  * @vsi: the VSI being paused
+ * @locked: is the rtnl_lock already held
  */
-static void ice_dis_vsi(struct ice_vsi *vsi)
+static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
 {
	if (test_bit(__ICE_DOWN, vsi->state))
		return;
@@ -3148,9 +3158,13 @@ static void ice_dis_vsi(struct ice_vsi *vsi)

	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
		if (netif_running(vsi->netdev)) {
-			rtnl_lock();
-			vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
-			rtnl_unlock();
+			if (!locked) {
+				rtnl_lock();
+				vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+				rtnl_unlock();
+			} else {
+				vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+			}
		} else {
			ice_vsi_close(vsi);
		}
@@ -3189,7 +3203,7 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf)

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
-			ice_dis_vsi(pf->vsi[v]);
+			ice_dis_vsi(pf->vsi[v], false);
 }

 /**
@@ -3618,6 +3632,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
  * @dev: the netdev being configured
  * @nlh: RTNL message
  * @flags: bridge setlink flags
+ * @extack: netlink extended ack
  *
  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
  * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if
@@ -3626,7 +3641,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
  */
 static int
 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
-		   u16 __always_unused flags)
+		   u16 __always_unused flags, struct netlink_ext_ack *extack)
 {
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_pf *pf = np->vsi->back;
@@ -3668,7 +3683,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
		 */
		status = ice_update_sw_rule_bridge_mode(hw);
		if (status) {
-			netdev_err(dev, "update SW_RULE for bridge mode failed, = %d err %d aq_err %d\n",
+			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
				   mode, status, hw->adminq.sq_last_status);
			/* revert hw->evb_veb */
			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
@@ -3691,40 +3706,36 @@ static void ice_tx_timeout(struct net_device *netdev)
	struct ice_ring *tx_ring = NULL;
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
-	u32 head, val = 0, i;
	int hung_queue = -1;
+	u32 i;

	pf->tx_timeout_count++;

-	/* find the stopped queue the same way the stack does */
+	/* find the stopped queue the same way dev_watchdog() does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
-		struct netdev_queue *q;
		unsigned long trans_start;
+		struct netdev_queue *q;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
-			       (trans_start + netdev->watchdog_timeo))) {
+			       trans_start + netdev->watchdog_timeo)) {
			hung_queue = i;
			break;
		}
	}

-	if (i == netdev->num_tx_queues) {
+	if (i == netdev->num_tx_queues)
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
-	} else {
+	else
		/* now that we have an index, find the tx_ring struct */
-		for (i = 0; i < vsi->num_txq; i++) {
-			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
-				if (hung_queue ==
-				    vsi->tx_rings[i]->q_index) {
+		for (i = 0; i < vsi->num_txq; i++)
+			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+				if (hung_queue == vsi->tx_rings[i]->q_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
-			}
-		}
-	}

	/* Reset recovery level if enough time has elapsed after last timeout.
	 * Also ensure no new reset action happens before next timeout period.
@@ -3736,17 +3747,20 @@ static void ice_tx_timeout(struct net_device *netdev)
		return;

	if (tx_ring) {
-		head = tx_ring->next_to_clean;
+		struct ice_hw *hw = &pf->hw;
+		u32 head, val = 0;
+
+		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
+			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
		/* Read interrupt register */
		if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-			val = rd32(&pf->hw,
+			val = rd32(hw,
				   GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
-						 tx_ring->vsi->hw_base_vector));
+						 tx_ring->vsi->hw_base_vector));

-		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
+		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
			    vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
-			    head, tx_ring->next_to_use,
-			    readl(tx_ring->tail), val);
+			    head, tx_ring->next_to_use, val);
	}

	pf->tx_timeout_last_recovery = jiffies;
@@ -3780,7 +3794,7 @@ static void ice_tx_timeout(struct net_device *netdev)
  * @netdev: network interface device structure
  *
  * The open entry point is called when a network interface is made
- * active by the system (IFF_UP).  At this point all resources needed
+ * active by the system (IFF_UP). At this point all resources needed
  * for transmit and receive operations are allocated, the interrupt
  * handler is registered with the OS, the netdev watchdog is enabled,
  * and the stack is notified that the interface is ready.
@@ -3813,7 +3827,7 @@ static int ice_open(struct net_device *netdev)
  * @netdev: network interface device structure
  *
  * The stop entry point is called when an interface is de-activated by the OS,
- * and the netdevice enters the DOWN state.  The hardware is still under the
+ * and the netdevice enters the DOWN state. The hardware is still under the
  * driver's control, but the netdev interface is disabled.
  *
  * Returns success only - not allowed to fail
@@ -3842,14 +3856,14 @@ ice_features_check(struct sk_buff *skb,
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
-	 * being requested for this frame.  We can rule out both by just
+	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
-	 * 64 bytes.  If it is then we need to drop support for GSO.
+	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;
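The tx_timeout hunk above replaces the software next_to_clean snapshot with the head position the hardware actually reports via the new QTX_COMM_HEAD register, extracted with the mask/shift pair added in ice_hw_autogen.h. A minimal sketch of that extraction (the raw register value is invented; in the driver it comes from rd32()):

    #include <stdio.h>

    #define QTX_COMM_HEAD_HEAD_S 0
    #define QTX_COMM_HEAD_HEAD_M 0x1FFF /* expansion of ICE_M(0x1FFF, 0) */

    int main(void)
    {
        /* pretend this came back from rd32(hw, QTX_COMM_HEAD(queue)) */
        unsigned int reg = 0xDEAD1042;
        unsigned int head = (reg & QTX_COMM_HEAD_HEAD_M) >>
                            QTX_COMM_HEAD_HEAD_S;

        printf("hardware head: 0x%x\n", head); /* 0x1042 */
        return 0;
    }

Comparing this hardware head against the ring's NTC/NTU in the log line makes it possible to tell a stalled queue (head stuck behind tail) from a driver-side cleanup problem.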
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 7cc8aa18a22b..a1681853df2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -630,7 +630,7 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
  *
  * Cleanup scheduling elements from SW DB
  */
-static void ice_sched_clear_port(struct ice_port_info *pi)
+void ice_sched_clear_port(struct ice_port_info *pi)
 {
	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return;
@@ -894,8 +894,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
  * This function removes the leaf node that was created by the FW
  * during initialization
  */
-static void
-ice_rm_dflt_leaf_node(struct ice_port_info *pi)
+static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
 {
	struct ice_sched_node *node;
@@ -923,8 +922,7 @@ ice_rm_dflt_leaf_node(struct ice_port_info *pi)
  * This function frees all the nodes except root and TC that were created by
  * the FW during initialization
  */
-static void
-ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
+static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
 {
	struct ice_sched_node *node;
@@ -1339,7 +1337,7 @@ ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
  * @num_nodes: pointer to num nodes array
  *
  * This function calculates the number of supported nodes needed to add this
- * VSI into tx tree including the VSI, parent and intermediate nodes in below
+ * VSI into Tx tree including the VSI, parent and intermediate nodes in below
  * layers
  */
 static void
@@ -1376,13 +1374,13 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
 }

 /**
- * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree
+ * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
  * @pi: port information structure
  * @vsi_handle: software VSI handle
  * @tc_node: pointer to TC node
  * @num_nodes: pointer to num nodes array
  *
- * This function adds the VSI supported nodes into tx tree including the
+ * This function adds the VSI supported nodes into Tx tree including the
  * VSI, its parent and intermediate nodes in below layers
  */
 static enum ice_status
@@ -1527,7 +1525,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
 }

 /**
- * ice_sched_cfg_vsi - configure the new/exisiting VSI
+ * ice_sched_cfg_vsi - configure the new/existing VSI
  * @pi: port information structure
  * @vsi_handle: software VSI handle
  * @tc: TC number
@@ -1605,3 +1603,109 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,

	return status;
 }
+
+/**
+ * ice_sched_rm_agg_vsi_entry - remove agg related VSI info entry
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function removes single aggregator VSI info entry from
+ * aggregator list.
+ */
+static void
+ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
+{
+	struct ice_sched_agg_info *agg_info;
+	struct ice_sched_agg_info *atmp;
+
+	list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
+		struct ice_sched_agg_vsi_info *agg_vsi_info;
+		struct ice_sched_agg_vsi_info *vtmp;
+
+		list_for_each_entry_safe(agg_vsi_info, vtmp,
+					 &agg_info->agg_vsi_list, list_entry)
+			if (agg_vsi_info->vsi_handle == vsi_handle) {
+				list_del(&agg_vsi_info->list_entry);
+				devm_kfree(ice_hw_to_dev(pi->hw),
+					   agg_vsi_info);
+				return;
+			}
+	}
+}
+
+/**
+ * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @owner: LAN or RDMA
+ *
+ * This function removes the VSI and its LAN or RDMA children nodes from the
+ * scheduler tree.
+ */
+static enum ice_status
+ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
+{
+	enum ice_status status = ICE_ERR_PARAM;
+	struct ice_vsi_ctx *vsi_ctx;
+	u8 i, j = 0;
+
+	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+		return status;
+	mutex_lock(&pi->sched_lock);
+	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+	if (!vsi_ctx)
+		goto exit_sched_rm_vsi_cfg;
+
+	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+		struct ice_sched_node *vsi_node, *tc_node;
+
+		tc_node = ice_sched_get_tc_node(pi, i);
+		if (!tc_node)
+			continue;
+
+		vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle);
+		if (!vsi_node)
+			continue;
+
+		while (j < vsi_node->num_children) {
+			if (vsi_node->children[j]->owner == owner) {
+				ice_free_sched_node(pi, vsi_node->children[j]);
+
+				/* reset the counter again since the num
+				 * children will be updated after node removal
+				 */
+				j = 0;
+			} else {
+				j++;
+			}
+		}
+		/* remove the VSI if it has no children */
+		if (!vsi_node->num_children) {
+			ice_free_sched_node(pi, vsi_node);
+			vsi_ctx->sched.vsi_node[i] = NULL;
+
+			/* clean up agg related vsi info if any */
+			ice_sched_rm_agg_vsi_info(pi, vsi_handle);
+		}
+		if (owner == ICE_SCHED_NODE_OWNER_LAN)
+			vsi_ctx->sched.max_lanq[i] = 0;
+	}
+	status = 0;
+
+exit_sched_rm_vsi_cfg:
+	mutex_unlock(&pi->sched_lock);
+	return status;
+}
+
+/**
+ * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function clears the VSI and its LAN children nodes from scheduler tree
+ * for all TCs.
+ */
+enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
+{
+	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 5dc9cfa04c58..da5b4c166da8 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -12,6 +12,7 @@ struct ice_sched_agg_vsi_info {
	struct list_head list_entry;
	DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
+	u16 vsi_handle;
 };

 struct ice_sched_agg_info {
@@ -25,6 +26,7 @@ struct ice_sched_agg_info {
 /* FW AQ command calls */
 enum ice_status ice_sched_init_port(struct ice_port_info *pi);
 enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+void ice_sched_clear_port(struct ice_port_info *pi);
 void ice_sched_cleanup_all(struct ice_hw *hw);
 struct ice_sched_node *
 ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
@@ -39,4 +41,5 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 enum ice_status
 ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
		   u8 owner, bool enable);
+enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
 #endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 027eba4e13f8..533b989a23e1 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -46,7 +46,7 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
  * @link_speed: variable containing the link_speed to be converted
  *
  * Convert link speed supported by HW to link speed supported by virtchnl.
- * If adv_link_support is true, then return link speed in Mbps.  Else return
+ * If adv_link_support is true, then return link speed in Mbps. Else return
  * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
  * needs to cast back to an enum virtchnl_link_speed in the case where
  * adv_link_support is false, but when adv_link_support is true the caller can
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 40c9c6558956..2e5693107fa4 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -92,8 +92,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
  * Allocate memory for the entire recipe table and initialize the structures/
  * entries corresponding to basic recipes.
  */
-enum ice_status
-ice_init_def_sw_recp(struct ice_hw *hw)
+enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
 {
	struct ice_sw_recipe *recps;
	u8 i;
@@ -130,7 +129,7 @@ ice_init_def_sw_recp(struct ice_hw *hw)
  *
  * NOTE: *req_desc is both an input/output parameter.
  * The caller of this function first calls this function with *request_desc set
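The new ice_sched_rm_vsi_cfg() above resets its child index j to 0 after each removal because freeing a node compacts the children array underneath the loop. A standalone sketch of why that reset is the safe pattern (a toy owner-tag array stands in for scheduler nodes):

    #include <stdio.h>

    #define LAN  1
    #define RDMA 2

    int main(void)
    {
        /* toy stand-in for vsi_node->children[]: owner tags only */
        int owner[6] = { LAN, RDMA, LAN, LAN, RDMA, LAN };
        int num_children = 6;
        int j = 0;

        /* same pattern as ice_sched_rm_vsi_cfg(): removing a child
         * compacts the array and shrinks num_children, so restart the
         * scan rather than trusting the current index
         */
        while (j < num_children) {
            if (owner[j] == LAN) {
                int k;

                for (k = j; k < num_children - 1; k++)
                    owner[k] = owner[k + 1]; /* mimic node removal */
                num_children--;
                j = 0; /* counter reset, as in the driver */
            } else {
                j++;
            }
        }
        printf("children left: %d\n", num_children); /* 2 (the RDMA ones) */
        return 0;
    }

The VSI node itself is only freed once this loop leaves it with no children, which is what lets ice_rm_vsi_lan_cfg() tear down just the LAN side while leaving any RDMA children in place.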
@@ -629,25 +628,36 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
 /**
  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
  * @hw: pointer to the hardware structure
- * @f_info: filter info structure to fill/update
+ * @fi: filter info structure to fill/update
  *
  * This helper function populates the lb_en and lan_en elements of the provided
  * ice_fltr_info struct using the switch's type and characteristics of the
  * switch rule being configured.
  */
-static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info)
+static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
 {
-	f_info->lb_en = false;
-	f_info->lan_en = false;
-	if ((f_info->flag & ICE_FLTR_TX) &&
-	    (f_info->fltr_act == ICE_FWD_TO_VSI ||
-	     f_info->fltr_act == ICE_FWD_TO_VSI_LIST ||
-	     f_info->fltr_act == ICE_FWD_TO_Q ||
-	     f_info->fltr_act == ICE_FWD_TO_QGRP)) {
-		f_info->lb_en = true;
-		if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC &&
-		      is_unicast_ether_addr(f_info->l_data.mac.mac_addr)))
-			f_info->lan_en = true;
+	fi->lb_en = false;
+	fi->lan_en = false;
+	if ((fi->flag & ICE_FLTR_TX) &&
+	    (fi->fltr_act == ICE_FWD_TO_VSI ||
+	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
+	     fi->fltr_act == ICE_FWD_TO_Q ||
+	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
+		fi->lb_en = true;
+		/* Do not set lan_en to TRUE if
+		 * 1. The switch is a VEB, AND
+		 * 2. One of the following is true:
+		 *    2.1 The lookup is MAC with a unicast MAC address, OR
+		 *    2.2 The lookup is MAC_VLAN with a unicast MAC address
+		 *
+		 * In all other cases, the LAN enable has to be set to true.
+		 */
+		if (!(hw->evb_veb &&
+		      ((fi->lkup_type == ICE_SW_LKUP_MAC &&
+			is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
+		       (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
+			is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr)))))
+			fi->lan_en = true;
 	}
 }
 
@@ -817,7 +827,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
 	/* Create two back-to-back switch rules and submit them to the HW using
 	 * one memory buffer:
 	 *    1. Large Action
-	 *    2. Look up tx rx
+	 *    2. Look up Tx Rx
 	 */
 	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
 	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
@@ -861,7 +871,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
 
 	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
 
-	/* call the fill switch rule to fill the lookup tx rx structure */
+	/* call the fill switch rule to fill the lookup Tx Rx structure */
 	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
 			 ice_aqc_opc_update_sw_rules);
 
@@ -1158,8 +1168,8 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
  * Call AQ command to add or update previously created VSI list with new VSI.
  *
  * Helper function to do book keeping associated with adding filter information
- * The algorithm to do the booking keeping is described below :
- * When a VSI needs to subscribe to a given filter( MAC/VLAN/Ethtype etc.)
+ * The algorithm to do the book keeping is described below:
+ * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
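The VEB special case added to ice_fill_sw_info() in the hunk above is easier to read as a standalone predicate. This is an illustrative restatement, not a driver function; the types, fields, and is_unicast_ether_addr() are exactly as used in that hunk:

/* Illustrative only: the same decision as the lan_en branch above.
 * On a VEB, unicast MAC and MAC_VLAN lookups are switched locally,
 * so the LAN (uplink) side stays disabled; every other Tx filter
 * that forwards to a VSI/list/queue/group needs lan_en set.
 */
static bool fltr_needs_lan_en(const struct ice_hw *hw,
			      const struct ice_fltr_info *fi)
{
	bool ucast_mac = fi->lkup_type == ICE_SW_LKUP_MAC &&
			 is_unicast_ether_addr(fi->l_data.mac.mac_addr);
	bool ucast_mac_vlan = fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
			      is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr);

	return !(hw->evb_veb && (ucast_mac || ucast_mac_vlan));
}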
@@ -1237,6 +1247,9 @@ ice_add_update_vsi_list(struct ice_hw *hw,
 	u16 vsi_handle = new_fltr->vsi_handle;
 	enum ice_adminq_opc opcode;
 
+	if (!m_entry->vsi_list_info)
+		return ICE_ERR_CFG;
+
 	/* A rule already exists with the new VSI being added */
 	if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
 		return 0;
@@ -1853,7 +1866,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
 		/* Update the previous switch rule to a new VSI list which
-		 * includes current VSI thats requested
+		 * includes current VSI that is requested
 		 */
 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
 		if (status)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index fe5bbabbb41e..49fc38094185 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -219,7 +219,7 @@ static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
 
 /**
  * ice_setup_tx_ring - Allocate the Tx descriptors
- * @tx_ring: the tx ring to set up
+ * @tx_ring: the Tx ring to set up
  *
  * Return 0 on success, negative on error
  */
@@ -324,7 +324,7 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
 
 /**
  * ice_setup_rx_ring - Allocate the Rx descriptors
- * @rx_ring: the rx ring to set up
+ * @rx_ring: the Rx ring to set up
  *
  * Return 0 on success, negative on error
 */
@@ -377,7 +377,7 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
 	rx_ring->next_to_alloc = val;
 
 	/* Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch. (Only
+	 * know there are new descriptors to fetch. (Only
 	 * applicable for weak-ordered memory model archs,
 	 * such as IA-64).
 	 */
@@ -586,7 +586,7 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
 
 /**
  * ice_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
+ * @rx_ring: Rx descriptor ring to store buffers on
  * @old_buf: donor buffer to have page reused
  *
  * Synchronizes page for reuse by the adapter
@@ -609,7 +609,7 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring,
 
 /**
  * ice_fetch_rx_buf - Allocate skb and populate it
- * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_ring: Rx descriptor ring to transact packets on
  * @rx_desc: descriptor containing info written by hardware
  *
  * This function allocates an skb on the fly, and populates it with the page
@@ -686,7 +686,7 @@ static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
  * ice_pull_tail - ice specific version of skb_pull_tail
  * @skb: pointer to current skb being adjusted
  *
- * This function is an ice specific version of __pskb_pull_tail. The
+ * This function is an ice specific version of __pskb_pull_tail. The
  * main difference between this version and the original function is that
  * this function can make several assumptions about the state of things
  * that allow for significant optimizations versus the standard function.
@@ -768,7 +768,7 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
  * @rx_desc: Rx descriptor for current buffer
  * @skb: Current socket buffer containing buffer in progress
 *
- * This function updates next to clean. If the buffer is an EOP buffer
+ * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
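ice_is_non_eop(), whose kernel-doc closes the hunk above, is the standard multi-descriptor Rx helper: always advance next_to_clean, and report "keep chaining" until the descriptor carries an end-of-frame flag. A sketch of that shape follows; ICE_RXD_EOF, ICE_RX_DESC() and the stats field are assumptions based on similar Intel drivers, not taken from this diff:

/* Sketch of the non-EOP pattern described above; macro names assumed. */
static bool rx_is_non_eop(struct ice_ring *rx_ring,
			  union ice_32b_rx_flex_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* advance next_to_clean, wrapping at the end of the ring */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(ICE_RX_DESC(rx_ring, ntc));	/* warm the next descriptor */

	/* EOF set: the frame is complete and the skb can go up the stack */
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	rx_ring->rx_stats.non_eop_descs++;	/* keep chaining fragments */
	return true;
}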
@@ -904,7 +904,7 @@ checksum_fail:
 
 /**
  * ice_process_skb_fields - Populate skb header fields from Rx descriptor
- * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_ring: Rx descriptor ring packet is being transacted on
  * @rx_desc: pointer to the EOP Rx descriptor
  * @skb: pointer to current skb being populated
  * @ptype: the packet type decoded by hardware
@@ -927,7 +927,7 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring,
 
 /**
  * ice_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
+ * @rx_ring: Rx ring in play
  * @skb: packet to send up
  * @vlan_tag: vlan tag for packet
  *
@@ -946,11 +946,11 @@ static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
 
 /**
  * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
- * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_ring: Rx descriptor ring to transact packets on
  * @budget: Total limit on number of packets to process
  *
  * This function provides a "bounce buffer" approach to Rx interrupt
- * processing. The advantage to this is that on systems that have
+ * processing. The advantage to this is that on systems that have
  * expensive overhead for IOMMU access this provides a means of avoiding
  * it by maintaining the mapping of the page to the system.
  *
@@ -1103,11 +1103,14 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
 	if (!clean_complete)
 		return budget;
 
-	/* Work is done so exit the polling mode and re-enable the interrupt */
-	napi_complete_done(napi, work_done);
-	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-		ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
-	return 0;
+	/* Exit the polling mode, but don't re-enable interrupts if stack might
+	 * poll us due to busy-polling
+	 */
+	if (likely(napi_complete_done(napi, work_done)))
+		if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+			ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
+
+	return min(work_done, budget - 1);
 }
 
 /* helper function for building cmd/type/offset */
@@ -1122,7 +1125,7 @@ build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
 }
 
 /**
- * __ice_maybe_stop_tx - 2nd level check for tx stop conditions
+ * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
  * @tx_ring: the ring to be checked
  * @size: the size buffer we want to assure is available
  *
@@ -1145,7 +1148,7 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
 }
 
 /**
- * ice_maybe_stop_tx - 1st level check for tx stop conditions
+ * ice_maybe_stop_tx - 1st level check for Tx stop conditions
  * @tx_ring: the ring to be checked
  * @size: the size buffer we want to assure is available
  *
@@ -1155,6 +1158,7 @@ static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
 {
 	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
 		return 0;
+
 	return __ice_maybe_stop_tx(tx_ring, size);
 }
 
@@ -1552,7 +1556,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
  * Finally, we add one to round up. Because 256 isn't an exact multiple of
  * 3, we'll underestimate near each multiple of 12K. This is actually more
  * accurate as we have 4K - 1 of wiggle room that we can fit into the last
- * segment. For our purposes this is accurate out to 1M which is orders of
+ * segment. For our purposes this is accurate out to 1M which is orders of
  * magnitude greater than our largest possible GSO size.
  *
  * This would then be implemented as:
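The diff's context window ends exactly at "implemented as:", so the formula itself is not shown. Following the arithmetic spelled out above (multiply by 85/256 as a stand-in for 1/3, fold the divide-by-4096 and divide-by-256 into a single shift, add one to round up), the implementation would be a one-liner along these lines; the same trick appears in other Intel drivers, so treat the exact form as an assumption:

/* Sketch per the description above: descriptors = DIV_ROUND_UP(size, 12K),
 * approximated as size * 85 / 256 / 4096 + 1, where 85/256 ~= 1/3 and
 * the >> 20 folds the /256 and /4096 together.
 */
static unsigned int txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}

/* e.g. a 64KB TSO payload: (65536 * 85) >> 20 = 5, plus 1 -> 6 descriptors,
 * matching 65536 / 12288 = 5.33 rounded up.
 */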
@@ -1568,7 +1572,7 @@ static unsigned int ice_txd_use_count(unsigned int size)
 }
 
 /**
- * ice_xmit_desc_count - calculate number of tx descriptors needed
+ * ice_xmit_desc_count - calculate number of Tx descriptors needed
  * @skb: send buffer
  *
  * Returns number of data descriptors needed for this skb.
@@ -1620,7 +1624,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
 	nr_frags -= ICE_MAX_BUF_TXD - 2;
 	frag = &skb_shinfo(skb)->frags[0];
 
-	/* Initialize size to the negative value of gso_size minus 1. We
+	/* Initialize size to the negative value of gso_size minus 1. We
 	 * use this as the worst case scenario in which the frag ahead
 	 * of us only provides one byte which is why we are limited to 6
 	 * descriptors for a single transmit as the header and previous
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index f4dbc81c1988..0ea428104215 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -124,6 +124,8 @@ struct ice_phy_info {
 
 /* Common HW capabilities for SW use */
 struct ice_hw_common_caps {
+	u32 valid_functions;
+
 	/* TX/RX queues */
 	u16 num_rxq;		/* Number/Total RX queues */
 	u16 rxq_first_id;	/* First queue ID for RX queues */
@@ -150,7 +152,7 @@ struct ice_hw_func_caps {
 	struct ice_hw_common_caps common_cap;
 	u32 num_allocd_vfs;	/* Number of allocated VFs */
 	u32 vf_base_id;		/* Logical ID of the first VF */
-	u32 guaranteed_num_vsi;
+	u32 guar_num_vsi;
 };
 
 /* Device wide capabilities */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index e71065f9d391..05ff4f910649 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -156,8 +156,6 @@ static void ice_free_vf_res(struct ice_vf *vf)
 	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
 }
 
-/***********************enable_vf routines*****************************/
-
 /**
  * ice_dis_vf_mappings
  * @vf: pointer to the VF structure
@@ -215,6 +213,15 @@ void ice_free_vfs(struct ice_pf *pf)
 	while (test_and_set_bit(__ICE_VF_DIS, pf->state))
 		usleep_range(1000, 2000);
 
+	/* Disable IOV before freeing resources. This lets any VF drivers
+	 * running in the host get themselves cleaned up before we yank
+	 * the carpet out from underneath their feet.
+	 */
+	if (!pci_vfs_assigned(pf->pdev))
+		pci_disable_sriov(pf->pdev);
+	else
+		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
 	/* Avoid wait time by stopping all VFs at the same time */
 	for (i = 0; i < pf->num_alloc_vfs; i++) {
 		if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
@@ -228,15 +235,6 @@ void ice_free_vfs(struct ice_pf *pf)
 		clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
 	}
 
-	/* Disable IOV before freeing resources. This lets any VF drivers
-	 * running in the host get themselves cleaned up before we yank
-	 * the carpet out from underneath their feet.
-	 */
-	if (!pci_vfs_assigned(pf->pdev))
-		pci_disable_sriov(pf->pdev);
-	else
-		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
-
 	tmp = pf->num_alloc_vfs;
 	pf->num_vf_qps = 0;
 	pf->num_alloc_vfs = 0;
@@ -454,7 +452,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
 	/* Clear this bit after VF initialization since we shouldn't reclaim
 	 * and reassign interrupts for synchronous or asynchronous VFR events.
-	 * We dont want to reconfigure interrupts since AVF driver doesn't
+	 * We don't want to reconfigure interrupts since AVF driver doesn't
 	 * expect vector assignment to be changed unless there is a request for
 	 * more vectors.
 	 */
@@ -1105,7 +1103,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
  * ice_process_vflr_event - Free VF resources via IRQ calls
  * @pf: pointer to the PF structure
  *
- * called from the VLFR IRQ handler to
+ * called from the VFLR IRQ handler to
  * free up VF resources and state variables
  */
 void ice_process_vflr_event(struct ice_pf *pf)
@@ -1764,7 +1762,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 		/* copy Tx queue info from VF into VSI */
 		vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
 		vsi->tx_rings[i]->count = qpi->txq.ring_len;
-		/* copy Rx queue info from VF into vsi */
+		/* copy Rx queue info from VF into VSI */
 		vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
 		vsi->rx_rings[i]->count = qpi->rxq.ring_len;
 		if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
@@ -1830,7 +1828,7 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
  * @msg: pointer to the msg buffer
  * @set: true if mac filters are being set, false otherwise
  *
- * add guest mac address filter
+ * add guest MAC address filter
  */
 static int
 ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
@@ -1968,9 +1966,9 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
  * @msg: pointer to the msg buffer
  *
  * VFs get a default number of queues but can use this message to request a
- * different number. If the request is successful, PF will reset the VF and
+ * different number. If the request is successful, PF will reset the VF and
  * return 0. If unsuccessful, PF will send message informing VF of number of
- * available queue pairs via virtchnl message response to vf.
+ * available queue pairs via virtchnl message response to VF.
  */
 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
 {
@@ -1991,7 +1989,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
 	tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
 	if (req_queues <= 0) {
 		dev_err(&pf->pdev->dev,
-			"VF %d tried to request %d queues. Ignoring.\n",
+			"VF %d tried to request %d queues. Ignoring.\n",
 			vf->vf_id, req_queues);
 	} else if (req_queues > ICE_MAX_QS_PER_VF) {
 		dev_err(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 10131e0180f9..01470a8ee03a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -70,7 +70,7 @@ struct ice_vf {
 	u8 spoofchk;
 	u16 num_mac;
 	u16 num_vlan;
-	u8 num_req_qs;	/* num of queue pairs requested by VF */
+	u8 num_req_qs;	/* num of queue pairs requested by VF */
 };
 
 #ifdef CONFIG_PCI_IOV
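Per its kernel-doc above, ice_vc_request_qs_msg() boils down to validating the request against the per-VF cap and the PF's spare queues, then either reporting what is actually available or recording the request and resetting the VF. An illustrative sketch of that decision, not the function's exact body; ice_vc_dis_vf() is a hypothetical stand-in for the reset step:

	/* Illustrative: decide whether a VF's queue request can be honored. */
	tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
	if (req_queues <= 0) {
		/* nonsensical request: keep the current allocation */
		dev_err(&pf->pdev->dev,
			"VF %d tried to request %d queues. Ignoring.\n",
			vf->vf_id, req_queues);
	} else if (req_queues > ICE_MAX_QS_PER_VF) {
		/* over the per-VF cap: reply with the cap instead */
	} else if (req_queues > tx_rx_queue_left) {
		/* more than the PF has spare: reply with what is left */
	} else {
		/* valid request: remember it and trigger a VF reset so the
		 * new queue count takes effect when the VF re-initializes
		 */
		vf->num_req_qs = req_queues;
		ice_vc_dis_vf(vf);	/* hypothetical reset helper */
	}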