Diffstat (limited to 'drivers/net/ethernet/intel')
97 files changed, 7050 insertions, 1854 deletions
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 20bc40eec487..24ec9a4f1ffa 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -292,6 +292,7 @@ config ICE select DIMLIB select LIBIE select NET_DEVLINK + select PACKING select PLDMFW select DPLL help diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 98861cc6df7c..b9dd7b719832 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1180,126 +1180,6 @@ s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid) } /** - * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF - * @hw: Pointer to hardware structure - * @results: Pointer array to message, results[0] is pointer to message - * @mbx: Pointer to mailbox information structure - * - * This function is a default handler for MAC/VLAN requests from the VF. - * The assumption is that in this case it is acceptable to just directly - * hand off the message from the VF to the underlying shared code. - **/ -s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) -{ - struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; - u8 mac[ETH_ALEN]; - u32 *result; - int err = 0; - bool set; - u16 vlan; - u32 vid; - - /* we shouldn't be updating rules on a disabled interface */ - if (!FM10K_VF_FLAG_ENABLED(vf_info)) - err = FM10K_ERR_PARAM; - - if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) { - result = results[FM10K_MAC_VLAN_MSG_VLAN]; - - /* record VLAN id requested */ - err = fm10k_tlv_attr_get_u32(result, &vid); - if (err) - return err; - - set = !(vid & FM10K_VLAN_CLEAR); - vid &= ~FM10K_VLAN_CLEAR; - - /* if the length field has been set, this is a multi-bit - * update request. For multi-bit requests, simply disallow - * them when the pf_vid has been set. In this case, the PF - * should have already cleared the VLAN_TABLE, and if we - * allowed them, it could allow a rogue VF to receive traffic - * on a VLAN it was not assigned. In the single-bit case, we - * need to modify requests for VLAN 0 to use the default PF or - * SW vid when assigned. 
- */ - - if (vid >> 16) { - /* prevent multi-bit requests when PF has - * administratively set the VLAN for this VF - */ - if (vf_info->pf_vid) - return FM10K_ERR_PARAM; - } else { - err = fm10k_iov_select_vid(vf_info, (u16)vid); - if (err < 0) - return err; - - vid = err; - } - - /* update VSI info for VF in regards to VLAN table */ - err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set); - } - - if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) { - result = results[FM10K_MAC_VLAN_MSG_MAC]; - - /* record unicast MAC address requested */ - err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan); - if (err) - return err; - - /* block attempts to set MAC for a locked device */ - if (is_valid_ether_addr(vf_info->mac) && - !ether_addr_equal(mac, vf_info->mac)) - return FM10K_ERR_PARAM; - - set = !(vlan & FM10K_VLAN_CLEAR); - vlan &= ~FM10K_VLAN_CLEAR; - - err = fm10k_iov_select_vid(vf_info, vlan); - if (err < 0) - return err; - - vlan = (u16)err; - - /* notify switch of request for new unicast address */ - err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, - mac, vlan, set, 0); - } - - if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) { - result = results[FM10K_MAC_VLAN_MSG_MULTICAST]; - - /* record multicast MAC address requested */ - err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan); - if (err) - return err; - - /* verify that the VF is allowed to request multicast */ - if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED)) - return FM10K_ERR_PARAM; - - set = !(vlan & FM10K_VLAN_CLEAR); - vlan &= ~FM10K_VLAN_CLEAR; - - err = fm10k_iov_select_vid(vf_info, vlan); - if (err < 0) - return err; - - vlan = (u16)err; - - /* notify switch of request for new multicast address */ - err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, - mac, vlan, set); - } - - return err; -} - -/** * fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode * @vf_info: VF info structure containing capability flags * @mode: Requested xcast mode diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h index 8e814df709d2..ad3696893cb1 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h @@ -99,8 +99,6 @@ extern const struct fm10k_tlv_attr fm10k_err_msg_attr[]; s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid); s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); -s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **, - struct fm10k_mbx_info *); s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index d4255c2706fa..c67963bfe14e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -88,6 +88,7 @@ enum i40e_state { __I40E_SERVICE_SCHED, __I40E_ADMINQ_EVENT_PENDING, __I40E_MDD_EVENT_PENDING, + __I40E_MDD_VF_PRINT_PENDING, __I40E_VFLR_EVENT_PENDING, __I40E_RESET_RECOVERY_PENDING, __I40E_TIMEOUT_RECOVERY_PENDING, @@ -191,6 +192,7 @@ enum i40e_pf_flags { */ I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, I40E_FLAG_VF_VLAN_PRUNING_ENA, + I40E_FLAG_MDD_AUTO_RESET_VF, I40E_PF_FLAGS_NBITS, /* must be last */ }; @@ -572,7 +574,7 @@ struct i40e_pf { int num_alloc_vfs; /* actual number of VFs allocated */ u32 vf_aq_requests; u32 arq_overflows; /* Not fatal, possibly indicative of problems */ - + struct ratelimit_state mdd_message_rate_limit; /* DCBx/DCBNL capability for PF that indicates * whether DCBx is managed 
by firmware or host * based agent (LLDPAD). Also, indicates what @@ -1189,7 +1191,6 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi, struct i40e_fdir_filter *input, bool add); void i40e_fdir_check_and_reenable(struct i40e_pf *pf); u32 i40e_get_current_fd_count(struct i40e_pf *pf); -u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf); u32 i40e_get_current_atr_cnt(struct i40e_pf *pf); u32 i40e_get_global_fd_count(struct i40e_pf *pf); bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); @@ -1197,7 +1198,6 @@ void i40e_set_ethtool_ops(struct net_device *netdev); struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan); void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f); -void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan); int i40e_sync_vsi_filters(struct i40e_vsi *vsi); struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, u16 uplink, u32 param1); @@ -1313,7 +1313,6 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset); int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); int i40e_get_partition_bw_setting(struct i40e_pf *pf); int i40e_set_partition_bw_setting(struct i40e_pf *pf); -int i40e_commit_partition_bw_setting(struct i40e_pf *pf); void i40e_print_link_message(struct i40e_vsi *vsi, bool isup); void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index f73f5930fc58..175c1320c143 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -1016,16 +1016,6 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw, return status; } -int -i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc, - void *buff, /* can be NULL */ u16 buff_size, - struct i40e_asq_cmd_details *cmd_details, - enum i40e_admin_queue_err *aq_status) -{ - return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size, - cmd_details, true, aq_status); -} - /** * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function * @desc: pointer to the temp descriptor (non DMA mem) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index e8031f1a9b4f..370b4bddee44 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1805,37 +1805,6 @@ int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, } /** - * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting - * @hw: pointer to the hw struct - * @seid: vsi number - * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN - * @cmd_details: pointer to command details structure or NULL - **/ -int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, - u16 seid, bool enable, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_set_vsi_promiscuous_modes *cmd = - (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; - u16 flags = 0; - int status; - - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_vsi_promiscuous_modes); - if (enable) - flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; - - cmd->promiscuous_flags = cpu_to_le16(flags); - cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN); - cmd->seid = cpu_to_le16(seid); - - status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - - return status; -} - -/** * i40e_aq_get_vsi_params - get VSI configuration info * @hw: pointer 
to the hw struct * @vsi_ctx: pointer to a vsi context struct @@ -2436,136 +2405,6 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid, } /** - * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule - * @hw: pointer to the hw struct - * @opcode: AQ opcode for add or delete mirror rule - * @sw_seid: Switch SEID (to which rule refers) - * @rule_type: Rule Type (ingress/egress/VLAN) - * @id: Destination VSI SEID or Rule ID - * @count: length of the list - * @mr_list: list of mirrored VSI SEIDs or VLAN IDs - * @cmd_details: pointer to command details structure or NULL - * @rule_id: Rule ID returned from FW - * @rules_used: Number of rules used in internal switch - * @rules_free: Number of rules free in internal switch - * - * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for - * VEBs/VEPA elements only - **/ -static int i40e_mirrorrule_op(struct i40e_hw *hw, - u16 opcode, u16 sw_seid, u16 rule_type, u16 id, - u16 count, __le16 *mr_list, - struct i40e_asq_cmd_details *cmd_details, - u16 *rule_id, u16 *rules_used, u16 *rules_free) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_add_delete_mirror_rule *cmd = - (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; - struct i40e_aqc_add_delete_mirror_rule_completion *resp = - (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; - u16 buf_size; - int status; - - buf_size = count * sizeof(*mr_list); - - /* prep the rest of the request */ - i40e_fill_default_direct_cmd_desc(&desc, opcode); - cmd->seid = cpu_to_le16(sw_seid); - cmd->rule_type = cpu_to_le16(rule_type & - I40E_AQC_MIRROR_RULE_TYPE_MASK); - cmd->num_entries = cpu_to_le16(count); - /* Dest VSI for add, rule_id for delete */ - cmd->destination = cpu_to_le16(id); - if (mr_list) { - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | - I40E_AQ_FLAG_RD)); - if (buf_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - } - - status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, - cmd_details); - if (!status || - hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { - if (rule_id) - *rule_id = le16_to_cpu(resp->rule_id); - if (rules_used) - *rules_used = le16_to_cpu(resp->mirror_rules_used); - if (rules_free) - *rules_free = le16_to_cpu(resp->mirror_rules_free); - } - return status; -} - -/** - * i40e_aq_add_mirrorrule - add a mirror rule - * @hw: pointer to the hw struct - * @sw_seid: Switch SEID (to which rule refers) - * @rule_type: Rule Type (ingress/egress/VLAN) - * @dest_vsi: SEID of VSI to which packets will be mirrored - * @count: length of the list - * @mr_list: list of mirrored VSI SEIDs or VLAN IDs - * @cmd_details: pointer to command details structure or NULL - * @rule_id: Rule ID returned from FW - * @rules_used: Number of rules used in internal switch - * @rules_free: Number of rules free in internal switch - * - * Add mirror rule. 
Mirror rules are supported for VEBs or VEPA elements only - **/ -int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, - u16 rule_type, u16 dest_vsi, u16 count, - __le16 *mr_list, - struct i40e_asq_cmd_details *cmd_details, - u16 *rule_id, u16 *rules_used, u16 *rules_free) -{ - if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || - rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { - if (count == 0 || !mr_list) - return -EINVAL; - } - - return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, - rule_type, dest_vsi, count, mr_list, - cmd_details, rule_id, rules_used, rules_free); -} - -/** - * i40e_aq_delete_mirrorrule - delete a mirror rule - * @hw: pointer to the hw struct - * @sw_seid: Switch SEID (to which rule refers) - * @rule_type: Rule Type (ingress/egress/VLAN) - * @count: length of the list - * @rule_id: Rule ID that is returned in the receive desc as part of - * add_mirrorrule. - * @mr_list: list of mirrored VLAN IDs to be removed - * @cmd_details: pointer to command details structure or NULL - * @rules_used: Number of rules used in internal switch - * @rules_free: Number of rules free in internal switch - * - * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only - **/ -int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, - u16 rule_type, u16 rule_id, u16 count, - __le16 *mr_list, - struct i40e_asq_cmd_details *cmd_details, - u16 *rules_used, u16 *rules_free) -{ - /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ - if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { - /* count and mr_list shall be valid for rule_type INGRESS VLAN - * mirroring. For other rule_type, count and rule_type should - * not matter. - */ - if (count == 0 || !mr_list) - return -EINVAL; - } - - return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, - rule_type, rule_id, count, mr_list, - cmd_details, NULL, rules_used, rules_free); -} - -/** * i40e_aq_send_msg_to_vf * @hw: pointer to the hardware structure * @vfid: VF id to send msg @@ -3180,41 +3019,6 @@ i40e_aq_update_nvm_exit: } /** - * i40e_aq_rearrange_nvm - * @hw: pointer to the hw struct - * @rearrange_nvm: defines direction of rearrangement - * @cmd_details: pointer to command details structure or NULL - * - * Rearrange NVM structure, available only for transition FW - **/ -int i40e_aq_rearrange_nvm(struct i40e_hw *hw, - u8 rearrange_nvm, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aqc_nvm_update *cmd; - struct i40e_aq_desc desc; - int status; - - cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; - - i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); - - rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | - I40E_AQ_NVM_REARRANGE_TO_STRUCT); - - if (!rearrange_nvm) { - status = -EINVAL; - goto i40e_aq_rearrange_nvm_exit; - } - - cmd->command_flags |= rearrange_nvm; - status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - -i40e_aq_rearrange_nvm_exit: - return status; -} - -/** * i40e_aq_get_lldp_mib * @hw: pointer to the hw struct * @bridge_type: type of bridge requested @@ -3335,44 +3139,6 @@ int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, } /** - * i40e_aq_restore_lldp - * @hw: pointer to the hw struct - * @setting: pointer to factory setting variable or NULL - * @restore: True if factory settings should be restored - * @cmd_details: pointer to command details structure or NULL - * - * Restore LLDP Agent factory settings if @restore set to True. 
In other case - * only returns factory setting in AQ response. - **/ -int -i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_lldp_restore *cmd = - (struct i40e_aqc_lldp_restore *)&desc.params.raw; - int status; - - if (!test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) { - i40e_debug(hw, I40E_DEBUG_ALL, - "Restore LLDP not supported by current FW version.\n"); - return -ENODEV; - } - - i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); - - if (restore) - cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE; - - status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - - if (setting) - *setting = cmd->command & 1; - - return status; -} - -/** * i40e_aq_stop_lldp * @hw: pointer to the hw struct * @shutdown_agent: True if LLDP Agent needs to be Shutdown @@ -4570,84 +4336,6 @@ phy_write_end: } /** - * i40e_write_phy_register - * @hw: pointer to the HW structure - * @page: registers page number - * @reg: register address in the page - * @phy_addr: PHY address on MDIO interface - * @value: PHY register value - * - * Writes value to specified PHY register - **/ -int i40e_write_phy_register(struct i40e_hw *hw, - u8 page, u16 reg, u8 phy_addr, u16 value) -{ - int status; - - switch (hw->device_id) { - case I40E_DEV_ID_1G_BASE_T_X722: - status = i40e_write_phy_register_clause22(hw, reg, phy_addr, - value); - break; - case I40E_DEV_ID_1G_BASE_T_BC: - case I40E_DEV_ID_5G_BASE_T_BC: - case I40E_DEV_ID_10G_BASE_T: - case I40E_DEV_ID_10G_BASE_T4: - case I40E_DEV_ID_10G_BASE_T_BC: - case I40E_DEV_ID_10G_BASE_T_X722: - case I40E_DEV_ID_25G_B: - case I40E_DEV_ID_25G_SFP28: - status = i40e_write_phy_register_clause45(hw, page, reg, - phy_addr, value); - break; - default: - status = -EIO; - break; - } - - return status; -} - -/** - * i40e_read_phy_register - * @hw: pointer to the HW structure - * @page: registers page number - * @reg: register address in the page - * @phy_addr: PHY address on MDIO interface - * @value: PHY register value - * - * Reads specified PHY register value - **/ -int i40e_read_phy_register(struct i40e_hw *hw, - u8 page, u16 reg, u8 phy_addr, u16 *value) -{ - int status; - - switch (hw->device_id) { - case I40E_DEV_ID_1G_BASE_T_X722: - status = i40e_read_phy_register_clause22(hw, reg, phy_addr, - value); - break; - case I40E_DEV_ID_1G_BASE_T_BC: - case I40E_DEV_ID_5G_BASE_T_BC: - case I40E_DEV_ID_10G_BASE_T: - case I40E_DEV_ID_10G_BASE_T4: - case I40E_DEV_ID_10G_BASE_T_BC: - case I40E_DEV_ID_10G_BASE_T_X722: - case I40E_DEV_ID_25G_B: - case I40E_DEV_ID_25G_SFP28: - status = i40e_read_phy_register_clause45(hw, page, reg, - phy_addr, value); - break; - default: - status = -EIO; - break; - } - - return status; -} - -/** * i40e_get_phy_address * @hw: pointer to the HW structure * @dev_num: PHY port num that address we want @@ -4663,80 +4351,6 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) } /** - * i40e_blink_phy_link_led - * @hw: pointer to the HW structure - * @time: time how long led will blinks in secs - * @interval: gap between LED on and off in msecs - * - * Blinks PHY link LED - **/ -int i40e_blink_phy_link_led(struct i40e_hw *hw, - u32 time, u32 interval) -{ - u16 led_addr = I40E_PHY_LED_PROV_REG_1; - u16 gpio_led_port; - u8 phy_addr = 0; - int status = 0; - u16 led_ctl; - u8 port_num; - u16 led_reg; - u32 i; - - i = rd32(hw, I40E_PFGEN_PORTNUM); - port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); - phy_addr = i40e_get_phy_address(hw, 
port_num); - - for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, - led_addr++) { - status = i40e_read_phy_register_clause45(hw, - I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, - &led_reg); - if (status) - goto phy_blinking_end; - led_ctl = led_reg; - if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { - led_reg = 0; - status = i40e_write_phy_register_clause45(hw, - I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, - led_reg); - if (status) - goto phy_blinking_end; - break; - } - } - - if (time > 0 && interval > 0) { - for (i = 0; i < time * 1000; i += interval) { - status = i40e_read_phy_register_clause45(hw, - I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, &led_reg); - if (status) - goto restore_config; - if (led_reg & I40E_PHY_LED_MANUAL_ON) - led_reg = 0; - else - led_reg = I40E_PHY_LED_MANUAL_ON; - status = i40e_write_phy_register_clause45(hw, - I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, led_reg); - if (status) - goto restore_config; - msleep(interval); - } - } - -restore_config: - status = i40e_write_phy_register_clause45(hw, - I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, led_ctl); - -phy_blinking_end: - return status; -} - -/** * i40e_led_get_reg - read LED register * @hw: pointer to the HW structure * @led_addr: LED register address @@ -5269,39 +4883,6 @@ i40e_find_segment_in_package(u32 segment_type, (struct i40e_profile_section_header *)((u8 *)(profile) + (offset)) /** - * i40e_find_section_in_profile - * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE) - * @profile: pointer to the i40e segment header to be searched - * - * This function searches i40e segment for a particular section type. On - * success it returns a pointer to the section header, otherwise it will - * return NULL. - **/ -struct i40e_profile_section_header * -i40e_find_section_in_profile(u32 section_type, - struct i40e_profile_segment *profile) -{ - struct i40e_profile_section_header *sec; - struct i40e_section_table *sec_tbl; - u32 sec_off; - u32 i; - - if (profile->header.type != SEGMENT_TYPE_I40E) - return NULL; - - I40E_SECTION_TABLE(profile, sec_tbl); - - for (i = 0; i < sec_tbl->section_count; i++) { - sec_off = sec_tbl->section_offset[i]; - sec = I40E_SECTION_HEADER(profile, sec_off); - if (sec->section.type == section_type) - return sec; - } - - return NULL; -} - -/** * i40e_ddp_exec_aq_section - Execute generic AQ for DDP * @hw: pointer to the hw struct * @aq: command buffer containing all data to execute AQ @@ -5524,45 +5105,6 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, } /** - * i40e_add_pinfo_to_list - * @hw: pointer to the hardware structure - * @profile: pointer to the profile segment of the package - * @profile_info_sec: buffer for information section - * @track_id: package tracking id - * - * Register a profile to the list of loaded profiles. 
- */ -int -i40e_add_pinfo_to_list(struct i40e_hw *hw, - struct i40e_profile_segment *profile, - u8 *profile_info_sec, u32 track_id) -{ - struct i40e_profile_section_header *sec = NULL; - struct i40e_profile_info *pinfo; - u32 offset = 0, info = 0; - int status = 0; - - sec = (struct i40e_profile_section_header *)profile_info_sec; - sec->tbl_size = 1; - sec->data_end = sizeof(struct i40e_profile_section_header) + - sizeof(struct i40e_profile_info); - sec->section.type = SECTION_TYPE_INFO; - sec->section.offset = sizeof(struct i40e_profile_section_header); - sec->section.size = sizeof(struct i40e_profile_info); - pinfo = (struct i40e_profile_info *)(profile_info_sec + - sec->section.offset); - pinfo->track_id = track_id; - pinfo->version = profile->version; - pinfo->op = I40E_DDP_ADD_TRACKID; - memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); - - status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, - track_id, &offset, &info, NULL); - - return status; -} - -/** * i40e_aq_add_cloud_filters * @hw: pointer to the hardware structure * @seid: VSI seid to add cloud filters from diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 8db1eb0c1768..352e957443fd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -1491,19 +1491,6 @@ void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc) } /** - * i40e_dcb_hw_get_num_tc - * @hw: pointer to the hw struct - * - * Returns number of traffic classes configured in HW - **/ -u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw) -{ - u32 reg = rd32(hw, I40E_PRTDCB_GENC); - - return FIELD_GET(I40E_PRTDCB_GENC_NUMTC_MASK, reg); -} - -/** * i40e_dcb_hw_rx_ets_bw_config * @hw: pointer to the hw struct * @bw_share: Bandwidth share indexed per traffic class diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index d76497566e40..d5662c639c41 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -253,7 +253,6 @@ void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw, void i40e_dcb_hw_pfc_config(struct i40e_hw *hw, u8 pfc_en, u8 *prio_tc); void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc); -u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw); void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share, u8 *mode, u8 *prio_type); void i40e_dcb_hw_rx_up2tc_config(struct i40e_hw *hw, u8 *prio_tc); diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 208c2f0857b6..6cd9da662ae1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -722,7 +722,7 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id) dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n", vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs); dev_info(&pf->pdev->dev, " num MDD=%lld\n", - vf->num_mdd_events); + vf->mdd_tx_events.count + vf->mdd_rx_events.count); } else { dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index bce5b76f1e7a..8a7a83f83ee5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -459,6 +459,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = { I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0), 
I40E_PRIV_FLAG("vf-vlan-pruning", I40E_FLAG_VF_VLAN_PRUNING_ENA, 0), + I40E_PRIV_FLAG("mdd-auto-reset-vf", + I40E_FLAG_MDD_AUTO_RESET_VF, 0), }; #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 0e1d9e2fbf38..65a702668e21 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1666,9 +1666,8 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, * @vsi: VSI to remove from * @f: the filter to remove from the list * - * This function should be called instead of i40e_del_filter only if you know - * the exact filter you will remove already, such as via i40e_find_filter or - * i40e_find_mac. + * This function requires you've found * the exact filter you will remove + * already, such as via i40e_find_filter or i40e_find_mac. * * NOTE: This function is expected to be called with mac_filter_hash_lock * being held. @@ -1698,29 +1697,6 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) } /** - * i40e_del_filter - Remove a MAC/VLAN filter from the VSI - * @vsi: the VSI to be searched - * @macaddr: the MAC address - * @vlan: the VLAN - * - * NOTE: This function is expected to be called with mac_filter_hash_lock - * being held. - * ANOTHER NOTE: This function MUST be called from within the context of - * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe() - * instead of list_for_each_entry(). - **/ -void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan) -{ - struct i40e_mac_filter *f; - - if (!vsi || !macaddr) - return; - - f = i40e_find_filter(vsi, macaddr, vlan); - __i40e_del_filter(vsi, f); -} - -/** * i40e_add_mac_filter - Add a MAC filter for all active VLANs * @vsi: the VSI to be searched * @macaddr: the mac address to be filtered @@ -9629,19 +9605,6 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, } /** - * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters - * @pf: board private structure - **/ -u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) -{ - u32 val, fcnt_prog; - - val = rd32(&pf->hw, I40E_PFQF_FDSTAT); - fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK); - return fcnt_prog; -} - -/** * i40e_get_current_fd_count - Get total FD filters programmed for this PF * @pf: board private structure **/ @@ -11217,6 +11180,67 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired) } /** + * i40e_print_vf_mdd_event - print VF Tx/Rx malicious driver detect event + * @pf: board private structure + * @vf: pointer to the VF structure + * @is_tx: true - for Tx event, false - for Rx + */ +static void i40e_print_vf_mdd_event(struct i40e_pf *pf, struct i40e_vf *vf, + bool is_tx) +{ + dev_err(&pf->pdev->dev, is_tx ? + "%lld Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pm. mdd-auto-reset-vfs=%s\n" : + "%lld Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pm. mdd-auto-reset-vfs=%s\n", + is_tx ? vf->mdd_tx_events.count : vf->mdd_rx_events.count, + pf->hw.pf_id, + vf->vf_id, + vf->default_lan_addr.addr, + str_on_off(test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags))); +} + +/** + * i40e_print_vfs_mdd_events - print VFs malicious driver detect event + * @pf: pointer to the PF structure + * + * Called from i40e_handle_mdd_event to rate limit and print VFs MDD events. 
+ */ +static void i40e_print_vfs_mdd_events(struct i40e_pf *pf) +{ + unsigned int i; + + /* check that there are pending MDD events to print */ + if (!test_and_clear_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state)) + return; + + if (!__ratelimit(&pf->mdd_message_rate_limit)) + return; + + for (i = 0; i < pf->num_alloc_vfs; i++) { + struct i40e_vf *vf = &pf->vf[i]; + bool is_printed = false; + + /* only print Rx MDD event message if there are new events */ + if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) { + vf->mdd_rx_events.last_printed = vf->mdd_rx_events.count; + i40e_print_vf_mdd_event(pf, vf, false); + is_printed = true; + } + + /* only print Tx MDD event message if there are new events */ + if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) { + vf->mdd_tx_events.last_printed = vf->mdd_tx_events.count; + i40e_print_vf_mdd_event(pf, vf, true); + is_printed = true; + } + + if (is_printed && !test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)) + dev_info(&pf->pdev->dev, + "Use PF Control I/F to re-enable the VF #%d\n", + i); + } +} + +/** * i40e_handle_mdd_event * @pf: pointer to the PF structure * @@ -11230,8 +11254,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) u32 reg; int i; - if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state)) + if (!test_and_clear_bit(__I40E_MDD_EVENT_PENDING, pf->state)) { + /* Since the VF MDD event logging is rate limited, check if + * there are pending MDD events. + */ + i40e_print_vfs_mdd_events(pf); return; + } /* find what triggered the MDD event */ reg = rd32(hw, I40E_GL_MDET_TX); @@ -11275,36 +11304,48 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) /* see if one of the VFs needs its hand slapped */ for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { + bool is_mdd_on_tx = false; + bool is_mdd_on_rx = false; + vf = &(pf->vf[i]); reg = rd32(hw, I40E_VP_MDET_TX(i)); if (reg & I40E_VP_MDET_TX_VALID_MASK) { + set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state); wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); - vf->num_mdd_events++; - dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", - i); - dev_info(&pf->pdev->dev, - "Use PF Control I/F to re-enable the VF\n"); + vf->mdd_tx_events.count++; set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); + is_mdd_on_tx = true; } reg = rd32(hw, I40E_VP_MDET_RX(i)); if (reg & I40E_VP_MDET_RX_VALID_MASK) { + set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state); wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); - vf->num_mdd_events++; - dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", - i); - dev_info(&pf->pdev->dev, - "Use PF Control I/F to re-enable the VF\n"); + vf->mdd_rx_events.count++; set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); + is_mdd_on_rx = true; + } + + if ((is_mdd_on_tx || is_mdd_on_rx) && + test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)) { + /* VF MDD event counters will be cleared by + * reset, so print the event prior to reset. 
+ */ + if (is_mdd_on_rx) + i40e_print_vf_mdd_event(pf, vf, false); + if (is_mdd_on_tx) + i40e_print_vf_mdd_event(pf, vf, true); + + i40e_vc_reset_vf(vf, true); } } - /* re-enable mdd interrupt cause */ - clear_bit(__I40E_MDD_EVENT_PENDING, pf->state); reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); i40e_flush(hw); + + i40e_print_vfs_mdd_events(pf); } /** @@ -12614,89 +12655,6 @@ int i40e_set_partition_bw_setting(struct i40e_pf *pf) } /** - * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition - * @pf: board private structure - **/ -int i40e_commit_partition_bw_setting(struct i40e_pf *pf) -{ - /* Commit temporary BW setting to permanent NVM image */ - enum i40e_admin_queue_err last_aq_status; - u16 nvm_word; - int ret; - - if (pf->hw.partition_id != 1) { - dev_info(&pf->pdev->dev, - "Commit BW only works on partition 1! This is partition %d", - pf->hw.partition_id); - ret = -EOPNOTSUPP; - goto bw_commit_out; - } - - /* Acquire NVM for read access */ - ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); - last_aq_status = pf->hw.aq.asq_last_status; - if (ret) { - dev_info(&pf->pdev->dev, - "Cannot acquire NVM for read access, err %pe aq_err %s\n", - ERR_PTR(ret), - i40e_aq_str(&pf->hw, last_aq_status)); - goto bw_commit_out; - } - - /* Read word 0x10 of NVM - SW compatibility word 1 */ - ret = i40e_aq_read_nvm(&pf->hw, - I40E_SR_NVM_CONTROL_WORD, - 0x10, sizeof(nvm_word), &nvm_word, - false, NULL); - /* Save off last admin queue command status before releasing - * the NVM - */ - last_aq_status = pf->hw.aq.asq_last_status; - i40e_release_nvm(&pf->hw); - if (ret) { - dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n", - ERR_PTR(ret), - i40e_aq_str(&pf->hw, last_aq_status)); - goto bw_commit_out; - } - - /* Wait a bit for NVM release to complete */ - msleep(50); - - /* Acquire NVM for write access */ - ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); - last_aq_status = pf->hw.aq.asq_last_status; - if (ret) { - dev_info(&pf->pdev->dev, - "Cannot acquire NVM for write access, err %pe aq_err %s\n", - ERR_PTR(ret), - i40e_aq_str(&pf->hw, last_aq_status)); - goto bw_commit_out; - } - /* Write it back out unchanged to initiate update NVM, - * which will force a write of the shadow (alt) RAM to - * the NVM - thus storing the bandwidth values permanently. - */ - ret = i40e_aq_update_nvm(&pf->hw, - I40E_SR_NVM_CONTROL_WORD, - 0x10, sizeof(nvm_word), - &nvm_word, true, 0, NULL); - /* Save off last admin queue command status before releasing - * the NVM - */ - last_aq_status = pf->hw.aq.asq_last_status; - i40e_release_nvm(&pf->hw); - if (ret) - dev_info(&pf->pdev->dev, - "BW settings NOT SAVED, err %pe aq_err %s\n", - ERR_PTR(ret), - i40e_aq_str(&pf->hw, last_aq_status)); -bw_commit_out: - - return ret; -} - -/** * i40e_is_total_port_shutdown_enabled - read NVM and return value * if total port shutdown feature is enabled for this PF * @pf: board private structure @@ -15998,6 +15956,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + /* VF MDD event logs are rate limited to one second intervals */ + ratelimit_state_init(&pf->mdd_message_rate_limit, 1 * HZ, 1); + /* Reconfigure hardware for allowing smaller MSS in the case * of TSO, so that we avoid the MDD being fired and causing * a reset in the case of small MSS+TSO. 
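The MDD rework above replaces the single per-VF num_mdd_events counter with separate Tx/Rx counters plus a last_printed watermark, and defers printing to a rate-limited helper run from the service task. Below is a minimal standalone C sketch of that bookkeeping pattern only, not the kernel code: the struct and helper names merely mirror i40e's, and a crude one-second window stands in for the kernel's ratelimit_state/__ratelimit().

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Mirrors struct i40e_mdd_vf_events: a running event count plus a
 * watermark recording the count at the time of the last print. */
struct mdd_events {
	unsigned long long count;
	unsigned long long last_printed;
};

/* Crude stand-in for the kernel's __ratelimit(): allow at most one
 * burst of prints per one-second window. */
static bool ratelimit_ok(time_t *window_start)
{
	time_t now = time(NULL);

	if (now == *window_start)
		return false;
	*window_start = now;
	return true;
}

/* Print only when new events arrived since the last print, then move
 * the watermark - the same dedup i40e_print_vfs_mdd_events() performs. */
static void print_if_new(int vf_id, const char *dir, struct mdd_events *ev)
{
	if (ev->count == ev->last_printed)
		return;
	ev->last_printed = ev->count;
	printf("%llu %s MDD events on VF %d\n", ev->count, dir, vf_id);
}

int main(void)
{
	struct mdd_events rx = { 0 }, tx = { 0 };
	time_t window = 0;

	rx.count += 3;			/* interrupt path bumps the counters */
	tx.count += 1;

	if (ratelimit_ok(&window)) {	/* service task, rate limited */
		print_if_new(0, "Rx", &rx);
		print_if_new(0, "Tx", &tx);
	}
	if (ratelimit_ok(&window))	/* same second: suppressed */
		print_if_new(0, "Rx", &rx);
	return 0;
}

The useful property, as in the driver, is that suppressed prints are not lost: the count keeps advancing while the watermark stays put, so the next permitted pass prints the up-to-date total.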
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 5a0699ca7ce5..099bb8ab7d70 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -27,13 +27,6 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details); int -i40e_asq_send_command_v2(struct i40e_hw *hw, - struct i40e_aq_desc *desc, - void *buff, /* can be NULL */ - u16 buff_size, - struct i40e_asq_cmd_details *cmd_details, - enum i40e_admin_queue_err *aq_status); -int i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details, @@ -72,8 +65,6 @@ int i40e_led_set_phy(struct i40e_hw *hw, bool on, u16 led_addr, u32 mode); int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, u16 *val); -int i40e_blink_phy_link_led(struct i40e_hw *hw, - u32 time, u32 interval); /* admin send queue commands */ @@ -141,9 +132,6 @@ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details); -int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, - u16 seid, bool enable, - struct i40e_asq_cmd_details *cmd_details); int i40e_aq_get_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details); @@ -176,14 +164,6 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid, struct i40e_aqc_remove_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details, enum i40e_admin_queue_err *aq_status); -int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, - u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, - struct i40e_asq_cmd_details *cmd_details, - u16 *rule_id, u16 *rules_used, u16 *rules_free); -int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, - u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, - struct i40e_asq_cmd_details *cmd_details, - u16 *rules_used, u16 *rules_free); int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, @@ -220,9 +200,6 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, bool last_command, u8 preservation_flags, struct i40e_asq_cmd_details *cmd_details); -int i40e_aq_rearrange_nvm(struct i40e_hw *hw, - u8 rearrange_nvm, - struct i40e_asq_cmd_details *cmd_details); int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, u8 mib_type, void *buff, u16 buff_size, u16 *local_len, u16 *remote_len, @@ -234,9 +211,6 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw, int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, bool enable_update, struct i40e_asq_cmd_details *cmd_details); -int -i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, - struct i40e_asq_cmd_details *cmd_details); int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, bool persist, struct i40e_asq_cmd_details *cmd_details); @@ -458,13 +432,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value); int i40e_write_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 value); -int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg, - u8 phy_addr, u16 *value); -int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, - u8 
phy_addr, u16 value); u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); -int i40e_blink_phy_link_led(struct i40e_hw *hw, - u32 time, u32 interval); int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, u16 buff_size, u32 track_id, u32 *error_offset, u32 *error_info, @@ -477,20 +445,12 @@ int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, struct i40e_generic_seg_header * i40e_find_segment_in_package(u32 segment_type, struct i40e_package_header *pkg_header); -struct i40e_profile_section_header * -i40e_find_section_in_profile(u32 section_type, - struct i40e_profile_segment *profile); int i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg, u32 track_id); int i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg, u32 track_id); -int -i40e_add_pinfo_to_list(struct i40e_hw *hw, - struct i40e_profile_segment *profile, - u8 *profile_info_sec, u32 track_id); - /* i40e_ddp */ int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index dfa785e39458..1120f8e4bb67 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -216,7 +216,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) * @notify_vf: notify vf about reset or not * Reset VF handler. **/ -static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf) +void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf) { struct i40e_pf *pf = vf->pf; int i; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 66f95e2f3146..5cf74f16f433 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -64,6 +64,12 @@ struct i40evf_channel { u64 max_tx_rate; /* bandwidth rate allocation for VSIs */ }; +struct i40e_mdd_vf_events { + u64 count; /* total count of Rx|Tx events */ + /* count number of the last printed event */ + u64 last_printed; +}; + /* VF information structure */ struct i40e_vf { struct i40e_pf *pf; @@ -92,7 +98,9 @@ struct i40e_vf { u8 num_queue_pairs; /* num of qps assigned to VF vsis */ u8 num_req_queues; /* num of requested qps */ - u64 num_mdd_events; /* num of mdd events detected */ + /* num of mdd tx and rx events detected */ + struct i40e_mdd_vf_events mdd_rx_events; + struct i40e_mdd_vf_events mdd_tx_events; unsigned long vf_caps; /* vf's adv. 
capabilities */ unsigned long vf_states; /* vf's runtime states */ @@ -120,6 +128,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs); int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen); int i40e_vc_process_vflr_event(struct i40e_pf *pf); +void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf); bool i40e_reset_vf(struct i40e_vf *vf, bool flr); bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr); void i40e_vc_notify_vf_reset(struct i40e_vf *vf); diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 4e885df789ef..e28f1905a4a0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -395,32 +395,6 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring, WARN_ON_ONCE(1); } -static int -i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first, - struct xdp_buff *xdp, const unsigned int size) -{ - struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first); - - if (!xdp_buff_has_frags(first)) { - sinfo->nr_frags = 0; - sinfo->xdp_frags_size = 0; - xdp_buff_set_frags_flag(first); - } - - if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { - xsk_buff_free(first); - return -ENOMEM; - } - - __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, - virt_to_page(xdp->data_hard_start), - XDP_PACKET_HEADROOM, size); - sinfo->xdp_frags_size += size; - xsk_buff_add_frag(xdp); - - return 0; -} - /** * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring * @rx_ring: Rx ring @@ -486,8 +460,10 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) if (!first) first = bi; - else if (i40e_add_xsk_frag(rx_ring, first, bi, size)) + else if (!xsk_buff_add_frag(first, bi)) { + xsk_buff_free(first); break; + } if (++next_to_process == count) next_to_process = 0; diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index a9e54866ae6b..6faa62bced3a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -773,6 +773,11 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, f->state = IAVF_VLAN_ADD; adapter->num_vlan_filters++; iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER); + } else if (f->state == IAVF_VLAN_REMOVE) { + /* IAVF_VLAN_REMOVE means that VLAN wasn't yet removed. + * We can safely only change the state here. + */ + f->state = IAVF_VLAN_ACTIVE; + } clearout: @@ -793,8 +798,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan) f = iavf_find_vlan(adapter, vlan); if (f) { - f->state = IAVF_VLAN_REMOVE; - iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER); + /* IAVF_VLAN_ADD means that VLAN wasn't even added yet. + * Remove it from the list. 
+ */ + if (f->state == IAVF_VLAN_ADD) { + list_del(&f->list); + kfree(f); + adapter->num_vlan_filters--; + } else { + f->state = IAVF_VLAN_REMOVE; + iavf_schedule_aq_request(adapter, + IAVF_FLAG_AQ_DEL_VLAN_FILTER); + } } spin_unlock_bh(&adapter->mac_vlan_list_lock); @@ -1180,7 +1195,7 @@ static void iavf_napi_enable_all(struct iavf_adapter *adapter) q_vector = &adapter->q_vectors[q_idx]; napi = &q_vector->napi; - napi_enable(napi); + napi_enable_locked(napi); } } @@ -1196,7 +1211,7 @@ static void iavf_napi_disable_all(struct iavf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = &adapter->q_vectors[q_idx]; - napi_disable(&q_vector->napi); + napi_disable_locked(&q_vector->napi); } } @@ -1800,8 +1815,8 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) q_vector->v_idx = q_idx; q_vector->reg_idx = q_idx; cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); - netif_napi_add(adapter->netdev, &q_vector->napi, - iavf_napi_poll); + netif_napi_add_locked(adapter->netdev, &q_vector->napi, + iavf_napi_poll); } return 0; @@ -1827,7 +1842,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter) for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; - netif_napi_del(&q_vector->napi); + netif_napi_del_locked(&q_vector->napi); } kfree(adapter->q_vectors); adapter->q_vectors = NULL; @@ -1968,6 +1983,7 @@ err: static void iavf_finish_config(struct work_struct *work) { struct iavf_adapter *adapter; + bool locks_released = false; int pairs, err; adapter = container_of(work, struct iavf_adapter, finish_config); @@ -1976,7 +1992,7 @@ static void iavf_finish_config(struct work_struct *work) * The dev->lock is needed to update the queue number */ rtnl_lock(); - mutex_lock(&adapter->netdev->lock); + netdev_lock(adapter->netdev); mutex_lock(&adapter->crit_lock); if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) && @@ -1988,26 +2004,34 @@ static void iavf_finish_config(struct work_struct *work) switch (adapter->state) { case __IAVF_DOWN: + /* Set the real number of queues when reset occurs while + * state == __IAVF_DOWN + */ + pairs = adapter->num_active_queues; + netif_set_real_num_rx_queues(adapter->netdev, pairs); + netif_set_real_num_tx_queues(adapter->netdev, pairs); + if (adapter->netdev->reg_state != NETREG_REGISTERED) { + mutex_unlock(&adapter->crit_lock); + netdev_unlock(adapter->netdev); + locks_released = true; err = register_netdevice(adapter->netdev); if (err) { dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n", err); /* go back and try again.*/ + mutex_lock(&adapter->crit_lock); iavf_free_rss(adapter); iavf_free_misc_irq(adapter); iavf_reset_interrupt_capability(adapter); iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER); + mutex_unlock(&adapter->crit_lock); goto out; } } - - /* Set the real number of queues when reset occurs while - * state == __IAVF_DOWN - */ - fallthrough; + break; case __IAVF_RUNNING: pairs = adapter->num_active_queues; netif_set_real_num_rx_queues(adapter->netdev, pairs); @@ -2019,8 +2043,10 @@ static void iavf_finish_config(struct work_struct *work) } out: - mutex_unlock(&adapter->crit_lock); - mutex_unlock(&adapter->netdev->lock); + if (!locks_released) { + mutex_unlock(&adapter->crit_lock); + netdev_unlock(adapter->netdev); + } rtnl_unlock(); } @@ -2713,12 +2739,16 @@ static void iavf_watchdog_task(struct work_struct *work) struct iavf_adapter *adapter = container_of(work, struct iavf_adapter, watchdog_task.work); + struct net_device *netdev = 
adapter->netdev; struct iavf_hw *hw = &adapter->hw; u32 reg_val; + netdev_lock(netdev); if (!mutex_trylock(&adapter->crit_lock)) { - if (adapter->state == __IAVF_REMOVE) + if (adapter->state == __IAVF_REMOVE) { + netdev_unlock(netdev); return; + } goto restart_watchdog; } @@ -2730,30 +2760,35 @@ static void iavf_watchdog_task(struct work_struct *work) case __IAVF_STARTUP: iavf_startup(adapter); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(30)); return; case __IAVF_INIT_VERSION_CHECK: iavf_init_version_check(adapter); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(30)); return; case __IAVF_INIT_GET_RESOURCES: iavf_init_get_resources(adapter); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; case __IAVF_INIT_EXTENDED_CAPS: iavf_init_process_extended_caps(adapter); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; case __IAVF_INIT_CONFIG_ADAPTER: iavf_init_config_adapter(adapter); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; @@ -2765,6 +2800,7 @@ static void iavf_watchdog_task(struct work_struct *work) * as it can loop forever */ mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); return; } if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { @@ -2773,6 +2809,7 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; iavf_shutdown_adminq(hw); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, (5 * HZ)); return; @@ -2780,6 +2817,7 @@ static void iavf_watchdog_task(struct work_struct *work) /* Try again from failed step*/ iavf_change_state(adapter, adapter->last_state); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ); return; case __IAVF_COMM_FAILED: @@ -2792,6 +2830,7 @@ static void iavf_watchdog_task(struct work_struct *work) iavf_change_state(adapter, __IAVF_INIT_FAILED); adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); return; } reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & @@ -2811,12 +2850,14 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(10)); return; case __IAVF_RESETTING: mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ * 2); return; @@ -2847,6 +2888,7 @@ static void iavf_watchdog_task(struct work_struct *work) case __IAVF_REMOVE: default: mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); return; } @@ -2858,6 +2900,7 @@ static void iavf_watchdog_task(struct work_struct *work) dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ * 2); return; @@ -2865,6 +2908,7 @@ static void iavf_watchdog_task(struct work_struct 
*work) mutex_unlock(&adapter->crit_lock); restart_watchdog: + netdev_unlock(netdev); if (adapter->state >= __IAVF_DOWN) queue_work(adapter->wq, &adapter->adminq_task); if (adapter->aq_required) @@ -2990,12 +3034,12 @@ static void iavf_reset_task(struct work_struct *work) /* When device is being removed it doesn't make sense to run the reset * task, just return in such a case. */ - mutex_lock(&netdev->lock); + netdev_lock(netdev); if (!mutex_trylock(&adapter->crit_lock)) { if (adapter->state != __IAVF_REMOVE) queue_work(adapter->wq, &adapter->reset_task); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return; } @@ -3043,7 +3087,7 @@ static void iavf_reset_task(struct work_struct *work) reg_val); iavf_disable_vf(adapter); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return; /* Do not attempt to reinit. It's dead, Jim. */ } @@ -3184,7 +3228,7 @@ continue_reset: wake_up(&adapter->reset_waitqueue); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return; reset_err: @@ -3195,7 +3239,7 @@ reset_err: iavf_disable_vf(adapter); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); } @@ -3667,10 +3711,10 @@ exit: if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) return 0; - mutex_lock(&netdev->lock); + netdev_lock(netdev); netif_set_real_num_rx_queues(netdev, total_qps); netif_set_real_num_tx_queues(netdev, total_qps); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return ret; } @@ -4340,14 +4384,17 @@ static int iavf_open(struct net_device *netdev) return -EIO; } + netdev_lock(netdev); while (!mutex_trylock(&adapter->crit_lock)) { /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock * is already taken and iavf_open is called from an upper * device's notifier reacting on NETDEV_REGISTER event. * We have to leave here to avoid dead lock. */ - if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) + if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) { + netdev_unlock(netdev); return -EBUSY; + } usleep_range(500, 1000); } @@ -4396,6 +4443,7 @@ static int iavf_open(struct net_device *netdev) iavf_irq_enable(adapter, true); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); return 0; @@ -4408,6 +4456,7 @@ err_setup_tx: iavf_free_all_tx_resources(adapter); err_unlock: mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); return err; } @@ -4429,10 +4478,12 @@ static int iavf_close(struct net_device *netdev) u64 aq_to_restore; int status; + netdev_lock(netdev); mutex_lock(&adapter->crit_lock); if (adapter->state <= __IAVF_DOWN_PENDING) { mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); return 0; } @@ -4466,6 +4517,7 @@ static int iavf_close(struct net_device *netdev) iavf_free_traffic_irqs(adapter); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); /* We explicitly don't free resources here because the hardware is * still active and can DMA into memory. 
Resources are cleared in @@ -5342,6 +5394,7 @@ static int iavf_suspend(struct device *dev_d) netif_device_detach(netdev); + netdev_lock(netdev); mutex_lock(&adapter->crit_lock); if (netif_running(netdev)) { @@ -5353,6 +5406,7 @@ static int iavf_suspend(struct device *dev_d) iavf_reset_interrupt_capability(adapter); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); return 0; } @@ -5451,6 +5505,7 @@ static void iavf_remove(struct pci_dev *pdev) if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); + netdev_lock(netdev); mutex_lock(&adapter->crit_lock); dev_info(&adapter->pdev->dev, "Removing device\n"); iavf_change_state(adapter, __IAVF_REMOVE); @@ -5487,6 +5542,7 @@ static void iavf_remove(struct pci_dev *pdev) mutex_destroy(&hw->aq.asq_mutex); mutex_unlock(&adapter->crit_lock); mutex_destroy(&adapter->crit_lock); + netdev_unlock(netdev); iounmap(hw->hw_addr); pci_release_regions(pdev); diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 3307d551f431..9e0d9f710441 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -32,7 +32,8 @@ ice-y := ice_main.o \ ice_parser_rt.o \ ice_idc.o \ devlink/devlink.o \ - devlink/devlink_port.o \ + devlink/health.o \ + devlink/port.o \ ice_sf_eth.o \ ice_sf_vsi_vlan_ops.o \ ice_ddp.o \ diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c index 415445cefdb2..dbdb83567364 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink.c +++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c @@ -6,7 +6,7 @@ #include "ice.h" #include "ice_lib.h" #include "devlink.h" -#include "devlink_port.h" +#include "port.h" #include "ice_eswitch.h" #include "ice_fw_update.h" #include "ice_dcb_lib.h" @@ -368,14 +368,18 @@ static int ice_devlink_info_get(struct devlink *devlink, } break; case ICE_VERSION_RUNNING: - err = devlink_info_version_running_put(req, key, ctx->buf); + err = devlink_info_version_running_put_ext(req, key, + ctx->buf, + DEVLINK_INFO_VERSION_TYPE_COMPONENT); if (err) { NL_SET_ERR_MSG_MOD(extack, "Unable to set running version"); goto out_free_ctx; } break; case ICE_VERSION_STORED: - err = devlink_info_version_stored_put(req, key, ctx->buf); + err = devlink_info_version_stored_put_ext(req, key, + ctx->buf, + DEVLINK_INFO_VERSION_TYPE_COMPONENT); if (err) { NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version"); goto out_free_ctx; @@ -977,6 +981,9 @@ static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv /* preallocate memory for ice_sched_node */ node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + *priv = node; return 0; @@ -1207,9 +1214,15 @@ static int ice_devlink_reinit_up(struct ice_pf *pf) struct ice_vsi *vsi = ice_get_main_vsi(pf); int err; + err = ice_init_hw(&pf->hw); + if (err) { + dev_err(ice_pf_to_dev(pf), "ice_init_hw failed: %d\n", err); + return err; + } + err = ice_init_dev(pf); if (err) - return err; + goto unroll_hw_init; vsi->flags = ICE_VSI_FLAG_INIT; @@ -1232,6 +1245,8 @@ err_load: rtnl_unlock(); err_vsi_cfg: ice_deinit_dev(pf); +unroll_hw_init: + ice_deinit_hw(&pf->hw); return err; } diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c new file mode 100644 index 000000000000..ea40f7941259 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/devlink/health.c @@ -0,0 +1,550 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright 
(c) 2024, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_adminq_cmd.h" /* for enum ice_aqc_health_status_elem */
+#include "health.h"
+
+#define ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, obj, name) \
+	devlink_fmsg_put(fmsg, #name, (obj)->name)
+
+#define ICE_HEALTH_STATUS_DATA_SIZE 2
+
+struct ice_health_status {
+	enum ice_aqc_health_status code;
+	const char *description;
+	const char *solution;
+	const char *data_label[ICE_HEALTH_STATUS_DATA_SIZE];
+};
+
+/*
+ * In addition to the health status codes provided below, the firmware might
+ * generate health status codes that are not pertinent to the end-user.
+ * For instance, health code 0x1002 is triggered when an admin queue command
+ * fails. Such codes should be disregarded by the end-user.
+ * The lookup table below must be kept sorted by code, since it is searched
+ * with bsearch().
+ */
+
+static const char *const ice_common_port_solutions =
+	"Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.";
+static const char *const ice_port_number_label = "Port Number";
+static const char *const ice_update_nvm_solution = "Update to the latest NVM image.";
+
+static const struct ice_health_status ice_health_status_lookup[] = {
+	{ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT, "An unsupported module was detected.",
+	 ice_common_port_solutions, {ice_port_number_label}},
+	{ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE, "Module type is not supported.",
+	 "Change or replace the module or cable.", {ice_port_number_label}},
+	{ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL, "Module is not qualified.",
+	 ice_common_port_solutions, {ice_port_number_label}},
+	{ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM, "Device cannot communicate with the module.",
+	 ice_common_port_solutions, {ice_port_number_label}},
+	{ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT, "Unresolved module conflict.",
+	 "Manually set speed/duplex or change the port option. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.",
+	 {ice_port_number_label}},
+	{ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT, "Module is not present.",
+	 "Check that the module is inserted correctly. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.",
+	 {ice_port_number_label}},
+	{ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED, "Underutilized module.",
+	 "Change or replace the module or cable. Change the port option.",
+	 {ice_port_number_label}},
+	{ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT, "An unsupported module was detected.",
+	 ice_common_port_solutions, {ice_port_number_label}},
+	{ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG, "Invalid link configuration.",
+	 NULL, {ice_port_number_label}},
+	{ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS, "Port hardware access error.",
+	 ice_update_nvm_solution, {ice_port_number_label}},
+	{ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE, "A port is unreachable.",
+	 "Change the port option. Update to the latest NVM image."},
+	{ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED, "Port speed is limited due to module.",
+	 "Change the module or configure the port option to match the current module speed. Change the port option.",
+	 {ice_port_number_label}},
+	{ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT,
+	 "All configured link modes were attempted but failed to establish link. 
The device will restart the process to establish link.", + "Check link partner connection and configuration.", + {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED, + "Port speed is limited by PHY capabilities.", + "Change the module to align to port option.", {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO, "LOM topology netlist is corrupted.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_NETLIST, "Unrecoverable netlist error.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT, "Port topology conflict.", + "Change the port option. Update to the latest NVM image."}, + {ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS, "Unrecoverable hardware access error.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME, "Unrecoverable runtime error.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT, "Link management engine failed to initialize.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD, + "Failed to load the firmware image in the external PHY.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_INFO_RECOVERY, "The device is in firmware recovery mode.", + ice_update_nvm_solution, {"Extended Error"}}, + {ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS, "The flash chip cannot be accessed.", + "If issue persists, call customer support.", {"Access Type"}}, + {ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH, "NVM authentication failed.", + ice_update_nvm_solution}, + {ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH, "Option ROM authentication failed.", + ice_update_nvm_solution}, + {ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH, "DDP package authentication failed.", + "Update to latest base driver and DDP package."}, + {ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT, "NVM image is incompatible.", + ice_update_nvm_solution}, + {ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT, "Option ROM is incompatible.", + ice_update_nvm_solution, {"Expected PCI Device ID", "Expected Module ID"}}, + {ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB, + "Supplied MIB file is invalid. 
DCB reverted to default configuration.", + "Disable FW-LLDP and check DCBx system configuration.", + {ice_port_number_label, "MIB ID"}}, +}; + +static int ice_health_status_lookup_compare(const void *a, const void *b) +{ + return ((struct ice_health_status *)a)->code - ((struct ice_health_status *)b)->code; +} + +static const struct ice_health_status *ice_get_health_status(u16 code) +{ + struct ice_health_status key = { .code = code }; + + return bsearch(&key, ice_health_status_lookup, ARRAY_SIZE(ice_health_status_lookup), + sizeof(struct ice_health_status), ice_health_status_lookup_compare); +} + +static void ice_describe_status_code(struct devlink_fmsg *fmsg, + struct ice_aqc_health_status_elem *hse) +{ + static const char *const aux_label[] = { "Aux Data 1", "Aux Data 2" }; + const struct ice_health_status *health_code; + u32 internal_data[2]; + u16 status_code; + + status_code = le16_to_cpu(hse->health_status_code); + + devlink_fmsg_put(fmsg, "Syndrome", status_code); + if (status_code) { + internal_data[0] = le32_to_cpu(hse->internal_data1); + internal_data[1] = le32_to_cpu(hse->internal_data2); + + health_code = ice_get_health_status(status_code); + if (!health_code) + return; + + devlink_fmsg_string_pair_put(fmsg, "Description", health_code->description); + if (health_code->solution) + devlink_fmsg_string_pair_put(fmsg, "Possible Solution", + health_code->solution); + + for (size_t i = 0; i < ICE_HEALTH_STATUS_DATA_SIZE; i++) { + if (internal_data[i] != ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA) + devlink_fmsg_u32_pair_put(fmsg, + health_code->data_label[i] ? + health_code->data_label[i] : + aux_label[i], + internal_data[i]); + } + } +} + +static int +ice_port_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_health_reporter_priv(reporter); + + ice_describe_status_code(fmsg, &pf->health_reporters.port_status); + return 0; +} + +static int +ice_port_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, + void *priv_ctx, struct netlink_ext_ack __always_unused *extack) +{ + struct ice_pf *pf = devlink_health_reporter_priv(reporter); + + ice_describe_status_code(fmsg, &pf->health_reporters.port_status); + return 0; +} + +static int +ice_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_health_reporter_priv(reporter); + + ice_describe_status_code(fmsg, &pf->health_reporters.fw_status); + return 0; +} + +static int +ice_fw_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, + void *priv_ctx, struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_health_reporter_priv(reporter); + + ice_describe_status_code(fmsg, &pf->health_reporters.fw_status); + return 0; +} + +static void ice_config_health_events(struct ice_pf *pf, bool enable) +{ + u8 enable_bits = 0; + int ret; + + if (enable) + enable_bits = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK | + ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK; + + ret = ice_aq_set_health_status_cfg(&pf->hw, enable_bits); + if (ret) + dev_err(ice_pf_to_dev(pf), "Failed to %s firmware health events, err %d aq_err %s\n", + str_enable_disable(enable), ret, + ice_aq_str(pf->hw.adminq.sq_last_status)); +} + +/** + * ice_process_health_status_event - Process the health status event from FW + * @pf: pointer to the PF structure + * @event: event structure containing the Health Status Event opcode + * + * 
Decode the health status events and report the associated messages.
+ */
+void ice_process_health_status_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+	const struct ice_aqc_health_status_elem *health_info;
+	u16 count;
+
+	health_info = (struct ice_aqc_health_status_elem *)event->msg_buf;
+	count = le16_to_cpu(event->desc.params.get_health_status.health_status_count);
+
+	if (count > (event->buf_len / sizeof(*health_info))) {
+		dev_err(ice_pf_to_dev(pf), "Received a health status event with invalid element count\n");
+		return;
+	}
+
+	for (size_t i = 0; i < count; i++) {
+		const struct ice_health_status *health_code;
+		u16 status_code;
+
+		status_code = le16_to_cpu(health_info->health_status_code);
+		health_code = ice_get_health_status(status_code);
+
+		if (health_code) {
+			switch (le16_to_cpu(health_info->event_source)) {
+			case ICE_AQC_HEALTH_STATUS_GLOBAL:
+				pf->health_reporters.fw_status = *health_info;
+				devlink_health_report(pf->health_reporters.fw,
+						      "FW syndrome reported", NULL);
+				break;
+			case ICE_AQC_HEALTH_STATUS_PF:
+			case ICE_AQC_HEALTH_STATUS_PORT:
+				pf->health_reporters.port_status = *health_info;
+				devlink_health_report(pf->health_reporters.port,
+						      "Port syndrome reported", NULL);
+				break;
+			default:
+				dev_err(ice_pf_to_dev(pf), "Health code with unknown source\n");
+			}
+		} else {
+			u32 data1, data2;
+			u16 source;
+
+			source = le16_to_cpu(health_info->event_source);
+			data1 = le32_to_cpu(health_info->internal_data1);
+			data2 = le32_to_cpu(health_info->internal_data2);
+			dev_dbg(ice_pf_to_dev(pf),
+				"Received internal health status code 0x%08x, source: 0x%08x, data1: 0x%08x, data2: 0x%08x",
+				status_code, source, data1, data2);
+		}
+		health_info++;
+	}
+}
+
+/**
+ * ice_devlink_health_report - boilerplate to call given @reporter
+ *
+ * @reporter: devlink health reporter to call, do nothing on NULL
+ * @msg: message to pass up, "event name" is fine
+ * @priv_ctx: typically some event struct
+ */
+static void ice_devlink_health_report(struct devlink_health_reporter *reporter,
+				      const char *msg, void *priv_ctx)
+{
+	if (!reporter)
+		return;
+
+	/* We do not do auto recovery, so the return value of the function
+	 * below will always be 0; thus we ignore it.
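+	 *
+	 * With the graceful period set to 0 and no ->recover callback, the
+	 * reporter only latches the error state; an administrator clears it
+	 * manually, e.g. (illustrative bus address):
+	 *
+	 *   devlink health recover pci/0000:18:00.0 reporter tx_hang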
+ */ + devlink_health_report(reporter, msg, priv_ctx); +} + +struct ice_mdd_event { + enum ice_mdd_src src; + u16 vf_num; + u16 queue; + u8 pf_num; + u8 event; +}; + +static const char *ice_mdd_src_to_str(enum ice_mdd_src src) +{ + switch (src) { + case ICE_MDD_SRC_TX_PQM: + return "tx_pqm"; + case ICE_MDD_SRC_TX_TCLAN: + return "tx_tclan"; + case ICE_MDD_SRC_TX_TDPU: + return "tx_tdpu"; + case ICE_MDD_SRC_RX: + return "rx"; + default: + return "invalid"; + } +} + +static int +ice_mdd_reporter_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *priv_ctx, + struct netlink_ext_ack *extack) +{ + struct ice_mdd_event *mdd_event = priv_ctx; + const char *src; + + if (!mdd_event) + return 0; + + src = ice_mdd_src_to_str(mdd_event->src); + + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_put(fmsg, "src", src); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, pf_num); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, vf_num); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, event); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, queue); + devlink_fmsg_obj_nest_end(fmsg); + + return 0; +} + +/** + * ice_report_mdd_event - Report an MDD event through devlink health + * @pf: the PF device structure + * @src: the HW block that was the source of this MDD event + * @pf_num: the pf_num on which the MDD event occurred + * @vf_num: the vf_num on which the MDD event occurred + * @event: the event type of the MDD event + * @queue: the queue on which the MDD event occurred + * + * Report an MDD event that has occurred on this PF. + */ +void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num, + u16 vf_num, u8 event, u16 queue) +{ + struct ice_mdd_event ev = { + .src = src, + .pf_num = pf_num, + .vf_num = vf_num, + .event = event, + .queue = queue, + }; + + ice_devlink_health_report(pf->health_reporters.mdd, "MDD event", &ev); +} + +/** + * ice_fmsg_put_ptr - put hex value of pointer into fmsg + * + * @fmsg: devlink fmsg under construction + * @name: name to pass + * @ptr: 64 bit value to print as hex and put into fmsg + */ +static void ice_fmsg_put_ptr(struct devlink_fmsg *fmsg, const char *name, + void *ptr) +{ + char buf[sizeof(ptr) * 3]; + + sprintf(buf, "%p", ptr); + devlink_fmsg_put(fmsg, name, buf); +} + +struct ice_tx_hang_event { + u32 head; + u32 intr; + u16 vsi_num; + u16 queue; + u16 next_to_clean; + u16 next_to_use; + struct ice_tx_ring *tx_ring; +}; + +static int ice_tx_hang_reporter_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *priv_ctx, + struct netlink_ext_ack *extack) +{ + struct ice_tx_hang_event *event = priv_ctx; + struct sk_buff *skb; + + if (!event) + return 0; + + skb = event->tx_ring->tx_buf->skb; + devlink_fmsg_obj_nest_start(fmsg); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, head); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, intr); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, vsi_num); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, queue); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_clean); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_use); + devlink_fmsg_put(fmsg, "irq-mapping", event->tx_ring->q_vector->name); + ice_fmsg_put_ptr(fmsg, "desc-ptr", event->tx_ring->desc); + ice_fmsg_put_ptr(fmsg, "dma-ptr", (void *)(long)event->tx_ring->dma); + ice_fmsg_put_ptr(fmsg, "skb-ptr", skb); + devlink_fmsg_binary_pair_put(fmsg, "desc", event->tx_ring->desc, + event->tx_ring->count * sizeof(struct ice_tx_desc)); + devlink_fmsg_dump_skb(fmsg, skb); + devlink_fmsg_obj_nest_end(fmsg); + + return 0; +} + +void 
ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring,
+			     u16 vsi_num, u32 head, u32 intr)
+{
+	struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf;
+
+	buf->tx_ring = tx_ring;
+	buf->vsi_num = vsi_num;
+	buf->head = head;
+	buf->intr = intr;
+}
+
+void ice_report_tx_hang(struct ice_pf *pf)
+{
+	struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf;
+	struct ice_tx_ring *tx_ring = buf->tx_ring;
+
+	struct ice_tx_hang_event ev = {
+		.head = buf->head,
+		.intr = buf->intr,
+		.vsi_num = buf->vsi_num,
+		.queue = tx_ring->q_index,
+		.next_to_clean = tx_ring->next_to_clean,
+		.next_to_use = tx_ring->next_to_use,
+		.tx_ring = tx_ring,
+	};
+
+	ice_devlink_health_report(pf->health_reporters.tx_hang, "Tx hang", &ev);
+}
+
+static struct devlink_health_reporter *
+ice_init_devlink_rep(struct ice_pf *pf,
+		     const struct devlink_health_reporter_ops *ops)
+{
+	struct devlink *devlink = priv_to_devlink(pf);
+	struct devlink_health_reporter *rep;
+	const u64 graceful_period = 0;
+
+	rep = devl_health_reporter_create(devlink, ops, graceful_period, pf);
+	if (IS_ERR(rep)) {
+		struct device *dev = ice_pf_to_dev(pf);
+
+		dev_err(dev, "failed to create devlink %s health reporter",
+			ops->name);
+		return NULL;
+	}
+	return rep;
+}
+
+#define ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field) \
+	._field = ice_##_name##_reporter_##_field,
+
+#define ICE_DEFINE_HEALTH_REPORTER_OPS_1(_name, _field1) \
+	static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \
+		.name = #_name, \
+		ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \
+	}
+
+#define ICE_DEFINE_HEALTH_REPORTER_OPS_2(_name, _field1, _field2) \
+	static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \
+		.name = #_name, \
+		ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \
+		ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field2) \
+	}
+
+ICE_DEFINE_HEALTH_REPORTER_OPS_1(mdd, dump);
+ICE_DEFINE_HEALTH_REPORTER_OPS_1(tx_hang, dump);
+ICE_DEFINE_HEALTH_REPORTER_OPS_2(fw, dump, diagnose);
+ICE_DEFINE_HEALTH_REPORTER_OPS_2(port, dump, diagnose);
+
+/**
+ * ice_health_init - allocate and init all ice devlink health reporters and
+ * accompanying data
+ *
+ * @pf: PF struct
+ */
+void ice_health_init(struct ice_pf *pf)
+{
+	struct ice_health *reps = &pf->health_reporters;
+
+	reps->mdd = ice_init_devlink_rep(pf, &ice_mdd_reporter_ops);
+	reps->tx_hang = ice_init_devlink_rep(pf, &ice_tx_hang_reporter_ops);
+
+	if (ice_is_fw_health_report_supported(&pf->hw)) {
+		reps->fw = ice_init_devlink_rep(pf, &ice_fw_reporter_ops);
+		reps->port = ice_init_devlink_rep(pf, &ice_port_reporter_ops);
+		ice_config_health_events(pf, true);
+	}
+}
+
+/**
+ * ice_deinit_devl_reporter - destroy given devlink health reporter
+ * @reporter: reporter to destroy
+ */
+static void ice_deinit_devl_reporter(struct devlink_health_reporter *reporter)
+{
+	if (reporter)
+		devl_health_reporter_destroy(reporter);
+}
+
+/**
+ * ice_health_deinit - deallocate all ice devlink health reporters and
+ * accompanying data
+ *
+ * @pf: PF struct
+ */
+void ice_health_deinit(struct ice_pf *pf)
+{
+	ice_deinit_devl_reporter(pf->health_reporters.mdd);
+	ice_deinit_devl_reporter(pf->health_reporters.tx_hang);
+	if (ice_is_fw_health_report_supported(&pf->hw)) {
+		ice_deinit_devl_reporter(pf->health_reporters.fw);
+		ice_deinit_devl_reporter(pf->health_reporters.port);
+		ice_config_health_events(pf, false);
+	}
+}
+
+static
+void ice_health_assign_healthy_state(struct devlink_health_reporter *reporter)
+{
+	if (reporter)
+		devlink_health_reporter_state_update(reporter,
+						     DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+}
+
+/**
+ * ice_health_clear - clear devlink health issues after a reset
+ * @pf: the PF device structure
+ *
+ * Mark the PF in healthy state again after a reset has completed.
+ */
+void ice_health_clear(struct ice_pf *pf)
+{
+	ice_health_assign_healthy_state(pf->health_reporters.mdd);
+	ice_health_assign_healthy_state(pf->health_reporters.tx_hang);
+}
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.h b/drivers/net/ethernet/intel/ice/devlink/health.h
new file mode 100644
index 000000000000..5edfc4d2adce
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/health.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation. */
+
+#ifndef _HEALTH_H_
+#define _HEALTH_H_
+
+#include <linux/types.h>
+
+/**
+ * DOC: health.h
+ *
+ * This header file stores everything that is needed for the devlink health
+ * mechanism of the ice driver.
+ */
+
+struct ice_aqc_health_status_elem;
+struct ice_pf;
+struct ice_tx_ring;
+struct ice_rq_event_info;
+
+enum ice_mdd_src {
+	ICE_MDD_SRC_TX_PQM,
+	ICE_MDD_SRC_TX_TCLAN,
+	ICE_MDD_SRC_TX_TDPU,
+	ICE_MDD_SRC_RX,
+};
+
+/**
+ * struct ice_health - stores ice devlink health reporters and accompanying data
+ * @fw: devlink health reporter for FW Health Status events
+ * @mdd: devlink health reporter for MDD detection event
+ * @port: devlink health reporter for Port Health Status events
+ * @tx_hang: devlink health reporter for tx_hang event
+ * @tx_hang_buf: pre-allocated place to put info for Tx hang reporter from
+ *               non-sleeping context
+ * @tx_ring: ring that the hang occurred on
+ * @head: descriptor head
+ * @intr: interrupt register value
+ * @vsi_num: VSI owning the queue that the hang occurred on
+ * @fw_status: buffer for last received FW Status event
+ * @port_status: buffer for last received Port Status event
+ */
+struct ice_health {
+	struct devlink_health_reporter *fw;
+	struct devlink_health_reporter *mdd;
+	struct devlink_health_reporter *port;
+	struct devlink_health_reporter *tx_hang;
+	struct_group_tagged(ice_health_tx_hang_buf, tx_hang_buf,
+			    struct ice_tx_ring *tx_ring;
+			    u32 head;
+			    u32 intr;
+			    u16 vsi_num;
+	);
+	struct ice_aqc_health_status_elem fw_status;
+	struct ice_aqc_health_status_elem port_status;
+};
+
+void ice_process_health_status_event(struct ice_pf *pf,
+				     struct ice_rq_event_info *event);
+
+void ice_health_init(struct ice_pf *pf);
+void ice_health_deinit(struct ice_pf *pf);
+void ice_health_clear(struct ice_pf *pf);
+
+void ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring,
+			     u16 vsi_num, u32 head, u32 intr);
+void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num,
+			  u16 vf_num, u8 event, u16 queue);
+void ice_report_tx_hang(struct ice_pf *pf);
+
+#endif /* _HEALTH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/port.c
index c6779d9dffff..767419a67fef 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
+++ b/drivers/net/ethernet/intel/ice/devlink/port.c
@@ -5,7 +5,7 @@
 #include "ice.h"
 #include "devlink.h"
-#include "devlink_port.h"
+#include "port.h"
 #include "ice_lib.h"
 #include "ice_fltr.h"
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/port.h
index d60efc340945..d60efc340945 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
+++ 
b/drivers/net/ethernet/intel/ice/devlink/port.h diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 2f5d6f974185..71e05d30f0fd 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -78,6 +78,7 @@ #include "ice_irq.h" #include "ice_dpll.h" #include "ice_adapter.h" +#include "devlink/health.h" #define ICE_BAR0 0 #define ICE_REQ_DESC_MULTIPLE 32 @@ -665,6 +666,7 @@ struct ice_pf { struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES]; struct ice_dplls dplls; struct device *hwmon_dev; + struct ice_health health_reporters; u8 num_quanta_prof_used; }; diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 46f9726d9a8a..bdee499f991a 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -12,6 +12,13 @@ #define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9 #define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728 +#define ICE_RXQ_CTX_SIZE_DWORDS 8 +#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) +#define ICE_TXQ_CTX_SZ 22 + +typedef struct __packed { u8 buf[ICE_RXQ_CTX_SZ]; } ice_rxq_ctx_buf_t; +typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t; + struct ice_aqc_generic { __le32 param0; __le32 param1; @@ -1491,7 +1498,6 @@ struct ice_aqc_dnl_equa_param { #define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT) #define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT) #define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT) -#define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT) #define ICE_AQC_RX_EQU_CTLE_GAINHF (0x20 << ICE_AQC_RX_EQU_SHIFT) #define ICE_AQC_RX_EQU_CTLE_GAINLF (0x21 << ICE_AQC_RX_EQU_SHIFT) #define ICE_AQC_RX_EQU_CTLE_GAINDC (0x22 << ICE_AQC_RX_EQU_SHIFT) @@ -1808,6 +1814,7 @@ struct ice_aqc_nvm_pass_comp_tbl { #define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED 0x0 #define ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1 #define ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2 +#define ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK 0x3 u8 component_response_code; /* Response only */ #define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0 #define ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1 @@ -2085,10 +2092,10 @@ struct ice_aqc_add_txqs_perq { __le16 txq_id; u8 rsvd[2]; __le32 q_teid; - u8 txq_ctx[22]; + ice_txq_ctx_buf_t txq_ctx; u8 rsvd2[2]; struct ice_aqc_txsched_elem info; -}; +} __packed; /* The format of the command buffer for Add Tx LAN Queues (0x0C30) * is an array of the following structs. 
Please note that the length of @@ -2511,6 +2518,87 @@ enum ice_aqc_fw_logging_mod { ICE_AQC_FW_LOG_ID_MAX, }; +enum ice_aqc_health_status_mask { + ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK = BIT(0), + ICE_AQC_HEALTH_STATUS_SET_ALL_PF_MASK = BIT(1), + ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK = BIT(2), +}; + +/* Set Health Status (direct 0xFF20) */ +struct ice_aqc_set_health_status_cfg { + u8 event_source; + u8 reserved[15]; +}; + +enum ice_aqc_health_status { + ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT = 0x101, + ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE = 0x102, + ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL = 0x103, + ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM = 0x104, + ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT = 0x105, + ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT = 0x106, + ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED = 0x107, + ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT = 0x108, + ICE_AQC_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE = 0x109, + ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG = 0x10B, + ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS = 0x10C, + ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE = 0x10D, + ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED = 0x10F, + ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT = 0x110, + ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED = 0x111, + ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO = 0x112, + ICE_AQC_HEALTH_STATUS_ERR_NETLIST = 0x113, + ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT = 0x114, + ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS = 0x115, + ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME = 0x116, + ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT = 0x117, + ICE_AQC_HEALTH_STATUS_ERR_PHY_NVM_PROG = 0x120, + ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD = 0x121, + ICE_AQC_HEALTH_STATUS_INFO_RECOVERY = 0x500, + ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS = 0x501, + ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH = 0x502, + ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH = 0x503, + ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH = 0x504, + ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT = 0x505, + ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT = 0x506, + ICE_AQC_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION = 0x507, + ICE_AQC_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION = 0x508, + ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB = 0x509, + ICE_AQC_HEALTH_STATUS_ERR_MNG_TIMEOUT = 0x50A, + ICE_AQC_HEALTH_STATUS_ERR_BMC_RESET = 0x50B, + ICE_AQC_HEALTH_STATUS_ERR_LAST_MNG_FAIL = 0x50C, + ICE_AQC_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL = 0x50D, + ICE_AQC_HEALTH_STATUS_ERR_FW_LOOP = 0x1000, + ICE_AQC_HEALTH_STATUS_ERR_FW_PFR_FAIL = 0x1001, + ICE_AQC_HEALTH_STATUS_ERR_LAST_FAIL_AQ = 0x1002, +}; + +/* Get Health Status (indirect 0xFF22) */ +struct ice_aqc_get_health_status { + __le16 health_status_count; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +enum ice_aqc_health_status_scope { + ICE_AQC_HEALTH_STATUS_PF = 0x1, + ICE_AQC_HEALTH_STATUS_PORT = 0x2, + ICE_AQC_HEALTH_STATUS_GLOBAL = 0x3, +}; + +#define ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA 0xDEADBEEF + +/* Get Health Status event buffer entry (0xFF22), + * repeated per reported health status. 
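+ * Each element is 12 bytes (two __le16 plus two __le32 fields), so a
+ * response carrying N status entries occupies N * 12 bytes; the driver
+ * validates the advertised count against the buffer length before parsing.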
+ */ +struct ice_aqc_health_status_elem { + __le16 health_status_code; + __le16 event_source; + __le32 internal_data1; + __le32 internal_data2; +}; + /* Set FW Logging configuration (indirect 0xFF30) * Register for FW Logging (indirect 0xFF31) * Query FW Logging (indirect 0xFF32) @@ -2651,6 +2739,8 @@ struct ice_aq_desc { struct ice_aqc_get_link_status get_link_status; struct ice_aqc_event_lan_overflow lan_overflow; struct ice_aqc_get_link_topo get_link_topo; + struct ice_aqc_set_health_status_cfg set_health_status_cfg; + struct ice_aqc_get_health_status get_health_status; struct ice_aqc_dnl_call_command dnl_call; struct ice_aqc_i2c read_write_i2c; struct ice_aqc_read_i2c_resp read_i2c_resp; @@ -2853,6 +2943,10 @@ enum ice_adminq_opc { /* Standalone Commands/Events */ ice_aqc_opc_event_lan_overflow = 0x1001, + /* System Diagnostic commands */ + ice_aqc_opc_set_health_status_cfg = 0xFF20, + ice_aqc_opc_get_health_status = 0xFF22, + /* FW Logging Commands */ ice_aqc_opc_fw_logs_config = 0xFF30, ice_aqc_opc_fw_logs_register = 0xFF31, diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c index 7cee365cc7d1..405ddd17de1b 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.c +++ b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -511,7 +511,7 @@ void ice_init_arfs(struct ice_vsi *vsi) struct hlist_head *arfs_fltr_list; unsigned int i; - if (!vsi || vsi->type != ICE_VSI_PF) + if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi)) return; arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list), diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 82a9cd4ec7ae..b2af8e3586f7 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -454,6 +454,9 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) /* Rx queue threshold in units of 64 */ rlan_ctx.lrxqthresh = 1; + /* Enable descriptor prefetch */ + rlan_ctx.prefena = 1; + /* PF acts as uplink for switchdev; set flex descriptor with src_vsi * metadata and flags to allow redirecting to PR netdev */ @@ -910,8 +913,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); /* copy context contents into the qg_buf */ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); - ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, - ice_tlan_ctx_info); + ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx); /* init queue specific tail reg. It is referred as * transmit comm scheduler queue doorbell. diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 532024f34ce4..7a2a2e8da8fa 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -6,6 +6,7 @@ #include "ice_adminq_cmd.h" #include "ice_flow.h" #include "ice_ptp_hw.h" +#include <linux/packing.h> #define ICE_PF_RESET_WAIT_COUNT 300 #define ICE_MAX_NETLIST_SIZE 10 @@ -308,6 +309,42 @@ bool ice_is_e825c(struct ice_hw *hw) } /** + * ice_is_pf_c827 - check if pf contains c827 phy + * @hw: pointer to the hw struct + * + * Return: true if the device has c827 phy. 
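+ *
+ * Any E810 device other than ICE_DEV_ID_E810C_QSFP is assumed to carry a
+ * C827 PHY; for the QSFP variant the netlist is queried to confirm that a
+ * C827 node is actually present.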
+ */
+static bool ice_is_pf_c827(struct ice_hw *hw)
+{
+	struct ice_aqc_get_link_topo cmd = {};
+	u8 node_part_number;
+	u16 node_handle;
+	int status;
+
+	if (hw->mac_type != ICE_MAC_E810)
+		return false;
+
+	if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
+		return true;
+
+	cmd.addr.topo_params.node_type_ctx =
+		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
+		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
+	cmd.addr.topo_params.index = 0;
+
+	status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
+					 &node_handle);
+
+	if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
+		return false;
+
+	if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
+		return true;
+
+	return false;
+}
+
+/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
@@ -1025,6 +1062,33 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
 }
 
 /**
+ * ice_wait_for_fw - wait for full FW readiness
+ * @hw: pointer to the hardware structure
+ * @timeout: milliseconds that can elapse before timing out
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout.
+ */
+static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
+{
+	int fw_loading;
+	u32 elapsed = 0;
+
+	while (elapsed <= timeout) {
+		fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
+
+		/* firmware was not yet loaded, we have to wait more */
+		if (fw_loading) {
+			elapsed += 100;
+			msleep(100);
+			continue;
+		}
+		return 0;
+	}
+
+	return -ETIMEDOUT;
+}
+
+/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
@@ -1173,8 +1237,19 @@ int ice_init_hw(struct ice_hw *hw)
 	mutex_init(&hw->tnl_lock);
 	ice_init_chk_recipe_reuse_support(hw);
-	return 0;
+	/* Some cards require longer initialization times because they need
+	 * to load FW from an external source. This can take even half a
+	 * minute.
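+	 *
+	 * The wait below polls GL_MNG_FWSM every 100 ms, so the 30000 ms
+	 * timeout allows roughly 300 polls before giving up with -ETIMEDOUT.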
+ */ + if (ice_is_pf_c827(hw)) { + status = ice_wait_for_fw(hw, 30000); + if (status) { + dev_err(ice_hw_to_dev(hw), "ice_wait_for_fw timed out"); + goto err_unroll_fltr_mgmt_struct; + } + } + return 0; err_unroll_fltr_mgmt_struct: ice_cleanup_fltr_mgmt_struct(hw); err_unroll_sched: @@ -1360,39 +1435,31 @@ int ice_reset(struct ice_hw *hw, enum ice_reset_req req) } /** - * ice_copy_rxq_ctx_to_hw + * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers * @hw: pointer to the hardware structure - * @ice_rxq_ctx: pointer to the rxq context + * @rxq_ctx: pointer to the packed Rx queue context * @rxq_index: the index of the Rx queue - * - * Copies rxq context from dense structure to HW register space */ -static int -ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) +static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, + const ice_rxq_ctx_buf_t *rxq_ctx, + u32 rxq_index) { - u8 i; - - if (!ice_rxq_ctx) - return -EINVAL; - - if (rxq_index > QRX_CTRL_MAX_INDEX) - return -EINVAL; - /* Copy each dword separately to HW */ - for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { - wr32(hw, QRX_CONTEXT(i, rxq_index), - *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); + for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { + u32 ctx = ((const u32 *)rxq_ctx)[i]; - ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, - *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); - } + wr32(hw, QRX_CONTEXT(i, rxq_index), ctx); - return 0; + ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx); + } } +#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \ + PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field) + /* LAN Rx Queue Context */ -static const struct ice_ctx_ele ice_rlan_ctx_info[] = { - /* Field Width LSB */ +static const struct packed_field_u8 ice_rlan_ctx_fields[] = { + /* Field Width LSB */ ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0), ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13), ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32), @@ -1413,35 +1480,50 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196), ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198), ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201), - { 0 } }; /** - * ice_write_rxq_ctx + * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer + * @ctx: the Rx queue context to pack + * @buf: the HW buffer to pack into + * + * Pack the Rx queue context from the CPU-friendly unpacked buffer into its + * bit-packed HW layout. + */ +static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx, + ice_rxq_ctx_buf_t *buf) +{ + pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields, + QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST); +} + +/** + * ice_write_rxq_ctx - Write Rx Queue context to hardware * @hw: pointer to the hardware structure - * @rlan_ctx: pointer to the rxq context + * @rlan_ctx: pointer to the unpacked Rx queue context * @rxq_index: the index of the Rx queue * - * Converts rxq context from sparse to dense structure and then writes - * it to HW register space and enables the hardware to prefetch descriptors - * instead of only fetching them on demand + * Pack the sparse Rx Queue context into dense hardware format and write it + * into the HW register space. + * + * Return: 0 on success, or -EINVAL if the Rx queue index is invalid. 
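+ *
+ * A minimal usage sketch (illustrative only; "ring" stands for whatever Rx
+ * ring supplies the DMA base, length and register index):
+ *
+ *	struct ice_rlan_ctx ctx = {};
+ *
+ *	ctx.base = ring->dma >> ICE_RLAN_BASE_S;
+ *	ctx.qlen = ring->count;
+ *	err = ice_write_rxq_ctx(hw, &ctx, ring->reg_idx);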
*/ int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index) { - u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; + ice_rxq_ctx_buf_t buf = {}; - if (!rlan_ctx) + if (rxq_index > QRX_CTRL_MAX_INDEX) return -EINVAL; - rlan_ctx->prefena = 1; + ice_pack_rxq_ctx(rlan_ctx, &buf); + ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index); - ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); - return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index); + return 0; } /* LAN Tx Queue Context */ -const struct ice_ctx_ele ice_tlan_ctx_info[] = { +static const struct packed_field_u8 ice_tlan_ctx_fields[] = { /* Field Width LSB */ ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0), ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57), @@ -1470,10 +1552,22 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = { ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165), ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166), ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168), - ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171), - { 0 } }; +/** + * ice_pack_txq_ctx - Pack Tx queue context into a HW buffer + * @ctx: the Tx queue context to pack + * @buf: the HW buffer to pack into + * + * Pack the Tx queue context from the CPU-friendly unpacked buffer into its + * bit-packed HW layout. + */ +void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf) +{ + pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields, + QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST); +} + /* Sideband Queue command wrappers */ /** @@ -2547,6 +2641,7 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0); info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0); + info->ll_phy_tmr_update = ((number & ICE_TS_LL_PHY_TMR_UPDATE_M) != 0); info->ena_ports = logical_id; info->tmr_own_map = phys_id; @@ -2569,6 +2664,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, info->ts_ll_read); ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n", info->ts_ll_int_read); + ice_debug(hw, ICE_DBG_INIT, "dev caps: ll_phy_tmr_update = %u\n", + info->ll_phy_tmr_update); ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", info->ena_ports); ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n", @@ -2709,40 +2806,6 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, } /** - * ice_is_pf_c827 - check if pf contains c827 phy - * @hw: pointer to the hw struct - */ -bool ice_is_pf_c827(struct ice_hw *hw) -{ - struct ice_aqc_get_link_topo cmd = {}; - u8 node_part_number; - u16 node_handle; - int status; - - if (hw->mac_type != ICE_MAC_E810) - return false; - - if (hw->device_id != ICE_DEV_ID_E810C_QSFP) - return true; - - cmd.addr.topo_params.node_type_ctx = - FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) | - FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT); - cmd.addr.topo_params.index = 0; - - status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, - &node_handle); - - if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827) - return false; - - if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE) - return true; - - return false; -} - -/** * ice_is_phy_rclk_in_netlist * @hw: pointer to the hw struct * @@ -4609,205 +4672,6 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, /* End of FW Admin Queue command wrappers */ /** - * ice_pack_ctx_byte - write a byte to a packed context 
structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u8 src_byte, dest_byte, mask; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); - - src_byte = *from; - src_byte <<= shift_width; - src_byte &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_byte, dest, sizeof(dest_byte)); - - dest_byte &= ~mask; /* get the bits not changing */ - dest_byte |= src_byte; /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_byte, sizeof(dest_byte)); -} - -/** - * ice_pack_ctx_word - write a word to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u16 src_word, mask; - __le16 dest_word; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); - - /* don't swizzle the bits until after the mask because the mask bits - * will be in a different bit position on big endian machines - */ - src_word = *(u16 *)from; - src_word <<= shift_width; - src_word &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_word, dest, sizeof(dest_word)); - - dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ - dest_word |= cpu_to_le16(src_word); /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_word, sizeof(dest_word)); -} - -/** - * ice_pack_ctx_dword - write a dword to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u32 src_dword, mask; - __le32 dest_dword; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); - - /* don't swizzle the bits until after the mask because the mask bits - * will be in a different bit position on big endian machines - */ - src_dword = *(u32 *)from; - src_dword <<= shift_width; - src_dword &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_dword, dest, sizeof(dest_dword)); - - dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ - dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_dword, sizeof(dest_dword)); -} - -/** - * ice_pack_ctx_qword - write a qword to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void 
ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u64 src_qword, mask; - __le64 dest_qword; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width); - - /* don't swizzle the bits until after the mask because the mask bits - * will be in a different bit position on big endian machines - */ - src_qword = *(u64 *)from; - src_qword <<= shift_width; - src_qword &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_qword, dest, sizeof(dest_qword)); - - dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ - dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_qword, sizeof(dest_qword)); -} - -/** - * ice_set_ctx - set context bits in packed structure - * @hw: pointer to the hardware structure - * @src_ctx: pointer to a generic non-packed context structure - * @dest_ctx: pointer to memory for the packed structure - * @ce_info: List of Rx context elements - */ -int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - int f; - - for (f = 0; ce_info[f].width; f++) { - /* We have to deal with each element of the FW response - * using the correct size so that we are correct regardless - * of the endianness of the machine. - */ - if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { - ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n", - f, ce_info[f].width, ce_info[f].size_of); - continue; - } - switch (ce_info[f].size_of) { - case sizeof(u8): - ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]); - break; - case sizeof(u16): - ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]); - break; - case sizeof(u32): - ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]); - break; - case sizeof(u64): - ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]); - break; - default: - return -EINVAL; - } - } - - return 0; -} - -/** * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC * @hw: pointer to the HW struct * @vsi_handle: software VSI handle @@ -6083,6 +5947,44 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) } /** + * ice_is_fw_health_report_supported - checks if firmware supports health events + * @hw: pointer to the hardware structure + * + * Return: true if firmware supports health status reports, + * false otherwise + */ +bool ice_is_fw_health_report_supported(struct ice_hw *hw) +{ + return ice_is_fw_api_min_ver(hw, ICE_FW_API_HEALTH_REPORT_MAJ, + ICE_FW_API_HEALTH_REPORT_MIN, + ICE_FW_API_HEALTH_REPORT_PATCH); +} + +/** + * ice_aq_set_health_status_cfg - Configure FW health events + * @hw: pointer to the HW struct + * @event_source: type of diagnostic events to enable + * + * Configure the health status event types that the firmware will send to this + * PF. The supported event types are: PF-specific, all PFs, and global. + * + * Return: 0 on success, negative error code otherwise. 
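+ *
+ * For example, ice_config_health_events() enables both PF-specific and
+ * global reports with:
+ *
+ *	ice_aq_set_health_status_cfg(hw,
+ *				     ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK |
+ *				     ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK);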
+ */ +int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source) +{ + struct ice_aqc_set_health_status_cfg *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.set_health_status_cfg; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_health_status_cfg); + + cmd->event_source = event_source; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** * ice_aq_set_lldp_mib - Set the LLDP MIB * @hw: pointer to the HW struct * @mib_type: Local, Remote or both Local and Remote MIBs diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index fe6f88cfd948..15ba38543738 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -92,9 +92,8 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq); int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); -extern const struct ice_ctx_ele ice_tlan_ctx_info[]; -int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info); + +void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf); extern struct mutex ice_global_cfg_lock_sw; @@ -113,7 +112,6 @@ int ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *caps, struct ice_sq_cd *cd); -bool ice_is_pf_c827(struct ice_hw *hw); bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw); bool ice_is_clock_mux_in_netlist(struct ice_hw *hw); bool ice_is_cgu_in_netlist(struct ice_hw *hw); @@ -143,6 +141,8 @@ int ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi); bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps); +bool ice_is_fw_health_report_supported(struct ice_hw *hw); +int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source); int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code, u8 serdes_num, int *output); int diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c index 38e151c7ea23..8d806d8ad761 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.c +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -2053,7 +2053,7 @@ static int ice_dpll_init_worker(struct ice_pf *pf) struct kthread_worker *kworker; kthread_init_delayed_work(&d->work, ice_dpll_periodic_work); - kworker = kthread_create_worker(0, "ice-dplls-%s", + kworker = kthread_run_worker(0, "ice-dplls-%s", dev_name(ice_pf_to_dev(pf))); if (IS_ERR(kworker)) return PTR_ERR(kworker); diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index fb527434b58b..ed21d7f55ac1 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -38,8 +38,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) if (ice_vsi_add_vlan_zero(uplink_vsi)) goto err_vlan_zero; - if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true, - ICE_FLTR_RX)) + if (ice_set_dflt_vsi(uplink_vsi)) goto err_def_rx; if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true, @@ -50,9 +49,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) if (vlan_ops->dis_rx_filtering(uplink_vsi)) goto err_vlan_filtering; - if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override)) - goto err_override_uplink; - if (ice_vsi_update_local_lb(uplink_vsi, true)) 
goto err_override_local_lb; @@ -64,8 +60,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) err_up: ice_vsi_update_local_lb(uplink_vsi, false); err_override_local_lb: - ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); -err_override_uplink: vlan_ops->ena_rx_filtering(uplink_vsi); err_vlan_filtering: ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, @@ -276,7 +270,6 @@ static void ice_eswitch_release_env(struct ice_pf *pf) vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); ice_vsi_update_local_lb(uplink_vsi, false); - ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); vlan_ops->ena_rx_filtering(uplink_vsi); ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, ICE_FLTR_TX); diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h index ac7db100e2cd..5c7dcf21b222 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -5,7 +5,7 @@ #define _ICE_ESWITCH_H_ #include <net/devlink.h> -#include "devlink/devlink_port.h" +#include "devlink/port.h" #ifdef CONFIG_ICE_SWITCHDEV void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 3072634bf049..f241493a6ac8 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -710,7 +710,6 @@ static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num, { ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 }, { ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf }, { ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf }, - { ICE_AQC_RX_EQU_DRATE, rx, &ptr->rx_equ_drate }, { ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf }, { ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf }, { ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc }, diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h index 8f2ad1c172c0..23b2cfbc9684 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.h +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h @@ -15,7 +15,6 @@ struct ice_serdes_equalization_to_ethtool { int rx_equ_post1; int rx_equ_bflf; int rx_equ_bfhf; - int rx_equ_drate; int rx_equ_ctle_gainhf; int rx_equ_ctle_gainlf; int rx_equ_ctle_gaindc; diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c index 2702a0da5c3e..70c201f569ce 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.c +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c @@ -6,6 +6,7 @@ #include <linux/crc32.h> #include <linux/pldmfw.h> #include "ice.h" +#include "ice_lib.h" #include "ice_fw_update.h" struct ice_fwu_priv { @@ -125,6 +126,10 @@ ice_check_component_response(struct ice_pf *pf, u16 id, u8 response, u8 code, case ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED: dev_info(dev, "firmware has rejected updating %s\n", component); break; + case ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK: + if (ice_is_recovery_mode(&pf->hw)) + return 0; + break; } switch (code) { @@ -1004,13 +1009,20 @@ int ice_devlink_flash_update(struct devlink *devlink, return -EOPNOTSUPP; } - if (!hw->dev_caps.common_cap.nvm_unified_update) { + if (!hw->dev_caps.common_cap.nvm_unified_update && !ice_is_recovery_mode(hw)) { NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update"); return -EOPNOTSUPP; } memset(&priv, 0, sizeof(priv)); + if (params->component && 
strcmp(params->component, "fw.mgmt") == 0) { + priv.context.mode = PLDMFW_UPDATE_MODE_SINGLE_COMPONENT; + priv.context.component_identifier = NVM_COMP_ID_NVM; + } else if (params->component) { + return -EOPNOTSUPP; + } + /* the E822 device needs a slightly different ops */ if (hw->mac_type == ICE_MAC_GENERIC) priv.context.ops = &ice_fwu_ops_e822; diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c index f02e8ca55375..b2148dbe49b2 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.c +++ b/drivers/net/ethernet/intel/ice/ice_gnss.c @@ -182,7 +182,7 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf) pf->gnss_serial = gnss; kthread_init_delayed_work(&gnss->read_work, ice_gnss_read); - kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev)); + kworker = kthread_run_worker(0, "ice-gnss-%s", dev_name(dev)); if (IS_ERR(kworker)) { kfree(gnss); return NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 1ccb572ce285..22371011c249 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -1001,6 +1001,28 @@ static void ice_lag_link(struct ice_lag *lag) } /** + * ice_lag_config_eswitch - configure eswitch to work with LAG + * @lag: lag info struct + * @netdev: active network interface device struct + * + * Updates all port representors in eswitch to use @netdev for Tx. + * + * Configures the netdev to keep dst metadata (also used in representor Tx). + * This is required for an uplink without switchdev mode configured. + */ +static void ice_lag_config_eswitch(struct ice_lag *lag, + struct net_device *netdev) +{ + struct ice_repr *repr; + unsigned long id; + + xa_for_each(&lag->pf->eswitch.reprs, id, repr) + repr->dst->u.port_info.lower_dev = netdev; + + netif_keep_dst(netdev); +} + +/** * ice_lag_unlink - handle unlink event * @lag: LAG info struct */ @@ -1021,6 +1043,9 @@ static void ice_lag_unlink(struct ice_lag *lag) ice_lag_move_vf_nodes(lag, act_port, pri_port); lag->primary = false; lag->active_port = ICE_LAG_INVALID_PORT; + + /* Config primary's eswitch back to normal operation. */ + ice_lag_config_eswitch(lag, lag->netdev); } else { struct ice_lag *primary_lag; @@ -1419,6 +1444,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr) ice_lag_move_vf_nodes(lag, prim_port, event_port); lag->active_port = event_port; + ice_lag_config_eswitch(lag, event_netdev); return; } @@ -1428,6 +1454,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr) /* new active port */ ice_lag_move_vf_nodes(lag, lag->active_port, event_port); lag->active_port = event_port; + ice_lag_config_eswitch(lag, event_netdev); } else { /* port not set as currently active (e.g. new active port * has already claimed the nodes and filters diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 611577ebc29d..1479b45738af 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -371,29 +371,21 @@ enum ice_rx_flex_desc_status_error_1_bits { ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! 
*/ }; -#define ICE_RXQ_CTX_SIZE_DWORDS 8 -#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) #define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22 #define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5 #define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800)) -/* RLAN Rx queue context data - * - * The sizes of the variables may be larger than needed due to crossing byte - * boundaries. If we do not have the width of the variable set to the correct - * size then we could end up shifting bits off the top of the variable when the - * variable is at the top of a byte and crosses over into the next byte. - */ +/* RLAN Rx queue context data */ struct ice_rlan_ctx { u16 head; - u16 cpuid; /* bigger than needed, see above for reason */ + u8 cpuid; #define ICE_RLAN_BASE_S 7 u64 base; u16 qlen; #define ICE_RLAN_CTX_DBUF_S 7 - u16 dbuf; /* bigger than needed, see above for reason */ + u8 dbuf; #define ICE_RLAN_CTX_HBUF_S 6 - u16 hbuf; /* bigger than needed, see above for reason */ + u8 hbuf; u8 dtype; u8 dsize; u8 crcstrip; @@ -401,29 +393,15 @@ struct ice_rlan_ctx { u8 hsplit_0; u8 hsplit_1; u8 showiv; - u32 rxmax; /* bigger than needed, see above for reason */ + u16 rxmax; u8 tphrdesc_ena; u8 tphwdesc_ena; u8 tphdata_ena; u8 tphhead_ena; - u16 lrxqthresh; /* bigger than needed, see above for reason */ + u8 lrxqthresh; u8 prefena; /* NOTE: normally must be set to 1 at init */ }; -struct ice_ctx_ele { - u16 offset; - u16 size_of; - u16 width; - u16 lsb; -}; - -#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \ - .offset = offsetof(struct _struct, _ele), \ - .size_of = sizeof_field(struct _struct, _ele), \ - .width = _width, \ - .lsb = _lsb, \ -} - /* for hsplit_0 field of Rx RLAN context */ enum ice_rlan_ctx_rx_hsplit_0 { ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0, @@ -551,18 +529,12 @@ enum ice_tx_ctx_desc_eipt_offload { #define ICE_LAN_TXQ_MAX_QGRPS 127 #define ICE_LAN_TXQ_MAX_QDIS 1023 -/* Tx queue context data - * - * The sizes of the variables may be larger than needed due to crossing byte - * boundaries. If we do not have the width of the variable set to the correct - * size then we could end up shifting bits off the top of the variable when the - * variable is at the top of a byte and crosses over into the next byte. - */ +/* Tx queue context data */ struct ice_tlan_ctx { #define ICE_TLAN_CTX_BASE_S 7 u64 base; /* base is defined in 128-byte units */ u8 port_num; - u16 cgd_num; /* bigger than needed, see above for reason */ + u8 cgd_num; u8 pf_num; u16 vmvf_num; u8 vmvf_type; @@ -573,7 +545,7 @@ struct ice_tlan_ctx { u8 tsyn_ena; u8 internal_usage_flag; u8 alt_vlan; - u16 cpuid; /* bigger than needed, see above for reason */ + u8 cpuid; u8 wb_mode; u8 tphrd_desc; u8 tphrd; @@ -582,7 +554,7 @@ struct ice_tlan_ctx { u16 qnum_in_func; u8 itr_notification_mode; u8 adjust_prof_id; - u32 qlen; /* bigger than needed, see above for reason */ + u16 qlen; u8 quanta_prof_idx; u8 tso_ena; u16 tso_qnum; @@ -590,7 +562,6 @@ struct ice_tlan_ctx { u8 drop_ena; u8 cache_prof_idx; u8 pkt_shaper_prof_idx; - u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! 
*/ }; #endif /* _ICE_LAN_TX_RX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index a7d45a8ce7ac..d0faa087793d 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1700,6 +1700,12 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf) return true; } +#define ICE_FW_MODE_REC_M BIT(1) +bool ice_is_recovery_mode(struct ice_hw *hw) +{ + return rd32(hw, GL_MNG_FWSM) & ICE_FW_MODE_REC_M; +} + /** * ice_update_eth_stats - Update VSI-specific ethernet statistics counters * @vsi: the VSI to be updated @@ -3931,24 +3937,6 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx) } /** - * ice_vsi_ctx_set_allow_override - allow destination override on VSI - * @ctx: pointer to VSI ctx structure - */ -void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx) -{ - ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; -} - -/** - * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI - * @ctx: pointer to VSI ctx structure - */ -void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx) -{ - ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; -} - -/** * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit * @vsi: pointer to VSI structure * @set: set or unset the bit diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 10d6fc479a32..b4c9cb28a016 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -90,6 +90,7 @@ void ice_set_q_vector_intrl(struct ice_q_vector *q_vector); bool ice_is_safe_mode(struct ice_pf *pf); bool ice_is_rdma_ena(struct ice_pf *pf); +bool ice_is_recovery_mode(struct ice_hw *hw); bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi); bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi); int ice_set_dflt_vsi(struct ice_vsi *vsi); @@ -104,10 +105,6 @@ ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *)) void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx); void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx); - -void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx); - -void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx); int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set); int ice_vsi_add_vlan_zero(struct ice_vsi *vsi); int ice_vsi_del_vlan_zero(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 89fa3d53d317..e13bd5a6cb6c 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -14,7 +14,7 @@ #include "ice_dcb_lib.h" #include "ice_dcb_nl.h" #include "devlink/devlink.h" -#include "devlink/devlink_port.h" +#include "devlink/port.h" #include "ice_sf_eth.h" #include "ice_hwmon.h" /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the @@ -1567,6 +1567,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) case ice_aqc_opc_lldp_set_mib_change: ice_dcb_process_lldp_set_mib_change(pf, &event); break; + case ice_aqc_opc_get_health_status: + ice_process_health_status_event(pf, &event); + break; default: dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n", qtype, opcode); @@ -1816,6 +1819,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); + 
ice_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, vf_num, + event, queue); wr32(hw, GL_MDET_TX_PQM, 0xffffffff); } @@ -1829,6 +1834,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); + ice_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, vf_num, + event, queue); wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX); } @@ -1842,6 +1849,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_rx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); + ice_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, vf_num, event, + queue); wr32(hw, GL_MDET_RX, 0xffffffff); } @@ -2355,6 +2364,18 @@ static void ice_check_media_subtask(struct ice_pf *pf) } } +static void ice_service_task_recovery_mode(struct work_struct *work) +{ + struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); + + set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); + ice_clean_adminq_subtask(pf); + + ice_service_task_complete(pf); + + mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100)); +} + /** * ice_service_task - manage and run subtasks * @work: pointer to work_struct contained by the PF struct @@ -2364,9 +2385,11 @@ static void ice_service_task(struct work_struct *work) struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); unsigned long start_time = jiffies; - /* subtasks */ + if (pf->health_reporters.tx_hang_buf.tx_ring) { + ice_report_tx_hang(pf); + pf->health_reporters.tx_hang_buf.tx_ring = NULL; + } - /* process reset requests first */ ice_reset_subtask(pf); /* bail if a reset/recovery cycle is pending or rebuild failed */ @@ -4741,55 +4764,12 @@ static void ice_decfg_netdev(struct ice_vsi *vsi) vsi->netdev = NULL; } -/** - * ice_wait_for_fw - wait for full FW readiness - * @hw: pointer to the hardware structure - * @timeout: milliseconds that can elapse before timing out - */ -static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout) -{ - int fw_loading; - u32 elapsed = 0; - - while (elapsed <= timeout) { - fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M; - - /* firmware was not yet loaded, we have to wait more */ - if (fw_loading) { - elapsed += 100; - msleep(100); - continue; - } - return 0; - } - - return -ETIMEDOUT; -} - int ice_init_dev(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; int err; - err = ice_init_hw(hw); - if (err) { - dev_err(dev, "ice_init_hw failed: %d\n", err); - return err; - } - - /* Some cards require longer initialization times - * due to necessity of loading FW from an external source. - * This can take even half a minute. 
- */ - if (ice_is_pf_c827(hw)) { - err = ice_wait_for_fw(hw, 30000); - if (err) { - dev_err(dev, "ice_wait_for_fw timed out"); - return err; - } - } - ice_init_feature_support(pf); err = ice_init_ddp_config(hw, pf); @@ -4810,7 +4790,7 @@ int ice_init_dev(struct ice_pf *pf) err = ice_init_pf(pf); if (err) { dev_err(dev, "ice_init_pf failed: %d\n", err); - goto err_init_pf; + return err; } pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; @@ -4834,7 +4814,7 @@ int ice_init_dev(struct ice_pf *pf) if (err) { dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); err = -EIO; - goto err_init_interrupt_scheme; + goto unroll_pf_init; } /* In case of MSIX we are going to setup the misc vector right here @@ -4845,17 +4825,15 @@ int ice_init_dev(struct ice_pf *pf) err = ice_req_irq_msix_misc(pf); if (err) { dev_err(dev, "setup of misc vector failed: %d\n", err); - goto err_req_irq_msix_misc; + goto unroll_irq_scheme_init; } return 0; -err_req_irq_msix_misc: +unroll_irq_scheme_init: ice_clear_interrupt_scheme(pf); -err_init_interrupt_scheme: +unroll_pf_init: ice_deinit_pf(pf); -err_init_pf: - ice_deinit_hw(hw); return err; } @@ -5088,12 +5066,14 @@ static int ice_init_devlink(struct ice_pf *pf) ice_devlink_init_regions(pf); ice_devlink_register(pf); + ice_health_init(pf); return 0; } static void ice_deinit_devlink(struct ice_pf *pf) { + ice_health_deinit(pf); ice_devlink_unregister(pf); ice_devlink_destroy_regions(pf); ice_devlink_unregister_params(pf); @@ -5249,6 +5229,36 @@ void ice_unload(struct ice_pf *pf) ice_decfg_netdev(vsi); } +static int ice_probe_recovery_mode(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + int err; + + dev_err(dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode\n"); + + INIT_HLIST_HEAD(&pf->aq_wait_list); + spin_lock_init(&pf->aq_wait_lock); + init_waitqueue_head(&pf->aq_wait_queue); + + timer_setup(&pf->serv_tmr, ice_service_timer, 0); + pf->serv_tmr_period = HZ; + INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode); + clear_bit(ICE_SERVICE_SCHED, pf->state); + err = ice_create_all_ctrlq(&pf->hw); + if (err) + return err; + + scoped_guard(devl, priv_to_devlink(pf)) { + err = ice_init_devlink(pf); + if (err) + return err; + } + + ice_service_task_restart(pf); + + return 0; +} + /** * ice_probe - Device initialization routine * @pdev: PCI device information struct @@ -5312,13 +5322,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) } pci_set_master(pdev); - - adapter = ice_adapter_get(pdev); - if (IS_ERR(adapter)) - return PTR_ERR(adapter); - pf->pdev = pdev; - pf->adapter = adapter; pci_set_drvdata(pdev, pf); set_bit(ICE_DOWN, pf->state); /* Disable service task until DOWN bit is cleared */ @@ -5346,29 +5350,47 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) hw->debug_mask = debug; #endif + if (ice_is_recovery_mode(hw)) + return ice_probe_recovery_mode(pf); + + err = ice_init_hw(hw); + if (err) { + dev_err(dev, "ice_init_hw failed: %d\n", err); + return err; + } + + adapter = ice_adapter_get(pdev); + if (IS_ERR(adapter)) { + err = PTR_ERR(adapter); + goto unroll_hw_init; + } + pf->adapter = adapter; + err = ice_init(pf); if (err) - goto err_init; + goto unroll_adapter; devl_lock(priv_to_devlink(pf)); err = ice_load(pf); if (err) - goto err_load; + goto unroll_init; err = ice_init_devlink(pf); if (err) - goto err_init_devlink; + goto unroll_load; 
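The unroll_* labels introduced above follow the kernel's reverse-order cleanup idiom: each failure path jumps to the label that tears down exactly what had been set up before the failing step, so teardown mirrors setup. A minimal standalone sketch of that pattern, with hypothetical stand-ins for the driver's init and teardown steps (none of these names are from the driver):

#include <stdio.h>

/* Hypothetical stand-ins for the real init/teardown steps. */
static int init_hw(void) { return 0; }
static void deinit_hw(void) { }
static int get_adapter(void) { return 0; }
static void put_adapter(void) { }
static int load(void) { return -1; /* simulate a late failure */ }

static int probe(void)
{
	int err;

	err = init_hw();
	if (err)
		return err;

	err = get_adapter();
	if (err)
		goto unroll_hw_init;

	err = load();
	if (err)
		goto unroll_adapter;

	return 0;

unroll_adapter:
	put_adapter();	/* undo get_adapter() */
unroll_hw_init:
	deinit_hw();	/* undo init_hw() */
	return err;
}

int main(void)
{
	printf("probe() = %d\n", probe());	/* prints -1 after a full unwind */
	return 0;
}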
devl_unlock(priv_to_devlink(pf)); return 0; -err_init_devlink: +unroll_load: ice_unload(pf); -err_load: +unroll_init: devl_unlock(priv_to_devlink(pf)); ice_deinit(pf); -err_init: +unroll_adapter: ice_adapter_put(pdev); +unroll_hw_init: + ice_deinit_hw(hw); return err; } @@ -5448,6 +5470,14 @@ static void ice_remove(struct pci_dev *pdev) msleep(100); } + if (ice_is_recovery_mode(&pf->hw)) { + ice_service_task_stop(pf); + scoped_guard(devl, priv_to_devlink(pf)) { + ice_deinit_devlink(pf); + } + return; + } + if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { set_bit(ICE_VF_RESETS_DISABLED, pf->state); ice_free_vfs(pf); @@ -7793,6 +7823,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) /* if we get here, reset flow is successful */ clear_bit(ICE_RESET_FAILED, pf->state); + ice_health_clear(pf); + ice_plug_aux_dev(pf); if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) ice_lag_rebuild(pf); @@ -8283,16 +8315,18 @@ void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) if (tx_ring) { struct ice_hw *hw = &pf->hw; - u32 head, val = 0; + u32 head, intr = 0; head = FIELD_GET(QTX_COMM_HEAD_HEAD_M, rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue]))); /* Read interrupt register */ - val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); + intr = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", vsi->vsi_num, txqueue, tx_ring->next_to_clean, - head, tx_ring->next_to_use, val); + head, tx_ring->next_to_use, intr); + + ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr); } pf->tx_timeout_last_recovery = jiffies; diff --git a/drivers/net/ethernet/intel/ice/ice_parser.h b/drivers/net/ethernet/intel/ice/ice_parser.h index 6509d807627c..4f56d53d56b9 100644 --- a/drivers/net/ethernet/intel/ice/ice_parser.h +++ b/drivers/net/ethernet/intel/ice/ice_parser.h @@ -257,7 +257,6 @@ ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size, /*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/ #define ICE_BST_TCAM_TABLE_SIZE 256 #define ICE_BST_TCAM_KEY_SIZE 20 -#define ICE_BST_KEY_TCAM_SIZE 19 /* Boost TCAM item */ struct ice_bst_tcam_item { @@ -401,7 +400,6 @@ u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag); #define ICE_PARSER_GPR_NUM 128 #define ICE_PARSER_FLG_NUM 64 #define ICE_PARSER_ERR_NUM 16 -#define ICE_BST_KEY_SIZE 10 #define ICE_MARKER_ID_SIZE 9 #define ICE_MARKER_MAX_SIZE \ (ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1) @@ -431,13 +429,13 @@ struct ice_parser_rt { u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV]; u16 pkt_len; u16 po; - u8 bst_key[ICE_BST_KEY_SIZE]; + u8 bst_key[ICE_BST_TCAM_KEY_SIZE]; struct ice_pg_cam_key pg_key; + u8 pg_prio; struct ice_alu *alu0; struct ice_alu *alu1; struct ice_alu *alu2; struct ice_pg_cam_action *action; - u8 pg_prio; struct ice_gpr_pu pu; u8 markers[ICE_MARKER_ID_SIZE]; bool protocols[ICE_PO_PAIR_SIZE]; diff --git a/drivers/net/ethernet/intel/ice/ice_parser_rt.c b/drivers/net/ethernet/intel/ice/ice_parser_rt.c index dedf5e854e4b..3995d662e050 100644 --- a/drivers/net/ethernet/intel/ice/ice_parser_rt.c +++ b/drivers/net/ethernet/intel/ice/ice_parser_rt.c @@ -125,22 +125,20 @@ static void ice_bst_key_init(struct ice_parser_rt *rt, else key[idd] = imem->b_kb.prio; - idd = ICE_BST_KEY_TCAM_SIZE - 1; + idd = ICE_BST_TCAM_KEY_SIZE - 2; for (i = idd; i >= 0; i--) { int j; j = ho + idd - i; if (j < ICE_PARSER_MAX_PKT_LEN) - key[i] = rt->pkt_buf[ho + idd - i]; + key[i] = 
rt->pkt_buf[j]; else key[i] = 0; } - ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n"); - ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", - key[0], key[1], key[2], key[3], key[4], - key[5], key[6], key[7], key[8], key[9]); - ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n"); + ice_debug_array_w_prefix(rt->psr->hw, ICE_DBG_PARSER, + KBUILD_MODNAME ": Generated Boost TCAM Key", + key, ICE_BST_TCAM_KEY_SIZE); } static u16 ice_bit_rev_u16(u16 v, int len) diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index efd770dfec44..e26320ce52ca 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -16,28 +16,28 @@ static const char ice_pin_names[][64] = { }; static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = { - /* name, gpio */ - { TIME_SYNC, { 4, -1 }}, - { ONE_PPS, { -1, 5 }}, + /* name, gpio, delay */ + { TIME_SYNC, { 4, -1 }, { 0, 0 }}, + { ONE_PPS, { -1, 5 }, { 0, 11 }}, }; static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = { - /* name, gpio */ - { SDP0, { 0, 0 }}, - { SDP1, { 1, 1 }}, - { SDP2, { 2, 2 }}, - { SDP3, { 3, 3 }}, - { TIME_SYNC, { 4, -1 }}, - { ONE_PPS, { -1, 5 }}, + /* name, gpio, delay */ + { SDP0, { 0, 0 }, { 15, 14 }}, + { SDP1, { 1, 1 }, { 15, 14 }}, + { SDP2, { 2, 2 }, { 15, 14 }}, + { SDP3, { 3, 3 }, { 15, 14 }}, + { TIME_SYNC, { 4, -1 }, { 11, 0 }}, + { ONE_PPS, { -1, 5 }, { 0, 9 }}, }; static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = { - /* name, gpio */ - { SDP0, { 0, 0 }}, - { SDP1, { 1, 1 }}, - { SDP2, { 2, 2 }}, - { SDP3, { 3, 3 }}, - { ONE_PPS, { -1, 5 }}, + /* name, gpio, delay */ + { SDP0, { 0, 0 }, { 0, 1 }}, + { SDP1, { 1, 1 }, { 0, 1 }}, + { SDP2, { 2, 2 }, { 0, 1 }}, + { SDP3, { 3, 3 }, { 0, 1 }}, + { ONE_PPS, { -1, 5 }, { 0, 1 }}, }; static const char ice_pin_names_nvm[][64] = { @@ -49,12 +49,12 @@ static const char ice_pin_names_nvm[][64] = { }; static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = { - /* name, gpio */ - { GNSS, { 1, -1 }}, - { SMA1, { 1, 0 }}, - { UFL1, { -1, 0 }}, - { SMA2, { 3, 2 }}, - { UFL2, { 3, -1 }}, + /* name, gpio, delay */ + { GNSS, { 1, -1 }, { 0, 0 }}, + { SMA1, { 1, 0 }, { 0, 1 }}, + { UFL1, { -1, 0 }, { 0, 1 }}, + { SMA2, { 3, 2 }, { 0, 1 }}, + { UFL2, { 3, -1 }, { 0, 0 }}, }; static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf) @@ -464,7 +464,9 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx) */ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx) { + struct ice_e810_params *params; struct ice_ptp_port *ptp_port; + unsigned long flags; struct sk_buff *skb; struct ice_pf *pf; @@ -473,6 +475,7 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx) ptp_port = container_of(tx, struct ice_ptp_port, tx); pf = ptp_port_to_pf(ptp_port); + params = &pf->hw.ptp.phy.e810; /* Drop packets which have waited for more than 2 seconds */ if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) { @@ -489,11 +492,17 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx) ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx); + spin_lock_irqsave(¶ms->atqbal_wq.lock, flags); + + params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS; + /* Write TS index to read to the PF register so the FW can read it */ - wr32(&pf->hw, PF_SB_ATQBAL, - TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) | - TS_LL_READ_TS); + wr32(&pf->hw, REG_LL_PROXY_H, + REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) | + 
REG_LL_PROXY_H_EXEC); tx->last_ll_ts_idx_read = idx; + + spin_unlock_irqrestore(¶ms->atqbal_wq.lock, flags); } /** @@ -504,35 +513,52 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) { struct skb_shared_hwtstamps shhwtstamps = {}; u8 idx = tx->last_ll_ts_idx_read; + struct ice_e810_params *params; struct ice_ptp_port *ptp_port; u64 raw_tstamp, tstamp; bool drop_ts = false; struct sk_buff *skb; + unsigned long flags; + struct device *dev; struct ice_pf *pf; - u32 val; + u32 reg_ll_high; if (!tx->init || tx->last_ll_ts_idx_read < 0) return; ptp_port = container_of(tx, struct ice_ptp_port, tx); pf = ptp_port_to_pf(ptp_port); + dev = ice_pf_to_dev(pf); + params = &pf->hw.ptp.phy.e810; ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx); - val = rd32(&pf->hw, PF_SB_ATQBAL); + spin_lock_irqsave(¶ms->atqbal_wq.lock, flags); + + if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS)) + dev_dbg(dev, "%s: low latency interrupt request not in progress?\n", + __func__); + + /* Read the low 32 bit value */ + raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L); + /* Read the status together with high TS part */ + reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H); + + /* Wake up threads waiting on low latency interface */ + params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS; + + wake_up_locked(¶ms->atqbal_wq); + + spin_unlock_irqrestore(¶ms->atqbal_wq.lock, flags); /* When the bit is cleared, the TS is ready in the register */ - if (val & TS_LL_READ_TS) { + if (reg_ll_high & REG_LL_PROXY_H_EXEC) { dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready"); return; } /* High 8 bit value of the TS is on the bits 16:23 */ - raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val); - raw_tstamp <<= 32; - - /* Read the low 32 bit value */ - raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH); + raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32; /* Devices using this interface always verify the timestamp differs * relative to the last cached timestamp value. 
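The completion path above reads the 40-bit timestamp in two halves: REG_LL_PROXY_L supplies bits 31:0, while bits 23:16 of REG_LL_PROXY_H supply bits 39:32. A standalone sketch of that assembly, using userspace stand-ins for the kernel's GENMASK()/FIELD_GET() helpers (the mask name and sample register values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define TS_HIGH_MASK	0x00ff0000u	/* bits 23:16 of the high register */
#define TS_HIGH_GET(reg)	(((reg) & TS_HIGH_MASK) >> 16)

/* Combine the two register reads into one 40-bit timestamp. */
static uint64_t assemble_ts(uint32_t reg_lo, uint32_t reg_hi)
{
	return ((uint64_t)TS_HIGH_GET(reg_hi) << 32) | reg_lo;
}

int main(void)
{
	uint32_t reg_hi = 0x00ab0000u;	/* high byte 0xab in bits 23:16 */
	uint32_t reg_lo = 0x12345678u;

	/* prints ts = 0xab12345678 */
	printf("ts = 0x%llx\n", (unsigned long long)assemble_ts(reg_lo, reg_hi));
	return 0;
}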
@@ -1558,18 +1584,29 @@ void ice_ptp_extts_event(struct ice_pf *pf) * Event is defined in GLTSYN_EVNT_0 register */ for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) { + int pin_desc_idx; + /* Check if channel is enabled */ - if (pf->ptp.ext_ts_irq & (1 << chan)) { - lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx)); - hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx)); - event.timestamp = (((u64)hi) << 32) | lo; - event.type = PTP_CLOCK_EXTTS; - event.index = chan; - - /* Fire event */ - ptp_clock_event(pf->ptp.clock, &event); - pf->ptp.ext_ts_irq &= ~(1 << chan); + if (!(pf->ptp.ext_ts_irq & (1 << chan))) + continue; + + lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx)); + hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx)); + event.timestamp = (u64)hi << 32 | lo; + + /* Add delay compensation */ + pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan); + if (pin_desc_idx >= 0) { + const struct ice_ptp_pin_desc *desc; + + desc = &pf->ptp.ice_pin_desc[pin_desc_idx]; + event.timestamp -= desc->delay[0]; } + + event.type = PTP_CLOCK_EXTTS; + event.index = chan; + pf->ptp.ext_ts_irq &= ~(1 << chan); + ptp_clock_event(pf->ptp.clock, &event); } } @@ -1764,9 +1801,9 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan, static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, int on) { + unsigned int gpio_pin, prop_delay_ns; u64 clk, period, start, phase; struct ice_hw *hw = &pf->hw; - unsigned int gpio_pin; int pin_desc_idx; if (rq->flags & ~PTP_PEROUT_PHASE) @@ -1777,6 +1814,7 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, return -EIO; gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1]; + prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1]; period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec; /* If we're disabling the output or period is 0, clear out CLKO and TGT @@ -1808,11 +1846,11 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, * at the next multiple of period, maintaining phase. */ clk = ice_ptp_read_src_clk_reg(pf, NULL); - if (rq->flags & PTP_PEROUT_PHASE || start <= clk - ice_prop_delay(hw)) + if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns) start = div64_u64(clk + period - 1, period) * period + phase; /* Compensate for propagation delay from the generator to the pin. */ - start -= ice_prop_delay(hw); + start -= prop_delay_ns; return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period); } @@ -3072,7 +3110,7 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) /* Allocate a kworker for handling work required for the ports * connected to the PTP hardware clock. */ - kworker = kthread_create_worker(0, "ice-ptp-%s", + kworker = kthread_run_worker(0, "ice-ptp-%s", dev_name(ice_pf_to_dev(pf))); if (IS_ERR(kworker)) return PTR_ERR(kworker); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index c490d98fd9c6..a1d0e988c084 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -211,6 +211,7 @@ enum ice_ptp_pin_nvm { * struct ice_ptp_pin_desc - hardware pin description data * @name_idx: index of the name of pin in ice_pin_names * @gpio: the associated GPIO input and output pins + * @delay: input and output signal delays in nanoseconds * * Structure describing a PTP-capable GPIO pin that extends ptp_pin_desc array * for the device. 
Device families have separate sets of available pins with @@ -219,6 +220,7 @@ enum ice_ptp_pin_nvm { struct ice_ptp_pin_desc { int name_idx; int gpio[2]; + unsigned int delay[2]; }; /** diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h index a8e57cf05a9c..ac46d1183300 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h @@ -341,8 +341,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 823437500, /* 823.4375 MHz PLL */ /* nominal_incval */ 0x136e44fabULL, - /* pps_delay */ - 11, }, /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */ @@ -351,8 +349,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 783360000, /* 783.36 MHz */ /* nominal_incval */ 0x146cc2177ULL, - /* pps_delay */ - 12, }, /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */ @@ -361,8 +357,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 796875000, /* 796.875 MHz */ /* nominal_incval */ 0x141414141ULL, - /* pps_delay */ - 12, }, /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */ @@ -371,8 +365,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 816000000, /* 816 MHz */ /* nominal_incval */ 0x139b9b9baULL, - /* pps_delay */ - 12, }, /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */ @@ -381,8 +373,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 830078125, /* 830.78125 MHz */ /* nominal_incval */ 0x134679aceULL, - /* pps_delay */ - 11, }, /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */ @@ -391,8 +381,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 783360000, /* 783.36 MHz */ /* nominal_incval */ 0x146cc2177ULL, - /* pps_delay */ - 12, }, }; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index 02e84f5b1d45..ec91822e9280 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -391,7 +391,7 @@ static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw, /* Log the current clock configuration */ ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", - dw24.ts_pll_enable ? "enabled" : "disabled", + str_enabled_disabled(dw24.ts_pll_enable), ice_clk_src_str(dw24.time_ref_sel), ice_clk_freq_str(dw9.time_ref_freq_sel), bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked"); @@ -469,7 +469,7 @@ static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw, /* Log the current clock configuration */ ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", - dw24.ts_pll_enable ? "enabled" : "disabled", + str_enabled_disabled(dw24.ts_pll_enable), ice_clk_src_str(dw24.time_ref_sel), ice_clk_freq_str(dw9.time_ref_freq_sel), bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked"); @@ -546,7 +546,7 @@ static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw, /* Log the current clock configuration */ ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", - dw24.ts_pll_enable ? "enabled" : "disabled", + str_enabled_disabled(dw24.ts_pll_enable), ice_clk_src_str(dw23.time_ref_sel), ice_clk_freq_str(dw9.time_ref_freq_sel), ro_lock.plllock_true_lock_cri ? 
"locked" : "unlocked"); @@ -651,7 +651,7 @@ static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw, /* Log the current clock configuration */ ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", - dw24.ts_pll_enable ? "enabled" : "disabled", + str_enabled_disabled(dw24.ts_pll_enable), ice_clk_src_str(dw23.time_ref_sel), ice_clk_freq_str(dw9.time_ref_freq_sel), ro_lock.plllock_true_lock_cri ? "locked" : "unlocked"); @@ -4876,33 +4876,46 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val) static int ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo) { + struct ice_e810_params *params = &hw->ptp.phy.e810; + unsigned long flags; u32 val; - u8 i; + int err; + + spin_lock_irqsave(¶ms->atqbal_wq.lock, flags); + + /* Wait for any pending in-progress low latency interrupt */ + err = wait_event_interruptible_locked_irq(params->atqbal_wq, + !(params->atqbal_flags & + ATQBAL_FLAGS_INTR_IN_PROGRESS)); + if (err) { + spin_unlock_irqrestore(¶ms->atqbal_wq.lock, flags); + return err; + } /* Write TS index to read to the PF register so the FW can read it */ - val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS; - wr32(hw, PF_SB_ATQBAL, val); + val = FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) | REG_LL_PROXY_H_EXEC; + wr32(hw, REG_LL_PROXY_H, val); /* Read the register repeatedly until the FW provides us the TS */ - for (i = TS_LL_READ_RETRIES; i > 0; i--) { - val = rd32(hw, PF_SB_ATQBAL); + err = read_poll_timeout_atomic(rd32, val, + !FIELD_GET(REG_LL_PROXY_H_EXEC, val), 10, + REG_LL_PROXY_H_TIMEOUT_US, false, hw, + REG_LL_PROXY_H); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n"); + spin_unlock_irqrestore(¶ms->atqbal_wq.lock, flags); + return err; + } - /* When the bit is cleared, the TS is ready in the register */ - if (!(FIELD_GET(TS_LL_READ_TS, val))) { - /* High 8 bit value of the TS is on the bits 16:23 */ - *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val); + /* High 8 bit value of the TS is on the bits 16:23 */ + *hi = FIELD_GET(REG_LL_PROXY_H_TS_HIGH, val); - /* Read the low 32 bit value and set the TS valid bit */ - *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID; - return 0; - } + /* Read the low 32 bit value and set the TS valid bit */ + *lo = rd32(hw, REG_LL_PROXY_L) | TS_VALID; - udelay(10); - } + spin_unlock_irqrestore(¶ms->atqbal_wq.lock, flags); - /* FW failed to provide the TS in time */ - ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n"); - return -EINVAL; + return 0; } /** @@ -5085,6 +5098,55 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time) } /** + * ice_ptp_prep_phy_adj_ll_e810 - Prep PHY ports for a time adjustment + * @hw: pointer to HW struct + * @adj: adjustment value to program + * + * Use the low latency firmware interface to program PHY time adjustment to + * all PHY ports. 
+ * + * Return: 0 on success, -EBUSY on timeout + */ +static int ice_ptp_prep_phy_adj_ll_e810(struct ice_hw *hw, s32 adj) +{ + const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + struct ice_e810_params *params = &hw->ptp.phy.e810; + u32 val; + int err; + + spin_lock_irq(¶ms->atqbal_wq.lock); + + /* Wait for any pending in-progress low latency interrupt */ + err = wait_event_interruptible_locked_irq(params->atqbal_wq, + !(params->atqbal_flags & + ATQBAL_FLAGS_INTR_IN_PROGRESS)); + if (err) { + spin_unlock_irq(¶ms->atqbal_wq.lock); + return err; + } + + wr32(hw, REG_LL_PROXY_L, adj); + val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_ADJ) | + FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC; + wr32(hw, REG_LL_PROXY_H, val); + + /* Read the register repeatedly until the FW indicates completion */ + err = read_poll_timeout_atomic(rd32, val, + !FIELD_GET(REG_LL_PROXY_H_EXEC, val), + 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw, + REG_LL_PROXY_H); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer adjustment using low latency interface\n"); + spin_unlock_irq(¶ms->atqbal_wq.lock); + return err; + } + + spin_unlock_irq(¶ms->atqbal_wq.lock); + + return 0; +} + +/** * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment * @hw: pointer to HW struct * @adj: adjustment value to program @@ -5102,6 +5164,9 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj) u8 tmr_idx; int err; + if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update) + return ice_ptp_prep_phy_adj_ll_e810(hw, adj); + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; /* Adjustments are represented as signed 2's complement values in @@ -5125,6 +5190,56 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj) } /** + * ice_ptp_prep_phy_incval_ll_e810 - Prep PHY ports increment value change + * @hw: pointer to HW struct + * @incval: The new 40bit increment value to prepare + * + * Use the low latency firmware interface to program PHY time increment value + * for all PHY ports. 
+ * + * Return: 0 on success, -EBUSY on timeout + */ +static int ice_ptp_prep_phy_incval_ll_e810(struct ice_hw *hw, u64 incval) +{ + const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + struct ice_e810_params *params = &hw->ptp.phy.e810; + u32 val; + int err; + + spin_lock_irq(¶ms->atqbal_wq.lock); + + /* Wait for any pending in-progress low latency interrupt */ + err = wait_event_interruptible_locked_irq(params->atqbal_wq, + !(params->atqbal_flags & + ATQBAL_FLAGS_INTR_IN_PROGRESS)); + if (err) { + spin_unlock_irq(¶ms->atqbal_wq.lock); + return err; + } + + wr32(hw, REG_LL_PROXY_L, lower_32_bits(incval)); + val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_FREQ) | + FIELD_PREP(REG_LL_PROXY_H_TS_HIGH, (u8)upper_32_bits(incval)) | + FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC; + wr32(hw, REG_LL_PROXY_H, val); + + /* Read the register repeatedly until the FW indicates completion */ + err = read_poll_timeout_atomic(rd32, val, + !FIELD_GET(REG_LL_PROXY_H_EXEC, val), + 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw, + REG_LL_PROXY_H); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer increment using low latency interface\n"); + spin_unlock_irq(¶ms->atqbal_wq.lock); + return err; + } + + spin_unlock_irq(¶ms->atqbal_wq.lock); + + return 0; +} + +/** * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change * @hw: pointer to HW struct * @incval: The new 40bit increment value to prepare @@ -5139,6 +5254,9 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval) u8 tmr_idx; int err; + if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update) + return ice_ptp_prep_phy_incval_ll_e810(hw, incval); + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; low = lower_32_bits(incval); high = upper_32_bits(incval); @@ -5423,6 +5541,8 @@ static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp) ptp->phy_model = ICE_PHY_E810; ptp->num_lports = 8; ptp->ports_per_phy = 4; + + init_waitqueue_head(&ptp->phy.e810.atqbal_wq); } /* Device agnostic functions diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index 1cee0f1bba2d..6779ce120515 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -80,7 +80,6 @@ struct ice_phy_reg_info_eth56g { * struct ice_time_ref_info_e82x * @pll_freq: Frequency of PLL that drives timer ticks in Hz * @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L - * @pps_delay: propagation delay of the PPS output signal * * Characteristic information for the various TIME_REF sources possible in the * E822 devices @@ -88,7 +87,6 @@ struct ice_phy_reg_info_eth56g { struct ice_time_ref_info_e82x { u64 pll_freq; u64 nominal_incval; - u8 pps_delay; }; /** @@ -326,8 +324,6 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD]; */ #define ICE_E810_PLL_FREQ 812500000 #define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL -#define ICE_E810_OUT_PROP_DELAY_NS 1 -#define ICE_E825C_OUT_PROP_DELAY_NS 11 /* Device agnostic functions */ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw); @@ -389,11 +385,6 @@ static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref) return e82x_time_ref[time_ref].nominal_incval; } -static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref) -{ - return e82x_time_ref[time_ref].pps_delay; -} - /* E822 Vernier calibration functions */ int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset); int 
ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port); @@ -432,20 +423,6 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port); #define ICE_ETH56G_NOMINAL_THRESH4 0x7777 #define ICE_ETH56G_NOMINAL_TX_THRESH 0x6 -static inline u64 ice_prop_delay(const struct ice_hw *hw) -{ - switch (hw->ptp.phy_model) { - case ICE_PHY_ETH56G: - return ICE_E825C_OUT_PROP_DELAY_NS; - case ICE_PHY_E810: - return ICE_E810_OUT_PROP_DELAY_NS; - case ICE_PHY_E82X: - return ice_e82x_pps_delay(ice_e82x_time_ref(hw)); - default: - return 0; - } -} - /** * ice_get_base_incval - Get base clock increment value * @hw: pointer to the HW struct @@ -689,11 +666,18 @@ static inline bool ice_is_dual(struct ice_hw *hw) #define BYTES_PER_IDX_ADDR_L 4 /* Tx timestamp low latency read definitions */ -#define TS_LL_READ_RETRIES 200 -#define TS_LL_READ_TS_HIGH GENMASK(23, 16) -#define TS_LL_READ_TS_IDX GENMASK(29, 24) -#define TS_LL_READ_TS_INTR BIT(30) -#define TS_LL_READ_TS BIT(31) +#define REG_LL_PROXY_H_TIMEOUT_US 2000 +#define REG_LL_PROXY_H_PHY_TMR_CMD_M GENMASK(7, 6) +#define REG_LL_PROXY_H_PHY_TMR_CMD_ADJ 0x1 +#define REG_LL_PROXY_H_PHY_TMR_CMD_FREQ 0x2 +#define REG_LL_PROXY_H_TS_HIGH GENMASK(23, 16) +#define REG_LL_PROXY_H_PHY_TMR_IDX_M BIT(24) +#define REG_LL_PROXY_H_TS_IDX GENMASK(29, 24) +#define REG_LL_PROXY_H_TS_INTR_ENA BIT(30) +#define REG_LL_PROXY_H_EXEC BIT(31) + +#define REG_LL_PROXY_L PF_SB_ATQBAH +#define REG_LL_PROXY_H PF_SB_ATQBAL /* Internal PHY timestamp address */ #define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U)) diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index 970a99a52bf1..fb7a1b9a4313 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -4,7 +4,7 @@ #include "ice.h" #include "ice_eswitch.h" #include "devlink/devlink.h" -#include "devlink/devlink_port.h" +#include "devlink/port.h" #include "ice_sriov.h" #include "ice_tc_lib.h" #include "ice_dcb_lib.h" diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c index 75d7147e1c01..1a2c94375ca7 100644 --- a/drivers/net/ethernet/intel/ice/ice_sf_eth.c +++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c @@ -5,8 +5,8 @@ #include "ice_txrx.h" #include "ice_fltr.h" #include "ice_sf_eth.h" -#include "devlink/devlink_port.h" #include "devlink/devlink.h" +#include "devlink/port.h" static const struct net_device_ops ice_sf_netdev_ops = { .ndo_open = ice_open, diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index b83f99c01d91..8aabf7749aa5 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -36,6 +36,7 @@ static void ice_free_vf_entries(struct ice_pf *pf) hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) { hash_del_rcu(&vf->entry); + ice_deinitialize_vf_entry(vf); ice_put_vf(vf); } } @@ -193,10 +194,6 @@ void ice_free_vfs(struct ice_pf *pf) wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); } - /* clear malicious info since the VF is getting released */ - if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) - list_del(&vf->mbx_info.list_entry); - mutex_unlock(&vf->cfg_lock); } diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 0e740342e294..4a91e0aaf0a5 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -4784,7 +4784,8 @@ ice_find_recp(struct ice_hw *hw, struct 
ice_prot_lkup_ext *lkup_exts, */ if (found && recp[i].tun_type == rinfo->tun_type && recp[i].need_pass_l2 == rinfo->need_pass_l2 && - recp[i].allow_pass_l2 == rinfo->allow_pass_l2) + recp[i].allow_pass_l2 == rinfo->allow_pass_l2 && + recp[i].priority == rinfo->priority) return i; /* Return the recipe ID */ } }
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 5d2d7736fd5f..380ba1e8b3b2 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -527,15 +527,14 @@ err: * @xdp: xdp_buff used as input to the XDP program * @xdp_prog: XDP program to run * @xdp_ring: ring to be used for XDP_TX action - * @rx_buf: Rx buffer to store the XDP action * @eop_desc: Last descriptor in packet to read metadata from * * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} */ -static void +static u32 ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring, - struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc) + union ice_32b_rx_flex_desc *eop_desc) { unsigned int ret = ICE_XDP_PASS; u32 act;
@@ -574,7 +573,7 @@ out_failure: ret = ICE_XDP_CONSUMED; } exit: - ice_set_rx_bufs_act(xdp, rx_ring, ret); + return ret; }
/** @@ -860,10 +859,8 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, xdp_buff_set_frags_flag(xdp); } - if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { - ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED); + if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) return -ENOMEM; - } __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page, rx_buf->page_offset, size);
@@ -924,7 +921,6 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, struct ice_rx_buf *rx_buf; rx_buf = &rx_ring->rx_buf[ntc]; - rx_buf->pgcnt = page_count(rx_buf->page); prefetchw(rx_buf->page); if (!size) @@ -941,6 +937,31 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, }
/** + * ice_get_pgcnts - grab page_count() for gathered fragments + * @rx_ring: Rx descriptor ring to store the page counts on + * + * This function is intended to be called right before running the XDP + * program so that the page recycling mechanism can make a correct + * decision about the underlying pages; it has to happen at this point + * because the XDP program may change the refcount of a page + */ +static void ice_get_pgcnts(struct ice_rx_ring *rx_ring) +{ + u32 nr_frags = rx_ring->nr_frags + 1; + u32 idx = rx_ring->first_desc; + struct ice_rx_buf *rx_buf; + u32 cnt = rx_ring->count; + + for (int i = 0; i < nr_frags; i++) { + rx_buf = &rx_ring->rx_buf[idx]; + rx_buf->pgcnt = page_count(rx_buf->page); + + if (++idx == cnt) + idx = 0; + } +} + +/** * ice_build_skb - Build skb around an existing buffer * @rx_ring: Rx descriptor ring to transact packets on * @xdp: xdp_buff pointing to the data
@@ -1051,12 +1072,12 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) rx_buf->page_offset + headlen, size, xdp->frame_sz); } else { - /* buffer is unused, change the act that should be taken later - * on; data was copied onto skb's linear part so there's no + /* buffer is unused, restore biased page count in Rx buffer; + * data was copied onto skb's linear part so there's no * need for adjusting page offset and we can reuse this buffer * as-is */ - rx_buf->act = ICE_SKB_CONSUMED; + rx_buf->pagecnt_bias++; } if (unlikely(xdp_buff_has_frags(xdp))) {
@@ -1104,6 +1125,65 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf) }
/** + * ice_put_rx_mbuf - call ice_put_rx_buf() for each buffer of a frame + * @rx_ring: Rx ring with all the auxiliary data + * @xdp: XDP buffer carrying linear + frags part + * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage + * @ntc: the current next_to_clean value to be stored at rx_ring + * @verdict: return code from XDP program execution + * + * Walk through the gathered fragments and satisfy the internal page + * recycle mechanism, taking the action dictated by the verdict the + * XDP program returned. + */ +static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, + u32 *xdp_xmit, u32 ntc, u32 verdict) +{ + u32 nr_frags = rx_ring->nr_frags + 1; + u32 idx = rx_ring->first_desc; + u32 cnt = rx_ring->count; + u32 post_xdp_frags = 1; + struct ice_rx_buf *buf; + int i; + + if (unlikely(xdp_buff_has_frags(xdp))) + post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags; + + for (i = 0; i < post_xdp_frags; i++) { + buf = &rx_ring->rx_buf[idx]; + + if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) { + ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); + *xdp_xmit |= verdict; + } else if (verdict & ICE_XDP_CONSUMED) { + buf->pagecnt_bias++; + } else if (verdict == ICE_XDP_PASS) { + ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); + } + + ice_put_rx_buf(rx_ring, buf); + + if (++idx == cnt) + idx = 0; + } + /* handle buffers that represented frags released by the XDP prog; + * keep pagecnt_bias as-is for these, since the page refcount was + * already decremented inside the XDP prog and the biased refcnt + * does not need to be increased + */ + for (; i < nr_frags; i++) { + buf = &rx_ring->rx_buf[idx]; + ice_put_rx_buf(rx_ring, buf); + if (++idx == cnt) + idx = 0; + } + + xdp->data = NULL; + rx_ring->first_desc = ntc; + rx_ring->nr_frags = 0; +} + +/** * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @rx_ring: Rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process
@@ -1120,15 +1200,13 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) unsigned int total_rx_bytes = 0, total_rx_pkts = 0; unsigned int offset = rx_ring->rx_offset; struct xdp_buff *xdp = &rx_ring->xdp; - u32 cached_ntc = rx_ring->first_desc; struct ice_tx_ring *xdp_ring = NULL; struct bpf_prog *xdp_prog = NULL; u32 ntc = rx_ring->next_to_clean; + u32 cached_ntu, xdp_verdict; u32 cnt = rx_ring->count; u32 xdp_xmit = 0; - u32 cached_ntu; bool failure; - u32 first; xdp_prog = READ_ONCE(rx_ring->xdp_prog); if (xdp_prog) {
@@ -1190,6 +1268,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) xdp_prepare_buff(xdp, hard_start, offset, size, !!offset); xdp_buff_clear_frags_flag(xdp); } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) { + ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED); break; } if (++ntc == cnt)
@@ -1199,15 +1278,15 @@ if (ice_is_non_eop(rx_ring, rx_desc)) continue; - ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc); - if (rx_buf->act == ICE_XDP_PASS) + ice_get_pgcnts(rx_ring); + xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc); + if (xdp_verdict == ICE_XDP_PASS) goto construct_skb; total_rx_bytes += xdp_get_buff_len(xdp); total_rx_pkts++; - xdp->data = NULL; - rx_ring->first_desc = ntc; - rx_ring->nr_frags = 0; + ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict); + continue; construct_skb: if (likely(ice_ring_uses_build_skb(rx_ring)))
@@ -1217,18 +1296,12 @@ construct_skb: /* exit
if we failed to retrieve a buffer */ if (!skb) { rx_ring->ring_stats->rx_stats.alloc_page_failed++; - rx_buf->act = ICE_XDP_CONSUMED; - if (unlikely(xdp_buff_has_frags(xdp))) - ice_set_rx_bufs_act(xdp, rx_ring, - ICE_XDP_CONSUMED); - xdp->data = NULL; - rx_ring->first_desc = ntc; - rx_ring->nr_frags = 0; - break; + xdp_verdict = ICE_XDP_CONSUMED; } - xdp->data = NULL; - rx_ring->first_desc = ntc; - rx_ring->nr_frags = 0; + ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict); + + if (!skb) + break; stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); if (unlikely(ice_test_staterr(rx_desc->wb.status_error0, @@ -1257,23 +1330,6 @@ construct_skb: total_rx_pkts++; } - first = rx_ring->first_desc; - while (cached_ntc != first) { - struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc]; - - if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) { - ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); - xdp_xmit |= buf->act; - } else if (buf->act & ICE_XDP_CONSUMED) { - buf->pagecnt_bias++; - } else if (buf->act == ICE_XDP_PASS) { - ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); - } - - ice_put_rx_buf(rx_ring, buf); - if (++cached_ntc >= cnt) - cached_ntc = 0; - } rx_ring->next_to_clean = ntc; /* return up to cleaned_count buffers to hardware */ failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring)); @@ -2368,7 +2424,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) ICE_TXD_CTX_QW1_CMD_S); ice_tstamp(tx_ring, skb, first, &offload); - if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF) + if ((ice_is_switchdev_running(vsi->back) || + ice_lag_is_switchdev_running(vsi->back)) && + vsi->type != ICE_VSI_SF) ice_eswitch_set_target_vsi(skb, &offload); if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index cb347c852ba9..806bce701df3 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -201,7 +201,6 @@ struct ice_rx_buf { struct page *page; unsigned int page_offset; unsigned int pgcnt; - unsigned int act; unsigned int pagecnt_bias; }; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h index 79f960c6680d..6cf32b404127 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h @@ -6,49 +6,6 @@ #include "ice.h" /** - * ice_set_rx_bufs_act - propagate Rx buffer action to frags - * @xdp: XDP buffer representing frame (linear and frags part) - * @rx_ring: Rx ring struct - * act: action to store onto Rx buffers related to XDP buffer parts - * - * Set action that should be taken before putting Rx buffer from first frag - * to the last. 
- */ -static inline void -ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring, - const unsigned int act) -{ - u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags; - u32 nr_frags = rx_ring->nr_frags + 1; - u32 idx = rx_ring->first_desc; - u32 cnt = rx_ring->count; - struct ice_rx_buf *buf; - - for (int i = 0; i < nr_frags; i++) { - buf = &rx_ring->rx_buf[idx]; - buf->act = act; - - if (++idx == cnt) - idx = 0; - } - - /* adjust pagecnt_bias on frags freed by XDP prog */ - if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) { - u32 delta = rx_ring->nr_frags - sinfo_frags; - - while (delta) { - if (idx == 0) - idx = cnt - 1; - else - idx--; - buf = &rx_ring->rx_buf[idx]; - buf->pagecnt_bias--; - delta--; - } - } -} - -/** * ice_test_staterr - tests bits in Rx descriptor status and error fields * @status_err_n: Rx descriptor status_error0 or status_error1 bits * @stat_err_bits: value to mask diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 4a9ef722635f..33a1a5934c0d 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -18,6 +18,7 @@ #include "ice_sbq_cmd.h" #include "ice_vlan_mode.h" #include "ice_fwlog.h" +#include <linux/wait.h> static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) { @@ -368,6 +369,7 @@ struct ice_ts_func_info { #define ICE_TS_TMR1_ENA_M BIT(26) #define ICE_TS_LL_TX_TS_READ_M BIT(28) #define ICE_TS_LL_TX_TS_INT_READ_M BIT(29) +#define ICE_TS_LL_PHY_TMR_UPDATE_M BIT(30) struct ice_ts_dev_info { /* Device specific info */ @@ -382,6 +384,7 @@ struct ice_ts_dev_info { u8 tmr1_ena; u8 ts_ll_read; u8 ts_ll_int_read; + u8 ll_phy_tmr_update; }; #define ICE_NAC_TOPO_PRIMARY_M BIT(0) @@ -848,6 +851,14 @@ struct ice_mbx_data { #define ICE_PORTS_PER_QUAD 4 #define ICE_GET_QUAD_NUM(port) ((port) / ICE_PORTS_PER_QUAD) +#define ATQBAL_FLAGS_INTR_IN_PROGRESS BIT(0) + +struct ice_e810_params { + /* The wait queue lock also protects the low latency interface */ + wait_queue_head_t atqbal_wq; + unsigned int atqbal_flags; +}; + struct ice_eth56g_params { u8 num_phys; bool onestep_ena; @@ -856,6 +867,7 @@ struct ice_eth56g_params { }; union ice_phy_params { + struct ice_e810_params e810; struct ice_eth56g_params eth56g; }; @@ -1214,4 +1226,9 @@ struct ice_aq_get_set_rss_lut_params { #define ICE_FW_API_REPORT_DFLT_CFG_MIN 7 #define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3 +/* AQ API version for Health Status support */ +#define ICE_FW_API_HEALTH_REPORT_MAJ 1 +#define ICE_FW_API_HEALTH_REPORT_MIN 7 +#define ICE_FW_API_HEALTH_REPORT_PATCH 6 + #endif /* _ICE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index c7c0c2f50c26..815ad0bfe832 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -1036,6 +1036,14 @@ void ice_initialize_vf_entry(struct ice_vf *vf) mutex_init(&vf->cfg_lock); } +void ice_deinitialize_vf_entry(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + + if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) + list_del(&vf->mbx_info.list_entry); +} + /** * ice_dis_vf_qs - Disable the VF queues * @vf: pointer to the VF structure diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h index 0c7e77c0a09f..5392b0404986 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h @@ -24,6 
+24,7 @@ #endif void ice_initialize_vf_entry(struct ice_vf *vf); +void ice_deinitialize_vf_entry(struct ice_vf *vf); void ice_dis_vf_qs(struct ice_vf *vf); int ice_check_vf_init(struct ice_vf *vf); enum virtchnl_status_code ice_err_to_virt_err(int err); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 334ae945d640..8975d2971bc3 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -801,35 +801,6 @@ out_failure: return result; } -static int -ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first, - struct xdp_buff *xdp, const unsigned int size) -{ - struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first); - - if (!size) - return 0; - - if (!xdp_buff_has_frags(first)) { - sinfo->nr_frags = 0; - sinfo->xdp_frags_size = 0; - xdp_buff_set_frags_flag(first); - } - - if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { - xsk_buff_free(first); - return -ENOMEM; - } - - __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, - virt_to_page(xdp->data_hard_start), - XDP_PACKET_HEADROOM, size); - sinfo->xdp_frags_size += size; - xsk_buff_add_frag(xdp); - - return 0; -} - /** * ice_clean_rx_irq_zc - consumes packets from the hardware ring * @rx_ring: AF_XDP Rx ring @@ -895,7 +866,8 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, if (!first) { first = xdp; - } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) { + } else if (likely(size) && !xsk_buff_add_frag(first, xdp)) { + xsk_buff_free(first); break; } diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c index 4849590a5591..b28991dd1870 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c @@ -376,6 +376,9 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD)) break; + /* Ensure no other fields are read until DD flag is checked */ + dma_rmb(); + /* strip off FW internal code */ desc_err = le16_to_cpu(desc->ret_val) & 0xff; @@ -563,6 +566,9 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, if (!(flags & IDPF_CTLQ_FLAG_DD)) break; + /* Ensure no other fields are read until DD flag is checked */ + dma_rmb(); + q_msg[i].vmvf_type = (flags & (IDPF_CTLQ_FLAG_FTYPE_VM | IDPF_CTLQ_FLAG_FTYPE_PF)) >> diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index b4fbb99bfad2..a3d6b8f198a8 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -2159,8 +2159,13 @@ static int idpf_open(struct net_device *netdev) idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev); + err = idpf_set_real_num_queues(vport); + if (err) + goto unlock; + err = idpf_vport_open(vport); +unlock: idpf_vport_ctrl_unlock(netdev); return err; diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c index f71d3182580b..b6c515d14cbf 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_main.c +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -174,7 +174,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); pci_set_drvdata(pdev, adapter); - adapter->init_wq = alloc_workqueue("%s-%s-init", 0, 0, + adapter->init_wq = alloc_workqueue("%s-%s-init", + WQ_UNBOUND | WQ_MEM_RECLAIM, 0, dev_driver_string(dev), dev_name(dev)); if (!adapter->init_wq) { @@ -183,7 +184,8 
@@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_free; } - adapter->serv_wq = alloc_workqueue("%s-%s-service", 0, 0, + adapter->serv_wq = alloc_workqueue("%s-%s-service", + WQ_UNBOUND | WQ_MEM_RECLAIM, 0, dev_driver_string(dev), dev_name(dev)); if (!adapter->serv_wq) { @@ -192,7 +194,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_serv_wq_alloc; } - adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 0, 0, + adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", + WQ_UNBOUND | WQ_MEM_RECLAIM, 0, dev_driver_string(dev), dev_name(dev)); if (!adapter->mbx_wq) { @@ -201,7 +204,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_mbx_wq_alloc; } - adapter->stats_wq = alloc_workqueue("%s-%s-stats", 0, 0, + adapter->stats_wq = alloc_workqueue("%s-%s-stats", + WQ_UNBOUND | WQ_MEM_RECLAIM, 0, dev_driver_string(dev), dev_name(dev)); if (!adapter->stats_wq) { @@ -210,7 +214,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_stats_wq_alloc; } - adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", 0, 0, + adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", + WQ_UNBOUND | WQ_MEM_RECLAIM, 0, dev_driver_string(dev), dev_name(dev)); if (!adapter->vc_event_wq) { diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 2fa9c36e33c9..977741c41498 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -3008,14 +3008,11 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb, return -EINVAL; rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len); - if (unlikely(rsc_segments == 1)) - return 0; NAPI_GRO_CB(skb)->count = rsc_segments; skb_shinfo(skb)->gso_size = rsc_seg_len; skb_reset_network_header(skb); - len = skb->len - skb_transport_offset(skb); if (ipv4) { struct iphdr *ipv4h = ip_hdr(skb); @@ -3024,6 +3021,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb, /* Reset and set transport header offset in skb */ skb_set_transport_header(skb, sizeof(struct iphdr)); + len = skb->len - skb_transport_offset(skb); /* Compute the TCP pseudo header checksum*/ tcp_hdr(skb)->check = @@ -3033,6 +3031,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb, skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + len = skb->len - skb_transport_offset(skb); tcp_hdr(skb)->check = ~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0); } @@ -3072,6 +3071,7 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb, idpf_rx_hash(rxq, skb, rx_desc, decoded); skb->protocol = eth_type_trans(skb, rxq->netdev); + skb_record_rx_queue(skb, rxq->idx); if (le16_get_bits(rx_desc->hdrlen_flags, VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M)) @@ -3080,8 +3080,6 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb, csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc); idpf_rx_csum(rxq, skb, csum_bits, decoded); - skb_record_rx_queue(skb, rxq->idx); - return 0; } diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index d46c95f91b0d..3d2413b8684f 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -517,8 +517,10 @@ static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter, retval = -ENXIO; goto only_unlock; case 
IDPF_VC_XN_WAITING: - dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n", - params->vc_op, params->timeout_ms); + dev_notice_ratelimited(&adapter->pdev->dev, + "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n", + params->vc_op, cookie, xn->vc_op, + xn->salt, params->timeout_ms); retval = -ETIME; break; case IDPF_VC_XN_COMPLETED_SUCCESS: @@ -612,14 +614,16 @@ idpf_vc_xn_forward_reply(struct idpf_adapter *adapter, return -EINVAL; } xn = &adapter->vcxn_mngr->ring[xn_idx]; + idpf_vc_xn_lock(xn); salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info); if (xn->salt != salt) { - dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n", - xn->salt, salt); + dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n", + xn->vc_op, xn->salt, xn->state, + ctlq_msg->cookie.mbx.chnl_opcode, salt); + idpf_vc_xn_unlock(xn); return -EINVAL; } - idpf_vc_xn_lock(xn); switch (xn->state) { case IDPF_VC_XN_WAITING: /* success */ @@ -3077,12 +3081,21 @@ init_failed: */ void idpf_vc_core_deinit(struct idpf_adapter *adapter) { + bool remove_in_prog; + if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags)) return; + /* Avoid transaction timeouts when called during reset */ + remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags); + if (!remove_in_prog) + idpf_vc_xn_shutdown(adapter->vcxn_mngr); + idpf_deinit_task(adapter); idpf_intr_rel(adapter); - idpf_vc_xn_shutdown(adapter->vcxn_mngr); + + if (remove_in_prog) + idpf_vc_xn_shutdown(adapter->vcxn_mngr); cancel_delayed_work_sync(&adapter->serv_task); cancel_delayed_work_sync(&adapter->mbx_task); diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile index 463c0d26b9d4..6c1b702fd992 100644 --- a/drivers/net/ethernet/intel/igb/Makefile +++ b/drivers/net/ethernet/intel/igb/Makefile @@ -8,4 +8,4 @@ obj-$(CONFIG_IGB) += igb.o igb-y := igb_main.o igb_ethtool.o e1000_82575.o \ e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \ - e1000_i210.o igb_ptp.o igb_hwmon.o + e1000_i210.o igb_ptp.o igb_hwmon.o igb_xsk.o diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 3c2dc7bdebb5..02f340280d20 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -18,8 +18,10 @@ #include <linux/i2c-algo-bit.h> #include <linux/pci.h> #include <linux/mdio.h> +#include <linux/lockdep.h> #include <net/xdp.h> +#include <net/xdp_sock_drv.h> struct igb_adapter; @@ -86,6 +88,7 @@ struct igb_adapter; #define IGB_XDP_CONSUMED BIT(0) #define IGB_XDP_TX BIT(1) #define IGB_XDP_REDIR BIT(2) +#define IGB_XDP_EXIT BIT(3) struct vf_data_storage { unsigned char vf_mac_addresses[ETH_ALEN]; @@ -255,6 +258,7 @@ enum igb_tx_flags { enum igb_tx_buf_type { IGB_TYPE_SKB = 0, IGB_TYPE_XDP, + IGB_TYPE_XSK }; /* wrapper around a pointer to a socket buffer, @@ -320,6 +324,7 @@ struct igb_ring { union { /* array of buffer info structs */ struct igb_tx_buffer *tx_buffer_info; struct igb_rx_buffer *rx_buffer_info; + struct xdp_buff **rx_buffer_info_zc; }; void *desc; /* descriptor ring memory */ unsigned long flags; /* ring specific flags */ @@ -357,6 +362,7 @@ struct igb_ring { }; }; struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *xsk_pool; } ____cacheline_internodealigned_in_smp; struct igb_q_vector { @@ -384,7 +390,8 @@ enum e1000_ring_flags_t { IGB_RING_FLAG_RX_SCTP_CSUM, IGB_RING_FLAG_RX_LB_VLAN_BSWAP, IGB_RING_FLAG_TX_CTX_IDX, - 
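idpf_vc_xn_forward_reply() above now takes the transaction lock before comparing salts, making the validity check atomic with a concurrent timeout that recycles the slot and bumps the salt. The general lock-then-validate shape, with a stand-in transaction type and hypothetical demo_ names:

#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/errno.h>

struct demo_xn {
	struct mutex lock;
	u8 salt;		/* bumped each time the slot is reused */
	bool waiting;
};

static int demo_xn_forward_reply(struct demo_xn *xn, u8 salt)
{
	mutex_lock(&xn->lock);

	/* Checked under the lock: a pre-lock check could pass and then
	 * race with a timeout path that reuses the slot before the
	 * state update below.
	 */
	if (xn->salt != salt) {
		mutex_unlock(&xn->lock);
		return -EINVAL;
	}

	xn->waiting = false;
	mutex_unlock(&xn->lock);
	return 0;
}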
IGB_RING_FLAG_TX_DETECT_HANG + IGB_RING_FLAG_TX_DETECT_HANG, + IGB_RING_FLAG_TX_DISABLED }; #define ring_uses_large_buffer(ring) \ @@ -731,12 +738,21 @@ int igb_setup_tx_resources(struct igb_ring *); int igb_setup_rx_resources(struct igb_ring *); void igb_free_tx_resources(struct igb_ring *); void igb_free_rx_resources(struct igb_ring *); +void igb_clean_tx_ring(struct igb_ring *tx_ring); +void igb_clean_rx_ring(struct igb_ring *rx_ring); void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); +void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status); +void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets, + unsigned int bytes); void igb_setup_tctl(struct igb_adapter *); void igb_setup_rctl(struct igb_adapter *); void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *); netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); +int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp); +void igb_process_skb_fields(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb); void igb_alloc_rx_buffers(struct igb_ring *, u16); void igb_update_stats(struct igb_adapter *); bool igb_has_link(struct igb_adapter *adapter); @@ -797,6 +813,33 @@ static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); } +/* This function assumes __netif_tx_lock is held by the caller. */ +static inline void igb_xdp_ring_update_tail(struct igb_ring *ring) +{ + lockdep_assert_held(&txring_txq(ring)->_xmit_lock); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + +static inline struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter) +{ + unsigned int r_idx = smp_processor_id(); + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static inline bool igb_xdp_is_enabled(struct igb_adapter *adapter) +{ + return !!READ_ONCE(adapter->xdp_prog); +} + int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input); int igb_erase_filter(struct igb_adapter *adapter, @@ -807,4 +850,17 @@ int igb_add_mac_steering_filter(struct igb_adapter *adapter, int igb_del_mac_steering_filter(struct igb_adapter *adapter, const u8 *addr, u8 queue, u8 flags); +struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter, + struct igb_ring *ring); +int igb_xsk_pool_setup(struct igb_adapter *adapter, + struct xsk_buff_pool *pool, + u16 qid); +bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring, + struct xsk_buff_pool *xsk_pool, u16 count); +void igb_clean_rx_ring_zc(struct igb_ring *rx_ring); +int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector, + struct xsk_buff_pool *xsk_pool, const int budget); +bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool); +int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags); + #endif /* _IGB_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 288a4bb2683a..d368b753a467 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -33,7 +33,6 @@ #include <linux/bpf_trace.h> #include <linux/pm_runtime.h> #include <linux/etherdevice.h> -#include <linux/lockdep.h> #ifdef CONFIG_IGB_DCA #include <linux/dca.h> 
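The two inline helpers moved into igb.h above are designed to be used as a pair: pick a TX ring keyed on the current CPU, post descriptors under the netdev TX queue lock, then bump the hardware tail once. A hedged usage sketch (the demo_ name is hypothetical, descriptor posting elided):

static void demo_xdp_flush(struct igb_adapter *adapter)
{
	struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
	struct netdev_queue *nq = txring_txq(tx_ring);

	__netif_tx_lock(nq, smp_processor_id());
	/* ... post one or more XDP descriptors on tx_ring ... */
	igb_xdp_ring_update_tail(tx_ring);	/* wmb(), then tail write */
	__netif_tx_unlock(nq);
}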
#endif @@ -116,8 +115,6 @@ static void igb_configure_tx(struct igb_adapter *); static void igb_configure_rx(struct igb_adapter *); static void igb_clean_all_tx_rings(struct igb_adapter *); static void igb_clean_all_rx_rings(struct igb_adapter *); -static void igb_clean_tx_ring(struct igb_ring *); -static void igb_clean_rx_ring(struct igb_ring *); static void igb_set_rx_mode(struct net_device *); static void igb_update_phy_info(struct timer_list *); static void igb_watchdog(struct timer_list *); @@ -475,12 +472,17 @@ rx_ring_summary: for (i = 0; i < rx_ring->count; i++) { const char *next_desc; - struct igb_rx_buffer *buffer_info; - buffer_info = &rx_ring->rx_buffer_info[i]; + dma_addr_t dma = (dma_addr_t)0; + struct igb_rx_buffer *buffer_info = NULL; rx_desc = IGB_RX_DESC(rx_ring, i); u0 = (struct my_u0 *)rx_desc; staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + if (!rx_ring->xsk_pool) { + buffer_info = &rx_ring->rx_buffer_info[i]; + dma = buffer_info->dma; + } + if (i == rx_ring->next_to_use) next_desc = " NTU"; else if (i == rx_ring->next_to_clean) @@ -500,11 +502,11 @@ rx_ring_summary: "R ", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), - (u64)buffer_info->dma, + (u64)dma, next_desc); if (netif_msg_pktdata(adapter) && - buffer_info->dma && buffer_info->page) { + buffer_info && dma && buffer_info->page) { print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, @@ -1990,7 +1992,11 @@ static void igb_configure(struct igb_adapter *adapter) */ for (i = 0; i < adapter->num_rx_queues; i++) { struct igb_ring *ring = adapter->rx_ring[i]; - igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); + if (ring->xsk_pool) + igb_alloc_rx_buffers_zc(ring, ring->xsk_pool, + igb_desc_unused(ring)); + else + igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); } } @@ -2911,37 +2917,20 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf) static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp) { + struct igb_adapter *adapter = netdev_priv(dev); + switch (xdp->command) { case XDP_SETUP_PROG: return igb_xdp_setup(dev, xdp); + case XDP_SETUP_XSK_POOL: + return igb_xsk_pool_setup(adapter, xdp->xsk.pool, + xdp->xsk.queue_id); default: return -EINVAL; } } -/* This function assumes __netif_tx_lock is held by the caller. */ -static void igb_xdp_ring_update_tail(struct igb_ring *ring) -{ - lockdep_assert_held(&txring_txq(ring)->_xmit_lock); - - /* Force memory writes to complete before letting h/w know there - * are new descriptors to fetch. - */ - wmb(); - writel(ring->next_to_use, ring->tail); -} - -static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter) -{ - unsigned int r_idx = smp_processor_id(); - - if (r_idx >= adapter->num_tx_queues) - r_idx = r_idx % adapter->num_tx_queues; - - return adapter->tx_ring[r_idx]; -} - -static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) +int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) { struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); int cpu = smp_processor_id(); @@ -2955,7 +2944,8 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) /* During program transitions its possible adapter->xdp_prog is assigned * but ring has not been configured yet. In this case simply abort xmit. */ - tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; + tx_ring = igb_xdp_is_enabled(adapter) ? 
+ igb_xdp_tx_queue_mapping(adapter) : NULL; if (unlikely(!tx_ring)) return IGB_XDP_CONSUMED; @@ -2988,10 +2978,14 @@ static int igb_xdp_xmit(struct net_device *dev, int n, /* During program transitions its possible adapter->xdp_prog is assigned * but ring has not been configured yet. In this case simply abort xmit. */ - tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; + tx_ring = igb_xdp_is_enabled(adapter) ? + igb_xdp_tx_queue_mapping(adapter) : NULL; if (unlikely(!tx_ring)) return -ENXIO; + if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags))) + return -ENXIO; + nq = txring_txq(tx_ring); __netif_tx_lock(nq, cpu); @@ -3042,6 +3036,7 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_setup_tc = igb_setup_tc, .ndo_bpf = igb_xdp, .ndo_xdp_xmit = igb_xdp_xmit, + .ndo_xsk_wakeup = igb_xsk_wakeup, }; /** @@ -3338,7 +3333,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->priv_flags |= IFF_SUPP_NOFCS; netdev->priv_flags |= IFF_UNICAST_FLT; - netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT; + netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_XSK_ZEROCOPY; /* MTU range: 68 - 9216 */ netdev->min_mtu = ETH_MIN_MTU; @@ -4364,6 +4360,8 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, u64 tdba = ring->dma; int reg_idx = ring->reg_idx; + WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring)); + wr32(E1000_TDLEN(reg_idx), ring->count * sizeof(union e1000_adv_tx_desc)); wr32(E1000_TDBAL(reg_idx), @@ -4424,7 +4422,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) xdp_rxq_info_unreg(&rx_ring->xdp_rxq); res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, - rx_ring->queue_index, 0); + rx_ring->queue_index, + rx_ring->q_vector->napi.napi_id); if (res < 0) { dev_err(dev, "Failed to register xdp_rxq index %u\n", rx_ring->queue_index); @@ -4720,12 +4719,17 @@ void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring) struct e1000_hw *hw = &adapter->hw; int reg_idx = ring->reg_idx; u32 srrctl = 0; + u32 buf_size; - srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; - if (ring_uses_large_buffer(ring)) - srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + if (ring->xsk_pool) + buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); + else if (ring_uses_large_buffer(ring)) + buf_size = IGB_RXBUFFER_3072; else - srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + buf_size = IGB_RXBUFFER_2048; + + srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT; srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; if (hw->mac.type >= e1000_82580) srrctl |= E1000_SRRCTL_TIMESTAMP; @@ -4757,8 +4761,17 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, u32 rxdctl = 0; xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); - WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, - MEM_TYPE_PAGE_SHARED, NULL)); + WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring)); + if (ring->xsk_pool) { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL)); + } /* disable the queue */ wr32(E1000_RXDCTL(reg_idx), 0); @@ -4785,9 +4798,12 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, rxdctl |= IGB_RX_HTHRESH << 8; rxdctl |= IGB_RX_WTHRESH << 16; - /* 
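In the igb_setup_srrctl() hunk above, the programmed Rx buffer size must track the XSK pool's frame size when zero-copy is active; xsk_pool_get_rx_frame_size() already accounts for the pool headroom. The selection logic in isolation (the demo_ name is hypothetical):

#include <net/xdp_sock_drv.h>

static u32 demo_rx_buf_size(struct igb_ring *ring)
{
	if (ring->xsk_pool)
		return xsk_pool_get_rx_frame_size(ring->xsk_pool);

	return ring_uses_large_buffer(ring) ? IGB_RXBUFFER_3072
					    : IGB_RXBUFFER_2048;
}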
initialize rx_buffer_info */ - memset(ring->rx_buffer_info, 0, - sizeof(struct igb_rx_buffer) * ring->count); + if (ring->xsk_pool) + memset(ring->rx_buffer_info_zc, 0, + sizeof(*ring->rx_buffer_info_zc) * ring->count); + else + memset(ring->rx_buffer_info, 0, + sizeof(*ring->rx_buffer_info) * ring->count); /* initialize Rx descriptor 0 */ rx_desc = IGB_RX_DESC(ring, 0); @@ -4888,19 +4904,24 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter) * igb_clean_tx_ring - Free Tx Buffers * @tx_ring: ring to be cleaned **/ -static void igb_clean_tx_ring(struct igb_ring *tx_ring) +void igb_clean_tx_ring(struct igb_ring *tx_ring) { u16 i = tx_ring->next_to_clean; struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + u32 xsk_frames = 0; while (i != tx_ring->next_to_use) { union e1000_adv_tx_desc *eop_desc, *tx_desc; /* Free all the Tx ring sk_buffs or xdp frames */ - if (tx_buffer->type == IGB_TYPE_SKB) + if (tx_buffer->type == IGB_TYPE_SKB) { dev_kfree_skb_any(tx_buffer->skb); - else + } else if (tx_buffer->type == IGB_TYPE_XDP) { xdp_return_frame(tx_buffer->xdpf); + } else if (tx_buffer->type == IGB_TYPE_XSK) { + xsk_frames++; + goto skip_for_xsk; + } /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -4931,6 +4952,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) DMA_TO_DEVICE); } +skip_for_xsk: tx_buffer->next_to_watch = NULL; /* move us one more past the eop_desc for start of next pkt */ @@ -4945,6 +4967,9 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) /* reset BQL for queue */ netdev_tx_reset_queue(txring_txq(tx_ring)); + if (tx_ring->xsk_pool && xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; @@ -4975,8 +5000,13 @@ void igb_free_rx_resources(struct igb_ring *rx_ring) rx_ring->xdp_prog = NULL; xdp_rxq_info_unreg(&rx_ring->xdp_rxq); - vfree(rx_ring->rx_buffer_info); - rx_ring->rx_buffer_info = NULL; + if (rx_ring->xsk_pool) { + vfree(rx_ring->rx_buffer_info_zc); + rx_ring->rx_buffer_info_zc = NULL; + } else { + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + } /* if not set, then don't free */ if (!rx_ring->desc) @@ -5007,13 +5037,18 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter) * igb_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring to free buffers from **/ -static void igb_clean_rx_ring(struct igb_ring *rx_ring) +void igb_clean_rx_ring(struct igb_ring *rx_ring) { u16 i = rx_ring->next_to_clean; dev_kfree_skb(rx_ring->skb); rx_ring->skb = NULL; + if (rx_ring->xsk_pool) { + igb_clean_rx_ring_zc(rx_ring); + goto skip_for_xsk; + } + /* Free all the Rx ring sk_buffs */ while (i != rx_ring->next_to_alloc) { struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; @@ -5041,6 +5076,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) i = 0; } +skip_for_xsk: rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -6467,6 +6503,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_BUSY; } + if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags))) + return NETDEV_TX_BUSY; + /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; first->type = IGB_TYPE_SKB; @@ -6622,7 +6661,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) struct igb_adapter *adapter = netdev_priv(netdev); int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD; - 
if (adapter->xdp_prog) { + if (igb_xdp_is_enabled(adapter)) { int i; for (i = 0; i < adapter->num_rx_queues; i++) { @@ -8195,6 +8234,7 @@ static int igb_poll(struct napi_struct *napi, int budget) struct igb_q_vector *q_vector = container_of(napi, struct igb_q_vector, napi); + struct xsk_buff_pool *xsk_pool; bool clean_complete = true; int work_done = 0; @@ -8206,7 +8246,12 @@ static int igb_poll(struct napi_struct *napi, int budget) clean_complete = igb_clean_tx_irq(q_vector, budget); if (q_vector->rx.ring) { - int cleaned = igb_clean_rx_irq(q_vector, budget); + int cleaned; + + xsk_pool = READ_ONCE(q_vector->rx.ring->xsk_pool); + cleaned = xsk_pool ? + igb_clean_rx_irq_zc(q_vector, xsk_pool, budget) : + igb_clean_rx_irq(q_vector, budget); work_done += cleaned; if (cleaned >= budget) @@ -8235,13 +8280,18 @@ static int igb_poll(struct napi_struct *napi, int budget) **/ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) { - struct igb_adapter *adapter = q_vector->adapter; - struct igb_ring *tx_ring = q_vector->tx.ring; - struct igb_tx_buffer *tx_buffer; - union e1000_adv_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0; + struct igb_adapter *adapter = q_vector->adapter; unsigned int budget = q_vector->tx.work_limit; + struct igb_ring *tx_ring = q_vector->tx.ring; unsigned int i = tx_ring->next_to_clean; + union e1000_adv_tx_desc *tx_desc; + struct igb_tx_buffer *tx_buffer; + struct xsk_buff_pool *xsk_pool; + int cpu = smp_processor_id(); + bool xsk_xmit_done = true; + struct netdev_queue *nq; + u32 xsk_frames = 0; if (test_bit(__IGB_DOWN, &adapter->state)) return true; @@ -8272,10 +8322,14 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) total_packets += tx_buffer->gso_segs; /* free the skb */ - if (tx_buffer->type == IGB_TYPE_SKB) + if (tx_buffer->type == IGB_TYPE_SKB) { napi_consume_skb(tx_buffer->skb, napi_budget); - else + } else if (tx_buffer->type == IGB_TYPE_XDP) { xdp_return_frame(tx_buffer->xdpf); + } else if (tx_buffer->type == IGB_TYPE_XSK) { + xsk_frames++; + goto skip_for_xsk; + } /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -8307,6 +8361,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) } } +skip_for_xsk: /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; tx_desc++; @@ -8335,6 +8390,21 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; + xsk_pool = READ_ONCE(tx_ring->xsk_pool); + if (xsk_pool) { + if (xsk_frames) + xsk_tx_completed(xsk_pool, xsk_frames); + if (xsk_uses_need_wakeup(xsk_pool)) + xsk_set_tx_need_wakeup(xsk_pool); + + nq = txring_txq(tx_ring); + __netif_tx_lock(nq, cpu); + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + xsk_xmit_done = igb_xmit_zc(tx_ring, xsk_pool); + __netif_tx_unlock(nq); + } + if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { struct e1000_hw *hw = &adapter->hw; @@ -8397,7 +8467,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) } } - return !!budget; + return !!budget && xsk_xmit_done; } /** @@ -8588,9 +8658,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, return skb; } -static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter, - struct igb_ring *rx_ring, - struct xdp_buff *xdp) +static int igb_run_xdp(struct igb_adapter *adapter, struct igb_ring *rx_ring, + struct 
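igb_poll() above snapshots the ring's pool pointer once with READ_ONCE() and then dispatches to either the zero-copy or the copy Rx path; the snapshot pairs with the WRITE_ONCE() done when the ring is configured. Reduced to its essence (igb_clean_rx_irq() is file-local, shown here only to mirror the real dispatch; the demo_ name is hypothetical):

static int demo_poll_rx(struct igb_q_vector *q_vector, int budget)
{
	struct xsk_buff_pool *pool = READ_ONCE(q_vector->rx.ring->xsk_pool);

	/* One snapshot per poll: the pointer may change across ring
	 * reconfiguration, but not within this NAPI iteration.
	 */
	return pool ? igb_clean_rx_irq_zc(q_vector, pool, budget)
		    : igb_clean_rx_irq(q_vector, budget);
}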
xdp_buff *xdp) { int err, result = IGB_XDP_PASS; struct bpf_prog *xdp_prog; @@ -8630,7 +8699,7 @@ out_failure: break; } xdp_out: - return ERR_PTR(-result); + return result; } static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring, @@ -8756,10 +8825,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) { - /* XDP packets use error pointer so abort at this point */ - if (IS_ERR(skb)) - return true; - if (unlikely((igb_test_staterr(rx_desc, E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { struct net_device *netdev = rx_ring->netdev; @@ -8786,9 +8851,9 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring, * order to populate the hash, checksum, VLAN, timestamp, protocol, and * other fields within the skb. **/ -static void igb_process_skb_fields(struct igb_ring *rx_ring, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) +void igb_process_skb_fields(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) { struct net_device *dev = rx_ring->netdev; @@ -8870,6 +8935,38 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring, rx_buffer->page = NULL; } +void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status) +{ + int cpu = smp_processor_id(); + struct netdev_queue *nq; + + if (status & IGB_XDP_REDIR) + xdp_do_flush(); + + if (status & IGB_XDP_TX) { + struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter); + + nq = txring_txq(tx_ring); + __netif_tx_lock(nq, cpu); + igb_xdp_ring_update_tail(tx_ring); + __netif_tx_unlock(nq); + } +} + +void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets, + unsigned int bytes) +{ + struct igb_ring *ring = q_vector->rx.ring; + + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.packets += packets; + ring->rx_stats.bytes += bytes; + u64_stats_update_end(&ring->rx_syncp); + + q_vector->rx.total_packets += packets; + q_vector->rx.total_bytes += bytes; +} + static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) { unsigned int total_bytes = 0, total_packets = 0; @@ -8877,12 +8974,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) struct igb_ring *rx_ring = q_vector->rx.ring; u16 cleaned_count = igb_desc_unused(rx_ring); struct sk_buff *skb = rx_ring->skb; - int cpu = smp_processor_id(); unsigned int xdp_xmit = 0; - struct netdev_queue *nq; struct xdp_buff xdp; u32 frame_sz = 0; int rx_buf_pgcnt; + int xdp_res = 0; /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ #if (PAGE_SIZE < 8192) @@ -8940,12 +9036,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size); #endif - skb = igb_run_xdp(adapter, rx_ring, &xdp); + xdp_res = igb_run_xdp(adapter, rx_ring, &xdp); } - if (IS_ERR(skb)) { - unsigned int xdp_res = -PTR_ERR(skb); - + if (xdp_res) { if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) { xdp_xmit |= xdp_res; igb_rx_buffer_flip(rx_ring, rx_buffer, size); @@ -8964,7 +9058,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) &xdp, timestamp); /* exit if we failed to retrieve a buffer */ - if (!skb) { + if (!xdp_res && !skb) { rx_ring->rx_stats.alloc_failed++; rx_buffer->pagecnt_bias++; break; @@ -8978,7 +9072,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) continue; /* verify the packet layout is correct */ - if (igb_cleanup_headers(rx_ring, rx_desc, skb)) { + if 
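With igb_run_xdp() returning the IGB_XDP_* bits directly instead of an ERR_PTR-encoded skb, the caller's handling collapses to a plain bit test, as the igb_clean_rx_irq() hunks show. A simplified consuming pattern (the demo_ name is hypothetical):

static bool demo_handle_verdict(unsigned int xdp_res, unsigned int *xdp_xmit)
{
	if (!xdp_res)
		return false;		/* IGB_XDP_PASS: build an skb */

	if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR))
		*xdp_xmit |= xdp_res;	/* flushed once per poll */

	return true;			/* buffer already consumed */
}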
(xdp_res || igb_cleanup_headers(rx_ring, rx_desc, skb)) { skb = NULL; continue; } @@ -9001,24 +9095,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) /* place incomplete frames back on ring for completion */ rx_ring->skb = skb; - if (xdp_xmit & IGB_XDP_REDIR) - xdp_do_flush(); - - if (xdp_xmit & IGB_XDP_TX) { - struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter); - - nq = txring_txq(tx_ring); - __netif_tx_lock(nq, cpu); - igb_xdp_ring_update_tail(tx_ring); - __netif_tx_unlock(nq); - } + if (xdp_xmit) + igb_finalize_xdp(adapter, xdp_xmit); - u64_stats_update_begin(&rx_ring->rx_syncp); - rx_ring->rx_stats.packets += total_packets; - rx_ring->rx_stats.bytes += total_bytes; - u64_stats_update_end(&rx_ring->rx_syncp); - q_vector->rx.total_packets += total_packets; - q_vector->rx.total_bytes += total_bytes; + igb_update_rx_stats(q_vector, total_packets, total_bytes); if (cleaned_count) igb_alloc_rx_buffers(rx_ring, cleaned_count); diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c new file mode 100644 index 000000000000..157d43787fa0 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/igb_xsk.c @@ -0,0 +1,562 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2018 Intel Corporation. */ + +#include <linux/bpf_trace.h> +#include <net/xdp_sock_drv.h> +#include <net/xdp.h> + +#include "e1000_hw.h" +#include "igb.h" + +static int igb_realloc_rx_buffer_info(struct igb_ring *ring, bool pool_present) +{ + int size = pool_present ? + sizeof(*ring->rx_buffer_info_zc) * ring->count : + sizeof(*ring->rx_buffer_info) * ring->count; + void *buff_info = vmalloc(size); + + if (!buff_info) + return -ENOMEM; + + if (pool_present) { + vfree(ring->rx_buffer_info); + ring->rx_buffer_info = NULL; + ring->rx_buffer_info_zc = buff_info; + } else { + vfree(ring->rx_buffer_info_zc); + ring->rx_buffer_info_zc = NULL; + ring->rx_buffer_info = buff_info; + } + + return 0; +} + +static void igb_txrx_ring_disable(struct igb_adapter *adapter, u16 qid) +{ + struct igb_ring *tx_ring = adapter->tx_ring[qid]; + struct igb_ring *rx_ring = adapter->rx_ring[qid]; + struct e1000_hw *hw = &adapter->hw; + + set_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags); + + wr32(E1000_TXDCTL(tx_ring->reg_idx), 0); + wr32(E1000_RXDCTL(rx_ring->reg_idx), 0); + + synchronize_net(); + + /* Rx/Tx share the same napi context. */ + napi_disable(&rx_ring->q_vector->napi); + + igb_clean_tx_ring(tx_ring); + igb_clean_rx_ring(rx_ring); + + memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); + memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats)); +} + +static void igb_txrx_ring_enable(struct igb_adapter *adapter, u16 qid) +{ + struct igb_ring *tx_ring = adapter->tx_ring[qid]; + struct igb_ring *rx_ring = adapter->rx_ring[qid]; + + igb_configure_tx_ring(adapter, tx_ring); + igb_configure_rx_ring(adapter, rx_ring); + + synchronize_net(); + + clear_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags); + + /* call igb_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + if (rx_ring->xsk_pool) + igb_alloc_rx_buffers_zc(rx_ring, rx_ring->xsk_pool, + igb_desc_unused(rx_ring)); + else + igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring)); + + /* Rx/Tx share the same napi context. 
*/ + napi_enable(&rx_ring->q_vector->napi); +} + +struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter, + struct igb_ring *ring) +{ + int qid = ring->queue_index; + struct xsk_buff_pool *pool; + + pool = xsk_get_pool_from_qid(adapter->netdev, qid); + + if (!igb_xdp_is_enabled(adapter)) + return NULL; + + return (pool && pool->dev) ? pool : NULL; +} + +static int igb_xsk_pool_enable(struct igb_adapter *adapter, + struct xsk_buff_pool *pool, + u16 qid) +{ + struct net_device *netdev = adapter->netdev; + struct igb_ring *rx_ring; + bool if_running; + int err; + + if (qid >= adapter->num_rx_queues) + return -EINVAL; + + if (qid >= netdev->real_num_rx_queues || + qid >= netdev->real_num_tx_queues) + return -EINVAL; + + err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IGB_RX_DMA_ATTR); + if (err) + return err; + + rx_ring = adapter->rx_ring[qid]; + if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter); + if (if_running) + igb_txrx_ring_disable(adapter, qid); + + if (if_running) { + err = igb_realloc_rx_buffer_info(rx_ring, true); + if (!err) { + igb_txrx_ring_enable(adapter, qid); + /* Kick start the NAPI context so that receiving will start */ + err = igb_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX); + } + + if (err) { + xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR); + return err; + } + } + + return 0; +} + +static int igb_xsk_pool_disable(struct igb_adapter *adapter, u16 qid) +{ + struct xsk_buff_pool *pool; + struct igb_ring *rx_ring; + bool if_running; + int err; + + pool = xsk_get_pool_from_qid(adapter->netdev, qid); + if (!pool) + return -EINVAL; + + rx_ring = adapter->rx_ring[qid]; + if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter); + if (if_running) + igb_txrx_ring_disable(adapter, qid); + + xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR); + + if (if_running) { + err = igb_realloc_rx_buffer_info(rx_ring, false); + if (err) + return err; + + igb_txrx_ring_enable(adapter, qid); + } + + return 0; +} + +int igb_xsk_pool_setup(struct igb_adapter *adapter, + struct xsk_buff_pool *pool, + u16 qid) +{ + return pool ? 
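igb_xsk_pool_enable() above follows the usual AF_XDP attach sequence: DMA-map the pool, quiesce the queue pair, swap the buffer-info array, re-enable, then kick NAPI so receive starts. A condensed sketch (the helpers are igb's file-local ones; the netif_running() checks and some unwinding are trimmed, and the demo_ name is hypothetical):

static int demo_attach_pool(struct igb_adapter *adapter,
			    struct xsk_buff_pool *pool, u16 qid)
{
	int err;

	err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IGB_RX_DMA_ATTR);
	if (err)
		return err;

	igb_txrx_ring_disable(adapter, qid);	/* stops HW rings + NAPI */
	err = igb_realloc_rx_buffer_info(adapter->rx_ring[qid], true);
	if (!err) {
		igb_txrx_ring_enable(adapter, qid);
		err = igb_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
	}
	if (err)
		xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);

	return err;
}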
igb_xsk_pool_enable(adapter, pool, qid) : + igb_xsk_pool_disable(adapter, qid); +} + +static u16 igb_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp, + union e1000_adv_rx_desc *rx_desc, u16 count) +{ + dma_addr_t dma; + u16 buffs; + int i; + + /* nothing to do */ + if (!count) + return 0; + + buffs = xsk_buff_alloc_batch(pool, xdp, count); + for (i = 0; i < buffs; i++) { + dma = xsk_buff_xdp_get_dma(*xdp); + rx_desc->read.pkt_addr = cpu_to_le64(dma); + rx_desc->wb.upper.length = 0; + + rx_desc++; + xdp++; + } + + return buffs; +} + +bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring, + struct xsk_buff_pool *xsk_pool, u16 count) +{ + u32 nb_buffs_extra = 0, nb_buffs = 0; + union e1000_adv_rx_desc *rx_desc; + u16 ntu = rx_ring->next_to_use; + u16 total_count = count; + struct xdp_buff **xdp; + + rx_desc = IGB_RX_DESC(rx_ring, ntu); + xdp = &rx_ring->rx_buffer_info_zc[ntu]; + + if (ntu + count >= rx_ring->count) { + nb_buffs_extra = igb_fill_rx_descs(xsk_pool, xdp, rx_desc, + rx_ring->count - ntu); + if (nb_buffs_extra != rx_ring->count - ntu) { + ntu += nb_buffs_extra; + goto exit; + } + rx_desc = IGB_RX_DESC(rx_ring, 0); + xdp = rx_ring->rx_buffer_info_zc; + ntu = 0; + count -= nb_buffs_extra; + } + + nb_buffs = igb_fill_rx_descs(xsk_pool, xdp, rx_desc, count); + ntu += nb_buffs; + if (ntu == rx_ring->count) + ntu = 0; + + /* clear the length for the next_to_use descriptor */ + rx_desc = IGB_RX_DESC(rx_ring, ntu); + rx_desc->wb.upper.length = 0; + +exit: + if (rx_ring->next_to_use != ntu) { + rx_ring->next_to_use = ntu; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(ntu, rx_ring->tail); + } + + return total_count == (nb_buffs + nb_buffs_extra); +} + +void igb_clean_rx_ring_zc(struct igb_ring *rx_ring) +{ + u16 ntc = rx_ring->next_to_clean; + u16 ntu = rx_ring->next_to_use; + + while (ntc != ntu) { + struct xdp_buff *xdp = rx_ring->rx_buffer_info_zc[ntc]; + + xsk_buff_free(xdp); + ntc++; + if (ntc >= rx_ring->count) + ntc = 0; + } +} + +static struct sk_buff *igb_construct_skb_zc(struct igb_ring *rx_ring, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + unsigned int totalsize = xdp->data_end - xdp->data_meta; + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + net_prefetch(xdp->data_meta); + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize); + if (unlikely(!skb)) + return NULL; + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + memcpy(__skb_put(skb, totalsize), xdp->data_meta, + ALIGN(totalsize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + + return skb; +} + +static int igb_run_xdp_zc(struct igb_adapter *adapter, struct igb_ring *rx_ring, + struct xdp_buff *xdp, struct xsk_buff_pool *xsk_pool, + struct bpf_prog *xdp_prog) +{ + int err, result = IGB_XDP_PASS; + u32 act; + + prefetchw(xdp->data_hard_start); /* xdp_frame write */ + + act = bpf_prog_run_xdp(xdp_prog, xdp); + + if (likely(act == XDP_REDIRECT)) { + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); + if (!err) + return IGB_XDP_REDIR; + + if (xsk_uses_need_wakeup(xsk_pool) && + err == -ENOBUFS) + result = IGB_XDP_EXIT; + else + result = IGB_XDP_CONSUMED; + goto out_failure; + } + + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + result = igb_xdp_xmit_back(adapter, xdp); + if (result == 
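igb_alloc_rx_buffers_zc() above fills the ring in at most two contiguous runs (next-to-use up to the ring end, then from slot 0), because xsk_buff_alloc_batch() writes into a linear array. The wrap handling in isolation (the demo_ name is hypothetical; descriptor writes and the tail bump are elided):

static u16 demo_fill_ring(struct xsk_buff_pool *pool, struct xdp_buff **slots,
			  u16 ring_size, u16 ntu, u16 count)
{
	u32 filled;

	if (ntu + count >= ring_size) {
		/* First run: from next-to-use up to the physical end. */
		filled = xsk_buff_alloc_batch(pool, &slots[ntu],
					      ring_size - ntu);
		if (filled != ring_size - ntu)
			return ntu + filled;	/* pool ran dry, stop here */
		ntu = 0;
		count -= filled;
	}

	/* Second run, or the only one when no wrap is needed. */
	return ntu + xsk_buff_alloc_batch(pool, &slots[ntu], count);
}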
IGB_XDP_CONSUMED) + goto out_failure; + break; + default: + bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_DROP: + result = IGB_XDP_CONSUMED; + break; + } + + return result; +} + +int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector, + struct xsk_buff_pool *xsk_pool, const int budget) +{ + struct igb_adapter *adapter = q_vector->adapter; + unsigned int total_bytes = 0, total_packets = 0; + struct igb_ring *rx_ring = q_vector->rx.ring; + u32 ntc = rx_ring->next_to_clean; + struct bpf_prog *xdp_prog; + unsigned int xdp_xmit = 0; + bool failure = false; + u16 entries_to_alloc; + struct sk_buff *skb; + + /* xdp_prog cannot be NULL in the ZC path */ + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + + while (likely(total_packets < budget)) { + union e1000_adv_rx_desc *rx_desc; + ktime_t timestamp = 0; + struct xdp_buff *xdp; + unsigned int size; + int xdp_res = 0; + + rx_desc = IGB_RX_DESC(rx_ring, ntc); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + xdp = rx_ring->rx_buffer_info_zc[ntc]; + xsk_buff_set_size(xdp, size); + xsk_buff_dma_sync_for_cpu(xdp); + + /* pull rx packet timestamp if available and valid */ + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { + int ts_hdr_len; + + ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector, + xdp->data, + ×tamp); + + xdp->data += ts_hdr_len; + xdp->data_meta += ts_hdr_len; + size -= ts_hdr_len; + } + + xdp_res = igb_run_xdp_zc(adapter, rx_ring, xdp, xsk_pool, + xdp_prog); + + if (xdp_res) { + if (likely(xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR))) { + xdp_xmit |= xdp_res; + } else if (xdp_res == IGB_XDP_EXIT) { + failure = true; + break; + } else if (xdp_res == IGB_XDP_CONSUMED) { + xsk_buff_free(xdp); + } + + total_packets++; + total_bytes += size; + ntc++; + if (ntc == rx_ring->count) + ntc = 0; + continue; + } + + skb = igb_construct_skb_zc(rx_ring, xdp, timestamp); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + break; + } + + xsk_buff_free(xdp); + ntc++; + if (ntc == rx_ring->count) + ntc = 0; + + if (eth_skb_pad(skb)) + continue; + + /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + igb_process_skb_fields(rx_ring, rx_desc, skb); + + napi_gro_receive(&q_vector->napi, skb); + + /* update budget accounting */ + total_packets++; + } + + rx_ring->next_to_clean = ntc; + + if (xdp_xmit) + igb_finalize_xdp(adapter, xdp_xmit); + + igb_update_rx_stats(q_vector, total_packets, total_bytes); + + entries_to_alloc = igb_desc_unused(rx_ring); + if (entries_to_alloc >= IGB_RX_BUFFER_WRITE) + failure |= !igb_alloc_rx_buffers_zc(rx_ring, xsk_pool, + entries_to_alloc); + + if (xsk_uses_need_wakeup(xsk_pool)) { + if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) + xsk_set_rx_need_wakeup(xsk_pool); + else + xsk_clear_rx_need_wakeup(xsk_pool); + + return (int)total_packets; + } + return failure ? 
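The tail of igb_clean_rx_irq_zc() above implements the AF_XDP need_wakeup contract: flag Rx as needing a user-space kick when allocation failed or the ring drained, clear it otherwise, and, when need_wakeup is off, report the full budget on failure so NAPI keeps polling instead of idling with buffers missing. Its minimal shape (the demo_ name is hypothetical):

static int demo_zc_poll_tail(struct xsk_buff_pool *pool, bool failure,
			     bool ring_empty, int budget, int packets)
{
	if (xsk_uses_need_wakeup(pool)) {
		if (failure || ring_empty)
			xsk_set_rx_need_wakeup(pool);
		else
			xsk_clear_rx_need_wakeup(pool);
		return packets;
	}

	return failure ? budget : packets;
}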
budget : (int)total_packets; +} + +bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool) +{ + unsigned int budget = igb_desc_unused(tx_ring); + u32 cmd_type, olinfo_status, nb_pkts, i = 0; + struct xdp_desc *descs = xsk_pool->tx_descs; + union e1000_adv_tx_desc *tx_desc = NULL; + struct igb_tx_buffer *tx_buffer_info; + unsigned int total_bytes = 0; + dma_addr_t dma; + + if (!netif_carrier_ok(tx_ring->netdev)) + return true; + + if (test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)) + return true; + + nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget); + if (!nb_pkts) + return true; + + while (nb_pkts-- > 0) { + dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr); + xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len); + + tx_buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + tx_buffer_info->bytecount = descs[i].len; + tx_buffer_info->type = IGB_TYPE_XSK; + tx_buffer_info->xdpf = NULL; + tx_buffer_info->gso_segs = 1; + tx_buffer_info->time_stamp = jiffies; + + tx_desc = IGB_TX_DESC(tx_ring, tx_ring->next_to_use); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + /* put descriptor type bits */ + cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT | + E1000_ADVTXD_DCMD_IFCS; + olinfo_status = descs[i].len << E1000_ADVTXD_PAYLEN_SHIFT; + + /* FIXME: This sets the Report Status (RS) bit for every + * descriptor. One nice to have optimization would be to set it + * only for the last descriptor in the whole batch. See Intel + * ice driver for an example on how to do it. + */ + cmd_type |= descs[i].len | IGB_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + + total_bytes += descs[i].len; + + i++; + tx_ring->next_to_use++; + tx_buffer_info->next_to_watch = tx_desc; + if (tx_ring->next_to_use == tx_ring->count) + tx_ring->next_to_use = 0; + } + + netdev_tx_sent_queue(txring_txq(tx_ring), total_bytes); + igb_xdp_ring_update_tail(tx_ring); + + return nb_pkts < budget; +} + +int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) +{ + struct igb_adapter *adapter = netdev_priv(dev); + struct e1000_hw *hw = &adapter->hw; + struct igb_ring *ring; + u32 eics = 0; + + if (test_bit(__IGB_DOWN, &adapter->state)) + return -ENETDOWN; + + if (!igb_xdp_is_enabled(adapter)) + return -EINVAL; + + if (qid >= adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[qid]; + + if (test_bit(IGB_RING_FLAG_TX_DISABLED, &ring->flags)) + return -ENETDOWN; + + if (!READ_ONCE(ring->xsk_pool)) + return -EINVAL; + + if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) { + /* Cause software interrupt */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + eics |= ring->q_vector->eims_value; + wr32(E1000_EICS, eics); + } else { + wr32(E1000_ICS, E1000_ICS_RXDMT0); + } + } + + return 0; +} diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index eac0f966e0e4..b8111ad9a9a8 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -337,6 +337,8 @@ struct igc_adapter { struct igc_led_classdev *leds; }; +void igc_set_queue_napi(struct igc_adapter *adapter, int q_idx, + struct napi_struct *napi); void igc_up(struct igc_adapter *adapter); void igc_down(struct igc_adapter *adapter); int igc_open(struct net_device *netdev); diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index d9d1a1a11daf..be8a49a86d09 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ 
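The FIXME in igb_xmit_zc() above points at a known optimization: because IGB_TXD_DCMD bundles EOP and RS, every descriptor currently requests a status write-back. A hypothetical variant of the loop body that sets RS only on the batch's last descriptor, assuming igb's E1000_ADVTXD_DCMD_EOP/RS bits; this is not what the driver does today, and the completion path would then need to walk to the RS descriptor, as the ice driver does:

	cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
		   E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_EOP |
		   descs[i].len;
	if (nb_pkts == 0)	/* last descriptor of this batch */
		cmd_type |= E1000_ADVTXD_DCMD_RS;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);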
b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -279,9 +279,4 @@ struct net_device *igc_get_hw_dev(struct igc_hw *hw); #define hw_dbg(format, arg...) \ netdev_dbg(igc_get_hw_dev(hw), format, ##arg) -s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); -s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); -void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); -void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); - #endif /* _IGC_HW_H_ */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 27872bdea9bd..733820a0c350 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -1096,6 +1096,7 @@ static int igc_init_empty_frame(struct igc_ring *ring, return -ENOMEM; } + buffer->type = IGC_TX_BUFFER_TYPE_SKB; buffer->skb = skb; buffer->protocol = 0; buffer->bytecount = skb->len; @@ -2123,10 +2124,6 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring, union igc_adv_rx_desc *rx_desc, struct sk_buff *skb) { - /* XDP packets use error pointer so abort at this point */ - if (IS_ERR(skb)) - return true; - if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { struct net_device *netdev = rx_ring->netdev; @@ -2515,8 +2512,7 @@ out_failure: } } -static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, - struct xdp_buff *xdp) +static int igc_xdp_run_prog(struct igc_adapter *adapter, struct xdp_buff *xdp) { struct bpf_prog *prog; int res; @@ -2530,7 +2526,7 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, res = __igc_xdp_run_prog(adapter, prog, xdp); out: - return ERR_PTR(-res); + return res; } /* This function assumes __netif_tx_lock is held by the caller. */ @@ -2585,6 +2581,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) struct sk_buff *skb = rx_ring->skb; u16 cleaned_count = igc_desc_unused(rx_ring); int xdp_status = 0, rx_buffer_pgcnt; + int xdp_res = 0; while (likely(total_packets < budget)) { struct igc_xdp_buff ctx = { .rx_ts = NULL }; @@ -2630,12 +2627,10 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) xdp_buff_clear_frags_flag(&ctx.xdp); ctx.rx_desc = rx_desc; - skb = igc_xdp_run_prog(adapter, &ctx.xdp); + xdp_res = igc_xdp_run_prog(adapter, &ctx.xdp); } - if (IS_ERR(skb)) { - unsigned int xdp_res = -PTR_ERR(skb); - + if (xdp_res) { switch (xdp_res) { case IGC_XDP_CONSUMED: rx_buffer->pagecnt_bias++; @@ -2657,7 +2652,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) skb = igc_construct_skb(rx_ring, rx_buffer, &ctx); /* exit if we failed to retrieve a buffer */ - if (!skb) { + if (!xdp_res && !skb) { rx_ring->rx_stats.alloc_failed++; rx_buffer->pagecnt_bias++; set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); @@ -2672,7 +2667,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) continue; /* verify the packet layout is correct */ - if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { + if (xdp_res || igc_cleanup_headers(rx_ring, rx_desc, skb)) { skb = NULL; continue; } @@ -2707,8 +2702,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) } static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, - struct xdp_buff *xdp) + struct igc_xdp_buff *ctx) { + struct xdp_buff *xdp = &ctx->xdp; unsigned int totalsize = xdp->data_end - xdp->data_meta; unsigned int metasize = xdp->data - xdp->data_meta; struct sk_buff *skb; @@ -2727,27 +2723,28 @@ 
static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, __skb_pull(skb, metasize); } + if (ctx->rx_ts) { + skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV; + skb_hwtstamps(skb)->netdev_data = ctx->rx_ts; + } + return skb; } static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, union igc_adv_rx_desc *desc, - struct xdp_buff *xdp, - ktime_t timestamp) + struct igc_xdp_buff *ctx) { struct igc_ring *ring = q_vector->rx.ring; struct sk_buff *skb; - skb = igc_construct_skb_zc(ring, xdp); + skb = igc_construct_skb_zc(ring, ctx); if (!skb) { ring->rx_stats.alloc_failed++; set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags); return; } - if (timestamp) - skb_hwtstamps(skb)->hwtstamp = timestamp; - if (igc_cleanup_headers(ring, desc, skb)) return; @@ -2783,7 +2780,6 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) union igc_adv_rx_desc *desc; struct igc_rx_buffer *bi; struct igc_xdp_buff *ctx; - ktime_t timestamp = 0; unsigned int size; int res; @@ -2813,6 +2809,8 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) */ bi->xdp->data_meta += IGC_TS_HDR_LEN; size -= IGC_TS_HDR_LEN; + } else { + ctx->rx_ts = NULL; } bi->xdp->data_end = bi->xdp->data + size; @@ -2821,7 +2819,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) res = __igc_xdp_run_prog(adapter, prog, bi->xdp); switch (res) { case IGC_XDP_PASS: - igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); + igc_dispatch_skb_zc(q_vector, desc, ctx); fallthrough; case IGC_XDP_CONSUMED: xsk_buff_free(bi->xdp); @@ -4948,6 +4946,22 @@ static int igc_sw_init(struct igc_adapter *adapter) return 0; } +void igc_set_queue_napi(struct igc_adapter *adapter, int vector, + struct napi_struct *napi) +{ + struct igc_q_vector *q_vector = adapter->q_vector[vector]; + + if (q_vector->rx.ring) + netif_queue_set_napi(adapter->netdev, + q_vector->rx.ring->queue_index, + NETDEV_QUEUE_TYPE_RX, napi); + + if (q_vector->tx.ring) + netif_queue_set_napi(adapter->netdev, + q_vector->tx.ring->queue_index, + NETDEV_QUEUE_TYPE_TX, napi); +} + /** * igc_up - Open the interface and prepare it to handle traffic * @adapter: board private structure @@ -4955,6 +4969,7 @@ static int igc_sw_init(struct igc_adapter *adapter) void igc_up(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; + struct napi_struct *napi; int i = 0; /* hardware has been reset, we need to reload some things */ @@ -4962,8 +4977,11 @@ void igc_up(struct igc_adapter *adapter) clear_bit(__IGC_DOWN, &adapter->state); - for (i = 0; i < adapter->num_q_vectors; i++) - napi_enable(&adapter->q_vector[i]->napi); + for (i = 0; i < adapter->num_q_vectors; i++) { + napi = &adapter->q_vector[i]->napi; + napi_enable(napi); + igc_set_queue_napi(adapter, i, napi); + } if (adapter->msix_entries) igc_configure_msix(adapter); @@ -5192,6 +5210,7 @@ void igc_down(struct igc_adapter *adapter) for (i = 0; i < adapter->num_q_vectors; i++) { if (adapter->q_vector[i]) { napi_synchronize(&adapter->q_vector[i]->napi); + igc_set_queue_napi(adapter, i, NULL); napi_disable(&adapter->q_vector[i]->napi); } } @@ -5576,6 +5595,9 @@ static int igc_request_msix(struct igc_adapter *adapter) q_vector); if (err) goto err_free; + + netif_napi_set_irq(&q_vector->napi, + adapter->msix_entries[vector].vector); } igc_configure_msix(adapter); @@ -6018,6 +6040,7 @@ static int __igc_open(struct net_device *netdev, bool resuming) struct igc_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = adapter->pdev; struct 
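igc_set_queue_napi() above is a thin per-q_vector wrapper over the core netif_queue_set_napi() API, which publishes the queue-to-NAPI mapping for the netdev netlink family to report. The underlying calls in isolation (the demo_ name is hypothetical; pass napi as NULL on the disable path, as igc does):

#include <linux/netdevice.h>

static void demo_link_queue_napi(struct net_device *dev, int rxq, int txq,
				 struct napi_struct *napi)
{
	netif_queue_set_napi(dev, rxq, NETDEV_QUEUE_TYPE_RX, napi);
	netif_queue_set_napi(dev, txq, NETDEV_QUEUE_TYPE_TX, napi);
}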
igc_hw *hw = &adapter->hw; + struct napi_struct *napi; int err = 0; int i = 0; @@ -6053,8 +6076,11 @@ static int __igc_open(struct net_device *netdev, bool resuming) clear_bit(__IGC_DOWN, &adapter->state); - for (i = 0; i < adapter->num_q_vectors; i++) - napi_enable(&adapter->q_vector[i]->napi); + for (i = 0; i < adapter->num_q_vectors; i++) { + napi = &adapter->q_vector[i]->napi; + napi_enable(napi); + igc_set_queue_napi(adapter, i, napi); + } /* Clear any pending interrupts. */ rd32(IGC_ICR); @@ -6779,45 +6805,6 @@ static const struct net_device_ops igc_netdev_ops = { .ndo_get_tstamp = igc_get_tstamp, }; -/* PCIe configuration access */ -void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) -{ - struct igc_adapter *adapter = hw->back; - - pci_read_config_word(adapter->pdev, reg, value); -} - -void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) -{ - struct igc_adapter *adapter = hw->back; - - pci_write_config_word(adapter->pdev, reg, *value); -} - -s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) -{ - struct igc_adapter *adapter = hw->back; - - if (!pci_is_pcie(adapter->pdev)) - return -IGC_ERR_CONFIG; - - pcie_capability_read_word(adapter->pdev, reg, value); - - return IGC_SUCCESS; -} - -s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) -{ - struct igc_adapter *adapter = hw->back; - - if (!pci_is_pcie(adapter->pdev)) - return -IGC_ERR_CONFIG; - - pcie_capability_write_word(adapter->pdev, reg, *value); - - return IGC_SUCCESS; -} - u32 igc_rd32(struct igc_hw *hw, u32 reg) { struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); @@ -7103,8 +7090,8 @@ static int igc_probe(struct pci_dev *pdev, INIT_WORK(&adapter->reset_task, igc_reset_task); INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); - hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - adapter->hrtimer.function = &igc_qbv_scheduling_timer; + hrtimer_setup(&adapter->hrtimer, &igc_qbv_scheduling_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); /* Initialize link properties that are user-changeable */ adapter->fc_autoneg = true; @@ -7338,7 +7325,7 @@ static void igc_deliver_wake_packet(struct net_device *netdev) netif_rx(skb); } -static int igc_resume(struct device *dev) +static int __igc_resume(struct device *dev, bool rpm) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); @@ -7381,7 +7368,11 @@ static int igc_resume(struct device *dev) wr32(IGC_WUS, ~0); if (netif_running(netdev)) { + if (!rpm) + rtnl_lock(); err = __igc_open(netdev, true); + if (!rpm) + rtnl_unlock(); if (!err) netif_device_attach(netdev); } @@ -7389,9 +7380,14 @@ static int igc_resume(struct device *dev) return err; } +static int igc_resume(struct device *dev) +{ + return __igc_resume(dev, false); +} + static int igc_runtime_resume(struct device *dev) { - return igc_resume(dev); + return __igc_resume(dev, true); } static int igc_suspend(struct device *dev) @@ -7436,14 +7432,18 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, struct net_device *netdev = pci_get_drvdata(pdev); struct igc_adapter *adapter = netdev_priv(netdev); + rtnl_lock(); netif_device_detach(netdev); - if (state == pci_channel_io_perm_failure) + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); return PCI_ERS_RESULT_DISCONNECT; + } if (netif_running(netdev)) igc_down(adapter); pci_disable_device(pdev); + rtnl_unlock(); /* Request a slot reset. 
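__igc_resume() above takes rtnl only on the system-resume path: runtime resume can be reached from contexts such as ndo_open that already hold rtnl, so locking unconditionally would deadlock. The conditional-locking shape (the demo_ name is hypothetical; __igc_open() is igc's file-local open worker):

static int demo_resume(struct device *dev, bool rpm)
{
	struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
	int err = 0;

	if (netif_running(netdev)) {
		if (!rpm)
			rtnl_lock();	/* runtime-PM callers hold it already */
		err = __igc_open(netdev, true);
		if (!rpm)
			rtnl_unlock();
	}

	return err;
}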
*/ return PCI_ERS_RESULT_NEED_RESET; @@ -7454,7 +7454,7 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. Implementation - * resembles the first-half of the igc_resume routine. + * resembles the first-half of the __igc_resume routine. **/ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) { @@ -7493,7 +7493,7 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) * * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the - * second-half of the igc_resume routine. + * second-half of the __igc_resume routine. */ static void igc_io_resume(struct pci_dev *pdev) { diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.c b/drivers/net/ethernet/intel/igc/igc_nvm.c index 58f81aba0144..efd121c03967 100644 --- a/drivers/net/ethernet/intel/igc/igc_nvm.c +++ b/drivers/net/ethernet/intel/igc/igc_nvm.c @@ -36,56 +36,6 @@ static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg) } /** - * igc_acquire_nvm - Generic request for access to EEPROM - * @hw: pointer to the HW structure - * - * Set the EEPROM access request bit and wait for EEPROM access grant bit. - * Return successful if access grant bit set, else clear the request for - * EEPROM access and return -IGC_ERR_NVM (-1). - */ -s32 igc_acquire_nvm(struct igc_hw *hw) -{ - s32 timeout = IGC_NVM_GRANT_ATTEMPTS; - u32 eecd = rd32(IGC_EECD); - s32 ret_val = 0; - - wr32(IGC_EECD, eecd | IGC_EECD_REQ); - eecd = rd32(IGC_EECD); - - while (timeout) { - if (eecd & IGC_EECD_GNT) - break; - udelay(5); - eecd = rd32(IGC_EECD); - timeout--; - } - - if (!timeout) { - eecd &= ~IGC_EECD_REQ; - wr32(IGC_EECD, eecd); - hw_dbg("Could not acquire NVM grant\n"); - ret_val = -IGC_ERR_NVM; - } - - return ret_val; -} - -/** - * igc_release_nvm - Release exclusive access to EEPROM - * @hw: pointer to the HW structure - * - * Stop any current commands to the EEPROM and clear the EEPROM request bit. 
- */ -void igc_release_nvm(struct igc_hw *hw) -{ - u32 eecd; - - eecd = rd32(IGC_EECD); - eecd &= ~IGC_EECD_REQ; - wr32(IGC_EECD, eecd); -} - -/** * igc_read_nvm_eerd - Reads EEPROM using EERD register * @hw: pointer to the HW structure * @offset: offset of word in the EEPROM to read diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.h b/drivers/net/ethernet/intel/igc/igc_nvm.h index f9fc2e9cfb03..ab78d0c64547 100644 --- a/drivers/net/ethernet/intel/igc/igc_nvm.h +++ b/drivers/net/ethernet/intel/igc/igc_nvm.h @@ -4,8 +4,6 @@ #ifndef _IGC_NVM_H_ #define _IGC_NVM_H_ -s32 igc_acquire_nvm(struct igc_hw *hw); -void igc_release_nvm(struct igc_hw *hw); s32 igc_read_mac_addr(struct igc_hw *hw); s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data); s32 igc_validate_nvm_checksum(struct igc_hw *hw); diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c index e27af72aada8..13bbd3346e01 100644 --- a/drivers/net/ethernet/intel/igc/igc_xdp.c +++ b/drivers/net/ethernet/intel/igc/igc_xdp.c @@ -13,6 +13,7 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, struct net_device *dev = adapter->netdev; bool if_running = netif_running(dev); struct bpf_prog *old_prog; + bool need_update; if (dev->mtu > ETH_DATA_LEN) { /* For now, the driver doesn't support XDP functionality with @@ -22,7 +23,8 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, return -EOPNOTSUPP; } - if (if_running) + need_update = !!adapter->xdp_prog != !!prog; + if (if_running && need_update) igc_close(dev); old_prog = xchg(&adapter->xdp_prog, prog); @@ -34,7 +36,7 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, else xdp_features_clear_redirect_target(dev); - if (if_running) + if (if_running && need_update) igc_open(dev); return 0; @@ -84,6 +86,7 @@ static int igc_xdp_enable_pool(struct igc_adapter *adapter, napi_disable(napi); } + igc_set_queue_napi(adapter, queue_id, NULL); set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); @@ -133,6 +136,7 @@ static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id) xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + igc_set_queue_napi(adapter, queue_id, napi); if (needs_reset) { napi_enable(napi); diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 965e5ce1b326..b456d102655a 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -# Copyright(c) 1999 - 2018 Intel Corporation. +# Copyright(c) 1999 - 2024 Intel Corporation. 
# # Makefile for the Intel(R) 10GbE PCI Express ethernet driver # @@ -9,7 +9,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o ixgbe-y := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \ - ixgbe_xsk.o + ixgbe_xsk.o ixgbe_e610.o ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe_dcb_82599.o ixgbe_dcb_nl.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 559b443c409f..e6a380d4929b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #ifndef _IXGBE_H_ #define _IXGBE_H_ @@ -20,6 +20,7 @@ #include "ixgbe_type.h" #include "ixgbe_common.h" #include "ixgbe_dcb.h" +#include "ixgbe_e610.h" #if IS_ENABLED(CONFIG_FCOE) #define IXGBE_FCOE #include "ixgbe_fcoe.h" @@ -173,6 +174,7 @@ enum ixgbe_tx_flags { #define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) #define IXGBE_82599_VF_DEVICE_ID 0x10ED #define IXGBE_X540_VF_DEVICE_ID 0x1515 +#define IXGBE_E610_VF_DEVICE_ID 0x57AD #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ { \ @@ -654,6 +656,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9) #define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10) #define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) +#define IXGBE_FLAG2_FW_ASYNC_EVENT BIT(12) #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) #define IXGBE_FLAG2_EEE_CAPABLE BIT(14) #define IXGBE_FLAG2_EEE_ENABLED BIT(15) @@ -661,6 +664,9 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_IPSEC_ENABLED BIT(17) #define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18) #define IXGBE_FLAG2_AUTO_DISABLE_VF BIT(19) +#define IXGBE_FLAG2_PHY_FW_LOAD_FAILED BIT(20) +#define IXGBE_FLAG2_NO_MEDIA BIT(21) +#define IXGBE_FLAG2_MOD_POWER_UNSUPPORTED BIT(22) /* Tx fast path data */ int num_tx_queues; @@ -793,6 +799,7 @@ struct ixgbe_adapter { u32 vferr_refcount; struct ixgbe_mac_addr *mac_table; struct kobject *info_kobj; + u16 lse_mask; #ifdef CONFIG_IXGBE_HWMON struct hwmon_buff *ixgbe_hwmon_buff; #endif /* CONFIG_IXGBE_HWMON */ @@ -849,6 +856,7 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: return IXGBE_MAX_RSS_INDICES_X550; default: return 0; @@ -874,6 +882,7 @@ enum ixgbe_state_t { __IXGBE_PTP_RUNNING, __IXGBE_PTP_TX_IN_PROGRESS, __IXGBE_RESET_REQUESTED, + __IXGBE_PHY_INIT_COMPLETE, }; struct ixgbe_cb { @@ -896,6 +905,7 @@ enum ixgbe_boards { board_x550em_x_fw, board_x550em_a, board_x550em_a_fw, + board_e610, }; extern const struct ixgbe_info ixgbe_82598_info; @@ -906,6 +916,7 @@ extern const struct ixgbe_info ixgbe_X550EM_x_info; extern const struct ixgbe_info ixgbe_x550em_x_fw_info; extern const struct ixgbe_info ixgbe_x550em_a_info; extern const struct ixgbe_info ixgbe_x550em_a_fw_info; +extern const struct ixgbe_info ixgbe_e610_info; #ifdef CONFIG_IXGBE_DCB extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops; #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index cdaf087b4e85..964988b4d58b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> @@ -1615,6 +1615,7 @@ int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 3be1bfb16498..7beaf6ea57f9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> @@ -58,6 +58,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_SFP: case IXGBE_DEV_ID_X550EM_A_SFP_N: + case IXGBE_DEV_ID_E610_SFP: supported = false; break; default: @@ -88,6 +89,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550EM_A_10G_T: case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: + case IXGBE_DEV_ID_E610_10G_T: + case IXGBE_DEV_ID_E610_2_5G_T: supported = true; break; default: @@ -469,9 +472,14 @@ int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) } } - if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) { + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_e610) { if (hw->phy.id == 0) hw->phy.ops.identify(hw); + } + + if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) { hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i); hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i); hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i); @@ -660,7 +668,11 @@ int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) hw->bus.type = ixgbe_bus_type_pci_express; /* Get the negotiated link width and speed from PCI config space */ - link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS); + if (hw->mac.type == ixgbe_mac_e610) + link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS_E610); + else + link_status = ixgbe_read_pci_cfg_word(hw, + IXGBE_PCI_LINK_STATUS); hw->bus.width = ixgbe_convert_bus_width(link_status); hw->bus.speed = ixgbe_convert_bus_speed(link_status); @@ -2918,6 +2930,10 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; break; + case ixgbe_mac_e610: + pcie_offset = IXGBE_PCIE_MSIX_E610_CAPS; + max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; + break; default: return 1; } @@ -3366,7 +3382,8 @@ int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, *speed = IXGBE_LINK_SPEED_1GB_FULL; break; case IXGBE_LINKS_SPEED_100_82599: - if ((hw->mac.type >= ixgbe_mac_X550) && + if ((hw->mac.type >= ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_e610) && (links_reg & IXGBE_LINKS_SPEED_NON_STD)) *speed = IXGBE_LINK_SPEED_5GB_FULL; else diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index f2709b10c2e5..19d6b6fa8fb3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. 
*/

 #include "ixgbe.h"
 #include <linux/dcbnl.h>
@@ -154,6 +154,7 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
+	case ixgbe_mac_e610:
 		for (j = 0; j < netdev->addr_len; j++, i++)
 			perm_addr[i] = adapter->hw.mac.san_addr[j];
 		break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
new file mode 100644
index 000000000000..cb07ecd8937d
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -0,0 +1,2658 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Intel Corporation. */
+
+#include "ixgbe_common.h"
+#include "ixgbe_e610.h"
+#include "ixgbe_x550.h"
+#include "ixgbe_type.h"
+#include "ixgbe_x540.h"
+#include "ixgbe_mbx.h"
+#include "ixgbe_phy.h"
+
+/**
+ * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
+ * be resent
+ * @opcode: ACI opcode
+ *
+ * Check if ACI command should be sent again depending on the provided opcode.
+ * It may happen when CSR is busy during link state changes.
+ *
+ * Return: true if the sending command routine should be repeated,
+ * otherwise false.
+ */
+static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
+{
+	switch (opcode) {
+	case ixgbe_aci_opc_disable_rxen:
+	case ixgbe_aci_opc_get_phy_caps:
+	case ixgbe_aci_opc_get_link_status:
+	case ixgbe_aci_opc_get_link_topo:
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
+ * Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Admin Command is sent using CSR by setting descriptor and buffer in specific
+ * registers.
+ *
+ * Return: the exit code of the operation.
+ * * - 0 - success.
+ * * - -EIO - CSR mechanism is not enabled.
+ * * - -EBUSY - CSR mechanism is busy.
+ * * - -EINVAL - buf_size is too big or
+ *   invalid argument buf or buf_size.
+ * * - -ETIME - Admin Command X command timeout.
+ * * - -EIO - Admin Command X invalid state of HICR register or
+ *   Admin Command failed because a bad opcode was returned, or
+ *   Admin Command failed with error Y.
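+ *
+ * Editorial illustration, not part of the original patch: a direct
+ * command uses only the descriptor, e.g.
+ *
+ *	struct ixgbe_aci_desc desc;
+ *
+ *	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+ *	err = ixgbe_aci_send_cmd_execute(hw, &desc, NULL, 0);
+ *
+ * while an indirect command passes a non-NULL buf with its buf_size.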
+ */
+static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw,
+				      struct ixgbe_aci_desc *desc,
+				      void *buf, u16 buf_size)
+{
+	u16 opcode, buf_tail_size = buf_size % 4;
+	u32 *raw_desc = (u32 *)desc;
+	u32 hicr, i, buf_tail = 0;
+	bool valid_buf = false;
+
+	hw->aci.last_status = IXGBE_ACI_RC_OK;
+
+	/* It's necessary to check if mechanism is enabled */
+	hicr = IXGBE_READ_REG(hw, IXGBE_PF_HICR);
+
+	if (!(hicr & IXGBE_PF_HICR_EN))
+		return -EIO;
+
+	if (hicr & IXGBE_PF_HICR_C) {
+		hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
+		return -EBUSY;
+	}
+
+	opcode = le16_to_cpu(desc->opcode);
+
+	if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE)
+		return -EINVAL;
+
+	if (buf)
+		desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_BUF);
+
+	if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_BUF)) {
+		if ((buf && !buf_size) ||
+		    (!buf && buf_size))
+			return -EINVAL;
+		if (buf && buf_size)
+			valid_buf = true;
+	}
+
+	if (valid_buf) {
+		if (buf_tail_size)
+			memcpy(&buf_tail, buf + buf_size - buf_tail_size,
+			       buf_tail_size);
+
+		if (((buf_size + 3) & ~0x3) > IXGBE_ACI_LG_BUF)
+			desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_LB);
+
+		desc->datalen = cpu_to_le16(buf_size);
+
+		if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_RD)) {
+			for (i = 0; i < buf_size / 4; i++)
+				IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), ((u32 *)buf)[i]);
+			if (buf_tail_size)
+				IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), buf_tail);
+		}
+	}
+
+	/* Descriptor is written to specific registers */
+	for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_PF_HIDA(i), raw_desc[i]);
+
+	/* SW has to set PF_HICR.C bit and clear PF_HICR.SV and
+	 * PF_HICR_EV
+	 */
+	hicr = (IXGBE_READ_REG(hw, IXGBE_PF_HICR) | IXGBE_PF_HICR_C) &
+	       ~(IXGBE_PF_HICR_SV | IXGBE_PF_HICR_EV);
+	IXGBE_WRITE_REG(hw, IXGBE_PF_HICR, hicr);
+
+#define MAX_SLEEP_RESP_US 1000
+#define MAX_TMOUT_RESP_SYNC_US 100000000
+
+	/* Wait for sync Admin Command response */
+	read_poll_timeout(IXGBE_READ_REG, hicr,
+			  (hicr & IXGBE_PF_HICR_SV) ||
+			  !(hicr & IXGBE_PF_HICR_C),
+			  MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_SYNC_US, true, hw,
+			  IXGBE_PF_HICR);
+
+#define MAX_TMOUT_RESP_ASYNC_US 150000000
+
+	/* Wait for async Admin Command response */
+	read_poll_timeout(IXGBE_READ_REG, hicr,
+			  (hicr & IXGBE_PF_HICR_EV) ||
+			  !(hicr & IXGBE_PF_HICR_C),
+			  MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_ASYNC_US, true, hw,
+			  IXGBE_PF_HICR);
+
+	/* Read sync Admin Command response */
+	if (hicr & IXGBE_PF_HICR_SV) {
+		for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+			raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA(i));
+	}
+
+	/* Read async Admin Command response */
+	if ((hicr & IXGBE_PF_HICR_EV) && !(hicr & IXGBE_PF_HICR_C)) {
+		for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+			raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA_2(i));
+	}
+
+	/* Handle timeout and invalid state of HICR register */
+	if (hicr & IXGBE_PF_HICR_C)
+		return -ETIME;
+
+	if (!(hicr & IXGBE_PF_HICR_SV) && !(hicr & IXGBE_PF_HICR_EV))
+		return -EIO;
+
+	/* For every command other than 0x0014 treat opcode mismatch
+	 * as an error. Response to 0x0014 command read from HIDA_2
+	 * is a descriptor of an event which is expected to contain
+	 * a different opcode than the command.
+ */
+	if (desc->opcode != cpu_to_le16(opcode) &&
+	    opcode != ixgbe_aci_opc_get_fw_event)
+		return -EIO;
+
+	if (desc->retval) {
+		hw->aci.last_status = (enum ixgbe_aci_err)
+			le16_to_cpu(desc->retval);
+		return -EIO;
+	}
+
+	/* Write response values to the buffer */
+	if (valid_buf) {
+		for (i = 0; i < buf_size / 4; i++)
+			((u32 *)buf)[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
+		if (buf_tail_size) {
+			buf_tail = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
+			memcpy(buf + buf_size - buf_tail_size, &buf_tail,
+			       buf_tail_size);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Helper function to send FW Admin Commands to the FW Admin Command Interface.
+ *
+ * Retry sending the FW Admin Command multiple times to the FW ACI
+ * if the EBUSY Admin Command error is returned.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+		       void *buf, u16 buf_size)
+{
+	u16 opcode = le16_to_cpu(desc->opcode);
+	struct ixgbe_aci_desc desc_cpy;
+	enum ixgbe_aci_err last_status;
+	u8 idx = 0, *buf_cpy = NULL;
+	bool is_cmd_for_retry;
+	unsigned long timeout;
+	int err;
+
+	is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
+	if (is_cmd_for_retry) {
+		if (buf) {
+			buf_cpy = kmalloc(buf_size, GFP_KERNEL);
+			if (!buf_cpy)
+				return -ENOMEM;
+			memcpy(buf_cpy, buf, buf_size);
+		}
+		desc_cpy = *desc;
+	}
+
+	timeout = jiffies + msecs_to_jiffies(IXGBE_ACI_SEND_TIMEOUT_MS);
+	do {
+		mutex_lock(&hw->aci.lock);
+		err = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
+		last_status = hw->aci.last_status;
+		mutex_unlock(&hw->aci.lock);
+
+		if (!is_cmd_for_retry || !err ||
+		    last_status != IXGBE_ACI_RC_EBUSY)
+			break;
+
+		if (buf)
+			memcpy(buf, buf_cpy, buf_size);
+		*desc = desc_cpy;
+
+		msleep(IXGBE_ACI_SEND_DELAY_TIME_MS);
+	} while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE &&
+		 time_before(jiffies, timeout));
+
+	kfree(buf_cpy);
+
+	return err;
+}
+
+/**
+ * ixgbe_aci_check_event_pending - check if there are any pending events
+ * @hw: pointer to the HW struct
+ *
+ * Determine if there are any pending events.
+ *
+ * Return: true if there are any currently pending events
+ * otherwise false.
+ */
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
+{
+	u32 ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
+	u32 fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
+
+	return !!(fwsts & ep_bit_mask);
+}
+
+/**
+ * ixgbe_aci_get_event - get an event from ACI
+ * @hw: pointer to the HW struct
+ * @e: event information structure
+ * @pending: optional flag signaling that there are more pending events
+ *
+ * Obtain an event from ACI and return its content
+ * through 'e' using ACI command (0x0014).
+ * Provide information if there are more events
+ * to retrieve through 'pending'.
+ *
+ * Return: the exit code of the operation.
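+ *
+ * Illustrative sketch (editorial, not part of the original patch) of a
+ * caller draining all pending events; handle_fw_event() is hypothetical:
+ *
+ *	struct ixgbe_aci_event e = { .msg_buf = buf, .buf_len = len };
+ *	bool pending = true;
+ *
+ *	while (pending && !ixgbe_aci_get_event(hw, &e, &pending))
+ *		handle_fw_event(&e);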
+ */ +int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e, + bool *pending) +{ + struct ixgbe_aci_desc desc; + int err; + + if (!e || (!e->msg_buf && e->buf_len)) + return -EINVAL; + + mutex_lock(&hw->aci.lock); + + /* Check if there are any events pending */ + if (!ixgbe_aci_check_event_pending(hw)) { + err = -ENOENT; + goto aci_get_event_exit; + } + + /* Obtain pending event */ + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event); + err = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len); + if (err) + goto aci_get_event_exit; + + /* Returned 0x0014 opcode indicates that no event was obtained */ + if (desc.opcode == cpu_to_le16(ixgbe_aci_opc_get_fw_event)) { + err = -ENOENT; + goto aci_get_event_exit; + } + + /* Determine size of event data */ + e->msg_len = min_t(u16, le16_to_cpu(desc.datalen), e->buf_len); + /* Write event descriptor to event info structure */ + memcpy(&e->desc, &desc, sizeof(e->desc)); + + /* Check if there are any further events pending */ + if (pending) + *pending = ixgbe_aci_check_event_pending(hw); + +aci_get_event_exit: + mutex_unlock(&hw->aci.lock); + + return err; +} + +/** + * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values. + * @desc: pointer to the temp descriptor (non DMA mem) + * @opcode: the opcode can be used to decide which flags to turn off or on + * + * Helper function to fill the descriptor desc with default values + * and the provided opcode. + */ +void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode) +{ + /* Zero out the desc. */ + memset(desc, 0, sizeof(*desc)); + desc->opcode = cpu_to_le16(opcode); + desc->flags = cpu_to_le16(IXGBE_ACI_FLAG_SI); +} + +/** + * ixgbe_aci_req_res - request a common resource + * @hw: pointer to the HW struct + * @res: resource ID + * @access: access type + * @sdp_number: resource number + * @timeout: the maximum time in ms that the driver may hold the resource + * + * Requests a common resource using the ACI command (0x0008). + * Specifies the maximum time the driver may hold the resource. + * If the requested resource is currently occupied by some other driver, + * a busy return value is returned and the timeout field value indicates the + * maximum time the current owner has to free it. + * + * Return: the exit code of the operation. + */ +static int ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res, + enum ixgbe_aci_res_access_type access, + u8 sdp_number, u32 *timeout) +{ + struct ixgbe_aci_cmd_req_res *cmd_resp; + struct ixgbe_aci_desc desc; + int err; + + cmd_resp = &desc.params.res_owner; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res); + + cmd_resp->res_id = cpu_to_le16(res); + cmd_resp->access_type = cpu_to_le16(access); + cmd_resp->res_number = cpu_to_le32(sdp_number); + cmd_resp->timeout = cpu_to_le32(*timeout); + *timeout = 0; + + err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0); + + /* If the resource is held by some other driver, the command completes + * with a busy return value and the timeout field indicates the maximum + * time the current owner of the resource has to free it. + */ + if (!err || hw->aci.last_status == IXGBE_ACI_RC_EBUSY) + *timeout = le32_to_cpu(cmd_resp->timeout); + + return err; +} + +/** + * ixgbe_aci_release_res - release a common resource using ACI + * @hw: pointer to the HW struct + * @res: resource ID + * @sdp_number: resource number + * + * Release a common resource using ACI command (0x0009). + * + * Return: the exit code of the operation. 
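+ *
+ * Editorial aside, not part of the original patch: this is the low-level
+ * counterpart of ixgbe_aci_req_res(); callers are expected to use the
+ * ixgbe_acquire_res()/ixgbe_release_res() wrappers below, e.g.
+ *
+ *	err = ixgbe_acquire_res(hw, res_id, access, timeout_ms);
+ *	if (!err) {
+ *		// ... use the shared resource ...
+ *		ixgbe_release_res(hw, res_id);
+ *	}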
+ */
+static int ixgbe_aci_release_res(struct ixgbe_hw *hw,
+				 enum ixgbe_aci_res_ids res, u8 sdp_number)
+{
+	struct ixgbe_aci_cmd_req_res *cmd;
+	struct ixgbe_aci_desc desc;
+
+	cmd = &desc.params.res_owner;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
+
+	cmd->res_id = cpu_to_le16(res);
+	cmd->res_number = cpu_to_le32(sdp_number);
+
+	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_acquire_res - acquire the ownership of a resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ * @access: access type (read or write)
+ * @timeout: timeout in milliseconds
+ *
+ * Make an attempt to acquire the ownership of a resource using
+ * ixgbe_aci_req_res to utilize the ACI.
+ * If some other driver has previously acquired the resource and
+ * performed any necessary updates, -EALREADY is returned, and the
+ * caller does not obtain the resource and has no further work to do.
+ * If needed, the function polls until the current lock owner times out.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+		      enum ixgbe_aci_res_access_type access, u32 timeout)
+{
+#define IXGBE_RES_POLLING_DELAY_MS	10
+	u32 delay = IXGBE_RES_POLLING_DELAY_MS;
+	u32 res_timeout = timeout;
+	u32 retry_timeout;
+	int err;
+
+	err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+	/* A return code of -EALREADY means that another driver has
+	 * previously acquired the resource and performed any necessary updates;
+	 * in this case the caller does not obtain the resource and has no
+	 * further work to do.
+	 */
+	if (err == -EALREADY)
+		return err;
+
+	/* If necessary, poll until the current lock owner times out.
+	 * Set retry_timeout to the timeout value reported by the FW in the
+	 * response to the "Request Resource Ownership" (0x0008) Admin Command
+	 * as it indicates the maximum time the current owner of the resource
+	 * is allowed to hold it.
+	 */
+	retry_timeout = res_timeout;
+	while (err && retry_timeout && res_timeout) {
+		msleep(delay);
+		retry_timeout = (retry_timeout > delay) ?
+			retry_timeout - delay : 0;
+		err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+		/* Success - lock acquired.
+		 * -EALREADY - lock free, no work to do.
+		 */
+		if (!err || err == -EALREADY)
+			break;
+	}
+
+	return err;
+}
+
+/**
+ * ixgbe_release_res - release a common resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ *
+ * Release a common resource using ixgbe_aci_release_res.
+ */
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
+{
+	u32 total_delay = 0;
+	int err;
+
+	err = ixgbe_aci_release_res(hw, res, 0);
+
+	/* There are some rare cases when trying to release the resource
+	 * results in an admin command timeout, so handle them correctly.
+	 */
+	while (err == -ETIME &&
+	       total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT) {
+		usleep_range(1000, 1500);
+		err = ixgbe_aci_release_res(hw, res, 0);
+		total_delay++;
+	}
+}
+
+/**
+ * ixgbe_parse_e610_caps - Parse common device/function capabilities
+ * @hw: pointer to the HW struct
+ * @caps: pointer to common capabilities structure
+ * @elem: the capability element to parse
+ * @prefix: message prefix for tracing capabilities
+ *
+ * Given a capability element, extract relevant details into the common
+ * capability structure.
+ *
+ * Return: true if the capability matches one of the common capability ids,
+ * false otherwise.
+ */ +static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw, + struct ixgbe_hw_caps *caps, + struct ixgbe_aci_cmd_list_caps_elem *elem, + const char *prefix) +{ + u32 logical_id = le32_to_cpu(elem->logical_id); + u32 phys_id = le32_to_cpu(elem->phys_id); + u32 number = le32_to_cpu(elem->number); + u16 cap = le16_to_cpu(elem->cap); + + switch (cap) { + case IXGBE_ACI_CAPS_VALID_FUNCTIONS: + caps->valid_functions = number; + break; + case IXGBE_ACI_CAPS_SRIOV: + caps->sr_iov_1_1 = (number == 1); + break; + case IXGBE_ACI_CAPS_VMDQ: + caps->vmdq = (number == 1); + break; + case IXGBE_ACI_CAPS_DCB: + caps->dcb = (number == 1); + caps->active_tc_bitmap = logical_id; + caps->maxtc = phys_id; + break; + case IXGBE_ACI_CAPS_RSS: + caps->rss_table_size = number; + caps->rss_table_entry_width = logical_id; + break; + case IXGBE_ACI_CAPS_RXQS: + caps->num_rxq = number; + caps->rxq_first_id = phys_id; + break; + case IXGBE_ACI_CAPS_TXQS: + caps->num_txq = number; + caps->txq_first_id = phys_id; + break; + case IXGBE_ACI_CAPS_MSIX: + caps->num_msix_vectors = number; + caps->msix_vector_first_id = phys_id; + break; + case IXGBE_ACI_CAPS_NVM_VER: + break; + case IXGBE_ACI_CAPS_MAX_MTU: + caps->max_mtu = number; + break; + case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE: + caps->pcie_reset_avoidance = (number > 0); + break; + case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT: + caps->reset_restrict_support = (number == 1); + break; + case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0: + case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1: + case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2: + case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3: + { + u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0; + + caps->ext_topo_dev_img_ver_high[index] = number; + caps->ext_topo_dev_img_ver_low[index] = logical_id; + caps->ext_topo_dev_img_part_num[index] = + FIELD_GET(IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M, phys_id); + caps->ext_topo_dev_img_load_en[index] = + (phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0; + caps->ext_topo_dev_img_prog_en[index] = + (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0; + break; + } + default: + /* Not one of the recognized common capabilities */ + return false; + } + + return true; +} + +/** + * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities. + */ +static void +ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw, + struct ixgbe_hw_dev_caps *dev_p, + struct ixgbe_aci_cmd_list_caps_elem *cap) +{ + dev_p->num_funcs = hweight32(le32_to_cpu(cap->number)); +} + +/** + * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse IXGBE_ACI_CAPS_VF for device capabilities. + */ +static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw, + struct ixgbe_hw_dev_caps *dev_p, + struct ixgbe_aci_cmd_list_caps_elem *cap) +{ + dev_p->num_vfs_exposed = le32_to_cpu(cap->number); +} + +/** + * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse IXGBE_ACI_CAPS_VSI for device capabilities. 
+ */
+static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
+				     struct ixgbe_hw_dev_caps *dev_p,
+				     struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+	dev_p->num_vsi_allocd_to_host = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_FD for device capabilities.
+ */
+static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
+				      struct ixgbe_hw_dev_caps *dev_p,
+				      struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+	dev_p->num_flow_director_fltr = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_dev_caps - Parse device capabilities
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @buf: buffer containing the device capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse the device (0x000B) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_e610_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the device capabilities structure.
+ */
+static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
+				 struct ixgbe_hw_dev_caps *dev_p,
+				 void *buf, u32 cap_count)
+{
+	struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+	u32 i;
+
+	cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+	memset(dev_p, 0, sizeof(*dev_p));
+
+	for (i = 0; i < cap_count; i++) {
+		u16 cap = le16_to_cpu(cap_resp[i].cap);
+
+		ixgbe_parse_e610_caps(hw, &dev_p->common_cap, &cap_resp[i],
+				      "dev caps");
+
+		switch (cap) {
+		case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+			ixgbe_parse_valid_functions_cap(hw, dev_p,
+							&cap_resp[i]);
+			break;
+		case IXGBE_ACI_CAPS_VF:
+			ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
+			break;
+		case IXGBE_ACI_CAPS_VSI:
+			ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
+			break;
+		case IXGBE_ACI_CAPS_FD:
+			ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
+			break;
+		default:
+			/* Don't list common capabilities as unknown */
+			break;
+		}
+	}
+}
+
+/**
+ * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VF.
+ */
+static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
+				     struct ixgbe_hw_func_caps *func_p,
+				     struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+	func_p->num_allocd_vfs = le32_to_cpu(cap->number);
+	func_p->vf_base_id = le32_to_cpu(cap->logical_id);
+}
+
+/**
+ * ixgbe_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
+ *
+ * Return: the number of resources per PF, or 0 if no PFs are available.
+ */
+static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
+{
+#define IXGBE_CAPS_VALID_FUNCS_M	GENMASK(7, 0)
+	u8 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
+			    IXGBE_CAPS_VALID_FUNCS_M);
+
+	return funcs ? (max / funcs) : 0;
+}
+
+/**
+ * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
+ */
+static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
+				      struct ixgbe_hw_func_caps *func_p,
+				      struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+	func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
+}
+
+/**
+ * ixgbe_parse_func_caps - Parse function capabilities
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @buf: buffer containing the function capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse the function (0x000A) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_e610_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the function capabilities structure.
+ */
+static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
+				  struct ixgbe_hw_func_caps *func_p,
+				  void *buf, u32 cap_count)
+{
+	struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+	u32 i;
+
+	cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+	memset(func_p, 0, sizeof(*func_p));
+
+	for (i = 0; i < cap_count; i++) {
+		u16 cap = le16_to_cpu(cap_resp[i].cap);
+
+		ixgbe_parse_e610_caps(hw, &func_p->common_cap,
+				      &cap_resp[i], "func caps");
+
+		switch (cap) {
+		case IXGBE_ACI_CAPS_VF:
+			ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
+			break;
+		case IXGBE_ACI_CAPS_VSI:
+			ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
+			break;
+		default:
+			/* Don't list common capabilities as unknown */
+			break;
+		}
+	}
+}
+
+/**
+ * ixgbe_aci_list_caps - query function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: a buffer to hold the capabilities
+ * @buf_size: size of the buffer
+ * @cap_count: if not NULL, set to the number of capabilities reported
+ * @opc: capabilities type to discover, device or function
+ *
+ * Get the function (0x000A) or device (0x000B) capabilities description from
+ * firmware and store it in the buffer.
+ *
+ * If the cap_count pointer is not NULL, then it is set to the number of
+ * capabilities firmware will report. Note that if the buffer size is too
+ * small, it is possible the command will return -ENOMEM. The
+ * cap_count will still be updated in this case. It is recommended that the
+ * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
+ * buffer that firmware could return) to avoid this.
+ *
+ * Return: the exit code of the operation.
+ * Exit code of -ENOMEM means the buffer size is too small.
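+ *
+ * Editorial sketch, not part of the original patch, following the
+ * recommendation above (this mirrors ixgbe_discover_dev_caps() below):
+ *
+ *	u32 count;
+ *	u8 *cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ *
+ *	if (cbuf)
+ *		err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ *					  &count, ixgbe_aci_opc_list_dev_caps);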
+ */
+int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+			u32 *cap_count, enum ixgbe_aci_opc opc)
+{
+	struct ixgbe_aci_cmd_list_caps *cmd;
+	struct ixgbe_aci_desc desc;
+	int err;
+
+	cmd = &desc.params.get_cap;
+
+	if (opc != ixgbe_aci_opc_list_func_caps &&
+	    opc != ixgbe_aci_opc_list_dev_caps)
+		return -EINVAL;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
+	err = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+	if (cap_count)
+		*cap_count = le32_to_cpu(cmd->count);
+
+	return err;
+}
+
+/**
+ * ixgbe_discover_dev_caps - Read and extract device capabilities
+ * @hw: pointer to the hardware structure
+ * @dev_caps: pointer to device capabilities structure
+ *
+ * Read the device capabilities and extract them into the dev_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+			    struct ixgbe_hw_dev_caps *dev_caps)
+{
+	u32 cap_count;
+	u8 *cbuf;
+	int err;
+
+	cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+	if (!cbuf)
+		return -ENOMEM;
+
+	/* Although the driver doesn't know the number of capabilities the
+	 * device will return, we can simply send a 4KB buffer, the maximum
+	 * possible size that firmware can return.
+	 */
+	cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+		    sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+	err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+				  &cap_count,
+				  ixgbe_aci_opc_list_dev_caps);
+	if (!err)
+		ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
+
+	kfree(cbuf);
+
+	return err;
+}
+
+/**
+ * ixgbe_discover_func_caps - Read and extract function capabilities
+ * @hw: pointer to the hardware structure
+ * @func_caps: pointer to function capabilities structure
+ *
+ * Read the function capabilities and extract them into the func_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+			     struct ixgbe_hw_func_caps *func_caps)
+{
+	u32 cap_count;
+	u8 *cbuf;
+	int err;
+
+	cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+	if (!cbuf)
+		return -ENOMEM;
+
+	/* Although the driver doesn't know the number of capabilities the
+	 * device will return, we can simply send a 4KB buffer, the maximum
+	 * possible size that firmware can return.
+	 */
+	cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+		    sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+	err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+				  &cap_count,
+				  ixgbe_aci_opc_list_func_caps);
+	if (!err)
+		ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
+
+	kfree(cbuf);
+
+	return err;
+}
+
+/**
+ * ixgbe_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ *
+ * Retrieve both device and function capabilities.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_caps(struct ixgbe_hw *hw)
+{
+	int err;
+
+	err = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
+	if (err)
+		return err;
+
+	return ixgbe_discover_func_caps(hw, &hw->func_caps);
+}
+
+/**
+ * ixgbe_aci_disable_rxen - disable RX
+ * @hw: pointer to the HW struct
+ *
+ * Request a safe disable of Receive Enable using ACI command (0x000C).
+ *
+ * Return: the exit code of the operation.
+ */ +int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw) +{ + struct ixgbe_aci_cmd_disable_rxen *cmd; + struct ixgbe_aci_desc desc; + + cmd = &desc.params.disable_rxen; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen); + + cmd->lport_num = hw->bus.func; + + return ixgbe_aci_send_cmd(hw, &desc, NULL, 0); +} + +/** + * ixgbe_aci_get_phy_caps - returns PHY capabilities + * @hw: pointer to the HW struct + * @qual_mods: report qualified modules + * @report_mode: report mode capabilities + * @pcaps: structure for PHY capabilities to be filled + * + * Returns the various PHY capabilities supported on the Port + * using ACI command (0x0600). + * + * Return: the exit code of the operation. + */ +int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode, + struct ixgbe_aci_cmd_get_phy_caps_data *pcaps) +{ + struct ixgbe_aci_cmd_get_phy_caps *cmd; + u16 pcaps_size = sizeof(*pcaps); + struct ixgbe_aci_desc desc; + int err; + + cmd = &desc.params.get_phy; + + if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M)) + return -EINVAL; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps); + + if (qual_mods) + cmd->param0 |= cpu_to_le16(IXGBE_ACI_GET_PHY_RQM); + + cmd->param0 |= cpu_to_le16(report_mode); + err = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size); + if (!err && report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) { + hw->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); + hw->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high); + memcpy(hw->link.link_info.module_type, &pcaps->module_type, + sizeof(hw->link.link_info.module_type)); + } + + return err; +} + +/** + * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data + * @caps: PHY ability structure to copy data from + * @cfg: PHY configuration structure to copy data to + * + * Helper function to copy data from PHY capabilities data structure + * to PHY configuration data structure + */ +void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps, + struct ixgbe_aci_cmd_set_phy_cfg_data *cfg) +{ + if (!caps || !cfg) + return; + + memset(cfg, 0, sizeof(*cfg)); + cfg->phy_type_low = caps->phy_type_low; + cfg->phy_type_high = caps->phy_type_high; + cfg->caps = caps->caps; + cfg->low_power_ctrl_an = caps->low_power_ctrl_an; + cfg->eee_cap = caps->eee_cap; + cfg->eeer_value = caps->eeer_value; + cfg->link_fec_opt = caps->link_fec_options; + cfg->module_compliance_enforcement = + caps->module_compliance_enforcement; +} + +/** + * ixgbe_aci_set_phy_cfg - set PHY configuration + * @hw: pointer to the HW struct + * @cfg: structure with PHY configuration data to be set + * + * Set the various PHY configuration parameters supported on the Port + * using ACI command (0x0601). + * One or more of the Set PHY config parameters may be ignored in an MFP + * mode as the PF may not have the privilege to set some of the PHY Config + * parameters. + * + * Return: the exit code of the operation. + */ +int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw, + struct ixgbe_aci_cmd_set_phy_cfg_data *cfg) +{ + struct ixgbe_aci_desc desc; + int err; + + if (!cfg) + return -EINVAL; + + /* Ensure that only valid bits of cfg->caps can be turned on. 
*/ + cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg); + desc.params.set_phy.lport_num = hw->bus.func; + desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD); + + err = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg)); + if (!err) + hw->phy.curr_user_phy_cfg = *cfg; + + return err; +} + +/** + * ixgbe_aci_set_link_restart_an - set up link and restart AN + * @hw: pointer to the HW struct + * @ena_link: if true: enable link, if false: disable link + * + * Function sets up the link and restarts the Auto-Negotiation over the link. + * + * Return: the exit code of the operation. + */ +int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link) +{ + struct ixgbe_aci_cmd_restart_an *cmd; + struct ixgbe_aci_desc desc; + + cmd = &desc.params.restart_an; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an); + + cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART; + cmd->lport_num = hw->bus.func; + if (ena_link) + cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE; + else + cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE; + + return ixgbe_aci_send_cmd(hw, &desc, NULL, 0); +} + +/** + * ixgbe_is_media_cage_present - check if media cage is present + * @hw: pointer to the HW struct + * + * Identify presence of media cage using the ACI command (0x06E0). + * + * Return: true if media cage is present, else false. If no cage, then + * media type is backplane or BASE-T. + */ +static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw) +{ + struct ixgbe_aci_cmd_get_link_topo *cmd; + struct ixgbe_aci_desc desc; + + cmd = &desc.params.get_link_topo; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo); + + cmd->addr.topo_params.node_type_ctx = + FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_CTX_M, + IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT); + + /* Set node type. */ + cmd->addr.topo_params.node_type_ctx |= + FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_TYPE_M, + IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE); + + /* Node type cage can be used to determine if cage is present. If AQC + * returns error (ENOENT), then no cage present. If no cage present then + * connection type is backplane or BASE-T. + */ + return !ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL); +} + +/** + * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type + * @hw: pointer to the HW struct + * + * Try to identify the media type based on the phy type. + * If more than one media type, the ixgbe_media_type_unknown is returned. + * First, phy_type_low is checked, then phy_type_high. + * If none are identified, the ixgbe_media_type_unknown is returned + * + * Return: type of a media based on phy type in form of enum. + */ +static enum ixgbe_media_type +ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw) +{ + struct ixgbe_link_status *hw_link_info; + + if (!hw) + return ixgbe_media_type_unknown; + + hw_link_info = &hw->link.link_info; + if (hw_link_info->phy_type_low && hw_link_info->phy_type_high) + /* If more than one media type is selected, report unknown */ + return ixgbe_media_type_unknown; + + if (hw_link_info->phy_type_low) { + /* 1G SGMII is a special case where some DA cable PHYs + * may show this as an option when it really shouldn't + * be since SGMII is meant to be between a MAC and a PHY + * in a backplane. 
Try to detect this case and handle it + */ + if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII && + (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] == + IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE || + hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] == + IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE)) + return ixgbe_media_type_da; + + switch (hw_link_info->phy_type_low) { + case IXGBE_PHY_TYPE_LOW_1000BASE_SX: + case IXGBE_PHY_TYPE_LOW_1000BASE_LX: + case IXGBE_PHY_TYPE_LOW_10GBASE_SR: + case IXGBE_PHY_TYPE_LOW_10GBASE_LR: + case IXGBE_PHY_TYPE_LOW_25GBASE_SR: + case IXGBE_PHY_TYPE_LOW_25GBASE_LR: + return ixgbe_media_type_fiber; + case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: + case IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: + return ixgbe_media_type_fiber; + case IXGBE_PHY_TYPE_LOW_100BASE_TX: + case IXGBE_PHY_TYPE_LOW_1000BASE_T: + case IXGBE_PHY_TYPE_LOW_2500BASE_T: + case IXGBE_PHY_TYPE_LOW_5GBASE_T: + case IXGBE_PHY_TYPE_LOW_10GBASE_T: + case IXGBE_PHY_TYPE_LOW_25GBASE_T: + return ixgbe_media_type_copper; + case IXGBE_PHY_TYPE_LOW_10G_SFI_DA: + case IXGBE_PHY_TYPE_LOW_25GBASE_CR: + case IXGBE_PHY_TYPE_LOW_25GBASE_CR_S: + case IXGBE_PHY_TYPE_LOW_25GBASE_CR1: + return ixgbe_media_type_da; + case IXGBE_PHY_TYPE_LOW_25G_AUI_C2C: + if (ixgbe_is_media_cage_present(hw)) + return ixgbe_media_type_aui; + fallthrough; + case IXGBE_PHY_TYPE_LOW_1000BASE_KX: + case IXGBE_PHY_TYPE_LOW_2500BASE_KX: + case IXGBE_PHY_TYPE_LOW_2500BASE_X: + case IXGBE_PHY_TYPE_LOW_5GBASE_KR: + case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1: + case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C: + case IXGBE_PHY_TYPE_LOW_25GBASE_KR: + case IXGBE_PHY_TYPE_LOW_25GBASE_KR1: + case IXGBE_PHY_TYPE_LOW_25GBASE_KR_S: + return ixgbe_media_type_backplane; + } + } else { + switch (hw_link_info->phy_type_high) { + case IXGBE_PHY_TYPE_HIGH_10BASE_T: + return ixgbe_media_type_copper; + } + } + return ixgbe_media_type_unknown; +} + +/** + * ixgbe_update_link_info - update status of the HW network link + * @hw: pointer to the HW struct + * + * Update the status of the HW network link. + * + * Return: the exit code of the operation. + */ +int ixgbe_update_link_info(struct ixgbe_hw *hw) +{ + struct ixgbe_aci_cmd_get_phy_caps_data *pcaps; + struct ixgbe_link_status *li; + int err; + + if (!hw) + return -EINVAL; + + li = &hw->link.link_info; + + err = ixgbe_aci_get_link_info(hw, true, NULL); + if (err) + return err; + + if (!(li->link_info & IXGBE_ACI_MEDIA_AVAILABLE)) + return 0; + + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return -ENOMEM; + + err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, + pcaps); + + if (!err) + memcpy(li->module_type, &pcaps->module_type, + sizeof(li->module_type)); + + kfree(pcaps); + + return err; +} + +/** + * ixgbe_get_link_status - get status of the HW network link + * @hw: pointer to the HW struct + * @link_up: pointer to bool (true/false = linkup/linkdown) + * + * Variable link_up is true if link is up, false if link is down. + * The variable link_up is invalid if status is non zero. As a + * result of this call, link status reporting becomes enabled + * + * Return: the exit code of the operation. 
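+ *
+ * Illustrative use (editorial, not part of the original patch); this is
+ * the pattern ixgbe_check_link_e610() below follows:
+ *
+ *	bool link_up;
+ *	u16 speed;
+ *
+ *	hw->link.get_link_info = true;	// request fresh info from FW
+ *	if (!ixgbe_get_link_status(hw, &link_up) && link_up)
+ *		speed = hw->link.link_info.link_speed;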
+ */
+int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
+{
+	if (!hw || !link_up)
+		return -EINVAL;
+
+	if (hw->link.get_link_info) {
+		int err = ixgbe_update_link_info(hw);
+
+		if (err)
+			return err;
+	}
+
+	*link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
+
+	return 0;
+}
+
+/**
+ * ixgbe_aci_get_link_info - get the link status
+ * @hw: pointer to the HW struct
+ * @ena_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ *
+ * Get the current Link Status using ACI command (0x0607).
+ * If a non-NULL link structure is provided, the current link
+ * status is also written to it.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+			    struct ixgbe_link_status *link)
+{
+	struct ixgbe_aci_cmd_get_link_status_data link_data = {};
+	struct ixgbe_aci_cmd_get_link_status *resp;
+	struct ixgbe_link_status *li_old, *li;
+	struct ixgbe_fc_info *hw_fc_info;
+	struct ixgbe_aci_desc desc;
+	bool tx_pause, rx_pause;
+	u8 cmd_flags;
+	int err;
+
+	if (!hw)
+		return -EINVAL;
+
+	li_old = &hw->link.link_info_old;
+	li = &hw->link.link_info;
+	hw_fc_info = &hw->fc;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+	cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
+	resp = &desc.params.get_link_status;
+	resp->cmd_flags = cpu_to_le16(cmd_flags);
+	resp->lport_num = hw->bus.func;
+
+	err = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+	if (err)
+		return err;
+
+	/* Save off old link status information. */
+	*li_old = *li;
+
+	/* Update current link status information. */
+	li->link_speed = le16_to_cpu(link_data.link_speed);
+	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
+	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
+	li->link_info = link_data.link_info;
+	li->link_cfg_err = link_data.link_cfg_err;
+	li->an_info = link_data.an_info;
+	li->ext_info = link_data.ext_info;
+	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
+	li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
+	li->topo_media_conflict = link_data.topo_media_conflict;
+	li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
+				      IXGBE_ACI_CFG_PACING_TYPE_M);
+
+	/* Update fc info. */
+	tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
+	rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
+	if (tx_pause && rx_pause)
+		hw_fc_info->current_mode = ixgbe_fc_full;
+	else if (tx_pause)
+		hw_fc_info->current_mode = ixgbe_fc_tx_pause;
+	else if (rx_pause)
+		hw_fc_info->current_mode = ixgbe_fc_rx_pause;
+	else
+		hw_fc_info->current_mode = ixgbe_fc_none;
+
+	li->lse_ena = !!(le16_to_cpu(resp->cmd_flags) &
+			 IXGBE_ACI_LSE_IS_ENABLED);
+
+	/* Save link status information. */
+	if (link)
+		*link = *li;
+
+	/* Flag cleared so calling functions don't call ACI again. */
+	hw->link.get_link_info = false;
+
+	return 0;
+}
+
+/**
+ * ixgbe_aci_set_event_mask - set event mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ *
+ * Set the event mask using ACI command (0x0613).
+ *
+ * Return: the exit code of the operation.
+ */ +int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask) +{ + struct ixgbe_aci_cmd_set_event_mask *cmd; + struct ixgbe_aci_desc desc; + + cmd = &desc.params.set_event_mask; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask); + + cmd->lport_num = port_num; + + cmd->event_mask = cpu_to_le16(mask); + return ixgbe_aci_send_cmd(hw, &desc, NULL, 0); +} + +/** + * ixgbe_configure_lse - enable/disable link status events + * @hw: pointer to the HW struct + * @activate: true for enable lse, false otherwise + * @mask: event mask to be set; a set bit means deactivation of the + * corresponding event + * + * Set the event mask and then enable or disable link status events + * + * Return: the exit code of the operation. + */ +int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask) +{ + int err; + + err = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask); + if (err) + return err; + + /* Enabling link status events generation by fw. */ + return ixgbe_aci_get_link_info(hw, activate, NULL); +} + +/** + * ixgbe_get_media_type_e610 - Gets media type + * @hw: pointer to the HW struct + * + * In order to get the media type, the function gets PHY + * capabilities and later on use them to identify the PHY type + * checking phy_type_high and phy_type_low. + * + * Return: the type of media in form of ixgbe_media_type enum + * or ixgbe_media_type_unknown in case of an error. + */ +enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw) +{ + struct ixgbe_aci_cmd_get_phy_caps_data pcaps; + int rc; + + rc = ixgbe_update_link_info(hw); + if (rc) + return ixgbe_media_type_unknown; + + /* If there is no link but PHY (dongle) is available SW should use + * Get PHY Caps admin command instead of Get Link Status, find most + * significant bit that is set in PHY types reported by the command + * and use it to discover media type. + */ + if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) && + (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) { + int highest_bit; + + /* Get PHY Capabilities */ + rc = ixgbe_aci_get_phy_caps(hw, false, + IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, + &pcaps); + if (rc) + return ixgbe_media_type_unknown; + + highest_bit = fls64(le64_to_cpu(pcaps.phy_type_high)); + if (highest_bit) { + hw->link.link_info.phy_type_high = + BIT_ULL(highest_bit - 1); + hw->link.link_info.phy_type_low = 0; + } else { + highest_bit = fls64(le64_to_cpu(pcaps.phy_type_low)); + if (highest_bit) + hw->link.link_info.phy_type_low = + BIT_ULL(highest_bit - 1); + } + } + + /* Based on link status or search above try to discover media type. */ + hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw); + + return hw->phy.media_type; +} + +/** + * ixgbe_setup_link_e610 - Set up link + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait: true when waiting for completion is needed + * + * Set up the link with the specified speed. + * + * Return: the exit code of the operation. 
+ */ +int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait) +{ + /* Simply request FW to perform proper PHY setup */ + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); +} + +/** + * ixgbe_check_link_e610 - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Determine if the link is up and the current link speed + * using ACI command (0x0607). + * + * Return: the exit code of the operation. + */ +int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + int err; + u32 i; + + if (!speed || !link_up) + return -EINVAL; + + /* Set get_link_info flag to ensure that fresh + * link information will be obtained from FW + * by sending Get Link Status admin command. + */ + hw->link.get_link_info = true; + + /* Update link information in adapter context. */ + err = ixgbe_get_link_status(hw, link_up); + if (err) + return err; + + /* Wait for link up if it was requested. */ + if (link_up_wait_to_complete && !(*link_up)) { + for (i = 0; i < hw->mac.max_link_up_time; i++) { + msleep(100); + hw->link.get_link_info = true; + err = ixgbe_get_link_status(hw, link_up); + if (err) + return err; + if (*link_up) + break; + } + } + + /* Use link information in adapter context updated by the call + * to ixgbe_get_link_status() to determine current link speed. + * Link speed information is valid only when link up was + * reported by FW. + */ + if (*link_up) { + switch (hw->link.link_info.link_speed) { + case IXGBE_ACI_LINK_SPEED_10MB: + *speed = IXGBE_LINK_SPEED_10_FULL; + break; + case IXGBE_ACI_LINK_SPEED_100MB: + *speed = IXGBE_LINK_SPEED_100_FULL; + break; + case IXGBE_ACI_LINK_SPEED_1000MB: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_ACI_LINK_SPEED_2500MB: + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + break; + case IXGBE_ACI_LINK_SPEED_5GB: + *speed = IXGBE_LINK_SPEED_5GB_FULL; + break; + case IXGBE_ACI_LINK_SPEED_10GB: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + break; + } + } else { + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } + + return 0; +} + +/** + * ixgbe_get_link_capabilities_e610 - Determine link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determine speed and AN parameters of a link. + * + * Return: the exit code of the operation. + */ +int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + if (!speed || !autoneg) + return -EINVAL; + + *autoneg = true; + *speed = hw->phy.speeds_supported; + + return 0; +} + +/** + * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode + * @hw: pointer to hardware structure + * @cfg: PHY configuration data to set FC mode + * @req_mode: FC mode to configure + * + * Configures PHY Flow Control according to the provided configuration. + * + * Return: the exit code of the operation. 
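+ *
+ * Editorial sketch, not part of the original patch; this is the sequence
+ * ixgbe_setup_fc_e610() below uses:
+ *
+ *	ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+ *	err = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
+ *	if (!err)
+ *		err = ixgbe_aci_set_phy_cfg(hw, &cfg);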
+ */
+int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+		     struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+		     enum ixgbe_fc_mode req_mode)
+{
+	u8 pause_mask = 0x0;
+
+	if (!cfg)
+		return -EINVAL;
+
+	switch (req_mode) {
+	case ixgbe_fc_full:
+		pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+		pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+		break;
+	case ixgbe_fc_rx_pause:
+		pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+		break;
+	case ixgbe_fc_tx_pause:
+		pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+		break;
+	default:
+		break;
+	}
+
+	/* Clear the old pause settings. */
+	cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
+		       IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
+
+	/* Set the new capabilities. */
+	cfg->caps |= pause_mask;
+
+	return 0;
+}
+
+/**
+ * ixgbe_setup_fc_e610 - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Set up flow control. This has to be done during init time.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_fc_e610(struct ixgbe_hw *hw)
+{
+	struct ixgbe_aci_cmd_get_phy_caps_data pcaps = {};
+	struct ixgbe_aci_cmd_set_phy_cfg_data cfg = {};
+	int err;
+
+	/* Get the current PHY config */
+	err = ixgbe_aci_get_phy_caps(hw, false,
+				     IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
+	if (err)
+		return err;
+
+	ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+
+	/* Configure the set PHY data */
+	err = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
+	if (err)
+		return err;
+
+	/* If the capabilities have changed, then set the new config */
+	if (cfg.caps != pcaps.caps) {
+		cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+		err = ixgbe_aci_set_phy_cfg(hw, &cfg);
+		if (err)
+			return err;
+	}
+
+	return err;
+}
+
+/**
+ * ixgbe_fc_autoneg_e610 - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Configure Flow Control.
+ */
+void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw)
+{
+	int err;
+
+	/* Get current link status.
+	 * Current FC mode will be stored in the hw context.
+	 */
+	err = ixgbe_aci_get_link_info(hw, false, NULL);
+	if (err)
+		goto no_autoneg;
+
+	/* Check if the link is up */
+	if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP))
+		goto no_autoneg;
+
+	/* Check if auto-negotiation has completed */
+	if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED))
+		goto no_autoneg;
+
+	hw->fc.fc_was_autonegged = true;
+	return;
+
+no_autoneg:
+	hw->fc.fc_was_autonegged = false;
+	hw->fc.current_mode = hw->fc.requested_mode;
+}
+
+/**
+ * ixgbe_disable_rx_e610 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Disable RX DMA unit on E610 using ACI command (0x000C).
+ */ +void ixgbe_disable_rx_e610(struct ixgbe_hw *hw) +{ + u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + u32 pfdtxgswc; + int err; + + if (!(rxctrl & IXGBE_RXCTRL_RXEN)) + return; + + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + + err = ixgbe_aci_disable_rxen(hw); + + /* If we fail - disable RX using register write */ + if (err) { + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } + } +} + +/** + * ixgbe_init_phy_ops_e610 - PHY specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY type was not known. + * + * Return: the exit code of the operation. + */ +int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) + phy->ops.set_phy_power = ixgbe_set_phy_power_e610; + else + phy->ops.set_phy_power = NULL; + + /* Identify the PHY */ + return phy->ops.identify(hw); +} + +/** + * ixgbe_identify_phy_e610 - Identify PHY + * @hw: pointer to hardware structure + * + * Determine PHY type, supported speeds and PHY ID. + * + * Return: the exit code of the operation. + */ +int ixgbe_identify_phy_e610(struct ixgbe_hw *hw) +{ + struct ixgbe_aci_cmd_get_phy_caps_data pcaps; + u64 phy_type_low, phy_type_high; + int err; + + /* Set PHY type */ + hw->phy.type = ixgbe_phy_fw; + + err = ixgbe_aci_get_phy_caps(hw, false, + IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, &pcaps); + if (err) + return err; + + if (!(pcaps.module_compliance_enforcement & + IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) { + /* Handle lenient mode */ + err = ixgbe_aci_get_phy_caps(hw, false, + IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA, + &pcaps); + if (err) + return err; + } + + /* Determine supported speeds */ + hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN; + phy_type_high = le64_to_cpu(pcaps.phy_type_high); + phy_type_low = le64_to_cpu(pcaps.phy_type_low); + + if (phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T || + phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL; + if (phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX || + phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII || + phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL; + if (phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T || + phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX || + phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX || + phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX || + phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII || + phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL; + if (phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T || + phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA || + phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR || + phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR || + phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 || + phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC || + phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C || + phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL; + + /* 2.5 and 5 Gbps link speeds must be excluded 
from the
+ * auto-negotiation set used during driver initialization due to
+ * compatibility issues with certain switches. Those issues do not
+ * exist in the case of the E610 2.5G SKU device (0x57b1).
+ */
+	if (!hw->phy.autoneg_advertised &&
+	    hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
+		hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+	if (phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
+	    phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
+	    phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
+	    phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
+	    phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
+	if (!hw->phy.autoneg_advertised &&
+	    hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
+		hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+	if (phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
+	    phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
+	    phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+
+	/* Set PHY ID */
+	memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
+
+	hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
+				       IXGBE_LINK_SPEED_100_FULL |
+				       IXGBE_LINK_SPEED_1GB_FULL;
+	hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+
+	return 0;
+}
+
+/**
+ * ixgbe_identify_module_e610 - Identify SFP module type
+ * @hw: pointer to hardware structure
+ *
+ * Identify the SFP module type.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_identify_module_e610(struct ixgbe_hw *hw)
+{
+	bool media_available;
+	u8 module_type;
+	int err;
+
+	err = ixgbe_update_link_info(hw);
+	if (err)
+		return err;
+
+	media_available =
+		(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE);
+
+	if (media_available) {
+		hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+
+		/* Get module type from hw context updated by
+		 * ixgbe_update_link_info()
+		 */
+		module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
+
+		if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
+		    (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
+			hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+		} else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
+			hw->phy.sfp_type = ixgbe_sfp_type_sr;
+		} else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
+			   (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
+			hw->phy.sfp_type = ixgbe_sfp_type_lr;
+		}
+	} else {
+		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
+/**
+ * ixgbe_setup_phy_link_e610 - Sets up firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Set the parameters for the firmware-controlled PHYs.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw)
+{
+	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+	struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
+	u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
+	u64 sup_phy_type_low, sup_phy_type_high;
+	u64 phy_type_low = 0, phy_type_high = 0;
+	int err;
+
+	err = ixgbe_aci_get_link_info(hw, false, NULL);
+	if (err)
+		return err;
+
+	/* If media is not available get default config. */
+	if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+		rmode = IXGBE_ACI_REPORT_DFLT_CFG;
+
+	err = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
+	if (err)
+		return err;
+
+	sup_phy_type_low = le64_to_cpu(pcaps.phy_type_low);
+	sup_phy_type_high = le64_to_cpu(pcaps.phy_type_high);
+
+	/* Get Active configuration to avoid unintended changes.
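The speed discovery in ixgbe_identify_phy_e610() above is a plain bitmask translation from the firmware-reported PHY type words into the driver's link-speed flags. A standalone sketch of the same pattern, with placeholder bit positions and speed values rather than the real IXGBE_PHY_TYPE_*/IXGBE_LINK_SPEED_* definitions:

```c
#include <stdint.h>
#include <stdio.h>

/* placeholder PHY type bits and speed flags, for illustration only */
#define PHY_LOW_1000BASE_T	(1ULL << 0)
#define PHY_LOW_10GBASE_T	(1ULL << 1)
#define SPEED_1GB_FULL		0x0020u
#define SPEED_10GB_FULL		0x0080u

int main(void)
{
	uint64_t phy_type_low = PHY_LOW_1000BASE_T | PHY_LOW_10GBASE_T;
	unsigned int speeds = 0;

	/* each capability bit reported by firmware enables one speed flag */
	if (phy_type_low & PHY_LOW_1000BASE_T)
		speeds |= SPEED_1GB_FULL;
	if (phy_type_low & PHY_LOW_10GBASE_T)
		speeds |= SPEED_10GB_FULL;

	printf("speeds_supported = 0x%04x\n", speeds);	/* prints 0x00a0 */
	return 0;
}
```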
*/ + err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG, + &pcaps); + if (err) + return err; + + ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg); + + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) { + phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T; + phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII; + } + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) { + phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX; + phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII; + phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII; + } + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { + phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T; + phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX; + phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX; + phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX; + phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII; + phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII; + } + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) { + phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T; + phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X; + phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX; + phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII; + phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII; + } + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) { + phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T; + phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR; + phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII; + } + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) { + phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T; + phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA; + phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR; + phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR; + phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1; + phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC; + phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C; + phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII; + } + + /* Mask the set values to avoid requesting unsupported link types. */ + phy_type_low &= sup_phy_type_low; + pcfg.phy_type_low = cpu_to_le64(phy_type_low); + phy_type_high &= sup_phy_type_high; + pcfg.phy_type_high = cpu_to_le64(phy_type_high); + + if (pcfg.phy_type_high != pcaps.phy_type_high || + pcfg.phy_type_low != pcaps.phy_type_low || + pcfg.caps != pcaps.caps) { + pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK; + pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT; + + err = ixgbe_aci_set_phy_cfg(hw, &pcfg); + if (err) + return err; + } + + return 0; +} + +/** + * ixgbe_set_phy_power_e610 - Control power for copper PHY + * @hw: pointer to hardware structure + * @on: true for on, false for off + * + * Set the power on/off of the PHY + * by getting its capabilities and setting the appropriate + * configuration parameters. + * + * Return: the exit code of the operation. + */ +int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on) +{ + struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {}; + struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {}; + int err; + + err = ixgbe_aci_get_phy_caps(hw, false, + IXGBE_ACI_REPORT_ACTIVE_CFG, + &phy_caps); + if (err) + return err; + + ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg); + + if (on) + phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER; + else + phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER; + + /* PHY is already in requested power mode. 
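ixgbe_setup_phy_link_e610() and ixgbe_set_phy_power_e610() share a compare-before-commit pattern: the active capabilities are copied into a config structure, edited, and written back only when something actually changed, so an ACI round trip (and a possible link flap) is skipped when the PHY is already configured as requested. A condensed sketch of that flow, not a verbatim excerpt:

```c
/* sketch: read-modify-write the PHY config only when it differs */
ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);

/* ... edit pcfg.caps and pcfg.phy_type_low/high here ... */

if (pcfg.caps == pcaps.caps &&
    pcfg.phy_type_low == pcaps.phy_type_low &&
    pcfg.phy_type_high == pcaps.phy_type_high)
	return 0;	/* PHY already configured as requested */

pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK | IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
return ixgbe_aci_set_phy_cfg(hw, &pcfg);
```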
*/
+	if (phy_caps.caps == phy_cfg.caps)
+		return 0;
+
+	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+	return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+}
+
+/**
+ * ixgbe_enter_lplu_e610 - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
+ * X557 PHY immediately prior to entering LPLU.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw)
+{
+	struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
+	struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
+	int err;
+
+	err = ixgbe_aci_get_phy_caps(hw, false,
+				     IXGBE_ACI_REPORT_ACTIVE_CFG,
+				     &phy_caps);
+	if (err)
+		return err;
+
+	ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+	phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
+
+	return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+}
+
+/**
+ * ixgbe_init_eeprom_params_e610 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the EEPROM parameters ixgbe_eeprom_info within the ixgbe_hw
+ * struct in order to set up EEPROM access.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw)
+{
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	u32 gens_stat;
+	u8 sr_size;
+
+	if (eeprom->type != ixgbe_eeprom_uninitialized)
+		return 0;
+
+	eeprom->type = ixgbe_flash;
+
+	gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+	sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
+
+	/* Switching to words (sr_size contains power of 2). */
+	eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
+
+	hw_dbg(hw, "EEPROM params: type = %d, size = %d\n", eeprom->type,
+	       eeprom->word_size);
+
+	return 0;
+}
+
+/**
+ * ixgbe_aci_get_netlist_node - get a node handle
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ *
+ * Gets the netlist node and assigns it to
+ * the provided handle using ACI command (0x06E0).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+			       struct ixgbe_aci_cmd_get_link_topo *cmd,
+			       u8 *node_part_number, u16 *node_handle)
+{
+	struct ixgbe_aci_desc desc;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+	desc.params.get_link_topo = *cmd;
+
+	if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
+		return -EOPNOTSUPP;
+
+	if (node_handle)
+		*node_handle =
+			le16_to_cpu(desc.params.get_link_topo.addr.handle);
+	if (node_part_number)
+		*node_part_number = desc.params.get_link_topo.node_part_num;
+
+	return 0;
+}
+
+/**
+ * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * Request NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+		      enum ixgbe_aci_res_access_type access)
+{
+	u32 fla;
+
+	/* Skip if we are in blank NVM programming mode */
+	fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+	if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
+		return 0;
+
+	return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
+				 IXGBE_NVM_TIMEOUT);
+}
+
+/**
+ * ixgbe_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * Release NVM ownership.
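ixgbe_acquire_nvm() and ixgbe_release_nvm() are meant to bracket every Shadow RAM access; note that both deliberately no-op when the flash is unlocked (blank NVM programming mode). A minimal sketch of the discipline the read helpers below follow, with a hypothetical wrapper name:

```c
/* sketch: hypothetical helper mirroring ixgbe_read_ee_aci_e610() below */
static int e610_read_sr_word_locked(struct ixgbe_hw *hw, u16 offset, u16 *word)
{
	int err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);

	if (err)
		return err;

	err = ixgbe_read_sr_word_aci(hw, offset, word);
	ixgbe_release_nvm(hw);	/* always release, even on a failed read */

	return err;
}
```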
+ */
+void ixgbe_release_nvm(struct ixgbe_hw *hw)
+{
+	u32 fla;
+
+	/* Skip if we are in blank NVM programming mode */
+	fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+	if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
+		return;
+
+	ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
+}
+
+/**
+ * ixgbe_aci_read_nvm - read NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @read_shadow_ram: tell if this is a shadow RAM read
+ *
+ * Read the NVM using ACI command (0x0701).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+		       u16 length, void *data, bool last_command,
+		       bool read_shadow_ram)
+{
+	struct ixgbe_aci_cmd_nvm *cmd;
+	struct ixgbe_aci_desc desc;
+
+	if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
+		return -EINVAL;
+
+	cmd = &desc.params.nvm;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
+
+	if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
+		cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
+
+	/* If this is the last command in a series, set the proper flag. */
+	if (last_command)
+		cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+	cmd->module_typeid = cpu_to_le16(module_typeid);
+	cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+	cmd->offset_high = (offset >> 16) & 0xFF;
+	cmd->length = cpu_to_le16(length);
+
+	return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_nvm_validate_checksum - validate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Verify NVM PFA checksum validity using ACI command (0x0706).
+ * If the checksum verification fails, -EIO is returned.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
+{
+	struct ixgbe_aci_cmd_nvm_checksum *cmd;
+	struct ixgbe_aci_desc desc;
+	int err;
+
+	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+	if (err)
+		return err;
+
+	cmd = &desc.params.nvm_checksum;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+	cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
+
+	err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+	ixgbe_release_nvm(hw);
+
+	if (!err && cmd->checksum !=
+	    cpu_to_le16(IXGBE_ACI_NVM_CHECKSUM_CORRECT)) {
+		struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+							     hw);
+
+		err = -EIO;
+		netdev_err(adapter->netdev, "Invalid Shadow RAM checksum\n");
+	}
+
+	return err;
+}
+
+/**
+ * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
+ *
+ * Return: the exit code of the operation.
+ */ +int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + u32 bytes = sizeof(u16); + u16 data_local; + int err; + + err = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes, + (u8 *)&data_local, true); + if (err) + return err; + + *data = data_local; + return 0; +} + +/** + * ixgbe_read_flat_nvm - Read portion of NVM by flat offset + * @hw: pointer to the HW struct + * @offset: offset from beginning of NVM + * @length: (in) number of bytes to read; (out) number of bytes actually read + * @data: buffer to return data in (sized to fit the specified length) + * @read_shadow_ram: if true, read from shadow RAM instead of NVM + * + * Reads a portion of the NVM, as a flat memory space. This function correctly + * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size + * from being exceeded in case of Shadow RAM read requests and ensures that no + * single read request exceeds the maximum 4KB read for a single admin command. + * + * Returns an error code on failure. Note that the data pointer may be + * partially updated if some reads succeed before a failure. + * + * Return: the exit code of the operation. + */ +int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length, + u8 *data, bool read_shadow_ram) +{ + u32 inlen = *length; + u32 bytes_read = 0; + bool last_cmd; + int err; + + /* Verify the length of the read if this is for the Shadow RAM */ + if (read_shadow_ram && ((offset + inlen) > + (hw->eeprom.word_size * 2u))) + return -EINVAL; + + do { + u32 read_size, sector_offset; + + /* ixgbe_aci_read_nvm cannot read more than 4KB at a time. + * Additionally, a read from the Shadow RAM may not cross over + * a sector boundary. Conveniently, the sector size is also 4KB. + */ + sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE; + read_size = min_t(u32, + IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset, + inlen - bytes_read); + + last_cmd = !(bytes_read + read_size < inlen); + + /* ixgbe_aci_read_nvm takes the length as a u16. Our read_size + * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE + * maximum size guarantees that it will fit within the 2 bytes. + */ + err = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT, + offset, (u16)read_size, + data + bytes_read, last_cmd, + read_shadow_ram); + if (err) + break; + + bytes_read += read_size; + offset += read_size; + } while (!last_cmd); + + *length = bytes_read; + return err; +} + +/** + * ixgbe_read_sr_buf_aci - Read Shadow RAM buffer via ACI + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM words to read (0x000000 - 0x001FFF) + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Read 16 bit words (data buf) from the Shadow RAM. Acquire/release the NVM + * ownership. + * + * Return: the operation exit code. + */ +int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words, + u16 *data) +{ + u32 bytes = *words * 2; + int err; + + err = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true); + if (err) + return err; + + *words = bytes / 2; + + for (int i = 0; i < *words; i++) + data[i] = le16_to_cpu(((__le16 *)data)[i]); + + return 0; +} + +/** + * ixgbe_read_ee_aci_e610 - Read EEPROM word using the admin command. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the ACI. 
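The chunking loop in ixgbe_read_flat_nvm() above is worth a worked example: a read never crosses a 4 KiB Shadow RAM sector, which is conveniently also the maximum ACI buffer size. A standalone userspace demo of the same arithmetic, using hypothetical offsets:

```c
#include <stdio.h>

#define ACI_MAX_BUF 4096u	/* mirrors IXGBE_ACI_MAX_BUFFER_SIZE */

int main(void)
{
	unsigned int offset = 0x0f00, remaining = 0x300;

	while (remaining) {
		unsigned int sector_off = offset % ACI_MAX_BUF;
		unsigned int chunk = ACI_MAX_BUF - sector_off;

		if (chunk > remaining)
			chunk = remaining;
		/* first pass: 0x100 bytes at 0xf00 (stops at the sector
		 * boundary); second pass: 0x200 bytes at 0x1000 */
		printf("read %#x bytes at %#x\n", chunk, offset);
		offset += chunk;
		remaining -= chunk;
	}
	return 0;
}
```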
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with reading.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+	int err;
+
+	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+		err = hw->eeprom.ops.init_params(hw);
+		if (err)
+			return err;
+	}
+
+	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+	if (err)
+		return err;
+
+	err = ixgbe_read_sr_word_aci(hw, offset, data);
+	ixgbe_release_nvm(hw);
+
+	return err;
+}
+
+/**
+ * ixgbe_read_ee_aci_buffer_e610 - Read EEPROM words via ACI
+ * @hw: pointer to hardware structure
+ * @offset: offset of words in the EEPROM to read
+ * @words: number of words to read
+ * @data: words to read from the EEPROM
+ *
+ * Read 16 bit words from the EEPROM via the ACI. Initialize the EEPROM params
+ * prior to the read. Acquire/release the NVM ownership.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
+				  u16 words, u16 *data)
+{
+	int err;
+
+	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+		err = hw->eeprom.ops.init_params(hw);
+		if (err)
+			return err;
+	}
+
+	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+	if (err)
+		return err;
+
+	err = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
+	ixgbe_release_nvm(hw);
+
+	return err;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_e610 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+	int err;
+
+	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+		err = hw->eeprom.ops.init_params(hw);
+		if (err)
+			return err;
+	}
+
+	err = ixgbe_nvm_validate_checksum(hw);
+	if (err)
+		return err;
+
+	if (checksum_val) {
+		u16 tmp_checksum;
+
+		err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+		if (err)
+			return err;
+
+		err = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
+					     &tmp_checksum);
+		ixgbe_release_nvm(hw);
+
+		if (!err)
+			*checksum_val = tmp_checksum;
+	}
+
+	return err;
+}
+
+/**
+ * ixgbe_reset_hw_e610 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, and performs a reset.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_reset_hw_e610(struct ixgbe_hw *hw)
+{
+	u32 swfw_mask = hw->phy.phy_semaphore_mask;
+	u32 ctrl, i;
+	int err;
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	err = hw->mac.ops.stop_adapter(hw);
+	if (err)
+		goto reset_hw_out;
+
+	/* Flush pending Tx transactions.
*/ + ixgbe_clear_tx_pending(hw); + + hw->phy.ops.init(hw); +mac_reset_top: + err = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (err) + return -EBUSY; + ctrl = IXGBE_CTRL_RST; + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + udelay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter, + hw); + + err = -EIO; + netdev_err(adapter->netdev, "Reset polling failed to complete."); + } + + /* Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. + */ + msleep(100); + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* Set the Rx packet buffer size. */ + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), GENMASK(18, 17)); + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* Maximum number of Receive Address Registers. */ +#define IXGBE_MAX_NUM_RAR 128 + + /* Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to the + * maximum number of Receive Address Registers, since we modify this + * value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = IXGBE_MAX_NUM_RAR; + hw->mac.ops.init_rx_addrs(hw); + + /* Initialize bus function number */ + hw->mac.ops.set_lan_id(hw); + +reset_hw_out: + return err; +} + +static const struct ixgbe_mac_operations mac_ops_e610 = { + .init_hw = ixgbe_init_hw_generic, + .start_hw = ixgbe_start_hw_X540, + .clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic, + .enable_rx_dma = ixgbe_enable_rx_dma_generic, + .get_mac_addr = ixgbe_get_mac_addr_generic, + .get_device_caps = ixgbe_get_device_caps_generic, + .stop_adapter = ixgbe_stop_adapter_generic, + .set_lan_id = ixgbe_set_lan_id_multi_port_pcie, + .set_rxpba = ixgbe_set_rxpba_generic, + .check_link = ixgbe_check_link_e610, + .blink_led_start = ixgbe_blink_led_start_X540, + .blink_led_stop = ixgbe_blink_led_stop_X540, + .set_rar = ixgbe_set_rar_generic, + .clear_rar = ixgbe_clear_rar_generic, + .set_vmdq = ixgbe_set_vmdq_generic, + .set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic, + .clear_vmdq = ixgbe_clear_vmdq_generic, + .init_rx_addrs = ixgbe_init_rx_addrs_generic, + .update_mc_addr_list = ixgbe_update_mc_addr_list_generic, + .enable_mc = ixgbe_enable_mc_generic, + .disable_mc = ixgbe_disable_mc_generic, + .clear_vfta = ixgbe_clear_vfta_generic, + .set_vfta = ixgbe_set_vfta_generic, + .fc_enable = ixgbe_fc_enable_generic, + .set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550, + .init_uta_tables = ixgbe_init_uta_tables_generic, + .set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing, + .set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing, + .set_source_address_pruning = + ixgbe_set_source_address_pruning_x550, + .set_ethertype_anti_spoofing = + ixgbe_set_ethertype_anti_spoofing_x550, + .disable_rx_buff = ixgbe_disable_rx_buff_generic, + .enable_rx_buff = ixgbe_enable_rx_buff_generic, + .enable_rx = ixgbe_enable_rx_generic, + .disable_rx = ixgbe_disable_rx_e610, + .led_on = ixgbe_led_on_generic, + .led_off = ixgbe_led_off_generic, + 
.init_led_link_act = ixgbe_init_led_link_act_generic, + .reset_hw = ixgbe_reset_hw_e610, + .get_media_type = ixgbe_get_media_type_e610, + .setup_link = ixgbe_setup_link_e610, + .get_link_capabilities = ixgbe_get_link_capabilities_e610, + .get_bus_info = ixgbe_get_bus_info_generic, + .acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540, + .release_swfw_sync = ixgbe_release_swfw_sync_X540, + .init_swfw_sync = ixgbe_init_swfw_sync_X540, + .prot_autoc_read = prot_autoc_read_generic, + .prot_autoc_write = prot_autoc_write_generic, + .setup_fc = ixgbe_setup_fc_e610, + .fc_autoneg = ixgbe_fc_autoneg_e610, +}; + +static const struct ixgbe_phy_operations phy_ops_e610 = { + .init = ixgbe_init_phy_ops_e610, + .identify = ixgbe_identify_phy_e610, + .identify_sfp = ixgbe_identify_module_e610, + .setup_link_speed = ixgbe_setup_phy_link_speed_generic, + .setup_link = ixgbe_setup_phy_link_e610, + .enter_lplu = ixgbe_enter_lplu_e610, +}; + +static const struct ixgbe_eeprom_operations eeprom_ops_e610 = { + .read = ixgbe_read_ee_aci_e610, + .read_buffer = ixgbe_read_ee_aci_buffer_e610, + .validate_checksum = ixgbe_validate_eeprom_checksum_e610, +}; + +const struct ixgbe_info ixgbe_e610_info = { + .mac = ixgbe_mac_e610, + .get_invariants = ixgbe_get_invariants_X540, + .mac_ops = &mac_ops_e610, + .eeprom_ops = &eeprom_ops_e610, + .phy_ops = &phy_ops_e610, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_x550em_a, +}; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h new file mode 100644 index 000000000000..ba8c06b73810 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Intel Corporation. */ + +#ifndef _IXGBE_E610_H_ +#define _IXGBE_E610_H_ + +#include "ixgbe_type.h" + +int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc, + void *buf, u16 buf_size); +bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw); +int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e, + bool *pending); +void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode); +int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res, + enum ixgbe_aci_res_access_type access, u32 timeout); +void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res); +int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size, + u32 *cap_count, enum ixgbe_aci_opc opc); +int ixgbe_discover_dev_caps(struct ixgbe_hw *hw, + struct ixgbe_hw_dev_caps *dev_caps); +int ixgbe_discover_func_caps(struct ixgbe_hw *hw, + struct ixgbe_hw_func_caps *func_caps); +int ixgbe_get_caps(struct ixgbe_hw *hw); +int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw); +int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode, + struct ixgbe_aci_cmd_get_phy_caps_data *pcaps); +void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps, + struct ixgbe_aci_cmd_set_phy_cfg_data *cfg); +int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw, + struct ixgbe_aci_cmd_set_phy_cfg_data *cfg); +int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link); +int ixgbe_update_link_info(struct ixgbe_hw *hw); +int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up); +int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse, + struct ixgbe_link_status *link); +int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask); +int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask); +enum ixgbe_media_type 
ixgbe_get_media_type_e610(struct ixgbe_hw *hw); +int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait); +int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); +int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); +int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw, + struct ixgbe_aci_cmd_set_phy_cfg_data *cfg, + enum ixgbe_fc_mode req_mode); +int ixgbe_setup_fc_e610(struct ixgbe_hw *hw); +void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw); +void ixgbe_disable_rx_e610(struct ixgbe_hw *hw); +int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw); +int ixgbe_identify_phy_e610(struct ixgbe_hw *hw); +int ixgbe_identify_module_e610(struct ixgbe_hw *hw); +int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw); +int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on); +int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw); +int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw); +int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw, + struct ixgbe_aci_cmd_get_link_topo *cmd, + u8 *node_part_number, u16 *node_handle); +int ixgbe_acquire_nvm(struct ixgbe_hw *hw, + enum ixgbe_aci_res_access_type access); +void ixgbe_release_nvm(struct ixgbe_hw *hw); +int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset, + u16 length, void *data, bool last_command, + bool read_shadow_ram); +int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw); +int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data); +int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length, + u8 *data, bool read_shadow_ram); +int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words, + u16 *data); +int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data); +int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val); +int ixgbe_reset_hw_e610(struct ixgbe_hw *hw); + +#endif /* _IXGBE_E610_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 9482e0cca8b7..da91c582d439 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. 
*/ /* ethtool support for ixgbe */ @@ -690,6 +690,7 @@ static void ixgbe_get_regs(struct net_device *netdev, case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); break; @@ -1613,6 +1614,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: toggle = 0x7FFFF30F; test = reg_test_82599; break; @@ -1874,6 +1876,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); reg_data |= IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); @@ -1935,6 +1938,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); reg_data |= IXGBE_MACC_FLU; IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 16fa621ce0ff..336d47ffb95a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include "ixgbe.h" #include "ixgbe_sriov.h" @@ -107,6 +107,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: if (num_tcs > 4) { /* * TCs : TC0/1 TC2/3 TC4-7 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 2e38e8f6fac1..467f81239e12 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. 
*/ #include <linux/types.h> #include <linux/module.h> @@ -42,6 +42,7 @@ #include "ixgbe.h" #include "ixgbe_common.h" +#include "ixgbe_e610.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_mbx.h" #include "ixgbe_phy.h" @@ -73,6 +74,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info, [board_x550em_a] = &ixgbe_x550em_a_info, [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info, + [board_e610] = &ixgbe_e610_info, }; /* ixgbe_pci_tbl - PCI Device ID Table @@ -131,6 +133,11 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_BACKPLANE), board_e610}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_SFP), board_e610}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_10G_T), board_e610}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_2_5G_T), board_e610}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_SGMII), board_e610}, /* required last entry */ {0, } }; @@ -173,6 +180,8 @@ static struct workqueue_struct *ixgbe_wq; static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); +static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *); +static void ixgbe_watchdog_update_link(struct ixgbe_adapter *); static const struct net_device_ops ixgbe_netdev_ops; @@ -236,8 +245,11 @@ static int ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) * bandwidth details should be gathered from the parent bus instead of from the * device. Used to ensure that various locations all have the correct device ID * checks. + * + * Return: true if information should be collected from the parent bus, false + * otherwise */ -static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw) +static bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw) { switch (hw->device_id) { case IXGBE_DEV_ID_82599_SFP_SF_QP: @@ -876,6 +888,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; @@ -915,6 +928,7 @@ void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: mask = (qmask & 0xFFFFFFFF); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); mask = (qmask >> 32); @@ -1025,7 +1039,7 @@ static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) return ((head <= tail) ? tail : tail + ring->count) - head; } -static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) +static bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) { u32 tx_done = ixgbe_get_tx_completed(tx_ring); u32 tx_done_old = tx_ring->tx_stats.tx_done_old; @@ -1909,10 +1923,6 @@ bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, { struct net_device *netdev = rx_ring->netdev; - /* XDP packets use error pointer so abort at this point */ - if (IS_ERR(skb)) - return true; - /* Verify netdev is present, and that packet does not have any * errors that would be unacceptable to the netdev. 
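For context on the Rx refactor in the surrounding hunks: ixgbe_run_xdp() previously returned its IXGBE_XDP_* verdict bitmask encoded as an error pointer in place of an skb, which every caller then had to decode. A standalone illustration of the round trip being removed, with ERR_PTR/PTR_ERR stubbed for userspace (in the kernel they come from linux/err.h):

```c
#include <stdio.h>

#define IXGBE_XDP_TX 2	/* BIT(1), the driver's Tx verdict bit */

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }

int main(void)
{
	void *skb = ERR_PTR(-IXGBE_XDP_TX);	/* old encode */
	unsigned int xdp_res = -PTR_ERR(skb);	/* old decode */

	printf("xdp_res = %u\n", xdp_res);	/* 2, i.e. IXGBE_XDP_TX */
	return 0;
}
```

After the patch the verdict is simply returned as an int, so the Rx loop tests xdp_res directly instead of probing the skb pointer with IS_ERR().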
*/ @@ -2095,7 +2105,7 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, /* hand second half of page back to the ring */ ixgbe_reuse_rx_page(rx_ring, rx_buffer); } else { - if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) { + if (skb && IXGBE_CB(skb)->dma == rx_buffer->dma) { /* the page has been released from the ring */ IXGBE_CB(skb)->page_released = true; } else { @@ -2220,9 +2230,9 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, return skb; } -static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring, - struct xdp_buff *xdp) +static int ixgbe_run_xdp(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring, + struct xdp_buff *xdp) { int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; @@ -2272,7 +2282,7 @@ out_failure: break; } xdp_out: - return ERR_PTR(-result); + return result; } static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring, @@ -2330,6 +2340,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, unsigned int offset = rx_ring->rx_offset; unsigned int xdp_xmit = 0; struct xdp_buff xdp; + int xdp_res = 0; /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ #if (PAGE_SIZE < 8192) @@ -2375,12 +2386,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size); #endif - skb = ixgbe_run_xdp(adapter, rx_ring, &xdp); + xdp_res = ixgbe_run_xdp(adapter, rx_ring, &xdp); } - if (IS_ERR(skb)) { - unsigned int xdp_res = -PTR_ERR(skb); - + if (xdp_res) { if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) { xdp_xmit |= xdp_res; ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size); @@ -2400,7 +2409,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } /* exit if we failed to retrieve a buffer */ - if (!skb) { + if (!xdp_res && !skb) { rx_ring->rx_stats.alloc_rx_buff_failed++; rx_buffer->pagecnt_bias++; break; @@ -2414,7 +2423,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, continue; /* verify the packet layout is correct */ - if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) + if (xdp_res || ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) continue; /* probably a little skewed due to removing CRC */ @@ -2515,6 +2524,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: ixgbe_set_ivar(adapter, -1, 1, v_idx); break; default: @@ -2528,6 +2538,9 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) IXGBE_EIMS_MAILBOX | IXGBE_EIMS_LSC); + if (adapter->hw.mac.type == ixgbe_mac_e610) + mask &= ~IXGBE_EIMS_FW_EVENT; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); } @@ -2744,6 +2757,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: /* * set the WDIS bit to not clear the timer bits and cause an * immediate assertion of the interrupt @@ -2966,6 +2980,218 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) } } +/** + * ixgbe_check_phy_fw_load - check if PHY FW load failed + * @adapter: pointer to adapter structure + * @link_cfg_err: bitmap from the link info structure + * + * Check if external PHY FW load failed and print an error message if it did. 
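This check and ixgbe_check_module_power() below both use a log-once latch in adapter->flags2: the message prints on the first failure, stays suppressed while the condition persists, and re-arms once it clears. A standalone demo of the pattern:

```c
#include <stdbool.h>
#include <stdio.h>

static bool fw_load_failed;	/* stands in for the flags2 bit */

static void check_fw_load(bool error_bit)
{
	if (!error_bit) {
		fw_load_failed = false;	/* condition cleared, re-arm */
		return;
	}
	if (fw_load_failed)
		return;			/* already reported once */
	puts("PHY FW load failed");
	fw_load_failed = true;
}

int main(void)
{
	check_fw_load(true);	/* prints */
	check_fw_load(true);	/* suppressed */
	check_fw_load(false);	/* re-arms */
	check_fw_load(true);	/* prints again */
	return 0;
}
```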
+ */
+static void ixgbe_check_phy_fw_load(struct ixgbe_adapter *adapter,
+				    u8 link_cfg_err)
+{
+	if (!(link_cfg_err & IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
+		adapter->flags2 &= ~IXGBE_FLAG2_PHY_FW_LOAD_FAILED;
+		return;
+	}
+
+	if (adapter->flags2 & IXGBE_FLAG2_PHY_FW_LOAD_FAILED)
+		return;
+
+	if (link_cfg_err & IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
+		netdev_err(adapter->netdev, "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
+		adapter->flags2 |= IXGBE_FLAG2_PHY_FW_LOAD_FAILED;
+	}
+}
+
+/**
+ * ixgbe_check_module_power - check module power level
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Check module power level returned by a previous call to aci_get_link_info
+ * and print error messages if module power level is not supported.
+ */
+static void ixgbe_check_module_power(struct ixgbe_adapter *adapter,
+				     u8 link_cfg_err)
+{
+	/* If module power level is supported, clear the flag. */
+	if (!(link_cfg_err & (IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT |
+			      IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED))) {
+		adapter->flags2 &= ~IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+		return;
+	}
+
+	/* If IXGBE_FLAG2_MOD_POWER_UNSUPPORTED was previously set and the
+	 * above block didn't clear this bit, there's nothing to do.
+	 */
+	if (adapter->flags2 & IXGBE_FLAG2_MOD_POWER_UNSUPPORTED)
+		return;
+
+	if (link_cfg_err & IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT) {
+		netdev_err(adapter->netdev, "The installed module is incompatible with the device's NVM image. Cannot start link.\n");
+		adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+	} else if (link_cfg_err & IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED) {
+		netdev_err(adapter->netdev, "The module's power requirements exceed the device's power supply. Cannot start link.\n");
+		adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+	}
+}
+
+/**
+ * ixgbe_check_link_cfg_err - check if link configuration failed
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Print if any link configuration failure happens due to the value in the
+ * link_cfg_err parameter in the link info structure.
+ */
+static void ixgbe_check_link_cfg_err(struct ixgbe_adapter *adapter,
+				     u8 link_cfg_err)
+{
+	ixgbe_check_module_power(adapter, link_cfg_err);
+	ixgbe_check_phy_fw_load(adapter, link_cfg_err);
+}
+
+/**
+ * ixgbe_process_link_status_event - process the link event
+ * @adapter: pointer to adapter structure
+ * @link_up: true if the physical link is up and false if it is down
+ * @link_speed: current link speed received from the link event
+ *
+ * Return: 0 on success or negative value on failure.
+ */
+static int
+ixgbe_process_link_status_event(struct ixgbe_adapter *adapter, bool link_up,
+				u16 link_speed)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int status;
+
+	/* Update the link info structures and re-enable link events;
+	 * don't bail on failure, as other bookkeeping is still needed.
+	 */
+	status = ixgbe_update_link_info(hw);
+	if (status)
+		e_dev_err("Failed to update link status, err %d aq_err %d\n",
+			  status, hw->aci.last_status);
+
+	ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err);
+
+	/* Check if the link state is up after updating link info, and treat
+	 * this event as an UP event since the link is actually UP now.
+	 */
+	if (hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)
+		link_up = true;
+
+	/* Turn off PHY if media was removed.
*/
+	if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA) &&
+	    !(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+		adapter->flags2 |= IXGBE_FLAG2_NO_MEDIA;
+
+	if (link_up == adapter->link_up &&
+	    link_up == netif_carrier_ok(adapter->netdev) &&
+	    link_speed == adapter->link_speed)
+		return 0;
+
+	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+	adapter->link_check_timeout = jiffies;
+	ixgbe_watchdog_update_link(adapter);
+
+	if (link_up)
+		ixgbe_watchdog_link_is_up(adapter);
+	else
+		ixgbe_watchdog_link_is_down(adapter);
+
+	return 0;
+}
+
+/**
+ * ixgbe_handle_link_status_event - handle link status event via ACI
+ * @adapter: pointer to adapter structure
+ * @e: event structure containing link status info
+ */
+static void
+ixgbe_handle_link_status_event(struct ixgbe_adapter *adapter,
+			       struct ixgbe_aci_event *e)
+{
+	struct ixgbe_aci_cmd_get_link_status_data *link_data;
+	u16 link_speed;
+	bool link_up;
+
+	link_data = (struct ixgbe_aci_cmd_get_link_status_data *)e->msg_buf;
+
+	link_up = !!(link_data->link_info & IXGBE_ACI_LINK_UP);
+	link_speed = le16_to_cpu(link_data->link_speed);
+
+	if (ixgbe_process_link_status_event(adapter, link_up, link_speed))
+		e_dev_warn("Could not process link status event");
+}
+
+/**
+ * ixgbe_schedule_fw_event - schedule Firmware event
+ * @adapter: pointer to the adapter structure
+ *
+ * If the adapter is not in down, removing or resetting state,
+ * an event is scheduled.
+ */
+static void ixgbe_schedule_fw_event(struct ixgbe_adapter *adapter)
+{
+	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
+	    !test_bit(__IXGBE_REMOVING, &adapter->state) &&
+	    !test_bit(__IXGBE_RESETTING, &adapter->state)) {
+		adapter->flags2 |= IXGBE_FLAG2_FW_ASYNC_EVENT;
+		ixgbe_service_event_schedule(adapter);
+	}
+}
+
+/**
+ * ixgbe_aci_event_cleanup - release msg_buf memory
+ * @event: pointer to the event holding msg_buf to be released
+ *
+ * Clean memory allocated for event's msg_buf. Implements auto memory cleanup.
+ */
+static void ixgbe_aci_event_cleanup(struct ixgbe_aci_event *event)
+{
+	kfree(event->msg_buf);
+}
+
+/**
+ * ixgbe_handle_fw_event - handle Firmware event
+ * @adapter: pointer to the adapter structure
+ *
+ * Obtain an event from the ACI and then process it according to the
+ * type of the event and the opcode.
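ixgbe_aci_event_cleanup() exists so ixgbe_handle_fw_event() below can declare its event with the kernel's __cleanup() annotation and have msg_buf freed on every exit path automatically. A standalone demo of the underlying compiler feature, the GCC/Clang cleanup attribute that __cleanup() wraps:

```c
#include <stdio.h>
#include <stdlib.h>

struct event { char *msg_buf; };

static void event_cleanup(struct event *e)
{
	free(e->msg_buf);	/* runs when 'e' goes out of scope */
	puts("msg_buf released");
}

int main(void)
{
	struct event e __attribute__((cleanup(event_cleanup))) = {
		.msg_buf = malloc(64),
	};

	if (!e.msg_buf)
		return 1;	/* cleanup still runs; free(NULL) is a no-op */

	/* ... use e.msg_buf; no explicit free on any return path ... */
	return 0;
}
```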
+ */ +static void ixgbe_handle_fw_event(struct ixgbe_adapter *adapter) +{ + struct ixgbe_aci_event event __cleanup(ixgbe_aci_event_cleanup); + struct ixgbe_hw *hw = &adapter->hw; + bool pending = false; + int err; + + if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT) + adapter->flags2 &= ~IXGBE_FLAG2_FW_ASYNC_EVENT; + event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) + return; + + do { + err = ixgbe_aci_get_event(hw, &event, &pending); + if (err) + break; + + switch (le16_to_cpu(event.desc.opcode)) { + case ixgbe_aci_opc_get_link_status: + ixgbe_handle_link_status_event(adapter, &event); + break; + default: + e_warn(hw, "unknown FW async event captured\n"); + break; + } + } while (pending); +} + static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, u64 qmask) { @@ -2982,6 +3208,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); @@ -3035,6 +3262,9 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_e610: + mask |= IXGBE_EIMS_FW_EVENT; + fallthrough; case ixgbe_mac_x550em_a: if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP || adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP || @@ -3091,12 +3321,16 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) if (eicr & IXGBE_EICR_MAILBOX) ixgbe_msg_task(adapter); + if (eicr & IXGBE_EICR_FW_EVENT) + ixgbe_schedule_fw_event(adapter); + switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: if (hw->phy.type == ixgbe_phy_x550em_ext_t && (eicr & IXGBE_EICR_GPI_SDP0_X540)) { adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; @@ -3334,6 +3568,9 @@ static irqreturn_t ixgbe_intr(int irq, void *data) if (eicr & IXGBE_EICR_LSC) ixgbe_check_lsc(adapter); + if (eicr & IXGBE_EICR_FW_EVENT) + ixgbe_schedule_fw_event(adapter); + switch (hw->mac.type) { case ixgbe_mac_82599EB: ixgbe_check_sfp_event(adapter, eicr); @@ -3342,6 +3579,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); @@ -3442,6 +3680,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); @@ -4359,6 +4598,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: if (adapter->num_vfs) rdrxctl |= IXGBE_RDRXCTL_PSP; fallthrough; @@ -4526,6 +4766,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; @@ -4564,6 +4805,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) 
case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; @@ -5148,6 +5390,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: dv_id = IXGBE_DV_X540(link, tc); break; default: @@ -5208,6 +5451,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: dv_id = IXGBE_LOW_DV_X540(tc); break; default: @@ -5510,6 +5754,48 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) } /** + * ixgbe_enable_link_status_events - enable link status events + * @adapter: pointer to the adapter structure + * @mask: event mask to be set + * + * Enables link status events by invoking ixgbe_configure_lse() + * + * Return: the exit code of the operation. + */ +static int ixgbe_enable_link_status_events(struct ixgbe_adapter *adapter, + u16 mask) +{ + int err; + + err = ixgbe_configure_lse(&adapter->hw, true, mask); + if (err) + return err; + + adapter->lse_mask = mask; + return 0; +} + +/** + * ixgbe_disable_link_status_events - disable link status events + * @adapter: pointer to the adapter structure + * + * Disables link status events by invoking ixgbe_configure_lse() + * + * Return: the exit code of the operation. + */ +static int ixgbe_disable_link_status_events(struct ixgbe_adapter *adapter) +{ + int err; + + err = ixgbe_configure_lse(&adapter->hw, false, adapter->lse_mask); + if (err) + return err; + + adapter->lse_mask = 0; + return 0; +} + +/** * ixgbe_sfp_link_config - set up SFP+ link * @adapter: pointer to private adapter struct **/ @@ -5532,13 +5818,21 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) * ixgbe_non_sfp_link_config - set up non-SFP+ link * @hw: pointer to private hardware struct * - * Returns 0 on success, negative on failure + * Configure non-SFP link. + * + * Return: 0 on success, negative on failure **/ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) { - u32 speed; + struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter, + hw); + u16 mask = ~((u16)(IXGBE_ACI_LINK_EVENT_UPDOWN | + IXGBE_ACI_LINK_EVENT_MEDIA_NA | + IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL | + IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL)); bool autoneg, link_up = false; int ret = -EIO; + u32 speed; if (hw->mac.ops.check_link) ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); @@ -5561,13 +5855,53 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) if (ret) return ret; - if (hw->mac.ops.setup_link) + if (hw->mac.ops.setup_link) { + if (adapter->hw.mac.type == ixgbe_mac_e610) { + ret = ixgbe_enable_link_status_events(adapter, mask); + if (ret) + return ret; + } ret = hw->mac.ops.setup_link(hw, speed, link_up); + } return ret; } /** + * ixgbe_check_media_subtask - check for media + * @adapter: pointer to adapter structure + * + * If media is available then initialize PHY user configuration. Configure the + * PHY if the interface is up. 
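The mask built in ixgbe_non_sfp_link_config() above can look inverted at first glance: following the complement construction in that function, a set bit masks an event off, so the events the driver wants reported are passed as the complement. A standalone sketch of that arithmetic with placeholder event values (the real IXGBE_ACI_LINK_EVENT_* encodings live in the E610 headers):

```c
#include <stdint.h>
#include <stdio.h>

#define EV_UPDOWN	0x0001u	/* placeholder event bits */
#define EV_MEDIA_NA	0x0002u

int main(void)
{
	uint16_t wanted = EV_UPDOWN | EV_MEDIA_NA;
	uint16_t mask = (uint16_t)~wanted;	/* everything else stays masked */

	printf("mask = 0x%04x, UPDOWN %s\n", (unsigned int)mask,
	       (mask & EV_UPDOWN) ? "masked" : "reported");
	return 0;
}
```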
+ */
+static void ixgbe_check_media_subtask(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	/* No need to check for media if it's already present */
+	if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA))
+		return;
+
+	/* Refresh link info and check if media is present */
+	if (ixgbe_update_link_info(hw))
+		return;
+
+	ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err);
+
+	if (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE) {
+		/* PHY settings are reset on media insertion, reconfigure
+		 * PHY to preserve settings.
+		 */
+		if (!(ixgbe_non_sfp_link_config(&adapter->hw)))
+			adapter->flags2 &= ~IXGBE_FLAG2_NO_MEDIA;
+
+		/* A Link Status Event will be generated; the event handler
+		 * will complete bringing the interface up
+		 */
+	}
+}
+
+/**
 * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
 * @adapter: board private structure
 *
@@ -5630,6 +5964,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_x550em_a:
+	case ixgbe_mac_e610:
 	default:
 		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
 		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -5980,6 +6315,7 @@ dma_engine_disable:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_x550em_a:
+	case ixgbe_mac_e610:
 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
 				(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
 				 ~IXGBE_DMATXCTL_TE));
@@ -6224,6 +6560,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	ixgbe_clean_all_tx_rings(adapter);
 	ixgbe_clean_all_rx_rings(adapter);
+	if (adapter->hw.mac.type == ixgbe_mac_e610)
+		ixgbe_disable_link_status_events(adapter);
 }
 
 /**
@@ -6279,6 +6617,7 @@ static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
 		break;
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
+	case ixgbe_mac_e610:
 		adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
 		adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
 		break;
@@ -6342,6 +6681,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 	hw->subsystem_device_id = pdev->subsystem_device;
 
+	hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME;
+
 	/* get_invariants needs the device IDs */
 	ii->get_invariants(hw);
 
@@ -6909,6 +7250,19 @@ int ixgbe_open(struct net_device *netdev)
 	ixgbe_up_complete(adapter);
 
 	udp_tunnel_nic_reset_ntf(netdev);
+	if (adapter->hw.mac.type == ixgbe_mac_e610) {
+		int err = ixgbe_update_link_info(&adapter->hw);
+
+		if (err)
+			e_dev_err("Failed to update link info, err %d.\n", err);
+
+		ixgbe_check_link_cfg_err(adapter,
+					 adapter->hw.link.link_info.link_cfg_err);
+
+		err = ixgbe_non_sfp_link_config(&adapter->hw);
+		if (err)
+			e_dev_err("Link setup failed, err %d.\n", err);
+	}
 
 	return 0;
 
@@ -7062,6 +7416,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_x550em_a:
+	case ixgbe_mac_e610:
 		pci_wake_from_d3(pdev, !!wufc);
 		break;
 	default:
@@ -7209,6 +7564,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 		case ixgbe_mac_X550:
 		case ixgbe_mac_X550EM_x:
 		case ixgbe_mac_x550em_a:
+		case ixgbe_mac_e610:
 			hwstats->pxonrxc[i] +=
 				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
 			break;
@@ -7221,11 +7577,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 	for (i = 0; i < 16; i++) {
 		hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
 		hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
-		if ((hw->mac.type == ixgbe_mac_82599EB) ||
-		    (hw->mac.type == ixgbe_mac_X540) ||
-		    (hw->mac.type ==
ixgbe_mac_X550) ||
-		    (hw->mac.type == ixgbe_mac_X550EM_x) ||
-		    (hw->mac.type == ixgbe_mac_x550em_a)) {
+		if (hw->mac.type == ixgbe_mac_82599EB ||
+		    hw->mac.type == ixgbe_mac_X540 ||
+		    hw->mac.type == ixgbe_mac_X550 ||
+		    hw->mac.type == ixgbe_mac_X550EM_x ||
+		    hw->mac.type == ixgbe_mac_x550em_a ||
+		    hw->mac.type == ixgbe_mac_e610) {
 			hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
 			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
 			hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -7251,6 +7608,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_x550em_a:
+	case ixgbe_mac_e610:
 		/* OS2BMC stats are X540 and later */
 		hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
 		hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
@@ -7551,6 +7909,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_x550em_a:
+	case ixgbe_mac_e610:
 	case ixgbe_mac_82599EB: {
 		u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
 		u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -8052,6 +8411,11 @@ static void ixgbe_service_task(struct work_struct *work)
 		ixgbe_service_event_complete(adapter);
 		return;
 	}
+	if (adapter->hw.mac.type == ixgbe_mac_e610) {
+		if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT)
+			ixgbe_handle_fw_event(adapter);
+		ixgbe_check_media_subtask(adapter);
+	}
 	ixgbe_reset_subtask(adapter);
 	ixgbe_phy_interrupt_subtask(adapter);
 	ixgbe_sfp_detection_subtask(adapter);
@@ -10771,6 +11135,24 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
 }
 
 /**
+ * ixgbe_set_fw_version_e610 - Set FW version specifically on E610 adapters
+ * @adapter: the adapter private structure
+ *
+ * This function is used by probe and ethtool to determine the FW version to
+ * format and display. The FW version is taken from the EEPROM/NVM.
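The snprintf() in ixgbe_set_fw_version_e610() below packs the NVM and OROM versions into the eeprom_id string that the driver exposes through ethtool. A standalone check of the format string with made-up version numbers:

```c
#include <stdio.h>

int main(void)
{
	/* hypothetical values; real ones come from the NVM/OROM info */
	unsigned int nvm_major = 0x2, nvm_minor = 0x22, eetrack = 0x8001a9f5;
	int orom_major = 1, orom_build = 5, orom_patch = 0;
	char eeprom_id[64];

	snprintf(eeprom_id, sizeof(eeprom_id), "%x.%02x 0x%x %d.%d.%d",
		 nvm_major, nvm_minor, eetrack, orom_major, orom_build,
		 orom_patch);
	puts(eeprom_id);	/* prints "2.22 0x8001a9f5 1.5.0" */
	return 0;
}
```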
+ * + */ +static void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter) +{ + struct ixgbe_orom_info *orom = &adapter->hw.flash.orom; + struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm; + + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor, + nvm->eetrack, orom->major, orom->build, orom->patch); +} + +/** * ixgbe_set_fw_version - Set FW version * @adapter: the adapter private structure * @@ -10782,6 +11164,11 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_nvm_version nvm_ver; + if (adapter->hw.mac.type == ixgbe_mac_e610) { + ixgbe_set_fw_version_e610(adapter); + return; + } + ixgbe_get_oem_prod_version(hw, &nvm_ver); if (nvm_ver.oem_valid) { snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), @@ -10868,6 +11255,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #else indices = IXGBE_MAX_RSS_INDICES; #endif + } else if (ii->mac == ixgbe_mac_e610) { + indices = IXGBE_MAX_RSS_INDICES_X550; } netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); @@ -10933,12 +11322,19 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; + if (adapter->hw.mac.type == ixgbe_mac_e610) { + err = ixgbe_get_caps(&adapter->hw); + if (err) + dev_err(&pdev->dev, "ixgbe_get_caps failed %d\n", err); + } + if (adapter->hw.mac.type == ixgbe_mac_82599EB) adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF; switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_e610: netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550; break; case ixgbe_mac_x550em_a: @@ -10959,6 +11355,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); break; default: @@ -11130,6 +11527,8 @@ skip_sriov: ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); ixgbe_mac_set_default_filter(adapter); + if (hw->mac.type == ixgbe_mac_e610) + mutex_init(&hw->aci.lock); timer_setup(&adapter->service_timer, ixgbe_service_timer, 0); if (ixgbe_removed(hw->hw_addr)) { @@ -11275,6 +11674,8 @@ err_netdev: err_register: ixgbe_release_hw_control(adapter); ixgbe_clear_interrupt_scheme(adapter); + if (hw->mac.type == ixgbe_mac_e610) + mutex_destroy(&adapter->hw.aci.lock); err_sw_init: ixgbe_disable_sriov(adapter); adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; @@ -11321,6 +11722,11 @@ static void ixgbe_remove(struct pci_dev *pdev) set_bit(__IXGBE_REMOVING, &adapter->state); cancel_work_sync(&adapter->service_task); + if (adapter->hw.mac.type == ixgbe_mac_e610) { + ixgbe_disable_link_status_events(adapter); + mutex_destroy(&adapter->hw.aci.lock); + } + if (adapter->mii_bus) mdiobus_unregister(adapter->mii_bus); @@ -11452,6 +11858,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, case ixgbe_mac_x550em_a: device_id = IXGBE_DEV_ID_X550EM_A_VF; break; + case ixgbe_mac_e610: + device_id = IXGBE_DEV_ID_E610_VF; + break; default: device_id = 0; break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index d67d77e5dacc..788b5af07c70 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> @@ -283,6 +283,7 @@ static int ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); break; default: @@ -407,6 +408,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) hw->mac.type != ixgbe_mac_X550 && hw->mac.type != ixgbe_mac_X550EM_x && hw->mac.type != ixgbe_mac_x550em_a && + hw->mac.type != ixgbe_mac_e610 && hw->mac.type != ixgbe_mac_X540) return; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 07eaa3c3f4d3..0a03a8bb5f88 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> @@ -1117,7 +1117,7 @@ int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, MDIO_MMD_AN, &autoneg_reg); - if (hw->mac.type == ixgbe_mac_X550) { + if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_e610) { /* Set or unset auto-negotiation 5G advertisement */ autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE; if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) && @@ -1233,6 +1233,7 @@ static int ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) switch (hw->mac.type) { case ixgbe_mac_X550: + case ixgbe_mac_e610: hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL; hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 9baccacd02a1..5fdf32d79d82 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. 
*/ #ifndef _IXGBE_TYPE_H_ #define _IXGBE_TYPE_H_ @@ -7,6 +7,7 @@ #include <linux/types.h> #include <linux/mdio.h> #include <linux/netdevice.h> +#include "ixgbe_type_e610.h" /* Device IDs */ #define IXGBE_DEV_ID_82598 0x10B6 @@ -71,12 +72,19 @@ #define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 #define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 +#define IXGBE_DEV_ID_E610_BACKPLANE 0x57AE +#define IXGBE_DEV_ID_E610_SFP 0x57AF +#define IXGBE_DEV_ID_E610_10G_T 0x57B0 +#define IXGBE_DEV_ID_E610_2_5G_T 0x57B1 +#define IXGBE_DEV_ID_E610_SGMII 0x57B2 + /* VF Device IDs */ #define IXGBE_DEV_ID_82599_VF 0x10ED #define IXGBE_DEV_ID_X540_VF 0x1515 #define IXGBE_DEV_ID_X550_VF 0x1565 #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 #define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 +#define IXGBE_DEV_ID_E610_VF 0x57AD #define IXGBE_CAT(r, m) IXGBE_##r##_##m @@ -1600,7 +1608,7 @@ enum { #define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */ #define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ #define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ -#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ +#define IXGBE_EICR_FW_EVENT 0x00200000 /* Async FW event */ #define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ #define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ #define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */ @@ -1636,6 +1644,7 @@ enum { #define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ #define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ #define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EICS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */ #define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ #define IXGBE_EICS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) @@ -1654,6 +1663,7 @@ enum { #define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ #define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ #define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */ #define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */ #define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ @@ -1673,6 +1683,7 @@ enum { #define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */ #define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ #define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMC_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */ #define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ #define IXGBE_EIMC_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) @@ -2068,6 +2079,7 @@ enum { #define IXGBE_SAN_MAC_ADDR_PTR 0x28 #define IXGBE_DEVICE_CAPS 0x2C #define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 +#define IXGBE_PCIE_MSIX_E610_CAPS 0xB2 #define IXGBE_PCIE_MSIX_82599_CAPS 0x72 #define IXGBE_MAX_MSIX_VECTORS_82599 0x40 #define IXGBE_PCIE_MSIX_82598_CAPS 0x62 @@ -2168,6 +2180,7 @@ enum { #define IXGBE_PCI_DEVICE_STATUS 0xAA #define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 #define IXGBE_PCI_LINK_STATUS 0xB2 +#define IXGBE_PCI_LINK_STATUS_E610 0x82 #define IXGBE_PCI_DEVICE_CONTROL2 0xC8 #define IXGBE_PCI_LINK_WIDTH 0x3F0 #define IXGBE_PCI_LINK_WIDTH_1 0x10 @@ -2288,6 +2301,7 @@ enum { #define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00 #define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ +#define
IXGBE_FCTRL_TPE 0x00000080 /* Tag Promiscuous Ena*/ #define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ #define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ #define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ @@ -2351,6 +2365,7 @@ enum { /* Multiple Transmit Queue Command Register */ #define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ #define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ +#define IXGBE_MTQC_NUM_TC_OR_Q 0xC /* Number of TCs or TxQs per pool */ #define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ #define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ #define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ @@ -2970,6 +2985,29 @@ typedef u32 ixgbe_link_speed; IXGBE_LINK_SPEED_1GB_FULL | \ IXGBE_LINK_SPEED_10GB_FULL) +/* Physical layer type */ +typedef u64 ixgbe_physical_layer; +#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 +#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001 +#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002 +#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004 +#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020 +#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080 +#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100 +#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200 +#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800 +#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000 +#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000 +#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000 +#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000 +#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000 +#define IXGBE_PHYSICAL_LAYER_2500BASE_T 0x20000 +#define IXGBE_PHYSICAL_LAYER_5000BASE_T 0x40000 + /* Flow Control Data Sheet defined values * Calculation and defines taken from 802.1bb Annex O */ @@ -3145,6 +3183,8 @@ enum ixgbe_mac_type { ixgbe_mac_X550, ixgbe_mac_X550EM_x, ixgbe_mac_x550em_a, + ixgbe_mac_e610, + ixgbe_mac_e610_vf, ixgbe_num_macs }; @@ -3224,7 +3264,9 @@ enum ixgbe_media_type { ixgbe_media_type_copper, ixgbe_media_type_backplane, ixgbe_media_type_cx4, - ixgbe_media_type_virtual + ixgbe_media_type_virtual, + ixgbe_media_type_da, + ixgbe_media_type_aui, }; /* Flow Control Settings */ @@ -3233,7 +3275,8 @@ enum ixgbe_fc_mode { ixgbe_fc_rx_pause, ixgbe_fc_tx_pause, ixgbe_fc_full, - ixgbe_fc_default + ixgbe_fc_default, + ixgbe_fc_pfc, }; /* Smart Speed Settings */ @@ -3533,6 +3576,9 @@ struct ixgbe_link_operations { struct ixgbe_link_info { struct ixgbe_link_operations ops; u8 addr; + struct ixgbe_link_status link_info; + struct ixgbe_link_status link_info_old; + u8 get_link_info; }; struct ixgbe_eeprom_info { @@ -3575,6 +3621,7 @@ struct ixgbe_mac_info { u8 san_mac_rar_index; struct ixgbe_thermal_sensor_data thermal_sensor_data; bool set_lben; + u32 max_link_up_time; u8 led_link_act; }; @@ -3599,6 +3646,10 @@ struct ixgbe_phy_info { bool reset_if_overtemp; bool qsfp_shared_i2c_bus; u32 nw_mng_if_sel; + u64 phy_type_low; + u64 phy_type_high; + u16 curr_user_speed_req; + struct ixgbe_aci_cmd_set_phy_cfg_data curr_user_phy_cfg; }; struct ixgbe_mbx_stats { @@ -3643,6 +3694,19 @@ struct ixgbe_hw { bool allow_unsupported_sfp; bool wol_enabled; bool need_crosstalk_fix; + u8 api_branch; + u8 api_maj_ver; + u8 api_min_ver; + u8 api_patch; + u8 fw_branch; + u8 fw_maj_ver; + u8 fw_min_ver; + u8 fw_patch; + u32 fw_build; + struct ixgbe_aci_info aci; + struct ixgbe_flash_info flash; + 
struct ixgbe_hw_dev_caps dev_caps; + struct ixgbe_hw_func_caps func_caps; }; struct ixgbe_info { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h new file mode 100644 index 000000000000..8d06ade3c7cd --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h @@ -0,0 +1,1074 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Intel Corporation. */ + +#ifndef _IXGBE_TYPE_E610_H_ +#define _IXGBE_TYPE_E610_H_ + +#define BYTES_PER_DWORD 4 + +/* General E610 defines */ +#define IXGBE_MAX_VSI 768 + +/* Checksum and Shadow RAM pointers */ +#define E610_SR_SW_CHECKSUM_WORD 0x3F + +/* Shadow RAM related */ +#define IXGBE_SR_WORDS_IN_1KB 512 + +/* Firmware Status Register (GL_FWSTS) */ +#define GL_FWSTS 0x00083048 /* Reset Source: POR */ +#define GL_FWSTS_EP_PF0 BIT(24) +#define GL_FWSTS_EP_PF1 BIT(25) + +/* Global NVM General Status Register */ +#define GLNVM_GENS 0x000B6100 /* Reset Source: POR */ +#define GLNVM_GENS_SR_SIZE_M GENMASK(7, 5) + +/* Flash Access Register */ +#define IXGBE_GLNVM_FLA 0x000B6108 /* Reset Source: POR */ +#define IXGBE_GLNVM_FLA_LOCKED_S 6 +#define IXGBE_GLNVM_FLA_LOCKED_M BIT(6) + +/* Admin Command Interface (ACI) registers */ +#define IXGBE_PF_HIDA(_i) (0x00085000 + ((_i) * 4)) +#define IXGBE_PF_HIDA_2(_i) (0x00085020 + ((_i) * 4)) +#define IXGBE_PF_HIBA(_i) (0x00084000 + ((_i) * 4)) +#define IXGBE_PF_HICR 0x00082048 + +#define IXGBE_PF_HICR_EN BIT(0) +#define IXGBE_PF_HICR_C BIT(1) +#define IXGBE_PF_HICR_SV BIT(2) +#define IXGBE_PF_HICR_EV BIT(3) + +#define IXGBE_ACI_DESC_SIZE 32 +#define IXGBE_ACI_DESC_SIZE_IN_DWORDS (IXGBE_ACI_DESC_SIZE / BYTES_PER_DWORD) + +#define IXGBE_ACI_MAX_BUFFER_SIZE 4096 /* Size in bytes */ +#define IXGBE_ACI_SEND_DELAY_TIME_MS 10 +#define IXGBE_ACI_SEND_MAX_EXECUTE 3 +#define IXGBE_ACI_SEND_TIMEOUT_MS \ + (IXGBE_ACI_SEND_MAX_EXECUTE * IXGBE_ACI_SEND_DELAY_TIME_MS) +/* [ms] timeout of waiting for sync response */ +#define IXGBE_ACI_SYNC_RESPONSE_TIMEOUT 100000 +/* [ms] timeout of waiting for async response */ +#define IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT 150000 +/* [ms] timeout of waiting for resource release */ +#define IXGBE_ACI_RELEASE_RES_TIMEOUT 10000 + +/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */ +#define IXGBE_ACI_LG_BUF 512 + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | + */ + +#define IXGBE_ACI_FLAG_DD BIT(0) /* 0x1 */ +#define IXGBE_ACI_FLAG_CMP BIT(1) /* 0x2 */ +#define IXGBE_ACI_FLAG_ERR BIT(2) /* 0x4 */ +#define IXGBE_ACI_FLAG_VFE BIT(3) /* 0x8 */ +#define IXGBE_ACI_FLAG_LB BIT(9) /* 0x200 */ +#define IXGBE_ACI_FLAG_RD BIT(10) /* 0x400 */ +#define IXGBE_ACI_FLAG_VFC BIT(11) /* 0x800 */ +#define IXGBE_ACI_FLAG_BUF BIT(12) /* 0x1000 */ +#define IXGBE_ACI_FLAG_SI BIT(13) /* 0x2000 */ +#define IXGBE_ACI_FLAG_EI BIT(14) /* 0x4000 */ +#define IXGBE_ACI_FLAG_FE BIT(15) /* 0x8000 */ + +/* Admin Command Interface (ACI) error codes */ +enum ixgbe_aci_err { + IXGBE_ACI_RC_OK = 0, /* Success */ + IXGBE_ACI_RC_EPERM = 1, /* Operation not permitted */ + IXGBE_ACI_RC_ENOENT = 2, /* No such element */ + IXGBE_ACI_RC_ESRCH = 3, /* Bad opcode */ + IXGBE_ACI_RC_EINTR = 4, /* Operation interrupted */ + IXGBE_ACI_RC_EIO = 5, /* I/O error */ + IXGBE_ACI_RC_ENXIO = 6, /* No such resource */ + IXGBE_ACI_RC_E2BIG = 7, /* Arg too long */ + IXGBE_ACI_RC_EAGAIN = 8, /* Try again */ + IXGBE_ACI_RC_ENOMEM = 9, /* Out of memory */ + 
IXGBE_ACI_RC_EACCES = 10, /* Permission denied */ + IXGBE_ACI_RC_EFAULT = 11, /* Bad address */ + IXGBE_ACI_RC_EBUSY = 12, /* Device or resource busy */ + IXGBE_ACI_RC_EEXIST = 13, /* Object already exists */ + IXGBE_ACI_RC_EINVAL = 14, /* Invalid argument */ + IXGBE_ACI_RC_ENOTTY = 15, /* Not a typewriter */ + IXGBE_ACI_RC_ENOSPC = 16, /* No space left or alloc failure */ + IXGBE_ACI_RC_ENOSYS = 17, /* Function not implemented */ + IXGBE_ACI_RC_ERANGE = 18, /* Parameter out of range */ + IXGBE_ACI_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + IXGBE_ACI_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + IXGBE_ACI_RC_EMODE = 21, /* Op not allowed in current dev mode */ + IXGBE_ACI_RC_EFBIG = 22, /* File too big */ + IXGBE_ACI_RC_ESBCOMP = 23, /* SB-IOSF completion unsuccessful */ + IXGBE_ACI_RC_ENOSEC = 24, /* Missing security manifest */ + IXGBE_ACI_RC_EBADSIG = 25, /* Bad RSA signature */ + IXGBE_ACI_RC_ESVN = 26, /* SVN number prohibits this package */ + IXGBE_ACI_RC_EBADMAN = 27, /* Manifest hash mismatch */ + IXGBE_ACI_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */ + IXGBE_ACI_RC_EACCES_BMCU = 29, /* BMC Update in progress */ +}; + +/* Admin Command Interface (ACI) opcodes */ +enum ixgbe_aci_opc { + ixgbe_aci_opc_get_ver = 0x0001, + ixgbe_aci_opc_driver_ver = 0x0002, + ixgbe_aci_opc_get_exp_err = 0x0005, + + /* resource ownership */ + ixgbe_aci_opc_req_res = 0x0008, + ixgbe_aci_opc_release_res = 0x0009, + + /* device/function capabilities */ + ixgbe_aci_opc_list_func_caps = 0x000A, + ixgbe_aci_opc_list_dev_caps = 0x000B, + + /* safe disable of RXEN */ + ixgbe_aci_opc_disable_rxen = 0x000C, + + /* FW events */ + ixgbe_aci_opc_get_fw_event = 0x0014, + + /* PHY commands */ + ixgbe_aci_opc_get_phy_caps = 0x0600, + ixgbe_aci_opc_set_phy_cfg = 0x0601, + ixgbe_aci_opc_restart_an = 0x0605, + ixgbe_aci_opc_get_link_status = 0x0607, + ixgbe_aci_opc_set_event_mask = 0x0613, + ixgbe_aci_opc_get_link_topo = 0x06E0, + ixgbe_aci_opc_get_link_topo_pin = 0x06E1, + ixgbe_aci_opc_read_i2c = 0x06E2, + ixgbe_aci_opc_write_i2c = 0x06E3, + ixgbe_aci_opc_read_mdio = 0x06E4, + ixgbe_aci_opc_write_mdio = 0x06E5, + ixgbe_aci_opc_set_gpio_by_func = 0x06E6, + ixgbe_aci_opc_get_gpio_by_func = 0x06E7, + ixgbe_aci_opc_set_gpio = 0x06EC, + ixgbe_aci_opc_get_gpio = 0x06ED, + ixgbe_aci_opc_sff_eeprom = 0x06EE, + ixgbe_aci_opc_prog_topo_dev_nvm = 0x06F2, + ixgbe_aci_opc_read_topo_dev_nvm = 0x06F3, + + /* NVM commands */ + ixgbe_aci_opc_nvm_read = 0x0701, + ixgbe_aci_opc_nvm_erase = 0x0702, + ixgbe_aci_opc_nvm_write = 0x0703, + ixgbe_aci_opc_nvm_cfg_read = 0x0704, + ixgbe_aci_opc_nvm_cfg_write = 0x0705, + ixgbe_aci_opc_nvm_checksum = 0x0706, + ixgbe_aci_opc_nvm_write_activate = 0x0707, + ixgbe_aci_opc_nvm_sr_dump = 0x0707, + ixgbe_aci_opc_nvm_save_factory_settings = 0x0708, + ixgbe_aci_opc_nvm_update_empr = 0x0709, + ixgbe_aci_opc_nvm_pkg_data = 0x070A, + ixgbe_aci_opc_nvm_pass_component_tbl = 0x070B, + + /* Alternate Structure Commands */ + ixgbe_aci_opc_write_alt_direct = 0x0900, + ixgbe_aci_opc_write_alt_indirect = 0x0901, + ixgbe_aci_opc_read_alt_direct = 0x0902, + ixgbe_aci_opc_read_alt_indirect = 0x0903, + ixgbe_aci_opc_done_alt_write = 0x0904, + ixgbe_aci_opc_clear_port_alt_write = 0x0906, + + /* debug commands */ + ixgbe_aci_opc_debug_dump_internals = 0xFF08, + + /* SystemDiagnostic commands */ + ixgbe_aci_opc_set_health_status_config = 0xFF20, + ixgbe_aci_opc_get_supported_health_status_codes = 0xFF21, + ixgbe_aci_opc_get_health_status = 0xFF22, + ixgbe_aci_opc_clear_health_status = 
0xFF23, +}; + +/* Get version (direct 0x0001) */ +struct ixgbe_aci_cmd_get_ver { + __le32 rom_ver; + __le32 fw_build; + u8 fw_branch; + u8 fw_major; + u8 fw_minor; + u8 fw_patch; + u8 api_branch; + u8 api_major; + u8 api_minor; + u8 api_patch; +}; + +#define IXGBE_DRV_VER_STR_LEN_E610 32 + +/* Send driver version (indirect 0x0002) */ +struct ixgbe_aci_cmd_driver_ver { + u8 major_ver; + u8 minor_ver; + u8 build_ver; + u8 subbuild_ver; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Get Expanded Error Code (0x0005, direct) */ +struct ixgbe_aci_cmd_get_exp_err { + __le32 reason; +#define IXGBE_ACI_EXPANDED_ERROR_NOT_PROVIDED 0xFFFFFFFF + __le32 identifier; + u8 rsvd[8]; +}; + +/* FW update timeout definitions are in milliseconds */ +#define IXGBE_NVM_TIMEOUT 180000 + +enum ixgbe_aci_res_access_type { + IXGBE_RES_READ = 1, + IXGBE_RES_WRITE +}; + +enum ixgbe_aci_res_ids { + IXGBE_NVM_RES_ID = 1, + IXGBE_SPD_RES_ID, + IXGBE_CHANGE_LOCK_RES_ID, + IXGBE_GLOBAL_CFG_LOCK_RES_ID +}; + +/* Request resource ownership (direct 0x0008) + * Release resource ownership (direct 0x0009) + */ +struct ixgbe_aci_cmd_req_res { + __le16 res_id; + __le16 access_type; + + /* Upon successful completion, FW writes this value and driver is + * expected to release resource before timeout. This value is provided + * in milliseconds. + */ + __le32 timeout; +#define IXGBE_ACI_RES_NVM_READ_DFLT_TIMEOUT_MS 3000 +#define IXGBE_ACI_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000 +#define IXGBE_ACI_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000 +#define IXGBE_ACI_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000 + /* For SDP: pin ID of the SDP */ + __le32 res_number; + __le16 status; +#define IXGBE_ACI_RES_GLBL_SUCCESS 0 +#define IXGBE_ACI_RES_GLBL_IN_PROG 1 +#define IXGBE_ACI_RES_GLBL_DONE 2 + u8 reserved[2]; +}; + +/* Get function capabilities (indirect 0x000A) + * Get device capabilities (indirect 0x000B) + */ +struct ixgbe_aci_cmd_list_caps { + u8 cmd_flags; + u8 pf_index; + u8 reserved[2]; + __le32 count; + __le32 addr_high; + __le32 addr_low; +}; + +/* Device/Function buffer entry, repeated per reported capability */ +struct ixgbe_aci_cmd_list_caps_elem { + __le16 cap; +#define IXGBE_ACI_CAPS_VALID_FUNCTIONS 0x0005 +#define IXGBE_ACI_MAX_VALID_FUNCTIONS 0x8 +#define IXGBE_ACI_CAPS_SRIOV 0x0012 +#define IXGBE_ACI_CAPS_VF 0x0013 +#define IXGBE_ACI_CAPS_VMDQ 0x0014 +#define IXGBE_ACI_CAPS_VSI 0x0017 +#define IXGBE_ACI_CAPS_DCB 0x0018 +#define IXGBE_ACI_CAPS_RSS 0x0040 +#define IXGBE_ACI_CAPS_RXQS 0x0041 +#define IXGBE_ACI_CAPS_TXQS 0x0042 +#define IXGBE_ACI_CAPS_MSIX 0x0043 +#define IXGBE_ACI_CAPS_FD 0x0045 +#define IXGBE_ACI_CAPS_1588 0x0046 +#define IXGBE_ACI_CAPS_MAX_MTU 0x0047 +#define IXGBE_ACI_CAPS_NVM_VER 0x0048 +#define IXGBE_ACI_CAPS_PENDING_NVM_VER 0x0049 +#define IXGBE_ACI_CAPS_OROM_VER 0x004A +#define IXGBE_ACI_CAPS_PENDING_OROM_VER 0x004B +#define IXGBE_ACI_CAPS_PENDING_NET_VER 0x004D +#define IXGBE_ACI_CAPS_INLINE_IPSEC 0x0070 +#define IXGBE_ACI_CAPS_NUM_ENABLED_PORTS 0x0072 +#define IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE 0x0076 +#define IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077 +#define IXGBE_ACI_CAPS_NVM_MGMT 0x0080 +#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0 0x0081 +#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1 0x0082 +#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2 0x0083 +#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3 0x0084 + u8 major_ver; + u8 minor_ver; + /* Number of resources described by this capability */ + __le32 number; + /* Only meaningful for some types of resources */ + __le32 logical_id; + /* Only meaningful for some types of 
resources */ + __le32 phys_id; + __le64 rsvd1; + __le64 rsvd2; +}; + +/* Disable RXEN (direct 0x000C) */ +struct ixgbe_aci_cmd_disable_rxen { + u8 lport_num; + u8 reserved[15]; +}; + +/* Get PHY capabilities (indirect 0x0600) */ +struct ixgbe_aci_cmd_get_phy_caps { + u8 lport_num; + u8 reserved; + __le16 param0; + /* 18.0 - Report qualified modules */ +#define IXGBE_ACI_GET_PHY_RQM BIT(0) + /* 18.1 - 18.3 : Report mode + * 000b - Report topology capabilities, without media + * 001b - Report topology capabilities, with media + * 010b - Report Active configuration + * 011b - Report PHY Type and FEC mode capabilities + * 100b - Report Default capabilities + */ +#define IXGBE_ACI_REPORT_MODE_M GENMASK(3, 1) +#define IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA 0 +#define IXGBE_ACI_REPORT_TOPO_CAP_MEDIA BIT(1) +#define IXGBE_ACI_REPORT_ACTIVE_CFG BIT(2) +#define IXGBE_ACI_REPORT_DFLT_CFG BIT(3) + __le32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +/* This is #define of PHY type (Extended): + * The first set of defines is for phy_type_low. + */ +#define IXGBE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0) +#define IXGBE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1) +#define IXGBE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2) +#define IXGBE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3) +#define IXGBE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4) +#define IXGBE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5) +#define IXGBE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6) +#define IXGBE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7) +#define IXGBE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8) +#define IXGBE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9) +#define IXGBE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10) +#define IXGBE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11) +#define IXGBE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12) +#define IXGBE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13) +#define IXGBE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14) +#define IXGBE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15) +#define IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16) +#define IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17) +#define IXGBE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18) +#define IXGBE_PHY_TYPE_LOW_25GBASE_T BIT_ULL(19) +#define IXGBE_PHY_TYPE_LOW_25GBASE_CR BIT_ULL(20) +#define IXGBE_PHY_TYPE_LOW_25GBASE_CR_S BIT_ULL(21) +#define IXGBE_PHY_TYPE_LOW_25GBASE_CR1 BIT_ULL(22) +#define IXGBE_PHY_TYPE_LOW_25GBASE_SR BIT_ULL(23) +#define IXGBE_PHY_TYPE_LOW_25GBASE_LR BIT_ULL(24) +#define IXGBE_PHY_TYPE_LOW_25GBASE_KR BIT_ULL(25) +#define IXGBE_PHY_TYPE_LOW_25GBASE_KR_S BIT_ULL(26) +#define IXGBE_PHY_TYPE_LOW_25GBASE_KR1 BIT_ULL(27) +#define IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC BIT_ULL(28) +#define IXGBE_PHY_TYPE_LOW_25G_AUI_C2C BIT_ULL(29) +#define IXGBE_PHY_TYPE_LOW_MAX_INDEX 29 +/* The second set of defines is for phy_type_high. 
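 *
 * Both words are plain bitmasks, so once a get_phy_caps response is in
 * hand (struct ixgbe_aci_cmd_get_phy_caps_data below), testing a media
 * type is a single mask operation, e.g. (illustrative sketch only):
 *
 *	supports_10base_t = le64_to_cpu(pcaps->phy_type_high) &
 *			    IXGBE_PHY_TYPE_HIGH_10BASE_T;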
*/ +#define IXGBE_PHY_TYPE_HIGH_10BASE_T BIT_ULL(1) +#define IXGBE_PHY_TYPE_HIGH_10M_SGMII BIT_ULL(2) +#define IXGBE_PHY_TYPE_HIGH_2500M_SGMII BIT_ULL(56) +#define IXGBE_PHY_TYPE_HIGH_100M_USXGMII BIT_ULL(57) +#define IXGBE_PHY_TYPE_HIGH_1G_USXGMII BIT_ULL(58) +#define IXGBE_PHY_TYPE_HIGH_2500M_USXGMII BIT_ULL(59) +#define IXGBE_PHY_TYPE_HIGH_5G_USXGMII BIT_ULL(60) +#define IXGBE_PHY_TYPE_HIGH_10G_USXGMII BIT_ULL(61) +#define IXGBE_PHY_TYPE_HIGH_MAX_INDEX 61 + +struct ixgbe_aci_cmd_get_phy_caps_data { + __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */ + __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */ + u8 caps; +#define IXGBE_ACI_PHY_EN_TX_LINK_PAUSE BIT(0) +#define IXGBE_ACI_PHY_EN_RX_LINK_PAUSE BIT(1) +#define IXGBE_ACI_PHY_LOW_POWER_MODE BIT(2) +#define IXGBE_ACI_PHY_EN_LINK BIT(3) +#define IXGBE_ACI_PHY_AN_MODE BIT(4) +#define IXGBE_ACI_PHY_EN_MOD_QUAL BIT(5) +#define IXGBE_ACI_PHY_EN_LESM BIT(6) +#define IXGBE_ACI_PHY_EN_AUTO_FEC BIT(7) +#define IXGBE_ACI_PHY_CAPS_MASK GENMASK(7, 0) + u8 low_power_ctrl_an; +#define IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0) +#define IXGBE_ACI_PHY_AN_EN_CLAUSE28 BIT(1) +#define IXGBE_ACI_PHY_AN_EN_CLAUSE73 BIT(2) +#define IXGBE_ACI_PHY_AN_EN_CLAUSE37 BIT(3) + __le16 eee_cap; +#define IXGBE_ACI_PHY_EEE_EN_100BASE_TX BIT(0) +#define IXGBE_ACI_PHY_EEE_EN_1000BASE_T BIT(1) +#define IXGBE_ACI_PHY_EEE_EN_10GBASE_T BIT(2) +#define IXGBE_ACI_PHY_EEE_EN_1000BASE_KX BIT(3) +#define IXGBE_ACI_PHY_EEE_EN_10GBASE_KR BIT(4) +#define IXGBE_ACI_PHY_EEE_EN_25GBASE_KR BIT(5) +#define IXGBE_ACI_PHY_EEE_EN_10BASE_T BIT(11) + __le16 eeer_value; + u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */ + u8 phy_fw_ver[8]; + u8 link_fec_options; +#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_EN BIT(0) +#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1) +#define IXGBE_ACI_PHY_FEC_25G_RS_528_REQ BIT(2) +#define IXGBE_ACI_PHY_FEC_25G_KR_REQ BIT(3) +#define IXGBE_ACI_PHY_FEC_25G_RS_544_REQ BIT(4) +#define IXGBE_ACI_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6) +#define IXGBE_ACI_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7) +#define IXGBE_ACI_PHY_FEC_MASK 0xdf + u8 module_compliance_enforcement; +#define IXGBE_ACI_MOD_ENFORCE_STRICT_MODE BIT(0) + u8 extended_compliance_code; +#define IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE 3 + u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE]; +#define IXGBE_ACI_MOD_TYPE_BYTE0_SFP_PLUS 0xA0 +#define IXGBE_ACI_MOD_TYPE_BYTE0_QSFP_PLUS 0x80 +#define IXGBE_ACI_MOD_TYPE_IDENT 1 +#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0) +#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1) +#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4) +#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5) +#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6) +#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7) +#define IXGBE_ACI_MOD_TYPE_BYTE2_SFP_PLUS 0xA0 +#define IXGBE_ACI_MOD_TYPE_BYTE2_QSFP_PLUS 0x86 + u8 qualified_module_count; + u8 rsvd2[7]; /* Bytes 47:41 reserved */ +#define IXGBE_ACI_QUAL_MOD_COUNT_MAX 16 + struct { + u8 v_oui[3]; + u8 rsvd3; + u8 v_part[16]; + __le32 v_rev; + __le64 rsvd4; + } qual_modules[IXGBE_ACI_QUAL_MOD_COUNT_MAX]; +}; + +/* Set PHY capabilities (direct 0x0601) + * NOTE: This command must be followed by setup link and restart auto-neg + */ +struct ixgbe_aci_cmd_set_phy_cfg { + u8 lport_num; + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Set PHY config command data structure */ +struct ixgbe_aci_cmd_set_phy_cfg_data { + __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */ + __le64 
phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */ + u8 caps; +#define IXGBE_ACI_PHY_ENA_VALID_MASK 0xef +#define IXGBE_ACI_PHY_ENA_TX_PAUSE_ABILITY BIT(0) +#define IXGBE_ACI_PHY_ENA_RX_PAUSE_ABILITY BIT(1) +#define IXGBE_ACI_PHY_ENA_LOW_POWER BIT(2) +#define IXGBE_ACI_PHY_ENA_LINK BIT(3) +#define IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT BIT(5) +#define IXGBE_ACI_PHY_ENA_LESM BIT(6) +#define IXGBE_ACI_PHY_ENA_AUTO_FEC BIT(7) + u8 low_power_ctrl_an; + __le16 eee_cap; /* Value from ixgbe_aci_get_phy_caps */ + __le16 eeer_value; /* Use defines from ixgbe_aci_get_phy_caps */ + u8 link_fec_opt; /* Use defines from ixgbe_aci_get_phy_caps */ + u8 module_compliance_enforcement; +}; + +/* Restart AN command data structure (direct 0x0605) + * Also used for response, with only the lport_num field present. + */ +struct ixgbe_aci_cmd_restart_an { + u8 lport_num; + u8 reserved; + u8 cmd_flags; +#define IXGBE_ACI_RESTART_AN_LINK_RESTART BIT(1) +#define IXGBE_ACI_RESTART_AN_LINK_ENABLE BIT(2) + u8 reserved2[13]; +}; + +/* Get link status (indirect 0x0607), also used for Link Status Event */ +struct ixgbe_aci_cmd_get_link_status { + u8 lport_num; + u8 reserved; + __le16 cmd_flags; +#define IXGBE_ACI_LSE_M GENMASK(1, 0) +#define IXGBE_ACI_LSE_NOP 0x0 +#define IXGBE_ACI_LSE_DIS 0x2 +#define IXGBE_ACI_LSE_ENA 0x3 + /* only response uses this flag */ +#define IXGBE_ACI_LSE_IS_ENABLED 0x1 + __le32 reserved2; + __le32 addr_high; + __le32 addr_low; +}; + +/* Get link status response data structure, also used for Link Status Event */ +struct ixgbe_aci_cmd_get_link_status_data { + u8 topo_media_conflict; +#define IXGBE_ACI_LINK_TOPO_CONFLICT BIT(0) +#define IXGBE_ACI_LINK_MEDIA_CONFLICT BIT(1) +#define IXGBE_ACI_LINK_TOPO_CORRUPT BIT(2) +#define IXGBE_ACI_LINK_TOPO_UNREACH_PRT BIT(4) +#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_PRT BIT(5) +#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_MEDIA BIT(6) +#define IXGBE_ACI_LINK_TOPO_UNSUPP_MEDIA BIT(7) + u8 link_cfg_err; +#define IXGBE_ACI_LINK_CFG_ERR BIT(0) +#define IXGBE_ACI_LINK_CFG_COMPLETED BIT(1) +#define IXGBE_ACI_LINK_ACT_PORT_OPT_INVAL BIT(2) +#define IXGBE_ACI_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3) +#define IXGBE_ACI_LINK_TOPO_CRITICAL_SDP_ERR BIT(4) +#define IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED BIT(5) +#define IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6) +#define IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT BIT(7) + u8 link_info; +#define IXGBE_ACI_LINK_UP BIT(0) /* Link Status */ +#define IXGBE_ACI_LINK_FAULT BIT(1) +#define IXGBE_ACI_LINK_FAULT_TX BIT(2) +#define IXGBE_ACI_LINK_FAULT_RX BIT(3) +#define IXGBE_ACI_LINK_FAULT_REMOTE BIT(4) +#define IXGBE_ACI_LINK_UP_PORT BIT(5) /* External Port Link Status */ +#define IXGBE_ACI_MEDIA_AVAILABLE BIT(6) +#define IXGBE_ACI_SIGNAL_DETECT BIT(7) + u8 an_info; +#define IXGBE_ACI_AN_COMPLETED BIT(0) +#define IXGBE_ACI_LP_AN_ABILITY BIT(1) +#define IXGBE_ACI_PD_FAULT BIT(2) /* Parallel Detection Fault */ +#define IXGBE_ACI_FEC_EN BIT(3) +#define IXGBE_ACI_PHY_LOW_POWER BIT(4) /* Low Power State */ +#define IXGBE_ACI_LINK_PAUSE_TX BIT(5) +#define IXGBE_ACI_LINK_PAUSE_RX BIT(6) +#define IXGBE_ACI_QUALIFIED_MODULE BIT(7) + u8 ext_info; +#define IXGBE_ACI_LINK_PHY_TEMP_ALARM BIT(0) +#define IXGBE_ACI_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */ + /* Port Tx Suspended */ +#define IXGBE_ACI_LINK_TX_ACTIVE 0 +#define IXGBE_ACI_LINK_TX_DRAINED 1 +#define IXGBE_ACI_LINK_TX_FLUSHED 3 + u8 lb_status; +#define IXGBE_ACI_LINK_LB_PHY_LCL BIT(0) +#define IXGBE_ACI_LINK_LB_PHY_RMT BIT(1) +#define IXGBE_ACI_LINK_LB_MAC_LCL BIT(2) + __le16 
max_frame_size; + u8 cfg; +#define IXGBE_ACI_LINK_25G_KR_FEC_EN BIT(0) +#define IXGBE_ACI_LINK_25G_RS_528_FEC_EN BIT(1) +#define IXGBE_ACI_LINK_25G_RS_544_FEC_EN BIT(2) +#define IXGBE_ACI_FEC_MASK GENMASK(2, 0) + /* Pacing Config */ +#define IXGBE_ACI_CFG_PACING_M GENMASK(6, 3) +#define IXGBE_ACI_CFG_PACING_TYPE_M BIT(7) +#define IXGBE_ACI_CFG_PACING_TYPE_AVG 0 +#define IXGBE_ACI_CFG_PACING_TYPE_FIXED IXGBE_ACI_CFG_PACING_TYPE_M + /* External Device Power Ability */ + u8 power_desc; +#define IXGBE_ACI_PWR_CLASS_M GENMASK(5, 0) +#define IXGBE_ACI_LINK_PWR_BASET_LOW_HIGH 0 +#define IXGBE_ACI_LINK_PWR_BASET_HIGH 1 +#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_1 0 +#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_2 1 +#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_3 2 +#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_4 3 + __le16 link_speed; +#define IXGBE_ACI_LINK_SPEED_M GENMASK(10, 0) +#define IXGBE_ACI_LINK_SPEED_10MB BIT(0) +#define IXGBE_ACI_LINK_SPEED_100MB BIT(1) +#define IXGBE_ACI_LINK_SPEED_1000MB BIT(2) +#define IXGBE_ACI_LINK_SPEED_2500MB BIT(3) +#define IXGBE_ACI_LINK_SPEED_5GB BIT(4) +#define IXGBE_ACI_LINK_SPEED_10GB BIT(5) +#define IXGBE_ACI_LINK_SPEED_20GB BIT(6) +#define IXGBE_ACI_LINK_SPEED_25GB BIT(7) +#define IXGBE_ACI_LINK_SPEED_40GB BIT(8) +#define IXGBE_ACI_LINK_SPEED_50GB BIT(9) +#define IXGBE_ACI_LINK_SPEED_100GB BIT(10) +#define IXGBE_ACI_LINK_SPEED_200GB BIT(11) +#define IXGBE_ACI_LINK_SPEED_UNKNOWN BIT(15) + __le16 reserved3; + u8 ext_fec_status; +#define IXGBE_ACI_LINK_RS_272_FEC_EN BIT(0) /* RS 272 FEC enabled */ + u8 reserved4; + __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ + __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ + /* Get link status version 2 link partner data */ + __le64 lp_phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ + __le64 lp_phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ + u8 lp_fec_adv; +#define IXGBE_ACI_LINK_LP_10G_KR_FEC_CAP BIT(0) +#define IXGBE_ACI_LINK_LP_25G_KR_FEC_CAP BIT(1) +#define IXGBE_ACI_LINK_LP_RS_528_FEC_CAP BIT(2) +#define IXGBE_ACI_LINK_LP_50G_KR_272_FEC_CAP BIT(3) +#define IXGBE_ACI_LINK_LP_100G_KR_272_FEC_CAP BIT(4) +#define IXGBE_ACI_LINK_LP_200G_KR_272_FEC_CAP BIT(5) + u8 lp_fec_req; +#define IXGBE_ACI_LINK_LP_10G_KR_FEC_REQ BIT(0) +#define IXGBE_ACI_LINK_LP_25G_KR_FEC_REQ BIT(1) +#define IXGBE_ACI_LINK_LP_RS_528_FEC_REQ BIT(2) +#define IXGBE_ACI_LINK_LP_KR_272_FEC_REQ BIT(3) + u8 lp_flowcontrol; +#define IXGBE_ACI_LINK_LP_PAUSE_ADV BIT(0) +#define IXGBE_ACI_LINK_LP_ASM_DIR_ADV BIT(1) + u8 reserved5[5]; +} __packed; + +/* Set event mask command (direct 0x0613) */ +struct ixgbe_aci_cmd_set_event_mask { + u8 lport_num; + u8 reserved[7]; + __le16 event_mask; +#define IXGBE_ACI_LINK_EVENT_UPDOWN BIT(1) +#define IXGBE_ACI_LINK_EVENT_MEDIA_NA BIT(2) +#define IXGBE_ACI_LINK_EVENT_LINK_FAULT BIT(3) +#define IXGBE_ACI_LINK_EVENT_PHY_TEMP_ALARM BIT(4) +#define IXGBE_ACI_LINK_EVENT_EXCESSIVE_ERRORS BIT(5) +#define IXGBE_ACI_LINK_EVENT_SIGNAL_DETECT BIT(6) +#define IXGBE_ACI_LINK_EVENT_AN_COMPLETED BIT(7) +#define IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL BIT(8) +#define IXGBE_ACI_LINK_EVENT_PORT_TX_SUSPENDED BIT(9) +#define IXGBE_ACI_LINK_EVENT_TOPO_CONFLICT BIT(10) +#define IXGBE_ACI_LINK_EVENT_MEDIA_CONFLICT BIT(11) +#define IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12) + u8 reserved1[6]; +}; + +struct ixgbe_aci_cmd_link_topo_params { + u8 lport_num; + u8 lport_num_valid; +#define IXGBE_ACI_LINK_TOPO_PORT_NUM_VALID BIT(0) + u8 node_type_ctx; +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_M GENMASK(3, 0) +#define 
IXGBE_ACI_LINK_TOPO_NODE_TYPE_PHY 0 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MUX_CTRL 2 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED_CTRL 3 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED 4 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_THERMAL 5 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE 6 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MEZZ 7 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_ID_EEPROM 8 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CLK_CTRL 9 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CLK_MUX 10 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPS 11 +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_S 4 +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_M GENMASK(7, 4) +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_GLOBAL 0 +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_BOARD 1 +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT 2 +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE 3 +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE 4 +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_DIRECT_BUS_ACCESS 5 +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE_BUS_ADDRESS 6 + u8 index; +}; + +struct ixgbe_aci_cmd_link_topo_addr { + struct ixgbe_aci_cmd_link_topo_params topo_params; + __le16 handle; +/* Used to decode the handle field */ +#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9) +#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9) +#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0 +}; + +/* Get Link Topology Handle (direct, 0x06E0) */ +struct ixgbe_aci_cmd_get_link_topo { + struct ixgbe_aci_cmd_link_topo_addr addr; + u8 node_part_num; +#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_PCA9575 0x21 +#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_ZL30632_80032 0x24 +#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_SI5384 0x25 +#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_C827 0x31 +#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX 0x47 +#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48 +#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_E610_PTC 0x49 + u8 rsvd[9]; +}; + +/* Get Link Topology Pin (direct, 0x06E1) */ +struct ixgbe_aci_cmd_get_link_topo_pin { + struct ixgbe_aci_cmd_link_topo_addr addr; + u8 input_io_params; +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GPIO 0 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RESET_N 1 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_INT_N 2 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_PRESENT_N 3 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_DIS 4 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_MODSEL_N 5 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LPMODE 6 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_FAULT 7 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RX_LOSS 8 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS0 9 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS1 10 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_EEPROM_WP 11 +/* 12 repeats intentionally due to two different uses depending on context */ +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LED 12 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RED_LED 12 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GREEN_LED 13 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_BLUE_LED 14 +#define IXGBE_ACI_LINK_TOPO_INPUT_IO_TYPE_GPIO 3 +/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */ + u8 output_io_params; +/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */ + u8 output_io_flags; +#define IXGBE_ACI_LINK_TOPO_OUTPUT_POLARITY BIT(5) +#define IXGBE_ACI_LINK_TOPO_OUTPUT_VALUE BIT(6) +#define IXGBE_ACI_LINK_TOPO_OUTPUT_DRIVEN BIT(7) + u8 rsvd[7]; +}; + +/* Read/Write SFF EEPROM command (indirect 0x06EE) */ +struct ixgbe_aci_cmd_sff_eeprom { + u8 lport_num; + u8 lport_num_valid; +#define IXGBE_ACI_SFF_PORT_NUM_VALID BIT(0) + __le16 i2c_bus_addr; +#define IXGBE_ACI_SFF_I2CBUS_7BIT_M GENMASK(6, 0) +#define 
IXGBE_ACI_SFF_I2CBUS_10BIT_M GENMASK(9, 0) +#define IXGBE_ACI_SFF_I2CBUS_TYPE_M BIT(10) +#define IXGBE_ACI_SFF_I2CBUS_TYPE_7BIT 0 +#define IXGBE_ACI_SFF_I2CBUS_TYPE_10BIT IXGBE_ACI_SFF_I2CBUS_TYPE_M +#define IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE 0 +#define IXGBE_ACI_SFF_UPDATE_PAGE 1 +#define IXGBE_ACI_SFF_UPDATE_BANK 2 +#define IXGBE_ACI_SFF_UPDATE_PAGE_BANK 3 +#define IXGBE_ACI_SFF_IS_WRITE BIT(15) + __le16 i2c_offset; + u8 module_bank; + u8 module_page; + __le32 addr_high; + __le32 addr_low; +}; + +/* NVM Read command (indirect 0x0701) + * NVM Erase commands (direct 0x0702) + * NVM Write commands (indirect 0x0703) + * NVM Write Activate commands (direct 0x0707) + * NVM Shadow RAM Dump commands (direct 0x0707) + */ +struct ixgbe_aci_cmd_nvm { +#define IXGBE_ACI_NVM_MAX_OFFSET 0xFFFFFF + __le16 offset_low; + u8 offset_high; /* For Write Activate offset_high is used as flags2 */ + u8 cmd_flags; +#define IXGBE_ACI_NVM_LAST_CMD BIT(0) +#define IXGBE_ACI_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */ +#define IXGBE_ACI_NVM_PRESERVE_ALL BIT(1) +#define IXGBE_ACI_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */ +#define IXGBE_ACI_NVM_ACTIV_SEL_OROM BIT(4) +#define IXGBE_ACI_NVM_ACTIV_SEL_NETLIST BIT(5) +#define IXGBE_ACI_NVM_SPECIAL_UPDATE BIT(6) +#define IXGBE_ACI_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */ +#define IXGBE_ACI_NVM_FLASH_ONLY BIT(7) +#define IXGBE_ACI_NVM_RESET_LVL_M GENMASK(1, 0) /* Write reply only */ +#define IXGBE_ACI_NVM_POR_FLAG 0 +#define IXGBE_ACI_NVM_PERST_FLAG 1 +#define IXGBE_ACI_NVM_EMPR_FLAG 2 +#define IXGBE_ACI_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */ + /* For Write Activate, several flags are sent as part of a separate + * flags2 field using a separate byte. For simplicity of the software + * interface, we pass the flags as a 16 bit value so these flags are + * all offset by 8 bits + */ +#define IXGBE_ACI_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */ + __le16 module_typeid; + __le16 length; +#define IXGBE_ACI_NVM_ERASE_LEN 0xFFFF + __le32 addr_high; + __le32 addr_low; +}; + +/* NVM Module_Type ID, needed offset and read_len for + * struct ixgbe_aci_cmd_nvm. + */ +#define IXGBE_ACI_NVM_START_POINT 0 + +/* NVM Checksum Command (direct, 0x0706) */ +struct ixgbe_aci_cmd_nvm_checksum { + u8 flags; +#define IXGBE_ACI_NVM_CHECKSUM_VERIFY BIT(0) +#define IXGBE_ACI_NVM_CHECKSUM_RECALC BIT(1) + u8 rsvd; + __le16 checksum; /* Used only by response */ +#define IXGBE_ACI_NVM_CHECKSUM_CORRECT 0xBABA + u8 rsvd2[12]; +}; + +/** + * struct ixgbe_aci_desc - Admin Command (AC) descriptor + * @flags: IXGBE_ACI_FLAG_* flags + * @opcode: Admin command opcode + * @datalen: length in bytes of indirect/external data buffer + * @retval: return value from firmware + * @cookie_high: opaque data high-half + * @cookie_low: opaque data low-half + * @params: command-specific parameters + * + * Descriptor format for commands the driver posts via the + * Admin Command Interface (ACI). + * The firmware writes back onto the command descriptor and returns + * the result of the command. Asynchronous events that are not an immediate + * result of the command are written to the Admin Command Interface (ACI) using + * the same descriptor format. Descriptors are in little-endian notation with + * 32-bit words. 
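+ *
+ * A minimal direct-command fill looks like this (sketch only; the send
+ * helper named here is assumed to be provided elsewhere in the series):
+ *
+ *	struct ixgbe_aci_desc desc = {};
+ *
+ *	desc.opcode = cpu_to_le16(ixgbe_aci_opc_get_ver);
+ *	err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ *
+ * Indirect commands additionally set IXGBE_ACI_FLAG_BUF (plus
+ * IXGBE_ACI_FLAG_RD when the buffer carries data for the firmware),
+ * place the buffer length in @datalen and its address in the
+ * command-specific addr_high/addr_low words. On completion the firmware
+ * sets IXGBE_ACI_FLAG_DD in @flags and reports an enum ixgbe_aci_err
+ * value in @retval.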
+ */ +struct ixgbe_aci_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 retval; + __le32 cookie_high; + __le32 cookie_low; + union { + u8 raw[16]; + struct ixgbe_aci_cmd_get_ver get_ver; + struct ixgbe_aci_cmd_driver_ver driver_ver; + struct ixgbe_aci_cmd_get_exp_err exp_err; + struct ixgbe_aci_cmd_req_res res_owner; + struct ixgbe_aci_cmd_list_caps get_cap; + struct ixgbe_aci_cmd_disable_rxen disable_rxen; + struct ixgbe_aci_cmd_get_phy_caps get_phy; + struct ixgbe_aci_cmd_set_phy_cfg set_phy; + struct ixgbe_aci_cmd_restart_an restart_an; + struct ixgbe_aci_cmd_get_link_status get_link_status; + struct ixgbe_aci_cmd_set_event_mask set_event_mask; + struct ixgbe_aci_cmd_get_link_topo get_link_topo; + struct ixgbe_aci_cmd_get_link_topo_pin get_link_topo_pin; + struct ixgbe_aci_cmd_sff_eeprom read_write_sff_param; + struct ixgbe_aci_cmd_nvm nvm; + struct ixgbe_aci_cmd_nvm_checksum nvm_checksum; + } params; +}; + +/* E610-specific adapter context structures */ + +struct ixgbe_link_status { + /* Refer to ixgbe_aci_phy_type for bits definition */ + u64 phy_type_low; + u64 phy_type_high; + u16 max_frame_size; + u16 link_speed; + u16 req_speeds; + u8 topo_media_conflict; + u8 link_cfg_err; + u8 lse_ena; /* Link Status Event notification */ + u8 link_info; + u8 an_info; + u8 ext_info; + u8 fec_info; + u8 pacing; + /* Refer to #define from module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE] + * of ixgbe_aci_get_phy_caps structure + */ + u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE]; +}; + +/* Common HW capabilities for SW use */ +struct ixgbe_hw_caps { + /* Write CSR protection */ + u64 wr_csr_prot; + u32 switching_mode; + /* switching mode supported - EVB switching (including cloud) */ +#define IXGBE_NVM_IMAGE_TYPE_EVB 0x0 + + /* Manageability mode & supported protocols over MCTP */ + u32 mgmt_mode; +#define IXGBE_MGMT_MODE_PASS_THRU_MODE_M GENMASK(3, 0) +#define IXGBE_MGMT_MODE_CTL_INTERFACE_M GENMASK(7, 4) +#define IXGBE_MGMT_MODE_REDIR_SB_INTERFACE_M GENMASK(11, 8) + + u32 mgmt_protocols_mctp; +#define IXGBE_MGMT_MODE_PROTO_RSVD BIT(0) +#define IXGBE_MGMT_MODE_PROTO_PLDM BIT(1) +#define IXGBE_MGMT_MODE_PROTO_OEM BIT(2) +#define IXGBE_MGMT_MODE_PROTO_NC_SI BIT(3) + + u32 os2bmc; + u32 valid_functions; + /* DCB capabilities */ + u32 active_tc_bitmap; + u32 maxtc; + + /* RSS related capabilities */ + u32 rss_table_size; /* 512 for PFs and 64 for VFs */ + u32 rss_table_entry_width; /* RSS Entry width in bits */ + + /* Tx/Rx queues */ + u32 num_rxq; /* Number/Total Rx queues */ + u32 rxq_first_id; /* First queue ID for Rx queues */ + u32 num_txq; /* Number/Total Tx queues */ + u32 txq_first_id; /* First queue ID for Tx queues */ + + /* MSI-X vectors */ + u32 num_msix_vectors; + u32 msix_vector_first_id; + + /* Max MTU for function or device */ + u32 max_mtu; + + /* WOL related */ + u32 num_wol_proxy_fltr; + u32 wol_proxy_vsi_seid; + + /* LED/SDP pin count */ + u32 led_pin_num; + u32 sdp_pin_num; + + /* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */ +#define IXGBE_MAX_SUPPORTED_GPIO_LED 12 +#define IXGBE_MAX_SUPPORTED_GPIO_SDP 8 + u8 led[IXGBE_MAX_SUPPORTED_GPIO_LED]; + u8 sdp[IXGBE_MAX_SUPPORTED_GPIO_SDP]; + /* SR-IOV virtualization */ + u8 sr_iov_1_1; /* SR-IOV enabled */ + /* VMDQ */ + u8 vmdq; /* VMDQ supported */ + + /* EVB capabilities */ + u8 evb_802_1_qbg; /* Edge Virtual Bridging */ + u8 evb_802_1_qbh; /* Bridge Port Extension */ + + u8 dcb; + u8 iscsi; + u8 ieee_1588; + u8 mgmt_cem; + + /* WoL and APM support */ +#define IXGBE_WOL_SUPPORT_M BIT(0) +#define 
IXGBE_ACPI_PROG_MTHD_M BIT(1) +#define IXGBE_PROXY_SUPPORT_M BIT(2) + u8 apm_wol_support; + u8 acpi_prog_mthd; + u8 proxy_support; + bool nvm_update_pending_nvm; + bool nvm_update_pending_orom; + bool nvm_update_pending_netlist; +#define IXGBE_NVM_PENDING_NVM_IMAGE BIT(0) +#define IXGBE_NVM_PENDING_OROM BIT(1) +#define IXGBE_NVM_PENDING_NETLIST BIT(2) + bool sec_rev_disabled; + bool update_disabled; + bool nvm_unified_update; + bool netlist_auth; +#define IXGBE_NVM_MGMT_SEC_REV_DISABLED BIT(0) +#define IXGBE_NVM_MGMT_UPDATE_DISABLED BIT(1) +#define IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3) +#define IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT BIT(5) + bool no_drop_policy_support; + /* PCIe reset avoidance */ + bool pcie_reset_avoidance; /* false: not supported, true: supported */ + /* Post update reset restriction */ + bool reset_restrict_support; /* false: not supported, true: supported */ + + /* External topology device images within the NVM */ +#define IXGBE_EXT_TOPO_DEV_IMG_COUNT 4 + u32 ext_topo_dev_img_ver_high[IXGBE_EXT_TOPO_DEV_IMG_COUNT]; + u32 ext_topo_dev_img_ver_low[IXGBE_EXT_TOPO_DEV_IMG_COUNT]; + u8 ext_topo_dev_img_part_num[IXGBE_EXT_TOPO_DEV_IMG_COUNT]; +#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S 8 +#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M GENMASK(15, 8) + bool ext_topo_dev_img_load_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT]; +#define IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0) + bool ext_topo_dev_img_prog_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT]; +#define IXGBE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1) +} __packed; + +/* Function specific capabilities */ +struct ixgbe_hw_func_caps { + u32 num_allocd_vfs; /* Number of allocated VFs */ + u32 vf_base_id; /* Logical ID of the first VF */ + u32 guar_num_vsi; + struct ixgbe_hw_caps common_cap; + bool no_drop_policy_ena; +}; + +/* Device wide capabilities */ +struct ixgbe_hw_dev_caps { + struct ixgbe_hw_caps common_cap; + u32 num_vfs_exposed; /* Total number of VFs exposed */ + u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */ + u32 num_flow_director_fltr; /* Number of FD filters available */ + u32 num_funcs; +}; + +/* ACI event information */ +struct ixgbe_aci_event { + struct ixgbe_aci_desc desc; + u8 *msg_buf; + u16 msg_len; + u16 buf_len; +}; + +struct ixgbe_aci_info { + struct mutex lock; /* admin command interface lock */ + enum ixgbe_aci_err last_status; /* last status of sent admin command */ +}; + +/* Option ROM version information */ +struct ixgbe_orom_info { + u8 major; /* Major version of OROM */ + u8 patch; /* Patch version of OROM */ + u16 build; /* Build version of OROM */ + u32 srev; /* Security revision */ +}; + +/* NVM version information */ +struct ixgbe_nvm_info { + u32 eetrack; + u32 srev; + u8 major; + u8 minor; +} __packed; + +/* netlist version information */ +struct ixgbe_netlist_info { + u32 major; /* major high/low */ + u32 minor; /* minor high/low */ + u32 type; /* type high/low */ + u32 rev; /* revision high/low */ + u32 hash; /* SHA-1 hash word */ + u16 cust_ver; /* customer version */ +} __packed; + +/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules + * of the flash image. 
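+ *
+ * Two banks per module allow an update to be written to the inactive
+ * bank and activated afterwards (see ixgbe_aci_opc_nvm_write_activate
+ * above); IXGBE_INVALID_FLASH_BANK covers the case where the active
+ * bank cannot be determined. Assuming the second bank sits directly
+ * after the first (an illustrative sketch, not authoritative layout
+ * logic), the active NVM bank offset follows from struct
+ * ixgbe_bank_info below:
+ *
+ *	base = banks->nvm_ptr;
+ *	if (banks->nvm_bank == IXGBE_2ND_FLASH_BANK)
+ *		base += banks->nvm_size;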
+ */ +enum ixgbe_flash_bank { + IXGBE_INVALID_FLASH_BANK, + IXGBE_1ST_FLASH_BANK, + IXGBE_2ND_FLASH_BANK, +}; + +/* information for accessing NVM, OROM, and Netlist flash banks */ +struct ixgbe_bank_info { + u32 nvm_ptr; /* Pointer to 1st NVM bank */ + u32 nvm_size; /* Size of NVM bank */ + u32 orom_ptr; /* Pointer to 1st OROM bank */ + u32 orom_size; /* Size of OROM bank */ + u32 netlist_ptr; /* Ptr to 1st Netlist bank */ + u32 netlist_size; /* Size of Netlist bank */ + enum ixgbe_flash_bank nvm_bank; /* Active NVM bank */ + enum ixgbe_flash_bank orom_bank; /* Active OROM bank */ + enum ixgbe_flash_bank netlist_bank; /* Active Netlist bank */ +}; + +/* Flash Chip Information */ +struct ixgbe_flash_info { + struct ixgbe_orom_info orom; /* Option ROM version info */ + u32 flash_size; /* Available flash size in bytes */ + struct ixgbe_nvm_info nvm; /* NVM version information */ + struct ixgbe_netlist_info netlist; /* Netlist version info */ + struct ixgbe_bank_info banks; /* Flash Bank information */ + u16 sr_words; /* Shadow RAM size in words */ + u8 blank_nvm_mode; /* is NVM empty (no FW present) */ +}; + +#endif /* _IXGBE_TYPE_E610_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 81e1df83f136..1fc821fb351a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> @@ -66,7 +66,9 @@ int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, * Resets the hardware by resetting the transmit and receive units, masks * and clears all interrupts, perform a PHY reset, and perform a link (MAC) * reset. - **/ + * + * Return: 0 on success or negative value on failure + */ int ixgbe_reset_hw_X540(struct ixgbe_hw *hw) { u32 swfw_mask = hw->phy.phy_semaphore_mask; @@ -133,10 +135,14 @@ mac_reset_top: hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES; hw->mac.ops.init_rx_addrs(hw); + /* The following is not supported by E610. */ + if (hw->mac.type == ixgbe_mac_e610) + return status; + /* Store the permanent SAN mac address */ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); - /* Add the SAN MAC address to the RAR only if it's a valid address */ + /* Add the SAN MAC address to RAR if it's a valid address */ if (is_valid_ether_addr(hw->mac.san_addr)) { /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h index b69a680d3ab5..6ed360c5b605 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h @@ -1,5 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. 
*/ + +#ifndef _IXGBE_X540_H_ +#define _IXGBE_X540_H_ #include "ixgbe_type.h" @@ -17,3 +20,5 @@ int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw); int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); + +#endif /* _IXGBE_X540_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index d9a8cf018d3b..277ceaf8a793 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include "ixgbe_x540.h" +#include "ixgbe_x550.h" #include "ixgbe_type.h" #include "ixgbe_common.h" #include "ixgbe_mbx.h" @@ -2770,9 +2771,9 @@ static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) * semaphore, -EIO when command fails or -ENIVAL when incorrect * params passed. **/ -static int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, - u8 build, u8 sub, u16 len, - const char *driver_ver) +int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub, u16 len, + const char *driver_ver) { struct ixgbe_hic_drv_info2 fw_cmd; int ret_val; @@ -3505,14 +3506,14 @@ mac_reset_top: return status; } -/** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype +/** ixgbe_set_ethertype_anti_spoofing_x550 - Enable/Disable Ethertype * anti-spoofing * @hw: pointer to hardware structure * @enable: enable or disable switch for Ethertype anti-spoofing * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing **/ -static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, - bool enable, int vf) +void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw, + bool enable, int vf) { int vf_target_reg = vf >> 3; int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT; @@ -3527,14 +3528,14 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } -/** ixgbe_set_source_address_pruning_X550 - Enable/Disbale src address pruning +/** ixgbe_set_source_address_pruning_x550 - Enable/Disable src address pruning * @hw: pointer to hardware structure * @enable: enable or disable source address pruning * @pool: Rx pool to set source address pruning for **/ -static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, - bool enable, - unsigned int pool) +void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw, + bool enable, + unsigned int pool) { u64 pfflp; @@ -3831,9 +3832,9 @@ static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ .set_source_address_pruning = \ - &ixgbe_set_source_address_pruning_X550, \ + &ixgbe_set_source_address_pruning_x550, \ .set_ethertype_anti_spoofing = \ - &ixgbe_set_ethertype_anti_spoofing_X550, \ + &ixgbe_set_ethertype_anti_spoofing_x550, \ .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \ .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \ .get_thermal_sensor_data = NULL, \ @@ -4047,7 +4048,7 @@ static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(X550EM_x) }; -static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = { +const u32 
ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(X550EM_a) }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h new file mode 100644 index 000000000000..3e4092f8da3e --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Intel Corporation. */ + +#ifndef _IXGBE_X550_H_ +#define _IXGBE_X550_H_ + +#include "ixgbe_type.h" + +extern const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT]; + +int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub, u16 len, + const char *driver_ver); +void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw, + bool enable, + unsigned int pool); +void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw, + bool enable, int vf); + +#endif /* _IXGBE_X550_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 5f08779c0e4e..a9bc96f6399d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #ifndef _IXGBEVF_DEFINES_H_ #define _IXGBEVF_DEFINES_H_ @@ -16,6 +16,9 @@ #define IXGBE_DEV_ID_X550_VF_HV 0x1564 #define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 +#define IXGBE_DEV_ID_E610_VF 0x57AD +#define IXGBE_SUBDEV_ID_E610_VF_HV 0x00FF + #define IXGBE_VF_IRQ_CLEAR_MASK 7 #define IXGBE_VF_MAX_TX_QUEUES 8 #define IXGBE_VF_MAX_RX_QUEUES 8 diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 130cb868774c..4384e892f967 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #ifndef _IXGBEVF_H_ #define _IXGBEVF_H_ @@ -418,6 +418,8 @@ enum ixgbevf_boards { board_X550EM_x_vf, board_X550EM_x_vf_hv, board_x550em_a_vf, + board_e610_vf, + board_e610_vf_hv, }; enum ixgbevf_xcast_modes { @@ -434,12 +436,13 @@ extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info; extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy; extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info; +extern const struct ixgbevf_info ixgbevf_e610_vf_info; extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info; extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info; extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info; extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info; -extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops; +extern const struct ixgbevf_info ixgbevf_e610_vf_hv_info; /* needed by ethtool.c */ extern const char ixgbevf_driver_name[]; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 149911e3002a..6442f115a262 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ /****************************************************************************** Copyright (c)2006 - 2007 Myricom, Inc. 
for some LRO specific code @@ -39,7 +39,7 @@ static const char ixgbevf_driver_string[] = "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; static char ixgbevf_copyright[] = - "Copyright (c) 2009 - 2018 Intel Corporation."; + "Copyright (c) 2009 - 2024 Intel Corporation."; static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, @@ -51,6 +51,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info, [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info, + [board_e610_vf] = &ixgbevf_e610_vf_info, + [board_e610_vf_hv] = &ixgbevf_e610_vf_hv_info, }; /* ixgbevf_pci_tbl - PCI Device ID Table @@ -71,6 +73,9 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf }, + {PCI_VDEVICE_SUB(INTEL, IXGBE_DEV_ID_E610_VF, PCI_ANY_ID, + IXGBE_SUBDEV_ID_E610_VF_HV), board_e610_vf_hv}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_VF), board_e610_vf}, /* required last entry */ {0, } }; @@ -732,10 +737,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - /* XDP packets use error pointer so abort at this point */ - if (IS_ERR(skb)) - return true; - /* verify that the packet does not have any known errors */ if (unlikely(ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { @@ -1044,9 +1045,9 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring, return IXGBEVF_XDP_TX; } -static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *rx_ring, - struct xdp_buff *xdp) +static int ixgbevf_run_xdp(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *rx_ring, + struct xdp_buff *xdp) { int result = IXGBEVF_XDP_PASS; struct ixgbevf_ring *xdp_ring; @@ -1080,7 +1081,7 @@ out_failure: break; } xdp_out: - return ERR_PTR(-result); + return result; } static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring, @@ -1122,6 +1123,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, struct sk_buff *skb = rx_ring->skb; bool xdp_xmit = false; struct xdp_buff xdp; + int xdp_res = 0; /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ #if (PAGE_SIZE < 8192) @@ -1165,11 +1167,11 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size); #endif - skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp); + xdp_res = ixgbevf_run_xdp(adapter, rx_ring, &xdp); } - if (IS_ERR(skb)) { - if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) { + if (xdp_res) { + if (xdp_res == IXGBEVF_XDP_TX) { xdp_xmit = true; ixgbevf_rx_buffer_flip(rx_ring, rx_buffer, size); @@ -1189,7 +1191,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, } /* exit if we failed to retrieve a buffer */ - if (!skb) { + if (!xdp_res && !skb) { rx_ring->rx_stats.alloc_rx_buff_failed++; rx_buffer->pagecnt_bias++; break; @@ -1203,7 +1205,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, continue; /* verify the packet layout is correct */ - if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) { + if (xdp_res || ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) { skb = NULL; continue; } @@ -4693,6 +4695,9 @@ static int 
ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case ixgbe_mac_X540_vf: dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n"); break; + case ixgbe_mac_e610_vf: + dev_info(&pdev->dev, "Intel(R) E610 Virtual Function\n"); + break; case ixgbe_mac_82599_vf: default: dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n"); diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c index a55dd978f7ca..24d0237e7a99 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.c +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -505,15 +505,3 @@ const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy = { .check_for_ack = ixgbevf_check_for_ack_vf, .check_for_rst = ixgbevf_check_for_rst_vf, }; - -/* Mailbox operations when running on Hyper-V. - * On Hyper-V, PF/VF communication is not through the - * hardware mailbox; this communication is through - * a software mediated path. - * Most mail box operations are noop while running on - * Hyper-V. - */ -const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = { - .init_params = ixgbevf_init_mbx_params_vf, - .check_for_rst = ixgbevf_check_for_rst_vf, -}; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 1641d00d8ed3..da7a72ecce7a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include "vf.h" #include "ixgbevf.h" @@ -1076,3 +1076,13 @@ const struct ixgbevf_info ixgbevf_x550em_a_vf_info = { .mac = ixgbe_mac_x550em_a_vf, .mac_ops = &ixgbevf_mac_ops, }; + +const struct ixgbevf_info ixgbevf_e610_vf_info = { + .mac = ixgbe_mac_e610_vf, + .mac_ops = &ixgbevf_mac_ops, +}; + +const struct ixgbevf_info ixgbevf_e610_vf_hv_info = { + .mac = ixgbe_mac_e610_vf, + .mac_ops = &ixgbevf_hv_mac_ops, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index b4eef5b6c172..2d791bc26ae4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #ifndef __IXGBE_VF_H__ #define __IXGBE_VF_H__ @@ -54,6 +54,8 @@ enum ixgbe_mac_type { ixgbe_mac_X550_vf, ixgbe_mac_X550EM_x_vf, ixgbe_mac_x550em_a_vf, + ixgbe_mac_e610, + ixgbe_mac_e610_vf, ixgbe_num_macs }; |
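The two E610 VF entries added to ixgbevf_pci_tbl above rely on table order: PCI ID matching takes the first entry whose fields (with PCI_ANY_ID as a wildcard) fit the device, so the subsystem-specific Hyper-V entry must precede the catch-all IXGBE_DEV_ID_E610_VF entry or the hv variant would never bind. The following self-contained sketch demonstrates that first-match rule; the struct, helpers and sample subsystem IDs are hypothetical scaffolding, while the vendor/device/subdevice values are the ones used by this patch.

#include <stdint.h>
#include <stdio.h>

#define PCI_ANY_ID			(~0U)
#define PCI_VENDOR_ID_INTEL		0x8086
#define IXGBE_DEV_ID_E610_VF		0x57AD
#define IXGBE_SUBDEV_ID_E610_VF_HV	0x00FF

struct pci_id {
	uint32_t vendor, device, subvendor, subdevice;
	const char *board;
};

/* Mirrors the ordering used in ixgbevf_pci_tbl: specific entry first. */
static const struct pci_id tbl[] = {
	{ PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_E610_VF,
	  PCI_ANY_ID, IXGBE_SUBDEV_ID_E610_VF_HV, "board_e610_vf_hv" },
	{ PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_E610_VF,
	  PCI_ANY_ID, PCI_ANY_ID, "board_e610_vf" },
	{ 0 },
};

static int field_matches(uint32_t id, uint32_t dev)
{
	return id == PCI_ANY_ID || id == dev;
}

/* First-match lookup, the same rule the kernel's PCI ID matching applies. */
static const char *match(uint32_t vendor, uint32_t device,
			 uint32_t subvendor, uint32_t subdevice)
{
	const struct pci_id *p;

	for (p = tbl; p->vendor; p++)
		if (field_matches(p->vendor, vendor) &&
		    field_matches(p->device, device) &&
		    field_matches(p->subvendor, subvendor) &&
		    field_matches(p->subdevice, subdevice))
			return p->board;
	return "no match";
}

int main(void)
{
	/* A VF exposing subdevice 0x00FF binds the Hyper-V variant... */
	printf("%s\n", match(0x8086, 0x57AD, 0x1414, 0x00FF));
	/* ...while any other subdevice falls through to the plain VF. */
	printf("%s\n", match(0x8086, 0x57AD, 0x8086, 0x0001));
	return 0;
}

Swapping the two table rows would make the wildcard entry shadow the Hyper-V one for every E610 VF, which is why the patch lists the PCI_VDEVICE_SUB entry first.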