Diffstat (limited to 'drivers/net/ethernet/intel/iavf/iavf_txrx.c')
-rw-r--r-- | drivers/net/ethernet/intel/iavf/iavf_txrx.c | 433
1 file changed, 332 insertions(+), 101 deletions(-)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 26b424fd6718..422312b8b54a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -8,6 +8,26 @@
 #include "iavf.h"
 #include "iavf_trace.h"
 #include "iavf_prototype.h"
+#include "iavf_ptp.h"
+
+/**
+ * iavf_is_descriptor_done - tests DD bit in Rx descriptor
+ * @qw1: quad word 1 of the descriptor, which holds the Descriptor Done field
+ * @flex: is the descriptor flex or legacy
+ *
+ * This function tests the descriptor done bit in the specified descriptor.
+ * Because there are two types of descriptors (legacy and flex), the @flex
+ * parameter is used to distinguish them.
+ *
+ * Return: true or false based on the state of DD bit in Rx descriptor.
+ */
+static bool iavf_is_descriptor_done(u64 qw1, bool flex)
+{
+        if (flex)
+                return FIELD_GET(IAVF_RXD_FLEX_DD_M, qw1);
+        else
+                return FIELD_GET(IAVF_RXD_LEGACY_DD_M, qw1);
+}
 
 static __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
                          u32 td_tag)
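FIELD_GET() comes from <linux/bitfield.h>: it masks the value and shifts the result down to bit zero, so the helper above returns the raw DD bit for whichever writeback format the ring uses. A rough userspace illustration of the same test (the mask values here are assumptions; the real IAVF_RXD_LEGACY_DD_M/IAVF_RXD_FLEX_DD_M definitions live in the driver's descriptor headers, outside this diff):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the driver's DD masks; both writeback
 * formats are assumed to keep DD in bit 0 of quad word 1.
 */
#define RXD_LEGACY_DD_M 0x1ULL
#define RXD_FLEX_DD_M   0x1ULL

/* Mimics FIELD_GET(): mask, then shift the field down to bit 0. */
static inline uint64_t field_get(uint64_t mask, uint64_t val)
{
        return (val & mask) >> __builtin_ctzll(mask);
}

static bool is_descriptor_done(uint64_t qw1, bool flex)
{
        if (flex)
                return field_get(RXD_FLEX_DD_M, qw1);
        return field_get(RXD_LEGACY_DD_M, qw1);
}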
@@ -766,7 +786,7 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
 	u64_stats_init(&rx_ring->syncp);
 
 	/* Round up to nearest 4K */
-	rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
+	rx_ring->size = rx_ring->count * sizeof(struct iavf_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 	rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size,
 					   &rx_ring->dma, GFP_KERNEL);
@@ -845,7 +865,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
 		.count	= rx_ring->count,
 	};
 	u16 ntu = rx_ring->next_to_use;
-	union iavf_rx_desc *rx_desc;
+	struct iavf_rx_desc *rx_desc;
 
 	/* do nothing if no valid netdev defined */
 	if (!rx_ring->netdev || !cleaned_count)
@@ -863,7 +883,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
 		/* Refresh the desc even if buffer_addrs didn't change
 		 * because each write-back erases this info.
 		 */
-		rx_desc->read.pkt_addr = cpu_to_le64(addr);
+		rx_desc->qw0 = cpu_to_le64(addr);
 
 		rx_desc++;
 		ntu++;
@@ -873,7 +893,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
 		}
 
 		/* clear the status bits for the next_to_use descriptor */
-		rx_desc->wb.qword1.status_error_len = 0;
+		rx_desc->qw1 = 0;
 
 		cleaned_count--;
 	} while (cleaned_count);
@@ -896,60 +916,43 @@ no_buffers:
 }
 
 /**
- * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * iavf_rx_csum - Indicate in skb if hw indicated a good checksum
  * @vsi: the VSI we care about
  * @skb: skb currently being received and modified
- * @rx_desc: the receive descriptor
+ * @decoded_pt: decoded ptype information
+ * @csum_bits: decoded Rx descriptor information
  **/
-static void iavf_rx_checksum(struct iavf_vsi *vsi,
-			     struct sk_buff *skb,
-			     union iavf_rx_desc *rx_desc)
+static void iavf_rx_csum(const struct iavf_vsi *vsi, struct sk_buff *skb,
+			 struct libeth_rx_pt decoded_pt,
+			 struct libeth_rx_csum csum_bits)
 {
-	struct libeth_rx_pt decoded;
-	u32 rx_error, rx_status;
 	bool ipv4, ipv6;
-	u8 ptype;
-	u64 qword;
 
 	skb->ip_summed = CHECKSUM_NONE;
 
-	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-	ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
-
-	decoded = libie_rx_pt_parse(ptype);
-	if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
-		return;
-
-	rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword);
-	rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword);
-
 	/* did the hardware decode the packet and checksum? */
-	if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
+	if (unlikely(!csum_bits.l3l4p))
 		return;
 
-	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
-	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
+	ipv4 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV4;
+	ipv6 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV6;
 
-	if (ipv4 &&
-	    (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
-			 BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
+	if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
 		goto checksum_fail;
 
 	/* likely incorrect csum if alternate IP extension headers found */
-	if (ipv6 &&
-	    rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
-		/* don't increment checksum err here, non-fatal err */
+	if (unlikely(ipv6 && csum_bits.ipv6exadd))
 		return;
 
 	/* there was some L4 error, count error and punt packet to the stack */
-	if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
+	if (unlikely(csum_bits.l4e))
 		goto checksum_fail;
 
 	/* handle packets that were not able to be checksummed due
 	 * to arrival speed, in this case the stack can compute
 	 * the csum.
 	 */
-	if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
+	if (unlikely(csum_bits.pprs))
 		return;
 
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
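The hunks above replace the old union-based read/writeback descriptor views with a flat quad-word layout: on the read side qw0 carries the buffer DMA address, and on writeback the same words are reinterpreted through format-specific masks. A conceptual sketch of that idea (field roles as used by this patch; the struct below is an illustration, not the driver's actual iavf_rx_desc definition):

#include <stdint.h>

/* Conceptual stand-in for the new struct iavf_rx_desc: one 32-byte
 * descriptor viewed as four opaque quad words instead of per-format
 * unions.
 */
struct rx_desc {
        uint64_t qw0;   /* read: buffer DMA address; writeback:
                         * length/ptype (flex) or RSS hash (legacy)
                         */
        uint64_t qw1;   /* writeback: status/error bits incl. DD, EOP */
        uint64_t qw2;   /* writeback: L2TAG2, timestamp valid (flex) */
        uint64_t qw3;   /* writeback: upper timestamp bits (flex) */
};

/* Refill-time init mirroring iavf_alloc_rx_buffers(): hand the buffer
 * address to hardware and clear stale status so DD reads back as 0.
 */
static void rx_desc_prep(struct rx_desc *desc, uint64_t dma_addr)
{
        desc->qw0 = dma_addr;   /* cpu_to_le64() in the driver proper */
        desc->qw1 = 0;
}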
@@ -960,52 +963,196 @@ checksum_fail:
 }
 
 /**
- * iavf_rx_hash - set the hash value in the skb
+ * iavf_legacy_rx_csum - Indicate in skb if hw indicated a good checksum
+ * @vsi: the VSI we care about
+ * @qw1: quad word 1
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ *
+ * Return: decoded checksum bits.
+ **/
+static struct libeth_rx_csum
+iavf_legacy_rx_csum(const struct iavf_vsi *vsi, u64 qw1,
+		    const struct libeth_rx_pt decoded_pt)
+{
+	struct libeth_rx_csum csum_bits = {};
+
+	if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt))
+		return csum_bits;
+
+	csum_bits.ipe = FIELD_GET(IAVF_RXD_LEGACY_IPE_M, qw1);
+	csum_bits.eipe = FIELD_GET(IAVF_RXD_LEGACY_EIPE_M, qw1);
+	csum_bits.l4e = FIELD_GET(IAVF_RXD_LEGACY_L4E_M, qw1);
+	csum_bits.pprs = FIELD_GET(IAVF_RXD_LEGACY_PPRS_M, qw1);
+	csum_bits.l3l4p = FIELD_GET(IAVF_RXD_LEGACY_L3L4P_M, qw1);
+	csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_LEGACY_IPV6EXADD_M, qw1);
+
+	return csum_bits;
+}
+
+/**
+ * iavf_flex_rx_csum - Indicate in skb if hw indicated a good checksum
+ * @vsi: the VSI we care about
+ * @qw1: quad word 1
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ *
+ * Return: decoded checksum bits.
+ **/
+static struct libeth_rx_csum
+iavf_flex_rx_csum(const struct iavf_vsi *vsi, u64 qw1,
+		  const struct libeth_rx_pt decoded_pt)
+{
+	struct libeth_rx_csum csum_bits = {};
+
+	if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt))
+		return csum_bits;
+
+	csum_bits.ipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_IPE_M, qw1);
+	csum_bits.eipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EIPE_M, qw1);
+	csum_bits.l4e = FIELD_GET(IAVF_RXD_FLEX_XSUM_L4E_M, qw1);
+	csum_bits.eudpe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EUDPE_M, qw1);
+	csum_bits.l3l4p = FIELD_GET(IAVF_RXD_FLEX_L3L4P_M, qw1);
+	csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_FLEX_IPV6EXADD_M, qw1);
+	csum_bits.nat = FIELD_GET(IAVF_RXD_FLEX_NAT_M, qw1);
+
+	return csum_bits;
+}
+
+/**
+ * iavf_legacy_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @qw0: quad word 0
+ * @qw1: quad word 1
+ * @skb: skb currently being received and modified
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ **/
+static void iavf_legacy_rx_hash(const struct iavf_ring *ring, __le64 qw0,
+				__le64 qw1, struct sk_buff *skb,
+				const struct libeth_rx_pt decoded_pt)
+{
+	const __le64 rss_mask = cpu_to_le64(IAVF_RXD_LEGACY_FLTSTAT_M);
+	u32 hash;
+
+	if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt))
+		return;
+
+	if ((qw1 & rss_mask) == rss_mask) {
+		hash = le64_get_bits(qw0, IAVF_RXD_LEGACY_RSS_M);
+		libeth_rx_pt_set_hash(skb, hash, decoded_pt);
+	}
+}
+
+/**
+ * iavf_flex_rx_hash - set the hash value in the skb
  * @ring: descriptor ring
- * @rx_desc: specific descriptor
+ * @qw1: quad word 1
  * @skb: skb currently being received and modified
- * @rx_ptype: Rx packet type
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
  **/
-static void iavf_rx_hash(struct iavf_ring *ring,
-			 union iavf_rx_desc *rx_desc,
-			 struct sk_buff *skb,
-			 u8 rx_ptype)
+static void iavf_flex_rx_hash(const struct iavf_ring *ring, __le64 qw1,
+			      struct sk_buff *skb,
			      const struct libeth_rx_pt decoded_pt)
 {
-	struct libeth_rx_pt decoded;
+	bool rss_valid;
 	u32 hash;
-	const __le64 rss_mask =
-		cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
-			    IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
 
-	decoded = libie_rx_pt_parse(rx_ptype);
-	if (!libeth_rx_pt_has_hash(ring->netdev, decoded))
+	if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt))
 		return;
 
-	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
-		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
-		libeth_rx_pt_set_hash(skb, hash, decoded);
+	rss_valid = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_VALID_M);
+	if (rss_valid) {
+		hash = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_HASH_M);
+		libeth_rx_pt_set_hash(skb, hash, decoded_pt);
 	}
 }
 
 /**
+ * iavf_flex_rx_tstamp - Capture Rx timestamp from the descriptor
+ * @rx_ring: descriptor ring
+ * @qw2: quad word 2 of descriptor
+ * @qw3: quad word 3 of descriptor
+ * @skb: skb currently being received
+ *
+ * Read the Rx timestamp value from the descriptor and pass it to the stack.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ */
+static void iavf_flex_rx_tstamp(const struct iavf_ring *rx_ring, __le64 qw2,
+				__le64 qw3, struct sk_buff *skb)
+{
+	u32 tstamp;
+	u64 ns;
+
+	/* Skip processing if timestamps aren't enabled */
+	if (!(rx_ring->flags & IAVF_TXRX_FLAGS_HW_TSTAMP))
+		return;
+
+	/* Check if this Rx descriptor has a valid timestamp */
+	if (!le64_get_bits(qw2, IAVF_PTP_40B_TSTAMP_VALID))
+		return;
+
+	/* the ts_low field only contains the valid bit and sub-nanosecond
+	 * precision, so we don't need to extract it.
+	 */
+	tstamp = le64_get_bits(qw3, IAVF_RXD_FLEX_QW3_TSTAMP_HIGH_M);
+
+	ns = iavf_ptp_extend_32b_timestamp(rx_ring->ptp->cached_phc_time,
+					   tstamp);
+
+	*skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
+		.hwtstamp = ns_to_ktime(ns),
+	};
+}
+
+/**
  * iavf_process_skb_fields - Populate skb header fields from Rx descriptor
  * @rx_ring: rx descriptor ring packet is being transacted on
  * @rx_desc: pointer to the EOP Rx descriptor
  * @skb: pointer to current skb being populated
- * @rx_ptype: the packet type decoded by hardware
+ * @ptype: the packet type decoded by hardware
+ * @flex: is the descriptor flex or legacy
  *
  * This function checks the ring, descriptor, and packet information in
  * order to populate the hash, checksum, VLAN, protocol, and
  * other fields within the skb.
  **/
-static void
-iavf_process_skb_fields(struct iavf_ring *rx_ring,
-			union iavf_rx_desc *rx_desc, struct sk_buff *skb,
-			u8 rx_ptype)
+static void iavf_process_skb_fields(const struct iavf_ring *rx_ring,
+				    const struct iavf_rx_desc *rx_desc,
+				    struct sk_buff *skb, u32 ptype,
+				    bool flex)
 {
-	iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
-
-	iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
+	struct libeth_rx_csum csum_bits;
+	struct libeth_rx_pt decoded_pt;
+	__le64 qw0 = rx_desc->qw0;
+	__le64 qw1 = rx_desc->qw1;
+	__le64 qw2 = rx_desc->qw2;
+	__le64 qw3 = rx_desc->qw3;
+
+	decoded_pt = libie_rx_pt_parse(ptype);
+
+	if (flex) {
+		iavf_flex_rx_hash(rx_ring, qw1, skb, decoded_pt);
+		iavf_flex_rx_tstamp(rx_ring, qw2, qw3, skb);
+		csum_bits = iavf_flex_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
+					      decoded_pt);
+	} else {
+		iavf_legacy_rx_hash(rx_ring, qw0, qw1, skb, decoded_pt);
+		csum_bits = iavf_legacy_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
+						decoded_pt);
+	}
+	iavf_rx_csum(rx_ring->vsi, skb, decoded_pt, csum_bits);
 
 	skb_record_rx_queue(skb, rx_ring->queue_index);
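iavf_flex_rx_tstamp() above pulls only 32 timestamp bits out of qw3; iavf_ptp_extend_32b_timestamp(), added elsewhere in this series, widens them against the cached PHC time. The usual construction in Intel drivers looks roughly like the sketch below (an illustration, not that helper's exact body), assuming the cached time is refreshed more often than half the 32-bit nanosecond wrap (~2 s), so at most one rollover separates cache and capture:

#include <stdint.h>

/* Extend a 32-bit hardware timestamp to 64 bits using a recently
 * cached PHC time. Assumes cached_phc_ns is at most ~2 s stale.
 */
static uint64_t extend_32b_timestamp(uint64_t cached_phc_ns, uint32_t ts_low)
{
        uint32_t phc_lo = (uint32_t)cached_phc_ns;
        uint32_t fwd = ts_low - phc_lo; /* unsigned math wraps naturally */

        if (fwd < 0x80000000U)          /* capture happened after caching */
                return cached_phc_ns + fwd;

        /* otherwise the capture predates the cached time */
        return cached_phc_ns - (uint32_t)(phc_lo - ts_low);
}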
@@ -1092,8 +1239,7 @@ static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
 /**
  * iavf_is_non_eop - process handling of non-EOP buffers
  * @rx_ring: Rx ring being processed
- * @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
+ * @fields: Rx descriptor extracted fields
  *
  * This function updates next to clean. If the buffer is an EOP buffer
  * this function exits returning false, otherwise it will place the
@@ -1101,8 +1247,7 @@ static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
  * that this is in fact a non-EOP buffer.
  **/
 static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
-			    union iavf_rx_desc *rx_desc,
-			    struct sk_buff *skb)
+			    struct libeth_rqe_info fields)
 {
 	u32 ntc = rx_ring->next_to_clean + 1;
@@ -1113,8 +1258,7 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
 	prefetch(IAVF_RX_DESC(rx_ring, ntc));
 
 	/* if we are the last buffer then there is nothing else to do */
-#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
-	if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
+	if (likely(fields.eop))
 		return false;
 
 	rx_ring->rx_stats.non_eop_descs++;
@@ -1123,6 +1267,109 @@
 }
 
 /**
+ * iavf_extract_legacy_rx_fields - Extract fields from the Rx descriptor
+ * @rx_ring: rx descriptor ring
+ * @rx_desc: the descriptor to process
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size, VLAN tag, Rx packet type, end of packet field and RXE field value.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ *
+ * Return: fields extracted from the Rx descriptor.
+ */
+static struct libeth_rqe_info
+iavf_extract_legacy_rx_fields(const struct iavf_ring *rx_ring,
+			      const struct iavf_rx_desc *rx_desc)
+{
+	u64 qw0 = le64_to_cpu(rx_desc->qw0);
+	u64 qw1 = le64_to_cpu(rx_desc->qw1);
+	u64 qw2 = le64_to_cpu(rx_desc->qw2);
+	struct libeth_rqe_info fields;
+	bool l2tag1p, l2tag2p;
+
+	fields.eop = FIELD_GET(IAVF_RXD_LEGACY_EOP_M, qw1);
+	fields.len = FIELD_GET(IAVF_RXD_LEGACY_LENGTH_M, qw1);
+
+	if (!fields.eop)
+		return fields;
+
+	fields.rxe = FIELD_GET(IAVF_RXD_LEGACY_RXE_M, qw1);
+	fields.ptype = FIELD_GET(IAVF_RXD_LEGACY_PTYPE_M, qw1);
+	fields.vlan = 0;
+
+	if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+		l2tag1p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1P_M, qw1);
+		if (l2tag1p)
+			fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1_M, qw0);
+	} else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+		l2tag2p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2P_M, qw2);
+		if (l2tag2p)
+			fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2_M, qw2);
+	}
+
+	return fields;
+}
+
+/**
+ * iavf_extract_flex_rx_fields - Extract fields from the Rx descriptor
+ * @rx_ring: rx descriptor ring
+ * @rx_desc: the descriptor to process
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size, VLAN tag, Rx packet type, end of packet field and RXE field value.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ *
+ * Return: fields extracted from the Rx descriptor.
+ */
+static struct libeth_rqe_info
+iavf_extract_flex_rx_fields(const struct iavf_ring *rx_ring,
+			    const struct iavf_rx_desc *rx_desc)
+{
+	struct libeth_rqe_info fields = {};
+	u64 qw0 = le64_to_cpu(rx_desc->qw0);
+	u64 qw1 = le64_to_cpu(rx_desc->qw1);
+	u64 qw2 = le64_to_cpu(rx_desc->qw2);
+	bool l2tag1p, l2tag2p;
+
+	fields.eop = FIELD_GET(IAVF_RXD_FLEX_EOP_M, qw1);
+	fields.len = FIELD_GET(IAVF_RXD_FLEX_PKT_LEN_M, qw0);
+
+	if (!fields.eop)
+		return fields;
+
+	fields.rxe = FIELD_GET(IAVF_RXD_FLEX_RXE_M, qw1);
+	fields.ptype = FIELD_GET(IAVF_RXD_FLEX_PTYPE_M, qw0);
+	fields.vlan = 0;
+
+	if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+		l2tag1p = FIELD_GET(IAVF_RXD_FLEX_L2TAG1P_M, qw1);
+		if (l2tag1p)
+			fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG1_M, qw1);
+	} else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+		l2tag2p = FIELD_GET(IAVF_RXD_FLEX_L2TAG2P_M, qw2);
+		if (l2tag2p)
+			fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG2_2_M, qw2);
+	}
+
+	return fields;
+}
+
+static struct libeth_rqe_info
+iavf_extract_rx_fields(const struct iavf_ring *rx_ring,
+		       const struct iavf_rx_desc *rx_desc,
+		       bool flex)
+{
+	if (flex)
+		return iavf_extract_flex_rx_fields(rx_ring, rx_desc);
+	else
+		return iavf_extract_legacy_rx_fields(rx_ring, rx_desc);
+}
+
+/**
  * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
  * @rx_ring: rx descriptor ring to transact packets on
  * @budget: Total limit on number of packets to process
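Both extractors above fill the same libeth_rqe_info bundle, which is what lets the cleaning loop that follows stay format-agnostic. A toy consumer of that contract (the struct is a pared-down stand-in; the authoritative definition lives in the libeth headers):

#include <stdbool.h>
#include <stdint.h>

/* Pared-down stand-in for struct libeth_rqe_info as this patch uses it. */
struct rqe_info {
        uint32_t len;   /* bytes in this buffer */
        uint32_t ptype; /* hardware packet type index */
        uint16_t vlan;  /* stripped VLAN tag, 0 if none */
        bool eop;       /* last buffer of the frame */
        bool rxe;       /* MAC-level receive error (CRC, oversize, ...) */
};

/* Format-agnostic decision logic of the hot loop: everything
 * descriptor-specific was folded into the fields bundle up front.
 * Returns true when a completed, error-free frame may be delivered.
 */
static bool frame_deliverable(const struct rqe_info *fields)
{
        if (!fields->eop)       /* frame continues in the next buffer */
                return false;

        return !fields->rxe;    /* drop on MAC error, deliver otherwise */
}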
@@ -1136,18 +1383,17 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
  **/
 static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
 {
+	bool flex = rx_ring->rxdid == VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	struct sk_buff *skb = rx_ring->skb;
 	u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
 	bool failure = false;
 
 	while (likely(total_rx_packets < (unsigned int)budget)) {
+		struct libeth_rqe_info fields;
 		struct libeth_fqe *rx_buffer;
-		union iavf_rx_desc *rx_desc;
-		unsigned int size;
-		u16 vlan_tag = 0;
-		u8 rx_ptype;
-		u64 qword;
+		struct iavf_rx_desc *rx_desc;
+		u64 qw1;
 
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
@@ -1158,35 +1404,32 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
 
 		rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
-		/* status_error_len will always be zero for unused descriptors
-		 * because it's cleared in cleanup, and overlaps with hdr_addr
-		 * which is always zero because packet split isn't used, if the
-		 * hardware wrote DD then the length will be non-zero
-		 */
-		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-
 		/* This memory barrier is needed to keep us from reading
 		 * any other fields out of the rx_desc until we have
 		 * verified the descriptor has been written back.
 		 */
 		dma_rmb();
-#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
-		if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
+
+		qw1 = le64_to_cpu(rx_desc->qw1);
+		/* If the DD (descriptor done) field is unset, the other
+		 * fields are not valid.
+		 */
+		if (!iavf_is_descriptor_done(qw1, flex))
 			break;
 
-		size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword);
+		fields = iavf_extract_rx_fields(rx_ring, rx_desc, flex);
 
 		iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
 
 		rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean];
-		if (!libeth_rx_sync_for_cpu(rx_buffer, size))
+		if (!libeth_rx_sync_for_cpu(rx_buffer, fields.len))
 			goto skip_data;
 
 		/* retrieve a buffer from the ring */
 		if (skb)
-			iavf_add_rx_frag(skb, rx_buffer, size);
+			iavf_add_rx_frag(skb, rx_buffer, fields.len);
 		else
-			skb = iavf_build_skb(rx_buffer, size);
+			skb = iavf_build_skb(rx_buffer, fields.len);
 
 		/* exit if we failed to retrieve a buffer */
 		if (!skb) {
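The loop must not decode packet fields out of the descriptor until it has observed DD set; the dma_rmb() enforces that ordering on the CPU side. A userspace analogue with C11 atomics, where an acquire load plays the combined role of the status read plus barrier (the bit-0 DD position is an assumption, as above):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* The device writes qw0/qw2/qw3 first and qw1 (carrying DD) last; the
 * consumer may read the other words only after seeing DD set.
 */
struct wb_desc {
        uint64_t qw0, qw2, qw3;
        _Atomic uint64_t qw1;   /* DD assumed in bit 0 */
};

static bool poll_descriptor(struct wb_desc *desc, uint64_t *qw0_out)
{
        uint64_t qw1 = atomic_load_explicit(&desc->qw1,
                                            memory_order_acquire);

        if (!(qw1 & 0x1))       /* DD clear: writeback not complete */
                return false;

        *qw0_out = desc->qw0;   /* safe: ordered after the DD load */
        return true;
}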
@@ -1197,15 +1440,14 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
 skip_data:
 		cleaned_count++;
 
-		if (iavf_is_non_eop(rx_ring, rx_desc, skb) || unlikely(!skb))
+		if (iavf_is_non_eop(rx_ring, fields) || unlikely(!skb))
 			continue;
 
-		/* ERR_MASK will only have valid bits if EOP set, and
-		 * what we are doing here is actually checking
-		 * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
-		 * the error field
+		/* The RXE field in the descriptor indicates MAC errors
+		 * (CRC, alignment, oversize, etc.). If it is set, the
+		 * frame must be dropped.
 		 */
-		if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
+		if (unlikely(fields.rxe)) {
 			dev_kfree_skb_any(skb);
 			skb = NULL;
 			continue;
@@ -1219,22 +1461,11 @@ skip_data:
 
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
 
-		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-		rx_ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
-
 		/* populate checksum, VLAN, and protocol */
-		iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
-		if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
-		    rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
-			vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
-		if (rx_desc->wb.qword2.ext_status &
-		    cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
-		    rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
-			vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);
+		iavf_process_skb_fields(rx_ring, rx_desc, skb, fields.ptype, flex);
 
 		iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
-		iavf_receive_skb(rx_ring, skb, vlan_tag);
+		iavf_receive_skb(rx_ring, skb, fields.vlan);
 		skb = NULL;
 
 		/* update budget accounting */