author    Jakub Kicinski <kuba@kernel.org>  2023-01-27 23:59:45 -0800
committer Jakub Kicinski <kuba@kernel.org>  2023-01-28 00:00:14 -0800
commit    2d104c390f0d3901c4915dcb141cb96da96cffe7 (patch)
tree      6c74e81297ef3ae095ff69ab90fea64816babbae /drivers/net/ethernet/mellanox
parent    d8afe2f8a92d2aac3df645772f6ee61b0b2fc147 (diff)
parent    16809afdcbad5fa45f34622f62873c7d7114cde5 (diff)
Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
bpf-next 2023-01-28

We've added 124 non-merge commits during the last 22 day(s) which contain
a total of 124 files changed, 6386 insertions(+), 1827 deletions(-).

The main changes are:

1) Implement XDP hints via kfuncs with initial support for RX hash and
   timestamp metadata kfuncs, from Stanislav Fomichev and
   Toke Høiland-Jørgensen.
   Measurements on overhead: https://lore.kernel.org/bpf/875yellcx6.fsf@toke.dk

2) Extend libbpf's bpf_tracing.h support for tracing arguments of
   kprobes/uprobes and syscall as a special case, from Andrii Nakryiko.

3) Significantly reduce the search time for module symbols by livepatch
   and BPF, from Jiri Olsa and Zhen Lei.

4) Enable cpumasks to be used as kptrs, which is useful for tracing
   programs tracking which tasks end up running on which CPUs in
   different time intervals, from David Vernet.

5) Fix several issues in the dynptr processing such as stack slot liveness
   propagation, missing checks for PTR_TO_STACK variable offset, etc,
   from Kumar Kartikeya Dwivedi.

6) Various performance improvements, fixes, and introduction of more than
   just one XDP program to XSK selftests, from Magnus Karlsson.

7) Big batch to BPF samples to reduce deprecated functionality,
   from Daniel T. Lee.

8) Enable struct_ops programs to be sleepable in verifier, from David Vernet.

9) Reduce pr_warn() noise on BTF mismatches when they are expected under
   the CONFIG_MODULE_ALLOW_BTF_MISMATCH config anyway, from Connor O'Brien.

10) Describe modulo and division by zero behavior of the BPF runtime in
    BPF's instruction specification document, from Dave Thaler.

11) Several improvements to libbpf API documentation in libbpf.h,
    from Grant Seltzer.

12) Improve resolve_btfids header dependencies related to subcmd and add
    proper support for HOSTCC, from Ian Rogers.

13) Add ipip6 and ip6ip decapsulation support for bpf_skb_adjust_room()
    helper along with BPF selftests, from Ziyang Xuan.

14) Simplify the parsing logic of structure parameters for BPF trampoline
    in the x86-64 JIT compiler, from Pu Lehui.

15) Get BTF working for kernels with CONFIG_RUST enabled by excluding
    Rust compilation units with pahole, from Martin Rodriguez Reboredo.

16) Get bpf_setsockopt() working for kTLS on top of TCP sockets,
    from Kui-Feng Lee.

17) Disable stack protection for BPF objects in bpftool given BPF backends
    don't support it, from Holger Hoffstätte.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (124 commits)
  selftest/bpf: Make crashes more debuggable in test_progs
  libbpf: Add documentation to map pinning API functions
  libbpf: Fix malformed documentation formatting
  selftests/bpf: Properly enable hwtstamp in xdp_hw_metadata
  selftests/bpf: Calls bpf_setsockopt() on a ktls enabled socket.
  bpf: Check the protocol of a sock to agree the calls to bpf_setsockopt().
  bpf/selftests: Verify struct_ops prog sleepable behavior
  bpf: Pass const struct bpf_prog * to .check_member
  libbpf: Support sleepable struct_ops.s section
  bpf: Allow BPF_PROG_TYPE_STRUCT_OPS programs to be sleepable
  selftests/bpf: Fix vmtest static compilation error
  tools/resolve_btfids: Alter how HOSTCC is forced
  tools/resolve_btfids: Install subcmd headers
  bpf/docs: Document the nocast aliasing behavior of ___init
  bpf/docs: Document how nested trusted fields may be defined
  bpf/docs: Document cpumask kfuncs in a new file
  selftests/bpf: Add selftest suite for cpumask kfuncs
  selftests/bpf: Add nested trust selftests suite
  bpf: Enable cpumasks to be queried and used as kptrs
  bpf: Disallow NULLable pointers for trusted kfuncs
  ...
====================

Link: https://lore.kernel.org/r/20230128004827.21371-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
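For reference, here is a minimal sketch of the BPF-program side of item 1), i.e. an XDP program consuming the RX metadata kfuncs that the mlx4/mlx5 changes below implement on the driver side. The kfunc names and signatures match this series as merged (a hash-type argument only appeared in later kernels); the program itself is an illustrative assumption, not part of this diff:

/* SPDX-License-Identifier: GPL-2.0 */
/* Illustrative only: consume XDP RX metadata kfuncs from an XDP program. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Kfuncs added by this series, declared as kernel symbols. */
extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;
extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
				    __u32 *hash) __ksym;

SEC("xdp")
int rx_meta_example(struct xdp_md *ctx)
{
	__u64 ts = 0;
	__u32 hash = 0;

	/* Each kfunc returns 0 on success, or -EOPNOTSUPP when the driver
	 * (or its current configuration, e.g. hwtstamp filter off or
	 * NETIF_F_RXHASH disabled) cannot provide the metadata; these are
	 * exactly the checks the .xmo_rx_timestamp/.xmo_rx_hash callbacks
	 * below perform.
	 */
	if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
		bpf_printk("rx hw timestamp: %llu", ts);

	if (!bpf_xdp_metadata_rx_hash(ctx, &hash))
		bpf_printk("rx hash: 0x%x", hash);

	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";

Loaded with libbpf and attached in native XDP mode to an mlx4/mlx5 netdev, the two bpf_printk() lines fire only when the corresponding driver callback added in this diff reports support.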
Diffstat (limited to 'drivers/net/ethernet/mellanox')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_clock.c         13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c         6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c            63
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h           5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h           5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h      5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c      31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h      10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c   47
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h    2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c      1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c       99
12 files changed, 205 insertions(+), 82 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 98b5ffb4d729..9e3b76182088 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -58,9 +58,7 @@ u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe)
return hi | lo;
}
-void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
- struct skb_shared_hwtstamps *hwts,
- u64 timestamp)
+u64 mlx4_en_get_hwtstamp(struct mlx4_en_dev *mdev, u64 timestamp)
{
unsigned int seq;
u64 nsec;
@@ -70,8 +68,15 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
nsec = timecounter_cyc2time(&mdev->clock, timestamp);
} while (read_seqretry(&mdev->clock_lock, seq));
+ return ns_to_ktime(nsec);
+}
+
+void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
+ struct skb_shared_hwtstamps *hwts,
+ u64 timestamp)
+{
memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
- hwts->hwtstamp = ns_to_ktime(nsec);
+ hwts->hwtstamp = mlx4_en_get_hwtstamp(mdev, timestamp);
}
/**
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 8800d3f1f55c..af4c4858f397 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2889,6 +2889,11 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_bpf = mlx4_xdp,
};
+static const struct xdp_metadata_ops mlx4_xdp_metadata_ops = {
+ .xmo_rx_timestamp = mlx4_en_xdp_rx_timestamp,
+ .xmo_rx_hash = mlx4_en_xdp_rx_hash,
+};
+
struct mlx4_en_bond {
struct work_struct work;
struct mlx4_en_priv *priv;
@@ -3310,6 +3315,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->netdev_ops = &mlx4_netdev_ops_master;
else
dev->netdev_ops = &mlx4_netdev_ops;
+ dev->xdp_metadata_ops = &mlx4_xdp_metadata_ops;
dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 8f762fc170b3..0869d4fff17b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -661,9 +661,41 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4)
#endif
+struct mlx4_en_xdp_buff {
+ struct xdp_buff xdp;
+ struct mlx4_cqe *cqe;
+ struct mlx4_en_dev *mdev;
+ struct mlx4_en_rx_ring *ring;
+ struct net_device *dev;
+};
+
+int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
+{
+ struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
+
+ if (unlikely(_ctx->ring->hwtstamp_rx_filter != HWTSTAMP_FILTER_ALL))
+ return -EOPNOTSUPP;
+
+ *timestamp = mlx4_en_get_hwtstamp(_ctx->mdev,
+ mlx4_en_get_cqe_ts(_ctx->cqe));
+ return 0;
+}
+
+int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+{
+ struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
+
+ if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH)))
+ return -EOPNOTSUPP;
+
+ *hash = be32_to_cpu(_ctx->cqe->immed_rss_invalid);
+ return 0;
+}
+
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_xdp_buff mxbuf = {};
int factor = priv->cqe_factor;
struct mlx4_en_rx_ring *ring;
struct bpf_prog *xdp_prog;
@@ -671,7 +703,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
bool doorbell_pending;
bool xdp_redir_flush;
struct mlx4_cqe *cqe;
- struct xdp_buff xdp;
int polled = 0;
int index;
@@ -681,7 +712,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
ring = priv->rx_ring[cq_ring];
xdp_prog = rcu_dereference_bh(ring->xdp_prog);
- xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
+ xdp_init_buff(&mxbuf.xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
doorbell_pending = false;
xdp_redir_flush = false;
@@ -776,24 +807,28 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
- xdp_prepare_buff(&xdp, va - frags[0].page_offset,
- frags[0].page_offset, length, false);
- orig_data = xdp.data;
-
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
-
- length = xdp.data_end - xdp.data;
- if (xdp.data != orig_data) {
- frags[0].page_offset = xdp.data -
- xdp.data_hard_start;
- va = xdp.data;
+ xdp_prepare_buff(&mxbuf.xdp, va - frags[0].page_offset,
+ frags[0].page_offset, length, true);
+ orig_data = mxbuf.xdp.data;
+ mxbuf.cqe = cqe;
+ mxbuf.mdev = priv->mdev;
+ mxbuf.ring = ring;
+ mxbuf.dev = dev;
+
+ act = bpf_prog_run_xdp(xdp_prog, &mxbuf.xdp);
+
+ length = mxbuf.xdp.data_end - mxbuf.xdp.data;
+ if (mxbuf.xdp.data != orig_data) {
+ frags[0].page_offset = mxbuf.xdp.data -
+ mxbuf.xdp.data_hard_start;
+ va = mxbuf.xdp.data;
}
switch (act) {
case XDP_PASS:
break;
case XDP_REDIRECT:
- if (likely(!xdp_do_redirect(dev, &xdp, xdp_prog))) {
+ if (likely(!xdp_do_redirect(dev, &mxbuf.xdp, xdp_prog))) {
ring->xdp_redirect++;
xdp_redir_flush = true;
frags[0].page = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 3d4226ddba5e..544e09b97483 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -796,10 +796,15 @@ void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
int mlx4_en_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr);
+struct xdp_md;
+int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp);
+int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash);
+
/*
* Functions for time stamping
*/
u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);
+u64 mlx4_en_get_hwtstamp(struct mlx4_en_dev *mdev, u64 timestamp);
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
struct skb_shared_hwtstamps *hwts,
u64 timestamp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index da58322cbc3a..6f8723cc6874 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -626,10 +626,11 @@ struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+ struct mlx5_cqe64 *cqe, u16 cqe_bcnt,
+ u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
- u32 cqe_bcnt);
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 5578f92f7e0f..c10c6ab2e7bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -73,6 +73,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
+static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
+{
+ return config->rx_filter == HWTSTAMP_FILTER_ALL;
+}
+
/* TX */
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 20507ef2f956..f7d52b1d293b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -156,10 +156,39 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
return true;
}
+static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
+{
+ const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
+
+ if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp)))
+ return -EOPNOTSUPP;
+
+ *timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time,
+ _ctx->rq->clock, get_cqe_ts(_ctx->cqe));
+ return 0;
+}
+
+static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+{
+ const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
+
+ if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
+ return -EOPNOTSUPP;
+
+ *hash = be32_to_cpu(_ctx->cqe->rss_hash_result);
+ return 0;
+}
+
+const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = {
+ .xmo_rx_timestamp = mlx5e_xdp_rx_timestamp,
+ .xmo_rx_hash = mlx5e_xdp_rx_hash,
+};
+
/* returns true if packet was consumed by xdp */
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
- struct bpf_prog *prog, struct xdp_buff *xdp)
+ struct bpf_prog *prog, struct mlx5e_xdp_buff *mxbuf)
{
+ struct xdp_buff *xdp = &mxbuf->xdp;
u32 act;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index bc2d9034af5b..69f338bc0633 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -44,10 +44,16 @@
(MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
sizeof(struct mlx5_wqe_inline_seg))
+struct mlx5e_xdp_buff {
+ struct xdp_buff xdp;
+ struct mlx5_cqe64 *cqe;
+ struct mlx5e_rq *rq;
+};
+
struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
- struct bpf_prog *prog, struct xdp_buff *xdp);
+ struct bpf_prog *prog, struct mlx5e_xdp_buff *mlctx);
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
@@ -56,6 +62,8 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
+extern const struct xdp_metadata_ops mlx5e_xdp_metadata_ops;
+
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
struct skb_shared_info *sinfo,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index c91b54d9ff27..b7c84ebe8418 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -8,6 +8,14 @@
/* RX data path */
+static struct mlx5e_xdp_buff *xsk_buff_to_mxbuf(struct xdp_buff *xdp)
+{
+ /* mlx5e_xdp_buff shares its layout with xdp_buff_xsk
+ * and private mlx5e_xdp_buff fields fall into xdp_buff_xsk->cb
+ */
+ return (struct mlx5e_xdp_buff *)xdp;
+}
+
int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
@@ -22,6 +30,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
goto err;
BUILD_BUG_ON(sizeof(wi->alloc_units[0]) != sizeof(wi->alloc_units[0].xsk));
+ XSK_CHECK_PRIV_TYPE(struct mlx5e_xdp_buff);
batch = xsk_buff_alloc_batch(rq->xsk_pool, (struct xdp_buff **)wi->alloc_units,
rq->mpwqe.pages_per_wqe);
@@ -43,25 +52,30 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED)) {
for (i = 0; i < batch; i++) {
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
.ptag = cpu_to_be64(addr | MLX5_EN_WR),
};
+ mxbuf->rq = rq;
}
} else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED)) {
for (i = 0; i < batch; i++) {
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
.key = rq->mkey_be,
.va = cpu_to_be64(addr),
};
+ mxbuf->rq = rq;
}
} else if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)) {
u32 mapping_size = 1 << (rq->mpwqe.page_shift - 2);
for (i = 0; i < batch; i++) {
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
umr_wqe->inline_ksms[i << 2] = (struct mlx5_ksm) {
@@ -80,6 +94,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
.key = rq->mkey_be,
.va = cpu_to_be64(rq->wqe_overflow.addr),
};
+ mxbuf->rq = rq;
}
} else {
__be32 pad_size = cpu_to_be32((1 << rq->mpwqe.page_shift) -
@@ -87,6 +102,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
__be32 frame_size = cpu_to_be32(rq->xsk_pool->chunk_size);
for (i = 0; i < batch; i++) {
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
umr_wqe->inline_klms[i << 1] = (struct mlx5_klm) {
@@ -99,6 +115,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
.va = cpu_to_be64(rq->wqe_overflow.addr),
.bcount = pad_size,
};
+ mxbuf->rq = rq;
}
}
@@ -229,11 +246,12 @@ static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, struct xdp_b
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
struct mlx5e_mpw_info *wi,
+ struct mlx5_cqe64 *cqe,
u16 cqe_bcnt,
u32 head_offset,
u32 page_idx)
{
- struct xdp_buff *xdp = wi->alloc_units[page_idx].xsk;
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[page_idx].xsk);
struct bpf_prog *prog;
/* Check packet size. Note LRO doesn't use linear SKB */
@@ -249,9 +267,11 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
*/
WARN_ON_ONCE(head_offset);
- xsk_buff_set_size(xdp, cqe_bcnt);
- xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
- net_prefetch(xdp->data);
+ /* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
+ mxbuf->cqe = cqe;
+ xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
+ xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+ net_prefetch(mxbuf->xdp.data);
/* Possible flows:
* - XDP_REDIRECT to XSKMAP:
@@ -269,7 +289,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
*/
prog = rcu_dereference(rq->xdp_prog);
- if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp))) {
+ if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, mxbuf))) {
if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
@@ -278,14 +298,15 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
* frame. On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp);
+ return mlx5e_xsk_construct_skb(rq, &mxbuf->xdp);
}
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
+ struct mlx5_cqe64 *cqe,
u32 cqe_bcnt)
{
- struct xdp_buff *xdp = wi->au->xsk;
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->au->xsk);
struct bpf_prog *prog;
/* wi->offset is not used in this function, because xdp->data and the
@@ -295,17 +316,19 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
*/
WARN_ON_ONCE(wi->offset);
- xsk_buff_set_size(xdp, cqe_bcnt);
- xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
- net_prefetch(xdp->data);
+ /* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
+ mxbuf->cqe = cqe;
+ xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
+ xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+ net_prefetch(mxbuf->xdp.data);
prog = rcu_dereference(rq->xdp_prog);
- if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp)))
+ if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, mxbuf)))
return NULL; /* page/packet was consumed by XDP */
/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
* will be handled by mlx5e_free_rx_wqe.
* On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp);
+ return mlx5e_xsk_construct_skb(rq, &mxbuf->xdp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index 087c943bd8e9..cefc0ef6105d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -13,11 +13,13 @@ int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
struct mlx5e_mpw_info *wi,
+ struct mlx5_cqe64 *cqe,
u16 cqe_bcnt,
u32 head_offset,
u32 page_idx);
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
+ struct mlx5_cqe64 *cqe,
u32 cqe_bcnt);
#endif /* __MLX5_EN_XSK_RX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1e0afaa31dd0..0e87432ec6f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5057,6 +5057,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops;
+ netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;
mlx5e_dcbnl_build_netdev(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 3df455f6b168..a9473a51edc1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -62,10 +62,12 @@
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+ struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
+ u32 page_idx);
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+ struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
+ u32 page_idx);
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -76,11 +78,6 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
.handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo,
};
-static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
-{
- return config->rx_filter == HWTSTAMP_FILTER_ALL;
-}
-
static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
u32 cqcc, void *data)
{
@@ -1575,16 +1572,19 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
return skb;
}
-static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
- u32 len, struct xdp_buff *xdp)
+static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ void *va, u16 headroom, u32 len,
+ struct mlx5e_xdp_buff *mxbuf)
{
- xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
- xdp_prepare_buff(xdp, va, headroom, len, true);
+ xdp_init_buff(&mxbuf->xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(&mxbuf->xdp, va, headroom, len, true);
+ mxbuf->cqe = cqe;
+ mxbuf->rq = rq;
}
static struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
- u32 cqe_bcnt)
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
{
union mlx5e_alloc_unit *au = wi->au;
u16 rx_headroom = rq->buff.headroom;
@@ -1606,16 +1606,16 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct xdp_buff xdp;
+ struct mlx5e_xdp_buff mxbuf;
net_prefetchw(va); /* xdp_frame data area */
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- if (mlx5e_xdp_handle(rq, au->page, prog, &xdp))
+ mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
+ if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf))
return NULL; /* page/packet was consumed by XDP */
- rx_headroom = xdp.data - xdp.data_hard_start;
- metasize = xdp.data - xdp.data_meta;
- cqe_bcnt = xdp.data_end - xdp.data;
+ rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
+ metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
+ cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -1630,16 +1630,16 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
static struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
- u32 cqe_bcnt)
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
struct mlx5e_wqe_frag_info *head_wi = wi;
union mlx5e_alloc_unit *au = wi->au;
u16 rx_headroom = rq->buff.headroom;
struct skb_shared_info *sinfo;
+ struct mlx5e_xdp_buff mxbuf;
u32 frag_consumed_bytes;
struct bpf_prog *prog;
- struct xdp_buff xdp;
struct sk_buff *skb;
dma_addr_t addr;
u32 truesize;
@@ -1654,8 +1654,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(va + rx_headroom);
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
- sinfo = xdp_get_shared_info_from_buff(&xdp);
+ mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, frag_consumed_bytes, &mxbuf);
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
truesize = 0;
cqe_bcnt -= frag_consumed_bytes;
@@ -1673,13 +1673,13 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
frag_consumed_bytes, rq->buff.map_dir);
- if (!xdp_buff_has_frags(&xdp)) {
+ if (!xdp_buff_has_frags(&mxbuf.xdp)) {
/* Init on the first fragment to avoid cold cache access
* when possible.
*/
sinfo->nr_frags = 0;
sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(&xdp);
+ xdp_buff_set_frags_flag(&mxbuf.xdp);
}
frag = &sinfo->frags[sinfo->nr_frags++];
@@ -1688,7 +1688,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
skb_frag_size_set(frag, frag_consumed_bytes);
if (page_is_pfmemalloc(au->page))
- xdp_buff_set_frag_pfmemalloc(&xdp);
+ xdp_buff_set_frag_pfmemalloc(&mxbuf.xdp);
sinfo->xdp_frags_size += frag_consumed_bytes;
truesize += frag_info->frag_stride;
@@ -1701,7 +1701,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
au = head_wi->au;
prog = rcu_dereference(rq->xdp_prog);
- if (prog && mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
+ if (prog && mlx5e_xdp_handle(rq, au->page, prog, &mxbuf)) {
if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
int i;
@@ -1711,22 +1711,22 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
return NULL; /* page/packet was consumed by XDP */
}
- skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
- xdp.data - xdp.data_hard_start,
- xdp.data_end - xdp.data,
- xdp.data - xdp.data_meta);
+ skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
+ mxbuf.xdp.data - mxbuf.xdp.data_hard_start,
+ mxbuf.xdp.data_end - mxbuf.xdp.data,
+ mxbuf.xdp.data - mxbuf.xdp.data_meta);
if (unlikely(!skb))
return NULL;
page_ref_inc(au->page);
- if (unlikely(xdp_buff_has_frags(&xdp))) {
+ if (unlikely(xdp_buff_has_frags(&mxbuf.xdp))) {
int i;
/* sinfo->nr_frags is reset by build_skb, calculate again. */
xdp_update_skb_shared_info(skb, wi - head_wi - 1,
sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&xdp));
+ xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
for (i = 0; i < sinfo->nr_frags; i++) {
skb_frag_t *frag = &sinfo->frags[i];
@@ -1777,7 +1777,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
mlx5e_xsk_skb_from_cqe_linear,
- rq, wi, cqe_bcnt);
+ rq, wi, cqe, cqe_bcnt);
if (!skb) {
/* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -1830,7 +1830,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
- rq, wi, cqe_bcnt);
+ rq, wi, cqe, cqe_bcnt);
if (!skb) {
/* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -1889,7 +1889,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
mlx5e_skb_from_cqe_mpwrq_linear,
mlx5e_skb_from_cqe_mpwrq_nonlinear,
- rq, wi, cqe_bcnt, head_offset, page_idx);
+ rq, wi, cqe, cqe_bcnt, head_offset, page_idx);
if (!skb)
goto mpwrq_cqe_out;
@@ -1940,7 +1940,8 @@ mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx)
+ struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
+ u32 page_idx)
{
union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
@@ -1979,7 +1980,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx)
+ struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
+ u32 page_idx)
{
union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
u16 rx_headroom = rq->buff.headroom;
@@ -2007,19 +2009,19 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct xdp_buff xdp;
+ struct mlx5e_xdp_buff mxbuf;
net_prefetchw(va); /* xdp_frame data area */
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- if (mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
+ mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
+ if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
}
- rx_headroom = xdp.data - xdp.data_hard_start;
- metasize = xdp.data - xdp.data_meta;
- cqe_bcnt = xdp.data_end - xdp.data;
+ rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
+ metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
+ cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -2174,8 +2176,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
if (likely(head_size))
*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
else
- *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt, data_offset,
- page_idx);
+ *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe, cqe_bcnt,
+ data_offset, page_idx);
if (unlikely(!*skb))
goto free_hd_entry;
@@ -2249,7 +2251,8 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
mlx5e_skb_from_cqe_mpwrq_linear,
mlx5e_skb_from_cqe_mpwrq_nonlinear,
mlx5e_xsk_skb_from_cqe_mpwrq_linear,
- rq, wi, cqe_bcnt, head_offset, page_idx);
+ rq, wi, cqe, cqe_bcnt, head_offset,
+ page_idx);
if (!skb)
goto mpwrq_cqe_out;
@@ -2494,7 +2497,7 @@ static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
- rq, wi, cqe_bcnt);
+ rq, wi, cqe, cqe_bcnt);
if (!skb)
goto wq_free_wqe;
@@ -2586,7 +2589,7 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
goto free_wqe;
}
- skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe_bcnt);
+ skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt);
if (!skb)
goto free_wqe;