path: root/drivers/vdpa/mlx5/net/mlx5_vnet.h
blob: 36c44d9fdd166b52c83557c66793acee1dbb2ae4
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef __MLX5_VNET_H__
#define __MLX5_VNET_H__

#include "mlx5_vdpa.h"

#define to_mlx5_vdpa_ndev(__mvdev)                                             \
	container_of(__mvdev, struct mlx5_vdpa_net, mvdev)
#define to_mvdev(__vdev) container_of((__vdev), struct mlx5_vdpa_dev, vdev)
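
/*
 * Illustrative sketch (hypothetical helper, not declared by this header):
 * a vdpa op typically receives a struct vdpa_device *, converts it to the
 * embedded mlx5_vdpa_dev with to_mvdev(), and then recovers the containing
 * net device with to_mlx5_vdpa_ndev():
 *
 *	static struct mlx5_vdpa_net *vdev_to_ndev(struct vdpa_device *vdev)
 *	{
 *		struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
 *
 *		return to_mlx5_vdpa_ndev(mvdev);
 *	}
 */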

struct mlx5_vdpa_net_resources {
	u32 tisn;		/* TIS (transport interface send) object number */
	u32 tdn;		/* transport domain number */
	u32 tirn;		/* TIR (transport interface receive) object number */
	u32 rqtn;		/* RQT (receive queue table) object number */
	bool valid;		/* the resources above have been created */
	struct dentry *tirn_dent;	/* debugfs entry exposing tirn */
};

#define MLX5V_MACVLAN_SIZE 256	/* number of buckets in macvlan_hash */

static inline u16 key2vid(u64 key)
{
	return (u16)(key >> 48) & 0xfff;
}
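
/*
 * key2vid() extracts the 12-bit VLAN ID from a 64-bit MAC/VLAN key in which
 * the VID occupies bits 48..59 above a 48-bit MAC. A minimal sketch of the
 * assumed inverse, packing a MAC and VID into such a key (hypothetical
 * helper, for illustration only):
 *
 *	static inline u64 mac_vid_to_key(const u8 *mac, u16 vid)
 *	{
 *		u64 key = (u64)(vid & 0xfff) << 48;
 *		int i;
 *
 *		for (i = 0; i < ETH_ALEN; i++)
 *			key |= (u64)mac[i] << (8 * (ETH_ALEN - 1 - i));
 *		return key;
 *	}
 */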

#define MLX5_VDPA_IRQ_NAME_LEN 32

struct mlx5_vdpa_irq_pool_entry {
	struct msi_map map;	/* MSI index/virq allocated from the device */
	bool used;		/* entry is currently assigned to a virtqueue */
	char name[MLX5_VDPA_IRQ_NAME_LEN];	/* name used when requesting the IRQ */
	void *dev_id;		/* cookie identifying the interrupt's user */
};

struct mlx5_vdpa_irq_pool {
	int num_ent;		/* number of entries in the pool */
	struct mlx5_vdpa_irq_pool_entry *entries;	/* array of pool entries */
};
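
/*
 * A minimal sketch of how such a pool is typically consumed (hypothetical
 * helper, for illustration only; the driver's allocation logic is not part
 * of this header):
 *
 *	static struct mlx5_vdpa_irq_pool_entry *
 *	irq_pool_find_free(struct mlx5_vdpa_irq_pool *irqp)
 *	{
 *		int i;
 *
 *		for (i = 0; i < irqp->num_ent; i++)
 *			if (!irqp->entries[i].used)
 *				return &irqp->entries[i];
 *		return NULL;
 *	}
 */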

struct mlx5_vdpa_net {
	struct mlx5_vdpa_dev mvdev;		/* core mlx5 vdpa device */
	struct mlx5_vdpa_net_resources res;	/* TIS/TIR/RQT resources */
	struct virtio_net_config config;	/* virtio-net config space */
	struct mlx5_vdpa_virtqueue *vqs;	/* data virtqueues */
	struct vdpa_callback *event_cbs;	/* virtqueue event callbacks */

	/* Serialize vq resources creation and destruction. This is required
	 * since the memory map might change and we need to destroy and create
	 * resources while the driver is operational.
	 */
	struct rw_semaphore reslock;
	struct mlx5_flow_table *rxft;		/* RX steering flow table */
	struct dentry *rx_dent;			/* debugfs directory for RX steering */
	struct dentry *rx_table_dent;		/* debugfs entry for the RX flow table */
	bool setup;				/* vq resources are set up */
	u32 cur_num_vqs;			/* number of virtqueues currently in use */
	u32 rqt_size;				/* number of entries in the RQT */
	bool nb_registered;			/* nb has been registered */
	struct notifier_block nb;		/* notifier for device events (e.g. link state) */
	struct vdpa_callback config_cb;		/* config-change callback */
	struct mlx5_vdpa_wq_ent cvq_ent;	/* work entry for control VQ processing */
	struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];	/* MAC/VLAN steering nodes */
	struct mlx5_vdpa_irq_pool irqp;		/* MSI vectors for VQ interrupts */
	struct dentry *debugfs;			/* debugfs root for this device */
};

struct mlx5_vdpa_counter {
	struct mlx5_fc *counter;	/* flow counter */
	struct dentry *dent;		/* debugfs entry exposing the counter */
	struct mlx5_core_dev *mdev;	/* device owning the counter */
};

struct macvlan_node {
	struct hlist_node hlist;		/* hook into mlx5_vdpa_net.macvlan_hash */
	struct mlx5_flow_handle *ucast_rule;	/* unicast steering rule */
	struct mlx5_flow_handle *mcast_rule;	/* multicast steering rule */
	u64 macvlan;				/* MAC/VLAN key, see key2vid() */
	struct mlx5_vdpa_net *ndev;		/* owning net device */
	bool tagged;				/* entry is VLAN tagged */
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
	struct dentry *dent;			/* debugfs directory for this node */
	struct mlx5_vdpa_counter ucast_counter;	/* RX unicast counter */
	struct mlx5_vdpa_counter mcast_counter;	/* RX multicast counter */
#endif
};
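
/*
 * A minimal sketch of a bucket lookup in macvlan_hash, assuming the table is
 * indexed by hashing the 64-bit MAC/VLAN key with hash_64() (hypothetical
 * helper, for illustration only):
 *
 *	#include <linux/hash.h>
 *
 *	static struct macvlan_node *
 *	macvlan_find(struct mlx5_vdpa_net *ndev, u64 value)
 *	{
 *		struct macvlan_node *pos;
 *		u32 idx = hash_64(value, ilog2(MLX5V_MACVLAN_SIZE));
 *
 *		hlist_for_each_entry(pos, &ndev->macvlan_hash[idx], hlist)
 *			if (pos->macvlan == value)
 *				return pos;
 *		return NULL;
 *	}
 */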

/* debugfs helpers for the net device, its RX flow table and TIR */
void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_remove_debugfs(struct dentry *dbg);
void mlx5_vdpa_add_rx_flow_table(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_remove_rx_flow_table(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_add_tirn(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_remove_tirn(struct mlx5_vdpa_net *ndev);
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
void mlx5_vdpa_add_rx_counters(struct mlx5_vdpa_net *ndev,
			       struct macvlan_node *node);
void mlx5_vdpa_remove_rx_counters(struct mlx5_vdpa_net *ndev,
				  struct macvlan_node *node);
#else
static inline void mlx5_vdpa_add_rx_counters(struct mlx5_vdpa_net *ndev,
					     struct macvlan_node *node) {}
static inline void mlx5_vdpa_remove_rx_counters(struct mlx5_vdpa_net *ndev,
						struct macvlan_node *node) {}
#endif


#endif /* __MLX5_VNET_H__ */