Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/brcmphy.h                      |    1
-rw-r--r--  include/linux/cgroup-defs.h                  |  126
-rw-r--r--  include/linux/cgroup.h                       |   66
-rw-r--r--  include/linux/etherdevice.h                  |    3
-rw-r--r--  include/linux/filter.h                       |   41
-rw-r--r--  include/linux/hashtable.h                    |    4
-rw-r--r--  include/linux/hdlc.h                         |    2
-rw-r--r--  include/linux/if_pppox.h                     |    1
-rw-r--r--  include/linux/if_team.h                      |    1
-rw-r--r--  include/linux/if_vlan.h                      |    4
-rw-r--r--  include/linux/inet_diag.h                    |    9
-rw-r--r--  include/linux/kernfs.h                       |   12
-rw-r--r--  include/linux/mdio.h                         |   78
-rw-r--r--  include/linux/mlx4/driver.h                  |    5
-rw-r--r--  include/linux/mlx5/device.h                  |   66
-rw-r--r--  include/linux/mlx5/driver.h                  |   30
-rw-r--r--  include/linux/mlx5/flow_table.h              |   54
-rw-r--r--  include/linux/mlx5/fs.h                      |  111
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h                |  311
-rw-r--r--  include/linux/mlx5/vport.h                   |   37
-rw-r--r--  include/linux/mroute.h                       |   76
-rw-r--r--  include/linux/netdev_features.h              |   14
-rw-r--r--  include/linux/netdevice.h                    |  282
-rw-r--r--  include/linux/netfilter/nf_conntrack_sctp.h  |   13
-rw-r--r--  include/linux/netfilter/nfnetlink.h          |   12
-rw-r--r--  include/linux/netlink.h                      |    2
-rw-r--r--  include/linux/pci_ids.h                      |    2
-rw-r--r--  include/linux/phy.h                          |   80
-rw-r--r--  include/linux/pim.h                          |    5
-rw-r--r--  include/linux/platform_data/microread.h      |    2
-rw-r--r--  include/linux/qed/qed_if.h                   |   17
-rw-r--r--  include/linux/rhashtable.h                   |   82
-rw-r--r--  include/linux/rtnetlink.h                    |    5
-rw-r--r--  include/linux/sched.h                        |    1
-rw-r--r--  include/linux/sh_eth.h                       |    2
-rw-r--r--  include/linux/skbuff.h                       |  171
-rw-r--r--  include/linux/soc/ti/knav_dma.h              |   22
-rw-r--r--  include/linux/sock_diag.h                    |    2
-rw-r--r--  include/linux/ssb/ssb.h                      |   10
-rw-r--r--  include/linux/wait.h                         |   21
40 files changed, 1493 insertions, 290 deletions
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 59f4a7304419..f0ba9c2ec639 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -26,6 +26,7 @@
#define PHY_ID_BCM7366 0x600d8490
#define PHY_ID_BCM7425 0x600d86b0
#define PHY_ID_BCM7429 0x600d8730
+#define PHY_ID_BCM7435 0x600d8750
#define PHY_ID_BCM7439 0x600d8480
#define PHY_ID_BCM7439_2 0xae025080
#define PHY_ID_BCM7445 0x600d8510
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 06b77f9dd3f2..e5f4164cbd99 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -231,6 +231,14 @@ struct cgroup {
int id;
/*
+ * The depth this cgroup is at. The root is at depth zero and each
+ * step down the hierarchy increments the level. This along with
+ * ancestor_ids[] can determine whether a given cgroup is a
+ * descendant of another without traversing the hierarchy.
+ */
+ int level;
+
+ /*
* Each non-empty css_set associated with this cgroup contributes
one to populated_cnt. All children with non-zero populated_cnt
* of their own contribute one. The count is zero iff there's no
@@ -285,6 +293,9 @@ struct cgroup {
/* used to schedule release agent */
struct work_struct release_agent_work;
+
+ /* ids of the ancestors at each level including self */
+ int ancestor_ids[];
};
/*
@@ -304,6 +315,9 @@ struct cgroup_root {
/* The root cgroup. Root is destroyed on its release. */
struct cgroup cgrp;
+ /* for cgrp->ancestor_ids[0] */
+ int cgrp_ancestor_id_storage;
+
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
@@ -521,4 +535,116 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
#endif /* CONFIG_CGROUPS */
+#ifdef CONFIG_SOCK_CGROUP_DATA
+
+/*
+ * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
+ * per-socket cgroup information except for memcg association.
+ *
+ * On legacy hierarchies, net_prio and net_cls controllers directly set
+ * attributes on each sock which can then be tested by the network layer.
+ * On the default hierarchy, each sock is associated with the cgroup it was
+ * created in and the networking layer can match the cgroup directly.
+ *
+ * To avoid carrying all three cgroup related fields separately in sock,
+ * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
+ * On boot, sock_cgroup_data records the cgroup that the sock was created
+ * in so that cgroup2 matches can be made; however, once either net_prio or
+ * net_cls starts being used, the area is overridden to carry prioidx and/or
+ * classid. The two modes are distinguished by the lowest bit: a clear bit
+ * indicates a cgroup pointer, a set bit prioidx and classid.
+ *
+ * While userland may start using net_prio or net_cls at any time, once
+ * either is used, cgroup2 matching no longer works. There is no reason to
+ * mix the two and this is in line with how legacy and v2 compatibility is
+ * handled. On mode switch, cgroup references which are already being
+ * pointed to by socks may be leaked. While this can be remedied by adding
+ * synchronization around sock_cgroup_data, given that the number of leaked
+ * cgroups is bound and highly unlikely to be high, this seems to be the
+ * better trade-off.
+ */
+struct sock_cgroup_data {
+ union {
+#ifdef __LITTLE_ENDIAN
+ struct {
+ u8 is_data;
+ u8 padding;
+ u16 prioidx;
+ u32 classid;
+ } __packed;
+#else
+ struct {
+ u32 classid;
+ u16 prioidx;
+ u8 padding;
+ u8 is_data;
+ } __packed;
+#endif
+ u64 val;
+ };
+};
+
+/*
+ * There's a theoretical window where the following accessors race with
+ * updaters and return part of the previous pointer as the prioidx or
+ * classid. Such races are short-lived and the result isn't critical.
+ */
+static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
+{
+ /* fallback to 1 which is always the ID of the root cgroup */
+ return (skcd->is_data & 1) ? skcd->prioidx : 1;
+}
+
+static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
+{
+ /* fallback to 0 which is the unconfigured default classid */
+ return (skcd->is_data & 1) ? skcd->classid : 0;
+}
+
+/*
+ * If invoked concurrently, the updaters may clobber each other. The
+ * caller is responsible for synchronization.
+ */
+static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
+ u16 prioidx)
+{
+ struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
+
+ if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
+ return;
+
+ if (!(skcd_buf.is_data & 1)) {
+ skcd_buf.val = 0;
+ skcd_buf.is_data = 1;
+ }
+
+ skcd_buf.prioidx = prioidx;
+ WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
+}
+
+static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
+ u32 classid)
+{
+ struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
+
+ if (sock_cgroup_classid(&skcd_buf) == classid)
+ return;
+
+ if (!(skcd_buf.is_data & 1)) {
+ skcd_buf.val = 0;
+ skcd_buf.is_data = 1;
+ }
+
+ skcd_buf.classid = classid;
+ WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
+}
+
+#else /* CONFIG_SOCK_CGROUP_DATA */
+
+struct sock_cgroup_data {
+};
+
+#endif /* CONFIG_SOCK_CGROUP_DATA */
+
#endif /* _LINUX_CGROUP_DEFS_H */
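
A hedged illustration (not part of the patch) of the low-bit tagging described in the sock_cgroup_data comment: in cgroup-pointer mode the word holds an aligned pointer, so its lowest bit is clear; the first net_prio/net_cls update flips the area into (prioidx, classid) mode. 'cgrp' below is a hypothetical cgroup pointer.

    struct sock_cgroup_data skcd;

    skcd.val = (u64)(unsigned long)cgrp;   /* pointer mode, bit 0 clear */

    sock_cgroup_set_prioidx(&skcd, 5);     /* switches the area to data mode */
    /* now skcd.is_data & 1 is set: sock_cgroup_prioidx() returns 5,
     * sock_cgroup_classid() falls back to 0 until net_cls sets one,
     * and sock_cgroup_ptr() degrades to the default root cgroup. */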
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index cb91b44f5f78..322a28482745 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -81,7 +81,8 @@ struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
struct cgroup_subsys *ss);
-bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
+struct cgroup *cgroup_get_from_path(const char *path);
+
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
@@ -364,6 +365,11 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
percpu_ref_put_many(&css->refcnt, n);
}
+static inline void cgroup_put(struct cgroup *cgrp)
+{
+ css_put(&cgrp->self);
+}
+
/**
* task_css_set_check - obtain a task's css_set with extra access conditions
* @task: the task to obtain css_set for
@@ -471,6 +477,23 @@ static inline struct cgroup *task_cgroup(struct task_struct *task,
return task_css(task, subsys_id)->cgroup;
}
+/**
+ * cgroup_is_descendant - test ancestry
+ * @cgrp: the cgroup to be tested
+ * @ancestor: possible ancestor of @cgrp
+ *
+ * Test whether @cgrp is a descendant of @ancestor. It also returns %true
+ * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
+ * and @ancestor are accessible.
+ */
+static inline bool cgroup_is_descendant(struct cgroup *cgrp,
+ struct cgroup *ancestor)
+{
+ if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
+ return false;
+ return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
+}
+
/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
@@ -554,4 +577,45 @@ static inline int cgroup_init(void) { return 0; }
#endif /* !CONFIG_CGROUPS */
+/*
+ * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
+ * definition in cgroup-defs.h.
+ */
+#ifdef CONFIG_SOCK_CGROUP_DATA
+
+#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
+extern spinlock_t cgroup_sk_update_lock;
+#endif
+
+void cgroup_sk_alloc_disable(void);
+void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
+void cgroup_sk_free(struct sock_cgroup_data *skcd);
+
+static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
+{
+#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
+ unsigned long v;
+
+ /*
+ * @skcd->val is 64bit but the following is safe on 32bit too as we
+ * just need the lower ulong to be written and read atomically.
+ */
+ v = READ_ONCE(skcd->val);
+
+ if (v & 1)
+ return &cgrp_dfl_root.cgrp;
+
+ return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
+#else
+ return (struct cgroup *)(unsigned long)skcd->val;
+#endif
+}
+
+#else /* !CONFIG_SOCK_CGROUP_DATA */
+
+static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
+static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
+
+#endif /* CONFIG_SOCK_CGROUP_DATA */
+
#endif /* _LINUX_CGROUP_H */
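
For context, a hedged worked example of the O(1) descendant test added above (IDs are hypothetical):

    /* root (level 0, id 1) -> A (level 1, id 7) -> A/B (level 2, id 42)
     * A/B's ancestor_ids[] is { 1, 7, 42 }, hence:
     *   cgroup_is_descendant(ab, a):  ab->ancestor_ids[1] == 7 -> true
     *   cgroup_is_descendant(a, ab):  a->level < ab->level     -> false
     * No hierarchy walk is needed, only an index and two comparisons. */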
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index eb049c622208..37ff4a6faa9a 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -29,6 +29,9 @@
#include <asm/bitsperlong.h>
#ifdef __KERNEL__
+struct device;
+int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
+unsigned char *arch_get_platform_get_mac_address(void);
u32 eth_get_headlen(void *data, unsigned int max_len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 5972ffe5719a..43aa1f8855c7 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -350,25 +350,43 @@ struct sk_filter {
#define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
+#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
+
+static inline u8 *bpf_skb_cb(struct sk_buff *skb)
+{
+ /* eBPF programs may read/write skb->cb[] area to transfer meta
+ * data between tail calls. Since this also needs to work with
+ * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
+ *
+ * In some socket filter cases, the cb unfortunately needs to be
+ * saved/restored so that protocol specific skb->cb[] data won't
+ * be lost. In any case, due to unprivileged eBPF programs
+ * attached to sockets, we need to clear the bpf_skb_cb() area
+ * to not leak previous contents to user space.
+ */
+ BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
+ BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
+ FIELD_SIZEOF(struct qdisc_skb_cb, data));
+
+ return qdisc_skb_cb(skb)->data;
+}
+
static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
struct sk_buff *skb)
{
- u8 *cb_data = qdisc_skb_cb(skb)->data;
- u8 saved_cb[QDISC_CB_PRIV_LEN];
+ u8 *cb_data = bpf_skb_cb(skb);
+ u8 cb_saved[BPF_SKB_CB_LEN];
u32 res;
- BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
- QDISC_CB_PRIV_LEN);
-
if (unlikely(prog->cb_access)) {
- memcpy(saved_cb, cb_data, sizeof(saved_cb));
- memset(cb_data, 0, sizeof(saved_cb));
+ memcpy(cb_saved, cb_data, sizeof(cb_saved));
+ memset(cb_data, 0, sizeof(cb_saved));
}
res = BPF_PROG_RUN(prog, skb);
if (unlikely(prog->cb_access))
- memcpy(cb_data, saved_cb, sizeof(saved_cb));
+ memcpy(cb_data, cb_saved, sizeof(cb_saved));
return res;
}
@@ -376,10 +394,11 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
struct sk_buff *skb)
{
- u8 *cb_data = qdisc_skb_cb(skb)->data;
+ u8 *cb_data = bpf_skb_cb(skb);
if (unlikely(prog->cb_access))
- memset(cb_data, 0, QDISC_CB_PRIV_LEN);
+ memset(cb_data, 0, BPF_SKB_CB_LEN);
+
return BPF_PROG_RUN(prog, skb);
}
@@ -447,6 +466,8 @@ void bpf_prog_destroy(struct bpf_prog *fp);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
+int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
+int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
unsigned int len);
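
To make the cb[] plumbing above concrete, a hedged sketch (the program is hypothetical): an eBPF socket filter may use __sk_buff->cb as scratch space, which aliases qdisc_skb_cb's protocol-private data, so the wrapper must preserve it around the run.

    /* eBPF side (sketch): cb[] carries state between tail calls */
    int example_filter(struct __sk_buff *skb)
    {
            skb->cb[0]++;           /* zeroed by the wrapper before the run */
            return skb->len;
    }
    /* kernel side: bpf_prog_run_save_cb() copies the BPF_SKB_CB_LEN bytes
     * out, clears them, runs the program, and copies them back, but only
     * when prog->cb_access says the program actually touches cb[]. */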
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index 519b6e2d769e..661e5c2a8e2a 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -16,6 +16,10 @@
struct hlist_head name[1 << (bits)] = \
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)] __read_mostly = \
+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
#define DECLARE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)]
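
A hypothetical use of the new variant (same semantics as DEFINE_HASHTABLE; the buckets just land in the read-mostly section):

    static DEFINE_READ_MOSTLY_HASHTABLE(example_table, 4);  /* 16 buckets */

    /* lookups and insertions are unchanged; 'obj' embeds a hlist_node */
    hash_add(example_table, &obj->node, key);
    hash_for_each_possible(example_table, obj, node, key)
            if (obj->key == key)
                    break;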
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index 1acb1445e05f..e31bcd4c7859 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -101,7 +101,7 @@ netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev);
int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
size_t size);
/* May be used by hardware driver to gain control over HDLC device */
-void detach_hdlc_protocol(struct net_device *dev);
+int detach_hdlc_protocol(struct net_device *dev);
static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb,
struct net_device *dev)
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index b49cf923becc..ba7a9b0c7c57 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -91,7 +91,6 @@ enum {
PPPOX_CONNECTED = 1, /* connection established ==TCP_ESTABLISHED */
PPPOX_BOUND = 2, /* bound to ppp device */
PPPOX_RELAY = 4, /* forwarding is enabled */
- PPPOX_ZOMBIE = 8, /* dead, but still bound to ppp device */
PPPOX_DEAD = 16 /* dead, useless, please clean me up!*/
};
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index a6aa970758a2..b84e49c3a738 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -164,6 +164,7 @@ struct team_mode {
size_t priv_size;
size_t port_priv_size;
const struct team_mode_ops *ops;
+ enum netdev_lag_tx_type lag_tx_type;
};
#define TEAM_PORT_HASHBITS 4
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 67ce5bd3b56a..a5f6ce6b578c 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -73,7 +73,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
-static inline bool is_vlan_dev(struct net_device *dev)
+static inline bool is_vlan_dev(const struct net_device *dev)
{
return dev->priv_flags & IFF_802_1Q_VLAN;
}
@@ -621,7 +621,7 @@ static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
NETIF_F_SG |
NETIF_F_HIGHDMA |
NETIF_F_FRAGLIST |
- NETIF_F_GEN_CSUM |
+ NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 0e707f0c1a3e..7c27fa1030e8 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -3,6 +3,7 @@
#include <uapi/linux/inet_diag.h>
+struct net;
struct sock;
struct inet_hashinfo;
struct nlattr;
@@ -23,6 +24,10 @@ struct inet_diag_handler {
void (*idiag_get_info)(struct sock *sk,
struct inet_diag_msg *r,
void *info);
+
+ int (*destroy)(struct sk_buff *in_skb,
+ const struct inet_diag_req_v2 *req);
+
__u16 idiag_type;
__u16 idiag_info_size;
};
@@ -41,6 +46,10 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
struct sk_buff *in_skb, const struct nlmsghdr *nlh,
const struct inet_diag_req_v2 *req);
+struct sock *inet_diag_find_one_icsk(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const struct inet_diag_req_v2 *req);
+
int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
extern int inet_diag_register(const struct inet_diag_handler *handler);
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 5d4e9c4b821d..af51df35d749 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -274,6 +274,8 @@ void pr_cont_kernfs_path(struct kernfs_node *kn);
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn);
struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
const char *name, const void *ns);
+struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
+ const char *path, const void *ns);
void kernfs_get(struct kernfs_node *kn);
void kernfs_put(struct kernfs_node *kn);
@@ -350,6 +352,10 @@ static inline struct kernfs_node *
kernfs_find_and_get_ns(struct kernfs_node *parent, const char *name,
const void *ns)
{ return NULL; }
+static inline struct kernfs_node *
+kernfs_walk_and_get_ns(struct kernfs_node *parent, const char *path,
+ const void *ns)
+{ return NULL; }
static inline void kernfs_get(struct kernfs_node *kn) { }
static inline void kernfs_put(struct kernfs_node *kn) { }
@@ -431,6 +437,12 @@ kernfs_find_and_get(struct kernfs_node *kn, const char *name)
}
static inline struct kernfs_node *
+kernfs_walk_and_get(struct kernfs_node *kn, const char *path)
+{
+ return kernfs_walk_and_get_ns(kn, path, NULL);
+}
+
+static inline struct kernfs_node *
kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
void *priv)
{
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index b42963bc81dd..5bfd99d1a40a 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -11,6 +11,55 @@
#include <uapi/linux/mdio.h>
+struct mii_bus;
+
+struct mdio_device {
+ struct device dev;
+
+ const struct dev_pm_ops *pm_ops;
+ struct mii_bus *bus;
+
+ int (*bus_match)(struct device *dev, struct device_driver *drv);
+ void (*device_free)(struct mdio_device *mdiodev);
+ void (*device_remove)(struct mdio_device *mdiodev);
+
+ /* Bus address of the MDIO device (0-31) */
+ int addr;
+ int flags;
+};
+#define to_mdio_device(d) container_of(d, struct mdio_device, dev)
+
+/* struct mdio_driver_common: Common to all MDIO drivers */
+struct mdio_driver_common {
+ struct device_driver driver;
+ int flags;
+};
+#define MDIO_DEVICE_FLAG_PHY 1
+#define to_mdio_common_driver(d) \
+ container_of(d, struct mdio_driver_common, driver)
+
+/* struct mdio_driver: Generic MDIO driver */
+struct mdio_driver {
+ struct mdio_driver_common mdiodrv;
+
+ /*
+ * Called during discovery. Used to set
+ * up device-specific structures, if any
+ */
+ int (*probe)(struct mdio_device *mdiodev);
+
+ /* Clears up any memory if needed */
+ void (*remove)(struct mdio_device *mdiodev);
+};
+#define to_mdio_driver(d) \
+ container_of(to_mdio_common_driver(d), struct mdio_driver, mdiodrv)
+
+void mdio_device_free(struct mdio_device *mdiodev);
+struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr);
+int mdio_device_register(struct mdio_device *mdiodev);
+void mdio_device_remove(struct mdio_device *mdiodev);
+int mdio_driver_register(struct mdio_driver *drv);
+void mdio_driver_unregister(struct mdio_driver *drv);
static inline bool mdio_phy_id_is_c45(int phy_id)
{
@@ -173,4 +222,33 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
return reg;
}
+int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
+int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
+int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+
+int mdiobus_register_device(struct mdio_device *mdiodev);
+int mdiobus_unregister_device(struct mdio_device *mdiodev);
+bool mdiobus_is_registered_device(struct mii_bus *bus, int addr);
+struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr);
+
+/**
+ * mdio_module_driver() - Helper macro for registering mdio drivers
+ *
+ * Helper macro for MDIO drivers which do not do anything special in module
+ * init/exit. Each module may only use this macro once, and calling it
+ * replaces module_init() and module_exit().
+ */
+#define mdio_module_driver(_mdio_driver) \
+static int __init mdio_module_init(void) \
+{ \
+ return mdio_driver_register(&_mdio_driver); \
+} \
+module_init(mdio_module_init); \
+static void __exit mdio_module_exit(void) \
+{ \
+ mdio_driver_unregister(&_mdio_driver); \
+} \
+module_exit(mdio_module_exit)
+
#endif /* __LINUX_MDIO_H__ */
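
A hedged usage sketch for the new MDIO device/driver API (names and callbacks are hypothetical):

    static int example_mdio_probe(struct mdio_device *mdiodev)
    {
            /* set up device-specific structures */
            return 0;
    }

    static void example_mdio_remove(struct mdio_device *mdiodev)
    {
            /* release them */
    }

    static struct mdio_driver example_mdio_driver = {
            .mdiodrv.driver = { .name = "example-mdio" },
            .probe  = example_mdio_probe,
            .remove = example_mdio_remove,
    };
    mdio_module_driver(example_mdio_driver);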
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 5a06d969338e..2e8af001c5da 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -75,6 +75,11 @@ static inline int mlx4_is_bonded(struct mlx4_dev *dev)
return !!(dev->flags & MLX4_FLAG_BONDED);
}
+static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev)
+{
+ return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev));
+}
+
struct mlx4_port_map {
u8 port1;
u8 port2;
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 0b473cbfa7ef..7be845e30689 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -251,6 +251,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb,
MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
+ MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
};
enum {
@@ -442,9 +443,12 @@ struct mlx5_init_seg {
__be32 rsvd1[120];
__be32 initializing;
struct health_buffer health;
- __be32 rsvd2[884];
+ __be32 rsvd2[880];
+ __be32 internal_timer_h;
+ __be32 internal_timer_l;
+ __be32 rsvd3[2];
__be32 health_counter;
- __be32 rsvd3[1019];
+ __be32 rsvd4[1019];
__be64 ieee1588_clk;
__be32 ieee1588_clk_type;
__be32 clr_intx;
@@ -520,6 +524,12 @@ struct mlx5_eqe_page_fault {
__be32 flags_qpn;
} __packed;
+struct mlx5_eqe_vport_change {
+ u8 rsvd0[2];
+ __be16 vport_num;
+ __be32 rsvd1[6];
+} __packed;
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -532,6 +542,7 @@ union ev_data {
struct mlx5_eqe_stall_vl stall_vl;
struct mlx5_eqe_page_req req_pages;
struct mlx5_eqe_page_fault page_fault;
+ struct mlx5_eqe_vport_change vport_change;
} __packed;
struct mlx5_eqe {
@@ -593,7 +604,8 @@ struct mlx5_cqe64 {
__be32 imm_inval_pkey;
u8 rsvd40[4];
__be32 byte_cnt;
- __be64 timestamp;
+ __be32 timestamp_h;
+ __be32 timestamp_l;
__be32 sop_drop_qpn;
__be16 wqe_counter;
u8 signature;
@@ -615,6 +627,16 @@ static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
return !!(cqe->l4_hdr_type_etc & 0x1);
}
+static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
+{
+ u32 hi, lo;
+
+ hi = be32_to_cpu(cqe->timestamp_h);
+ lo = be32_to_cpu(cqe->timestamp_l);
+
+ return (u64)lo | ((u64)hi << 32);
+}
+
enum {
CQE_L4_HDR_TYPE_NONE = 0x0,
CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
@@ -1067,6 +1089,12 @@ enum {
};
enum {
+ MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0,
+ MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
+ MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
+};
+
+enum {
MLX5_L3_PROT_TYPE_IPV4 = 0,
MLX5_L3_PROT_TYPE_IPV6 = 1,
};
@@ -1102,6 +1130,12 @@ enum {
MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2,
};
+enum mlx5_list_type {
+ MLX5_NVPRT_LIST_TYPE_UC = 0x0,
+ MLX5_NVPRT_LIST_TYPE_MC = 0x1,
+ MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
+};
+
enum {
MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
@@ -1124,6 +1158,8 @@ enum mlx5_cap_type {
MLX5_CAP_IPOIB_OFFLOADS,
MLX5_CAP_EOIB_OFFLOADS,
MLX5_CAP_FLOW_TABLE,
+ MLX5_CAP_ESWITCH_FLOW_TABLE,
+ MLX5_CAP_ESWITCH,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1161,6 +1197,28 @@ enum mlx5_cap_type {
#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
+ MLX5_GET(flow_table_eswitch_cap, \
+ mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
+ MLX5_GET(flow_table_eswitch_cap, \
+ mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
+
+#define MLX5_CAP_ESW(mdev, cap) \
+ MLX5_GET(e_switch_cap, \
+ mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
+
+#define MLX5_CAP_ESW_MAX(mdev, cap) \
+ MLX5_GET(e_switch_cap, \
+ mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
+
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
@@ -1200,4 +1258,6 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
+#define MLX5_BY_PASS_NUM_PRIOS 9
+
#endif /* MLX5_DEVICE_H */
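
A short sketch of the new capability accessors (the field names come from the mlx5_ifc.h structures added below; the helper is hypothetical):

    if (MLX5_CAP_ESW(mdev, vport_svlan_strip))
            enable_svlan_strip(mdev);       /* hypothetical helper */

    if (MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ft_support))
            fdb_size = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, log_max_ft_size);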
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5c857f2a20d7..2fd7019f69db 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -426,11 +426,23 @@ struct mlx5_mr_table {
struct radix_tree_root tree;
};
+struct mlx5_vf_context {
+ int enabled;
+};
+
+struct mlx5_core_sriov {
+ struct mlx5_vf_context *vfs_ctx;
+ int num_vfs;
+ int enabled_vfs;
+};
+
struct mlx5_irq_info {
cpumask_var_t mask;
char name[MLX5_MAX_IRQ_NAME];
};
+struct mlx5_eswitch;
+
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table eq_table;
@@ -447,6 +459,7 @@ struct mlx5_priv {
int fw_pages;
atomic_t reg_pages;
struct list_head free_list;
+ int vfs_pages;
struct mlx5_core_health health;
@@ -485,6 +498,12 @@ struct mlx5_priv {
struct list_head dev_list;
struct list_head ctx_list;
spinlock_t ctx_lock;
+
+ struct mlx5_eswitch *eswitch;
+ struct mlx5_core_sriov sriov;
+ unsigned long pci_dev_data;
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_flow_root_namespace *fdb_root_ns;
};
enum mlx5_device_state {
@@ -739,6 +758,8 @@ void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
+int mlx5_sriov_init(struct mlx5_core_dev *dev);
+int mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
@@ -884,6 +905,15 @@ struct mlx5_profile {
} mr_cache[MAX_MR_CACHE_ENTRIES];
};
+enum {
+ MLX5_PCI_DEV_IS_VF = 1 << 0,
+};
+
+static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
+{
+ return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
+}
+
static inline int mlx5_get_gid_table_len(u16 param)
{
if (param > 4) {
diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h
deleted file mode 100644
index 5f922c6d4fc2..000000000000
--- a/include/linux/mlx5/flow_table.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_FLOW_TABLE_H
-#define MLX5_FLOW_TABLE_H
-
-#include <linux/mlx5/driver.h>
-
-struct mlx5_flow_table_group {
- u8 log_sz;
- u8 match_criteria_enable;
- u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
-};
-
-void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
- u16 num_groups,
- struct mlx5_flow_table_group *group);
-void mlx5_destroy_flow_table(void *flow_table);
-int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
- void *match_criteria, void *flow_context,
- u32 *flow_index);
-void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
-u32 mlx5_get_flow_table_id(void *flow_table);
-
-#endif /* MLX5_FLOW_TABLE_H */
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
new file mode 100644
index 000000000000..8230caa3fb6e
--- /dev/null
+++ b/include/linux/mlx5/fs.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MLX5_FS_
+#define _MLX5_FS_
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+
+#define MLX5_FS_DEFAULT_FLOW_TAG 0x0
+
+#define LEFTOVERS_RULE_NUM 2
+static inline void build_leftovers_ft_param(int *priority,
+ int *n_ent,
+ int *n_grp)
+{
+ *priority = 0; /* Priority of leftovers_prio-0 */
+ *n_ent = LEFTOVERS_RULE_NUM;
+ *n_grp = LEFTOVERS_RULE_NUM;
+}
+
+enum mlx5_flow_namespace_type {
+ MLX5_FLOW_NAMESPACE_BYPASS,
+ MLX5_FLOW_NAMESPACE_KERNEL,
+ MLX5_FLOW_NAMESPACE_LEFTOVERS,
+ MLX5_FLOW_NAMESPACE_FDB,
+};
+
+struct mlx5_flow_table;
+struct mlx5_flow_group;
+struct mlx5_flow_rule;
+struct mlx5_flow_namespace;
+
+struct mlx5_flow_destination {
+ enum mlx5_flow_destination_type type;
+ union {
+ u32 tir_num;
+ struct mlx5_flow_table *ft;
+ u32 vport_num;
+ };
+};
+
+struct mlx5_flow_namespace *
+mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type);
+
+struct mlx5_flow_table *
+mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
+ int prio,
+ int num_flow_table_entries,
+ int max_num_groups);
+
+struct mlx5_flow_table *
+mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ int prio,
+ int num_flow_table_entries);
+int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
+
+/* inbox should be set with the following values:
+ * start_flow_index
+ * end_flow_index
+ * match_criteria_enable
+ * match_criteria
+ */
+struct mlx5_flow_group *
+mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
+void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+
+/* Single destination per rule.
+ * Group ID is implied by the match criteria.
+ */
+struct mlx5_flow_rule *
+mlx5_add_flow_rule(struct mlx5_flow_table *ft,
+ u8 match_criteria_enable,
+ u32 *match_criteria,
+ u32 *match_value,
+ u32 action,
+ u32 flow_tag,
+ struct mlx5_flow_destination *dest);
+void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
+
+#endif
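
A hedged end-to-end sketch of the API above (error handling elided; the action constant and TIR number are assumptions taken from elsewhere in mlx5_ifc.h):

    u32 match_c[MLX5_ST_SZ_DW(fte_match_param)] = {0};
    u32 match_v[MLX5_ST_SZ_DW(fte_match_param)] = {0};
    struct mlx5_flow_destination dest = {
            .type    = MLX5_FLOW_DESTINATION_TYPE_TIR,
            .tir_num = tirn,                /* assumed TIR number */
    };
    struct mlx5_flow_namespace *ns;
    struct mlx5_flow_table *ft;
    struct mlx5_flow_rule *rule;

    ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_KERNEL);
    ft = mlx5_create_auto_grouped_flow_table(ns, 0, 1024, 4);
    rule = mlx5_add_flow_rule(ft, 0, match_c, match_v,
                              MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, /* assumed */
                              MLX5_FS_DEFAULT_FLOW_TAG, &dest);
    /* ... */
    mlx5_del_flow_rule(rule);
    mlx5_destroy_flow_table(ft);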
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 1565324eb620..68d73f82e009 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -185,6 +185,7 @@ enum {
MLX5_CMD_OP_MODIFY_RQT = 0x917,
MLX5_CMD_OP_DESTROY_RQT = 0x918,
MLX5_CMD_OP_QUERY_RQT = 0x919,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ROOT = 0x92f,
MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930,
MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931,
MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932,
@@ -193,7 +194,8 @@ enum {
MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935,
MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937,
- MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938
+ MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938,
+ MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c
};
struct mlx5_ifc_flow_table_fields_supported_bits {
@@ -256,25 +258,30 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
struct mlx5_ifc_flow_table_prop_layout_bits {
u8 ft_support[0x1];
- u8 reserved_0[0x1f];
+ u8 reserved_0[0x2];
+ u8 flow_modify_en[0x1];
+ u8 modify_root[0x1];
+ u8 identified_miss_table_mode[0x1];
+ u8 flow_table_modify[0x1];
+ u8 reserved_1[0x19];
- u8 reserved_1[0x2];
+ u8 reserved_2[0x2];
u8 log_max_ft_size[0x6];
- u8 reserved_2[0x10];
+ u8 reserved_3[0x10];
u8 max_ft_level[0x8];
- u8 reserved_3[0x20];
+ u8 reserved_4[0x20];
- u8 reserved_4[0x18];
+ u8 reserved_5[0x18];
u8 log_max_ft_num[0x8];
- u8 reserved_5[0x18];
+ u8 reserved_6[0x18];
u8 log_max_destination[0x8];
- u8 reserved_6[0x18];
+ u8 reserved_7[0x18];
u8 log_max_flow[0x8];
- u8 reserved_7[0x40];
+ u8 reserved_8[0x40];
struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
@@ -291,6 +298,22 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits {
u8 reserved_1[0x1a];
};
+struct mlx5_ifc_ipv4_layout_bits {
+ u8 reserved_0[0x60];
+
+ u8 ipv4[0x20];
+};
+
+struct mlx5_ifc_ipv6_layout_bits {
+ u8 ipv6[16][0x8];
+};
+
+union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
+ struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
+ struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
+ u8 reserved_0[0x80];
+};
+
struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 smac_47_16[0x20];
@@ -321,9 +344,9 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 udp_sport[0x10];
u8 udp_dport[0x10];
- u8 src_ip[4][0x20];
+ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
- u8 dst_ip[4][0x20];
+ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
};
struct mlx5_ifc_fte_match_set_misc_bits {
@@ -447,6 +470,29 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 reserved_3[0x7200];
};
+struct mlx5_ifc_flow_table_eswitch_cap_bits {
+ u8 reserved_0[0x200];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_ingress;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress;
+
+ u8 reserved_1[0x7800];
+};
+
+struct mlx5_ifc_e_switch_cap_bits {
+ u8 vport_svlan_strip[0x1];
+ u8 vport_cvlan_strip[0x1];
+ u8 vport_svlan_insert[0x1];
+ u8 vport_cvlan_insert_if_not_exist[0x1];
+ u8 vport_cvlan_insert_overwrite[0x1];
+ u8 reserved_0[0x1b];
+
+ u8 reserved_1[0x7e0];
+};
+
struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 csum_cap[0x1];
u8 vlan_cap[0x1];
@@ -665,7 +711,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_17[0x1];
u8 ets[0x1];
u8 nic_flow_table[0x1];
- u8 reserved_18[0x4];
+ u8 eswitch_flow_table[0x1];
+ u8 early_vf_enable[0x1];
+ u8 reserved_18[0x2];
u8 local_ca_ack_delay[0x5];
u8 reserved_19[0x6];
u8 port_type[0x2];
@@ -787,27 +835,36 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_60[0x1b];
u8 log_max_wq_sz[0x5];
- u8 reserved_61[0xa0];
-
+ u8 nic_vport_change_event[0x1];
+ u8 reserved_61[0xa];
+ u8 log_max_vlan_list[0x5];
u8 reserved_62[0x3];
+ u8 log_max_current_mc_list[0x5];
+ u8 reserved_63[0x3];
+ u8 log_max_current_uc_list[0x5];
+
+ u8 reserved_64[0x80];
+
+ u8 reserved_65[0x3];
u8 log_max_l2_table[0x5];
- u8 reserved_63[0x8];
+ u8 reserved_66[0x8];
u8 log_uar_page_sz[0x10];
- u8 reserved_64[0x100];
-
- u8 reserved_65[0x1f];
+ u8 reserved_67[0x40];
+ u8 device_frequency_khz[0x20];
+ u8 reserved_68[0x5f];
u8 cqe_zip[0x1];
u8 cqe_zip_timeout[0x10];
u8 cqe_zip_max_num[0x10];
- u8 reserved_66[0x220];
+ u8 reserved_69[0x220];
};
-enum {
- MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_FLOW_TABLE_ = 0x1,
- MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_TIR = 0x2,
+enum mlx5_flow_destination_type {
+ MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
+ MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
};
struct mlx5_ifc_dest_format_struct_bits {
@@ -900,6 +957,13 @@ struct mlx5_ifc_mac_address_layout_bits {
u8 mac_addr_31_0[0x20];
};
+struct mlx5_ifc_vlan_layout_bits {
+ u8 reserved_0[0x14];
+ u8 vlan[0x0c];
+
+ u8 reserved_1[0x20];
+};
+
struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
u8 reserved_0[0xa0];
@@ -1829,6 +1893,8 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_roce_cap_bits roce_cap;
struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
+ struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
+ struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
u8 reserved_0[0x8000];
};
@@ -2133,24 +2199,35 @@ struct mlx5_ifc_rmpc_bits {
struct mlx5_ifc_wq_bits wq;
};
-enum {
- MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS = 0x0,
-};
-
struct mlx5_ifc_nic_vport_context_bits {
u8 reserved_0[0x1f];
u8 roce_en[0x1];
- u8 reserved_1[0x760];
+ u8 arm_change_event[0x1];
+ u8 reserved_1[0x1a];
+ u8 event_on_mtu[0x1];
+ u8 event_on_promisc_change[0x1];
+ u8 event_on_vlan_change[0x1];
+ u8 event_on_mc_address_change[0x1];
+ u8 event_on_uc_address_change[0x1];
- u8 reserved_2[0x5];
+ u8 reserved_2[0xf0];
+
+ u8 mtu[0x10];
+
+ u8 reserved_3[0x640];
+
+ u8 promisc_uc[0x1];
+ u8 promisc_mc[0x1];
+ u8 promisc_all[0x1];
+ u8 reserved_4[0x2];
u8 allowed_list_type[0x3];
- u8 reserved_3[0xc];
+ u8 reserved_5[0xc];
u8 allowed_list_size[0xc];
struct mlx5_ifc_mac_address_layout_bits permanent_address;
- u8 reserved_4[0x20];
+ u8 reserved_6[0x20];
u8 current_uc_mac_address[0][0x40];
};
@@ -2263,6 +2340,26 @@ struct mlx5_ifc_hca_vport_context_bits {
u8 reserved_6[0xca0];
};
+struct mlx5_ifc_esw_vport_context_bits {
+ u8 reserved_0[0x3];
+ u8 vport_svlan_strip[0x1];
+ u8 vport_cvlan_strip[0x1];
+ u8 vport_svlan_insert[0x1];
+ u8 vport_cvlan_insert[0x2];
+ u8 reserved_1[0x18];
+
+ u8 reserved_2[0x20];
+
+ u8 svlan_cfi[0x1];
+ u8 svlan_pcp[0x3];
+ u8 svlan_id[0xc];
+ u8 cvlan_cfi[0x1];
+ u8 cvlan_pcp[0x3];
+ u8 cvlan_id[0xc];
+
+ u8 reserved_3[0x7a0];
+};
+
enum {
MLX5_EQC_STATUS_OK = 0x0,
MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa,
@@ -2769,6 +2866,13 @@ struct mlx5_ifc_set_hca_cap_in_bits {
union mlx5_ifc_hca_cap_union_bits capability;
};
+enum {
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3
+};
+
struct mlx5_ifc_set_fte_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
@@ -2793,11 +2897,14 @@ struct mlx5_ifc_set_fte_in_bits {
u8 reserved_4[0x8];
u8 table_id[0x18];
- u8 reserved_5[0x40];
+ u8 reserved_5[0x18];
+ u8 modify_enable_mask[0x8];
+
+ u8 reserved_6[0x20];
u8 flow_index[0x20];
- u8 reserved_6[0xe0];
+ u8 reserved_7[0xe0];
struct mlx5_ifc_flow_context_bits flow_context;
};
@@ -2940,6 +3047,7 @@ struct mlx5_ifc_query_vport_state_out_bits {
enum {
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1,
};
struct mlx5_ifc_query_vport_state_in_bits {
@@ -3700,6 +3808,64 @@ struct mlx5_ifc_query_flow_group_in_bits {
u8 reserved_5[0x120];
};
+struct mlx5_ifc_query_esw_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
+};
+
+struct mlx5_ifc_query_esw_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_modify_esw_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_esw_vport_context_fields_select_bits {
+ u8 reserved[0x1c];
+ u8 vport_cvlan_insert[0x1];
+ u8 vport_svlan_insert[0x1];
+ u8 vport_cvlan_strip[0x1];
+ u8 vport_svlan_strip[0x1];
+};
+
+struct mlx5_ifc_modify_esw_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ struct mlx5_ifc_esw_vport_context_fields_select_bits field_select;
+
+ struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
+};
+
struct mlx5_ifc_query_eq_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
@@ -4228,7 +4394,10 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
- u8 reserved_0[0x1c];
+ u8 reserved_0[0x19];
+ u8 mtu[0x1];
+ u8 change_event[0x1];
+ u8 promisc[0x1];
u8 permanent_address[0x1];
u8 addresses_list[0x1];
u8 roce_en[0x1];
@@ -5519,12 +5688,16 @@ struct mlx5_ifc_create_flow_table_in_bits {
u8 reserved_4[0x20];
- u8 reserved_5[0x8];
+ u8 reserved_5[0x4];
+ u8 table_miss_mode[0x4];
u8 level[0x8];
u8 reserved_6[0x8];
u8 log_size[0x8];
- u8 reserved_7[0x120];
+ u8 reserved_7[0x8];
+ u8 table_miss_id[0x18];
+
+ u8 reserved_8[0x100];
};
struct mlx5_ifc_create_flow_group_out_bits {
@@ -6798,4 +6971,72 @@ union mlx5_ifc_uplink_pci_interface_document_bits {
u8 reserved_0[0x20060];
};
+struct mlx5_ifc_set_flow_table_root_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_flow_table_root_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x140];
+};
+
+enum {
+ MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = 0x1,
+};
+
+struct mlx5_ifc_modify_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_flow_table_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x10];
+ u8 modify_field_select[0x10];
+
+ u8 table_type[0x8];
+ u8 reserved_4[0x18];
+
+ u8 reserved_5[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_6[0x4];
+ u8 table_miss_mode[0x4];
+ u8 reserved_7[0x18];
+
+ u8 reserved_8[0x8];
+ u8 table_miss_id[0x18];
+
+ u8 reserved_9[0x100];
+};
+
#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 967e0fd06e89..638f2ca7a527 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -34,9 +34,17 @@
#define __MLX5_VPORT_H__
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
-u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod);
-void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
+u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport);
+int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport, u8 state);
+int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
+ u16 vport, u8 *addr);
+int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
+ u16 vport, u8 *addr);
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
u8 port_num, u16 vf_num, u16 gid_index,
union ib_gid *gid);
@@ -51,5 +59,30 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
u64 *sys_image_guid);
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
u64 *node_guid);
+int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
+ u32 vport,
+ enum mlx5_list_type list_type,
+ u8 addr_list[][ETH_ALEN],
+ int *list_size);
+int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
+ enum mlx5_list_type list_type,
+ u8 addr_list[][ETH_ALEN],
+ int list_size);
+int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
+ u32 vport,
+ int *promisc_uc,
+ int *promisc_mc,
+ int *promisc_all);
+int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
+ int promisc_uc,
+ int promisc_mc,
+ int promisc_all);
+int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
+ u32 vport,
+ u16 vlans[],
+ int *size);
+int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
+ u16 vlans[],
+ int list_size);
#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 79aaa9fc1a15..bf9b322cb0b0 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -9,38 +9,28 @@
#ifdef CONFIG_IP_MROUTE
static inline int ip_mroute_opt(int opt)
{
- return (opt >= MRT_BASE) && (opt <= MRT_MAX);
+ return opt >= MRT_BASE && opt <= MRT_MAX;
}
-#else
-static inline int ip_mroute_opt(int opt)
-{
- return 0;
-}
-#endif
-#ifdef CONFIG_IP_MROUTE
-extern int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
-extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
-extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
-extern int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
-extern int ip_mr_init(void);
+int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
+int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
+int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
+int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
+int ip_mr_init(void);
#else
-static inline
-int ip_mroute_setsockopt(struct sock *sock,
- int optname, char __user *optval, unsigned int optlen)
+static inline int ip_mroute_setsockopt(struct sock *sock, int optname,
+ char __user *optval, unsigned int optlen)
{
return -ENOPROTOOPT;
}
-static inline
-int ip_mroute_getsockopt(struct sock *sock,
- int optname, char __user *optval, int __user *optlen)
+static inline int ip_mroute_getsockopt(struct sock *sock, int optname,
+ char __user *optval, int __user *optlen)
{
return -ENOPROTOOPT;
}
-static inline
-int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
+static inline int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
return -ENOIOCTLCMD;
}
@@ -49,6 +39,11 @@ static inline int ip_mr_init(void)
{
return 0;
}
+
+static inline int ip_mroute_opt(int opt)
+{
+ return 0;
+}
#endif
struct vif_device {
@@ -64,6 +59,32 @@ struct vif_device {
#define VIFF_STATIC 0x8000
+#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
+#define MFC_LINES 64
+
+struct mr_table {
+ struct list_head list;
+ possible_net_t net;
+ u32 id;
+ struct sock __rcu *mroute_sk;
+ struct timer_list ipmr_expire_timer;
+ struct list_head mfc_unres_queue;
+ struct list_head mfc_cache_array[MFC_LINES];
+ struct vif_device vif_table[MAXVIFS];
+ int maxvif;
+ atomic_t cache_resolve_queue_len;
+ bool mroute_do_assert;
+ bool mroute_do_pim;
+ int mroute_reg_vif_num;
+};
+
+/* mfc_flags:
+ * MFC_STATIC - the entry was added statically (not by a routing daemon)
+ */
+enum {
+ MFC_STATIC = BIT(0),
+};
+
struct mfc_cache {
struct list_head list;
__be32 mfc_mcastgrp; /* Group the entry belongs to */
@@ -89,19 +110,14 @@ struct mfc_cache {
struct rcu_head rcu;
};
-#define MFC_STATIC 1
-#define MFC_NOTIFY 2
-
-#define MFC_LINES 64
-
#ifdef __BIG_ENDIAN
#define MFC_HASH(a,b) (((((__force u32)(__be32)a)>>24)^(((__force u32)(__be32)b)>>26))&(MFC_LINES-1))
#else
#define MFC_HASH(a,b) ((((__force u32)(__be32)a)^(((__force u32)(__be32)b)>>2))&(MFC_LINES-1))
-#endif
+#endif
struct rtmsg;
-extern int ipmr_get_route(struct net *net, struct sk_buff *skb,
- __be32 saddr, __be32 daddr,
- struct rtmsg *rtm, int nowait);
+int ipmr_get_route(struct net *net, struct sk_buff *skb,
+ __be32 saddr, __be32 daddr,
+ struct rtmsg *rtm, int nowait);
#endif
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index f0d87347df19..d9654f0eecb3 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -52,7 +52,7 @@ enum {
NETIF_F_GSO_TUNNEL_REMCSUM_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
- NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */
+ NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/
NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */
NETIF_F_RXHASH_BIT, /* Receive hashing offload */
@@ -103,7 +103,7 @@ enum {
#define NETIF_F_NTUPLE __NETIF_F(NTUPLE)
#define NETIF_F_RXCSUM __NETIF_F(RXCSUM)
#define NETIF_F_RXHASH __NETIF_F(RXHASH)
-#define NETIF_F_SCTP_CSUM __NETIF_F(SCTP_CSUM)
+#define NETIF_F_SCTP_CRC __NETIF_F(SCTP_CRC)
#define NETIF_F_SG __NETIF_F(SG)
#define NETIF_F_TSO6 __NETIF_F(TSO6)
#define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN)
@@ -146,10 +146,12 @@ enum {
#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
NETIF_F_TSO6 | NETIF_F_UFO)
-#define NETIF_F_GEN_CSUM NETIF_F_HW_CSUM
-#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
-#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
-#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
+/* List of IP checksum features. Note that NETIF_F_HW_CSUM should not be
+ * set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set;
+ * this would be contradictory.
+ */
+#define NETIF_F_CSUM_MASK (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
+ NETIF_F_HW_CSUM)
#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
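
A sketch of how the consolidated mask replaces the old NETIF_F_ALL_CSUM tests (the driver code is hypothetical):

    /* can this device checksum the packet at all? */
    if (dev->features & NETIF_F_CSUM_MASK)
            offload_tx_checksum(skb);       /* hypothetical */

    /* a protocol-agnostic device advertises only: */
    dev->features |= NETIF_F_HW_CSUM;       /* never together with
                                             * NETIF_F_IP(V6)_CSUM */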
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3143c847bddb..5ac140dcb789 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -132,7 +132,9 @@ static inline bool dev_xmit_complete(int rc)
* used.
*/
-#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
+#if defined(CONFIG_HYPERV_NET)
+# define LL_MAX_HEADER 128
+#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
# define LL_MAX_HEADER 128
# else
@@ -326,7 +328,8 @@ enum {
NAPI_STATE_SCHED, /* Poll is scheduled */
NAPI_STATE_DISABLE, /* Disable pending */
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
- NAPI_STATE_HASHED, /* In NAPI hash */
+ NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
+ NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
};
enum gro_result {
@@ -461,19 +464,13 @@ static inline void napi_complete(struct napi_struct *n)
}
/**
- * napi_by_id - lookup a NAPI by napi_id
- * @napi_id: hashed napi_id
- *
- * lookup @napi_id in napi_hash table
- * must be called under rcu_read_lock()
- */
-struct napi_struct *napi_by_id(unsigned int napi_id);
-
-/**
* napi_hash_add - add a NAPI to global hashtable
* @napi: napi context
*
* generate a new napi_id and store a @napi under it in napi_hash
+ * Used for busy polling (CONFIG_NET_RX_BUSY_POLL)
+ * Note: This is normally automatically done from netif_napi_add(),
+ * so might disappear in a future linux version.
*/
void napi_hash_add(struct napi_struct *napi);
@@ -482,9 +479,14 @@ void napi_hash_add(struct napi_struct *napi);
* @napi: napi context
*
* Warning: caller must observe rcu grace period
- * before freeing memory containing @napi
+ * before freeing memory containing @napi, if
+ * this function returns true.
+ * Note: the core networking stack automatically calls it
+ * from netif_napi_del().
+ * Drivers might want to call this helper to combine all
+ * the needed rcu grace periods into a single one.
*/
-void napi_hash_del(struct napi_struct *napi);
+bool napi_hash_del(struct napi_struct *napi);
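
The new return value lets a driver fold the RCU wait into its own teardown; a hedged sketch:

    if (napi_hash_del(&priv->napi))     /* was hashed: readers may race */
            synchronize_net();          /* single grace period before free */
    netif_napi_del(&priv->napi);
    kfree(priv);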
/**
* napi_disable - prevent NAPI from scheduling
@@ -810,6 +812,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
* Required can not be NULL.
*
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ * netdev_features_t features);
+ * Adjusts the requested feature flags according to device-specific
+ * constraints, and returns the resulting flags. Must not modify
+ * the device state.
+ *
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
* void *accel_priv, select_queue_fallback_t fallback);
* Called to decide which queue to use when device supports multiple
@@ -957,12 +965,6 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* Called to release previously enslaved netdev.
*
* Feature/offload setting functions.
- * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
- * netdev_features_t features);
- * Adjusts the requested feature flags according to device-specific
- * constraints, and returns the resulting flags. Must not modify
- * the device state.
- *
* int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
* Called to update device configuration to new features. Passed
* feature set might be less than what was returned by ndo_fix_features().
@@ -1011,6 +1013,19 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* a new port starts listening. The operation is protected by the
* vxlan_net->sock_lock.
*
+ * void (*ndo_add_geneve_port)(struct net_device *dev,
+ * sa_family_t sa_family, __be16 port);
+ * Called by geneve to notify a driver about the UDP port and socket
+ * address family that geneve is listening to. It is called only when
+ * a new port starts listening. The operation is protected by the
+ * geneve_net->sock_lock.
+ *
+ * void (*ndo_del_geneve_port)(struct net_device *dev,
+ * sa_family_t sa_family, __be16 port);
+ * Called by geneve to notify the driver about a UDP port and socket
+ * address family that geneve is not listening to anymore. The operation
+ * is protected by the geneve_net->sock_lock.
+ *
* void (*ndo_del_vxlan_port)(struct net_device *dev,
* sa_family_t sa_family, __be16 port);
* Called by vxlan to notify the driver about a UDP port and socket
@@ -1066,8 +1081,11 @@ struct net_device_ops {
void (*ndo_uninit)(struct net_device *dev);
int (*ndo_open)(struct net_device *dev);
int (*ndo_stop)(struct net_device *dev);
- netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
- struct net_device *dev);
+ netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
+ struct net_device *dev);
+ netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb,
void *accel_priv,
@@ -1215,7 +1233,12 @@ struct net_device_ops {
void (*ndo_del_vxlan_port)(struct net_device *dev,
sa_family_t sa_family,
__be16 port);
-
+ void (*ndo_add_geneve_port)(struct net_device *dev,
+ sa_family_t sa_family,
+ __be16 port);
+ void (*ndo_del_geneve_port)(struct net_device *dev,
+ sa_family_t sa_family,
+ __be16 port);
void* (*ndo_dfwd_add_station)(struct net_device *pdev,
struct net_device *dev);
void (*ndo_dfwd_del_station)(struct net_device *pdev,
@@ -1225,9 +1248,6 @@ struct net_device_ops {
struct net_device *dev,
void *priv);
int (*ndo_get_lock_subclass)(struct net_device *dev);
- netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
- struct net_device *dev,
- netdev_features_t features);
int (*ndo_set_tx_maxrate)(struct net_device *dev,
int queue_index,
u32 maxrate);
@@ -1271,6 +1291,7 @@ struct net_device_ops {
* @IFF_NO_QUEUE: device can run without qdisc attached
* @IFF_OPENVSWITCH: device is a Open vSwitch master
* @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
+ * @IFF_TEAM: device is a team device
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1297,6 +1318,7 @@ enum netdev_priv_flags {
IFF_NO_QUEUE = 1<<21,
IFF_OPENVSWITCH = 1<<22,
IFF_L3MDEV_SLAVE = 1<<23,
+ IFF_TEAM = 1<<24,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1323,6 +1345,7 @@ enum netdev_priv_flags {
#define IFF_NO_QUEUE IFF_NO_QUEUE
#define IFF_OPENVSWITCH IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
+#define IFF_TEAM IFF_TEAM
/**
* struct net_device - The DEVICE structure.
@@ -1716,7 +1739,9 @@ struct net_device {
#ifdef CONFIG_XPS
struct xps_dev_maps __rcu *xps_maps;
#endif
-
+#ifdef CONFIG_NET_CLS_ACT
+ struct tcf_proto __rcu *egress_cl_list;
+#endif
#ifdef CONFIG_NET_SWITCHDEV
u32 offload_fwd_mark;
#endif
@@ -1949,6 +1974,26 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight);
/**
+ * netif_tx_napi_add - initialize a napi context
+ * @dev: network device
+ * @napi: napi context
+ * @poll: polling function
+ * @weight: default weight
+ *
+ * This variant of netif_napi_add() should be used from drivers using NAPI
+ * to exclusively poll a TX queue.
+ * This avoids adding it to napi_hash[] and thus polluting that hash table.
+ */
+static inline void netif_tx_napi_add(struct net_device *dev,
+ struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
+{
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
+ netif_napi_add(dev, napi, poll, weight);
+}
+
+/**
* netif_napi_del - remove a napi context
* @napi: napi context
*
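
A usage sketch for netif_tx_napi_add(); foo_tx_poll() and the ring structure are hypothetical:

static int foo_tx_poll(struct napi_struct *napi, int budget)
{
	/* Clean TX completions; a TX-only NAPI typically finishes all
	 * pending work, completes, and re-enables its interrupt.
	 */
	napi_complete(napi);
	return 0;
}

	/* at ring setup time: */
	netif_tx_napi_add(dev, &ring->napi, foo_tx_poll, NAPI_POLL_WEIGHT);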
@@ -2086,6 +2131,24 @@ struct pcpu_sw_netstats {
#define netdev_alloc_pcpu_stats(type) \
__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
+enum netdev_lag_tx_type {
+ NETDEV_LAG_TX_TYPE_UNKNOWN,
+ NETDEV_LAG_TX_TYPE_RANDOM,
+ NETDEV_LAG_TX_TYPE_BROADCAST,
+ NETDEV_LAG_TX_TYPE_ROUNDROBIN,
+ NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
+ NETDEV_LAG_TX_TYPE_HASH,
+};
+
+struct netdev_lag_upper_info {
+ enum netdev_lag_tx_type tx_type;
+};
+
+struct netdev_lag_lower_state_info {
+ u8 link_up : 1,
+ tx_enabled : 1;
+};
+
#include <linux/notifier.h>
/* netdevice notifier chain. Please remember to update the rtnetlink
@@ -2121,6 +2184,7 @@ struct pcpu_sw_netstats {
#define NETDEV_CHANGEINFODATA 0x0018
#define NETDEV_BONDING_INFO 0x0019
#define NETDEV_PRECHANGEUPPER 0x001A
+#define NETDEV_CHANGELOWERSTATE 0x001B
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2139,6 +2203,12 @@ struct netdev_notifier_changeupper_info {
struct net_device *upper_dev; /* new upper dev */
bool master; /* is upper dev master */
	bool linking; /* is the notification for link or unlink */
+ void *upper_info; /* upper dev info */
+};
+
+struct netdev_notifier_changelowerstate_info {
+ struct netdev_notifier_info info; /* must be first */
+	void *lower_state_info; /* lower dev state */
};
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
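
A sketch of consuming the new NETDEV_CHANGELOWERSTATE event; for LAG lower devices this series passes a struct netdev_lag_lower_state_info through lower_state_info, but treating the pointer as optional is the safe assumption:

static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changelowerstate_info *info = ptr;
	struct netdev_lag_lower_state_info *state;

	if (event != NETDEV_CHANGELOWERSTATE)
		return NOTIFY_DONE;

	state = info->lower_state_info;
	if (state && state->link_up && state->tx_enabled)
		netdev_info(dev, "lower dev is up and usable for tx\n");
	return NOTIFY_DONE;
}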
@@ -2472,6 +2542,71 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
+struct skb_csum_offl_spec {
+ __u16 ipv4_okay:1,
+ ipv6_okay:1,
+ encap_okay:1,
+ ip_options_okay:1,
+ ext_hdrs_okay:1,
+ tcp_okay:1,
+ udp_okay:1,
+ sctp_okay:1,
+ vlan_okay:1,
+ no_encapped_ipv6:1,
+ no_not_encapped:1;
+};
+
+bool __skb_csum_offload_chk(struct sk_buff *skb,
+ const struct skb_csum_offl_spec *spec,
+ bool *csum_encapped,
+ bool csum_help);
+
+static inline bool skb_csum_offload_chk(struct sk_buff *skb,
+ const struct skb_csum_offl_spec *spec,
+ bool *csum_encapped,
+ bool csum_help)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return false;
+
+ return __skb_csum_offload_chk(skb, spec, csum_encapped, csum_help);
+}
+
+static inline bool skb_csum_offload_chk_help(struct sk_buff *skb,
+ const struct skb_csum_offl_spec *spec)
+{
+ bool csum_encapped;
+
+ return skb_csum_offload_chk(skb, spec, &csum_encapped, true);
+}
+
+static inline bool skb_csum_off_chk_help_cmn(struct sk_buff *skb)
+{
+ static const struct skb_csum_offl_spec csum_offl_spec = {
+ .ipv4_okay = 1,
+ .ip_options_okay = 1,
+ .ipv6_okay = 1,
+ .vlan_okay = 1,
+ .tcp_okay = 1,
+ .udp_okay = 1,
+ };
+
+ return skb_csum_offload_chk_help(skb, &csum_offl_spec);
+}
+
+static inline bool skb_csum_off_chk_help_cmn_v4_only(struct sk_buff *skb)
+{
+ static const struct skb_csum_offl_spec csum_offl_spec = {
+ .ipv4_okay = 1,
+ .ip_options_okay = 1,
+ .tcp_okay = 1,
+ .udp_okay = 1,
+ .vlan_okay = 1,
+ };
+
+ return skb_csum_offload_chk_help(skb, &csum_offl_spec);
+}
+
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
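
Reading the helpers above together with the skbuff comment block later in this diff, a driver whose device only checksums plain TCP/UDP over IPv4/IPv6 could gate its descriptor setup on the common helper; foo_enable_hw_csum() and desc are hypothetical:

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb_csum_off_chk_help_cmn(skb))
			foo_enable_hw_csum(desc, skb);
		/* else: the helper passed csum_help == true, so the
		 * checksum was already resolved via skb_checksum_help()
		 */
	}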
@@ -3595,15 +3730,15 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
int netdev_master_upper_dev_link(struct net_device *dev,
- struct net_device *upper_dev);
-int netdev_master_upper_dev_link_private(struct net_device *dev,
- struct net_device *upper_dev,
- void *private);
+ struct net_device *upper_dev,
+ void *upper_priv, void *upper_info);
void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev);
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev);
+void netdev_lower_state_changed(struct net_device *lower_dev,
+ void *lower_state_info);
/* RSS keys are 40 or 52 bytes long */
#define NETDEV_RSS_KEY_LEN 52
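
The reworked netdev_master_upper_dev_link() and the new netdev_lower_state_changed() are meant to be used together by LAG masters; a hedged sketch with hypothetical foo_ wrappers:

static int foo_port_link(struct net_device *lag_dev,
			 struct net_device *port_dev)
{
	struct netdev_lag_upper_info lag_upper_info = {
		.tx_type = NETDEV_LAG_TX_TYPE_HASH,
	};

	return netdev_master_upper_dev_link(port_dev, lag_dev, NULL,
					    &lag_upper_info);
}

static void foo_port_link_up(struct net_device *port_dev)
{
	struct netdev_lag_lower_state_info info = {
		.link_up = 1,
		.tx_enabled = 1,
	};

	netdev_lower_state_changed(port_dev, &info);
}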
@@ -3611,7 +3746,7 @@ extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
void netdev_rss_key_fill(void *buffer, size_t len);
int dev_get_nest_level(struct net_device *dev,
- bool (*type_check)(struct net_device *dev));
+ bool (*type_check)(const struct net_device *dev));
int skb_checksum_help(struct sk_buff *skb);
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path);
@@ -3641,13 +3776,37 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
static inline bool can_checksum_protocol(netdev_features_t features,
__be16 protocol)
{
- return ((features & NETIF_F_GEN_CSUM) ||
- ((features & NETIF_F_V4_CSUM) &&
- protocol == htons(ETH_P_IP)) ||
- ((features & NETIF_F_V6_CSUM) &&
- protocol == htons(ETH_P_IPV6)) ||
- ((features & NETIF_F_FCOE_CRC) &&
- protocol == htons(ETH_P_FCOE)));
+ if (protocol == htons(ETH_P_FCOE))
+ return !!(features & NETIF_F_FCOE_CRC);
+
+ /* Assume this is an IP checksum (not SCTP CRC) */
+
+ if (features & NETIF_F_HW_CSUM) {
+ /* Can checksum everything */
+ return true;
+ }
+
+ switch (protocol) {
+ case htons(ETH_P_IP):
+ return !!(features & NETIF_F_IP_CSUM);
+ case htons(ETH_P_IPV6):
+ return !!(features & NETIF_F_IPV6_CSUM);
+ default:
+ return false;
+ }
+}
+
+/* Map an ethertype into IP protocol if possible */
+static inline int eproto_to_ipproto(int eproto)
+{
+ switch (eproto) {
+ case htons(ETH_P_IP):
+ return IPPROTO_IP;
+ case htons(ETH_P_IPV6):
+ return IPPROTO_IPV6;
+ default:
+ return -1;
+ }
}
#ifdef CONFIG_BUG
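
A quick worked example of the rewritten can_checksum_protocol():

	can_checksum_protocol(NETIF_F_IP_CSUM, htons(ETH_P_IP));   /* true */
	can_checksum_protocol(NETIF_F_IP_CSUM, htons(ETH_P_IPV6)); /* false */
	can_checksum_protocol(NETIF_F_HW_CSUM, htons(ETH_P_IPV6)); /* true: HW_CSUM covers any IP checksum */
	can_checksum_protocol(NETIF_F_HW_CSUM, htons(ETH_P_FCOE)); /* false: FCoE requires NETIF_F_FCOE_CRC */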
@@ -3712,15 +3871,14 @@ void linkwatch_run_queue(void);
static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
netdev_features_t f2)
{
- if (f1 & NETIF_F_GEN_CSUM)
- f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
- if (f2 & NETIF_F_GEN_CSUM)
- f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
- f1 &= f2;
- if (f1 & NETIF_F_GEN_CSUM)
- f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
+ if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
+ if (f1 & NETIF_F_HW_CSUM)
+ f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
+ else
+ f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
+ }
- return f1;
+ return f1 & f2;
}
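
Worked example: NETIF_F_HW_CSUM on one side is first widened to the protocol-specific bits, so a common IPv4 capability survives the AND:

	netdev_features_t res;

	res = netdev_intersect_features(NETIF_F_HW_CSUM, NETIF_F_IP_CSUM);
	/* res == NETIF_F_IP_CSUM */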
static inline netdev_features_t netdev_get_wanted_features(
@@ -3808,32 +3966,32 @@ static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
skb->mac_len = mac_len;
}
-static inline bool netif_is_macvlan(struct net_device *dev)
+static inline bool netif_is_macvlan(const struct net_device *dev)
{
return dev->priv_flags & IFF_MACVLAN;
}
-static inline bool netif_is_macvlan_port(struct net_device *dev)
+static inline bool netif_is_macvlan_port(const struct net_device *dev)
{
return dev->priv_flags & IFF_MACVLAN_PORT;
}
-static inline bool netif_is_ipvlan(struct net_device *dev)
+static inline bool netif_is_ipvlan(const struct net_device *dev)
{
return dev->priv_flags & IFF_IPVLAN_SLAVE;
}
-static inline bool netif_is_ipvlan_port(struct net_device *dev)
+static inline bool netif_is_ipvlan_port(const struct net_device *dev)
{
return dev->priv_flags & IFF_IPVLAN_MASTER;
}
-static inline bool netif_is_bond_master(struct net_device *dev)
+static inline bool netif_is_bond_master(const struct net_device *dev)
{
return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}
-static inline bool netif_is_bond_slave(struct net_device *dev)
+static inline bool netif_is_bond_slave(const struct net_device *dev)
{
return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}
@@ -3868,6 +4026,26 @@ static inline bool netif_is_ovs_master(const struct net_device *dev)
return dev->priv_flags & IFF_OPENVSWITCH;
}
+static inline bool netif_is_team_master(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_TEAM;
+}
+
+static inline bool netif_is_team_port(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_TEAM_PORT;
+}
+
+static inline bool netif_is_lag_master(const struct net_device *dev)
+{
+ return netif_is_bond_master(dev) || netif_is_team_master(dev);
+}
+
+static inline bool netif_is_lag_port(const struct net_device *dev)
+{
+ return netif_is_bond_slave(dev) || netif_is_team_port(dev);
+}
+
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
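
The new LAG predicates let callers treat bonding and team uniformly; for example, a notifier that only reacts to LAG topologies might filter like this sketch (foo_handle_lag_join() is hypothetical):

	if (netif_is_lag_master(upper_dev) && netif_is_lag_port(dev))
		foo_handle_lag_join(dev, upper_dev);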
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
new file mode 100644
index 000000000000..22a16a23cd8a
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -0,0 +1,13 @@
+#ifndef _NF_CONNTRACK_SCTP_H
+#define _NF_CONNTRACK_SCTP_H
+/* SCTP tracking. */
+
+#include <uapi/linux/netfilter/nf_conntrack_sctp.h>
+
+struct ip_ct_sctp {
+ enum sctp_conntrack state;
+
+ __be32 vtag[IP_CT_DIR_MAX];
+};
+
+#endif /* _NF_CONNTRACK_SCTP_H */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 5646b24bfc64..ba0d9789eb6e 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -8,12 +8,12 @@
#include <uapi/linux/netfilter/nfnetlink.h>
struct nfnl_callback {
- int (*call)(struct sock *nl, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
- const struct nlattr * const cda[]);
- int (*call_rcu)(struct sock *nl, struct sk_buff *skb,
+ int (*call)(struct net *net, struct sock *nl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[]);
+ int (*call_rcu)(struct net *net, struct sock *nl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[]);
int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[]);
@@ -26,8 +26,8 @@ struct nfnetlink_subsystem {
__u8 subsys_id; /* nfnetlink subsystem ID */
__u8 cb_count; /* number of callbacks */
const struct nfnl_callback *cb; /* callback for individual types */
- int (*commit)(struct sk_buff *skb);
- int (*abort)(struct sk_buff *skb);
+ int (*commit)(struct net *net, struct sk_buff *skb);
+ int (*abort)(struct net *net, struct sk_buff *skb);
};
int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 639e9b8b0e4d..0b41959aab9f 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -131,6 +131,7 @@ netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
struct netlink_callback {
struct sk_buff *skb;
const struct nlmsghdr *nlh;
+ int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff * skb,
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
@@ -153,6 +154,7 @@ struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags);
struct netlink_dump_control {
+ int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff *skb, struct netlink_callback *);
int (*done)(struct netlink_callback *);
void *data;
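
The new ->start() callback runs once per dump request, before the first ->dump() invocation, which makes it a natural place to parse request attributes or reset per-dump state. A sketch; foo_dump() and dispatching on the sock_diag socket are hypothetical:

static int foo_dump(struct sk_buff *skb, struct netlink_callback *cb);

static int foo_start(struct netlink_callback *cb)
{
	cb->args[0] = 0;	/* e.g. initialize an iteration cursor once */
	return 0;
}

	/* in the request handler: */
	{
		struct netlink_dump_control c = {
			.start = foo_start,
			.dump = foo_dump,
		};

		return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
	}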
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d9ba49cedc5d..1acbefc4bbda 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2495,6 +2495,8 @@
#define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700
#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff
+#define PCI_VENDOR_ID_NETRONOME 0x19ee
+
#define PCI_VENDOR_ID_QMI 0x1a32
#define PCI_VENDOR_ID_AZWAVE 0x1a3b
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 05fde31b6dc6..d6f3641e7933 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -16,8 +16,10 @@
#ifndef __PHY_H
#define __PHY_H
+#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
+#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/timer.h>
@@ -58,6 +60,7 @@
#define PHY_HAS_INTERRUPT 0x00000001
#define PHY_HAS_MAGICANEG 0x00000002
#define PHY_IS_INTERNAL 0x00000004
+#define MDIO_DEVICE_IS_PHY 0x80000000
/* Interface Mode definitions */
typedef enum {
@@ -158,8 +161,8 @@ struct mii_bus {
const char *name;
char id[MII_BUS_ID_SIZE];
void *priv;
- int (*read)(struct mii_bus *bus, int phy_id, int regnum);
- int (*write)(struct mii_bus *bus, int phy_id, int regnum, u16 val);
+ int (*read)(struct mii_bus *bus, int addr, int regnum);
+ int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
int (*reset)(struct mii_bus *bus);
/*
@@ -178,7 +181,7 @@ struct mii_bus {
struct device dev;
/* list of all PHYs on bus */
- struct phy_device *phy_map[PHY_MAX_ADDR];
+ struct mdio_device *mdio_map[PHY_MAX_ADDR];
/* PHY addresses to be ignored when probing */
u32 phy_mask;
@@ -187,10 +190,10 @@ struct mii_bus {
u32 phy_ignore_ta_mask;
/*
- * Pointer to an array of interrupts, each PHY's
- * interrupt at the index matching its address
+ * An array of interrupts, each PHY's interrupt at the index
+ * matching its address
*/
- int *irq;
+ int irq[PHY_MAX_ADDR];
};
#define to_mii_bus(d) container_of(d, struct mii_bus, dev)
@@ -212,11 +215,6 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
-int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
-int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
-int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
-int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val);
-
#define PHY_INTERRUPT_DISABLED 0x0
#define PHY_INTERRUPT_ENABLED 0x80000000
@@ -361,14 +359,12 @@ struct phy_c45_device_ids {
* handling, as well as handling shifts in PHY hardware state
*/
struct phy_device {
+ struct mdio_device mdio;
+
/* Information about the PHY type */
/* And management functions */
struct phy_driver *drv;
- struct mii_bus *bus;
-
- struct device dev;
-
u32 phy_id;
struct phy_c45_device_ids c45_ids;
@@ -384,9 +380,6 @@ struct phy_device {
phy_interface_t interface;
- /* Bus address of the PHY (0-31) */
- int addr;
-
/*
* forced speed & duplex (no autoneg)
* partner speed & duplex & pause (autoneg)
@@ -435,10 +428,12 @@ struct phy_device {
void (*adjust_link)(struct net_device *dev);
};
-#define to_phy_device(d) container_of(d, struct phy_device, dev)
+#define to_phy_device(d) container_of(to_mdio_device(d), \
+ struct phy_device, mdio)
/* struct phy_driver: Driver structure for a particular PHY type
*
+ * driver_data: static driver data
* phy_id: The result of reading the UID registers of this PHY
* type, and ANDing them with the phy_id_mask. This driver
* only works for PHYs with IDs which match this field
@@ -448,7 +443,6 @@ struct phy_device {
* by this PHY
* flags: A bitfield defining certain other features this PHY
* supports (like interrupts)
- * driver_data: static driver data
*
* The drivers must implement config_aneg and read_status. All
* other functions are optional. Note that none of these
@@ -459,6 +453,7 @@ struct phy_device {
* supported in the driver).
*/
struct phy_driver {
+ struct mdio_driver_common mdiodrv;
u32 phy_id;
char *name;
unsigned int phy_id_mask;
@@ -589,9 +584,14 @@ struct phy_driver {
int (*module_eeprom)(struct phy_device *dev,
struct ethtool_eeprom *ee, u8 *data);
- struct device_driver driver;
+ /* Get statistics from the phy using ethtool */
+ int (*get_sset_count)(struct phy_device *dev);
+ void (*get_strings)(struct phy_device *dev, u8 *data);
+ void (*get_stats)(struct phy_device *dev,
+ struct ethtool_stats *stats, u64 *data);
};
-#define to_phy_driver(d) container_of(d, struct phy_driver, driver)
+#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
+ struct phy_driver, mdiodrv)
#define PHY_ANY_ID "MATCH ANY PHY"
#define PHY_ANY_UID 0xffffffff
@@ -619,7 +619,7 @@ static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
if (!phydev->is_c45)
return -EOPNOTSUPP;
- return mdiobus_read(phydev->bus, phydev->addr,
+ return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff));
}
@@ -627,14 +627,12 @@ static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
* phy_read_mmd_indirect - reads data from the MMD registers
* @phydev: The PHY device bus
* @prtad: MMD Address
 * @devad: MMD DEVAD
- * @addr: PHY address on the MII bus
*
 * Description: it reads data from the MMD registers (clause 22 used to
 * access clause 45) of the specified phy address.
*/
-int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
- int devad, int addr);
+int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad);
/**
* phy_read - Convenience function for reading a given PHY register
@@ -647,7 +645,7 @@ int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
*/
static inline int phy_read(struct phy_device *phydev, u32 regnum)
{
- return mdiobus_read(phydev->bus, phydev->addr, regnum);
+ return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum);
}
/**
@@ -662,7 +660,7 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum)
*/
static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
{
- return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
+ return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val);
}
/**
@@ -725,7 +723,7 @@ static inline int phy_write_mmd(struct phy_device *phydev, int devad,
regnum = MII_ADDR_C45 | ((devad & 0x1f) << 16) | (regnum & 0xffff);
- return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
+ return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val);
}
/**
@@ -733,14 +731,13 @@ static inline int phy_write_mmd(struct phy_device *phydev, int devad,
* @phydev: The PHY device
* @prtad: MMD Address
* @devad: MMD DEVAD
- * @addr: PHY address on the MII bus
* @data: data to write in the MMD register
*
 * Description: Write data to the MMD registers of the specified
* phy address.
*/
void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
- int devad, int addr, u32 data);
+ int devad, u32 data);
struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
bool is_c45,
@@ -775,6 +772,20 @@ static inline int phy_read_status(struct phy_device *phydev)
return phydev->drv->read_status(phydev);
}
+#define phydev_err(_phydev, format, args...) \
+ dev_err(&_phydev->mdio.dev, format, ##args)
+
+#define phydev_dbg(_phydev, format, args...) \
+	dev_dbg(&_phydev->mdio.dev, format, ##args)
+
+static inline const char *phydev_name(const struct phy_device *phydev)
+{
+ return dev_name(&phydev->mdio.dev);
+}
+
+void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
+ __printf(2, 3);
+void phy_attached_info(struct phy_device *phydev);
int genphy_config_init(struct phy_device *phydev);
int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
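
The new phydev_err()/phydev_dbg() macros and phydev_name() route messages through the embedded mdio device; a sketch of typical use in a PHY driver callback:

static int foo_config_init(struct phy_device *phydev)
{
	int val = phy_read(phydev, MII_BMSR);

	if (val < 0) {
		phydev_err(phydev, "failed to read BMSR\n");
		return val;
	}
	phydev_dbg(phydev, "%s: BMSR=%#06x\n", phydev_name(phydev), val);
	return 0;
}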
@@ -787,8 +798,9 @@ int genphy_resume(struct phy_device *phydev);
int genphy_soft_reset(struct phy_device *phydev);
void phy_driver_unregister(struct phy_driver *drv);
void phy_drivers_unregister(struct phy_driver *drv, int n);
-int phy_driver_register(struct phy_driver *new_driver);
-int phy_drivers_register(struct phy_driver *new_driver, int n);
+int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
+int phy_drivers_register(struct phy_driver *new_driver, int n,
+ struct module *owner);
void phy_state_machine(struct work_struct *work);
void phy_change(struct work_struct *work);
void phy_mac_interrupt(struct phy_device *phydev, int new_link);
@@ -833,7 +845,7 @@ extern struct bus_type mdio_bus_type;
#define phy_module_driver(__phy_drivers, __count) \
static int __init phy_module_init(void) \
{ \
- return phy_drivers_register(__phy_drivers, __count); \
+ return phy_drivers_register(__phy_drivers, __count, THIS_MODULE); \
} \
module_init(phy_module_init); \
static void __exit phy_module_exit(void) \
diff --git a/include/linux/pim.h b/include/linux/pim.h
index 252bf6644c51..e1d756f81348 100644
--- a/include/linux/pim.h
+++ b/include/linux/pim.h
@@ -13,6 +13,11 @@
#define PIM_NULL_REGISTER cpu_to_be32(0x40000000)
+static inline bool ipmr_pimsm_enabled(void)
+{
+ return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2);
+}
+
/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */
struct pimreghdr
{
diff --git a/include/linux/platform_data/microread.h b/include/linux/platform_data/microread.h
index cfda59b226ee..ca13992089b8 100644
--- a/include/linux/platform_data/microread.h
+++ b/include/linux/platform_data/microread.h
@@ -1,5 +1,5 @@
/*
- * Driver include for the PN544 NFC chip.
+ * Driver include for the Inside Secure microread NFC Chip.
*
* Copyright (C) 2011 Tieto Poland
* Copyright (C) 2012 Intel Corporation. All rights reserved.
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index dc9a1353f971..d4a32e878180 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -25,6 +25,12 @@
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>
+enum qed_led_mode {
+ QED_LED_MODE_OFF,
+ QED_LED_MODE_ON,
+ QED_LED_MODE_RESTORE
+};
+
#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
(void __iomem *)(reg_addr))
@@ -252,6 +258,17 @@ struct qed_common_ops {
void (*chain_free)(struct qed_dev *cdev,
struct qed_chain *p_chain);
+
+/**
+ * @brief set_led - Configure LED mode
+ *
+ * @param cdev
+ * @param mode - LED mode
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*set_led)(struct qed_dev *cdev,
+ enum qed_led_mode mode);
};
/**
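
A protocol driver would reach set_led through its qed_common_ops pointer, e.g. from an ethtool identify handler; the foo_ wrapper below abbreviates the ops plumbing and is hypothetical:

static int foo_set_phys_id(struct qed_dev *cdev,
			   const struct qed_common_ops *ops, bool on)
{
	return ops->set_led(cdev, on ? QED_LED_MODE_ON : QED_LED_MODE_OFF);
}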
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index e50b31d18462..63bd7601b6de 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -823,4 +823,86 @@ out:
return err;
}
+/* Internal function, please use rhashtable_replace_fast() instead */
+static inline int __rhashtable_replace_fast(
+ struct rhashtable *ht, struct bucket_table *tbl,
+ struct rhash_head *obj_old, struct rhash_head *obj_new,
+ const struct rhashtable_params params)
+{
+ struct rhash_head __rcu **pprev;
+ struct rhash_head *he;
+ spinlock_t *lock;
+ unsigned int hash;
+ int err = -ENOENT;
+
+	/* Minimally, the old and new objects must have the same hash
+ * (which should mean identifiers are the same).
+ */
+ hash = rht_head_hashfn(ht, tbl, obj_old, params);
+ if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
+ return -EINVAL;
+
+ lock = rht_bucket_lock(tbl, hash);
+
+ spin_lock_bh(lock);
+
+ pprev = &tbl->buckets[hash];
+ rht_for_each(he, tbl, hash) {
+ if (he != obj_old) {
+ pprev = &he->next;
+ continue;
+ }
+
+ rcu_assign_pointer(obj_new->next, obj_old->next);
+ rcu_assign_pointer(*pprev, obj_new);
+ err = 0;
+ break;
+ }
+
+ spin_unlock_bh(lock);
+
+ return err;
+}
+
+/**
+ * rhashtable_replace_fast - replace an object in hash table
+ * @ht: hash table
+ * @obj_old: pointer to hash head inside object being replaced
+ * @obj_new: pointer to hash head inside object which is new
+ * @params: hash table parameters
+ *
+ * Replacing an object doesn't affect the number of elements in the hash table
+ * or bucket, so we don't need to worry about shrinking or expanding the
+ * table here.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found,
+ * -EINVAL if hash is not the same for the old and new objects.
+ */
+static inline int rhashtable_replace_fast(
+ struct rhashtable *ht, struct rhash_head *obj_old,
+ struct rhash_head *obj_new,
+ const struct rhashtable_params params)
+{
+ struct bucket_table *tbl;
+ int err;
+
+ rcu_read_lock();
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ /* Because we have already taken (and released) the bucket
+ * lock in old_tbl, if we find that future_tbl is not yet
+	 * visible then that guarantees that the entry, if it exists,
+	 * is still in the old tbl.
+ */
+ while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
+ obj_new, params)) &&
+ (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
+ ;
+
+ rcu_read_unlock();
+
+ return err;
+}
+
#endif /* _LINUX_RHASHTABLE_H */
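
Usage sketch for rhashtable_replace_fast(), assuming a table keyed as in the hypothetical foo_params; the replacement object must carry the same key so both hash identically:

struct foo {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params foo_params = {
	.key_len = sizeof(u32),
	.key_offset = offsetof(struct foo, key),
	.head_offset = offsetof(struct foo, node),
};

static int foo_update(struct rhashtable *ht, struct foo *cur,
		      struct foo *updated)
{
	return rhashtable_replace_fast(ht, &cur->node, &updated->node,
				       foo_params);
}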
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 4be5048b1fbe..c006cc900c44 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -84,6 +84,11 @@ void net_inc_ingress_queue(void);
void net_dec_ingress_queue(void);
#endif
+#ifdef CONFIG_NET_EGRESS
+void net_inc_egress_queue(void);
+void net_dec_egress_queue(void);
+#endif
+
extern void rtnetlink_init(void);
extern void __rtnl_unlock(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4bae8ab3b893..61aa9bbea871 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -834,6 +834,7 @@ struct user_struct {
unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
#endif
unsigned long locked_shm; /* How many pages of mlocked shm ? */
+ unsigned long unix_inflight; /* How many files in flight in unix sockets */
#ifdef CONFIG_KEYS
struct key *uid_keyring; /* UID specific keyring */
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index 8c9131db2b25..f2e27e078362 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -4,7 +4,7 @@
#include <linux/phy.h>
#include <linux/if_ether.h>
-enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN};
+enum {EDMAC_LITTLE_ENDIAN};
struct sh_eth_plat_data {
int phy;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4355129fff91..07f9ccd28654 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -39,11 +39,55 @@
#include <linux/in6.h>
#include <net/flow.h>
-/* A. Checksumming of received packets by device.
+/* The interface for checksum offload between the stack and networking drivers
+ * is as follows...
+ *
+ * A. IP checksum related features
+ *
+ * Drivers advertise checksum offload capabilities in the features of a device.
+ * From the stack's point of view these are capabilities offered by the driver;
+ * a driver typically only advertises features that it is capable of offloading
+ * to its device.
+ *
+ * The checksum related features are:
+ *
+ * NETIF_F_HW_CSUM - The driver (or its device) is able to compute one
+ * IP (one's complement) checksum for any combination
+ * of protocols or protocol layering. The checksum is
+ * computed and set in a packet per the CHECKSUM_PARTIAL
+ * interface (see below).
+ *
+ * NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
+ * TCP or UDP packets over IPv4. These are specifically
+ * unencapsulated packets of the form IPv4|TCP or
+ * IPv4|UDP where the Protocol field in the IPv4 header
+ * is TCP or UDP. The IPv4 header may contain IP options
+ * This feature cannot be set in features for a device
+ * with NETIF_F_HW_CSUM also set. This feature is being
+ * DEPRECATED (see below).
+ *
+ * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
+ * TCP or UDP packets over IPv6. These are specifically
+ * unencapsulated packets of the form IPv6|TCP or
+ * IPv6|UDP where the Next Header field in the IPv6
+ * header is either TCP or UDP. IPv6 extension headers
+ * are not supported with this feature. This feature
+ * cannot be set in features for a device with
+ * NETIF_F_HW_CSUM also set. This feature is being
+ * DEPRECATED (see below).
+ *
+ * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
+ * This flag is only used to disable the RX checksum
+ * feature for a device. The stack will accept receive
+ * checksum indication in packets received on a device
+ * regardless of whether NETIF_F_RXCSUM is set.
+ *
+ * B. Checksumming of received packets by device. Indication of checksum
+ * verification is set in skb->ip_summed. Possible values are:
*
* CHECKSUM_NONE:
*
- * Device failed to checksum this packet e.g. due to lack of capabilities.
+ * Device did not checksum this packet e.g. due to lack of capabilities.
 * The full (though not verified) checksum is present in the packet itself
 * but not in skb->csum. Thus, skb->csum is undefined in this case.
*
@@ -53,9 +97,8 @@
* (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
* for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
* if their checksums are okay. skb->csum is still undefined in this case
- * though. It is a bad option, but, unfortunately, nowadays most vendors do
- * this. Apparently with the secret goal to sell you new devices, when you
- * will add new protocol to your host, f.e. IPv6 8)
+ * though. A driver or device must never modify the checksum field in the
+ * packet even if checksum is verified.
*
* CHECKSUM_UNNECESSARY is applicable to following protocols:
* TCP: IPv6 and IPv4.
@@ -96,40 +139,77 @@
* packet that are after the checksum being offloaded are not considered to
* be verified.
*
- * B. Checksumming on output.
- *
- * CHECKSUM_NONE:
- *
- * The skb was already checksummed by the protocol, or a checksum is not
- * required.
+ * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
+ * in the skb->ip_summed for a packet. Values are:
*
* CHECKSUM_PARTIAL:
*
- * The device is required to checksum the packet as seen by hard_start_xmit()
+ * The driver is required to checksum the packet as seen by hard_start_xmit()
* from skb->csum_start up to the end, and to record/write the checksum at
- * offset skb->csum_start + skb->csum_offset.
+ * offset skb->csum_start + skb->csum_offset. A driver may verify that the
+ * csum_start and csum_offset values are valid given the length and offset
+ * of the packet; however, it should not attempt to validate that the
+ * checksum refers to a legitimate transport layer checksum; it is the
+ * purview of the stack to validate that csum_start and csum_offset are set
+ * correctly.
+ *
+ * When the stack requests checksum offload for a packet, the driver MUST
+ * ensure that the checksum is set correctly. A driver can either offload the
+ * checksum calculation to the device, or call skb_checksum_help (in the case
+ * that the device does not support offload for a particular checksum).
+ *
+ * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
+ * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
+ * checksum offload capability. If a device has limited checksum capabilities
+ * (for instance can only perform NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM as
+ * described above) a helper function can be called to resolve
+ * CHECKSUM_PARTIAL. The helper functions are skb_csum_off_chk*. The helper
+ * function takes a spec argument that describes the protocol layer that is
+ * supported for checksum offload and can be called for each packet. If a
+ * packet does not match the specification for offload, skb_checksum_help
+ * is called to resolve the checksum.
*
- * The device must show its capabilities in dev->features, set up at device
- * setup time, e.g. netdev_features.h:
+ * CHECKSUM_NONE:
*
- * NETIF_F_HW_CSUM - It's a clever device, it's able to checksum everything.
- * NETIF_F_IP_CSUM - Device is dumb, it's able to checksum only TCP/UDP over
- * IPv4. Sigh. Vendors like this way for an unknown reason.
- * Though, see comment above about CHECKSUM_UNNECESSARY. 8)
- * NETIF_F_IPV6_CSUM - About as dumb as the last one but does IPv6 instead.
- * NETIF_F_... - Well, you get the picture.
+ * The skb was already checksummed by the protocol, or a checksum is not
+ * required.
*
* CHECKSUM_UNNECESSARY:
*
- * Normally, the device will do per protocol specific checksumming. Protocol
- * implementations that do not want the NIC to perform the checksum
- * calculation should use this flag in their outgoing skbs.
- *
- * NETIF_F_FCOE_CRC - This indicates that the device can do FCoE FC CRC
- * offload. Correspondingly, the FCoE protocol driver
- * stack should use CHECKSUM_UNNECESSARY.
+ * This has the same meaning as CHECKSUM_NONE for checksum offload on
+ * output.
*
- * Any questions? No questions, good. --ANK
+ * CHECKSUM_COMPLETE:
+ *
+ * Not used on checksum output. If a driver observes a packet with this
+ * value set in the skbuff, it should be treated as if CHECKSUM_NONE were
+ * set.
+ *
+ * D. Non-IP checksum (CRC) offloads
+ *
+ * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
+ * offloading the SCTP CRC in a packet. To perform this offload the stack
+ * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
+ * accordingly. Note that there is no indication in the skbuff that the
+ * CHECKSUM_PARTIAL refers to an SCTP checksum; a driver that supports
+ * both IP checksum offload and SCTP CRC offload must verify which offload
+ * is configured for a packet presumably by inspecting packet headers.
+ *
+ * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
+ * offloading the FCOE CRC in a packet. To perform this offload the stack
+ * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
+ * accordingly. Note that there is no indication in the skbuff that the
+ * CHECKSUM_PARTIAL refers to an FCOE checksum; a driver that supports
+ * both IP checksum offload and FCOE CRC offload must verify which offload
+ * is configured for a packet presumably by inspecting packet headers.
+ *
+ * E. Checksumming on output with GSO.
+ *
+ * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload
+ * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
+ * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
+ * part of the GSO operation is implied. If a checksum is being offloaded
+ * with GSO then ip_summed is CHECKSUM_PARTIAL, and csum_start and csum_offset
+ * are set to refer to the outermost checksum being offloaded (two offloaded
+ * checksums are possible with UDP encapsulation).
*/
/* Don't change this without changing skb_csum_unnecessary! */
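
The CHECKSUM_PARTIAL contract in section C boils down to the following transmit-path pattern; foo_hw_can_csum(), foo_fill_csum_desc() and desc are hypothetical:

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (foo_hw_can_csum(skb))
			foo_fill_csum_desc(desc,
					   skb_checksum_start_offset(skb),
					   skb->csum_offset);
		else if (skb_checksum_help(skb))
			goto drop;	/* could not resolve in software */
	}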
@@ -833,7 +913,7 @@ struct sk_buff_fclones {
* skb_fclone_busy - check if fclone is busy
* @skb: buffer
*
- * Returns true is skb is a fast clone, and its clone is not freed.
+ * Returns true if skb is a fast clone, and its clone is not freed.
* Some drivers call skb_orphan() in their ndo_start_xmit(),
 * so we also check that this didn't happen.
*/
@@ -1082,9 +1162,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
static inline void skb_sender_cpu_clear(struct sk_buff *skb)
{
-#ifdef CONFIG_XPS
- skb->sender_cpu = 0;
-#endif
}
#ifdef NET_SKBUFF_DATA_USES_OFFSET
@@ -1942,6 +2019,11 @@ static inline unsigned char *skb_inner_transport_header(const struct sk_buff
return skb->head + skb->inner_transport_header;
}
+static inline int skb_inner_transport_offset(const struct sk_buff *skb)
+{
+ return skb_inner_transport_header(skb) - skb->data;
+}
+
static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
skb->inner_transport_header = skb->data - skb->head;
@@ -2723,6 +2805,23 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+static inline void skb_postpush_rcsum(struct sk_buff *skb,
+ const void *start, unsigned int len)
+{
+ /* For performing the reverse operation to skb_postpull_rcsum(),
+ * we can instead of ...
+ *
+ * skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
+ *
+ * ... just use this equivalent version here to save a few
+ * instructions. Feeding csum of 0 in csum_partial() and later
+	 * on adding skb->csum is equivalent to feeding skb->csum in the
+ * first place.
+ */
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_partial(start, len, skb->csum);
+}
+
/**
* pskb_trim_rcsum - trim received skb and update checksum
* @skb: buffer to trim
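
A minimal usage sketch for skb_postpush_rcsum(): after pushing and filling new header bytes, fold them back into a CHECKSUM_COMPLETE value (hdr and len are assumed to describe the bytes just written):

	skb_push(skb, len);
	memcpy(skb->data, hdr, len);
	skb_postpush_rcsum(skb, skb->data, len);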
@@ -2788,6 +2887,12 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
#define skb_walk_frags(skb, iter) \
for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
+
+int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
+ const struct sk_buff *skb);
+struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
+ int *peeked, int *off, int *err,
+ struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
index dad035c16d94..343c13ac4f71 100644
--- a/include/linux/soc/ti/knav_dma.h
+++ b/include/linux/soc/ti/knav_dma.h
@@ -144,17 +144,17 @@ struct knav_dma_cfg {
* @psdata: Protocol specific
*/
struct knav_dma_desc {
- u32 desc_info;
- u32 tag_info;
- u32 packet_info;
- u32 buff_len;
- u32 buff;
- u32 next_desc;
- u32 orig_len;
- u32 orig_buff;
- u32 epib[KNAV_DMA_NUM_EPIB_WORDS];
- u32 psdata[KNAV_DMA_NUM_PS_WORDS];
- u32 pad[4];
+ __le32 desc_info;
+ __le32 tag_info;
+ __le32 packet_info;
+ __le32 buff_len;
+ __le32 buff;
+ __le32 next_desc;
+ __le32 orig_len;
+ __le32 orig_buff;
+ __le32 epib[KNAV_DMA_NUM_EPIB_WORDS];
+ __le32 psdata[KNAV_DMA_NUM_PS_WORDS];
+ __le32 pad[4];
} ____cacheline_aligned;
#if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA)
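
With the descriptor fields now __le32, CPU-side users must convert explicitly; a sketch:

static u32 foo_desc_get_len(const struct knav_dma_desc *desc)
{
	return le32_to_cpu(desc->buff_len);
}

static void foo_desc_set_buff(struct knav_dma_desc *desc, u32 buff)
{
	desc->buff = cpu_to_le32(buff);
}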
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index fddebc617469..4018b48f2b3b 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -15,6 +15,7 @@ struct sock_diag_handler {
__u8 family;
int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
int (*get_info)(struct sk_buff *skb, struct sock *sk);
+ int (*destroy)(struct sk_buff *skb, struct nlmsghdr *nlh);
};
int sock_diag_register(const struct sock_diag_handler *h);
@@ -68,4 +69,5 @@ bool sock_diag_has_destroy_listeners(const struct sock *sk)
}
void sock_diag_broadcast_destroy(struct sock *sk);
+int sock_diag_destroy(struct sock *sk, int err);
#endif
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index c3d1a525bacc..26a0b3c3ce5f 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -524,13 +524,9 @@ struct ssb_init_invariants {
typedef int (*ssb_invariants_func_t)(struct ssb_bus *bus,
struct ssb_init_invariants *iv);
-/* Register a SSB system bus. get_invariants() is called after the
- * basic system devices are initialized.
- * The invariants are usually fetched from some NVRAM.
- * Put the invariants into the struct pointed to by iv. */
-extern int ssb_bus_ssbbus_register(struct ssb_bus *bus,
- unsigned long baseaddr,
- ssb_invariants_func_t get_invariants);
+/* Register SoC bus. */
+extern int ssb_bus_host_soc_register(struct ssb_bus *bus,
+ unsigned long baseaddr);
#ifdef CONFIG_SSB_PCIHOST
extern int ssb_bus_pcibus_register(struct ssb_bus *bus,
struct pci_dev *host_pci);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index d2f4ec7dba7c..ae71a769b89e 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -137,6 +137,27 @@ static inline int waitqueue_active(wait_queue_head_t *q)
return !list_empty(&q->task_list);
}
+/**
+ * wq_has_sleeper - check if there are any waiting processes
+ * @wq: wait queue head
+ *
+ * Returns true if wq has waiting processes
+ *
+ * Please refer to the comment for waitqueue_active.
+ */
+static inline bool wq_has_sleeper(wait_queue_head_t *wq)
+{
+ /*
+ * We need to be sure we are in sync with the
+ * add_wait_queue modifications to the wait queue.
+ *
+ * This memory barrier should be paired with one on the
+ * waiting side.
+ */
+ smp_mb();
+ return waitqueue_active(wq);
+}
+
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
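
A sketch of the barrier pairing wq_has_sleeper() expects: the waker publishes its condition before the check, while wait_event() on the waiter side orders the queue addition against the condition re-check:

static DECLARE_WAIT_QUEUE_HEAD(foo_wq);
static bool foo_cond;

static void foo_wake(void)
{
	foo_cond = true;	/* store before the smp_mb() in wq_has_sleeper() */
	if (wq_has_sleeper(&foo_wq))
		wake_up(&foo_wq);
}

static void foo_wait(void)
{
	wait_event(foo_wq, foo_cond);
}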