author     Linus Torvalds <torvalds@linux-foundation.org>  2020-06-03 16:27:18 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-03 16:27:18 -0700
commit     cb8e59cc87201af93dfbb6c3dccc8fcad72a09c2 (patch)
tree       a334db9022f89654b777bbce8c4c6632e65b9031 /net/ipv4
parent     2e63f6ce7ed2c4ff83ba30ad9ccad422289a6c63 (diff)
parent     065fcfd49763ec71ae345bb5c5a74f961031e70e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from David Miller:

 1) Allow setting bluetooth L2CAP modes via socket option, from Luiz Augusto von Dentz.
 2) Add GSO partial support to igc, from Sasha Neftin.
 3) Several cleanups and improvements to r8169, from Heiner Kallweit.
 4) Add IF_OPER_TESTING link state and use it when ethtool triggers a device self-test. From Andrew Lunn.
 5) Start moving away from custom driver versions; use the globally defined kernel version instead. From Leon Romanovsky.
 6) Support GRO via gro_cells in the DSA layer, from Alexander Lobakin.
 7) Allow hard IRQ deferral during NAPI, from Eric Dumazet.
 8) Add sriov and vf support to hinic, from Luo bin.
 9) Support Media Redundancy Protocol (MRP) in the bridging code, from Horatiu Vultur.
10) Support netmap in the nft_nat code, from Pablo Neira Ayuso.
11) Allow UDPv6 encapsulation of ESP in the ipsec code, from Sabrina Dubroca. Also add ipv6 support for espintcp.
12) Lots of ReST conversions of the networking documentation, from Mauro Carvalho Chehab.
13) Support configuration of ethtool rxnfc flows in the bcmgenet driver, from Doug Berger.
14) Allow dumping the cgroup id and filtering by it in the inet_diag code, from Dmitry Yakunin.
15) Add infrastructure to export netlink attribute policies to userspace, from Johannes Berg.
16) Several optimizations to the sch_fq scheduler, from Eric Dumazet.
17) Fall back to the default qdisc if qdisc init fails, because otherwise a packet scheduler init failure would make a device inoperative. From Jesper Dangaard Brouer.
18) Several RISC-V BPF JIT optimizations, from Luke Nelson.
19) Correct the return type of the ->ndo_start_xmit() method in several drivers; it is netdev_tx_t, but many drivers were using 'int'. From Yunjian Wang.
20) Add an ethtool interface for PHY master/slave config, from Oleksij Rempel.
21) Add BPF iterators, from Yonghong Song.
22) Add cable test infrastructure, including ethtool interfaces, from Andrew Lunn. The Marvell PHY driver is the first to support this facility.
23) Remove zero-length arrays all over the tree, from Gustavo A. R. Silva.
24) Calculate and maintain an explicit frame size in XDP, from Jesper Dangaard Brouer.
25) Add CAP_BPF, from Alexei Starovoitov.
26) Support terse dumps in the packet scheduler, from Vlad Buslov.
27) Support XDP_TX bulking in the dpaa2 driver, from Ioana Ciornei.
28) Add devm_register_netdev(), from Bartosz Golaszewski.
29) Minimize qdisc resets, from Cong Wang.
30) Get rid of kernel_getsockopt and kernel_setsockopt in order to eliminate set_fs/get_fs calls. From Christoph Hellwig.
* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2517 commits)
  selftests: net: ip_defrag: ignore EPERM
  net_failover: fixed rollback in net_failover_open()
  Revert "tipc: Fix potential tipc_aead refcnt leak in tipc_crypto_rcv"
  Revert "tipc: Fix potential tipc_node refcnt leak in tipc_rcv"
  vmxnet3: allow rx flow hash ops only when rss is enabled
  hinic: add set_channels ethtool_ops support
  selftests/bpf: Add a default $(CXX) value
  tools/bpf: Don't use $(COMPILE.c)
  bpf, selftests: Use bpf_probe_read_kernel
  s390/bpf: Use bcr 0,%0 as tail call nop filler
  s390/bpf: Maintain 8-byte stack alignment
  selftests/bpf: Fix verifier test
  selftests/bpf: Fix sample_cnt shared between two threads
  bpf, selftests: Adapt cls_redirect to call csum_level helper
  bpf: Add csum_level helper for fixing up csum levels
  bpf: Fix up bpf_skb_adjust_room helper's skb csum setting
  sfc: add missing annotation for efx_ef10_try_update_nic_stats_vf()
  crypto/chtls: IPv6 support for inline TLS
  Crypto/chcr: Fixes a coccinile check error
  Crypto/chcr: Fixes compilations warnings
  ...
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/Kconfig                  |    3
-rw-r--r--  net/ipv4/af_inet.c                |   69
-rw-r--r--  net/ipv4/devinet.c                |   32
-rw-r--r--  net/ipv4/fib_semantics.c          |    3
-rw-r--r--  net/ipv4/icmp.c                   |    2
-rw-r--r--  net/ipv4/igmp.c                   |   18
-rw-r--r--  net/ipv4/inet_connection_sock.c   |    6
-rw-r--r--  net/ipv4/inet_diag.c              |   39
-rw-r--r--  net/ipv4/ip_gre.c                 |   94
-rw-r--r--  net/ipv4/ip_sockglue.c            |  397
-rw-r--r--  net/ipv4/ip_tunnel.c              |   16
-rw-r--r--  net/ipv4/ip_vti.c                 |   32
-rw-r--r--  net/ipv4/ipip.c                   |   30
-rw-r--r--  net/ipv4/ipmr.c                   |  122
-rw-r--r--  net/ipv4/nexthop.c                |  168
-rw-r--r--  net/ipv4/route.c                  |    3
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c        |   54
-rw-r--r--  net/ipv4/tcp.c                    |  226
-rw-r--r--  net/ipv4/tcp_input.c              |   89
-rw-r--r--  net/ipv4/tcp_ipv4.c               |  105
-rw-r--r--  net/ipv4/tcp_minisocks.c          |    2
-rw-r--r--  net/ipv4/tcp_output.c             |   36
-rw-r--r--  net/ipv4/tcp_timer.c              |    8
-rw-r--r--  net/ipv4/udp.c                    |   10
-rw-r--r--  net/ipv4/udp_tunnel.c             |    4
-rw-r--r--  net/ipv4/xfrm4_input.c            |    5
-rw-r--r--  net/ipv4/xfrm4_output.c           |   63
-rw-r--r--  net/ipv4/xfrm4_state.c            |   24
28 files changed, 1047 insertions(+), 613 deletions(-)
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 25a8888826b8..23ba5045e3d3 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -49,7 +49,7 @@ config IP_ADVANCED_ROUTER
Note that some distributions enable it in startup scripts.
For details about rp_filter strict and loose mode read
- <file:Documentation/networking/ip-sysctl.txt>.
+ <file:Documentation/networking/ip-sysctl.rst>.
If unsure, say N here.
@@ -384,6 +384,7 @@ config INET_ESPINTCP
depends on XFRM && INET_ESP
select STREAM_PARSER
select NET_SOCK_MSG
+ select XFRM_ESPINTCP
help
Support for RFC 8229 encapsulation of ESP and IKE over
TCP/IPv4 sockets.
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index cf58e29cf746..02aa5cb3a4fd 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -116,6 +116,7 @@
#include <linux/mroute.h>
#endif
#include <net/l3mdev.h>
+#include <net/compat.h>
#include <trace/events/sock.h>
@@ -450,12 +451,12 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (err)
return err;
- return __inet_bind(sk, uaddr, addr_len, false, true);
+ return __inet_bind(sk, uaddr, addr_len, BIND_WITH_LOCK);
}
EXPORT_SYMBOL(inet_bind);
int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
- bool force_bind_address_no_port, bool with_lock)
+ u32 flags)
{
struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
struct inet_sock *inet = inet_sk(sk);
@@ -506,7 +507,7 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
* would be illegal to use them (multicast/broadcast) in
* which case the sending device address is used.
*/
- if (with_lock)
+ if (flags & BIND_WITH_LOCK)
lock_sock(sk);
/* Check these errors (active socket, double bind). */
@@ -520,16 +521,18 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
/* Make sure we are allowed to bind here. */
if (snum || !(inet->bind_address_no_port ||
- force_bind_address_no_port)) {
+ (flags & BIND_FORCE_ADDRESS_NO_PORT))) {
if (sk->sk_prot->get_port(sk, snum)) {
inet->inet_saddr = inet->inet_rcv_saddr = 0;
err = -EADDRINUSE;
goto out_release_sock;
}
- err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk);
- if (err) {
- inet->inet_saddr = inet->inet_rcv_saddr = 0;
- goto out_release_sock;
+ if (!(flags & BIND_FROM_BPF)) {
+ err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk);
+ if (err) {
+ inet->inet_saddr = inet->inet_rcv_saddr = 0;
+ goto out_release_sock;
+ }
}
}
@@ -543,7 +546,7 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
sk_dst_reset(sk);
err = 0;
out_release_sock:
- if (with_lock)
+ if (flags & BIND_WITH_LOCK)
release_sock(sk);
out:
return err;
@@ -753,12 +756,11 @@ do_err:
}
EXPORT_SYMBOL(inet_accept);
-
/*
* This does both peername and sockname.
*/
int inet_getname(struct socket *sock, struct sockaddr *uaddr,
- int peer)
+ int peer)
{
struct sock *sk = sock->sk;
struct inet_sock *inet = inet_sk(sk);
@@ -779,6 +781,11 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr,
sin->sin_port = inet->inet_sport;
sin->sin_addr.s_addr = addr;
}
+ if (cgroup_bpf_enabled)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin,
+ peer ? BPF_CGROUP_INET4_GETPEERNAME :
+ BPF_CGROUP_INET4_GETSOCKNAME,
+ NULL);
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
return sizeof(*sin);
}
@@ -968,17 +975,42 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
EXPORT_SYMBOL(inet_ioctl);
#ifdef CONFIG_COMPAT
+static int inet_compat_routing_ioctl(struct sock *sk, unsigned int cmd,
+ struct compat_rtentry __user *ur)
+{
+ compat_uptr_t rtdev;
+ struct rtentry rt;
+
+ if (copy_from_user(&rt.rt_dst, &ur->rt_dst,
+ 3 * sizeof(struct sockaddr)) ||
+ get_user(rt.rt_flags, &ur->rt_flags) ||
+ get_user(rt.rt_metric, &ur->rt_metric) ||
+ get_user(rt.rt_mtu, &ur->rt_mtu) ||
+ get_user(rt.rt_window, &ur->rt_window) ||
+ get_user(rt.rt_irtt, &ur->rt_irtt) ||
+ get_user(rtdev, &ur->rt_dev))
+ return -EFAULT;
+
+ rt.rt_dev = compat_ptr(rtdev);
+ return ip_rt_ioctl(sock_net(sk), cmd, &rt);
+}
+
static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
+ void __user *argp = compat_ptr(arg);
struct sock *sk = sock->sk;
- int err = -ENOIOCTLCMD;
-
- if (sk->sk_prot->compat_ioctl)
- err = sk->sk_prot->compat_ioctl(sk, cmd, arg);
- return err;
+ switch (cmd) {
+ case SIOCADDRT:
+ case SIOCDELRT:
+ return inet_compat_routing_ioctl(sk, cmd, argp);
+ default:
+ if (!sk->sk_prot->compat_ioctl)
+ return -ENOIOCTLCMD;
+ return sk->sk_prot->compat_ioctl(sk, cmd, arg);
+ }
}
-#endif
+#endif /* CONFIG_COMPAT */
const struct proto_ops inet_stream_ops = {
.family = PF_INET,
@@ -1835,6 +1867,7 @@ static __net_init int inet_init_net(struct net *net)
net->ipv4.sysctl_ip_early_demux = 1;
net->ipv4.sysctl_udp_early_demux = 1;
net->ipv4.sysctl_tcp_early_demux = 1;
+ net->ipv4.sysctl_nexthop_compat_mode = 1;
#ifdef CONFIG_SYSCTL
net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
#endif
@@ -1914,7 +1947,7 @@ static int __init inet_init(void)
{
struct inet_protosw *q;
struct list_head *r;
- int rc = -EINVAL;
+ int rc;
sock_skb_cb_check_size(sizeof(struct inet_skb_parm));
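Note: the __inet_bind() hunks above fold the force_bind_address_no_port/with_lock bool pair into a single flags word, which also lets BPF-originated binds skip the POST_BIND hook. A minimal sketch of the pattern, with illustrative flag values (the kernel's real BIND_* definitions live in a header outside this diff and may differ):

    /* Illustrative only -- not necessarily the kernel's actual values. */
    #define BIND_FORCE_ADDRESS_NO_PORT  (1 << 0)
    #define BIND_WITH_LOCK              (1 << 1)
    #define BIND_FROM_BPF               (1 << 2)

    /* Old call: __inet_bind(sk, uaddr, addr_len, false, true);
     * New call: one self-describing flags argument replaces two bools. */
    err = __inet_bind(sk, uaddr, addr_len, BIND_WITH_LOCK);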
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5267b6b191eb..123a6d39438f 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -2367,8 +2367,7 @@ static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
}
static int devinet_conf_proc(struct ctl_table *ctl, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int old_value = *(int *)ctl->data;
int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
@@ -2420,8 +2419,7 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write,
}
static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
int val = *valp;
@@ -2464,8 +2462,7 @@ static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
}
static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
int val = *valp;
@@ -2670,11 +2667,24 @@ static __net_init int devinet_init_net(struct net *net)
tbl[0].extra2 = net;
#endif
- if ((!IS_ENABLED(CONFIG_SYSCTL) ||
- sysctl_devconf_inherit_init_net != 2) &&
- !net_eq(net, &init_net)) {
- memcpy(all, init_net.ipv4.devconf_all, sizeof(ipv4_devconf));
- memcpy(dflt, init_net.ipv4.devconf_dflt, sizeof(ipv4_devconf_dflt));
+ if (!net_eq(net, &init_net)) {
+ if (IS_ENABLED(CONFIG_SYSCTL) &&
+ sysctl_devconf_inherit_init_net == 3) {
+ /* copy from the current netns */
+ memcpy(all, current->nsproxy->net_ns->ipv4.devconf_all,
+ sizeof(ipv4_devconf));
+ memcpy(dflt,
+ current->nsproxy->net_ns->ipv4.devconf_dflt,
+ sizeof(ipv4_devconf_dflt));
+ } else if (!IS_ENABLED(CONFIG_SYSCTL) ||
+ sysctl_devconf_inherit_init_net != 2) {
+ /* inherit == 0 or 1: copy from init_net */
+ memcpy(all, init_net.ipv4.devconf_all,
+ sizeof(ipv4_devconf));
+ memcpy(dflt, init_net.ipv4.devconf_dflt,
+ sizeof(ipv4_devconf_dflt));
+ }
+ /* else inherit == 2: use compiled values */
}
#ifdef CONFIG_SYSCTL
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 55ca2e521828..e53871e4a097 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1780,6 +1780,8 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
goto nla_put_failure;
if (nexthop_is_blackhole(fi->nh))
rtm->rtm_type = RTN_BLACKHOLE;
+ if (!fi->fib_net->ipv4.sysctl_nexthop_compat_mode)
+ goto offload;
}
if (nhs == 1) {
@@ -1805,6 +1807,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
goto nla_put_failure;
}
+offload:
if (fri->offload)
rtm->rtm_flags |= RTM_F_OFFLOAD;
if (fri->trap)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index fc61f51d87a3..956a806649f7 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -853,7 +853,7 @@ static bool icmp_unreach(struct sk_buff *skb)
case ICMP_FRAG_NEEDED:
/* for documentation of the ip_no_pmtu_disc
* values please see
- * Documentation/networking/ip-sysctl.txt
+ * Documentation/networking/ip-sysctl.rst
*/
switch (net->ipv4.sysctl_ip_no_pmtu_disc) {
default:
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 47f0502b2101..7b272bbed2b4 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2565,9 +2565,9 @@ done:
}
int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
- struct group_filter __user *optval, int __user *optlen)
+ struct sockaddr_storage __user *p)
{
- int err, i, count, copycount;
+ int i, count, copycount;
struct sockaddr_in *psin;
__be32 addr;
struct ip_mc_socklist *pmc;
@@ -2583,37 +2583,29 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
if (!ipv4_is_multicast(addr))
return -EINVAL;
- err = -EADDRNOTAVAIL;
-
for_each_pmc_rtnl(inet, pmc) {
if (pmc->multi.imr_multiaddr.s_addr == addr &&
pmc->multi.imr_ifindex == gsf->gf_interface)
break;
}
if (!pmc) /* must have a prior join */
- goto done;
+ return -EADDRNOTAVAIL;
gsf->gf_fmode = pmc->sfmode;
psl = rtnl_dereference(pmc->sflist);
count = psl ? psl->sl_count : 0;
copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
gsf->gf_numsrc = count;
- if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
- copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
- return -EFAULT;
- }
- for (i = 0; i < copycount; i++) {
+ for (i = 0; i < copycount; i++, p++) {
struct sockaddr_storage ss;
psin = (struct sockaddr_in *)&ss;
memset(&ss, 0, sizeof(ss));
psin->sin_family = AF_INET;
psin->sin_addr.s_addr = psl->sl_addr[i];
- if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
+ if (copy_to_user(p, &ss, sizeof(ss)))
return -EFAULT;
}
return 0;
-done:
- return err;
}
/*
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 65c29f2bd89f..f40b1b72f979 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -901,11 +901,7 @@ void inet_csk_prepare_forced_close(struct sock *sk)
/* sk_clone_lock locked the socket and set refcnt to 2 */
bh_unlock_sock(sk);
sock_put(sk);
-
- /* The below has to be done to allow calling inet_csk_destroy_sock */
- sock_set_flag(sk, SOCK_DEAD);
- percpu_counter_inc(sk->sk_prot->orphan_count);
- inet_sk(sk)->inet_num = 0;
+ inet_csk_prepare_for_destroy_sock(sk);
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 5d50aad3cdbf..125f4f8a36b4 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -43,6 +43,9 @@ struct inet_diag_entry {
u16 userlocks;
u32 ifindex;
u32 mark;
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ u64 cgroup_id;
+#endif
};
static DEFINE_MUTEX(inet_diag_table_mutex);
@@ -162,6 +165,13 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
goto errout;
}
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ if (nla_put_u64_64bit(skb, INET_DIAG_CGROUP_ID,
+ cgroup_id(sock_cgroup_ptr(&sk->sk_cgrp_data)),
+ INET_DIAG_PAD))
+ goto errout;
+#endif
+
r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
r->idiag_inode = sock_i_ino(sk);
@@ -675,6 +685,16 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
yes = 0;
break;
}
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ case INET_DIAG_BC_CGROUP_COND: {
+ u64 cgroup_id;
+
+ cgroup_id = get_unaligned((const u64 *)(op + 1));
+ if (cgroup_id != entry->cgroup_id)
+ yes = 0;
+ break;
+ }
+#endif
}
if (yes) {
@@ -725,6 +745,10 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark;
else
entry.mark = 0;
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ entry.cgroup_id = sk_fullsock(sk) ?
+ cgroup_id(sock_cgroup_ptr(&sk->sk_cgrp_data)) : 0;
+#endif
return inet_diag_bc_run(bc, &entry);
}
@@ -814,6 +838,15 @@ static bool valid_markcond(const struct inet_diag_bc_op *op, int len,
return len >= *min_len;
}
+#ifdef CONFIG_SOCK_CGROUP_DATA
+static bool valid_cgroupcond(const struct inet_diag_bc_op *op, int len,
+ int *min_len)
+{
+ *min_len += sizeof(u64);
+ return len >= *min_len;
+}
+#endif
+
static int inet_diag_bc_audit(const struct nlattr *attr,
const struct sk_buff *skb)
{
@@ -856,6 +889,12 @@ static int inet_diag_bc_audit(const struct nlattr *attr,
if (!valid_markcond(bc, len, &min_len))
return -EINVAL;
break;
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ case INET_DIAG_BC_CGROUP_COND:
+ if (!valid_cgroupcond(bc, len, &min_len))
+ return -EINVAL;
+ break;
+#endif
case INET_DIAG_BC_AUTO:
case INET_DIAG_BC_JMP:
case INET_DIAG_BC_NOP:
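Note: INET_DIAG_BC_CGROUP_COND is a new bytecode condition whose operand is a 64-bit cgroup id stored immediately after the op header, matching the get_unaligned((const u64 *)(op + 1)) read above. A hedged userspace sketch building a single-condition program (assumes a uapi <linux/inet_diag.h> new enough to define the op code; the helper name is made up):

    #include <linux/inet_diag.h>
    #include <string.h>

    /* Hypothetical helper: emit one "cgroup id equals" condition.
     * yes = op length jumps past the end of the program (accept);
     * no  = op length + 4 signals reject, per inet_diag bytecode
     * conventions. */
    static size_t put_cgroup_cond(void *buf, __u64 cgroup_id)
    {
            struct inet_diag_bc_op op = {
                    .code = INET_DIAG_BC_CGROUP_COND,
                    .yes  = sizeof(op) + sizeof(cgroup_id),
                    .no   = sizeof(op) + sizeof(cgroup_id) + 4,
            };

            memcpy(buf, &op, sizeof(op));
            memcpy((char *)buf + sizeof(op), &cgroup_id, sizeof(cgroup_id));
            return sizeof(op) + sizeof(cgroup_id);
    }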
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 029b24eeafba..4e31f23e4117 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -248,6 +248,15 @@ static void gre_err(struct sk_buff *skb, u32 info)
ipgre_err(skb, info, &tpi);
}
+static bool is_erspan_type1(int gre_hdr_len)
+{
+ /* Both ERSPAN type I (version 0) and type II (version 1) use
+ * protocol 0x88BE, but the type I has only 4-byte GRE header,
+ * while type II has 8-byte.
+ */
+ return gre_hdr_len == 4;
+}
+
static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
int gre_hdr_len)
{
@@ -262,17 +271,26 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
int len;
itn = net_generic(net, erspan_net_id);
-
iph = ip_hdr(skb);
- ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
- ver = ershdr->ver;
-
- tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
- tpi->flags | TUNNEL_KEY,
- iph->saddr, iph->daddr, tpi->key);
+ if (is_erspan_type1(gre_hdr_len)) {
+ ver = 0;
+ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
+ tpi->flags | TUNNEL_NO_KEY,
+ iph->saddr, iph->daddr, 0);
+ } else {
+ ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
+ ver = ershdr->ver;
+ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
+ tpi->flags | TUNNEL_KEY,
+ iph->saddr, iph->daddr, tpi->key);
+ }
if (tunnel) {
- len = gre_hdr_len + erspan_hdr_len(ver);
+ if (is_erspan_type1(gre_hdr_len))
+ len = gre_hdr_len;
+ else
+ len = gre_hdr_len + erspan_hdr_len(ver);
+
if (unlikely(!pskb_may_pull(skb, len)))
return PACKET_REJECT;
@@ -665,7 +683,10 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
}
/* Push ERSPAN header */
- if (tunnel->erspan_ver == 1) {
+ if (tunnel->erspan_ver == 0) {
+ proto = htons(ETH_P_ERSPAN);
+ tunnel->parms.o_flags &= ~TUNNEL_SEQ;
+ } else if (tunnel->erspan_ver == 1) {
erspan_build_header(skb, ntohl(tunnel->parms.o_key),
tunnel->index,
truncate, true);
@@ -747,45 +768,37 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
}
}
-static int ipgre_tunnel_ioctl(struct net_device *dev,
- struct ifreq *ifr, int cmd)
+static int ipgre_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p,
+ int cmd)
{
- struct ip_tunnel_parm p;
int err;
- if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
- return -EFAULT;
-
if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
- if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
- p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
- ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
+ if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
+ p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
+ ((p->i_flags | p->o_flags) & (GRE_VERSION | GRE_ROUTING)))
return -EINVAL;
}
- p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
- p.o_flags = gre_flags_to_tnl_flags(p.o_flags);
+ p->i_flags = gre_flags_to_tnl_flags(p->i_flags);
+ p->o_flags = gre_flags_to_tnl_flags(p->o_flags);
- err = ip_tunnel_ioctl(dev, &p, cmd);
+ err = ip_tunnel_ctl(dev, p, cmd);
if (err)
return err;
if (cmd == SIOCCHGTUNNEL) {
struct ip_tunnel *t = netdev_priv(dev);
- t->parms.i_flags = p.i_flags;
- t->parms.o_flags = p.o_flags;
+ t->parms.i_flags = p->i_flags;
+ t->parms.o_flags = p->o_flags;
if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
ipgre_link_update(dev, true);
}
- p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
- p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);
-
- if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
- return -EFAULT;
-
+ p->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
+ p->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
return 0;
}
@@ -903,10 +916,11 @@ static const struct net_device_ops ipgre_netdev_ops = {
.ndo_stop = ipgre_close,
#endif
.ndo_start_xmit = ipgre_xmit,
- .ndo_do_ioctl = ipgre_tunnel_ioctl,
+ .ndo_do_ioctl = ip_tunnel_ioctl,
.ndo_change_mtu = ip_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
+ .ndo_tunnel_ctl = ipgre_tunnel_ctl,
};
#define GRE_FEATURES (NETIF_F_SG | \
@@ -1066,7 +1080,11 @@ static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
if (ret)
return ret;
- /* ERSPAN should only have GRE sequence and key flag */
+ if (data[IFLA_GRE_ERSPAN_VER] &&
+ nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
+ return 0;
+
+ /* ERSPAN type II/III should only have GRE sequence and key flag */
if (data[IFLA_GRE_OFLAGS])
flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
if (data[IFLA_GRE_IFLAGS])
@@ -1174,7 +1192,7 @@ static int erspan_netlink_parms(struct net_device *dev,
if (data[IFLA_GRE_ERSPAN_VER]) {
t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
- if (t->erspan_ver != 1 && t->erspan_ver != 2)
+ if (t->erspan_ver > 2)
return -EINVAL;
}
@@ -1259,7 +1277,11 @@ static int erspan_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- tunnel->tun_hlen = 8;
+ if (tunnel->erspan_ver == 0)
+ tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
+ else
+ tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */
+
tunnel->parms.iph.protocol = IPPROTO_GRE;
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
erspan_hdr_len(tunnel->erspan_ver);
@@ -1456,8 +1478,8 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct ip_tunnel_parm *p = &t->parms;
__be16 o_flags = p->o_flags;
- if (t->erspan_ver == 1 || t->erspan_ver == 2) {
- if (!t->collect_md)
+ if (t->erspan_ver <= 2) {
+ if (t->erspan_ver != 0 && !t->collect_md)
o_flags |= TUNNEL_KEY;
if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
@@ -1466,7 +1488,7 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (t->erspan_ver == 1) {
if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
goto nla_put_failure;
- } else {
+ } else if (t->erspan_ver == 2) {
if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
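Note: the ERSPAN type I detection above rides entirely on GRE header length. Per RFC 2890 the base GRE header is 4 bytes and each optional field (checksum block, key, sequence number) adds 4 more, so a bare type I header is 4 bytes while type II's keyed header is 8. An illustrative helper (not kernel code) showing the arithmetic:

    /* RFC 2890 GRE sizing: 4-byte base header; checksum, key and
     * sequence number each add 4 bytes.  ERSPAN type I is the bare
     * 4-byte case; type II carries a key, giving 8 bytes -- which is
     * exactly what is_erspan_type1() keys on. */
    static int gre_header_len(int has_csum, int has_key, int has_seq)
    {
            return 4 + (has_csum ? 4 : 0) + (has_key ? 4 : 0) +
                   (has_seq ? 4 : 0);
    }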
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index aa3fd61818c4..84ec3703c909 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -560,6 +560,61 @@ out:
return err;
}
+static void __ip_sock_set_tos(struct sock *sk, int val)
+{
+ if (sk->sk_type == SOCK_STREAM) {
+ val &= ~INET_ECN_MASK;
+ val |= inet_sk(sk)->tos & INET_ECN_MASK;
+ }
+ if (inet_sk(sk)->tos != val) {
+ inet_sk(sk)->tos = val;
+ sk->sk_priority = rt_tos2priority(val);
+ sk_dst_reset(sk);
+ }
+}
+
+void ip_sock_set_tos(struct sock *sk, int val)
+{
+ lock_sock(sk);
+ __ip_sock_set_tos(sk, val);
+ release_sock(sk);
+}
+EXPORT_SYMBOL(ip_sock_set_tos);
+
+void ip_sock_set_freebind(struct sock *sk)
+{
+ lock_sock(sk);
+ inet_sk(sk)->freebind = true;
+ release_sock(sk);
+}
+EXPORT_SYMBOL(ip_sock_set_freebind);
+
+void ip_sock_set_recverr(struct sock *sk)
+{
+ lock_sock(sk);
+ inet_sk(sk)->recverr = true;
+ release_sock(sk);
+}
+EXPORT_SYMBOL(ip_sock_set_recverr);
+
+int ip_sock_set_mtu_discover(struct sock *sk, int val)
+{
+ if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
+ return -EINVAL;
+ lock_sock(sk);
+ inet_sk(sk)->pmtudisc = val;
+ release_sock(sk);
+ return 0;
+}
+EXPORT_SYMBOL(ip_sock_set_mtu_discover);
+
+void ip_sock_set_pktinfo(struct sock *sk)
+{
+ lock_sock(sk);
+ inet_sk(sk)->cmsg_flags |= IP_CMSG_PKTINFO;
+ release_sock(sk);
+}
+EXPORT_SYMBOL(ip_sock_set_pktinfo);
/*
* Socket option code for IP. This is the end of the line after any
@@ -587,6 +642,86 @@ static bool setsockopt_needs_rtnl(int optname)
return false;
}
+static int set_mcast_msfilter(struct sock *sk, int ifindex,
+ int numsrc, int fmode,
+ struct sockaddr_storage *group,
+ struct sockaddr_storage *list)
+{
+ int msize = IP_MSFILTER_SIZE(numsrc);
+ struct ip_msfilter *msf;
+ struct sockaddr_in *psin;
+ int err, i;
+
+ msf = kmalloc(msize, GFP_KERNEL);
+ if (!msf)
+ return -ENOBUFS;
+
+ psin = (struct sockaddr_in *)group;
+ if (psin->sin_family != AF_INET)
+ goto Eaddrnotavail;
+ msf->imsf_multiaddr = psin->sin_addr.s_addr;
+ msf->imsf_interface = 0;
+ msf->imsf_fmode = fmode;
+ msf->imsf_numsrc = numsrc;
+ for (i = 0; i < numsrc; ++i) {
+ psin = (struct sockaddr_in *)&list[i];
+
+ if (psin->sin_family != AF_INET)
+ goto Eaddrnotavail;
+ msf->imsf_slist[i] = psin->sin_addr.s_addr;
+ }
+ err = ip_mc_msfilter(sk, msf, ifindex);
+ kfree(msf);
+ return err;
+
+Eaddrnotavail:
+ kfree(msf);
+ return -EADDRNOTAVAIL;
+}
+
+static int do_mcast_group_source(struct sock *sk, int optname,
+ struct group_source_req *greqs)
+{
+ struct ip_mreq_source mreqs;
+ struct sockaddr_in *psin;
+ int omode, add, err;
+
+ if (greqs->gsr_group.ss_family != AF_INET ||
+ greqs->gsr_source.ss_family != AF_INET)
+ return -EADDRNOTAVAIL;
+
+ psin = (struct sockaddr_in *)&greqs->gsr_group;
+ mreqs.imr_multiaddr = psin->sin_addr.s_addr;
+ psin = (struct sockaddr_in *)&greqs->gsr_source;
+ mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
+ mreqs.imr_interface = 0; /* use index for mc_source */
+
+ if (optname == MCAST_BLOCK_SOURCE) {
+ omode = MCAST_EXCLUDE;
+ add = 1;
+ } else if (optname == MCAST_UNBLOCK_SOURCE) {
+ omode = MCAST_EXCLUDE;
+ add = 0;
+ } else if (optname == MCAST_JOIN_SOURCE_GROUP) {
+ struct ip_mreqn mreq;
+
+ psin = (struct sockaddr_in *)&greqs->gsr_group;
+ mreq.imr_multiaddr = psin->sin_addr;
+ mreq.imr_address.s_addr = 0;
+ mreq.imr_ifindex = greqs->gsr_interface;
+ err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
+ if (err && err != -EADDRINUSE)
+ return err;
+ greqs->gsr_interface = mreq.imr_ifindex;
+ omode = MCAST_INCLUDE;
+ add = 1;
+ } else /* MCAST_LEAVE_SOURCE_GROUP */ {
+ omode = MCAST_INCLUDE;
+ add = 0;
+ }
+ return ip_mc_source(add, omode, sk, &mreqs, greqs->gsr_interface);
+}
+
static int do_ip_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, unsigned int optlen)
{
@@ -743,15 +878,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
inet->cmsg_flags &= ~IP_CMSG_RECVFRAGSIZE;
break;
case IP_TOS: /* This sets both TOS and Precedence */
- if (sk->sk_type == SOCK_STREAM) {
- val &= ~INET_ECN_MASK;
- val |= inet->tos & INET_ECN_MASK;
- }
- if (inet->tos != val) {
- inet->tos = val;
- sk->sk_priority = rt_tos2priority(val);
- sk_dst_reset(sk);
- }
+ __ip_sock_set_tos(sk, val);
break;
case IP_TTL:
if (optlen < 1)
@@ -1029,9 +1156,6 @@ static int do_ip_setsockopt(struct sock *sk, int level,
case MCAST_UNBLOCK_SOURCE:
{
struct group_source_req greqs;
- struct ip_mreq_source mreqs;
- struct sockaddr_in *psin;
- int omode, add;
if (optlen != sizeof(struct group_source_req))
goto e_inval;
@@ -1039,50 +1163,12 @@ static int do_ip_setsockopt(struct sock *sk, int level,
err = -EFAULT;
break;
}
- if (greqs.gsr_group.ss_family != AF_INET ||
- greqs.gsr_source.ss_family != AF_INET) {
- err = -EADDRNOTAVAIL;
- break;
- }
- psin = (struct sockaddr_in *)&greqs.gsr_group;
- mreqs.imr_multiaddr = psin->sin_addr.s_addr;
- psin = (struct sockaddr_in *)&greqs.gsr_source;
- mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
- mreqs.imr_interface = 0; /* use index for mc_source */
-
- if (optname == MCAST_BLOCK_SOURCE) {
- omode = MCAST_EXCLUDE;
- add = 1;
- } else if (optname == MCAST_UNBLOCK_SOURCE) {
- omode = MCAST_EXCLUDE;
- add = 0;
- } else if (optname == MCAST_JOIN_SOURCE_GROUP) {
- struct ip_mreqn mreq;
-
- psin = (struct sockaddr_in *)&greqs.gsr_group;
- mreq.imr_multiaddr = psin->sin_addr;
- mreq.imr_address.s_addr = 0;
- mreq.imr_ifindex = greqs.gsr_interface;
- err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
- if (err && err != -EADDRINUSE)
- break;
- greqs.gsr_interface = mreq.imr_ifindex;
- omode = MCAST_INCLUDE;
- add = 1;
- } else /* MCAST_LEAVE_SOURCE_GROUP */ {
- omode = MCAST_INCLUDE;
- add = 0;
- }
- err = ip_mc_source(add, omode, sk, &mreqs,
- greqs.gsr_interface);
+ err = do_mcast_group_source(sk, optname, &greqs);
break;
}
case MCAST_MSFILTER:
{
- struct sockaddr_in *psin;
- struct ip_msfilter *msf = NULL;
struct group_filter *gsf = NULL;
- int msize, i, ifindex;
if (optlen < GROUP_FILTER_SIZE(0))
goto e_inval;
@@ -1095,7 +1181,6 @@ static int do_ip_setsockopt(struct sock *sk, int level,
err = PTR_ERR(gsf);
break;
}
-
/* numsrc >= (4G-140)/128 overflow in 32 bits */
if (gsf->gf_numsrc >= 0x1ffffff ||
gsf->gf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
@@ -1106,36 +1191,10 @@ static int do_ip_setsockopt(struct sock *sk, int level,
err = -EINVAL;
goto mc_msf_out;
}
- msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
- msf = kmalloc(msize, GFP_KERNEL);
- if (!msf) {
- err = -ENOBUFS;
- goto mc_msf_out;
- }
- ifindex = gsf->gf_interface;
- psin = (struct sockaddr_in *)&gsf->gf_group;
- if (psin->sin_family != AF_INET) {
- err = -EADDRNOTAVAIL;
- goto mc_msf_out;
- }
- msf->imsf_multiaddr = psin->sin_addr.s_addr;
- msf->imsf_interface = 0;
- msf->imsf_fmode = gsf->gf_fmode;
- msf->imsf_numsrc = gsf->gf_numsrc;
- err = -EADDRNOTAVAIL;
- for (i = 0; i < gsf->gf_numsrc; ++i) {
- psin = (struct sockaddr_in *)&gsf->gf_slist[i];
-
- if (psin->sin_family != AF_INET)
- goto mc_msf_out;
- msf->imsf_slist[i] = psin->sin_addr.s_addr;
- }
- kfree(gsf);
- gsf = NULL;
-
- err = ip_mc_msfilter(sk, msf, ifindex);
+ err = set_mcast_msfilter(sk, gsf->gf_interface,
+ gsf->gf_numsrc, gsf->gf_fmode,
+ &gsf->gf_group, gsf->gf_slist);
mc_msf_out:
- kfree(msf);
kfree(gsf);
break;
}
@@ -1272,9 +1331,113 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname,
if (level != SOL_IP)
return -ENOPROTOOPT;
- if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
- return compat_mc_setsockopt(sk, level, optname, optval, optlen,
- ip_setsockopt);
+ switch (optname) {
+ case MCAST_JOIN_GROUP:
+ case MCAST_LEAVE_GROUP:
+ {
+ struct compat_group_req __user *gr32 = (void __user *)optval;
+ struct group_req greq;
+ struct sockaddr_in *psin = (struct sockaddr_in *)&greq.gr_group;
+ struct ip_mreqn mreq;
+
+ if (optlen < sizeof(struct compat_group_req))
+ return -EINVAL;
+
+ if (get_user(greq.gr_interface, &gr32->gr_interface) ||
+ copy_from_user(&greq.gr_group, &gr32->gr_group,
+ sizeof(greq.gr_group)))
+ return -EFAULT;
+
+ if (psin->sin_family != AF_INET)
+ return -EINVAL;
+
+ memset(&mreq, 0, sizeof(mreq));
+ mreq.imr_multiaddr = psin->sin_addr;
+ mreq.imr_ifindex = greq.gr_interface;
+
+ rtnl_lock();
+ lock_sock(sk);
+ if (optname == MCAST_JOIN_GROUP)
+ err = ip_mc_join_group(sk, &mreq);
+ else
+ err = ip_mc_leave_group(sk, &mreq);
+ release_sock(sk);
+ rtnl_unlock();
+ return err;
+ }
+ case MCAST_JOIN_SOURCE_GROUP:
+ case MCAST_LEAVE_SOURCE_GROUP:
+ case MCAST_BLOCK_SOURCE:
+ case MCAST_UNBLOCK_SOURCE:
+ {
+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
+ struct group_source_req greqs;
+
+ if (optlen != sizeof(struct compat_group_source_req))
+ return -EINVAL;
+
+ if (get_user(greqs.gsr_interface, &gsr32->gsr_interface) ||
+ copy_from_user(&greqs.gsr_group, &gsr32->gsr_group,
+ sizeof(greqs.gsr_group)) ||
+ copy_from_user(&greqs.gsr_source, &gsr32->gsr_source,
+ sizeof(greqs.gsr_source)))
+ return -EFAULT;
+
+ rtnl_lock();
+ lock_sock(sk);
+ err = do_mcast_group_source(sk, optname, &greqs);
+ release_sock(sk);
+ rtnl_unlock();
+ return err;
+ }
+ case MCAST_MSFILTER:
+ {
+ const int size0 = offsetof(struct compat_group_filter, gf_slist);
+ struct compat_group_filter *gf32;
+ unsigned int n;
+ void *p;
+
+ if (optlen < size0)
+ return -EINVAL;
+ if (optlen > sysctl_optmem_max - 4)
+ return -ENOBUFS;
+
+ p = kmalloc(optlen + 4, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ gf32 = p + 4; /* we want ->gf_group and ->gf_slist aligned */
+ if (copy_from_user(gf32, optval, optlen)) {
+ err = -EFAULT;
+ goto mc_msf_out;
+ }
+
+ n = gf32->gf_numsrc;
+ /* numsrc >= (4G-140)/128 overflow in 32 bits */
+ if (n >= 0x1ffffff) {
+ err = -ENOBUFS;
+ goto mc_msf_out;
+ }
+ if (offsetof(struct compat_group_filter, gf_slist[n]) > optlen) {
+ err = -EINVAL;
+ goto mc_msf_out;
+ }
+
+ rtnl_lock();
+ lock_sock(sk);
+ /* numsrc >= (4G-140)/128 overflow in 32 bits */
+ if (n > sock_net(sk)->ipv4.sysctl_igmp_max_msf)
+ err = -ENOBUFS;
+ else
+ err = set_mcast_msfilter(sk, gf32->gf_interface,
+ n, gf32->gf_fmode,
+ &gf32->gf_group, gf32->gf_slist);
+ release_sock(sk);
+ rtnl_unlock();
+mc_msf_out:
+ kfree(p);
+ return err;
+ }
+ }
err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
@@ -1465,19 +1628,28 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
}
case MCAST_MSFILTER:
{
+ struct group_filter __user *p = (void __user *)optval;
struct group_filter gsf;
+ const int size0 = offsetof(struct group_filter, gf_slist);
+ int num;
- if (len < GROUP_FILTER_SIZE(0)) {
+ if (len < size0) {
err = -EINVAL;
goto out;
}
- if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
+ if (copy_from_user(&gsf, p, size0)) {
err = -EFAULT;
goto out;
}
- err = ip_mc_gsfget(sk, &gsf,
- (struct group_filter __user *)optval,
- optlen);
+ num = gsf.gf_numsrc;
+ err = ip_mc_gsfget(sk, &gsf, p->gf_slist);
+ if (err)
+ goto out;
+ if (gsf.gf_numsrc < num)
+ num = gsf.gf_numsrc;
+ if (put_user(GROUP_FILTER_SIZE(num), optlen) ||
+ copy_to_user(p, &gsf, size0))
+ err = -EFAULT;
goto out;
}
case IP_MULTICAST_ALL:
@@ -1492,7 +1664,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
if (sk->sk_type != SOCK_STREAM)
return -ENOPROTOOPT;
- msg.msg_control = (__force void *) optval;
+ msg.msg_control_is_user = true;
+ msg.msg_control_user = optval;
msg.msg_controllen = len;
msg.msg_flags = flags;
@@ -1589,9 +1762,47 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
{
int err;
- if (optname == MCAST_MSFILTER)
- return compat_mc_getsockopt(sk, level, optname, optval, optlen,
- ip_getsockopt);
+ if (optname == MCAST_MSFILTER) {
+ const int size0 = offsetof(struct compat_group_filter, gf_slist);
+ struct compat_group_filter __user *p = (void __user *)optval;
+ struct compat_group_filter gf32;
+ struct group_filter gf;
+ int ulen, err;
+ int num;
+
+ if (level != SOL_IP)
+ return -EOPNOTSUPP;
+
+ if (get_user(ulen, optlen))
+ return -EFAULT;
+
+ if (ulen < size0)
+ return -EINVAL;
+
+ if (copy_from_user(&gf32, p, size0))
+ return -EFAULT;
+
+ gf.gf_interface = gf32.gf_interface;
+ gf.gf_fmode = gf32.gf_fmode;
+ num = gf.gf_numsrc = gf32.gf_numsrc;
+ gf.gf_group = gf32.gf_group;
+
+ rtnl_lock();
+ lock_sock(sk);
+ err = ip_mc_gsfget(sk, &gf, p->gf_slist);
+ release_sock(sk);
+ rtnl_unlock();
+ if (err)
+ return err;
+ if (gf.gf_numsrc < num)
+ num = gf.gf_numsrc;
+ ulen = GROUP_FILTER_SIZE(num) - (sizeof(gf) - sizeof(gf32));
+ if (put_user(ulen, optlen) ||
+ put_user(gf.gf_fmode, &p->gf_fmode) ||
+ put_user(gf.gf_numsrc, &p->gf_numsrc))
+ return -EFAULT;
+ return 0;
+ }
err = do_ip_getsockopt(sk, level, optname, optval, optlen,
MSG_CMSG_COMPAT);
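Note: the ip_sock_set_*() helpers added above give in-kernel callers a typed replacement for kernel_setsockopt() (item 30 of the pull summary). A sketch of the before/after, assuming a hypothetical kernel-internal caller:

    /* Hypothetical in-kernel caller switching to the typed helper. */
    static void set_tos_example(struct socket *sock, int tos)
    {
            /* Old style, removed by this series:
             *   kernel_setsockopt(sock, SOL_IP, IP_TOS,
             *                     (char *)&tos, sizeof(tos));
             */
            ip_sock_set_tos(sock->sk, tos);  /* locks the socket itself */
    }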
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index cd4b84310d92..f4f1d11eab50 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -860,7 +860,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
netdev_state_change(dev);
}
-int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
+int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
int err = 0;
struct ip_tunnel *t = netdev_priv(dev);
@@ -960,6 +960,20 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
done:
return err;
}
+EXPORT_SYMBOL_GPL(ip_tunnel_ctl);
+
+int ip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct ip_tunnel_parm p;
+ int err;
+
+ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+ return -EFAULT;
+ err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, cmd);
+ if (!err && copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+ return -EFAULT;
+ return err;
+}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 1dda7c155c48..1d9c8cff5ac3 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -399,38 +399,31 @@ static int vti4_err(struct sk_buff *skb, u32 info)
}
static int
-vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+vti_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
int err = 0;
- struct ip_tunnel_parm p;
-
- if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
- return -EFAULT;
if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
- if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
- p.iph.ihl != 5)
+ if (p->iph.version != 4 || p->iph.protocol != IPPROTO_IPIP ||
+ p->iph.ihl != 5)
return -EINVAL;
}
- if (!(p.i_flags & GRE_KEY))
- p.i_key = 0;
- if (!(p.o_flags & GRE_KEY))
- p.o_key = 0;
+ if (!(p->i_flags & GRE_KEY))
+ p->i_key = 0;
+ if (!(p->o_flags & GRE_KEY))
+ p->o_key = 0;
- p.i_flags = VTI_ISVTI;
+ p->i_flags = VTI_ISVTI;
- err = ip_tunnel_ioctl(dev, &p, cmd);
+ err = ip_tunnel_ctl(dev, p, cmd);
if (err)
return err;
if (cmd != SIOCDELTUNNEL) {
- p.i_flags |= GRE_KEY;
- p.o_flags |= GRE_KEY;
+ p->i_flags |= GRE_KEY;
+ p->o_flags |= GRE_KEY;
}
-
- if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
- return -EFAULT;
return 0;
}
@@ -438,10 +431,11 @@ static const struct net_device_ops vti_netdev_ops = {
.ndo_init = vti_tunnel_init,
.ndo_uninit = ip_tunnel_uninit,
.ndo_start_xmit = vti_tunnel_xmit,
- .ndo_do_ioctl = vti_tunnel_ioctl,
+ .ndo_do_ioctl = ip_tunnel_ioctl,
.ndo_change_mtu = ip_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
+ .ndo_tunnel_ctl = vti_tunnel_ctl,
};
static void vti_tunnel_setup(struct net_device *dev)
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 678575adaf3b..40fea52c8277 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -327,41 +327,29 @@ static bool ipip_tunnel_ioctl_verify_protocol(u8 ipproto)
}
static int
-ipip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ipip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
- int err = 0;
- struct ip_tunnel_parm p;
-
- if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
- return -EFAULT;
-
if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
- if (p.iph.version != 4 ||
- !ipip_tunnel_ioctl_verify_protocol(p.iph.protocol) ||
- p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
+ if (p->iph.version != 4 ||
+ !ipip_tunnel_ioctl_verify_protocol(p->iph.protocol) ||
+ p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)))
return -EINVAL;
}
- p.i_key = p.o_key = 0;
- p.i_flags = p.o_flags = 0;
- err = ip_tunnel_ioctl(dev, &p, cmd);
- if (err)
- return err;
-
- if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
- return -EFAULT;
-
- return 0;
+ p->i_key = p->o_key = 0;
+ p->i_flags = p->o_flags = 0;
+ return ip_tunnel_ctl(dev, p, cmd);
}
static const struct net_device_ops ipip_netdev_ops = {
.ndo_init = ipip_tunnel_init,
.ndo_uninit = ip_tunnel_uninit,
.ndo_start_xmit = ipip_tunnel_xmit,
- .ndo_do_ioctl = ipip_tunnel_ioctl,
+ .ndo_do_ioctl = ip_tunnel_ioctl,
.ndo_change_mtu = ip_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
+ .ndo_tunnel_ctl = ipip_tunnel_ctl,
};
#define IPIP_FEATURES (NETIF_F_SG | \
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index b2363b82b48d..f5c7a58844a4 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -421,37 +421,6 @@ static void ipmr_free_table(struct mr_table *mrt)
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
-static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
-{
- struct net *net = dev_net(dev);
-
- dev_close(dev);
-
- dev = __dev_get_by_name(net, "tunl0");
- if (dev) {
- const struct net_device_ops *ops = dev->netdev_ops;
- struct ifreq ifr;
- struct ip_tunnel_parm p;
-
- memset(&p, 0, sizeof(p));
- p.iph.daddr = v->vifc_rmt_addr.s_addr;
- p.iph.saddr = v->vifc_lcl_addr.s_addr;
- p.iph.version = 4;
- p.iph.ihl = 5;
- p.iph.protocol = IPPROTO_IPIP;
- sprintf(p.name, "dvmrp%d", v->vifc_vifi);
- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
-
- if (ops->ndo_do_ioctl) {
- mm_segment_t oldfs = get_fs();
-
- set_fs(KERNEL_DS);
- ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
- set_fs(oldfs);
- }
- }
-}
-
/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
@@ -471,51 +440,52 @@ static bool ipmr_init_vif_indev(const struct net_device *dev)
static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
- struct net_device *dev;
-
- dev = __dev_get_by_name(net, "tunl0");
+ struct net_device *tunnel_dev, *new_dev;
+ struct ip_tunnel_parm p = { };
+ int err;
- if (dev) {
- const struct net_device_ops *ops = dev->netdev_ops;
- int err;
- struct ifreq ifr;
- struct ip_tunnel_parm p;
+ tunnel_dev = __dev_get_by_name(net, "tunl0");
+ if (!tunnel_dev)
+ goto out;
- memset(&p, 0, sizeof(p));
- p.iph.daddr = v->vifc_rmt_addr.s_addr;
- p.iph.saddr = v->vifc_lcl_addr.s_addr;
- p.iph.version = 4;
- p.iph.ihl = 5;
- p.iph.protocol = IPPROTO_IPIP;
- sprintf(p.name, "dvmrp%d", v->vifc_vifi);
- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
+ p.iph.daddr = v->vifc_rmt_addr.s_addr;
+ p.iph.saddr = v->vifc_lcl_addr.s_addr;
+ p.iph.version = 4;
+ p.iph.ihl = 5;
+ p.iph.protocol = IPPROTO_IPIP;
+ sprintf(p.name, "dvmrp%d", v->vifc_vifi);
- if (ops->ndo_do_ioctl) {
- mm_segment_t oldfs = get_fs();
+ if (!tunnel_dev->netdev_ops->ndo_tunnel_ctl)
+ goto out;
+ err = tunnel_dev->netdev_ops->ndo_tunnel_ctl(tunnel_dev, &p,
+ SIOCADDTUNNEL);
+ if (err)
+ goto out;
- set_fs(KERNEL_DS);
- err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
- set_fs(oldfs);
- } else {
- err = -EOPNOTSUPP;
- }
- dev = NULL;
-
- if (err == 0 &&
- (dev = __dev_get_by_name(net, p.name)) != NULL) {
- dev->flags |= IFF_MULTICAST;
- if (!ipmr_init_vif_indev(dev))
- goto failure;
- if (dev_open(dev, NULL))
- goto failure;
- dev_hold(dev);
- }
- }
- return dev;
+ new_dev = __dev_get_by_name(net, p.name);
+ if (!new_dev)
+ goto out;
-failure:
- unregister_netdevice(dev);
- return NULL;
+ new_dev->flags |= IFF_MULTICAST;
+ if (!ipmr_init_vif_indev(new_dev))
+ goto out_unregister;
+ if (dev_open(new_dev, NULL))
+ goto out_unregister;
+ dev_hold(new_dev);
+ err = dev_set_allmulti(new_dev, 1);
+ if (err) {
+ dev_close(new_dev);
+ tunnel_dev->netdev_ops->ndo_tunnel_ctl(tunnel_dev, &p,
+ SIOCDELTUNNEL);
+ dev_put(new_dev);
+ new_dev = ERR_PTR(err);
+ }
+ return new_dev;
+
+out_unregister:
+ unregister_netdevice(new_dev);
+out:
+ return ERR_PTR(-ENOBUFS);
}
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
@@ -867,14 +837,8 @@ static int vif_add(struct net *net, struct mr_table *mrt,
break;
case VIFF_TUNNEL:
dev = ipmr_new_tunnel(net, vifc);
- if (!dev)
- return -ENOBUFS;
- err = dev_set_allmulti(dev, 1);
- if (err) {
- ipmr_del_tunnel(dev, vifc);
- dev_put(dev);
- return err;
- }
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
break;
case VIFF_USE_IFINDEX:
case 0:
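Note: ipmr now drives the tunnel driver through the new ->ndo_tunnel_ctl() hook with a kernel-space ip_tunnel_parm, deleting the set_fs(KERNEL_DS) dance around ndo_do_ioctl(). The in-kernel calling convention, distilled from ipmr_new_tunnel() above:

    /* Distilled from ipmr_new_tunnel(): configure a tunnel from kernel
     * context without faking a userspace ioctl. */
    static int kernel_add_tunnel(struct net_device *dev,
                                 struct ip_tunnel_parm *p)
    {
            if (!dev->netdev_ops->ndo_tunnel_ctl)
                    return -EOPNOTSUPP;
            return dev->netdev_ops->ndo_tunnel_ctl(dev, p, SIOCADDTUNNEL);
    }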
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 563f71bcb2d7..400a9f89ebdb 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -33,8 +33,20 @@ static const struct nla_policy rtm_nh_policy[NHA_MAX + 1] = {
[NHA_ENCAP] = { .type = NLA_NESTED },
[NHA_GROUPS] = { .type = NLA_FLAG },
[NHA_MASTER] = { .type = NLA_U32 },
+ [NHA_FDB] = { .type = NLA_FLAG },
};
+static int call_nexthop_notifiers(struct net *net,
+ enum nexthop_event_type event_type,
+ struct nexthop *nh)
+{
+ int err;
+
+ err = atomic_notifier_call_chain(&net->nexthop.notifier_chain,
+ event_type, nh);
+ return notifier_to_errno(err);
+}
+
static unsigned int nh_dev_hashfn(unsigned int val)
{
unsigned int mask = NH_DEV_HASHSIZE - 1;
@@ -114,6 +126,7 @@ static struct nexthop *nexthop_alloc(void)
INIT_LIST_HEAD(&nh->fi_list);
INIT_LIST_HEAD(&nh->f6i_list);
INIT_LIST_HEAD(&nh->grp_list);
+ INIT_LIST_HEAD(&nh->fdb_list);
}
return nh;
}
@@ -234,6 +247,9 @@ static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
if (nla_put_u32(skb, NHA_ID, nh->id))
goto nla_put_failure;
+ if (nh->is_fdb_nh && nla_put_flag(skb, NHA_FDB))
+ goto nla_put_failure;
+
if (nh->is_group) {
struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
@@ -248,7 +264,7 @@ static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
if (nla_put_flag(skb, NHA_BLACKHOLE))
goto nla_put_failure;
goto out;
- } else {
+ } else if (!nh->is_fdb_nh) {
const struct net_device *dev;
dev = nhi->fib_nhc.nhc_dev;
@@ -395,12 +411,35 @@ static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
return true;
}
+static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
+ struct netlink_ext_ack *extack)
+{
+ struct nh_info *nhi;
+
+ if (!nh->is_fdb_nh) {
+ NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
+ return -EINVAL;
+ }
+
+ nhi = rtnl_dereference(nh->nh_info);
+ if (*nh_family == AF_UNSPEC) {
+ *nh_family = nhi->family;
+ } else if (*nh_family != nhi->family) {
+ NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
unsigned int len = nla_len(tb[NHA_GROUP]);
+ u8 nh_family = AF_UNSPEC;
struct nexthop_grp *nhg;
unsigned int i, j;
+ u8 nhg_fdb = 0;
if (len & (sizeof(struct nexthop_grp) - 1)) {
NL_SET_ERR_MSG(extack,
@@ -429,6 +468,8 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
}
}
+ if (tb[NHA_FDB])
+ nhg_fdb = 1;
nhg = nla_data(tb[NHA_GROUP]);
for (i = 0; i < len; ++i) {
struct nexthop *nh;
@@ -440,11 +481,20 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
}
if (!valid_group_nh(nh, len, extack))
return -EINVAL;
+
+ if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
+ return -EINVAL;
+
+ if (!nhg_fdb && nh->is_fdb_nh) {
+ NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
+ return -EINVAL;
+ }
}
for (i = NHA_GROUP_TYPE + 1; i < __NHA_MAX; ++i) {
if (!tb[i])
continue;
-
+ if (tb[NHA_FDB])
+ continue;
NL_SET_ERR_MSG(extack,
"No other attributes can be set in nexthop groups");
return -EINVAL;
@@ -503,6 +553,9 @@ struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
if (hash > atomic_read(&nhge->upper_bound))
continue;
+ if (nhge->nh->is_fdb_nh)
+ return nhge->nh;
+
/* nexthops always check if it is good and does
* not rely on a sysctl for this behavior
*/
@@ -572,6 +625,11 @@ int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
{
struct nh_info *nhi;
+ if (nh->is_fdb_nh) {
+ NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
+ return -EINVAL;
+ }
+
/* fib6_src is unique to a fib6_info and limits the ability to cache
* routes in fib6_nh within a nexthop that is potentially shared
* across multiple fib entries. If the config wants to use source
@@ -648,6 +706,12 @@ int fib_check_nexthop(struct nexthop *nh, u8 scope,
{
int err = 0;
+ if (nh->is_fdb_nh) {
+ NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
+ err = -EINVAL;
+ goto out;
+ }
+
if (nh->is_group) {
struct nh_group *nhg;
@@ -787,6 +851,8 @@ static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
bool do_flush = false;
struct fib_info *fi;
+ call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh);
+
list_for_each_entry(fi, &nh->fi_list, nh_list) {
fi->fib_flags |= RTNH_F_DEAD;
do_flush = true;
@@ -798,7 +864,8 @@ static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
/* __ip6_del_rt does a release, so do a hold here */
fib6_info_hold(f6i);
- ipv6_stub->ip6_del_rt(net, f6i);
+ ipv6_stub->ip6_del_rt(net, f6i,
+ !net->ipv4.sysctl_nexthop_compat_mode);
}
}
@@ -1055,7 +1122,7 @@ out:
if (!rc) {
nh_base_seq_inc(net);
nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
- if (replace_notify)
+ if (replace_notify && net->ipv4.sysctl_nexthop_compat_mode)
nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
}
@@ -1118,10 +1185,10 @@ static struct nexthop *nexthop_create_group(struct net *net,
/* spare group used for removals */
nhg->spare = nexthop_grp_alloc(num_nh);
- if (!nhg) {
+ if (!nhg->spare) {
kfree(nhg);
kfree(nh);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
nhg->spare->spare = nhg;
@@ -1148,6 +1215,9 @@ static struct nexthop *nexthop_create_group(struct net *net,
nh_group_rebalance(nhg);
}
+ if (cfg->nh_fdb)
+ nh->is_fdb_nh = 1;
+
rcu_assign_pointer(nh->nh_grp, nhg);
return nh;
@@ -1176,7 +1246,7 @@ static int nh_create_ipv4(struct net *net, struct nexthop *nh,
.fc_encap = cfg->nh_encap,
.fc_encap_type = cfg->nh_encap_type,
};
- u32 tb_id = l3mdev_fib_table(cfg->dev);
+ u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
int err;
err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
@@ -1185,6 +1255,9 @@ static int nh_create_ipv4(struct net *net, struct nexthop *nh,
goto out;
}
+ if (nh->is_fdb_nh)
+ goto out;
+
/* sets nh_dev if successful */
err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
if (!err) {
@@ -1210,6 +1283,7 @@ static int nh_create_ipv6(struct net *net, struct nexthop *nh,
.fc_flags = cfg->nh_flags,
.fc_encap = cfg->nh_encap,
.fc_encap_type = cfg->nh_encap_type,
+ .fc_is_fdb = cfg->nh_fdb,
};
int err;
@@ -1251,6 +1325,9 @@ static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
nhi->family = cfg->nh_family;
nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;
+ if (cfg->nh_fdb)
+ nh->is_fdb_nh = 1;
+
if (cfg->nh_blackhole) {
nhi->reject_nh = 1;
cfg->nh_ifindex = net->loopback_dev->ifindex;
@@ -1272,7 +1349,8 @@ static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
}
/* add the entry to the device based hash */
- nexthop_devhash_add(net, nhi);
+ if (!nh->is_fdb_nh)
+ nexthop_devhash_add(net, nhi);
rcu_assign_pointer(nh->nh_info, nhi);
@@ -1376,6 +1454,19 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
if (tb[NHA_ID])
cfg->nh_id = nla_get_u32(tb[NHA_ID]);
+ if (tb[NHA_FDB]) {
+ if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
+ tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE]) {
+ NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
+ goto out;
+ }
+ if (nhm->nh_flags) {
+ NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
+ goto out;
+ }
+ cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
+ }
+
if (tb[NHA_GROUP]) {
if (nhm->nh_family != AF_UNSPEC) {
NL_SET_ERR_MSG(extack, "Invalid family for group");
@@ -1399,8 +1490,8 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
if (tb[NHA_BLACKHOLE]) {
if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
- tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE]) {
- NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway or oif");
+ tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
+ NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
goto out;
}
@@ -1409,26 +1500,28 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
goto out;
}
- if (!tb[NHA_OIF]) {
- NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole nexthops");
+ if (!cfg->nh_fdb && !tb[NHA_OIF]) {
+ NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
goto out;
}
- cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
- if (cfg->nh_ifindex)
- cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);
+ if (!cfg->nh_fdb && tb[NHA_OIF]) {
+ cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
+ if (cfg->nh_ifindex)
+ cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);
- if (!cfg->dev) {
- NL_SET_ERR_MSG(extack, "Invalid device index");
- goto out;
- } else if (!(cfg->dev->flags & IFF_UP)) {
- NL_SET_ERR_MSG(extack, "Nexthop device is not up");
- err = -ENETDOWN;
- goto out;
- } else if (!netif_carrier_ok(cfg->dev)) {
- NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
- err = -ENETDOWN;
- goto out;
+ if (!cfg->dev) {
+ NL_SET_ERR_MSG(extack, "Invalid device index");
+ goto out;
+ } else if (!(cfg->dev->flags & IFF_UP)) {
+ NL_SET_ERR_MSG(extack, "Nexthop device is not up");
+ err = -ENETDOWN;
+ goto out;
+ } else if (!netif_carrier_ok(cfg->dev)) {
+ NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
+ err = -ENETDOWN;
+ goto out;
+ }
}
err = -EINVAL;
@@ -1657,7 +1750,7 @@ static bool nh_dump_filtered(struct nexthop *nh, int dev_idx, int master_idx,
static int nh_valid_dump_req(const struct nlmsghdr *nlh, int *dev_idx,
int *master_idx, bool *group_filter,
- struct netlink_callback *cb)
+ bool *fdb_filter, struct netlink_callback *cb)
{
struct netlink_ext_ack *extack = cb->extack;
struct nlattr *tb[NHA_MAX + 1];
@@ -1694,6 +1787,9 @@ static int nh_valid_dump_req(const struct nlmsghdr *nlh, int *dev_idx,
case NHA_GROUPS:
*group_filter = true;
break;
+ case NHA_FDB:
+ *fdb_filter = true;
+ break;
default:
NL_SET_ERR_MSG(extack, "Unsupported attribute in dump request");
return -EINVAL;
@@ -1712,17 +1808,17 @@ static int nh_valid_dump_req(const struct nlmsghdr *nlh, int *dev_idx,
/* rtnl */
static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
{
+ bool group_filter = false, fdb_filter = false;
struct nhmsg *nhm = nlmsg_data(cb->nlh);
int dev_filter_idx = 0, master_idx = 0;
struct net *net = sock_net(skb->sk);
struct rb_root *root = &net->nexthop.rb_root;
- bool group_filter = false;
struct rb_node *node;
int idx = 0, s_idx;
int err;
err = nh_valid_dump_req(cb->nlh, &dev_filter_idx, &master_idx,
- &group_filter, cb);
+ &group_filter, &fdb_filter, cb);
if (err < 0)
return err;
@@ -1807,6 +1903,19 @@ static struct notifier_block nh_netdev_notifier = {
.notifier_call = nh_netdev_event,
};
+int register_nexthop_notifier(struct net *net, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&net->nexthop.notifier_chain, nb);
+}
+EXPORT_SYMBOL(register_nexthop_notifier);
+
+int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&net->nexthop.notifier_chain,
+ nb);
+}
+EXPORT_SYMBOL(unregister_nexthop_notifier);
+
static void __net_exit nexthop_net_exit(struct net *net)
{
rtnl_lock();
@@ -1823,6 +1932,7 @@ static int __net_init nexthop_net_init(struct net *net)
net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
if (!net->nexthop.devhash)
return -ENOMEM;
+ ATOMIC_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);
return 0;
}
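Note: the per-netns atomic notifier chain added above lets other subsystems (e.g. switchdev drivers) track nexthop changes; only NEXTHOP_EVENT_DEL is emitted in this diff. A hedged sketch of a consumer (handler and variable names are made up):

    /* Hypothetical consumer of the new nexthop notifier chain. */
    static int my_nh_event(struct notifier_block *nb, unsigned long event,
                           void *ptr)
    {
            struct nexthop *nh = ptr;

            if (event == NEXTHOP_EVENT_DEL)
                    pr_debug("nexthop %u removed\n", nh->id);
            return NOTIFY_DONE;
    }

    static struct notifier_block my_nh_nb = {
            .notifier_call = my_nh_event,
    };

    /* register_nexthop_notifier(net, &my_nh_nb) on init;
     * unregister_nexthop_notifier(net, &my_nh_nb) on teardown. */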
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index b73f540fa19b..1d7076b78e63 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3334,8 +3334,7 @@ static int ip_rt_gc_elasticity __read_mostly = 8;
static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = (struct net *)__ctl->extra1;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 81b267e990a1..5653e3b011bf 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -71,8 +71,7 @@ static void set_local_port_range(struct net *net, int range[2])
/* Validate changes from /proc interface. */
static int ipv4_local_port_range(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net =
container_of(table->data, struct net, ipv4.ip_local_ports.range);
@@ -107,7 +106,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
/* Validate changes from /proc interface. */
static int ipv4_privileged_ports(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
ipv4.sysctl_ip_prot_sock);
@@ -168,8 +167,7 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
/* Validate changes from /proc interface. */
static int ipv4_ping_group_range(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct user_namespace *user_ns = current_user_ns();
int ret;
@@ -204,8 +202,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
}
static int ipv4_fwd_update_priority(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net;
int ret;
@@ -221,7 +218,7 @@ static int ipv4_fwd_update_priority(struct ctl_table *table, int write,
}
static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(ctl->data, struct net,
ipv4.tcp_congestion_control);
@@ -241,9 +238,8 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
}
static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
- int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
+ int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
int ret;
@@ -258,9 +254,8 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
}
static int proc_allowed_congestion_control(struct ctl_table *ctl,
- int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
+ int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
int ret;
@@ -296,8 +291,7 @@ static int sscanf_key(char *buf, __le32 *key)
}
static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
ipv4.sysctl_tcp_fastopen);
@@ -399,7 +393,7 @@ static void proc_configure_early_demux(int enabled, int protocol)
}
static int proc_tcp_early_demux(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int ret = 0;
@@ -415,7 +409,7 @@ static int proc_tcp_early_demux(struct ctl_table *table, int write,
}
static int proc_udp_early_demux(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int ret = 0;
@@ -431,8 +425,7 @@ static int proc_udp_early_demux(struct ctl_table *table, int write,
}
static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table,
- int write,
- void __user *buffer,
+ int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
@@ -447,8 +440,7 @@ static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table,
}
static int proc_tcp_available_ulp(struct ctl_table *ctl,
- int write,
- void __user *buffer, size_t *lenp,
+ int write, void *buffer, size_t *lenp,
loff_t *ppos)
{
struct ctl_table tbl = { .maxlen = TCP_ULP_BUF_MAX, };
@@ -466,7 +458,7 @@ static int proc_tcp_available_ulp(struct ctl_table *ctl,
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static int proc_fib_multipath_hash_policy(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
+ void *buffer, size_t *lenp,
loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
@@ -711,6 +703,15 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_tcp_early_demux
},
{
+ .procname = "nexthop_compat_mode",
+ .data = &init_net.ipv4.sysctl_nexthop_compat_mode,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
.procname = "ip_default_ttl",
.data = &init_net.ipv4.sysctl_ip_default_ttl,
.maxlen = sizeof(int),
@@ -1321,6 +1322,13 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_doulongvec_minmax,
},
{
+ .procname = "tcp_comp_sack_slack_ns",
+ .data = &init_net.ipv4.sysctl_tcp_comp_sack_slack_ns,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+ {
.procname = "tcp_comp_sack_nr",
.data = &init_net.ipv4.sysctl_tcp_comp_sack_nr,
.maxlen = sizeof(int),
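Both new entries surface under /proc/sys/net/ipv4/, named after their
procname fields. A userspace usage sketch (hypothetical program; the
path follows from the table above):

#include <stdio.h>

int main(void)
{
	/* nexthop_compat_mode accepts only 0 or 1 (extra1/extra2 clamp) */
	FILE *f = fopen("/proc/sys/net/ipv4/nexthop_compat_mode", "w");

	if (!f)
		return 1;
	fputs("0", f);
	return fclose(f) ? 1 : 0;
}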
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index dd401757eea1..15d47d5e7951 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2801,6 +2801,163 @@ static void tcp_enable_tx_delay(void)
}
}
+/* When set, indicates to always queue non-full frames. Later the user clears
+ * this option and we transmit any pending partial frames in the queue. This is
+ * meant to be used alongside sendfile() to get properly filled frames when the
+ * user (for example) must write out headers with a write() call first and then
+ * use sendfile to send out the data parts.
+ *
+ * TCP_CORK can be set together with TCP_NODELAY and it is stronger than
+ * TCP_NODELAY.
+ */
+static void __tcp_sock_set_cork(struct sock *sk, bool on)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (on) {
+ tp->nonagle |= TCP_NAGLE_CORK;
+ } else {
+ tp->nonagle &= ~TCP_NAGLE_CORK;
+ if (tp->nonagle & TCP_NAGLE_OFF)
+ tp->nonagle |= TCP_NAGLE_PUSH;
+ tcp_push_pending_frames(sk);
+ }
+}
+
+void tcp_sock_set_cork(struct sock *sk, bool on)
+{
+ lock_sock(sk);
+ __tcp_sock_set_cork(sk, on);
+ release_sock(sk);
+}
+EXPORT_SYMBOL(tcp_sock_set_cork);
+
+/* TCP_NODELAY is weaker than TCP_CORK, so this option on a corked socket is
+ * remembered, but it is not activated until cork is cleared.
+ *
+ * However, when TCP_NODELAY is set we make an explicit push, which overrides
+ * even TCP_CORK for currently queued segments.
+ */
+static void __tcp_sock_set_nodelay(struct sock *sk, bool on)
+{
+ if (on) {
+ tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
+ tcp_push_pending_frames(sk);
+ } else {
+ tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF;
+ }
+}
+
+void tcp_sock_set_nodelay(struct sock *sk)
+{
+ lock_sock(sk);
+ __tcp_sock_set_nodelay(sk, true);
+ release_sock(sk);
+}
+EXPORT_SYMBOL(tcp_sock_set_nodelay);
+
+static void __tcp_sock_set_quickack(struct sock *sk, int val)
+{
+ if (!val) {
+ inet_csk_enter_pingpong_mode(sk);
+ return;
+ }
+
+ inet_csk_exit_pingpong_mode(sk);
+ if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
+ inet_csk_ack_scheduled(sk)) {
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED;
+ tcp_cleanup_rbuf(sk, 1);
+ if (!(val & 1))
+ inet_csk_enter_pingpong_mode(sk);
+ }
+}
+
+void tcp_sock_set_quickack(struct sock *sk, int val)
+{
+ lock_sock(sk);
+ __tcp_sock_set_quickack(sk, val);
+ release_sock(sk);
+}
+EXPORT_SYMBOL(tcp_sock_set_quickack);
+
+int tcp_sock_set_syncnt(struct sock *sk, int val)
+{
+ if (val < 1 || val > MAX_TCP_SYNCNT)
+ return -EINVAL;
+
+ lock_sock(sk);
+ inet_csk(sk)->icsk_syn_retries = val;
+ release_sock(sk);
+ return 0;
+}
+EXPORT_SYMBOL(tcp_sock_set_syncnt);
+
+void tcp_sock_set_user_timeout(struct sock *sk, u32 val)
+{
+ lock_sock(sk);
+ inet_csk(sk)->icsk_user_timeout = val;
+ release_sock(sk);
+}
+EXPORT_SYMBOL(tcp_sock_set_user_timeout);
+
+static int __tcp_sock_set_keepidle(struct sock *sk, int val)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (val < 1 || val > MAX_TCP_KEEPIDLE)
+ return -EINVAL;
+
+ tp->keepalive_time = val * HZ;
+ if (sock_flag(sk, SOCK_KEEPOPEN) &&
+ !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
+ u32 elapsed = keepalive_time_elapsed(tp);
+
+ if (tp->keepalive_time > elapsed)
+ elapsed = tp->keepalive_time - elapsed;
+ else
+ elapsed = 0;
+ inet_csk_reset_keepalive_timer(sk, elapsed);
+ }
+
+ return 0;
+}
+
+int tcp_sock_set_keepidle(struct sock *sk, int val)
+{
+ int err;
+
+ lock_sock(sk);
+ err = __tcp_sock_set_keepidle(sk, val);
+ release_sock(sk);
+ return err;
+}
+EXPORT_SYMBOL(tcp_sock_set_keepidle);
+
+int tcp_sock_set_keepintvl(struct sock *sk, int val)
+{
+ if (val < 1 || val > MAX_TCP_KEEPINTVL)
+ return -EINVAL;
+
+ lock_sock(sk);
+ tcp_sk(sk)->keepalive_intvl = val * HZ;
+ release_sock(sk);
+ return 0;
+}
+EXPORT_SYMBOL(tcp_sock_set_keepintvl);
+
+int tcp_sock_set_keepcnt(struct sock *sk, int val)
+{
+ if (val < 1 || val > MAX_TCP_KEEPCNT)
+ return -EINVAL;
+
+ lock_sock(sk);
+ tcp_sk(sk)->keepalive_probes = val;
+ release_sock(sk);
+ return 0;
+}
+EXPORT_SYMBOL(tcp_sock_set_keepcnt);
+
/*
* Socket option code for TCP.
*/
@@ -2898,20 +3055,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
break;
case TCP_NODELAY:
- if (val) {
- /* TCP_NODELAY is weaker than TCP_CORK, so that
- * this option on corked socket is remembered, but
- * it is not activated until cork is cleared.
- *
- * However, when TCP_NODELAY is set we make
- * an explicit push, which overrides even TCP_CORK
- * for currently queued segments.
- */
- tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
- tcp_push_pending_frames(sk);
- } else {
- tp->nonagle &= ~TCP_NAGLE_OFF;
- }
+ __tcp_sock_set_nodelay(sk, val);
break;
case TCP_THIN_LINEAR_TIMEOUTS:
@@ -2979,43 +3123,11 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
break;
case TCP_CORK:
- /* When set indicates to always queue non-full frames.
- * Later the user clears this option and we transmit
- * any pending partial frames in the queue. This is
- * meant to be used alongside sendfile() to get properly
- * filled frames when the user (for example) must write
- * out headers with a write() call first and then use
- * sendfile to send out the data parts.
- *
- * TCP_CORK can be set together with TCP_NODELAY and it is
- * stronger than TCP_NODELAY.
- */
- if (val) {
- tp->nonagle |= TCP_NAGLE_CORK;
- } else {
- tp->nonagle &= ~TCP_NAGLE_CORK;
- if (tp->nonagle&TCP_NAGLE_OFF)
- tp->nonagle |= TCP_NAGLE_PUSH;
- tcp_push_pending_frames(sk);
- }
+ __tcp_sock_set_cork(sk, val);
break;
case TCP_KEEPIDLE:
- if (val < 1 || val > MAX_TCP_KEEPIDLE)
- err = -EINVAL;
- else {
- tp->keepalive_time = val * HZ;
- if (sock_flag(sk, SOCK_KEEPOPEN) &&
- !((1 << sk->sk_state) &
- (TCPF_CLOSE | TCPF_LISTEN))) {
- u32 elapsed = keepalive_time_elapsed(tp);
- if (tp->keepalive_time > elapsed)
- elapsed = tp->keepalive_time - elapsed;
- else
- elapsed = 0;
- inet_csk_reset_keepalive_timer(sk, elapsed);
- }
- }
+ err = __tcp_sock_set_keepidle(sk, val);
break;
case TCP_KEEPINTVL:
if (val < 1 || val > MAX_TCP_KEEPINTVL)
@@ -3046,8 +3158,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
case TCP_LINGER2:
if (val < 0)
tp->linger2 = -1;
- else if (val > net->ipv4.sysctl_tcp_fin_timeout / HZ)
- tp->linger2 = 0;
+ else if (val > TCP_FIN_TIMEOUT_MAX / HZ)
+ tp->linger2 = TCP_FIN_TIMEOUT_MAX;
else
tp->linger2 = val * HZ;
break;
@@ -3072,19 +3184,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
break;
case TCP_QUICKACK:
- if (!val) {
- inet_csk_enter_pingpong_mode(sk);
- } else {
- inet_csk_exit_pingpong_mode(sk);
- if ((1 << sk->sk_state) &
- (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
- inet_csk_ack_scheduled(sk)) {
- icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
- tcp_cleanup_rbuf(sk, 1);
- if (!(val & 1))
- inet_csk_enter_pingpong_mode(sk);
- }
- }
+ __tcp_sock_set_quickack(sk, val);
break;
#ifdef CONFIG_TCP_MD5SIG
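The new tcp_sock_set_*() helpers let in-kernel TCP users flip these
options directly rather than going through the setsockopt path. A hedged
caller sketch — sock is an assumed, already-connected struct socket:

tcp_sock_set_nodelay(sock->sk);

tcp_sock_set_cork(sock->sk, true);
/* ... queue header and payload writes ... */
tcp_sock_set_cork(sock->sk, false);	/* push any partial frames */

if (tcp_sock_set_keepidle(sock->sk, 60))
	pr_warn("keepidle value out of range\n");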
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 29c6fc8c7716..83330a6cb242 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -437,7 +437,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
/* 3. Try to fix everything up. This runs immediately after the connection
* enters the established state.
*/
-void tcp_init_buffer_space(struct sock *sk)
+static void tcp_init_buffer_space(struct sock *sk)
{
int tcp_app_win = sock_net(sk)->ipv4.sysctl_tcp_app_win;
struct tcp_sock *tp = tcp_sk(sk);
@@ -2183,8 +2183,7 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
}
/* Detect loss in event "A" above by marking the head of the queue as lost.
- * For non-SACK(Reno) senders, the first "packets" number of segments
- * are considered lost. For RFC3517 SACK, a segment is considered lost if it
+ * For RFC3517 SACK, a segment is considered lost if it
* has at least tp->reordering SACKed segments above it; "packets" refers to
* the maximum SACKed segments to pass before reaching this limit.
*/
@@ -2192,10 +2191,9 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
- int cnt, oldcnt, lost;
- unsigned int mss;
+ int cnt;
/* Use SACK to deduce losses of new sequences sent during recovery */
- const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq;
+ const u32 loss_high = tp->snd_nxt;
WARN_ON(packets > tp->packets_out);
skb = tp->lost_skb_hint;
@@ -2218,26 +2216,11 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
break;
- oldcnt = cnt;
- if (tcp_is_reno(tp) ||
- (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
+ if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
cnt += tcp_skb_pcount(skb);
- if (cnt > packets) {
- if (tcp_is_sack(tp) ||
- (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
- (oldcnt >= packets))
- break;
-
- mss = tcp_skb_mss(skb);
- /* If needed, chop off the prefix to mark as lost. */
- lost = (packets - oldcnt) * mss;
- if (lost < skb->len &&
- tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
- lost, mss, GFP_ATOMIC) < 0)
- break;
- cnt = packets;
- }
+ if (cnt > packets)
+ break;
tcp_skb_mark_lost(tp, skb);
@@ -2849,8 +2832,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
if (tcp_try_undo_partial(sk, prior_snd_una))
return;
/* Partial ACK arrived. Force fast retransmit. */
- do_lost = tcp_is_reno(tp) ||
- tcp_force_fast_retransmit(sk);
+ do_lost = tcp_force_fast_retransmit(sk);
}
if (tcp_try_undo_dsack(sk)) {
tcp_try_keep_open(sk);
@@ -3014,7 +2996,7 @@ void tcp_rearm_rto(struct sock *sk)
rto = usecs_to_jiffies(max_t(int, delta_us, 1));
}
tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
- TCP_RTO_MAX, tcp_rtx_queue_head(sk));
+ TCP_RTO_MAX);
}
}
@@ -3291,7 +3273,7 @@ static void tcp_ack_probe(struct sock *sk)
unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- when, TCP_RTO_MAX, NULL);
+ when, TCP_RTO_MAX);
}
}
@@ -4323,6 +4305,33 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
}
}
+static void tcp_sack_compress_send_ack(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (!tp->compressed_ack)
+ return;
+
+ if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
+ __sock_put(sk);
+
+ /* Since one ACK still has to be sent,
+ * subtract one from tp->compressed_ack to keep
+ * LINUX_MIB_TCPACKCOMPRESSED accurate.
+ */
+ NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
+ tp->compressed_ack - 1);
+
+ tp->compressed_ack = 0;
+ tcp_send_ack(sk);
+}
+
+/* Reasonable number of SACK blocks to include in the TCP SACK option.
+ * The max is 4, but it drops to 3 when TCP timestamps are in use.
+ * Given that SACK packets might be lost, be conservative and use 2.
+ */
+#define TCP_SACK_BLOCKS_EXPECTED 2
+
static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -4335,6 +4344,8 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) {
if (tcp_sack_extend(sp, seq, end_seq)) {
+ if (this_sack >= TCP_SACK_BLOCKS_EXPECTED)
+ tcp_sack_compress_send_ack(sk);
/* Rotate this_sack to the first one. */
for (; this_sack > 0; this_sack--, sp--)
swap(*sp, *(sp - 1));
@@ -4344,6 +4355,9 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
}
}
+ if (this_sack >= TCP_SACK_BLOCKS_EXPECTED)
+ tcp_sack_compress_send_ack(sk);
+
/* Could not find an adjacent existing SACK, build a new one,
* put it at the front, and shift everyone else down. We
* always know there is at least one SACK present already here.
@@ -4351,8 +4365,6 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
* If the sack array is full, forget about the last one.
*/
if (this_sack >= TCP_NUM_SACKS) {
- if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
- tcp_send_ack(sk);
this_sack--;
tp->rx_opt.num_sacks--;
sp--;
@@ -5272,15 +5284,13 @@ send_now:
if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
tp->compressed_ack_rcv_nxt = tp->rcv_nxt;
- if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
- NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
- tp->compressed_ack - TCP_FASTRETRANS_THRESH);
- tp->compressed_ack = 0;
+ tp->dup_ack_counter = 0;
}
-
- if (++tp->compressed_ack <= TCP_FASTRETRANS_THRESH)
+ if (tp->dup_ack_counter < TCP_FASTRETRANS_THRESH) {
+ tp->dup_ack_counter++;
goto send_now;
-
+ }
+ tp->compressed_ack++;
if (hrtimer_is_queued(&tp->compressed_ack_timer))
return;
@@ -5293,8 +5303,9 @@ send_now:
delay = min_t(unsigned long, sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns,
rtt * (NSEC_PER_USEC >> 3)/20);
sock_hold(sk);
- hrtimer_start(&tp->compressed_ack_timer, ns_to_ktime(delay),
- HRTIMER_MODE_REL_PINNED_SOFT);
+ hrtimer_start_range_ns(&tp->compressed_ack_timer, ns_to_ktime(delay),
+ sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns,
+ HRTIMER_MODE_REL_PINNED_SOFT);
}
static inline void tcp_ack_snd_check(struct sock *sk)
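For reference, hrtimer_start_range_ns() arms the timer to expire anywhere
in [delay, delay + slack], giving the hrtimer core room to coalesce
nearby wakeups; the new tcp_comp_sack_slack_ns sysctl supplies the slack
(its default is set in the tcp_ipv4.c hunk further down). Reduced to a
sketch with illustrative values:

/* expire between 1 ms and 1 ms + 100 us from now (values made up) */
hrtimer_start_range_ns(&tp->compressed_ack_timer,
		       ns_to_ktime(NSEC_PER_MSEC),
		       100 * NSEC_PER_USEC,
		       HRTIMER_MODE_REL_PINNED_SOFT);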
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 83a5d24e13b8..ad6435ba6d72 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -403,6 +403,46 @@ void tcp_req_err(struct sock *sk, u32 seq, bool abort)
}
EXPORT_SYMBOL(tcp_req_err);
+/* TCP-LD (RFC 6069) logic */
+void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+ s32 remaining;
+ u32 delta_us;
+
+ if (sock_owned_by_user(sk))
+ return;
+
+ if (seq != tp->snd_una || !icsk->icsk_retransmits ||
+ !icsk->icsk_backoff)
+ return;
+
+ skb = tcp_rtx_queue_head(sk);
+ if (WARN_ON_ONCE(!skb))
+ return;
+
+ icsk->icsk_backoff--;
+ icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT;
+ icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
+
+ tcp_mstamp_refresh(tp);
+ delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
+ remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us);
+
+ if (remaining > 0) {
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+ remaining, TCP_RTO_MAX);
+ } else {
+ /* RTO revert clocked out retransmission.
+ * Will retransmit now.
+ */
+ tcp_retransmit_timer(sk);
+ }
+}
+EXPORT_SYMBOL(tcp_ld_RTO_revert);
+
/*
* This routine is called by the ICMP module when it gets some
* sort of error condition. If err < 0 then the socket should
@@ -419,27 +459,23 @@ EXPORT_SYMBOL(tcp_req_err);
*
*/
-int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+int tcp_v4_err(struct sk_buff *skb, u32 info)
{
- const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
- struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
- struct inet_connection_sock *icsk;
+ const struct iphdr *iph = (const struct iphdr *)skb->data;
+ struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
struct tcp_sock *tp;
struct inet_sock *inet;
- const int type = icmp_hdr(icmp_skb)->type;
- const int code = icmp_hdr(icmp_skb)->code;
+ const int type = icmp_hdr(skb)->type;
+ const int code = icmp_hdr(skb)->code;
struct sock *sk;
- struct sk_buff *skb;
struct request_sock *fastopen;
u32 seq, snd_una;
- s32 remaining;
- u32 delta_us;
int err;
- struct net *net = dev_net(icmp_skb->dev);
+ struct net *net = dev_net(skb->dev);
sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
th->dest, iph->saddr, ntohs(th->source),
- inet_iif(icmp_skb), 0);
+ inet_iif(skb), 0);
if (!sk) {
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
return -ENOENT;
@@ -476,7 +512,6 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
goto out;
}
- icsk = inet_csk(sk);
tp = tcp_sk(sk);
/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
fastopen = rcu_dereference(tp->fastopen_rsk);
@@ -490,7 +525,7 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
switch (type) {
case ICMP_REDIRECT:
if (!sock_owned_by_user(sk))
- do_redirect(icmp_skb, sk);
+ do_redirect(skb, sk);
goto out;
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */
@@ -521,41 +556,12 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
}
err = icmp_err_convert[code].errno;
- /* check if icmp_skb allows revert of backoff
- * (see draft-zimmermann-tcp-lcd) */
- if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
- break;
- if (seq != tp->snd_una || !icsk->icsk_retransmits ||
- !icsk->icsk_backoff || fastopen)
- break;
-
- if (sock_owned_by_user(sk))
- break;
-
- skb = tcp_rtx_queue_head(sk);
- if (WARN_ON_ONCE(!skb))
- break;
-
- icsk->icsk_backoff--;
- icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
- TCP_TIMEOUT_INIT;
- icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
-
-
- tcp_mstamp_refresh(tp);
- delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
- remaining = icsk->icsk_rto -
- usecs_to_jiffies(delta_us);
-
- if (remaining > 0) {
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- remaining, TCP_RTO_MAX);
- } else {
- /* RTO revert clocked out retransmission.
- * Will retransmit now */
- tcp_retransmit_timer(sk);
- }
-
+ /* Check if this ICMP message allows reverting the RTO backoff
+ * (see RFC 6069).
+ */
+ if (!fastopen &&
+ (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))
+ tcp_ld_RTO_revert(sk, seq);
break;
case ICMP_TIME_EXCEEDED:
err = EHOSTUNREACH;
@@ -573,6 +579,8 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
if (fastopen && !fastopen->sk)
break;
+ ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);
+
if (!sock_owned_by_user(sk)) {
sk->sk_err = err;
@@ -2780,6 +2788,7 @@ static int __net_init tcp_sk_init(struct net *net)
sizeof(init_net.ipv4.sysctl_tcp_wmem));
}
net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
+ net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
net->ipv4.sysctl_tcp_comp_sack_nr = 44;
net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
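Exporting tcp_ld_RTO_revert() suggests callers outside this file; a
sketch of what an ICMPv6-side user could look like (hypothetical, not
part of this hunk):

/* in an ICMPv6 error handler, mirroring the IPv4 condition above */
if (type == ICMPV6_DEST_UNREACH && code == ICMPV6_NOROUTE)
	tcp_ld_RTO_revert(sk, seq);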
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 7e40322cc5ec..495dda2449fe 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -774,7 +774,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
if (!child)
goto listen_overflow;
- if (own_req && sk_is_mptcp(child) && mptcp_sk_is_subflow(child)) {
+ if (own_req && rsk_drop_req(req)) {
reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
inet_csk_reqsk_queue_drop_and_put(sk, req);
return child;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2f45cde168c4..a50e1990a845 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -184,10 +184,10 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
{
struct tcp_sock *tp = tcp_sk(sk);
- if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) {
+ if (unlikely(tp->compressed_ack)) {
NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
- tp->compressed_ack - TCP_FASTRETRANS_THRESH);
- tp->compressed_ack = TCP_FASTRETRANS_THRESH;
+ tp->compressed_ack);
+ tp->compressed_ack = 0;
if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
__sock_put(sk);
}
@@ -2593,8 +2593,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
if (rto_delta_us > 0)
timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
- tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
- TCP_RTO_MAX, NULL);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX);
return true;
}
@@ -2772,8 +2771,12 @@ u32 __tcp_select_window(struct sock *sk)
int mss = icsk->icsk_ack.rcv_mss;
int free_space = tcp_space(sk);
int allowed_space = tcp_full_space(sk);
- int full_space = min_t(int, tp->window_clamp, allowed_space);
- int window;
+ int full_space, window;
+
+ if (sk_is_mptcp(sk))
+ mptcp_space(sk, &free_space, &allowed_space);
+
+ full_space = min_t(int, tp->window_clamp, allowed_space);
if (unlikely(mss > full_space)) {
mss = full_space;
@@ -3109,6 +3112,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
const struct inet_connection_sock *icsk = inet_csk(sk);
struct sk_buff *skb, *rtx_head, *hole = NULL;
struct tcp_sock *tp = tcp_sk(sk);
+ bool rearm_timer = false;
u32 max_segs;
int mib_idx;
@@ -3131,7 +3135,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
if (segs <= 0)
- return;
+ break;
sacked = TCP_SKB_CB(skb)->sacked;
/* In case tcp_shift_skb_data() has aggregated large skbs,
* we need to make sure we are not sending too big TSO packets
@@ -3156,10 +3160,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
continue;
if (tcp_small_queue_check(sk, skb, 1))
- return;
+ break;
if (tcp_retransmit_skb(sk, skb, segs))
- return;
+ break;
NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
@@ -3168,11 +3172,13 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
if (skb == rtx_head &&
icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
- tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- inet_csk(sk)->icsk_rto,
- TCP_RTO_MAX,
- skb);
+ rearm_timer = true;
+
}
+ if (rearm_timer)
+ tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+ inet_csk(sk)->icsk_rto,
+ TCP_RTO_MAX);
}
/* We allow exceeding memory limits for FIN packets to expedite
@@ -3903,7 +3909,7 @@ void tcp_send_probe0(struct sock *sk)
*/
timeout = TCP_RESOURCE_PROBE_INTERVAL;
}
- tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
}
int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
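The retransmit-queue loop above now records that the timer needs
rearming and does it once after the loop; the early returns become
breaks so the rearm still happens on bail-out. Reduced to a sketch (the
predicates are placeholders, not real kernel symbols):

bool rearm_timer = false;

skb_rbtree_walk_from(skb) {
	if (needs_rearm(skb))		/* placeholder predicate */
		rearm_timer = true;
	if (budget_exhausted)		/* placeholder: was an early return */
		break;
}
if (rearm_timer)
	tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
			     inet_csk(sk)->icsk_rto, TCP_RTO_MAX);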
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c3f26dcd6704..ada046f425d2 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -753,8 +753,14 @@ static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
bh_lock_sock(sk);
if (!sock_owned_by_user(sk)) {
- if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
+ if (tp->compressed_ack) {
+ /* Since one ACK still has to be sent,
+ * subtract one from tp->compressed_ack to keep
+ * LINUX_MIB_TCPACKCOMPRESSED accurate.
+ */
+ tp->compressed_ack--;
tcp_send_ack(sk);
+ }
} else {
if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
&sk->sk_tsq_flags))
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 32564b350823..1b7ebbcae497 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -112,6 +112,9 @@
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
#include <net/udp_tunnel.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6_stubs.h>
+#endif
struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);
@@ -2563,7 +2566,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
#ifdef CONFIG_XFRM
case UDP_ENCAP_ESPINUDP:
case UDP_ENCAP_ESPINUDP_NON_IKE:
- up->encap_rcv = xfrm4_udp_encap_rcv;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+ up->encap_rcv = ipv6_stub->xfrm6_udp_encap_rcv;
+ else
+#endif
+ up->encap_rcv = xfrm4_udp_encap_rcv;
#endif
fallthrough;
case UDP_ENCAP_L2TPINUDP:
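With the ipv6_stub dispatch above, ESP-in-UDP decapsulation can now be
requested on AF_INET6 UDP sockets the same way it long has been on IPv4.
Userspace usage sketch (hypothetical program):

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/udp.h>		/* UDP_ENCAP, UDP_ENCAP_ESPINUDP */

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	int encap = UDP_ENCAP_ESPINUDP;

	if (fd < 0)
		return 1;
	return setsockopt(fd, IPPROTO_UDP, UDP_ENCAP,
			  &encap, sizeof(encap)) ? 1 : 0;
}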
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index 150e6f0fdbf5..3eecba0874aa 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -22,9 +22,7 @@ int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
goto error;
if (cfg->bind_ifindex) {
- err = kernel_setsockopt(sock, SOL_SOCKET, SO_BINDTOIFINDEX,
- (void *)&cfg->bind_ifindex,
- sizeof(cfg->bind_ifindex));
+ err = sock_bindtoindex(sock->sk, cfg->bind_ifindex, true);
if (err < 0)
goto error;
}
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index f8de2482a529..ad2afeef4f10 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -18,11 +18,6 @@
#include <net/ip.h>
#include <net/xfrm.h>
-int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
-{
- return xfrm4_extract_header(skb);
-}
-
static int xfrm4_rcv_encap_finish2(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 30ddb9dc9398..3cff51ba72bb 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -14,77 +14,18 @@
#include <net/xfrm.h>
#include <net/icmp.h>
-static int xfrm4_tunnel_check_size(struct sk_buff *skb)
-{
- int mtu, ret = 0;
-
- if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
- goto out;
-
- if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
- goto out;
-
- mtu = dst_mtu(skb_dst(skb));
- if ((!skb_is_gso(skb) && skb->len > mtu) ||
- (skb_is_gso(skb) &&
- !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
- skb->protocol = htons(ETH_P_IP);
-
- if (skb->sk)
- xfrm_local_error(skb, mtu);
- else
- icmp_send(skb, ICMP_DEST_UNREACH,
- ICMP_FRAG_NEEDED, htonl(mtu));
- ret = -EMSGSIZE;
- }
-out:
- return ret;
-}
-
-int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
-{
- int err;
-
- err = xfrm4_tunnel_check_size(skb);
- if (err)
- return err;
-
- XFRM_MODE_SKB_CB(skb)->protocol = ip_hdr(skb)->protocol;
-
- return xfrm4_extract_header(skb);
-}
-
-int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb)
-{
- memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-
- IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
-
- return xfrm_output(sk, skb);
-}
-
static int __xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+#ifdef CONFIG_NETFILTER
struct xfrm_state *x = skb_dst(skb)->xfrm;
- const struct xfrm_state_afinfo *afinfo;
- int ret = -EAFNOSUPPORT;
-#ifdef CONFIG_NETFILTER
if (!x) {
IPCB(skb)->flags |= IPSKB_REROUTED;
return dst_output(net, sk, skb);
}
#endif
- rcu_read_lock();
- afinfo = xfrm_state_afinfo_get_rcu(x->outer_mode.family);
- if (likely(afinfo))
- ret = afinfo->output_finish(sk, skb);
- else
- kfree_skb(skb);
- rcu_read_unlock();
-
- return ret;
+ return xfrm_output(sk, skb);
}
int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index f8ed3c3bb928..87d4db591488 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -8,36 +8,12 @@
*
*/
-#include <net/ip.h>
#include <net/xfrm.h>
-#include <linux/pfkeyv2.h>
-#include <linux/ipsec.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/export.h>
-
-int xfrm4_extract_header(struct sk_buff *skb)
-{
- const struct iphdr *iph = ip_hdr(skb);
-
- XFRM_MODE_SKB_CB(skb)->ihl = sizeof(*iph);
- XFRM_MODE_SKB_CB(skb)->id = iph->id;
- XFRM_MODE_SKB_CB(skb)->frag_off = iph->frag_off;
- XFRM_MODE_SKB_CB(skb)->tos = iph->tos;
- XFRM_MODE_SKB_CB(skb)->ttl = iph->ttl;
- XFRM_MODE_SKB_CB(skb)->optlen = iph->ihl * 4 - sizeof(*iph);
- memset(XFRM_MODE_SKB_CB(skb)->flow_lbl, 0,
- sizeof(XFRM_MODE_SKB_CB(skb)->flow_lbl));
-
- return 0;
-}
static struct xfrm_state_afinfo xfrm4_state_afinfo = {
.family = AF_INET,
.proto = IPPROTO_IPIP,
.output = xfrm4_output,
- .output_finish = xfrm4_output_finish,
- .extract_input = xfrm4_extract_input,
- .extract_output = xfrm4_extract_output,
.transport_finish = xfrm4_transport_finish,
.local_error = xfrm4_local_error,
};