Diffstat (limited to 'net')
-rw-r--r--  net/9p/client.c | 44
-rw-r--r--  net/9p/error.c | 21
-rw-r--r--  net/9p/trans_fd.c | 73
-rw-r--r--  net/appletalk/aarp.c | 4
-rw-r--r--  net/atm/clip.c | 2
-rw-r--r--  net/atm/lec.c | 26
-rw-r--r--  net/atm/mpc.c | 4
-rw-r--r--  net/ax25/af_ax25.c | 10
-rw-r--r--  net/ax25/ax25_ds_timer.c | 2
-rw-r--r--  net/ax25/ax25_subr.c | 10
-rw-r--r--  net/ax25/ax25_timer.c | 14
-rw-r--r--  net/batman-adv/Kconfig | 2
-rw-r--r--  net/batman-adv/tp_meter.c | 6
-rw-r--r--  net/bluetooth/hidp/core.c | 2
-rw-r--r--  net/bluetooth/rfcomm/core.c | 4
-rw-r--r--  net/bridge/br_mdb.c | 6
-rw-r--r--  net/bridge/br_multicast.c | 44
-rw-r--r--  net/bridge/br_stp.c | 14
-rw-r--r--  net/bridge/br_stp_if.c | 12
-rw-r--r--  net/can/af_can.c | 2
-rw-r--r--  net/ceph/Kconfig | 2
-rw-r--r--  net/core/Makefile | 2
-rw-r--r--  net/core/dev.c | 15
-rw-r--r--  net/core/dev_api.c | 8
-rw-r--r--  net/core/devmem.c | 62
-rw-r--r--  net/core/drop_monitor.c | 8
-rw-r--r--  net/core/dst.c | 8
-rw-r--r--  net/core/filter.c | 6
-rw-r--r--  net/core/gen_estimator.c | 2
-rw-r--r--  net/core/lock_debug.c (renamed from net/core/rtnl_net_debug.c) | 16
-rw-r--r--  net/core/neighbour.c | 10
-rw-r--r--  net/core/netdev-genl.c | 12
-rw-r--r--  net/core/netdev_rx_queue.c | 53
-rw-r--r--  net/core/rtnetlink.c | 11
-rw-r--r--  net/core/sock.c | 4
-rw-r--r--  net/ipv4/devinet.c | 2
-rw-r--r--  net/ipv4/igmp.c | 10
-rw-r--r--  net/ipv4/inet_fragment.c | 6
-rw-r--r--  net/ipv4/ip_tunnel_core.c | 4
-rw-r--r--  net/ipv4/ipmr.c | 2
-rw-r--r--  net/ipv4/tcp.c | 18
-rw-r--r--  net/ipv4/tcp_fastopen.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 6
-rw-r--r--  net/ipv4/udp.c | 42
-rw-r--r--  net/ipv6/addrconf.c | 54
-rw-r--r--  net/ipv6/calipso.c | 21
-rw-r--r--  net/ipv6/ip6_fib.c | 4
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 2
-rw-r--r--  net/ipv6/ip6mr.c | 2
-rw-r--r--  net/ipv6/route.c | 42
-rw-r--r--  net/iucv/af_iucv.c | 3
-rw-r--r--  net/iucv/iucv.c | 3
-rw-r--r--  net/lapb/lapb_iface.c | 4
-rw-r--r--  net/lapb/lapb_timer.c | 8
-rw-r--r--  net/llc/llc_c_ac.c | 18
-rw-r--r--  net/llc/llc_conn.c | 16
-rw-r--r--  net/mac80211/agg-rx.c | 4
-rw-r--r--  net/mac80211/agg-tx.c | 6
-rw-r--r--  net/mac80211/ibss.c | 2
-rw-r--r--  net/mac80211/iface.c | 2
-rw-r--r--  net/mac80211/led.c | 2
-rw-r--r--  net/mac80211/mesh.c | 8
-rw-r--r--  net/mac80211/mesh_plink.c | 12
-rw-r--r--  net/mac80211/mlme.c | 16
-rw-r--r--  net/mac80211/ocb.c | 2
-rw-r--r--  net/mac80211/offchannel.c | 6
-rw-r--r--  net/mac80211/pm.c | 4
-rw-r--r--  net/mac80211/rx.c | 2
-rw-r--r--  net/mac80211/sta_info.c | 2
-rw-r--r--  net/mctp/af_mctp.c | 2
-rw-r--r--  net/mpls/internal.h | 4
-rw-r--r--  net/mptcp/pm.c | 2
-rw-r--r--  net/mptcp/subflow.c | 15
-rw-r--r--  net/ncsi/ncsi-manage.c | 4
-rw-r--r--  net/netfilter/Kconfig | 4
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_gen.h | 2
-rw-r--r--  net/netfilter/ipvs/Kconfig | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c | 6
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 10
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 4
-rw-r--r--  net/netfilter/nf_tables_api.c | 4
-rw-r--r--  net/netfilter/nfnetlink_log.c | 2
-rw-r--r--  net/netfilter/nft_set_hash.c | 3
-rw-r--r--  net/netfilter/nft_tunnel.c | 6
-rw-r--r--  net/netrom/nr_loopback.c | 2
-rw-r--r--  net/nfc/core.c | 6
-rw-r--r--  net/nfc/hci/core.c | 4
-rw-r--r--  net/nfc/hci/llc_shdlc.c | 8
-rw-r--r--  net/nfc/llcp_core.c | 6
-rw-r--r--  net/nfc/nci/core.c | 6
-rw-r--r--  net/nfc/nci/data.c | 2
-rw-r--r--  net/nfc/nci/rsp.c | 2
-rw-r--r--  net/openvswitch/Kconfig | 2
-rw-r--r--  net/openvswitch/actions.c | 6
-rw-r--r--  net/packet/af_packet.c | 2
-rw-r--r--  net/rose/rose_link.c | 8
-rw-r--r--  net/rose/rose_loopback.c | 2
-rw-r--r--  net/rose/rose_route.c | 4
-rw-r--r--  net/rxrpc/call_event.c | 2
-rw-r--r--  net/rxrpc/call_object.c | 4
-rw-r--r--  net/rxrpc/conn_client.c | 2
-rw-r--r--  net/rxrpc/conn_object.c | 8
-rw-r--r--  net/rxrpc/net_ns.c | 4
-rw-r--r--  net/sched/Kconfig | 2
-rw-r--r--  net/sched/act_tunnel_key.c | 2
-rw-r--r--  net/sched/cls_flower.c | 2
-rw-r--r--  net/sched/sch_api.c | 73
-rw-r--r--  net/sched/sch_fq_pie.c | 2
-rw-r--r--  net/sched/sch_generic.c | 2
-rw-r--r--  net/sched/sch_pie.c | 2
-rw-r--r--  net/sched/sch_red.c | 4
-rw-r--r--  net/sched/sch_sfq.c | 4
-rw-r--r--  net/sched/sch_skbprio.c | 3
-rw-r--r--  net/sctp/Kconfig | 2
-rw-r--r--  net/sctp/associola.c | 4
-rw-r--r--  net/sctp/input.c | 2
-rw-r--r--  net/sctp/output.c | 2
-rw-r--r--  net/sctp/outqueue.c | 5
-rw-r--r--  net/sctp/protocol.c | 2
-rw-r--r--  net/sctp/sm_sideeffect.c | 6
-rw-r--r--  net/sctp/stream.c | 6
-rw-r--r--  net/sctp/sysctl.c | 4
-rw-r--r--  net/sctp/transport.c | 12
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c | 144
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_internal.h | 7
-rw-r--r--  net/sunrpc/cache.c | 6
-rw-r--r--  net/sunrpc/clnt.c | 33
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 5
-rw-r--r--  net/sunrpc/sched.c | 2
-rw-r--r--  net/sunrpc/sysfs.c | 202
-rw-r--r--  net/sunrpc/xprt.c | 4
-rw-r--r--  net/sunrpc/xprtmultipath.c | 21
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c | 3
-rw-r--r--  net/tipc/node.c | 2
-rw-r--r--  net/tipc/subscr.c | 2
-rw-r--r--  net/tls/tls_device_fallback.c | 31
-rw-r--r--  net/vmw_vsock/af_vsock.c | 6
-rw-r--r--  net/wireless/core.c | 6
-rw-r--r--  net/x25/x25_link.c | 2
-rw-r--r--  net/x25/x25_timer.c | 4
-rw-r--r--  net/xdp/xsk.c | 5
-rw-r--r--  net/xfrm/xfrm_algo.c | 7
-rw-r--r--  net/xfrm/xfrm_ipcomp.c | 10
-rw-r--r--  net/xfrm/xfrm_state.c | 2
146 files changed, 1107 insertions, 981 deletions
diff --git a/net/9p/client.c b/net/9p/client.c
index 09f8ced9f8bb..61461b9fa134 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1548,7 +1548,8 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
int count = iov_iter_count(to);
- int rsize, received, non_zc = 0;
+ u32 rsize, received;
+ bool non_zc = false;
char *dataptr;
*err = 0;
@@ -1571,7 +1572,7 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
0, 11, "dqd", fid->fid,
offset, rsize);
} else {
- non_zc = 1;
+ non_zc = true;
req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
rsize);
}
@@ -1592,11 +1593,13 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
return 0;
}
if (rsize < received) {
- pr_err("bogus RREAD count (%d > %d)\n", received, rsize);
- received = rsize;
+ pr_err("bogus RREAD count (%u > %u)\n", received, rsize);
+ *err = -EIO;
+ p9_req_put(clnt, req);
+ return 0;
}
- p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", received);
+ p9_debug(P9_DEBUG_9P, "<<< RREAD count %u\n", received);
if (non_zc) {
int n = copy_to_iter(dataptr, received, to);
@@ -1623,9 +1626,9 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
*err = 0;
while (iov_iter_count(from)) {
- int count = iov_iter_count(from);
- int rsize = fid->iounit;
- int written;
+ size_t count = iov_iter_count(from);
+ u32 rsize = fid->iounit;
+ u32 written;
if (!rsize || rsize > clnt->msize - P9_IOHDRSZ)
rsize = clnt->msize - P9_IOHDRSZ;
@@ -1633,7 +1636,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
if (count < rsize)
rsize = count;
- p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d (/%d)\n",
+ p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %u (/%zu)\n",
fid->fid, offset, rsize, count);
/* Don't bother zerocopy for small IO (< 1024) */
@@ -1659,11 +1662,14 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
break;
}
if (rsize < written) {
- pr_err("bogus RWRITE count (%d > %d)\n", written, rsize);
- written = rsize;
+ pr_err("bogus RWRITE count (%u > %u)\n", written, rsize);
+ *err = -EIO;
+ iov_iter_revert(from, count - iov_iter_count(from));
+ p9_req_put(clnt, req);
+ break;
}
- p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", written);
+ p9_debug(P9_DEBUG_9P, "<<< RWRITE count %u\n", written);
p9_req_put(clnt, req);
iov_iter_revert(from, count - written - iov_iter_count(from));
@@ -1712,7 +1718,7 @@ p9_client_write_subreq(struct netfs_io_subrequest *subreq)
if (written > len) {
pr_err("bogus RWRITE count (%d > %u)\n", written, len);
- written = len;
+ written = -EIO;
}
p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", len);
@@ -2098,7 +2104,8 @@ EXPORT_SYMBOL_GPL(p9_client_xattrcreate);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
{
- int err, rsize, non_zc = 0;
+ int err, non_zc = 0;
+ u32 rsize;
struct p9_client *clnt;
struct p9_req_t *req;
char *dataptr;
@@ -2107,7 +2114,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
iov_iter_kvec(&to, ITER_DEST, &kv, 1, count);
- p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
+ p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %u\n",
fid->fid, offset, count);
clnt = fid->clnt;
@@ -2142,11 +2149,12 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
goto free_and_error;
}
if (rsize < count) {
- pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
- count = rsize;
+ pr_err("bogus RREADDIR count (%u > %u)\n", count, rsize);
+ err = -EIO;
+ goto free_and_error;
}
- p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
+ p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %u\n", count);
if (non_zc)
memmove(data, dataptr, count);
diff --git a/net/9p/error.c b/net/9p/error.c
index 8da744494b68..8ba8afc91482 100644
--- a/net/9p/error.c
+++ b/net/9p/error.c
@@ -16,6 +16,7 @@
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/errno.h>
+#include <linux/hashtable.h>
#include <net/9p/9p.h>
/**
@@ -33,8 +34,8 @@ struct errormap {
struct hlist_node list;
};
-#define ERRHASHSZ 32
-static struct hlist_head hash_errmap[ERRHASHSZ];
+#define ERRHASH_BITS 5
+static DEFINE_HASHTABLE(hash_errmap, ERRHASH_BITS);
/* FixMe - reduce to a reasonable size */
static struct errormap errmap[] = {
@@ -176,18 +177,14 @@ static struct errormap errmap[] = {
int p9_error_init(void)
{
struct errormap *c;
- int bucket;
-
- /* initialize hash table */
- for (bucket = 0; bucket < ERRHASHSZ; bucket++)
- INIT_HLIST_HEAD(&hash_errmap[bucket]);
+ u32 hash;
/* load initial error map into hash table */
for (c = errmap; c->name; c++) {
c->namelen = strlen(c->name);
- bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;
+ hash = jhash(c->name, c->namelen, 0);
INIT_HLIST_NODE(&c->list);
- hlist_add_head(&c->list, &hash_errmap[bucket]);
+ hash_add(hash_errmap, &c->list, hash);
}
return 1;
@@ -205,12 +202,12 @@ int p9_errstr2errno(char *errstr, int len)
{
int errno;
struct errormap *c;
- int bucket;
+ u32 hash;
errno = 0;
c = NULL;
- bucket = jhash(errstr, len, 0) % ERRHASHSZ;
- hlist_for_each_entry(c, &hash_errmap[bucket], list) {
+ hash = jhash(errstr, len, 0);
+ hash_for_each_possible(hash_errmap, c, list, hash) {
if (c->namelen == len && !memcmp(c->name, errstr, len)) {
errno = c->val;
break;
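
For context on the error.c conversion above: <linux/hashtable.h> statically initializes the bucket array and reduces the full hash value to a bucket index internally, so the open-coded modulo and the INIT_HLIST_HEAD() loop become unnecessary. A minimal sketch of the pattern, simplified and not the exact error-map code:

```c
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/string.h>

#define ERRHASH_BITS 5			/* 2^5 = 32 buckets, as before */
static DEFINE_HASHTABLE(err_table, ERRHASH_BITS);

struct err_entry {
	const char *name;
	size_t namelen;
	int val;
	struct hlist_node node;
};

static void err_insert(struct err_entry *e)
{
	e->namelen = strlen(e->name);
	/* hash_add() masks the 32-bit key down to the bucket count */
	hash_add(err_table, &e->node, jhash(e->name, e->namelen, 0));
}

static int err_lookup(const char *name, size_t len)
{
	struct err_entry *e;

	hash_for_each_possible(err_table, e, node, jhash(name, len, 0)) {
		if (e->namelen == len && !memcmp(e->name, name, len))
			return e->val;
	}
	return 0;
}
```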
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 196060dc6138..339ec4e54778 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
+#include <linux/in6.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
@@ -191,12 +192,13 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
spin_lock(&m->req_lock);
- if (m->err) {
+ if (READ_ONCE(m->err)) {
spin_unlock(&m->req_lock);
return;
}
- m->err = err;
+ WRITE_ONCE(m->err, err);
+ ASSERT_EXCLUSIVE_WRITER(m->err);
list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
list_move(&req->req_list, &cancel_list);
@@ -283,7 +285,7 @@ static void p9_read_work(struct work_struct *work)
m = container_of(work, struct p9_conn, rq);
- if (m->err < 0)
+ if (READ_ONCE(m->err) < 0)
return;
p9_debug(P9_DEBUG_TRANS, "start mux %p pos %zd\n", m, m->rc.offset);
@@ -450,7 +452,7 @@ static void p9_write_work(struct work_struct *work)
m = container_of(work, struct p9_conn, wq);
- if (m->err < 0) {
+ if (READ_ONCE(m->err) < 0) {
clear_bit(Wworksched, &m->wsched);
return;
}
@@ -622,7 +624,7 @@ static void p9_poll_mux(struct p9_conn *m)
__poll_t n;
int err = -ECONNRESET;
- if (m->err < 0)
+ if (READ_ONCE(m->err) < 0)
return;
n = p9_fd_poll(m->client, NULL, &err);
@@ -665,6 +667,7 @@ static void p9_poll_mux(struct p9_conn *m)
static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
{
__poll_t n;
+ int err;
struct p9_trans_fd *ts = client->trans;
struct p9_conn *m = &ts->conn;
@@ -673,9 +676,10 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
spin_lock(&m->req_lock);
- if (m->err < 0) {
+ err = READ_ONCE(m->err);
+ if (err < 0) {
spin_unlock(&m->req_lock);
- return m->err;
+ return err;
}
WRITE_ONCE(req->status, REQ_STATUS_UNSENT);
@@ -954,64 +958,55 @@ static void p9_fd_close(struct p9_client *client)
kfree(ts);
}
-/*
- * stolen from NFS - maybe should be made a generic function?
- */
-static inline int valid_ipaddr4(const char *buf)
-{
- int rc, count, in[4];
-
- rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
- if (rc != 4)
- return -EINVAL;
- for (count = 0; count < 4; count++) {
- if (in[count] > 255)
- return -EINVAL;
- }
- return 0;
-}
-
static int p9_bind_privport(struct socket *sock)
{
- struct sockaddr_in cl;
+ struct sockaddr_storage stor = { 0 };
int port, err = -EINVAL;
- memset(&cl, 0, sizeof(cl));
- cl.sin_family = AF_INET;
- cl.sin_addr.s_addr = htonl(INADDR_ANY);
+ stor.ss_family = sock->ops->family;
+ if (stor.ss_family == AF_INET)
+ ((struct sockaddr_in *)&stor)->sin_addr.s_addr = htonl(INADDR_ANY);
+ else
+ ((struct sockaddr_in6 *)&stor)->sin6_addr = in6addr_any;
for (port = p9_ipport_resv_max; port >= p9_ipport_resv_min; port--) {
- cl.sin_port = htons((ushort)port);
- err = kernel_bind(sock, (struct sockaddr *)&cl, sizeof(cl));
+ if (stor.ss_family == AF_INET)
+ ((struct sockaddr_in *)&stor)->sin_port = htons((ushort)port);
+ else
+ ((struct sockaddr_in6 *)&stor)->sin6_port = htons((ushort)port);
+ err = kernel_bind(sock, (struct sockaddr *)&stor, sizeof(stor));
if (err != -EADDRINUSE)
break;
}
return err;
}
-
static int
p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
{
int err;
+ char port_str[6];
struct socket *csocket;
- struct sockaddr_in sin_server;
+ struct sockaddr_storage stor = { 0 };
struct p9_fd_opts opts;
err = parse_opts(args, &opts);
if (err < 0)
return err;
- if (addr == NULL || valid_ipaddr4(addr) < 0)
+ if (!addr)
return -EINVAL;
+ sprintf(port_str, "%u", opts.port);
+ err = inet_pton_with_scope(current->nsproxy->net_ns, AF_UNSPEC, addr,
+ port_str, &stor);
+ if (err < 0)
+ return err;
+
csocket = NULL;
client->trans_opts.tcp.port = opts.port;
client->trans_opts.tcp.privport = opts.privport;
- sin_server.sin_family = AF_INET;
- sin_server.sin_addr.s_addr = in_aton(addr);
- sin_server.sin_port = htons(opts.port);
- err = __sock_create(current->nsproxy->net_ns, PF_INET,
+ err = __sock_create(current->nsproxy->net_ns, stor.ss_family,
SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
if (err) {
pr_err("%s (%d): problem creating socket\n",
@@ -1030,8 +1025,8 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
}
err = READ_ONCE(csocket->ops)->connect(csocket,
- (struct sockaddr *)&sin_server,
- sizeof(struct sockaddr_in), 0);
+ (struct sockaddr *)&stor,
+ sizeof(stor), 0);
if (err < 0) {
pr_err("%s (%d): problem connecting socket to %s\n",
__func__, task_pid_nr(current), addr);
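
The m->err annotations in the trans_fd.c hunks above reflect that the field is written under req_lock but read by the read/write workers and the poll path without it. A minimal sketch of the assumed idiom, using generic names rather than the 9p structures (spin_lock_init() omitted):

```c
#include <linux/compiler.h>
#include <linux/spinlock.h>

struct mux {
	spinlock_t req_lock;
	int err;	/* written once under req_lock, read locklessly */
};

static void mux_cancel(struct mux *m, int err)
{
	spin_lock(&m->req_lock);
	if (READ_ONCE(m->err)) {	/* already failed, keep first error */
		spin_unlock(&m->req_lock);
		return;
	}
	WRITE_ONCE(m->err, err);	/* pairs with the lockless readers */
	spin_unlock(&m->req_lock);
}

static bool mux_failed(const struct mux *m)
{
	/* workers poll this without taking req_lock */
	return READ_ONCE(m->err) < 0;
}
```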
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 05cbb3c227c5..9c787e2e4b17 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -856,7 +856,7 @@ int __init aarp_proto_init(void)
add_timer(&aarp_timer);
rc = register_netdevice_notifier(&aarp_notifier);
if (rc) {
- del_timer_sync(&aarp_timer);
+ timer_delete_sync(&aarp_timer);
unregister_snap_client(aarp_dl);
}
return rc;
@@ -1011,7 +1011,7 @@ const struct seq_operations aarp_seq_ops = {
/* General module cleanup. Called from cleanup_module() in ddp.c. */
void aarp_cleanup_module(void)
{
- del_timer_sync(&aarp_timer);
+ timer_delete_sync(&aarp_timer);
unregister_netdevice_notifier(&aarp_notifier);
unregister_snap_client(aarp_dl);
aarp_purge();
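
The same mechanical substitution recurs throughout the rest of this series: del_timer()/del_timer_sync() give way to timer_delete()/timer_delete_sync(). A minimal sketch of the assumed relationship, paraphrased from include/linux/timer.h; the old names are thin wrappers, so behaviour is unchanged:

```c
#include <linux/timer.h>

/* Both variants return nonzero if a pending timer was deactivated. */
static inline int old_del_timer(struct timer_list *timer)
{
	return timer_delete(timer);		/* may race with the handler */
}

static inline int old_del_timer_sync(struct timer_list *timer)
{
	return timer_delete_sync(timer);	/* waits for a running handler */
}
```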
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 42b910cb4e8e..61b5b700817d 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -904,7 +904,7 @@ static void atm_clip_exit_noproc(void)
/* First, stop the idle timer, so it stops banging
* on the table.
*/
- del_timer_sync(&idle_timer);
+ timer_delete_sync(&idle_timer);
dev = clip_devs;
while (dev) {
diff --git a/net/atm/lec.c b/net/atm/lec.c
index a948dd47c3f3..ded2f0df2ee6 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1302,7 +1302,7 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
return -1;
hlist_del(&to_remove->next);
- del_timer(&to_remove->timer);
+ timer_delete(&to_remove->timer);
/*
* If this is the only MAC connected to this VCC,
@@ -1482,7 +1482,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_empty_ones, next) {
- del_timer_sync(&entry->timer);
+ timer_delete_sync(&entry->timer);
lec_arp_clear_vccs(entry);
hlist_del(&entry->next);
lec_arp_put(entry);
@@ -1491,7 +1491,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
hlist_for_each_entry_safe(entry, next,
&priv->lec_no_forward, next) {
- del_timer_sync(&entry->timer);
+ timer_delete_sync(&entry->timer);
lec_arp_clear_vccs(entry);
hlist_del(&entry->next);
lec_arp_put(entry);
@@ -1575,7 +1575,7 @@ static void lec_arp_expire_vcc(struct timer_list *t)
struct lec_arp_table *to_remove = from_timer(to_remove, t, timer);
struct lec_priv *priv = to_remove->priv;
- del_timer(&to_remove->timer);
+ timer_delete(&to_remove->timer);
pr_debug("%p %p: vpi:%d vci:%d\n",
to_remove, priv,
@@ -1843,16 +1843,16 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
&priv->lec_arp_empty_ones, next) {
if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) {
hlist_del(&entry->next);
- del_timer(&entry->timer);
+ timer_delete(&entry->timer);
tmp = lec_arp_find(priv, mac_addr);
if (tmp) {
- del_timer(&tmp->timer);
+ timer_delete(&tmp->timer);
tmp->status = ESI_FORWARD_DIRECT;
memcpy(tmp->atm_addr, atm_addr, ATM_ESA_LEN);
tmp->vcc = entry->vcc;
tmp->old_push = entry->old_push;
tmp->last_used = jiffies;
- del_timer(&entry->timer);
+ timer_delete(&entry->timer);
lec_arp_put(entry);
entry = tmp;
} else {
@@ -1883,7 +1883,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
/* Temporary, changes before end of function */
}
memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN);
- del_timer(&entry->timer);
+ timer_delete(&entry->timer);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
hlist_for_each_entry(tmp,
&priv->lec_arp_tables[i], next) {
@@ -1946,7 +1946,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
entry = make_entry(priv, bus_mac);
if (entry == NULL)
goto out;
- del_timer(&entry->timer);
+ timer_delete(&entry->timer);
memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
entry->recv_vcc = vcc;
entry->old_recv_push = old_push;
@@ -1988,7 +1988,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
entry->recv_vcc ? entry->recv_vcc->
vci : 0);
found_entry = 1;
- del_timer(&entry->timer);
+ timer_delete(&entry->timer);
entry->vcc = vcc;
entry->old_push = old_push;
if (entry->status == ESI_VC_PENDING) {
@@ -2172,7 +2172,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
&priv->lec_arp_empty_ones, next) {
if (entry->vcc == vcc) {
lec_arp_clear_vccs(entry);
- del_timer(&entry->timer);
+ timer_delete(&entry->timer);
hlist_del(&entry->next);
lec_arp_put(entry);
}
@@ -2182,7 +2182,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
&priv->lec_no_forward, next) {
if (entry->recv_vcc == vcc) {
lec_arp_clear_vccs(entry);
- del_timer(&entry->timer);
+ timer_delete(&entry->timer);
hlist_del(&entry->next);
lec_arp_put(entry);
}
@@ -2215,7 +2215,7 @@ lec_arp_check_empties(struct lec_priv *priv,
hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_empty_ones, next) {
if (vcc == entry->vcc) {
- del_timer(&entry->timer);
+ timer_delete(&entry->timer);
ether_addr_copy(entry->mac_addr, src);
entry->status = ESI_FORWARD_DIRECT;
entry->last_used = jiffies;
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 12da0269275c..f6b447bba329 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -804,7 +804,7 @@ static int atm_mpoa_mpoad_attach(struct atm_vcc *vcc, int arg)
/* This lets us now how our LECs are doing */
err = register_netdevice_notifier(&mpoa_notifier);
if (err < 0) {
- del_timer(&mpc_timer);
+ timer_delete(&mpc_timer);
return err;
}
}
@@ -1495,7 +1495,7 @@ static void __exit atm_mpoa_cleanup(void)
mpc_proc_clean();
- del_timer_sync(&mpc_timer);
+ timer_delete_sync(&mpc_timer);
unregister_netdevice_notifier(&mpoa_notifier);
deregister_atm_ioctl(&atm_ioctl_ops);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 3ee7dba34310..b790bb92ed1c 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1071,11 +1071,11 @@ static int ax25_release(struct socket *sock)
}
if (ax25_dev) {
if (!ax25_dev->device_up) {
- del_timer_sync(&ax25->timer);
- del_timer_sync(&ax25->t1timer);
- del_timer_sync(&ax25->t2timer);
- del_timer_sync(&ax25->t3timer);
- del_timer_sync(&ax25->idletimer);
+ timer_delete_sync(&ax25->timer);
+ timer_delete_sync(&ax25->t1timer);
+ timer_delete_sync(&ax25->t2timer);
+ timer_delete_sync(&ax25->t3timer);
+ timer_delete_sync(&ax25->idletimer);
}
netdev_put(ax25_dev->dev, &ax25->dev_tracker);
ax25_dev_put(ax25_dev);
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index c4f8adbf8144..8d9fba069001 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -44,7 +44,7 @@ void ax25_ds_setup_timer(ax25_dev *ax25_dev)
void ax25_ds_del_timer(ax25_dev *ax25_dev)
{
if (ax25_dev)
- del_timer(&ax25_dev->dama.slave_timer);
+ timer_delete(&ax25_dev->dama.slave_timer);
}
void ax25_ds_set_timer(ax25_dev *ax25_dev)
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 9ff98f46dc6b..bff4b203a893 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -262,11 +262,11 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
ax25_clear_queues(ax25);
if (reason == ENETUNREACH) {
- del_timer_sync(&ax25->timer);
- del_timer_sync(&ax25->t1timer);
- del_timer_sync(&ax25->t2timer);
- del_timer_sync(&ax25->t3timer);
- del_timer_sync(&ax25->idletimer);
+ timer_delete_sync(&ax25->timer);
+ timer_delete_sync(&ax25->t1timer);
+ timer_delete_sync(&ax25->t2timer);
+ timer_delete_sync(&ax25->t3timer);
+ timer_delete_sync(&ax25->idletimer);
} else {
if (ax25->sk && !sock_flag(ax25->sk, SOCK_DESTROY))
ax25_stop_heartbeat(ax25);
diff --git a/net/ax25/ax25_timer.c b/net/ax25/ax25_timer.c
index 9f7cb0a7c73f..3891a3923d6c 100644
--- a/net/ax25/ax25_timer.c
+++ b/net/ax25/ax25_timer.c
@@ -65,7 +65,7 @@ void ax25_start_t3timer(ax25_cb *ax25)
if (ax25->t3 > 0)
mod_timer(&ax25->t3timer, jiffies + ax25->t3);
else
- del_timer(&ax25->t3timer);
+ timer_delete(&ax25->t3timer);
}
void ax25_start_idletimer(ax25_cb *ax25)
@@ -73,32 +73,32 @@ void ax25_start_idletimer(ax25_cb *ax25)
if (ax25->idle > 0)
mod_timer(&ax25->idletimer, jiffies + ax25->idle);
else
- del_timer(&ax25->idletimer);
+ timer_delete(&ax25->idletimer);
}
void ax25_stop_heartbeat(ax25_cb *ax25)
{
- del_timer(&ax25->timer);
+ timer_delete(&ax25->timer);
}
void ax25_stop_t1timer(ax25_cb *ax25)
{
- del_timer(&ax25->t1timer);
+ timer_delete(&ax25->t1timer);
}
void ax25_stop_t2timer(ax25_cb *ax25)
{
- del_timer(&ax25->t2timer);
+ timer_delete(&ax25->t2timer);
}
void ax25_stop_t3timer(ax25_cb *ax25)
{
- del_timer(&ax25->t3timer);
+ timer_delete(&ax25->t3timer);
}
void ax25_stop_idletimer(ax25_cb *ax25)
{
- del_timer(&ax25->idletimer);
+ timer_delete(&ax25->idletimer);
}
int ax25_t1timer_running(ax25_cb *ax25)
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 860a0786bc1e..20b316207f9a 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -9,7 +9,7 @@
config BATMAN_ADV
tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
- select LIBCRC32C
+ select CRC32
help
B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
a routing protocol for multi-hop ad-hoc mesh networks. The
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
index 9fb14e40e156..adbadb436033 100644
--- a/net/batman-adv/tp_meter.c
+++ b/net/batman-adv/tp_meter.c
@@ -384,13 +384,13 @@ static void batadv_tp_sender_cleanup(struct batadv_priv *bat_priv,
atomic_dec(&tp_vars->bat_priv->tp_num);
/* kill the timer and remove its reference */
- del_timer_sync(&tp_vars->timer);
+ timer_delete_sync(&tp_vars->timer);
/* the worker might have rearmed itself therefore we kill it again. Note
* that if the worker should run again before invoking the following
- * del_timer(), it would not re-arm itself once again because the status
+ * timer_delete(), it would not re-arm itself once again because the status
* is OFF now
*/
- del_timer(&tp_vars->timer);
+ timer_delete(&tp_vars->timer);
batadv_tp_vars_put(tp_vars);
}
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 707f229f896a..fc5af8639b1e 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -433,7 +433,7 @@ static void hidp_set_timer(struct hidp_session *session)
static void hidp_del_timer(struct hidp_session *session)
{
if (session->idle_to > 0)
- del_timer_sync(&session->timer);
+ timer_delete_sync(&session->timer);
}
static void hidp_process_report(struct hidp_session *session, int type,
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index ad5177e3a69b..20ea7dba0a9a 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -254,7 +254,7 @@ static void rfcomm_session_clear_timer(struct rfcomm_session *s)
{
BT_DBG("session %p state %ld", s, s->state);
- del_timer_sync(&s->timer);
+ timer_delete_sync(&s->timer);
}
/* ---- RFCOMM DLCs ---- */
@@ -281,7 +281,7 @@ static void rfcomm_dlc_clear_timer(struct rfcomm_dlc *d)
{
BT_DBG("dlc %p state %ld", d, d->state);
- if (del_timer(&d->timer))
+ if (timer_delete(&d->timer))
rfcomm_dlc_put(d);
}
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 7e1ad229e133..722203b98ff7 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -732,7 +732,7 @@ static int br_mdb_replace_group_sg(const struct br_mdb_config *cfg,
mod_timer(&pg->timer,
now + brmctx->multicast_membership_interval);
else
- del_timer(&pg->timer);
+ timer_delete(&pg->timer);
br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);
@@ -853,7 +853,7 @@ static int br_mdb_add_group_src(const struct br_mdb_config *cfg,
cfg->entry->state == MDB_TEMPORARY)
mod_timer(&ent->timer, now + br_multicast_gmi(brmctx));
else
- del_timer(&ent->timer);
+ timer_delete(&ent->timer);
/* Install a (S, G) forwarding entry for the source. */
err = br_mdb_add_group_src_fwd(cfg, &src->addr, brmctx, extack);
@@ -953,7 +953,7 @@ static int br_mdb_replace_group_star_g(const struct br_mdb_config *cfg,
mod_timer(&pg->timer,
now + brmctx->multicast_membership_interval);
else
- del_timer(&pg->timer);
+ timer_delete(&pg->timer);
br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index b2ae0d2434d2..dcbf058de1e3 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -546,7 +546,7 @@ static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
return;
/* the kernel is now responsible for removing this S,G */
- del_timer(&sg->timer);
+ timer_delete(&sg->timer);
star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
if (!star_mp)
return;
@@ -2015,9 +2015,9 @@ void br_multicast_port_ctx_init(struct net_bridge_port *port,
void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
- del_timer_sync(&pmctx->ip6_mc_router_timer);
+ timer_delete_sync(&pmctx->ip6_mc_router_timer);
#endif
- del_timer_sync(&pmctx->ip4_mc_router_timer);
+ timer_delete_sync(&pmctx->ip4_mc_router_timer);
}
int br_multicast_add_port(struct net_bridge_port *port)
@@ -2062,7 +2062,7 @@ static void br_multicast_enable(struct bridge_mcast_own_query *query)
query->startup_sent = 0;
if (try_to_del_timer_sync(&query->timer) >= 0 ||
- del_timer(&query->timer))
+ timer_delete(&query->timer))
mod_timer(&query->timer, jiffies);
}
@@ -2127,12 +2127,12 @@ static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
br_multicast_find_del_pg(pmctx->port->br, pg);
del |= br_ip4_multicast_rport_del(pmctx);
- del_timer(&pmctx->ip4_mc_router_timer);
- del_timer(&pmctx->ip4_own_query.timer);
+ timer_delete(&pmctx->ip4_mc_router_timer);
+ timer_delete(&pmctx->ip4_own_query.timer);
del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
- del_timer(&pmctx->ip6_mc_router_timer);
- del_timer(&pmctx->ip6_own_query.timer);
+ timer_delete(&pmctx->ip6_mc_router_timer);
+ timer_delete(&pmctx->ip6_own_query.timer);
#endif
br_multicast_rport_del_notify(pmctx, del);
}
@@ -4199,15 +4199,15 @@ void br_multicast_open(struct net_bridge *br)
static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
- del_timer_sync(&brmctx->ip4_mc_router_timer);
- del_timer_sync(&brmctx->ip4_other_query.timer);
- del_timer_sync(&brmctx->ip4_other_query.delay_timer);
- del_timer_sync(&brmctx->ip4_own_query.timer);
+ timer_delete_sync(&brmctx->ip4_mc_router_timer);
+ timer_delete_sync(&brmctx->ip4_other_query.timer);
+ timer_delete_sync(&brmctx->ip4_other_query.delay_timer);
+ timer_delete_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
- del_timer_sync(&brmctx->ip6_mc_router_timer);
- del_timer_sync(&brmctx->ip6_other_query.timer);
- del_timer_sync(&brmctx->ip6_other_query.delay_timer);
- del_timer_sync(&brmctx->ip6_own_query.timer);
+ timer_delete_sync(&brmctx->ip6_mc_router_timer);
+ timer_delete_sync(&brmctx->ip6_other_query.timer);
+ timer_delete_sync(&brmctx->ip6_other_query.delay_timer);
+ timer_delete_sync(&brmctx->ip6_own_query.timer);
#endif
}
@@ -4384,9 +4384,9 @@ int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val)
case MDB_RTR_TYPE_DISABLED:
case MDB_RTR_TYPE_PERM:
br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM);
- del_timer(&brmctx->ip4_mc_router_timer);
+ timer_delete(&brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
- del_timer(&brmctx->ip6_mc_router_timer);
+ timer_delete(&brmctx->ip6_mc_router_timer);
#endif
brmctx->multicast_router = val;
err = 0;
@@ -4455,10 +4455,10 @@ int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx,
case MDB_RTR_TYPE_DISABLED:
pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
del |= br_ip4_multicast_rport_del(pmctx);
- del_timer(&pmctx->ip4_mc_router_timer);
+ timer_delete(&pmctx->ip4_mc_router_timer);
del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
- del_timer(&pmctx->ip6_mc_router_timer);
+ timer_delete(&pmctx->ip6_mc_router_timer);
#endif
br_multicast_rport_del_notify(pmctx, del);
break;
@@ -4470,10 +4470,10 @@ int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx,
break;
case MDB_RTR_TYPE_PERM:
pmctx->multicast_router = MDB_RTR_TYPE_PERM;
- del_timer(&pmctx->ip4_mc_router_timer);
+ timer_delete(&pmctx->ip4_mc_router_timer);
br_ip4_multicast_add_router(brmctx, pmctx);
#if IS_ENABLED(CONFIG_IPV6)
- del_timer(&pmctx->ip6_mc_router_timer);
+ timer_delete(&pmctx->ip6_mc_router_timer);
#endif
br_ip6_multicast_add_router(brmctx, pmctx);
break;
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 7d27b2e6038f..024210f95468 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -198,7 +198,7 @@ void br_become_root_bridge(struct net_bridge *br)
br->hello_time = br->bridge_hello_time;
br->forward_delay = br->bridge_forward_delay;
br_topology_change_detection(br);
- del_timer(&br->tcn_timer);
+ timer_delete(&br->tcn_timer);
if (br->dev->flags & IFF_UP) {
br_config_bpdu_generation(br);
@@ -363,7 +363,7 @@ static int br_supersedes_port_info(const struct net_bridge_port *p,
static void br_topology_change_acknowledged(struct net_bridge *br)
{
br->topology_change_detected = 0;
- del_timer(&br->tcn_timer);
+ timer_delete(&br->tcn_timer);
}
/* called under bridge lock */
@@ -439,7 +439,7 @@ static void br_make_blocking(struct net_bridge_port *p)
br_set_state(p, BR_STATE_BLOCKING);
br_ifinfo_notify(RTM_NEWLINK, NULL, p);
- del_timer(&p->forward_delay_timer);
+ timer_delete(&p->forward_delay_timer);
}
}
@@ -454,7 +454,7 @@ static void br_make_forwarding(struct net_bridge_port *p)
if (br->stp_enabled == BR_NO_STP || br->forward_delay == 0) {
br_set_state(p, BR_STATE_FORWARDING);
br_topology_change_detection(br);
- del_timer(&p->forward_delay_timer);
+ timer_delete(&p->forward_delay_timer);
} else if (br->stp_enabled == BR_KERNEL_STP)
br_set_state(p, BR_STATE_LISTENING);
else
@@ -483,7 +483,7 @@ void br_port_state_selection(struct net_bridge *br)
p->topology_change_ack = 0;
br_make_forwarding(p);
} else if (br_is_designated_port(p)) {
- del_timer(&p->message_age_timer);
+ timer_delete(&p->message_age_timer);
br_make_forwarding(p);
} else {
p->config_pending = 0;
@@ -533,9 +533,9 @@ void br_received_config_bpdu(struct net_bridge_port *p,
br_port_state_selection(br);
if (!br_is_root_bridge(br) && was_root) {
- del_timer(&br->hello_timer);
+ timer_delete(&br->hello_timer);
if (br->topology_change_detected) {
- del_timer(&br->topology_change_timer);
+ timer_delete(&br->topology_change_timer);
br_transmit_tcn(br);
mod_timer(&br->tcn_timer,
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 75204d36d7f9..c20a41bf253b 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -81,9 +81,9 @@ void br_stp_disable_bridge(struct net_bridge *br)
br->topology_change_detected = 0;
spin_unlock_bh(&br->lock);
- del_timer_sync(&br->hello_timer);
- del_timer_sync(&br->topology_change_timer);
- del_timer_sync(&br->tcn_timer);
+ timer_delete_sync(&br->hello_timer);
+ timer_delete_sync(&br->topology_change_timer);
+ timer_delete_sync(&br->tcn_timer);
cancel_delayed_work_sync(&br->gc_work);
}
@@ -109,9 +109,9 @@ void br_stp_disable_port(struct net_bridge_port *p)
br_ifinfo_notify(RTM_NEWLINK, NULL, p);
- del_timer(&p->message_age_timer);
- del_timer(&p->forward_delay_timer);
- del_timer(&p->hold_timer);
+ timer_delete(&p->message_age_timer);
+ timer_delete(&p->forward_delay_timer);
+ timer_delete(&p->hold_timer);
if (!rcu_access_pointer(p->backup_port))
br_fdb_delete_by_port(br, p, 0, 0);
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 4c059e41c831..4aab7033c933 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -825,7 +825,7 @@ static void can_pernet_exit(struct net *net)
if (IS_ENABLED(CONFIG_PROC_FS)) {
can_remove_proc(net);
if (stats_timer)
- del_timer_sync(&net->can.stattimer);
+ timer_delete_sync(&net->can.stattimer);
}
kfree(net->can.rx_alldev_list);
diff --git a/net/ceph/Kconfig b/net/ceph/Kconfig
index c5c4eef3a9ff..0aa21fcbf6ec 100644
--- a/net/ceph/Kconfig
+++ b/net/ceph/Kconfig
@@ -2,7 +2,7 @@
config CEPH_LIB
tristate "Ceph core library"
depends on INET
- select LIBCRC32C
+ select CRC32
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_GCM
diff --git a/net/core/Makefile b/net/core/Makefile
index a10c3bd96798..b2a76ce33932 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -45,5 +45,5 @@ obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
obj-$(CONFIG_OF) += of_net.o
obj-$(CONFIG_NET_TEST) += net_test.o
obj-$(CONFIG_NET_DEVMEM) += devmem.o
-obj-$(CONFIG_DEBUG_NET_SMALL_RTNL) += rtnl_net_debug.o
+obj-$(CONFIG_DEBUG_NET) += lock_debug.o
obj-$(CONFIG_FAIL_SKB_REALLOC) += skb_fault_injection.o
diff --git a/net/core/dev.c b/net/core/dev.c
index be17e0660144..0608605cfc24 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1771,6 +1771,7 @@ void netif_disable_lro(struct net_device *dev)
netdev_unlock_ops(lower_dev);
}
}
+EXPORT_IPV6_MOD(netif_disable_lro);
/**
* dev_disable_gro_hw - disable HW Generic Receive Offload on a device
@@ -1858,7 +1859,9 @@ static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
int err;
for_each_netdev(net, dev) {
+ netdev_lock_ops(dev);
err = call_netdevice_register_notifiers(nb, dev);
+ netdev_unlock_ops(dev);
if (err)
goto rollback;
}
@@ -10284,7 +10287,9 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
goto unlock;
}
+ netdev_lock_ops(dev);
err = dev_xdp_attach_link(dev, &extack, link);
+ netdev_unlock_ops(dev);
rtnl_unlock();
if (err) {
@@ -11045,7 +11050,9 @@ int register_netdevice(struct net_device *dev)
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* Notify protocols, that a new device appeared. */
+ netdev_lock_ops(dev);
ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
+ netdev_unlock_ops(dev);
ret = notifier_to_errno(ret);
if (ret) {
/* Expect explicit free_netdev() on failure */
@@ -12057,7 +12064,7 @@ void unregister_netdev(struct net_device *dev)
}
EXPORT_SYMBOL(unregister_netdev);
-int netif_change_net_namespace(struct net_device *dev, struct net *net,
+int __dev_change_net_namespace(struct net_device *dev, struct net *net,
const char *pat, int new_ifindex,
struct netlink_ext_ack *extack)
{
@@ -12142,11 +12149,12 @@ int netif_change_net_namespace(struct net_device *dev, struct net *net,
* And now a mini version of register_netdevice unregister_netdevice.
*/
+ netdev_lock_ops(dev);
/* If device is running close it first. */
netif_close(dev);
-
/* And unlink it from device chain */
unlist_netdevice(dev);
+ netdev_unlock_ops(dev);
synchronize_net();
@@ -12208,11 +12216,12 @@ int netif_change_net_namespace(struct net_device *dev, struct net *net,
err = netdev_change_owner(dev, net_old, net);
WARN_ON(err);
+ netdev_lock_ops(dev);
/* Add the device back in the hashes */
list_netdevice(dev);
-
/* Notify protocols, that a new device appeared. */
call_netdevice_notifiers(NETDEV_REGISTER, dev);
+ netdev_unlock_ops(dev);
/*
* Prevent userspace races by waiting until the network
diff --git a/net/core/dev_api.c b/net/core/dev_api.c
index 8dbc60612100..90bafb0b1b8c 100644
--- a/net/core/dev_api.c
+++ b/net/core/dev_api.c
@@ -117,13 +117,7 @@ EXPORT_SYMBOL(dev_set_mac_address_user);
int dev_change_net_namespace(struct net_device *dev, struct net *net,
const char *pat)
{
- int ret;
-
- netdev_lock_ops(dev);
- ret = netif_change_net_namespace(dev, net, pat, 0, NULL);
- netdev_unlock_ops(dev);
-
- return ret;
+ return __dev_change_net_namespace(dev, net, pat, 0, NULL);
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
diff --git a/net/core/devmem.c b/net/core/devmem.c
index ee145a2aa41c..6e27a47d0493 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -8,7 +8,6 @@
*/
#include <linux/dma-buf.h>
-#include <linux/ethtool_netlink.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
@@ -117,21 +116,19 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
struct netdev_rx_queue *rxq;
unsigned long xa_idx;
unsigned int rxq_idx;
- int err;
if (binding->list.next)
list_del(&binding->list);
xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
- WARN_ON(rxq->mp_params.mp_priv != binding);
-
- rxq->mp_params.mp_priv = NULL;
- rxq->mp_params.mp_ops = NULL;
+ const struct pp_memory_provider_params mp_params = {
+ .mp_priv = binding,
+ .mp_ops = &dmabuf_devmem_ops,
+ };
rxq_idx = get_netdev_rx_queue_index(rxq);
- err = netdev_rx_queue_restart(binding->dev, rxq_idx);
- WARN_ON(err && err != -ENETDOWN);
+ __net_mp_close_rxq(binding->dev, rxq_idx, &mp_params);
}
xa_erase(&net_devmem_dmabuf_bindings, binding->id);
@@ -143,57 +140,28 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
struct net_devmem_dmabuf_binding *binding,
struct netlink_ext_ack *extack)
{
+ struct pp_memory_provider_params mp_params = {
+ .mp_priv = binding,
+ .mp_ops = &dmabuf_devmem_ops,
+ };
struct netdev_rx_queue *rxq;
u32 xa_idx;
int err;
- if (rxq_idx >= dev->real_num_rx_queues) {
- NL_SET_ERR_MSG(extack, "rx queue index out of range");
- return -ERANGE;
- }
-
- if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
- NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
- return -EINVAL;
- }
-
- if (dev->cfg->hds_thresh) {
- NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
- return -EINVAL;
- }
+ err = __net_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
+ if (err)
+ return err;
rxq = __netif_get_rx_queue(dev, rxq_idx);
- if (rxq->mp_params.mp_ops) {
- NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
- return -EEXIST;
- }
-
-#ifdef CONFIG_XDP_SOCKETS
- if (rxq->pool) {
- NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
- return -EBUSY;
- }
-#endif
-
err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
GFP_KERNEL);
if (err)
- return err;
-
- rxq->mp_params.mp_priv = binding;
- rxq->mp_params.mp_ops = &dmabuf_devmem_ops;
-
- err = netdev_rx_queue_restart(dev, rxq_idx);
- if (err)
- goto err_xa_erase;
+ goto err_close_rxq;
return 0;
-err_xa_erase:
- rxq->mp_params.mp_priv = NULL;
- rxq->mp_params.mp_ops = NULL;
- xa_erase(&binding->bound_rxqs, xa_idx);
-
+err_close_rxq:
+ __net_mp_close_rxq(dev, rxq_idx, &mp_params);
return err;
}
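
After this refactor, devmem.c no longer touches rxq->mp_params directly; binding and unbinding both funnel through the shared helpers exported from netdev_rx_queue.c further down in this diff. A minimal sketch of the assumed pairing, with error handling trimmed and names taken from the patch:

```c
/* Bind a dmabuf-backed memory provider to one RX queue. */
static int bind_queue(struct net_device *dev, u32 rxq_idx,
		      struct net_devmem_dmabuf_binding *binding,
		      struct netlink_ext_ack *extack)
{
	struct pp_memory_provider_params p = {
		.mp_priv = binding,
		.mp_ops  = &dmabuf_devmem_ops,
	};

	return __net_mp_open_rxq(dev, rxq_idx, &p, extack);
}

/* Tear the same binding down; the helper restarts the queue itself. */
static void unbind_queue(struct net_device *dev, u32 rxq_idx,
			 struct net_devmem_dmabuf_binding *binding)
{
	const struct pp_memory_provider_params p = {
		.mp_priv = binding,
		.mp_ops  = &dmabuf_devmem_ops,
	};

	__net_mp_close_rxq(dev, rxq_idx, &p);
}
```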
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 212f0a048cab..8a7ce640f74d 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -1088,7 +1088,7 @@ err_module_put:
struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
struct sk_buff *skb;
- del_timer_sync(&hw_data->send_timer);
+ timer_delete_sync(&hw_data->send_timer);
cancel_work_sync(&hw_data->dm_alert_work);
while ((skb = __skb_dequeue(&hw_data->drop_queue))) {
struct devlink_trap_metadata *hw_metadata;
@@ -1122,7 +1122,7 @@ static void net_dm_hw_monitor_stop(struct netlink_ext_ack *extack)
struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
struct sk_buff *skb;
- del_timer_sync(&hw_data->send_timer);
+ timer_delete_sync(&hw_data->send_timer);
cancel_work_sync(&hw_data->dm_alert_work);
while ((skb = __skb_dequeue(&hw_data->drop_queue))) {
struct devlink_trap_metadata *hw_metadata;
@@ -1183,7 +1183,7 @@ err_module_put:
struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
struct sk_buff *skb;
- del_timer_sync(&data->send_timer);
+ timer_delete_sync(&data->send_timer);
cancel_work_sync(&data->dm_alert_work);
while ((skb = __skb_dequeue(&data->drop_queue)))
consume_skb(skb);
@@ -1211,7 +1211,7 @@ static void net_dm_trace_off_set(void)
struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
struct sk_buff *skb;
- del_timer_sync(&data->send_timer);
+ timer_delete_sync(&data->send_timer);
cancel_work_sync(&data->dm_alert_work);
while ((skb = __skb_dequeue(&data->drop_queue)))
consume_skb(skb);
diff --git a/net/core/dst.c b/net/core/dst.c
index c99b95cf9cbb..795ca07e28a4 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -165,6 +165,14 @@ static void dst_count_dec(struct dst_entry *dst)
void dst_release(struct dst_entry *dst)
{
if (dst && rcuref_put(&dst->__rcuref)) {
+#ifdef CONFIG_DST_CACHE
+ if (dst->flags & DST_METADATA) {
+ struct metadata_dst *md_dst = (struct metadata_dst *)dst;
+
+ if (md_dst->type == METADATA_IP_TUNNEL)
+ dst_cache_reset_now(&md_dst->u.tun_info.dst_cache);
+ }
+#endif
dst_count_dec(dst);
call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu);
}
diff --git a/net/core/filter.c b/net/core/filter.c
index 46ae8eb7a03c..bc6828761a47 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8137,6 +8137,8 @@ sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_skb_load_bytes_relative_proto;
case BPF_FUNC_get_socket_cookie:
return &bpf_get_socket_cookie_proto;
+ case BPF_FUNC_get_netns_cookie:
+ return &bpf_get_netns_cookie_proto;
case BPF_FUNC_get_socket_uid:
return &bpf_get_socket_uid_proto;
case BPF_FUNC_perf_event_output:
@@ -9697,7 +9699,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
case offsetof(struct __sk_buff, queue_mapping):
if (type == BPF_WRITE) {
- u32 off = bpf_target_off(struct sk_buff, queue_mapping, 2, target_size);
+ u32 offset = bpf_target_off(struct sk_buff, queue_mapping, 2, target_size);
if (BPF_CLASS(si->code) == BPF_ST && si->imm >= NO_QUEUE_MAPPING) {
*insn++ = BPF_JMP_A(0); /* noop */
@@ -9706,7 +9708,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
if (BPF_CLASS(si->code) == BPF_STX)
*insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
- *insn++ = BPF_EMIT_STORE(BPF_H, si, off);
+ *insn++ = BPF_EMIT_STORE(BPF_H, si, offset);
} else {
*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
bpf_target_off(struct sk_buff,
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 412816076b8b..2b821b9a8699 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -177,7 +177,7 @@ int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
spin_lock_bh(lock);
old = rcu_dereference_protected(*rate_est, 1);
if (old) {
- del_timer_sync(&old->timer);
+ timer_delete_sync(&old->timer);
est->avbps = old->avbps;
est->avpps = old->avpps;
}
diff --git a/net/core/rtnl_net_debug.c b/net/core/lock_debug.c
index 7ecd28cc1c22..b7f22dc92a6f 100644
--- a/net/core/rtnl_net_debug.c
+++ b/net/core/lock_debug.c
@@ -6,10 +6,11 @@
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/net_namespace.h>
+#include <net/netdev_lock.h>
#include <net/netns/generic.h>
-static int rtnl_net_debug_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+int netdev_debug_event(struct notifier_block *nb, unsigned long event,
+ void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
@@ -17,11 +18,13 @@ static int rtnl_net_debug_event(struct notifier_block *nb,
/* Keep enum and don't add default to trigger -Werror=switch */
switch (cmd) {
+ case NETDEV_REGISTER:
case NETDEV_UP:
+ netdev_ops_assert_locked(dev);
+ fallthrough;
case NETDEV_DOWN:
case NETDEV_REBOOT:
case NETDEV_CHANGE:
- case NETDEV_REGISTER:
case NETDEV_UNREGISTER:
case NETDEV_CHANGEMTU:
case NETDEV_CHANGEADDR:
@@ -66,6 +69,7 @@ static int rtnl_net_debug_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
+EXPORT_SYMBOL_NS_GPL(netdev_debug_event, "NETDEV_INTERNAL");
static int rtnl_net_debug_net_id;
@@ -74,7 +78,7 @@ static int __net_init rtnl_net_debug_net_init(struct net *net)
struct notifier_block *nb;
nb = net_generic(net, rtnl_net_debug_net_id);
- nb->notifier_call = rtnl_net_debug_event;
+ nb->notifier_call = netdev_debug_event;
return register_netdevice_notifier_net(net, nb);
}
@@ -95,14 +99,14 @@ static struct pernet_operations rtnl_net_debug_net_ops __net_initdata = {
};
static struct notifier_block rtnl_net_debug_block = {
- .notifier_call = rtnl_net_debug_event,
+ .notifier_call = netdev_debug_event,
};
static int __init rtnl_net_debug_init(void)
{
int ret;
- ret = register_pernet_device(&rtnl_net_debug_net_ops);
+ ret = register_pernet_subsys(&rtnl_net_debug_net_ops);
if (ret)
return ret;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 0738aa6cca25..a07249b59ae1 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -309,7 +309,7 @@ static void neigh_add_timer(struct neighbour *n, unsigned long when)
static int neigh_del_timer(struct neighbour *n)
{
if ((n->nud_state & NUD_IN_TIMER) &&
- del_timer(&n->timer)) {
+ timer_delete(&n->timer)) {
neigh_release(n);
return 1;
}
@@ -427,7 +427,7 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
tbl->family);
if (skb_queue_empty_lockless(&tbl->proxy_queue))
- del_timer_sync(&tbl->proxy_timer);
+ timer_delete_sync(&tbl->proxy_timer);
return 0;
}
@@ -1597,7 +1597,7 @@ static void neigh_proxy_process(struct timer_list *t)
} else if (!sched_next || tdif < sched_next)
sched_next = tdif;
}
- del_timer(&tbl->proxy_timer);
+ timer_delete(&tbl->proxy_timer);
if (sched_next)
mod_timer(&tbl->proxy_timer, jiffies + sched_next);
spin_unlock(&tbl->proxy_queue.lock);
@@ -1628,7 +1628,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
spin_lock(&tbl->proxy_queue.lock);
- if (del_timer(&tbl->proxy_timer)) {
+ if (timer_delete(&tbl->proxy_timer)) {
if (time_before(tbl->proxy_timer.expires, sched_next))
sched_next = tbl->proxy_timer.expires;
}
@@ -1786,7 +1786,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
/* It is not clean... Fix it to unload IPv6 module safely */
cancel_delayed_work_sync(&tbl->managed_work);
cancel_delayed_work_sync(&tbl->gc_work);
- del_timer_sync(&tbl->proxy_timer);
+ timer_delete_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family);
neigh_ifdown(tbl, NULL);
if (atomic_read(&tbl->entries))
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index fd1cfa9707dc..5d7af50fe702 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -874,12 +874,6 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
goto err_unlock;
}
- if (dev_xdp_prog_count(netdev)) {
- NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
- err = -EEXIST;
- goto err_unlock;
- }
-
binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
if (IS_ERR(binding)) {
err = PTR_ERR(binding);
@@ -951,12 +945,14 @@ void netdev_nl_sock_priv_destroy(struct netdev_nl_sock *priv)
{
struct net_devmem_dmabuf_binding *binding;
struct net_devmem_dmabuf_binding *temp;
+ struct net_device *dev;
mutex_lock(&priv->lock);
list_for_each_entry_safe(binding, temp, &priv->bindings, list) {
- netdev_lock(binding->dev);
+ dev = binding->dev;
+ netdev_lock(dev);
net_devmem_unbind_dmabuf(binding);
- netdev_unlock(binding->dev);
+ netdev_unlock(dev);
}
mutex_unlock(&priv->lock);
}
diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c
index 3af716f77a13..d126f10197bf 100644
--- a/net/core/netdev_rx_queue.c
+++ b/net/core/netdev_rx_queue.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
@@ -86,8 +87,9 @@ err_free_new_mem:
}
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
-static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
- struct pp_memory_provider_params *p)
+int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
+ const struct pp_memory_provider_params *p,
+ struct netlink_ext_ack *extack)
{
struct netdev_rx_queue *rxq;
int ret;
@@ -95,16 +97,41 @@ static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
if (!netdev_need_ops_lock(dev))
return -EOPNOTSUPP;
- if (ifq_idx >= dev->real_num_rx_queues)
+ if (rxq_idx >= dev->real_num_rx_queues)
return -EINVAL;
- ifq_idx = array_index_nospec(ifq_idx, dev->real_num_rx_queues);
+ rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);
- rxq = __netif_get_rx_queue(dev, ifq_idx);
- if (rxq->mp_params.mp_ops)
+ if (rxq_idx >= dev->real_num_rx_queues) {
+ NL_SET_ERR_MSG(extack, "rx queue index out of range");
+ return -ERANGE;
+ }
+ if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
+ NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
+ return -EINVAL;
+ }
+ if (dev->cfg->hds_thresh) {
+ NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
+ return -EINVAL;
+ }
+ if (dev_xdp_prog_count(dev)) {
+ NL_SET_ERR_MSG(extack, "unable to custom memory provider to device with XDP program attached");
return -EEXIST;
+ }
+
+ rxq = __netif_get_rx_queue(dev, rxq_idx);
+ if (rxq->mp_params.mp_ops) {
+ NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
+ return -EEXIST;
+ }
+#ifdef CONFIG_XDP_SOCKETS
+ if (rxq->pool) {
+ NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
+ return -EBUSY;
+ }
+#endif
rxq->mp_params = *p;
- ret = netdev_rx_queue_restart(dev, ifq_idx);
+ ret = netdev_rx_queue_restart(dev, rxq_idx);
if (ret) {
rxq->mp_params.mp_ops = NULL;
rxq->mp_params.mp_priv = NULL;
@@ -112,21 +139,22 @@ static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
return ret;
}
-int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
+int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
struct pp_memory_provider_params *p)
{
int ret;
netdev_lock(dev);
- ret = __net_mp_open_rxq(dev, ifq_idx, p);
+ ret = __net_mp_open_rxq(dev, rxq_idx, p, NULL);
netdev_unlock(dev);
return ret;
}
-static void __net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
- struct pp_memory_provider_params *old_p)
+void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
+ const struct pp_memory_provider_params *old_p)
{
struct netdev_rx_queue *rxq;
+ int err;
if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
return;
@@ -146,7 +174,8 @@ static void __net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
rxq->mp_params.mp_ops = NULL;
rxq->mp_params.mp_priv = NULL;
- WARN_ON(netdev_rx_queue_restart(dev, ifq_idx));
+ err = netdev_rx_queue_restart(dev, ifq_idx);
+ WARN_ON(err && err != -ENETDOWN);
}
void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5a24a30dfc2d..c23852835050 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1177,6 +1177,9 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
/* IFLA_VF_STATS_TX_DROPPED */
nla_total_size_64bit(sizeof(__u64)));
}
+ if (dev->netdev_ops->ndo_get_vf_guid)
+ size += num_vfs * 2 *
+ nla_total_size(sizeof(struct ifla_vf_guid));
return size;
} else
return 0;
@@ -3022,8 +3025,6 @@ static int do_setlink(const struct sk_buff *skb, struct net_device *dev,
char ifname[IFNAMSIZ];
int err;
- netdev_lock_ops(dev);
-
err = validate_linkmsg(dev, tb, extack);
if (err < 0)
goto errout;
@@ -3039,14 +3040,16 @@ static int do_setlink(const struct sk_buff *skb, struct net_device *dev,
new_ifindex = nla_get_s32_default(tb[IFLA_NEW_IFINDEX], 0);
- err = netif_change_net_namespace(dev, tgt_net, pat,
+ err = __dev_change_net_namespace(dev, tgt_net, pat,
new_ifindex, extack);
if (err)
- goto errout;
+ return err;
status |= DO_SETLINK_MODIFIED;
}
+ netdev_lock_ops(dev);
+
if (tb[IFLA_MAP]) {
struct rtnl_link_ifmap *u_map;
struct ifmap k_map;
diff --git a/net/core/sock.c b/net/core/sock.c
index 323892066def..f67a3c5b0988 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3598,14 +3598,14 @@ EXPORT_SYMBOL(sk_reset_timer);
void sk_stop_timer(struct sock *sk, struct timer_list* timer)
{
- if (del_timer(timer))
+ if (timer_delete(timer))
__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer)
{
- if (del_timer_sync(timer))
+ if (timer_delete_sync(timer))
__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer_sync);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 754f60fb6e25..77e5705ac799 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -281,7 +281,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
if (!in_dev->arp_parms)
goto out_kfree;
if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
- dev_disable_lro(dev);
+ netif_disable_lro(dev);
/* Reference in_dev->dev */
netdev_hold(dev, &in_dev->dev_tracker, GFP_KERNEL);
/* Account for reference dev->ip_ptr (below) */
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 2c394c364cb9..ca7d539b3846 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -205,7 +205,7 @@ static void ip_sf_list_clear_all(struct ip_sf_list *psf)
static void igmp_stop_timer(struct ip_mc_list *im)
{
spin_lock_bh(&im->lock);
- if (del_timer(&im->timer))
+ if (timer_delete(&im->timer))
refcount_dec(&im->refcnt);
im->tm_running = 0;
im->reporter = 0;
@@ -251,7 +251,7 @@ static void igmp_mod_timer(struct ip_mc_list *im, int max_delay)
{
spin_lock_bh(&im->lock);
im->unsolicit_count = 0;
- if (del_timer(&im->timer)) {
+ if (timer_delete(&im->timer)) {
if ((long)(im->timer.expires-jiffies) < max_delay) {
add_timer(&im->timer);
im->tm_running = 1;
@@ -974,7 +974,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
}
/* cancel the interface change timer */
WRITE_ONCE(in_dev->mr_ifc_count, 0);
- if (del_timer(&in_dev->mr_ifc_timer))
+ if (timer_delete(&in_dev->mr_ifc_timer))
__in_dev_put(in_dev);
/* clear deleted report items */
igmpv3_clear_delrec(in_dev);
@@ -1830,10 +1830,10 @@ void ip_mc_down(struct in_device *in_dev)
#ifdef CONFIG_IP_MULTICAST
WRITE_ONCE(in_dev->mr_ifc_count, 0);
- if (del_timer(&in_dev->mr_ifc_timer))
+ if (timer_delete(&in_dev->mr_ifc_timer))
__in_dev_put(in_dev);
in_dev->mr_gq_running = 0;
- if (del_timer(&in_dev->mr_gq_timer))
+ if (timer_delete(&in_dev->mr_gq_timer))
__in_dev_put(in_dev);
#endif
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 19fae4811ab2..470ab17ceb51 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -133,7 +133,7 @@ static void inet_frags_free_cb(void *ptr, void *arg)
struct inet_frag_queue *fq = ptr;
int count;
- count = del_timer_sync(&fq->timer) ? 1 : 0;
+ count = timer_delete_sync(&fq->timer) ? 1 : 0;
spin_lock_bh(&fq->lock);
fq->flags |= INET_FRAG_DROP;
@@ -227,7 +227,7 @@ EXPORT_SYMBOL(fqdir_exit);
void inet_frag_kill(struct inet_frag_queue *fq, int *refs)
{
- if (del_timer(&fq->timer))
+ if (timer_delete(&fq->timer))
(*refs)++;
if (!(fq->flags & INET_FRAG_COMPLETE)) {
@@ -297,7 +297,7 @@ void inet_frag_destroy(struct inet_frag_queue *q)
reason = (q->flags & INET_FRAG_DROP) ?
SKB_DROP_REASON_FRAG_REASM_TIMEOUT :
SKB_CONSUMED;
- WARN_ON(del_timer(&q->timer) != 0);
+ WARN_ON(timer_delete(&q->timer) != 0);
/* Release all fragment data. */
fqdir = q->fqdir;
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index a3676155be78..f65d2f727381 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -416,7 +416,7 @@ int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
skb_dst_update_pmtu_no_confirm(skb, mtu);
- if (!reply || skb->pkt_type == PACKET_HOST)
+ if (!reply)
return 0;
if (skb->protocol == htons(ETH_P_IP))
@@ -451,7 +451,7 @@ static const struct nla_policy
geneve_opt_policy[LWTUNNEL_IP_OPT_GENEVE_MAX + 1] = {
[LWTUNNEL_IP_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
[LWTUNNEL_IP_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
- [LWTUNNEL_IP_OPT_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 },
+ [LWTUNNEL_IP_OPT_GENEVE_DATA] = { .type = NLA_BINARY, .len = 127 },
};
static const struct nla_policy
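Note: the .len = 128 -> 127 change here (repeated below for nft_tunnel.c, act_tunnel_key.c and cls_flower.c) looks like a cap on what the Geneve option header can actually describe: the option length field is 5 bits wide and counts 4-byte words, so 124 bytes is the largest representable payload and 128 / 4 = 32 would wrap a 5-bit field to 0. With the parsers' existing multiple-of-4 check, a 127-byte policy limit leaves 124 bytes as the largest value that is both accepted and representable. A small stand-alone illustration of the wrap (opt_hdr is a stand-in, not the kernel's struct geneve_opt):

	#include <stdio.h>

	/* Stand-in for the 5-bit, 4-byte-word length field of a Geneve option. */
	struct opt_hdr {
		unsigned int length:5;
	};

	int main(void)
	{
		struct opt_hdr o;

		o.length = 128 / 4;	/* 32 does not fit in 5 bits: wraps to 0 */
		printf("128-byte payload stores length %u\n", (unsigned int)o.length);

		o.length = 124 / 4;	/* 31 is the largest representable word count */
		printf("124-byte payload stores length %u\n", (unsigned int)o.length);
		return 0;
	}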
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index b81c8131e23f..a8b04d4abcaa 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1289,7 +1289,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
}
}
if (list_empty(&mrt->mfc_unres_queue))
- del_timer(&mrt->ipmr_expire_timer);
+ timer_delete(&mrt->ipmr_expire_timer);
spin_unlock_bh(&mfc_unres_lock);
if (found) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ea8de00f669d..6edc441b3702 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1525,25 +1525,11 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
__tcp_cleanup_rbuf(sk, copied);
}
-/* private version of sock_rfree() avoiding one atomic_sub() */
-void tcp_sock_rfree(struct sk_buff *skb)
-{
- struct sock *sk = skb->sk;
- unsigned int len = skb->truesize;
-
- sock_owned_by_me(sk);
- atomic_set(&sk->sk_rmem_alloc,
- atomic_read(&sk->sk_rmem_alloc) - len);
-
- sk_forward_alloc_add(sk, len);
- sk_mem_reclaim(sk);
-}
-
static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
- if (likely(skb->destructor == tcp_sock_rfree)) {
- tcp_sock_rfree(skb);
+ if (likely(skb->destructor == sock_rfree)) {
+ sock_rfree(skb);
skb->destructor = NULL;
skb->sk = NULL;
return skb_attempt_defer_free(skb);
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index ca40665145c6..1a6b1bc54245 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -189,7 +189,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
tcp_segs_in(tp, skb);
__skb_pull(skb, tcp_hdrlen(skb));
sk_forced_mem_schedule(sk, skb->truesize);
- tcp_skb_set_owner_r(skb, sk);
+ skb_set_owner_r(skb, sk);
TCP_SKB_CB(skb)->seq++;
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e1f952fbac48..a35018e2d0ba 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5171,7 +5171,7 @@ end:
if (tcp_is_sack(tp))
tcp_grow_window(sk, skb, false);
skb_condense(skb);
- tcp_skb_set_owner_r(skb, sk);
+ skb_set_owner_r(skb, sk);
}
}
@@ -5187,7 +5187,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
if (!eaten) {
tcp_add_receive_queue(sk, skb);
- tcp_skb_set_owner_r(skb, sk);
+ skb_set_owner_r(skb, sk);
}
return eaten;
}
@@ -5504,7 +5504,7 @@ skip_this:
__skb_queue_before(list, skb, nskb);
else
__skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
- tcp_skb_set_owner_r(nskb, sk);
+ skb_set_owner_r(nskb, sk);
mptcp_skb_ext_move(nskb, skb);
/* Copy data, releasing collapsed skbs. */
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d0bffcfa56d8..2742cc7602bb 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1625,12 +1625,12 @@ static bool udp_skb_has_head_state(struct sk_buff *skb)
}
/* fully reclaim rmem/fwd memory allocated for skb */
-static void udp_rmem_release(struct sock *sk, int size, int partial,
- bool rx_queue_lock_held)
+static void udp_rmem_release(struct sock *sk, unsigned int size,
+ int partial, bool rx_queue_lock_held)
{
struct udp_sock *up = udp_sk(sk);
struct sk_buff_head *sk_queue;
- int amt;
+ unsigned int amt;
if (likely(partial)) {
up->forward_deficit += size;
@@ -1650,10 +1650,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
if (!rx_queue_lock_held)
spin_lock(&sk_queue->lock);
-
- sk_forward_alloc_add(sk, size);
- amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
- sk_forward_alloc_add(sk, -amt);
+ amt = (size + sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
+ sk_forward_alloc_add(sk, size - amt);
if (amt)
__sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT);
@@ -1725,17 +1723,25 @@ static int udp_rmem_schedule(struct sock *sk, int size)
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
struct sk_buff_head *list = &sk->sk_receive_queue;
- int rmem, err = -ENOMEM;
+ unsigned int rmem, rcvbuf;
spinlock_t *busy = NULL;
- int size, rcvbuf;
+ int size, err = -ENOMEM;
- /* Immediately drop when the receive queue is full.
- * Always allow at least one packet.
- */
rmem = atomic_read(&sk->sk_rmem_alloc);
rcvbuf = READ_ONCE(sk->sk_rcvbuf);
- if (rmem > rcvbuf)
- goto drop;
+ size = skb->truesize;
+
+ /* Immediately drop when the receive queue is full.
+ * Cast to unsigned int performs the boundary check for INT_MAX.
+ */
+ if (rmem + size > rcvbuf) {
+ if (rcvbuf > INT_MAX >> 1)
+ goto drop;
+
+ /* Always allow at least one packet for small buffer. */
+ if (rmem > rcvbuf)
+ goto drop;
+ }
/* Under mem pressure, it might be helpful to help udp_recvmsg()
* having linear skbs :
@@ -1745,10 +1751,10 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
*/
if (rmem > (rcvbuf >> 1)) {
skb_condense(skb);
-
+ size = skb->truesize;
busy = busylock_acquire(sk);
}
- size = skb->truesize;
+
udp_set_dev_scratch(skb);
atomic_add(size, &sk->sk_rmem_alloc);
@@ -1835,7 +1841,7 @@ EXPORT_IPV6_MOD_GPL(skb_consume_udp);
static struct sk_buff *__first_packet_length(struct sock *sk,
struct sk_buff_head *rcvq,
- int *total)
+ unsigned int *total)
{
struct sk_buff *skb;
@@ -1868,8 +1874,8 @@ static int first_packet_length(struct sock *sk)
{
struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
+ unsigned int total = 0;
struct sk_buff *skb;
- int total = 0;
int res;
spin_lock_bh(&rcvq->lock);
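Note: the __udp_enqueue_schedule_skb() rework reads sk_rmem_alloc and sk_rcvbuf as unsigned int and checks rmem + size against rcvbuf up front. As the new comment says, the unsigned arithmetic doubles as an INT_MAX boundary check: once the receive-memory counter has run past INT_MAX its unsigned value is huge, so the comparison trips even with a maximal SO_RCVBUF, while a small buffer still admits at least one packet if the queue was not already over the limit. A stand-alone sketch of the decision logic, with hypothetical numbers (it mirrors the shape of the check, not the kernel code itself):

	#include <limits.h>
	#include <stdio.h>

	static const char *enqueue_decision(unsigned int rmem, unsigned int rcvbuf,
					    unsigned int size)
	{
		if (rmem + size > rcvbuf) {
			if (rcvbuf > INT_MAX >> 1)
				return "drop: queue ran past INT_MAX";
			if (rmem > rcvbuf)
				return "drop: queue already full";
		}
		return "enqueue";
	}

	int main(void)
	{
		/* counter past INT_MAX, huge rcvbuf: dropped */
		printf("%s\n", enqueue_decision((unsigned int)INT_MAX + 100u, INT_MAX, 1500));
		/* small buffer, empty queue: first packet is still allowed */
		printf("%s\n", enqueue_decision(0, 2048, 4096));
		/* small buffer, already over: dropped */
		printf("%s\n", enqueue_decision(4096, 2048, 1500));
		return 0;
	}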
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ac8cc1076536..2cffb8f4a2bc 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -80,6 +80,7 @@
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/l3mdev.h>
+#include <net/netdev_lock.h>
#include <linux/if_tunnel.h>
#include <linux/rtnetlink.h>
#include <linux/netconf.h>
@@ -312,7 +313,7 @@ static inline bool addrconf_link_ready(const struct net_device *dev)
static void addrconf_del_rs_timer(struct inet6_dev *idev)
{
- if (del_timer(&idev->rs_timer))
+ if (timer_delete(&idev->rs_timer))
__in6_dev_put(idev);
}
@@ -377,6 +378,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
int err = -ENOMEM;
ASSERT_RTNL();
+ netdev_ops_assert_locked(dev);
if (dev->mtu < IPV6_MIN_MTU && dev != blackhole_netdev)
return ERR_PTR(-EINVAL);
@@ -402,7 +404,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
return ERR_PTR(err);
}
if (ndev->cnf.forwarding)
- dev_disable_lro(dev);
+ netif_disable_lro(dev);
/* We refer to the device */
netdev_hold(dev, &ndev->dev_tracker, GFP_KERNEL);
@@ -3152,10 +3154,12 @@ int addrconf_add_ifaddr(struct net *net, void __user *arg)
rtnl_net_lock(net);
dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
+ netdev_lock_ops(dev);
if (dev)
err = inet6_addr_add(net, dev, &cfg, 0, 0, NULL);
else
err = -ENODEV;
+ netdev_unlock_ops(dev);
rtnl_net_unlock(net);
return err;
}
@@ -5026,9 +5030,10 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!dev) {
NL_SET_ERR_MSG_MOD(extack, "Unable to find the interface");
err = -ENODEV;
- goto unlock;
+ goto unlock_rtnl;
}
+ netdev_lock_ops(dev);
idev = ipv6_find_idev(dev);
if (IS_ERR(idev)) {
err = PTR_ERR(idev);
@@ -5065,6 +5070,8 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
in6_ifa_put(ifa);
unlock:
+ netdev_unlock_ops(dev);
+unlock_rtnl:
rtnl_net_unlock(net);
return err;
@@ -5784,6 +5791,27 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
}
}
+static int inet6_fill_ifla6_stats_attrs(struct sk_buff *skb,
+ struct inet6_dev *idev)
+{
+ struct nlattr *nla;
+
+ nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
+ if (!nla)
+ goto nla_put_failure;
+ snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
+
+ nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
+ if (!nla)
+ goto nla_put_failure;
+ snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
u32 ext_filter_mask)
{
@@ -5806,18 +5834,10 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
/* XXX - MC not implemented */
- if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS)
- return 0;
-
- nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
- if (!nla)
- goto nla_put_failure;
- snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
-
- nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
- if (!nla)
- goto nla_put_failure;
- snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
+ if (!(ext_filter_mask & RTEXT_FILTER_SKIP_STATS)) {
+ if (inet6_fill_ifla6_stats_attrs(skb, idev) < 0)
+ goto nla_put_failure;
+ }
nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
if (!nla)
@@ -6503,7 +6523,9 @@ static int addrconf_sysctl_addr_gen_mode(const struct ctl_table *ctl, int write,
if (idev->cnf.addr_gen_mode != new_val) {
WRITE_ONCE(idev->cnf.addr_gen_mode, new_val);
+ netdev_lock_ops(idev->dev);
addrconf_init_auto_addrs(idev->dev);
+ netdev_unlock_ops(idev->dev);
}
} else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
struct net_device *dev;
@@ -6515,7 +6537,9 @@ static int addrconf_sysctl_addr_gen_mode(const struct ctl_table *ctl, int write,
idev->cnf.addr_gen_mode != new_val) {
WRITE_ONCE(idev->cnf.addr_gen_mode,
new_val);
+ netdev_lock_ops(idev->dev);
addrconf_init_auto_addrs(idev->dev);
+ netdev_unlock_ops(idev->dev);
}
}
}
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index dbcea9fee626..62618a058b8f 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -1072,8 +1072,13 @@ static int calipso_sock_getattr(struct sock *sk,
struct ipv6_opt_hdr *hop;
int opt_len, len, ret_val = -ENOMSG, offset;
unsigned char *opt;
- struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk));
+ struct ipv6_pinfo *pinfo = inet6_sk(sk);
+ struct ipv6_txoptions *txopts;
+
+ if (!pinfo)
+ return -EAFNOSUPPORT;
+ txopts = txopt_get(pinfo);
if (!txopts || !txopts->hopopt)
goto done;
@@ -1125,8 +1130,13 @@ static int calipso_sock_setattr(struct sock *sk,
{
int ret_val;
struct ipv6_opt_hdr *old, *new;
- struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk));
+ struct ipv6_pinfo *pinfo = inet6_sk(sk);
+ struct ipv6_txoptions *txopts;
+
+ if (!pinfo)
+ return -EAFNOSUPPORT;
+ txopts = txopt_get(pinfo);
old = NULL;
if (txopts)
old = txopts->hopopt;
@@ -1153,8 +1163,13 @@ static int calipso_sock_setattr(struct sock *sk,
static void calipso_sock_delattr(struct sock *sk)
{
struct ipv6_opt_hdr *new_hop;
- struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk));
+ struct ipv6_pinfo *pinfo = inet6_sk(sk);
+ struct ipv6_txoptions *txopts;
+
+ if (!pinfo)
+ return;
+ txopts = txopt_get(pinfo);
if (!txopts || !txopts->hopopt)
goto done;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index c134ba202c4c..bf727149fdec 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -2383,7 +2383,7 @@ void fib6_run_gc(unsigned long expires, struct net *net, bool force)
round_jiffies(now
+ net->ipv6.sysctl.ip6_rt_gc_interval));
else
- del_timer(&net->ipv6.ip6_fib_timer);
+ timer_delete(&net->ipv6.ip6_fib_timer);
spin_unlock_bh(&net->ipv6.fib6_gc_lock);
}
@@ -2470,7 +2470,7 @@ static void fib6_net_exit(struct net *net)
{
unsigned int i;
- del_timer_sync(&net->ipv6.ip6_fib_timer);
+ timer_delete_sync(&net->ipv6.ip6_fib_timer);
for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
struct hlist_head *head = &net->ipv6.fib_table_hash[i];
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index eca07e10e21f..a3ff575798dd 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -907,6 +907,6 @@ int ip6_flowlabel_init(void)
void ip6_flowlabel_cleanup(void)
{
static_key_deferred_flush(&ipv6_flowlabel_exclusive);
- del_timer(&ip6_fl_gc_timer);
+ timer_delete(&ip6_fl_gc_timer);
unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index e8ade93a0f0e..b413c9c8a21c 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1526,7 +1526,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
}
}
if (list_empty(&mrt->mfc_unres_queue))
- del_timer(&mrt->ipmr_expire_timer);
+ timer_delete(&mrt->ipmr_expire_timer);
spin_unlock_bh(&mfc_unres_lock);
if (found) {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c3406a0d45bd..ab12b816ab94 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -412,12 +412,37 @@ static bool rt6_check_expired(const struct rt6_info *rt)
return false;
}
+static struct fib6_info *
+rt6_multipath_first_sibling_rcu(const struct fib6_info *rt)
+{
+ struct fib6_info *iter;
+ struct fib6_node *fn;
+
+ fn = rcu_dereference(rt->fib6_node);
+ if (!fn)
+ goto out;
+ iter = rcu_dereference(fn->leaf);
+ if (!iter)
+ goto out;
+
+ while (iter) {
+ if (iter->fib6_metric == rt->fib6_metric &&
+ rt6_qualify_for_ecmp(iter))
+ return iter;
+ iter = rcu_dereference(iter->fib6_next);
+ }
+
+out:
+ return NULL;
+}
+
void fib6_select_path(const struct net *net, struct fib6_result *res,
struct flowi6 *fl6, int oif, bool have_oif_match,
const struct sk_buff *skb, int strict)
{
- struct fib6_info *match = res->f6i;
+ struct fib6_info *first, *match = res->f6i;
struct fib6_info *sibling;
+ int hash;
if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
goto out;
@@ -440,16 +465,25 @@ void fib6_select_path(const struct net *net, struct fib6_result *res,
return;
}
- if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
+ first = rt6_multipath_first_sibling_rcu(match);
+ if (!first)
goto out;
- list_for_each_entry_rcu(sibling, &match->fib6_siblings,
+ hash = fl6->mp_hash;
+ if (hash <= atomic_read(&first->fib6_nh->fib_nh_upper_bound) &&
+ rt6_score_route(first->fib6_nh, first->fib6_flags, oif,
+ strict) >= 0) {
+ match = first;
+ goto out;
+ }
+
+ list_for_each_entry_rcu(sibling, &first->fib6_siblings,
fib6_siblings) {
const struct fib6_nh *nh = sibling->fib6_nh;
int nh_upper_bound;
nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
- if (fl6->mp_hash > nh_upper_bound)
+ if (hash > nh_upper_bound)
continue;
if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
break;
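Note: the fib6_select_path() change anchors the sibling walk at the first route in the node that qualifies for ECMP, found by the new rt6_multipath_first_sibling_rcu() helper, instead of starting from whichever fib6_info the lookup happened to return, so the hash-threshold comparison always runs over the same, consistently ordered sibling list. The selection rule itself is standard hash-threshold multipath: every nexthop carries an upper bound that partitions the hash space, and the first bound at or above the flow hash wins. A simplified, self-contained sketch of that rule (it ignores the rt6_score_route() viability check the kernel also applies):

	#include <stdio.h>

	/* Hash-threshold selection over precomputed per-nexthop upper bounds. */
	static int select_nexthop(const unsigned int *upper_bound, int n, unsigned int hash)
	{
		for (int i = 0; i < n; i++)
			if (hash <= upper_bound[i])
				return i;
		return n - 1;	/* last sibling catches anything left over */
	}

	int main(void)
	{
		/* three equal-weight nexthops splitting the 32-bit hash space */
		const unsigned int bounds[] = { 0x55555555u, 0xaaaaaaaau, 0xffffffffu };

		printf("hash 0x10000000 -> nexthop %d\n", select_nexthop(bounds, 3, 0x10000000u));
		printf("hash 0x90000000 -> nexthop %d\n", select_nexthop(bounds, 3, 0x90000000u));
		printf("hash 0xf0000000 -> nexthop %d\n", select_nexthop(bounds, 3, 0xf0000000u));
		return 0;
	}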
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 7929df08d4e0..cc2b3c44bc05 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -28,6 +28,7 @@
#include <linux/poll.h>
#include <linux/security.h>
#include <net/sock.h>
+#include <asm/machine.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>
@@ -2272,7 +2273,7 @@ static int __init afiucv_init(void)
{
int err;
- if (MACHINE_IS_VM && IS_ENABLED(CONFIG_IUCV)) {
+ if (machine_is_vm() && IS_ENABLED(CONFIG_IUCV)) {
cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
if (unlikely(err)) {
WARN_ON(err);
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index d3e9efab7f4b..83070a2e4485 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -39,6 +39,7 @@
#include <linux/reboot.h>
#include <net/iucv/iucv.h>
#include <linux/atomic.h>
+#include <asm/machine.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -1865,7 +1866,7 @@ static int __init iucv_init(void)
{
int rc;
- if (!MACHINE_IS_VM) {
+ if (!machine_is_vm()) {
rc = -EPROTONOSUPPORT;
goto out;
}
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index 0971ca48ba15..a0596e1f91da 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -194,8 +194,8 @@ int lapb_unregister(struct net_device *dev)
spin_unlock_bh(&lapb->lock);
/* Wait for running timers to stop */
- del_timer_sync(&lapb->t1timer);
- del_timer_sync(&lapb->t2timer);
+ timer_delete_sync(&lapb->t1timer);
+ timer_delete_sync(&lapb->t2timer);
__lapb_remove_cb(lapb);
diff --git a/net/lapb/lapb_timer.c b/net/lapb/lapb_timer.c
index 5be68869064d..5b3f3b444d19 100644
--- a/net/lapb/lapb_timer.c
+++ b/net/lapb/lapb_timer.c
@@ -35,7 +35,7 @@ static void lapb_t2timer_expiry(struct timer_list *);
void lapb_start_t1timer(struct lapb_cb *lapb)
{
- del_timer(&lapb->t1timer);
+ timer_delete(&lapb->t1timer);
lapb->t1timer.function = lapb_t1timer_expiry;
lapb->t1timer.expires = jiffies + lapb->t1;
@@ -46,7 +46,7 @@ void lapb_start_t1timer(struct lapb_cb *lapb)
void lapb_start_t2timer(struct lapb_cb *lapb)
{
- del_timer(&lapb->t2timer);
+ timer_delete(&lapb->t2timer);
lapb->t2timer.function = lapb_t2timer_expiry;
lapb->t2timer.expires = jiffies + lapb->t2;
@@ -58,13 +58,13 @@ void lapb_start_t2timer(struct lapb_cb *lapb)
void lapb_stop_t1timer(struct lapb_cb *lapb)
{
lapb->t1timer_running = false;
- del_timer(&lapb->t1timer);
+ timer_delete(&lapb->t1timer);
}
void lapb_stop_t2timer(struct lapb_cb *lapb)
{
lapb->t2timer_running = false;
- del_timer(&lapb->t2timer);
+ timer_delete(&lapb->t2timer);
}
int lapb_t1timer_running(struct lapb_cb *lapb)
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index 40ca3c1e42a2..7e8fc710c590 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -51,7 +51,7 @@ int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb)
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
llc->remote_busy_flag = 0;
- del_timer(&llc->busy_state_timer.timer);
+ timer_delete(&llc->busy_state_timer.timer);
nr = LLC_I_GET_NR(pdu);
llc_conn_resend_i_pdu_as_cmd(sk, nr, 0);
}
@@ -191,7 +191,7 @@ int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk,
struct llc_sock *llc = llc_sk(sk);
if (llc->data_flag == 2)
- del_timer(&llc->rej_sent_timer.timer);
+ timer_delete(&llc->rej_sent_timer.timer);
return 0;
}
@@ -1111,9 +1111,9 @@ int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb)
{
struct llc_sock *llc = llc_sk(sk);
- del_timer(&llc->rej_sent_timer.timer);
- del_timer(&llc->pf_cycle_timer.timer);
- del_timer(&llc->busy_state_timer.timer);
+ timer_delete(&llc->rej_sent_timer.timer);
+ timer_delete(&llc->pf_cycle_timer.timer);
+ timer_delete(&llc->busy_state_timer.timer);
llc->ack_must_be_send = 0;
llc->ack_pf = 0;
return 0;
@@ -1149,7 +1149,7 @@ int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk,
int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb)
{
- del_timer(&llc_sk(sk)->ack_timer.timer);
+ timer_delete(&llc_sk(sk)->ack_timer.timer);
return 0;
}
@@ -1157,14 +1157,14 @@ int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb)
{
struct llc_sock *llc = llc_sk(sk);
- del_timer(&llc->pf_cycle_timer.timer);
+ timer_delete(&llc->pf_cycle_timer.timer);
llc_conn_set_p_flag(sk, 0);
return 0;
}
int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb)
{
- del_timer(&llc_sk(sk)->rej_sent_timer.timer);
+ timer_delete(&llc_sk(sk)->rej_sent_timer.timer);
return 0;
}
@@ -1180,7 +1180,7 @@ int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb)
/* On loopback we don't queue I frames in unack_pdu_q queue. */
if (acked > 0 || (llc->dev->flags & IFF_LOOPBACK)) {
llc->retry_count = 0;
- del_timer(&llc->ack_timer.timer);
+ timer_delete(&llc->ack_timer.timer);
if (llc->failed_data_req) {
/* already, we did not accept data from upper layer
* (tx_window full or unacceptable state). Now, we
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index afc6974eafda..5c0ac243b248 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -949,15 +949,15 @@ void llc_sk_stop_all_timers(struct sock *sk, bool sync)
struct llc_sock *llc = llc_sk(sk);
if (sync) {
- del_timer_sync(&llc->pf_cycle_timer.timer);
- del_timer_sync(&llc->ack_timer.timer);
- del_timer_sync(&llc->rej_sent_timer.timer);
- del_timer_sync(&llc->busy_state_timer.timer);
+ timer_delete_sync(&llc->pf_cycle_timer.timer);
+ timer_delete_sync(&llc->ack_timer.timer);
+ timer_delete_sync(&llc->rej_sent_timer.timer);
+ timer_delete_sync(&llc->busy_state_timer.timer);
} else {
- del_timer(&llc->pf_cycle_timer.timer);
- del_timer(&llc->ack_timer.timer);
- del_timer(&llc->rej_sent_timer.timer);
- del_timer(&llc->busy_state_timer.timer);
+ timer_delete(&llc->pf_cycle_timer.timer);
+ timer_delete(&llc->ack_timer.timer);
+ timer_delete(&llc->rej_sent_timer.timer);
+ timer_delete(&llc->busy_state_timer.timer);
}
llc->ack_must_be_send = 0;
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index aeb99d102c6e..85612234742a 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -103,13 +103,13 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
if (!tid_rx)
return;
- del_timer_sync(&tid_rx->session_timer);
+ timer_delete_sync(&tid_rx->session_timer);
/* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */
spin_lock_bh(&tid_rx->reorder_lock);
tid_rx->removed = true;
spin_unlock_bh(&tid_rx->reorder_lock);
- del_timer_sync(&tid_rx->reorder_timer);
+ timer_delete_sync(&tid_rx->reorder_timer);
call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
}
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 63a5e48291ac..8dc8c3c96b96 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -362,8 +362,8 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
sta->sta.addr, tid);
- del_timer_sync(&tid_tx->addba_resp_timer);
- del_timer_sync(&tid_tx->session_timer);
+ timer_delete_sync(&tid_tx->addba_resp_timer);
+ timer_delete_sync(&tid_tx->session_timer);
/*
* After this packets are no longer handed right through
@@ -1002,7 +1002,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
return;
}
- del_timer_sync(&tid_tx->addba_resp_timer);
+ timer_delete_sync(&tid_tx->addba_resp_timer);
ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n",
sta->sta.addr, tid);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 05a945df3259..4246d168374f 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -1844,7 +1844,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
skb_queue_purge(&sdata->skb_queue);
- del_timer_sync(&sdata->u.ibss.timer);
+ timer_delete_sync(&sdata->u.ibss.timer);
return 0;
}
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index b0423046028c..f0f4a250b10e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -526,7 +526,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
netif_addr_unlock_bh(sdata->dev);
}
- del_timer_sync(&local->dynamic_ps_timer);
+ timer_delete_sync(&local->dynamic_ps_timer);
wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work);
WARN(ieee80211_vif_is_mld(&sdata->vif),
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index 2dc732147e85..885fa6aa3fc1 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -342,7 +342,7 @@ static void ieee80211_stop_tpt_led_trig(struct ieee80211_local *local)
return;
tpt_trig->running = false;
- del_timer_sync(&tpt_trig->timer);
+ timer_delete_sync(&tpt_trig->timer);
led_trigger_event(&local->tpt_led, LED_OFF);
}
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 974081324aa4..7257f5610af5 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -706,7 +706,7 @@ void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
else {
clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
/* stop running timer */
- del_timer_sync(&ifmsh->mesh_path_root_timer);
+ timer_delete_sync(&ifmsh->mesh_path_root_timer);
}
}
@@ -1241,9 +1241,9 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf);
skb_queue_purge(&ifmsh->ps.bc_buf);
- del_timer_sync(&sdata->u.mesh.housekeeping_timer);
- del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
- del_timer_sync(&sdata->u.mesh.mesh_path_timer);
+ timer_delete_sync(&sdata->u.mesh.housekeeping_timer);
+ timer_delete_sync(&sdata->u.mesh.mesh_path_root_timer);
+ timer_delete_sync(&sdata->u.mesh.mesh_path_timer);
/* clear any mesh work (for next join) we may have accrued */
ifmsh->wrkq_flags = 0;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 5a0156e11c91..96e0a861886a 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -417,7 +417,7 @@ u64 mesh_plink_deactivate(struct sta_info *sta)
}
spin_unlock_bh(&sta->mesh->plink_lock);
if (!sdata->u.mesh.user_mpm)
- del_timer_sync(&sta->mesh->plink_timer);
+ timer_delete_sync(&sta->mesh->plink_timer);
mesh_path_flush_by_nexthop(sta);
/* make sure no readers can access nexthop sta from here on */
@@ -666,7 +666,7 @@ void mesh_plink_timer(struct timer_list *t)
/*
* This STA is valid because sta_info_destroy() will
- * del_timer_sync() this timer after having made sure
+ * timer_delete_sync() this timer after having made sure
* it cannot be re-added (by deleting the plink.)
*/
sta = mesh->plink_sta;
@@ -689,7 +689,7 @@ void mesh_plink_timer(struct timer_list *t)
return;
}
- /* del_timer() and handler may race when entering these states */
+ /* timer_delete() and handler may race when entering these states */
if (sta->mesh->plink_state == NL80211_PLINK_LISTEN ||
sta->mesh->plink_state == NL80211_PLINK_ESTAB) {
mpl_dbg(sta->sdata,
@@ -735,7 +735,7 @@ void mesh_plink_timer(struct timer_list *t)
break;
case NL80211_PLINK_HOLDING:
/* holding timer */
- del_timer(&sta->mesh->plink_timer);
+ timer_delete(&sta->mesh->plink_timer);
mesh_plink_fsm_restart(sta);
break;
default:
@@ -848,7 +848,7 @@ static u64 mesh_plink_establish(struct ieee80211_sub_if_data *sdata,
struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
u64 changed = 0;
- del_timer(&sta->mesh->plink_timer);
+ timer_delete(&sta->mesh->plink_timer);
sta->mesh->plink_state = NL80211_PLINK_ESTAB;
changed |= mesh_plink_inc_estab_count(sdata);
changed |= mesh_set_ht_prot_mode(sdata);
@@ -975,7 +975,7 @@ static u64 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
case NL80211_PLINK_HOLDING:
switch (event) {
case CLS_ACPT:
- del_timer(&sta->mesh->plink_timer);
+ timer_delete(&sta->mesh->plink_timer);
mesh_plink_fsm_restart(sta);
break;
case OPN_ACPT:
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index c010bb3d24e3..5d1f2d6d09ad 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3194,7 +3194,7 @@ static void ieee80211_change_ps(struct ieee80211_local *local)
} else if (conf->flags & IEEE80211_CONF_PS) {
conf->flags &= ~IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
- del_timer_sync(&local->dynamic_ps_timer);
+ timer_delete_sync(&local->dynamic_ps_timer);
wiphy_work_cancel(local->hw.wiphy,
&local->dynamic_ps_enable_work);
}
@@ -4069,7 +4069,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->deflink.ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
- del_timer_sync(&local->dynamic_ps_timer);
+ timer_delete_sync(&local->dynamic_ps_timer);
wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work);
/* Disable ARP filtering */
@@ -4097,9 +4097,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
/* disassociated - set to defaults now */
ieee80211_set_wmm_default(&sdata->deflink, false, false);
- del_timer_sync(&sdata->u.mgd.conn_mon_timer);
- del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
- del_timer_sync(&sdata->u.mgd.timer);
+ timer_delete_sync(&sdata->u.mgd.conn_mon_timer);
+ timer_delete_sync(&sdata->u.mgd.bcn_mon_timer);
+ timer_delete_sync(&sdata->u.mgd.timer);
sdata->vif.bss_conf.dtim_period = 0;
sdata->vif.bss_conf.beacon_rate = NULL;
@@ -4589,7 +4589,7 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
 * running is the timeout for the authentication response which
 * is not relevant anymore.
*/
- del_timer_sync(&sdata->u.mgd.timer);
+ timer_delete_sync(&sdata->u.mgd.timer);
sta_info_destroy_addr(sdata, auth_data->ap_addr);
/* other links are destroyed */
@@ -4628,7 +4628,7 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
 * running is the timeout for the association response which
 * is not relevant anymore.
*/
- del_timer_sync(&sdata->u.mgd.timer);
+ timer_delete_sync(&sdata->u.mgd.timer);
sta_info_destroy_addr(sdata, assoc_data->ap_addr);
eth_zero_addr(sdata->deflink.u.mgd.bssid);
@@ -9852,7 +9852,7 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
ifmgd->assoc_req_ies = NULL;
ifmgd->assoc_req_ies_len = 0;
spin_unlock_bh(&ifmgd->teardown_lock);
- del_timer_sync(&ifmgd->timer);
+ timer_delete_sync(&ifmgd->timer);
}
void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c
index 6218abc3e441..ece1e83c7b2f 100644
--- a/net/mac80211/ocb.c
+++ b/net/mac80211/ocb.c
@@ -230,7 +230,7 @@ int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata)
skb_queue_purge(&sdata->skb_queue);
- del_timer_sync(&sdata->u.ocb.housekeeping_timer);
+ timer_delete_sync(&sdata->u.ocb.housekeeping_timer);
/* If the timer fired while we waited for it, it will have
* requeued the work. Now the work will be running again
* but will not rearm the timer again because it checks
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 29fab7ae47b4..2b9abc27462e 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -30,9 +30,9 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
/* FIXME: what to do when local->pspolling is true? */
- del_timer_sync(&local->dynamic_ps_timer);
- del_timer_sync(&ifmgd->bcn_mon_timer);
- del_timer_sync(&ifmgd->conn_mon_timer);
+ timer_delete_sync(&local->dynamic_ps_timer);
+ timer_delete_sync(&ifmgd->bcn_mon_timer);
+ timer_delete_sync(&ifmgd->conn_mon_timer);
wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 7be52345f218..a9cc832240a5 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -69,14 +69,14 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
flush_workqueue(local->workqueue);
/* Don't try to run timers while suspended. */
- del_timer_sync(&local->sta_cleanup);
+ timer_delete_sync(&local->sta_cleanup);
/*
* Note that this particular timer doesn't need to be
* restarted at resume.
*/
wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work);
- del_timer_sync(&local->dynamic_ps_timer);
+ timer_delete_sync(&local->dynamic_ps_timer);
local->wowlan = wowlan;
if (local->wowlan) {
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index f7f89cd1b7d7..09beb65d6108 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1242,7 +1242,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
tid_agg_rx->reorder_time[j] + 1 +
HT_RX_REORDER_BUF_TIMEOUT);
} else {
- del_timer(&tid_agg_rx->reorder_timer);
+ timer_delete(&tid_agg_rx->reorder_timer);
}
}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 30cdc783999d..248e1f63bf73 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1592,7 +1592,7 @@ int sta_info_init(struct ieee80211_local *local)
void sta_info_stop(struct ieee80211_local *local)
{
- del_timer_sync(&local->sta_cleanup);
+ timer_delete_sync(&local->sta_cleanup);
rhltable_destroy(&local->sta_hash);
rhltable_destroy(&local->link_sta_hash);
}
diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
index f6de136008f6..dd895617defd 100644
--- a/net/mctp/af_mctp.c
+++ b/net/mctp/af_mctp.c
@@ -663,7 +663,7 @@ static void mctp_sk_unhash(struct sock *sk)
* keys), stop any pending expiry events. the timer cannot be re-queued
* as the sk is no longer observable
*/
- del_timer_sync(&msk->key_expiry);
+ timer_delete_sync(&msk->key_expiry);
}
static void mctp_sk_destruct(struct sock *sk)
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index b9f492ddf93b..83c629529b57 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -33,7 +33,7 @@ struct mpls_dev {
#define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field) \
do { \
- __typeof__(*(mdev)->stats) *ptr = \
+ TYPEOF_UNQUAL(*(mdev)->stats) *ptr = \
raw_cpu_ptr((mdev)->stats); \
local_bh_disable(); \
u64_stats_update_begin(&ptr->syncp); \
@@ -45,7 +45,7 @@ struct mpls_dev {
#define MPLS_INC_STATS(mdev, field) \
do { \
- __typeof__(*(mdev)->stats) *ptr = \
+ TYPEOF_UNQUAL(*(mdev)->stats) *ptr = \
raw_cpu_ptr((mdev)->stats); \
local_bh_disable(); \
u64_stats_update_begin(&ptr->syncp); \
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 18b19dbccbba..31747f974941 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -327,7 +327,7 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk,
list_del(&entry->list);
spin_unlock_bh(&msk->pm.lock);
- /* no lock, because sk_stop_timer_sync() is calling del_timer_sync() */
+ /* no lock, because sk_stop_timer_sync() is calling timer_delete_sync() */
if (add_timer)
sk_stop_timer_sync(sk, add_timer);
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index efe8d86496db..409bd415ef1d 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -754,8 +754,6 @@ static bool subflow_hmac_valid(const struct request_sock *req,
subflow_req = mptcp_subflow_rsk(req);
msk = subflow_req->msk;
- if (!msk)
- return false;
subflow_generate_hmac(READ_ONCE(msk->remote_key),
READ_ONCE(msk->local_key),
@@ -850,12 +848,8 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
} else if (subflow_req->mp_join) {
mptcp_get_options(skb, &mp_opt);
- if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) ||
- !subflow_hmac_valid(req, &mp_opt) ||
- !mptcp_can_accept_new_subflow(subflow_req->msk)) {
- SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
+ if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK))
fallback = true;
- }
}
create_child:
@@ -905,6 +899,13 @@ create_child:
goto dispose_child;
}
+ if (!subflow_hmac_valid(req, &mp_opt) ||
+ !mptcp_can_accept_new_subflow(subflow_req->msk)) {
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
+ subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
+ goto dispose_child;
+ }
+
/* move the msk reference ownership to the subflow */
subflow_req->msk = NULL;
ctx->conn = (struct sock *)owner;
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 7891a537bddd..b36947063783 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -189,7 +189,7 @@ void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
nc->monitor.enabled = false;
spin_unlock_irqrestore(&nc->lock, flags);
- del_timer_sync(&nc->monitor.timer);
+ timer_delete_sync(&nc->monitor.timer);
}
struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
@@ -396,7 +396,7 @@ void ncsi_free_request(struct ncsi_request *nr)
if (nr->enabled) {
nr->enabled = false;
- del_timer_sync(&nr->timer);
+ timer_delete_sync(&nr->timer);
}
spin_lock_irqsave(&ndp->lock, flags);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index df2dc21304ef..047ba81865ed 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -212,7 +212,7 @@ config NF_CT_PROTO_SCTP
bool 'SCTP protocol connection tracking support'
depends on NETFILTER_ADVANCED
default y
- select LIBCRC32C
+ select CRC32
help
With this option enabled, the layer 3 independent connection
tracking code will be able to do state tracking on SCTP connections.
@@ -475,7 +475,7 @@ endif # NF_CONNTRACK
config NF_TABLES
select NETFILTER_NETLINK
- select LIBCRC32C
+ select CRC32
tristate "Netfilter nf_tables support"
help
nftables is the new packet classification framework that intends to
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index cb48a2b9cb9f..6ae042f702d2 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -294,7 +294,7 @@ mtype_cancel_gc(struct ip_set *set)
struct mtype *map = set->data;
if (SET_WITH_TIMEOUT(set))
- del_timer_sync(&map->gc);
+ timer_delete_sync(&map->gc);
}
static const struct ip_set_type_variant mtype = {
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 2a3017b9c001..8c5b1fe12d07 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -105,7 +105,7 @@ config IP_VS_PROTO_AH
config IP_VS_PROTO_SCTP
bool "SCTP load balancing support"
- select LIBCRC32C
+ select CRC32
help
This option enables support for load balancing SCTP transport
protocol. Say Y if unsure.
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 20a1727e2457..8699944c0baf 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -822,7 +822,7 @@ static void ip_vs_conn_rcu_free(struct rcu_head *head)
/* Try to delete connection while not holding reference */
static void ip_vs_conn_del(struct ip_vs_conn *cp)
{
- if (del_timer(&cp->timer)) {
+ if (timer_delete(&cp->timer)) {
/* Drop cp->control chain too */
if (cp->control)
cp->timeout = 0;
@@ -833,7 +833,7 @@ static void ip_vs_conn_del(struct ip_vs_conn *cp)
/* Try to delete connection while holding reference */
static void ip_vs_conn_del_put(struct ip_vs_conn *cp)
{
- if (del_timer(&cp->timer)) {
+ if (timer_delete(&cp->timer)) {
/* Drop cp->control chain too */
if (cp->control)
cp->timeout = 0;
@@ -860,7 +860,7 @@ static void ip_vs_conn_expire(struct timer_list *t)
struct ip_vs_conn *ct = cp->control;
/* delete the timer if it is activated by other users */
- del_timer(&cp->timer);
+ timer_delete(&cp->timer);
/* does anybody control me? */
if (ct) {
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 0633276d96bf..7d5b7418f8c7 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -848,7 +848,7 @@ static void ip_vs_trash_cleanup(struct netns_ipvs *ipvs)
{
struct ip_vs_dest *dest, *nxt;
- del_timer_sync(&ipvs->dest_trash_timer);
+ timer_delete_sync(&ipvs->dest_trash_timer);
/* No need to use dest_trash_lock */
list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, t_list) {
list_del(&dest->t_list);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 21fa550966f0..21d22fa22e4e 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -118,7 +118,7 @@ nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple,
bool nf_ct_remove_expect(struct nf_conntrack_expect *exp)
{
- if (del_timer(&exp->timeout)) {
+ if (timer_delete(&exp->timeout)) {
nf_ct_unlink_expect(exp);
nf_ct_expect_put(exp);
return true;
@@ -214,11 +214,11 @@ nf_ct_find_expectation(struct net *net,
if (exp->flags & NF_CT_EXPECT_PERMANENT || !unlink) {
refcount_inc(&exp->use);
return exp;
- } else if (del_timer(&exp->timeout)) {
+ } else if (timer_delete(&exp->timeout)) {
nf_ct_unlink_expect(exp);
return exp;
}
- /* Undo exp->master refcnt increase, if del_timer() failed */
+ /* Undo exp->master refcnt increase, if timer_delete() failed */
nf_ct_put(exp->master);
return NULL;
@@ -520,7 +520,7 @@ void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, vo
hlist_for_each_entry_safe(exp, next,
&nf_ct_expect_hash[i],
hnode) {
- if (iter(exp, data) && del_timer(&exp->timeout)) {
+ if (iter(exp, data) && timer_delete(&exp->timeout)) {
nf_ct_unlink_expect(exp);
nf_ct_expect_put(exp);
}
@@ -550,7 +550,7 @@ void nf_ct_expect_iterate_net(struct net *net,
if (!net_eq(nf_ct_exp_net(exp), net))
continue;
- if (iter(exp, data) && del_timer(&exp->timeout)) {
+ if (iter(exp, data) && timer_delete(&exp->timeout)) {
nf_ct_unlink_expect_report(exp, portid, report);
nf_ct_expect_put(exp);
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index db23876a6016..2cc0fde23344 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -3448,7 +3448,7 @@ static int ctnetlink_del_expect(struct sk_buff *skb,
/* after list removal, usage count == 1 */
spin_lock_bh(&nf_conntrack_expect_lock);
- if (del_timer(&exp->timeout)) {
+ if (timer_delete(&exp->timeout)) {
nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
nlmsg_report(info->nlh));
nf_ct_expect_put(exp);
@@ -3477,7 +3477,7 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
const struct nlattr * const cda[])
{
if (cda[CTA_EXPECT_TIMEOUT]) {
- if (!del_timer(&x->timeout))
+ if (!timer_delete(&x->timeout))
return -ETIME;
x->timeout.expires = jiffies +
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index c2df81b7e950..a133e1c175ce 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2839,11 +2839,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
err = nft_netdev_register_hooks(ctx->net, &hook.list);
if (err < 0)
goto err_hooks;
+
+ unregister = true;
}
}
- unregister = true;
-
if (nla[NFTA_CHAIN_COUNTERS]) {
if (!nft_is_base_chain(chain)) {
err = -EOPNOTSUPP;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 134e05d31061..882962f3c84d 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -381,7 +381,7 @@ static void
__nfulnl_flush(struct nfulnl_instance *inst)
{
/* timer holds a reference */
- if (del_timer(&inst->timer))
+ if (timer_delete(&inst->timer))
instance_put(inst);
if (inst->skb)
__nfulnl_send(inst);
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 8bfac4185ac7..abb0c8ec6371 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -309,7 +309,8 @@ static bool nft_rhash_expr_needs_gc_run(const struct nft_set *set,
nft_setelem_expr_foreach(expr, elem_expr, size) {
if (expr->ops->gc &&
- expr->ops->gc(read_pnet(&set->net), expr))
+ expr->ops->gc(read_pnet(&set->net), expr) &&
+ set->flags & NFT_SET_EVAL)
return true;
}
diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
index 681301b46aa4..0c63d1367cf7 100644
--- a/net/netfilter/nft_tunnel.c
+++ b/net/netfilter/nft_tunnel.c
@@ -335,13 +335,13 @@ static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
[NFTA_TUNNEL_KEY_GENEVE_CLASS] = { .type = NLA_U16 },
[NFTA_TUNNEL_KEY_GENEVE_TYPE] = { .type = NLA_U8 },
- [NFTA_TUNNEL_KEY_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 },
+ [NFTA_TUNNEL_KEY_GENEVE_DATA] = { .type = NLA_BINARY, .len = 127 },
};
static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
struct nft_tunnel_opts *opts)
{
- struct geneve_opt *opt = (struct geneve_opt *)opts->u.data + opts->len;
+ struct geneve_opt *opt = (struct geneve_opt *)(opts->u.data + opts->len);
struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
int err, data_len;
@@ -625,7 +625,7 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb,
if (!inner)
goto failure;
while (opts->len > offset) {
- opt = (struct geneve_opt *)opts->u.data + offset;
+ opt = (struct geneve_opt *)(opts->u.data + offset);
if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
opt->opt_class) ||
nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
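Note: both nft_tunnel.c hunks above fix the same operator-precedence bug: a cast binds tighter than +, so (struct geneve_opt *)opts->u.data + offset advances the pointer by offset * sizeof(struct geneve_opt) bytes even though opts->len and offset are byte counts; the added parentheses make the arithmetic byte-based again. A minimal stand-alone demonstration (demo_opt is a stand-in type, not the kernel's struct geneve_opt; only its size matters):

	#include <stdio.h>

	struct demo_opt {
		unsigned short opt_class;
		unsigned char type;
		unsigned char length;
	};

	int main(void)
	{
		unsigned char data[64];
		int offset = 8;		/* byte offset of the next option */

		struct demo_opt *wrong = (struct demo_opt *)data + offset;	/* data + 8 * sizeof(struct demo_opt) */
		struct demo_opt *right = (struct demo_opt *)(data + offset);	/* data + 8 bytes */

		printf("wrong lands %td bytes in, right lands %td bytes in\n",
		       (unsigned char *)wrong - data,
		       (unsigned char *)right - data);
		return 0;
	}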
diff --git a/net/netrom/nr_loopback.c b/net/netrom/nr_loopback.c
index 511819fbfa67..7a9d765b30c0 100644
--- a/net/netrom/nr_loopback.c
+++ b/net/netrom/nr_loopback.c
@@ -68,6 +68,6 @@ static void nr_loopback_timer(struct timer_list *unused)
void nr_loopback_clear(void)
{
- del_timer_sync(&loopback_timer);
+ timer_delete_sync(&loopback_timer);
skb_queue_purge(&loopback_queue);
}
diff --git a/net/nfc/core.c b/net/nfc/core.c
index e58dc6405054..75ed8a9146ba 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -464,7 +464,7 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx, u8 mode)
}
if (dev->ops->check_presence)
- del_timer_sync(&dev->check_pres_timer);
+ timer_delete_sync(&dev->check_pres_timer);
dev->ops->deactivate_target(dev, dev->active_target, mode);
dev->active_target = NULL;
@@ -509,7 +509,7 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
}
if (dev->ops->check_presence)
- del_timer_sync(&dev->check_pres_timer);
+ timer_delete_sync(&dev->check_pres_timer);
rc = dev->ops->im_transceive(dev, dev->active_target, skb, cb,
cb_context);
@@ -1172,7 +1172,7 @@ void nfc_unregister_device(struct nfc_dev *dev)
device_unlock(&dev->dev);
if (dev->ops->check_presence) {
- del_timer_sync(&dev->check_pres_timer);
+ timer_delete_sync(&dev->check_pres_timer);
cancel_work_sync(&dev->check_pres_work);
}
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index ceb87db57cdb..aa493344d93e 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -148,7 +148,7 @@ static void nfc_hci_msg_rx_work(struct work_struct *work)
static void __nfc_hci_cmd_completion(struct nfc_hci_dev *hdev, int err,
struct sk_buff *skb)
{
- del_timer_sync(&hdev->cmd_timer);
+ timer_delete_sync(&hdev->cmd_timer);
if (hdev->cmd_pending_msg->cb)
hdev->cmd_pending_msg->cb(hdev->cmd_pending_msg->cb_context,
@@ -1046,7 +1046,7 @@ void nfc_hci_unregister_device(struct nfc_hci_dev *hdev)
mutex_unlock(&hdev->msg_tx_mutex);
- del_timer_sync(&hdev->cmd_timer);
+ timer_delete_sync(&hdev->cmd_timer);
cancel_work_sync(&hdev->msg_tx_work);
cancel_work_sync(&hdev->msg_rx_work);
diff --git a/net/nfc/hci/llc_shdlc.c b/net/nfc/hci/llc_shdlc.c
index e90f70385813..ce9c683a3ead 100644
--- a/net/nfc/hci/llc_shdlc.c
+++ b/net/nfc/hci/llc_shdlc.c
@@ -198,7 +198,7 @@ static void llc_shdlc_reset_t2(struct llc_shdlc *shdlc, int y_nr)
if (skb_queue_empty(&shdlc->ack_pending_q)) {
if (shdlc->t2_active) {
- del_timer_sync(&shdlc->t2_timer);
+ timer_delete_sync(&shdlc->t2_timer);
shdlc->t2_active = false;
pr_debug("All sent frames acked. Stopped T2(retransmit)\n");
@@ -289,7 +289,7 @@ static void llc_shdlc_rcv_rej(struct llc_shdlc *shdlc, int y_nr)
if (llc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) {
if (shdlc->t2_active) {
- del_timer_sync(&shdlc->t2_timer);
+ timer_delete_sync(&shdlc->t2_timer);
shdlc->t2_active = false;
pr_debug("Stopped T2(retransmit)\n");
}
@@ -342,7 +342,7 @@ static void llc_shdlc_connect_complete(struct llc_shdlc *shdlc, int r)
{
pr_debug("result=%d\n", r);
- del_timer_sync(&shdlc->connect_timer);
+ timer_delete_sync(&shdlc->connect_timer);
if (r == 0) {
shdlc->ns = 0;
@@ -526,7 +526,7 @@ static void llc_shdlc_handle_send_queue(struct llc_shdlc *shdlc)
(shdlc->rnr == false)) {
if (shdlc->t1_active) {
- del_timer_sync(&shdlc->t1_timer);
+ timer_delete_sync(&shdlc->t1_timer);
shdlc->t1_active = false;
pr_debug("Stopped T1(send ack)\n");
}
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index 18be13fb9b75..27e863f96ed1 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -160,14 +160,14 @@ static struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
static void local_cleanup(struct nfc_llcp_local *local)
{
nfc_llcp_socket_release(local, false, ENXIO);
- del_timer_sync(&local->link_timer);
+ timer_delete_sync(&local->link_timer);
skb_queue_purge(&local->tx_queue);
cancel_work_sync(&local->tx_work);
cancel_work_sync(&local->rx_work);
cancel_work_sync(&local->timeout_work);
kfree_skb(local->rx_pending);
local->rx_pending = NULL;
- del_timer_sync(&local->sdreq_timer);
+ timer_delete_sync(&local->sdreq_timer);
cancel_work_sync(&local->sdreq_timeout_work);
nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs);
}
@@ -1536,7 +1536,7 @@ static void nfc_llcp_rx_work(struct work_struct *work)
static void __nfc_llcp_recv(struct nfc_llcp_local *local, struct sk_buff *skb)
{
local->rx_pending = skb;
- del_timer(&local->link_timer);
+ timer_delete(&local->link_timer);
schedule_work(&local->rx_work);
}
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 1ec5955fe469..0171bf3c7016 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -565,8 +565,8 @@ static int nci_close_device(struct nci_dev *ndev)
* there is a queued/running cmd_work
*/
flush_workqueue(ndev->cmd_wq);
- del_timer_sync(&ndev->cmd_timer);
- del_timer_sync(&ndev->data_timer);
+ timer_delete_sync(&ndev->cmd_timer);
+ timer_delete_sync(&ndev->data_timer);
mutex_unlock(&ndev->req_lock);
return 0;
}
@@ -597,7 +597,7 @@ static int nci_close_device(struct nci_dev *ndev)
/* Flush cmd wq */
flush_workqueue(ndev->cmd_wq);
- del_timer_sync(&ndev->cmd_timer);
+ timer_delete_sync(&ndev->cmd_timer);
/* Clear flags except NCI_UNREG */
ndev->flags &= BIT(NCI_UNREG);
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index 3d36ea5701f0..78f4131af3cf 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -42,7 +42,7 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
pr_debug("len %d, err %d\n", skb ? skb->len : 0, err);
/* data exchange is complete, stop the data timer */
- del_timer_sync(&ndev->data_timer);
+ timer_delete_sync(&ndev->data_timer);
clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
if (cb) {
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
index b911ab78bed9..9eeb862825c5 100644
--- a/net/nfc/nci/rsp.c
+++ b/net/nfc/nci/rsp.c
@@ -347,7 +347,7 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
__u16 rsp_opcode = nci_opcode(skb->data);
/* we got a rsp, stop the cmd timer */
- del_timer(&ndev->cmd_timer);
+ timer_delete(&ndev->cmd_timer);
pr_debug("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
nci_pbf(skb->data),
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 2535f3f9f462..5481bd561eb4 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -11,7 +11,7 @@ config OPENVSWITCH
(!NF_NAT || NF_NAT) && \
(!NETFILTER_CONNCOUNT || NETFILTER_CONNCOUNT)))
depends on PSAMPLE || !PSAMPLE
- select LIBCRC32C
+ select CRC32
select MPLS
select NET_MPLS_GSO
select DST_CACHE
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 704c858cf209..61fea7baae5d 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -947,12 +947,6 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
pskb_trim(skb, ovs_mac_header_len(key));
}
- /* Need to set the pkt_type to involve the routing layer. The
- * packet movement through the OVS datapath doesn't generally
- * use routing, but this is needed for tunnel cases.
- */
- skb->pkt_type = PACKET_OUTGOING;
-
if (likely(!mru ||
(skb->len <= mru + vport->dev->hard_header_len))) {
ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 3e9ddf72cd03..d4dba06297c3 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -581,7 +581,7 @@ static __be16 vlan_get_protocol_dgram(const struct sk_buff *skb)
static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
- del_timer_sync(&pkc->retire_blk_timer);
+ timer_delete_sync(&pkc->retire_blk_timer);
}
static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index 0f77ae8ef944..9f9629e6fdae 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -32,7 +32,7 @@ static void rose_transmit_restart_request(struct rose_neigh *neigh);
void rose_start_ftimer(struct rose_neigh *neigh)
{
- del_timer(&neigh->ftimer);
+ timer_delete(&neigh->ftimer);
neigh->ftimer.function = rose_ftimer_expiry;
neigh->ftimer.expires =
@@ -43,7 +43,7 @@ void rose_start_ftimer(struct rose_neigh *neigh)
static void rose_start_t0timer(struct rose_neigh *neigh)
{
- del_timer(&neigh->t0timer);
+ timer_delete(&neigh->t0timer);
neigh->t0timer.function = rose_t0timer_expiry;
neigh->t0timer.expires =
@@ -54,12 +54,12 @@ static void rose_start_t0timer(struct rose_neigh *neigh)
void rose_stop_ftimer(struct rose_neigh *neigh)
{
- del_timer(&neigh->ftimer);
+ timer_delete(&neigh->ftimer);
}
void rose_stop_t0timer(struct rose_neigh *neigh)
{
- del_timer(&neigh->t0timer);
+ timer_delete(&neigh->t0timer);
}
int rose_ftimer_running(struct rose_neigh *neigh)
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index 036d92c0ad79..b538e39b3df5 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -124,7 +124,7 @@ void __exit rose_loopback_clear(void)
{
struct sk_buff *skb;
- del_timer(&loopback_timer);
+ timer_delete(&loopback_timer);
while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
skb->sk = NULL;
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index fee772b4637c..2dd6bd3a3011 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -227,8 +227,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
{
struct rose_neigh *s;
- del_timer_sync(&rose_neigh->ftimer);
- del_timer_sync(&rose_neigh->t0timer);
+ timer_delete_sync(&rose_neigh->ftimer);
+ timer_delete_sync(&rose_neigh->t0timer);
skb_queue_purge(&rose_neigh->queue);
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 8e477f7f8850..fec59d9338b9 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -469,7 +469,7 @@ bool rxrpc_input_call_event(struct rxrpc_call *call)
out:
if (__rxrpc_call_is_complete(call)) {
- del_timer_sync(&call->timer);
+ timer_delete_sync(&call->timer);
if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
rxrpc_disconnect_call(call);
if (call->security)
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index c4c8b46a68c6..fce58be65e7c 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -688,7 +688,7 @@ static void rxrpc_destroy_call(struct work_struct *work)
{
struct rxrpc_call *call = container_of(work, struct rxrpc_call, destroyer);
- del_timer_sync(&call->timer);
+ timer_delete_sync(&call->timer);
rxrpc_cleanup_tx_buffers(call);
rxrpc_cleanup_rx_buffers(call);
@@ -711,7 +711,7 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);
ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
- del_timer(&call->timer);
+ timer_delete(&call->timer);
if (rcu_read_lock_held())
/* Can't use the rxrpc workqueue as we need to cancel/flush
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index db0099197890..63bbcc567f59 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -818,7 +818,7 @@ void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
local->kill_all_client_conns = true;
- del_timer_sync(&local->client_conn_reap_timer);
+ timer_delete_sync(&local->client_conn_reap_timer);
while ((conn = list_first_entry_or_null(&local->idle_client_conns,
struct rxrpc_connection, cache_link))) {
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 2f1fd1e2e7e4..8ac22dde8b39 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -314,9 +314,9 @@ static void rxrpc_clean_up_connection(struct work_struct *work)
!conn->channels[3].call);
ASSERT(list_empty(&conn->cache_link));
- del_timer_sync(&conn->timer);
+ timer_delete_sync(&conn->timer);
cancel_work_sync(&conn->processor); /* Processing may restart the timer */
- del_timer_sync(&conn->timer);
+ timer_delete_sync(&conn->timer);
write_lock(&rxnet->conn_lock);
list_del_init(&conn->proc_link);
@@ -365,7 +365,7 @@ void rxrpc_put_connection(struct rxrpc_connection *conn,
dead = __refcount_dec_and_test(&conn->ref, &r);
trace_rxrpc_conn(debug_id, r - 1, why);
if (dead) {
- del_timer(&conn->timer);
+ timer_delete(&conn->timer);
cancel_work(&conn->processor);
if (in_softirq() || work_busy(&conn->processor) ||
@@ -470,7 +470,7 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
atomic_dec(&rxnet->nr_conns);
- del_timer_sync(&rxnet->service_conn_reap_timer);
+ timer_delete_sync(&rxnet->service_conn_reap_timer);
rxrpc_queue_work(&rxnet->service_conn_reaper);
flush_workqueue(rxrpc_workqueue);
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index a4c135d0fbcc..9a9834145e81 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -105,10 +105,10 @@ static __net_exit void rxrpc_exit_net(struct net *net)
struct rxrpc_net *rxnet = rxrpc_net(net);
rxnet->live = false;
- del_timer_sync(&rxnet->peer_keepalive_timer);
+ timer_delete_sync(&rxnet->peer_keepalive_timer);
cancel_work_sync(&rxnet->peer_keepalive_work);
/* Remove the timer again as the worker may have restarted it. */
- del_timer_sync(&rxnet->peer_keepalive_timer);
+ timer_delete_sync(&rxnet->peer_keepalive_timer);
rxrpc_destroy_all_calls(rxnet);
rxrpc_destroy_all_connections(rxnet);
rxrpc_destroy_all_peers(rxnet);
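Editorial note: every del_timer()/del_timer_sync() call touched in this patch is converted to the renamed timer_delete()/timer_delete_sync() API with unchanged semantics. A minimal, hypothetical sketch (not from this patch) of the shutdown ordering used in the rxrpc hunks above, where a work item may re-arm the timer:

#include <linux/timer.h>
#include <linux/workqueue.h>

struct demo_conn {
	struct timer_list timer;
	struct work_struct processor;
};

static void demo_conn_shutdown(struct demo_conn *conn)
{
	/* Stop a pending expiry and wait for a running handler to finish. */
	timer_delete_sync(&conn->timer);
	/* The processor work may re-arm the timer, so cancel it ... */
	cancel_work_sync(&conn->processor);
	/* ... and delete the timer once more, as conn_object.c does above. */
	timer_delete_sync(&conn->timer);
}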
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 8180d0c12fce..a800127effcd 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -784,7 +784,7 @@ config NET_ACT_SKBEDIT
config NET_ACT_CSUM
tristate "Checksum Updating"
depends on NET_CLS_ACT && INET
- select LIBCRC32C
+ select CRC32
help
Say Y here to update some common checksum after some direct
packet alterations.
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index ae5dea7c48a8..2cef4b08befb 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -68,7 +68,7 @@ geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
- .len = 128 },
+ .len = 127 },
};
static const struct nla_policy
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 03505673d523..099ff6a3e1f5 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -766,7 +766,7 @@ geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
- .len = 128 },
+ .len = 127 },
};
static const struct nla_policy
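Editorial note: both geneve option policies above tighten the NLA_BINARY bound from 128 to 127 bytes of option data; for NLA_BINARY, .len is the maximum payload length accepted during netlink validation, so oversized attributes are rejected before they reach the option-building code. A stand-alone hedged sketch of such a policy entry (attribute names are hypothetical, not from this patch):

#include <net/netlink.h>

enum {
	DEMO_ATTR_UNSPEC,
	DEMO_ATTR_OPT_DATA,	/* variable-length option payload */
	__DEMO_ATTR_MAX,
};
#define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
	/* Reject DEMO_ATTR_OPT_DATA payloads longer than 127 bytes. */
	[DEMO_ATTR_OPT_DATA] = { .type = NLA_BINARY, .len = 127 },
};

static int demo_parse(struct nlattr *opts, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[DEMO_ATTR_MAX + 1];

	/* nla_parse_nested() enforces the .len bound from the policy. */
	return nla_parse_nested(tb, DEMO_ATTR_MAX, opts, demo_policy, extack);
}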
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index defb05c1fba4..f74a097f54ae 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1267,38 +1267,8 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
struct qdisc_size_table *stab;
ops = qdisc_lookup_ops(kind);
-#ifdef CONFIG_MODULES
- if (ops == NULL && kind != NULL) {
- char name[IFNAMSIZ];
- if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
- /* We dropped the RTNL semaphore in order to
- * perform the module load. So, even if we
- * succeeded in loading the module we have to
- * tell the caller to replay the request. We
- * indicate this using -EAGAIN.
- * We replay the request because the device may
- * go away in the mean time.
- */
- netdev_unlock_ops(dev);
- rtnl_unlock();
- request_module(NET_SCH_ALIAS_PREFIX "%s", name);
- rtnl_lock();
- netdev_lock_ops(dev);
- ops = qdisc_lookup_ops(kind);
- if (ops != NULL) {
- /* We will try again qdisc_lookup_ops,
- * so don't keep a reference.
- */
- module_put(ops->owner);
- err = -EAGAIN;
- goto err_out;
- }
- }
- }
-#endif
-
- err = -ENOENT;
if (!ops) {
+ err = -ENOENT;
NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown");
goto err_out;
}
@@ -1623,8 +1593,7 @@ static int __tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
struct netlink_ext_ack *extack,
struct net_device *dev,
struct nlattr *tca[TCA_MAX + 1],
- struct tcmsg *tcm,
- bool *replay)
+ struct tcmsg *tcm)
{
struct Qdisc *q = NULL;
struct Qdisc *p = NULL;
@@ -1789,13 +1758,8 @@ create_n_graft2:
tcm->tcm_parent, tcm->tcm_handle,
tca, &err, extack);
}
- if (q == NULL) {
- if (err == -EAGAIN) {
- *replay = true;
- return 0;
- }
+ if (!q)
return err;
- }
graft:
err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
@@ -1808,6 +1772,27 @@ graft:
return 0;
}
+static void request_qdisc_module(struct nlattr *kind)
+{
+ struct Qdisc_ops *ops;
+ char name[IFNAMSIZ];
+
+ if (!kind)
+ return;
+
+ ops = qdisc_lookup_ops(kind);
+ if (ops) {
+ module_put(ops->owner);
+ return;
+ }
+
+ if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
+ rtnl_unlock();
+ request_module(NET_SCH_ALIAS_PREFIX "%s", name);
+ rtnl_lock();
+ }
+}
+
/*
* Create/change qdisc.
*/
@@ -1818,27 +1803,23 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
struct nlattr *tca[TCA_MAX + 1];
struct net_device *dev;
struct tcmsg *tcm;
- bool replay;
int err;
-replay:
- /* Reinit, just in case something touches this. */
err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
rtm_tca_policy, extack);
if (err < 0)
return err;
+ request_qdisc_module(tca[TCA_KIND]);
+
tcm = nlmsg_data(n);
dev = __dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev)
return -ENODEV;
- replay = false;
netdev_lock_ops(dev);
- err = __tc_modify_qdisc(skb, n, extack, dev, tca, tcm, &replay);
+ err = __tc_modify_qdisc(skb, n, extack, dev, tca, tcm);
netdev_unlock_ops(dev);
- if (replay)
- goto replay;
return err;
}
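Editorial note: the sch_api.c hunks above replace the old drop-RTNL/request_module()/-EAGAIN replay dance inside qdisc_create() with a single request_qdisc_module() call made before the device is looked up and locked, so __tc_modify_qdisc() no longer needs a replay flag. An illustrative sketch of the general pattern under the same assumption (the lookup helper and module alias are hypothetical; the real code uses qdisc_lookup_ops() and NET_SCH_ALIAS_PREFIX):

#include <linux/kmod.h>
#include <linux/rtnetlink.h>

/* Stand-in for qdisc_lookup_ops(): true when the handler for @kind is
 * already registered.  Hypothetical, for illustration only.
 */
static bool demo_ops_registered(const char *kind)
{
	return false;
}

/* Ensure the module backing @kind is loaded *before* any per-device
 * lock is taken.  Since nothing else is held yet, RTNL can simply be
 * dropped around request_module() and the caller never needs the old
 * -EAGAIN "replay the request" path.
 */
static void demo_request_module(const char *kind)
{
	if (demo_ops_registered(kind))
		return;

	rtnl_unlock();
	request_module("demo-%s", kind);
	rtnl_lock();
}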
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index 93c36afbf576..f3b8203d3e85 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -555,7 +555,7 @@ static void fq_pie_destroy(struct Qdisc *sch)
tcf_block_put(q->block);
q->p_params.tupdate = 0;
- del_timer_sync(&q->adapt_timer);
+ timer_delete_sync(&q->adapt_timer);
kvfree(q->flows);
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 14ab2f4c190a..514b1b6ac681 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -567,7 +567,7 @@ EXPORT_SYMBOL_GPL(netdev_watchdog_up);
static void netdev_watchdog_down(struct net_device *dev)
{
netif_tx_lock_bh(dev);
- if (del_timer(&dev->watchdog_timer))
+ if (timer_delete(&dev->watchdog_timer))
netdev_put(dev, &dev->watchdog_dev_tracker);
netif_tx_unlock_bh(dev);
}
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index bb1fa9aa530b..3771d000b30d 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -545,7 +545,7 @@ static void pie_destroy(struct Qdisc *sch)
struct pie_sched_data *q = qdisc_priv(sch);
q->params.tupdate = 0;
- del_timer_sync(&q->adapt_timer);
+ timer_delete_sync(&q->adapt_timer);
}
static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index ef8a2afed26b..1ba3e0bba54f 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -218,7 +218,7 @@ static void red_destroy(struct Qdisc *sch)
tcf_qevent_destroy(&q->qe_mark, sch);
tcf_qevent_destroy(&q->qe_early_drop, sch);
- del_timer_sync(&q->adapt_timer);
+ timer_delete_sync(&q->adapt_timer);
red_offload(sch, false);
qdisc_put(q->qdisc);
}
@@ -297,7 +297,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
max_P);
red_set_vars(&q->vars);
- del_timer(&q->adapt_timer);
+ timer_delete(&q->adapt_timer);
if (ctl->flags & TC_RED_ADAPTATIVE)
mod_timer(&q->adapt_timer, jiffies + HZ/2);
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 65d5b59da583..9ed197e96396 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -696,7 +696,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt,
rtnl_kfree_skbs(to_free, tail);
qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
- del_timer(&q->perturb_timer);
+ timer_delete(&q->perturb_timer);
if (q->perturb_period) {
mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
get_random_bytes(&q->perturbation, sizeof(q->perturbation));
@@ -722,7 +722,7 @@ static void sfq_destroy(struct Qdisc *sch)
tcf_block_put(q->block);
WRITE_ONCE(q->perturb_period, 0);
- del_timer_sync(&q->perturb_timer);
+ timer_delete_sync(&q->perturb_timer);
sfq_free(q->ht);
sfq_free(q->slots);
kfree(q->red_parms);
diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c
index 20ff7386b74b..f485f62ab721 100644
--- a/net/sched/sch_skbprio.c
+++ b/net/sched/sch_skbprio.c
@@ -123,8 +123,6 @@ static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* Check to update highest and lowest priorities. */
if (skb_queue_empty(lp_qdisc)) {
if (q->lowest_prio == q->highest_prio) {
- /* The incoming packet is the only packet in queue. */
- BUG_ON(sch->q.qlen != 1);
q->lowest_prio = prio;
q->highest_prio = prio;
} else {
@@ -156,7 +154,6 @@ static struct sk_buff *skbprio_dequeue(struct Qdisc *sch)
/* Update highest priority field. */
if (skb_queue_empty(hpq)) {
if (q->lowest_prio == q->highest_prio) {
- BUG_ON(sch->q.qlen);
q->highest_prio = 0;
q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
} else {
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index 5da599ff84a9..d18a72df3654 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -7,10 +7,10 @@ menuconfig IP_SCTP
tristate "The SCTP Protocol"
depends on INET
depends on IPV6 || IPV6=n
+ select CRC32
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_SHA1
- select LIBCRC32C
select NET_UDP_TUNNEL
help
Stream Control Transmission Protocol
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 0b0794f164cf..760152e751c7 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -362,7 +362,7 @@ void sctp_association_free(struct sctp_association *asoc)
* on our state.
*/
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
- if (del_timer(&asoc->timers[i]))
+ if (timer_delete(&asoc->timers[i]))
sctp_association_put(asoc);
}
@@ -1521,7 +1521,7 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
/* Stop the SACK timer. */
timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
- if (del_timer(timer))
+ if (timer_delete(timer))
sctp_association_put(asoc);
}
}
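Editorial note: throughout the SCTP hunks, del_timer() becomes timer_delete() inside the same idiom: the call returns true only if the timer was still pending, and in that case the reference the armed timer held on the association or transport is dropped. A minimal hedged sketch of that pattern (demo types, not from this patch):

#include <linux/timer.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_assoc {
	refcount_t ref;
	struct timer_list sack_timer;
};

static void demo_assoc_put(struct demo_assoc *asoc)
{
	if (refcount_dec_and_test(&asoc->ref))
		kfree(asoc);
}

static void demo_stop_sack_timer(struct demo_assoc *asoc)
{
	/* timer_delete() returns true only when the timer was pending,
	 * i.e. the reference taken when it was armed is still owed.
	 */
	if (timer_delete(&asoc->sack_timer))
		demo_assoc_put(asoc);
}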
diff --git a/net/sctp/input.c b/net/sctp/input.c
index a8a254a5008e..0c0d2757f6f8 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -446,7 +446,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
pr_debug("%s: unrecognized next header type "
"encountered!\n", __func__);
- if (del_timer(&t->proto_unreach_timer))
+ if (timer_delete(&t->proto_unreach_timer))
sctp_transport_put(t);
sctp_do_sm(net, SCTP_EVENT_T_OTHER,
diff --git a/net/sctp/output.c b/net/sctp/output.c
index a63df055ac57..23e96305cad7 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -312,7 +312,7 @@ static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
SCTP_MIB_OUTCTRLCHUNKS);
asoc->stats.octrlchunks++;
asoc->peer.sack_needed = 0;
- if (del_timer(timer))
+ if (timer_delete(timer))
sctp_association_put(asoc);
}
}
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 0dc6b8ab9963..f6b8c13dafa4 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1630,8 +1630,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
* as the receiver acknowledged any data.
*/
if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
- del_timer(&asoc->timers
- [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
+ timer_delete(&asoc->timers[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
sctp_association_put(asoc);
/* Mark the destination transport address as
@@ -1688,7 +1687,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
* address.
*/
if (!transport->flight_size) {
- if (del_timer(&transport->T3_rtx_timer))
+ if (timer_delete(&transport->T3_rtx_timer))
sctp_transport_put(transport);
} else if (restart_timer) {
if (!mod_timer(&transport->T3_rtx_timer,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 5407a3922101..8c3b80c4d40b 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -695,7 +695,7 @@ static void sctp_free_addr_wq(struct net *net)
struct sctp_sockaddr_entry *temp;
spin_lock_bh(&net->sctp.addr_wq_lock);
- del_timer(&net->sctp.addr_wq_timer);
+ timer_delete(&net->sctp.addr_wq_timer);
list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
list_del(&addrw->list);
kfree(addrw);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 23d6633966b1..3aa5da5e3bbd 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -734,7 +734,7 @@ static void sctp_cmd_hb_timers_stop(struct sctp_cmd_seq *cmds,
list_for_each_entry(t, &asoc->peer.transport_addr_list,
transports) {
- if (del_timer(&t->hb_timer))
+ if (timer_delete(&t->hb_timer))
sctp_transport_put(t);
}
}
@@ -747,7 +747,7 @@ static void sctp_cmd_t3_rtx_timers_stop(struct sctp_cmd_seq *cmds,
list_for_each_entry(t, &asoc->peer.transport_addr_list,
transports) {
- if (del_timer(&t->T3_rtx_timer))
+ if (timer_delete(&t->T3_rtx_timer))
sctp_transport_put(t);
}
}
@@ -1557,7 +1557,7 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
case SCTP_CMD_TIMER_STOP:
timer = &asoc->timers[cmd->obj.to];
- if (del_timer(timer))
+ if (timer_delete(timer))
sctp_association_put(asoc);
break;
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index bfcff6d6a438..f205556c5b24 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -576,7 +576,7 @@ struct sctp_chunk *sctp_process_strreset_outreq(
struct sctp_transport *t;
t = asoc->strreset_chunk->transport;
- if (del_timer(&t->reconf_timer))
+ if (timer_delete(&t->reconf_timer))
sctp_transport_put(t);
sctp_chunk_put(asoc->strreset_chunk);
@@ -825,7 +825,7 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
struct sctp_transport *t;
t = asoc->strreset_chunk->transport;
- if (del_timer(&t->reconf_timer))
+ if (timer_delete(&t->reconf_timer))
sctp_transport_put(t);
sctp_chunk_put(asoc->strreset_chunk);
@@ -1076,7 +1076,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
/* remove everything for this reconf request */
if (!asoc->strreset_outstanding) {
t = asoc->strreset_chunk->transport;
- if (del_timer(&t->reconf_timer))
+ if (timer_delete(&t->reconf_timer))
sctp_transport_put(t);
sctp_chunk_put(asoc->strreset_chunk);
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 8e1e97be4df7..ee3eac338a9d 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -525,6 +525,8 @@ static int proc_sctp_do_auth(const struct ctl_table *ctl, int write,
return ret;
}
+static DEFINE_MUTEX(sctp_sysctl_mutex);
+
static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
@@ -549,6 +551,7 @@ static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write,
if (new_value > max || new_value < min)
return -EINVAL;
+ mutex_lock(&sctp_sysctl_mutex);
net->sctp.udp_port = new_value;
sctp_udp_sock_stop(net);
if (new_value) {
@@ -561,6 +564,7 @@ static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write,
lock_sock(sk);
sctp_sk(sk)->udp_port = htons(net->sctp.udp_port);
release_sock(sk);
+ mutex_unlock(&sctp_sysctl_mutex);
}
return ret;
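Editorial note: the sysctl hunk above serializes writers of net.sctp.udp_port behind a new static mutex so that concurrent writes cannot interleave the UDP socket stop/start sequence. A stand-alone sketch of the same proc-handler locking pattern (handler name and fields are hypothetical, not from this patch):

#include <linux/mutex.h>
#include <linux/sysctl.h>

static DEFINE_MUTEX(demo_sysctl_mutex);
static unsigned int demo_restart_count;

static int proc_demo_do_port(const struct ctl_table *ctl, int write,
			     void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	/* Serialize the restart sequence against any concurrent writer of
	 * the same knob; stand-in for sctp_udp_sock_stop()/_start().
	 */
	mutex_lock(&demo_sysctl_mutex);
	demo_restart_count++;
	mutex_unlock(&demo_sysctl_mutex);
	return 0;
}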
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 2abe45af98e7..59675f6d9e7d 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -118,7 +118,7 @@ fail:
void sctp_transport_free(struct sctp_transport *transport)
{
/* Try to delete the heartbeat timer. */
- if (del_timer(&transport->hb_timer))
+ if (timer_delete(&transport->hb_timer))
sctp_transport_put(transport);
/* Delete the T3_rtx timer if it's active.
@@ -126,17 +126,17 @@ void sctp_transport_free(struct sctp_transport *transport)
* structure hang around in memory since we know
* the transport is going away.
*/
- if (del_timer(&transport->T3_rtx_timer))
+ if (timer_delete(&transport->T3_rtx_timer))
sctp_transport_put(transport);
- if (del_timer(&transport->reconf_timer))
+ if (timer_delete(&transport->reconf_timer))
sctp_transport_put(transport);
- if (del_timer(&transport->probe_timer))
+ if (timer_delete(&transport->probe_timer))
sctp_transport_put(transport);
/* Delete the ICMP proto unreachable timer if it's active. */
- if (del_timer(&transport->proto_unreach_timer))
+ if (timer_delete(&transport->proto_unreach_timer))
sctp_transport_put(transport);
sctp_transport_put(transport);
@@ -829,7 +829,7 @@ void sctp_transport_reset(struct sctp_transport *t)
void sctp_transport_immediate_rtx(struct sctp_transport *t)
{
/* Stop pending T3_rtx_timer */
- if (del_timer(&t->T3_rtx_timer))
+ if (timer_delete(&t->T3_rtx_timer))
sctp_transport_put(t);
sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 9a27201638e2..8f2d65c1e831 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -138,60 +138,6 @@ out:
return ret;
}
-/**
- * krb5_decrypt - simple decryption of an RPCSEC GSS payload
- * @tfm: initialized cipher transform
- * @iv: pointer to an IV
- * @in: ciphertext to decrypt
- * @out: OUT: plaintext
- * @length: length of input and output buffers, in bytes
- *
- * @iv may be NULL to force the use of an all-zero IV.
- * The buffer containing the IV must be as large as the
- * cipher's ivsize.
- *
- * Return values:
- * %0: @in successfully decrypted into @out
- * negative errno: @in not decrypted
- */
-u32
-krb5_decrypt(
- struct crypto_sync_skcipher *tfm,
- void * iv,
- void * in,
- void * out,
- int length)
-{
- u32 ret = -EINVAL;
- struct scatterlist sg[1];
- u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
-
- if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
- goto out;
-
- if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
- dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
- crypto_sync_skcipher_ivsize(tfm));
- goto out;
- }
- if (iv)
- memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));
-
- memcpy(out, in, length);
- sg_init_one(sg, out, length);
-
- skcipher_request_set_sync_tfm(req, tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, sg, sg, length, local_iv);
-
- ret = crypto_skcipher_decrypt(req);
- skcipher_request_zero(req);
-out:
- dprintk("RPC: gss_k5decrypt returns %d\n",ret);
- return ret;
-}
-
static int
checksummer(struct scatterlist *sg, void *data)
{
@@ -202,96 +148,6 @@ checksummer(struct scatterlist *sg, void *data)
return crypto_ahash_update(req);
}
-/*
- * checksum the plaintext data and hdrlen bytes of the token header
- * The checksum is performed over the first 8 bytes of the
- * gss token header and then over the data body
- */
-u32
-make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
- struct xdr_buf *body, int body_offset, u8 *cksumkey,
- unsigned int usage, struct xdr_netobj *cksumout)
-{
- struct crypto_ahash *tfm;
- struct ahash_request *req;
- struct scatterlist sg[1];
- int err = -1;
- u8 *checksumdata;
- unsigned int checksumlen;
-
- if (cksumout->len < kctx->gk5e->cksumlength) {
- dprintk("%s: checksum buffer length, %u, too small for %s\n",
- __func__, cksumout->len, kctx->gk5e->name);
- return GSS_S_FAILURE;
- }
-
- checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_KERNEL);
- if (checksumdata == NULL)
- return GSS_S_FAILURE;
-
- tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- goto out_free_cksum;
-
- req = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!req)
- goto out_free_ahash;
-
- ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
-
- checksumlen = crypto_ahash_digestsize(tfm);
-
- if (cksumkey != NULL) {
- err = crypto_ahash_setkey(tfm, cksumkey,
- kctx->gk5e->keylength);
- if (err)
- goto out;
- }
-
- err = crypto_ahash_init(req);
- if (err)
- goto out;
- sg_init_one(sg, header, hdrlen);
- ahash_request_set_crypt(req, sg, NULL, hdrlen);
- err = crypto_ahash_update(req);
- if (err)
- goto out;
- err = xdr_process_buf(body, body_offset, body->len - body_offset,
- checksummer, req);
- if (err)
- goto out;
- ahash_request_set_crypt(req, NULL, checksumdata, 0);
- err = crypto_ahash_final(req);
- if (err)
- goto out;
-
- switch (kctx->gk5e->ctype) {
- case CKSUMTYPE_RSA_MD5:
- err = krb5_encrypt(kctx->seq, NULL, checksumdata,
- checksumdata, checksumlen);
- if (err)
- goto out;
- memcpy(cksumout->data,
- checksumdata + checksumlen - kctx->gk5e->cksumlength,
- kctx->gk5e->cksumlength);
- break;
- case CKSUMTYPE_HMAC_SHA1_DES3:
- memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
- break;
- default:
- BUG();
- break;
- }
- cksumout->len = kctx->gk5e->cksumlength;
-out:
- ahash_request_free(req);
-out_free_ahash:
- crypto_free_ahash(tfm);
-out_free_cksum:
- kfree(checksumdata);
- return err ? GSS_S_FAILURE : 0;
-}
-
/**
* gss_krb5_checksum - Compute the MAC for a GSS Wrap or MIC token
* @tfm: an initialized hash transform
diff --git a/net/sunrpc/auth_gss/gss_krb5_internal.h b/net/sunrpc/auth_gss/gss_krb5_internal.h
index a47e9ec228a5..8769e9e705bf 100644
--- a/net/sunrpc/auth_gss/gss_krb5_internal.h
+++ b/net/sunrpc/auth_gss/gss_krb5_internal.h
@@ -155,10 +155,6 @@ static inline int krb5_derive_key(struct krb5_ctx *kctx,
void krb5_make_confounder(u8 *p, int conflen);
-u32 make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
- struct xdr_buf *body, int body_offset, u8 *cksumkey,
- unsigned int usage, struct xdr_netobj *cksumout);
-
u32 gss_krb5_checksum(struct crypto_ahash *tfm, char *header, int hdrlen,
const struct xdr_buf *body, int body_offset,
struct xdr_netobj *cksumout);
@@ -166,9 +162,6 @@ u32 gss_krb5_checksum(struct crypto_ahash *tfm, char *header, int hdrlen,
u32 krb5_encrypt(struct crypto_sync_skcipher *key, void *iv, void *in,
void *out, int length);
-u32 krb5_decrypt(struct crypto_sync_skcipher *key, void *iv, void *in,
- void *out, int length);
-
int xdr_extend_head(struct xdr_buf *buf, unsigned int base,
unsigned int shiftlen);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 7ce5e28a6c03..004cdb59f010 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1536,9 +1536,13 @@ static ssize_t write_flush(struct file *file, const char __user *buf,
* or by one second if it has already reached the current time.
* Newly added cache entries will always have ->last_refresh greater
* that ->flush_time, so they don't get flushed prematurely.
+ *
+ * If someone frequently calls the flush interface, we should
+ * immediately clean the corresponding cache_detail instead of
+ * continuously accumulating nextcheck.
*/
- if (cd->flush_time >= now)
+ if (cd->flush_time >= now && cd->flush_time < (now + 5))
now = cd->flush_time + 1;
cd->flush_time = now;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 2fe88ea79a70..6f75862d9782 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -270,9 +270,6 @@ static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
old = rcu_dereference_protected(clnt->cl_xprt,
lockdep_is_held(&clnt->cl_lock));
- if (!xprt_bound(xprt))
- clnt->cl_autobind = 1;
-
clnt->cl_timeout = timeout;
rcu_assign_pointer(clnt->cl_xprt, xprt);
spin_unlock(&clnt->cl_lock);
@@ -512,6 +509,8 @@ static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
clnt->cl_discrtry = 1;
if (!(args->flags & RPC_CLNT_CREATE_QUIET))
clnt->cl_chatty = 1;
+ if (args->flags & RPC_CLNT_CREATE_NETUNREACH_FATAL)
+ clnt->cl_netunreach_fatal = 1;
return clnt;
}
@@ -662,6 +661,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
new->cl_noretranstimeo = clnt->cl_noretranstimeo;
new->cl_discrtry = clnt->cl_discrtry;
new->cl_chatty = clnt->cl_chatty;
+ new->cl_netunreach_fatal = clnt->cl_netunreach_fatal;
new->cl_principal = clnt->cl_principal;
new->cl_max_connect = clnt->cl_max_connect;
return new;
@@ -1195,6 +1195,8 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
task->tk_flags |= RPC_TASK_TIMEOUT;
if (clnt->cl_noretranstimeo)
task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
+ if (clnt->cl_netunreach_fatal)
+ task->tk_flags |= RPC_TASK_NETUNREACH_FATAL;
atomic_inc(&clnt->cl_task_count);
}
@@ -2102,14 +2104,17 @@ call_bind_status(struct rpc_task *task)
case -EPROTONOSUPPORT:
trace_rpcb_bind_version_err(task);
goto retry_timeout;
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL)
+ break;
+ fallthrough;
case -ECONNREFUSED: /* connection problems */
case -ECONNRESET:
case -ECONNABORTED:
case -ENOTCONN:
case -EHOSTDOWN:
- case -ENETDOWN:
case -EHOSTUNREACH:
- case -ENETUNREACH:
case -EPIPE:
trace_rpcb_unreachable_err(task);
if (!RPC_IS_SOFTCONN(task)) {
@@ -2191,19 +2196,22 @@ call_connect_status(struct rpc_task *task)
task->tk_status = 0;
switch (status) {
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL)
+ break;
+ fallthrough;
case -ECONNREFUSED:
case -ECONNRESET:
/* A positive refusal suggests a rebind is needed. */
- if (RPC_IS_SOFTCONN(task))
- break;
if (clnt->cl_autobind) {
rpc_force_rebind(clnt);
+ if (RPC_IS_SOFTCONN(task))
+ break;
goto out_retry;
}
fallthrough;
case -ECONNABORTED:
- case -ENETDOWN:
- case -ENETUNREACH:
case -EHOSTUNREACH:
case -EPIPE:
case -EPROTO:
@@ -2455,10 +2463,13 @@ call_status(struct rpc_task *task)
trace_rpc_call_status(task);
task->tk_status = 0;
switch(status) {
- case -EHOSTDOWN:
case -ENETDOWN:
- case -EHOSTUNREACH:
case -ENETUNREACH:
+ if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL)
+ goto out_exit;
+ fallthrough;
+ case -EHOSTDOWN:
+ case -EHOSTUNREACH:
case -EPERM:
if (RPC_IS_SOFTCONN(task))
goto out_exit;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 102c3818bc54..53bcca365fb1 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -820,9 +820,10 @@ static void rpcb_getport_done(struct rpc_task *child, void *data)
}
trace_rpcb_setport(child, map->r_status, map->r_port);
- xprt->ops->set_port(xprt, map->r_port);
- if (map->r_port)
+ if (map->r_port) {
+ xprt->ops->set_port(xprt, map->r_port);
xprt_set_bound(xprt);
+ }
}
/*
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 9b45fbdc90ca..73bc39281ef5 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -276,6 +276,8 @@ EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
+ if (unlikely(current->flags & PF_EXITING))
+ return -EINTR;
schedule();
if (signal_pending_state(mode, current))
return -ERESTARTSYS;
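Editorial note: the sched.c hunk makes the killable RPC wait bail out with -EINTR when the current task is already exiting, presumably because a task past PF_EXITING can no longer be interrupted by a signal and could otherwise sleep indefinitely. A self-contained view of a wait_on_bit() action callback using the same check (demo name, not from this patch):

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/wait_bit.h>

static int demo_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	/* An exiting task has no usable signal delivery left; give up
	 * rather than sleep on a bit that may never clear.
	 */
	if (unlikely(current->flags & PF_EXITING))
		return -EINTR;
	schedule();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}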
diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c
index 5c8ecdaaa985..09434e1143c5 100644
--- a/net/sunrpc/sysfs.c
+++ b/net/sunrpc/sysfs.c
@@ -59,6 +59,16 @@ static struct kobject *rpc_sysfs_object_alloc(const char *name,
return NULL;
}
+static inline struct rpc_clnt *
+rpc_sysfs_client_kobj_get_clnt(struct kobject *kobj)
+{
+ struct rpc_sysfs_client *c = container_of(kobj,
+ struct rpc_sysfs_client, kobject);
+ struct rpc_clnt *ret = c->clnt;
+
+ return refcount_inc_not_zero(&ret->cl_count) ? ret : NULL;
+}
+
static inline struct rpc_xprt *
rpc_sysfs_xprt_kobj_get_xprt(struct kobject *kobj)
{
@@ -86,6 +96,51 @@ rpc_sysfs_xprt_switch_kobj_get_xprt(struct kobject *kobj)
return xprt_switch_get(x->xprt_switch);
}
+static ssize_t rpc_sysfs_clnt_version_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rpc_clnt *clnt = rpc_sysfs_client_kobj_get_clnt(kobj);
+ ssize_t ret;
+
+ if (!clnt)
+ return sprintf(buf, "<closed>\n");
+
+ ret = sprintf(buf, "%u", clnt->cl_vers);
+ refcount_dec(&clnt->cl_count);
+ return ret;
+}
+
+static ssize_t rpc_sysfs_clnt_program_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rpc_clnt *clnt = rpc_sysfs_client_kobj_get_clnt(kobj);
+ ssize_t ret;
+
+ if (!clnt)
+ return sprintf(buf, "<closed>\n");
+
+ ret = sprintf(buf, "%s", clnt->cl_program->name);
+ refcount_dec(&clnt->cl_count);
+ return ret;
+}
+
+static ssize_t rpc_sysfs_clnt_max_connect_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rpc_clnt *clnt = rpc_sysfs_client_kobj_get_clnt(kobj);
+ ssize_t ret;
+
+ if (!clnt)
+ return sprintf(buf, "<closed>\n");
+
+ ret = sprintf(buf, "%u\n", clnt->cl_max_connect);
+ refcount_dec(&clnt->cl_count);
+ return ret;
+}
+
static ssize_t rpc_sysfs_xprt_dstaddr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
@@ -129,6 +184,31 @@ static ssize_t rpc_sysfs_xprt_srcaddr_show(struct kobject *kobj,
return ret;
}
+static const char *xprtsec_strings[] = {
+ [RPC_XPRTSEC_NONE] = "none",
+ [RPC_XPRTSEC_TLS_ANON] = "tls-anon",
+ [RPC_XPRTSEC_TLS_X509] = "tls-x509",
+};
+
+static ssize_t rpc_sysfs_xprt_xprtsec_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj);
+ ssize_t ret;
+
+ if (!xprt) {
+ ret = sprintf(buf, "<closed>\n");
+ goto out;
+ }
+
+ ret = sprintf(buf, "%s\n", xprtsec_strings[xprt->xprtsec.policy]);
+ xprt_put(xprt);
+out:
+ return ret;
+
+}
+
static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -206,6 +286,14 @@ static ssize_t rpc_sysfs_xprt_state_show(struct kobject *kobj,
return ret;
}
+static ssize_t rpc_sysfs_xprt_del_xprt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "# delete this xprt\n");
+}
+
+
static ssize_t rpc_sysfs_xprt_switch_info_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
@@ -225,6 +313,55 @@ static ssize_t rpc_sysfs_xprt_switch_info_show(struct kobject *kobj,
return ret;
}
+static ssize_t rpc_sysfs_xprt_switch_add_xprt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "# add one xprt to this xprt_switch\n");
+}
+
+static ssize_t rpc_sysfs_xprt_switch_add_xprt_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rpc_xprt_switch *xprt_switch =
+ rpc_sysfs_xprt_switch_kobj_get_xprt(kobj);
+ struct xprt_create xprt_create_args;
+ struct rpc_xprt *xprt, *new;
+
+ if (!xprt_switch)
+ return 0;
+
+ xprt = rpc_xprt_switch_get_main_xprt(xprt_switch);
+ if (!xprt)
+ goto out;
+
+ xprt_create_args.ident = xprt->xprt_class->ident;
+ xprt_create_args.net = xprt->xprt_net;
+ xprt_create_args.dstaddr = (struct sockaddr *)&xprt->addr;
+ xprt_create_args.addrlen = xprt->addrlen;
+ xprt_create_args.servername = xprt->servername;
+ xprt_create_args.bc_xprt = xprt->bc_xprt;
+ xprt_create_args.xprtsec = xprt->xprtsec;
+ xprt_create_args.connect_timeout = xprt->connect_timeout;
+ xprt_create_args.reconnect_timeout = xprt->max_reconnect_timeout;
+
+ new = xprt_create_transport(&xprt_create_args);
+ if (IS_ERR_OR_NULL(new)) {
+ count = PTR_ERR(new);
+ goto out_put_xprt;
+ }
+
+ rpc_xprt_switch_add_xprt(xprt_switch, new);
+ xprt_put(new);
+
+out_put_xprt:
+ xprt_put(xprt);
+out:
+ xprt_switch_put(xprt_switch);
+ return count;
+}
+
static ssize_t rpc_sysfs_xprt_dstaddr_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
@@ -335,6 +472,40 @@ out_put:
return count;
}
+static ssize_t rpc_sysfs_xprt_del_xprt(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj);
+ struct rpc_xprt_switch *xps = rpc_sysfs_xprt_kobj_get_xprt_switch(kobj);
+
+ if (!xprt || !xps) {
+ count = 0;
+ goto out;
+ }
+
+ if (xprt->main) {
+ count = -EINVAL;
+ goto release_tasks;
+ }
+
+ if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) {
+ count = -EINTR;
+ goto out_put;
+ }
+
+ xprt_set_offline_locked(xprt, xps);
+ xprt_delete_locked(xprt, xps);
+
+release_tasks:
+ xprt_release_write(xprt, NULL);
+out_put:
+ xprt_put(xprt);
+ xprt_switch_put(xps);
+out:
+ return count;
+}
+
int rpc_sysfs_init(void)
{
rpc_sunrpc_kset = kset_create_and_add("sunrpc", NULL, kernel_kobj);
@@ -398,23 +569,48 @@ static const void *rpc_sysfs_xprt_namespace(const struct kobject *kobj)
kobject)->xprt->xprt_net;
}
+static struct kobj_attribute rpc_sysfs_clnt_version = __ATTR(rpc_version,
+ 0444, rpc_sysfs_clnt_version_show, NULL);
+
+static struct kobj_attribute rpc_sysfs_clnt_program = __ATTR(program,
+ 0444, rpc_sysfs_clnt_program_show, NULL);
+
+static struct kobj_attribute rpc_sysfs_clnt_max_connect = __ATTR(max_connect,
+ 0444, rpc_sysfs_clnt_max_connect_show, NULL);
+
+static struct attribute *rpc_sysfs_rpc_clnt_attrs[] = {
+ &rpc_sysfs_clnt_version.attr,
+ &rpc_sysfs_clnt_program.attr,
+ &rpc_sysfs_clnt_max_connect.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(rpc_sysfs_rpc_clnt);
+
static struct kobj_attribute rpc_sysfs_xprt_dstaddr = __ATTR(dstaddr,
0644, rpc_sysfs_xprt_dstaddr_show, rpc_sysfs_xprt_dstaddr_store);
static struct kobj_attribute rpc_sysfs_xprt_srcaddr = __ATTR(srcaddr,
0644, rpc_sysfs_xprt_srcaddr_show, NULL);
+static struct kobj_attribute rpc_sysfs_xprt_xprtsec = __ATTR(xprtsec,
+ 0644, rpc_sysfs_xprt_xprtsec_show, NULL);
+
static struct kobj_attribute rpc_sysfs_xprt_info = __ATTR(xprt_info,
0444, rpc_sysfs_xprt_info_show, NULL);
static struct kobj_attribute rpc_sysfs_xprt_change_state = __ATTR(xprt_state,
0644, rpc_sysfs_xprt_state_show, rpc_sysfs_xprt_state_change);
+static struct kobj_attribute rpc_sysfs_xprt_del = __ATTR(del_xprt,
+ 0644, rpc_sysfs_xprt_del_xprt_show, rpc_sysfs_xprt_del_xprt);
+
static struct attribute *rpc_sysfs_xprt_attrs[] = {
&rpc_sysfs_xprt_dstaddr.attr,
&rpc_sysfs_xprt_srcaddr.attr,
+ &rpc_sysfs_xprt_xprtsec.attr,
&rpc_sysfs_xprt_info.attr,
&rpc_sysfs_xprt_change_state.attr,
+ &rpc_sysfs_xprt_del.attr,
NULL,
};
ATTRIBUTE_GROUPS(rpc_sysfs_xprt);
@@ -422,14 +618,20 @@ ATTRIBUTE_GROUPS(rpc_sysfs_xprt);
static struct kobj_attribute rpc_sysfs_xprt_switch_info =
__ATTR(xprt_switch_info, 0444, rpc_sysfs_xprt_switch_info_show, NULL);
+static struct kobj_attribute rpc_sysfs_xprt_switch_add_xprt =
+ __ATTR(add_xprt, 0644, rpc_sysfs_xprt_switch_add_xprt_show,
+ rpc_sysfs_xprt_switch_add_xprt_store);
+
static struct attribute *rpc_sysfs_xprt_switch_attrs[] = {
&rpc_sysfs_xprt_switch_info.attr,
+ &rpc_sysfs_xprt_switch_add_xprt.attr,
NULL,
};
ATTRIBUTE_GROUPS(rpc_sysfs_xprt_switch);
static const struct kobj_type rpc_sysfs_client_type = {
.release = rpc_sysfs_client_release,
+ .default_groups = rpc_sysfs_rpc_clnt_groups,
.sysfs_ops = &kobj_sysfs_ops,
.namespace = rpc_sysfs_client_namespace,
};
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 09f245cda526..0eab15465511 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1167,7 +1167,7 @@ xprt_request_enqueue_receive(struct rpc_task *task)
spin_unlock(&xprt->queue_lock);
/* Turn off autodisconnect */
- del_timer_sync(&xprt->timer);
+ timer_delete_sync(&xprt->timer);
return 0;
}
@@ -2138,7 +2138,7 @@ static void xprt_destroy(struct rpc_xprt *xprt)
* can only run *before* del_time_sync(), never after.
*/
spin_lock(&xprt->transport_lock);
- del_timer_sync(&xprt->timer);
+ timer_delete_sync(&xprt->timer);
spin_unlock(&xprt->transport_lock);
/*
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index 7e98d4dd9f10..4c5e08b0aa64 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -92,6 +92,27 @@ void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps,
xprt_put(xprt);
}
+/**
+ * rpc_xprt_switch_get_main_xprt - Get the 'main' xprt for an xprt switch.
+ * @xps: pointer to struct rpc_xprt_switch.
+ */
+struct rpc_xprt *rpc_xprt_switch_get_main_xprt(struct rpc_xprt_switch *xps)
+{
+ struct rpc_xprt_iter xpi;
+ struct rpc_xprt *xprt;
+
+ xprt_iter_init_listall(&xpi, xps);
+
+ xprt = xprt_iter_get_next(&xpi);
+ while (xprt && !xprt->main) {
+ xprt_put(xprt);
+ xprt = xprt_iter_get_next(&xpi);
+ }
+
+ xprt_iter_destroy(&xpi);
+ return xprt;
+}
+
static DEFINE_IDA(rpc_xprtswitch_ids);
void xprt_multipath_cleanup_ids(void)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index c3fbf0779d4a..aca8bdf65d72 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -621,7 +621,8 @@ static void __svc_rdma_free(struct work_struct *work)
/* Destroy the CM ID */
rdma_destroy_id(rdma->sc_cm_id);
- rpcrdma_rn_unregister(device, &rdma->sc_rn);
+ if (!test_bit(XPT_LISTENER, &rdma->sc_xprt.xpt_flags))
+ rpcrdma_rn_unregister(device, &rdma->sc_rn);
kfree(rdma);
}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 500320e5ca47..ccf5e427f43e 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -638,7 +638,7 @@ static void tipc_node_delete(struct tipc_node *node)
trace_tipc_node_delete(node, true, " ");
tipc_node_delete_from_list(node);
- del_timer_sync(&node->timer);
+ timer_delete_sync(&node->timer);
tipc_node_put(node);
}
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 05d49ad81290..621addab2834 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -177,7 +177,7 @@ void tipc_sub_unsubscribe(struct tipc_subscription *sub)
{
tipc_nametbl_unsubscribe(sub);
if (sub->evt.s.timeout != TIPC_WAIT_FOREVER)
- del_timer_sync(&sub->timer);
+ timer_delete_sync(&sub->timer);
list_del(&sub->sub_list);
tipc_sub_put(sub);
}
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index f9e3d3d90dcf..03d508a45aae 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -37,17 +37,6 @@
#include "tls.h"
-static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
-{
- struct scatterlist *src = walk->sg;
- int diff = walk->offset - src->offset;
-
- sg_set_page(sg, sg_page(src),
- src->length - diff, walk->offset);
-
- scatterwalk_crypto_chain(sg, sg_next(src), 2);
-}
-
static int tls_enc_record(struct aead_request *aead_req,
struct crypto_aead *aead, char *aad,
char *iv, __be64 rcd_sn,
@@ -69,16 +58,13 @@ static int tls_enc_record(struct aead_request *aead_req,
buf_size = TLS_HEADER_SIZE + cipher_desc->iv;
len = min_t(int, *in_len, buf_size);
- scatterwalk_copychunks(buf, in, len, 0);
- scatterwalk_copychunks(buf, out, len, 1);
+ memcpy_from_scatterwalk(buf, in, len);
+ memcpy_to_scatterwalk(out, buf, len);
*in_len -= len;
if (!*in_len)
return 0;
- scatterwalk_pagedone(in, 0, 1);
- scatterwalk_pagedone(out, 1, 1);
-
len = buf[4] | (buf[3] << 8);
len -= cipher_desc->iv;
@@ -90,8 +76,8 @@ static int tls_enc_record(struct aead_request *aead_req,
sg_init_table(sg_out, ARRAY_SIZE(sg_out));
sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE);
sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE);
- chain_to_walk(sg_in + 1, in);
- chain_to_walk(sg_out + 1, out);
+ scatterwalk_get_sglist(in, sg_in + 1);
+ scatterwalk_get_sglist(out, sg_out + 1);
*in_len -= len;
if (*in_len < 0) {
@@ -110,10 +96,8 @@ static int tls_enc_record(struct aead_request *aead_req,
}
if (*in_len) {
- scatterwalk_copychunks(NULL, in, len, 2);
- scatterwalk_pagedone(in, 0, 1);
- scatterwalk_copychunks(NULL, out, len, 2);
- scatterwalk_pagedone(out, 1, 1);
+ scatterwalk_skip(in, len);
+ scatterwalk_skip(out, len);
}
len -= cipher_desc->tag;
@@ -162,9 +146,6 @@ static int tls_enc_records(struct aead_request *aead_req,
} while (rc == 0 && len);
- scatterwalk_done(&in, 0, 0);
- scatterwalk_done(&out, 1, 0);
-
return rc;
}
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 7e3db87ae433..fc6afbc8d680 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1551,7 +1551,11 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
timeout = vsk->connect_timeout;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
- while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) {
+ /* If the socket is already closing or it is in an error state, there
+ * is no point in waiting.
+ */
+ while (sk->sk_state != TCP_ESTABLISHED &&
+ sk->sk_state != TCP_CLOSING && sk->sk_err == 0) {
if (flags & O_NONBLOCK) {
/* If we're not going to block, we schedule a timeout
* function to generate a timeout on the connection
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 9e6b31903121..dcce326fdb8c 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1722,7 +1722,7 @@ void wiphy_delayed_work_queue(struct wiphy *wiphy,
trace_wiphy_delayed_work_queue(wiphy, &dwork->work, delay);
if (!delay) {
- del_timer(&dwork->timer);
+ timer_delete(&dwork->timer);
wiphy_work_queue(wiphy, &dwork->work);
return;
}
@@ -1737,7 +1737,7 @@ void wiphy_delayed_work_cancel(struct wiphy *wiphy,
{
lockdep_assert_held(&wiphy->mtx);
- del_timer_sync(&dwork->timer);
+ timer_delete_sync(&dwork->timer);
wiphy_work_cancel(wiphy, &dwork->work);
}
EXPORT_SYMBOL_GPL(wiphy_delayed_work_cancel);
@@ -1747,7 +1747,7 @@ void wiphy_delayed_work_flush(struct wiphy *wiphy,
{
lockdep_assert_held(&wiphy->mtx);
- del_timer_sync(&dwork->timer);
+ timer_delete_sync(&dwork->timer);
wiphy_work_flush(wiphy, &dwork->work);
}
EXPORT_SYMBOL_GPL(wiphy_delayed_work_flush);
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 5460b9146dd8..37b190499405 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -55,7 +55,7 @@ static void x25_t20timer_expiry(struct timer_list *t)
static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
- del_timer(&nb->t20timer);
+ timer_delete(&nb->t20timer);
}
/*
diff --git a/net/x25/x25_timer.c b/net/x25/x25_timer.c
index 9376365cdcc9..e4c5ad5b070f 100644
--- a/net/x25/x25_timer.c
+++ b/net/x25/x25_timer.c
@@ -41,7 +41,7 @@ void x25_start_heartbeat(struct sock *sk)
void x25_stop_heartbeat(struct sock *sk)
{
- del_timer(&sk->sk_timer);
+ timer_delete(&sk->sk_timer);
}
void x25_start_t2timer(struct sock *sk)
@@ -74,7 +74,7 @@ void x25_start_t23timer(struct sock *sk)
void x25_stop_timer(struct sock *sk)
{
- del_timer(&x25_sk(sk)->timer);
+ timer_delete(&x25_sk(sk)->timer);
}
unsigned long x25_display_timer(struct sock *sk)
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index e5d104ce7b82..5696af45bcf7 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -806,8 +806,11 @@ static int __xsk_generic_xmit(struct sock *sk)
* if there is space in it. This avoids having to implement
* any buffering in the Tx path.
*/
- if (xsk_cq_reserve_addr_locked(xs->pool, desc.addr))
+ err = xsk_cq_reserve_addr_locked(xs->pool, desc.addr);
+ if (err) {
+ err = -EAGAIN;
goto out;
+ }
skb = xsk_build_skb(xs, &desc);
if (IS_ERR(skb)) {
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index e6da7e8495c9..749011e031c0 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -5,13 +5,13 @@
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*/
+#include <crypto/acompress.h>
#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
-#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <net/xfrm.h>
#if IS_ENABLED(CONFIG_INET_ESP) || IS_ENABLED(CONFIG_INET6_ESP)
@@ -669,7 +669,7 @@ static const struct xfrm_algo_list xfrm_ealg_list = {
};
static const struct xfrm_algo_list xfrm_calg_list = {
- .find = crypto_has_comp,
+ .find = crypto_has_acomp,
.algs = calg_list,
.entries = ARRAY_SIZE(calg_list),
};
@@ -828,8 +828,7 @@ void xfrm_probe_algs(void)
}
for (i = 0; i < calg_entries(); i++) {
- status = crypto_has_comp(calg_list[i].name, 0,
- CRYPTO_ALG_ASYNC);
+ status = crypto_has_acomp(calg_list[i].name, 0, 0);
if (calg_list[i].available != status)
calg_list[i].available = status;
}
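Editorial note: xfrm_probe_algs() now probes compression algorithms through the acomp interface; crypto_has_acomp() takes (name, type, mask) like the old crypto_has_comp() helper and reports whether an implementation can be instantiated. A small hedged sketch of probing a list of algorithms before marking them available (demo list, not from this patch):

#include <crypto/acompress.h>
#include <linux/kernel.h>

struct demo_calg {
	const char *name;
	bool available;
};

static struct demo_calg demo_calgs[] = {
	{ .name = "deflate" },
	{ .name = "lzo" },
};

static void demo_probe_calgs(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_calgs); i++)
		/* Non-zero when an acomp (or wrapped scomp) implementation
		 * of this algorithm can be allocated.
		 */
		demo_calgs[i].available =
			crypto_has_acomp(demo_calgs[i].name, 0, 0);
}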
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index 9c0fa0e1786a..0c1420534394 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -3,7 +3,7 @@
* IP Payload Compression Protocol (IPComp) - RFC3173.
*
* Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
- * Copyright (c) 2003-2008 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2003-2025 Herbert Xu <herbert@gondor.apana.org.au>
*
* Todo:
* - Tunable compression parameters.
@@ -11,303 +11,302 @@
* - Adaptive compression.
*/
-#include <linux/crypto.h>
+#include <crypto/acompress.h>
#include <linux/err.h>
-#include <linux/list.h>
#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/percpu.h>
+#include <linux/skbuff_ref.h>
#include <linux/slab.h>
-#include <linux/smp.h>
-#include <linux/vmalloc.h>
-#include <net/ip.h>
#include <net/ipcomp.h>
#include <net/xfrm.h>
-struct ipcomp_tfms {
- struct list_head list;
- struct crypto_comp * __percpu *tfms;
- int users;
+#define IPCOMP_SCRATCH_SIZE 65400
+
+struct ipcomp_skb_cb {
+ struct xfrm_skb_cb xfrm;
+ struct acomp_req *req;
};
-static DEFINE_MUTEX(ipcomp_resource_mutex);
-static void * __percpu *ipcomp_scratches;
-static int ipcomp_scratch_users;
-static LIST_HEAD(ipcomp_tfms_list);
+struct ipcomp_data {
+ u16 threshold;
+ struct crypto_acomp *tfm;
+};
-static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
+struct ipcomp_req_extra {
+ struct xfrm_state *x;
+ struct scatterlist sg[];
+};
+
+static inline struct ipcomp_skb_cb *ipcomp_cb(struct sk_buff *skb)
{
- struct ipcomp_data *ipcd = x->data;
- const int plen = skb->len;
- int dlen = IPCOMP_SCRATCH_SIZE;
- const u8 *start = skb->data;
- u8 *scratch = *this_cpu_ptr(ipcomp_scratches);
- struct crypto_comp *tfm = *this_cpu_ptr(ipcd->tfms);
- int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
- int len;
+ struct ipcomp_skb_cb *cb = (void *)skb->cb;
- if (err)
- return err;
+ BUILD_BUG_ON(sizeof(*cb) > sizeof(skb->cb));
+ return cb;
+}
- if (dlen < (plen + sizeof(struct ip_comp_hdr)))
- return -EINVAL;
+static int ipcomp_post_acomp(struct sk_buff *skb, int err, int hlen)
+{
+ struct acomp_req *req = ipcomp_cb(skb)->req;
+ struct ipcomp_req_extra *extra;
+ const int plen = skb->data_len;
+ struct scatterlist *dsg;
+ int len, dlen;
- len = dlen - plen;
- if (len > skb_tailroom(skb))
- len = skb_tailroom(skb);
+ if (unlikely(err))
+ goto out_free_req;
- __skb_put(skb, len);
+ extra = acomp_request_extra(req);
+ dsg = extra->sg;
+ dlen = req->dlen;
- len += plen;
- skb_copy_to_linear_data(skb, scratch, len);
+ pskb_trim_unique(skb, 0);
+ __skb_put(skb, hlen);
- while ((scratch += len, dlen -= len) > 0) {
+ /* Only update truesize on input. */
+ if (!hlen)
+ skb->truesize += dlen - plen;
+ skb->data_len = dlen;
+ skb->len += dlen;
+
+ do {
skb_frag_t *frag;
struct page *page;
- if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
- return -EMSGSIZE;
-
frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
- page = alloc_page(GFP_ATOMIC);
-
- if (!page)
- return -ENOMEM;
+ page = sg_page(dsg);
+ dsg = sg_next(dsg);
len = PAGE_SIZE;
if (dlen < len)
len = dlen;
skb_frag_fill_page_desc(frag, page, 0, len);
- memcpy(skb_frag_address(frag), scratch, len);
-
- skb->truesize += len;
- skb->data_len += len;
- skb->len += len;
skb_shinfo(skb)->nr_frags++;
- }
+ } while ((dlen -= len));
- return 0;
+ for (; dsg; dsg = sg_next(dsg))
+ __free_page(sg_page(dsg));
+
+out_free_req:
+ acomp_request_free(req);
+ return err;
}
-int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
+static int ipcomp_input_done2(struct sk_buff *skb, int err)
{
- int nexthdr;
- int err = -ENOMEM;
- struct ip_comp_hdr *ipch;
-
- if (skb_linearize_cow(skb))
- goto out;
-
- skb->ip_summed = CHECKSUM_NONE;
+ struct ip_comp_hdr *ipch = ip_comp_hdr(skb);
+ const int plen = skb->len;
- /* Remove ipcomp header and decompress original payload */
- ipch = (void *)skb->data;
- nexthdr = ipch->nexthdr;
+ skb_reset_transport_header(skb);
- skb->transport_header = skb->network_header + sizeof(*ipch);
- __skb_pull(skb, sizeof(*ipch));
- err = ipcomp_decompress(x, skb);
- if (err)
- goto out;
+ return ipcomp_post_acomp(skb, err, 0) ?:
+ skb->len < (plen + sizeof(ip_comp_hdr)) ? -EINVAL :
+ ipch->nexthdr;
+}
- err = nexthdr;
+static void ipcomp_input_done(void *data, int err)
+{
+ struct sk_buff *skb = data;
-out:
- return err;
+ xfrm_input_resume(skb, ipcomp_input_done2(skb, err));
}
-EXPORT_SYMBOL_GPL(ipcomp_input);
-static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
+static struct acomp_req *ipcomp_setup_req(struct xfrm_state *x,
+ struct sk_buff *skb, int minhead,
+ int dlen)
{
+ const int dnfrags = min(MAX_SKB_FRAGS, 16);
struct ipcomp_data *ipcd = x->data;
+ struct ipcomp_req_extra *extra;
+ struct scatterlist *sg, *dsg;
const int plen = skb->len;
- int dlen = IPCOMP_SCRATCH_SIZE;
- u8 *start = skb->data;
- struct crypto_comp *tfm;
- u8 *scratch;
+ struct crypto_acomp *tfm;
+ struct acomp_req *req;
+ int nfrags;
+ int total;
int err;
+ int i;
- local_bh_disable();
- scratch = *this_cpu_ptr(ipcomp_scratches);
- tfm = *this_cpu_ptr(ipcd->tfms);
- err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
- if (err)
- goto out;
-
- if ((dlen + sizeof(struct ip_comp_hdr)) >= plen) {
- err = -EMSGSIZE;
- goto out;
- }
+ ipcomp_cb(skb)->req = NULL;
- memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
- local_bh_enable();
+ do {
+ struct sk_buff *trailer;
- pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
- return 0;
+ if (skb->len > PAGE_SIZE) {
+ if (skb_linearize_cow(skb))
+ return ERR_PTR(-ENOMEM);
+ nfrags = 1;
+ break;
+ }
-out:
- local_bh_enable();
- return err;
-}
+ if (!skb_cloned(skb) && skb_headlen(skb) >= minhead) {
+ if (!skb_is_nonlinear(skb)) {
+ nfrags = 1;
+ break;
+ } else if (!skb_has_frag_list(skb)) {
+ nfrags = skb_shinfo(skb)->nr_frags;
+ nfrags++;
+ break;
+ }
+ }
-int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
-{
- int err;
- struct ip_comp_hdr *ipch;
- struct ipcomp_data *ipcd = x->data;
+ nfrags = skb_cow_data(skb, skb_headlen(skb) < minhead ?
+ minhead - skb_headlen(skb) : 0,
+ &trailer);
+ if (nfrags < 0)
+ return ERR_PTR(nfrags);
+ } while (0);
+
+ tfm = ipcd->tfm;
+ req = acomp_request_alloc_extra(
+ tfm, sizeof(*extra) + sizeof(*sg) * (nfrags + dnfrags),
+ GFP_ATOMIC);
+ ipcomp_cb(skb)->req = req;
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ extra = acomp_request_extra(req);
+ extra->x = x;
+
+ dsg = extra->sg;
+ sg = dsg + dnfrags;
+ sg_init_table(sg, nfrags);
+ err = skb_to_sgvec(skb, sg, 0, plen);
+ if (unlikely(err < 0))
+ return ERR_PTR(err);
+
+ sg_init_table(dsg, dnfrags);
+ total = 0;
+ for (i = 0; i < dnfrags && total < dlen; i++) {
+ struct page *page;
- if (skb->len < ipcd->threshold) {
- /* Don't bother compressing */
- goto out_ok;
+ page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ break;
+ sg_set_page(dsg + i, page, PAGE_SIZE, 0);
+ total += PAGE_SIZE;
}
+ if (!i)
+ return ERR_PTR(-ENOMEM);
+ sg_mark_end(dsg + i - 1);
+ dlen = min(dlen, total);
- if (skb_linearize_cow(skb))
- goto out_ok;
-
- err = ipcomp_compress(x, skb);
-
- if (err) {
- goto out_ok;
- }
+ acomp_request_set_params(req, sg, dsg, plen, dlen);
- /* Install ipcomp header, convert into ipcomp datagram. */
- ipch = ip_comp_hdr(skb);
- ipch->nexthdr = *skb_mac_header(skb);
- ipch->flags = 0;
- ipch->cpi = htons((u16 )ntohl(x->id.spi));
- *skb_mac_header(skb) = IPPROTO_COMP;
-out_ok:
- skb_push(skb, -skb_network_offset(skb));
- return 0;
+ return req;
}
-EXPORT_SYMBOL_GPL(ipcomp_output);
-static void ipcomp_free_scratches(void)
+static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
- int i;
- void * __percpu *scratches;
-
- if (--ipcomp_scratch_users)
- return;
+ struct acomp_req *req;
+ int err;
- scratches = ipcomp_scratches;
- if (!scratches)
- return;
+ req = ipcomp_setup_req(x, skb, 0, IPCOMP_SCRATCH_SIZE);
+ err = PTR_ERR(req);
+ if (IS_ERR(req))
+ goto out;
- for_each_possible_cpu(i)
- vfree(*per_cpu_ptr(scratches, i));
+ acomp_request_set_callback(req, 0, ipcomp_input_done, skb);
+ err = crypto_acomp_decompress(req);
+ if (err == -EINPROGRESS)
+ return err;
- free_percpu(scratches);
- ipcomp_scratches = NULL;
+out:
+ return ipcomp_input_done2(skb, err);
}
-static void * __percpu *ipcomp_alloc_scratches(void)
+int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
{
- void * __percpu *scratches;
- int i;
-
- if (ipcomp_scratch_users++)
- return ipcomp_scratches;
-
- scratches = alloc_percpu(void *);
- if (!scratches)
- return NULL;
+ struct ip_comp_hdr *ipch __maybe_unused;
- ipcomp_scratches = scratches;
+ if (!pskb_may_pull(skb, sizeof(*ipch)))
+ return -EINVAL;
- for_each_possible_cpu(i) {
- void *scratch;
+ skb->ip_summed = CHECKSUM_NONE;
- scratch = vmalloc_node(IPCOMP_SCRATCH_SIZE, cpu_to_node(i));
- if (!scratch)
- return NULL;
- *per_cpu_ptr(scratches, i) = scratch;
- }
+ /* Remove ipcomp header and decompress original payload */
+ __skb_pull(skb, sizeof(*ipch));
- return scratches;
+ return ipcomp_decompress(x, skb);
}
+EXPORT_SYMBOL_GPL(ipcomp_input);
-static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms)
+static int ipcomp_output_push(struct sk_buff *skb)
{
- struct ipcomp_tfms *pos;
- int cpu;
-
- list_for_each_entry(pos, &ipcomp_tfms_list, list) {
- if (pos->tfms == tfms)
- break;
- }
-
- WARN_ON(list_entry_is_head(pos, &ipcomp_tfms_list, list));
-
- if (--pos->users)
- return;
+ skb_push(skb, -skb_network_offset(skb));
+ return 0;
+}
- list_del(&pos->list);
- kfree(pos);
+static int ipcomp_output_done2(struct xfrm_state *x, struct sk_buff *skb,
+ int err)
+{
+ struct ip_comp_hdr *ipch;
- if (!tfms)
- return;
+ err = ipcomp_post_acomp(skb, err, sizeof(*ipch));
+ if (err)
+ goto out_ok;
- for_each_possible_cpu(cpu) {
- struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
- crypto_free_comp(tfm);
- }
- free_percpu(tfms);
+ /* Install ipcomp header, convert into ipcomp datagram. */
+ ipch = ip_comp_hdr(skb);
+ ipch->nexthdr = *skb_mac_header(skb);
+ ipch->flags = 0;
+ ipch->cpi = htons((u16 )ntohl(x->id.spi));
+ *skb_mac_header(skb) = IPPROTO_COMP;
+out_ok:
+ return ipcomp_output_push(skb);
}
-static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
+static void ipcomp_output_done(void *data, int err)
{
- struct ipcomp_tfms *pos;
- struct crypto_comp * __percpu *tfms;
- int cpu;
+ struct ipcomp_req_extra *extra;
+ struct sk_buff *skb = data;
+ struct acomp_req *req;
+ req = ipcomp_cb(skb)->req;
+ extra = acomp_request_extra(req);
- list_for_each_entry(pos, &ipcomp_tfms_list, list) {
- struct crypto_comp *tfm;
+ xfrm_output_resume(skb_to_full_sk(skb), skb,
+ ipcomp_output_done2(extra->x, skb, err));
+}
- /* This can be any valid CPU ID so we don't need locking. */
- tfm = this_cpu_read(*pos->tfms);
+static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
+{
+ struct ip_comp_hdr *ipch __maybe_unused;
+ struct acomp_req *req;
+ int err;
- if (!strcmp(crypto_comp_name(tfm), alg_name)) {
- pos->users++;
- return pos->tfms;
- }
- }
+ req = ipcomp_setup_req(x, skb, sizeof(*ipch),
+ skb->len - sizeof(*ipch));
+ err = PTR_ERR(req);
+ if (IS_ERR(req))
+ goto out;
- pos = kmalloc(sizeof(*pos), GFP_KERNEL);
- if (!pos)
- return NULL;
+ acomp_request_set_callback(req, 0, ipcomp_output_done, skb);
+ err = crypto_acomp_compress(req);
+ if (err == -EINPROGRESS)
+ return err;
- pos->users = 1;
- INIT_LIST_HEAD(&pos->list);
- list_add(&pos->list, &ipcomp_tfms_list);
+out:
+ return ipcomp_output_done2(x, skb, err);
+}
- pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
- if (!tfms)
- goto error;
+int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
+{
+ struct ipcomp_data *ipcd = x->data;
- for_each_possible_cpu(cpu) {
- struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- goto error;
- *per_cpu_ptr(tfms, cpu) = tfm;
+ if (skb->len < ipcd->threshold) {
+ /* Don't bother compressing */
+ return ipcomp_output_push(skb);
}
- return tfms;
-
-error:
- ipcomp_free_tfms(tfms);
- return NULL;
+ return ipcomp_compress(x, skb);
}
+EXPORT_SYMBOL_GPL(ipcomp_output);
static void ipcomp_free_data(struct ipcomp_data *ipcd)
{
- if (ipcd->tfms)
- ipcomp_free_tfms(ipcd->tfms);
- ipcomp_free_scratches();
+ crypto_free_acomp(ipcd->tfm);
}
void ipcomp_destroy(struct xfrm_state *x)
@@ -316,9 +315,7 @@ void ipcomp_destroy(struct xfrm_state *x)
if (!ipcd)
return;
xfrm_state_delete_tunnel(x);
- mutex_lock(&ipcomp_resource_mutex);
ipcomp_free_data(ipcd);
- mutex_unlock(&ipcomp_resource_mutex);
kfree(ipcd);
}
EXPORT_SYMBOL_GPL(ipcomp_destroy);
@@ -345,14 +342,9 @@ int ipcomp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
if (!ipcd)
goto out;
- mutex_lock(&ipcomp_resource_mutex);
- if (!ipcomp_alloc_scratches())
- goto error;
-
- ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
- if (!ipcd->tfms)
+ ipcd->tfm = crypto_alloc_acomp(x->calg->alg_name, 0, 0);
+ if (IS_ERR(ipcd->tfm))
goto error;
- mutex_unlock(&ipcomp_resource_mutex);
calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0);
BUG_ON(!calg_desc);
@@ -364,7 +356,6 @@ out:
error:
ipcomp_free_data(ipcd);
- mutex_unlock(&ipcomp_resource_mutex);
kfree(ipcd);
goto out;
}
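Editorial note: the xfrm_ipcomp.c rewrite above drops the per-CPU crypto_comp transforms and scratch buffers in favour of one crypto_acomp handle per state, with per-packet acomp requests that operate on scatterlists built over the skb and may complete asynchronously (-EINPROGRESS). A compressed, hedged sketch of the basic acomp request lifecycle the new code relies on, shown here in its synchronous-wait form with illustrative buffers and names (not from this patch):

#include <crypto/acompress.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int demo_compress(const void *src, unsigned int slen,
			 void *dst, unsigned int dlen)
{
	struct scatterlist sg_src, sg_dst;
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	int err;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg_src, src, slen);
	sg_init_one(&sg_dst, dst, dlen);
	acomp_request_set_params(req, &sg_src, &sg_dst, slen, dlen);
	/* Synchronous wait for simplicity; ipcomp instead installs
	 * ipcomp_output_done()/ipcomp_input_done() as the callback and
	 * resumes the xfrm state machine when -EINPROGRESS completes.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
	if (!err)
		err = req->dlen;	/* bytes produced on success */

	acomp_request_free(req);
out_free_tfm:
	crypto_free_acomp(tfm);
	return err;
}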
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 30970d40a454..143ac3aa7537 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -462,7 +462,7 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
{
BUG_ON(!policy->walk.dead);
- if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
+ if (timer_delete(&policy->timer) || timer_delete(&policy->polq.hold_timer))
BUG();
xfrm_dev_policy_free(policy);
@@ -487,11 +487,11 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
atomic_inc(&policy->genid);
- if (del_timer(&policy->polq.hold_timer))
+ if (timer_delete(&policy->polq.hold_timer))
xfrm_pol_put(policy);
skb_queue_purge(&policy->polq.hold_queue);
- if (del_timer(&policy->timer))
+ if (timer_delete(&policy->timer))
xfrm_pol_put(policy);
/* XXX: Flush state cache */
@@ -1469,7 +1469,7 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
spin_lock_bh(&pq->hold_queue.lock);
skb_queue_splice_init(&pq->hold_queue, &list);
- if (del_timer(&pq->hold_timer))
+ if (timer_delete(&pq->hold_timer))
xfrm_pol_put(old);
spin_unlock_bh(&pq->hold_queue.lock);
@@ -3004,7 +3004,7 @@ static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *s
sched_next = jiffies + pq->timeout;
- if (del_timer(&pq->hold_timer)) {
+ if (timer_delete(&pq->hold_timer)) {
if (time_before(pq->hold_timer.expires, sched_next))
sched_next = pq->hold_timer.expires;
xfrm_pol_put(pol);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d896c3fefb07..341d79ecb5c2 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -598,7 +598,7 @@ static void ___xfrm_state_destroy(struct xfrm_state *x)
if (x->mode_cbs && x->mode_cbs->destroy_state)
x->mode_cbs->destroy_state(x);
hrtimer_cancel(&x->mtimer);
- del_timer_sync(&x->rtimer);
+ timer_delete_sync(&x->rtimer);
kfree(x->aead);
kfree(x->aalg);
kfree(x->ealg);