Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c                   |   3
-rw-r--r--  net/Kconfig                        |   5
-rw-r--r--  net/core/dev.c                     |  74
-rw-r--r--  net/core/net-sysfs.c               | 409
-rw-r--r--  net/core/net-sysfs.h               |   4
-rw-r--r--  net/core/netpoll.c                 |   3
-rw-r--r--  net/core/pktgen.c                  |   2
-rw-r--r--  net/core/rtnetlink.c               |  29
-rw-r--r--  net/core/scm.c                     |  10
-rw-r--r--  net/ipv4/devinet.c                 |  26
-rw-r--r--  net/ipv4/ipconfig.c                |  32
-rw-r--r--  net/ipv4/tcp_output.c              |   5
-rw-r--r--  net/ipv6/addrconf.c                |   6
-rw-r--r--  net/ipv6/inet6_connection_sock.c   |  22
-rw-r--r--  net/ipv6/ip6_tunnel.c              |   2
-rw-r--r--  net/ipv6/mcast.c                   |  75
-rw-r--r--  net/ipv6/reassembly.c              |  36
-rw-r--r--  net/ipv6/route.c                   |   2
-rw-r--r--  net/sched/sch_generic.c            |   8
-rw-r--r--  net/sched/sch_teql.c               |   3
-rw-r--r--  net/x25/af_x25.c                   | 100
-rw-r--r--  net/x25/x25_link.c                 |   8
22 files changed, 671 insertions(+), 193 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index dc1071327d87..6e64f7c6a2e9 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -341,9 +341,6 @@ static void vlan_transfer_features(struct net_device *dev,
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif
- vlandev->real_num_tx_queues = dev->real_num_tx_queues;
- BUG_ON(vlandev->real_num_tx_queues > vlandev->num_tx_queues);
-
if (old_features != vlandev->features)
netdev_features_change(vlandev);
}
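
These two lines go away because real_num_tx_queues changes are now expected to go through netif_set_real_num_tx_queues() (see the net/core/dev.c hunk below), which creates or destroys the matching per-queue sysfs kobjects as it resizes. A sketch of the call pattern a stacked device would use instead, under RTNL (names assumed, error handling abbreviated):

	rtnl_lock();
	err = netif_set_real_num_tx_queues(vlandev, real_dev->real_num_tx_queues);
	rtnl_unlock();
	if (err)
		return err;	/* helper leaves kobject state consistent on failure */
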
diff --git a/net/Kconfig b/net/Kconfig
index 55fd82e9ffd9..126c2af0fc1f 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -220,6 +220,11 @@ config RPS
depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
default y
+config XPS
+ boolean
+ depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
+ default y
+
menu "Network testing"
config NET_PKTGEN
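
XPS ("boolean" is the older Kconfig spelling of bool) has no prompt: it switches on automatically for SMP+SYSFS builds, exactly like RPS above it. The structures it guards live in include/linux/netdevice.h and therefore don't show in this net/-limited diffstat; recalled from the same patchset, so treat the details as a sketch:

	#ifdef CONFIG_XPS
	struct xps_map {			/* one CPU's list of queues */
		unsigned int len;
		unsigned int alloc_len;
		struct rcu_head rcu;
		u16 queues[0];
	};
	#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))

	struct xps_dev_maps {			/* RCU-managed, one slot per possible CPU */
		struct rcu_head rcu;
		struct xps_map __rcu *cpu_map[0];
	};
	#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
		(nr_cpu_ids * sizeof(struct xps_map *)))
	#endif
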
diff --git a/net/core/dev.c b/net/core/dev.c
index 381b8e280162..3259d2c323a6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1557,12 +1557,19 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
*/
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
+ int rc;
+
if (txq < 1 || txq > dev->num_tx_queues)
return -EINVAL;
if (dev->reg_state == NETREG_REGISTERED) {
ASSERT_RTNL();
+ rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
+ txq);
+ if (rc)
+ return rc;
+
if (txq < dev->real_num_tx_queues)
qdisc_reset_all_tx_gt(dev, txq);
}
@@ -2142,26 +2149,70 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
return queue_index;
}
+static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+ struct xps_dev_maps *dev_maps;
+ struct xps_map *map;
+ int queue_index = -1;
+
+ rcu_read_lock();
+ dev_maps = rcu_dereference(dev->xps_maps);
+ if (dev_maps) {
+ map = rcu_dereference(
+ dev_maps->cpu_map[raw_smp_processor_id()]);
+ if (map) {
+ if (map->len == 1)
+ queue_index = map->queues[0];
+ else {
+ u32 hash;
+ if (skb->sk && skb->sk->sk_hash)
+ hash = skb->sk->sk_hash;
+ else
+ hash = (__force u16) skb->protocol ^
+ skb->rxhash;
+ hash = jhash_1word(hash, hashrnd);
+ queue_index = map->queues[
+ ((u64)hash * map->len) >> 32];
+ }
+ if (unlikely(queue_index >= dev->real_num_tx_queues))
+ queue_index = -1;
+ }
+ }
+ rcu_read_unlock();
+
+ return queue_index;
+#else
+ return -1;
+#endif
+}
+
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
struct sk_buff *skb)
{
int queue_index;
const struct net_device_ops *ops = dev->netdev_ops;
- if (ops->ndo_select_queue) {
+ if (dev->real_num_tx_queues == 1)
+ queue_index = 0;
+ else if (ops->ndo_select_queue) {
queue_index = ops->ndo_select_queue(dev, skb);
queue_index = dev_cap_txqueue(dev, queue_index);
} else {
struct sock *sk = skb->sk;
queue_index = sk_tx_queue_get(sk);
- if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) {
- queue_index = 0;
- if (dev->real_num_tx_queues > 1)
+ if (queue_index < 0 || skb->ooo_okay ||
+ queue_index >= dev->real_num_tx_queues) {
+ int old_index = queue_index;
+
+ queue_index = get_xps_queue(dev, skb);
+ if (queue_index < 0)
queue_index = skb_tx_hash(dev, skb);
- if (sk) {
- struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);
+ if (queue_index != old_index && sk) {
+ struct dst_entry *dst =
+ rcu_dereference_check(sk->sk_dst_cache, 1);
if (dst && skb_dst(skb) == dst)
sk_tx_queue_set(sk, queue_index);
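
The interesting arithmetic in get_xps_queue() above is the queue pick: rather than hash % map->len, it takes the high half of the 64-bit product, which distributes a 32-bit hash uniformly over [0, map->len) with one multiply and no division. A standalone, runnable demo of the same reduction:

	#include <stdint.h>
	#include <stdio.h>

	/* Multiply-shift range reduction, as in get_xps_queue():
	 * ((u64)hash * len) >> 32 maps a 32-bit hash onto [0, len).
	 */
	static uint32_t reduce(uint32_t hash, uint32_t len)
	{
		return (uint32_t)(((uint64_t)hash * len) >> 32);
	}

	int main(void)
	{
		/* hash/2^32 ~ 0.5, so with 4 queues this lands on queue 2 */
		printf("%u\n", reduce(0x80000000u, 4));
		/* a hash near the top of the range lands on the last queue */
		printf("%u\n", reduce(0xffffffffu, 4));
		return 0;
	}
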
@@ -5037,9 +5088,9 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
+#ifdef CONFIG_RPS
static int netif_alloc_rx_queues(struct net_device *dev)
{
-#ifdef CONFIG_RPS
unsigned int i, count = dev->num_rx_queues;
struct netdev_rx_queue *rx;
@@ -5054,14 +5105,15 @@ static int netif_alloc_rx_queues(struct net_device *dev)
for (i = 0; i < count; i++)
rx[i].dev = dev;
-#endif
return 0;
}
+#endif
static int netif_alloc_netdev_queues(struct net_device *dev)
{
unsigned int count = dev->num_tx_queues;
struct netdev_queue *tx;
+ int i;
BUG_ON(count < 1);
@@ -5072,6 +5124,10 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
return -ENOMEM;
}
dev->_tx = tx;
+
+ for (i = 0; i < count; i++)
+ tx[i].dev = dev;
+
return 0;
}
@@ -5079,8 +5135,6 @@ static void netdev_init_one_queue(struct net_device *dev,
struct netdev_queue *queue,
void *_unused)
{
- queue->dev = dev;
-
/* Initialize queue lock */
spin_lock_init(&queue->_xmit_lock);
netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 7abeb7ceaa4c..f85cee3d869e 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -751,10 +751,12 @@ static int rx_queue_add_kobject(struct net_device *net, int index)
return error;
}
+#endif /* CONFIG_RPS */
int
net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
+#ifdef CONFIG_RPS
int i;
int error = 0;
@@ -770,23 +772,412 @@ net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
kobject_put(&net->_rx[i].kobj);
return error;
+#else
+ return 0;
+#endif
+}
+
+#ifdef CONFIG_XPS
+/*
+ * netdev_queue sysfs structures and functions.
+ */
+struct netdev_queue_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct netdev_queue *queue,
+ struct netdev_queue_attribute *attr, char *buf);
+ ssize_t (*store)(struct netdev_queue *queue,
+ struct netdev_queue_attribute *attr, const char *buf, size_t len);
+};
+#define to_netdev_queue_attr(_attr) container_of(_attr, \
+ struct netdev_queue_attribute, attr)
+
+#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
+
+static ssize_t netdev_queue_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
+ struct netdev_queue *queue = to_netdev_queue(kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(queue, attribute, buf);
}
-static int rx_queue_register_kobjects(struct net_device *net)
+static ssize_t netdev_queue_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
{
+ struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
+ struct netdev_queue *queue = to_netdev_queue(kobj);
+
+ if (!attribute->store)
+ return -EIO;
+
+ return attribute->store(queue, attribute, buf, count);
+}
+
+static const struct sysfs_ops netdev_queue_sysfs_ops = {
+ .show = netdev_queue_attr_show,
+ .store = netdev_queue_attr_store,
+};
+
+static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
+{
+ struct net_device *dev = queue->dev;
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++)
+ if (queue == &dev->_tx[i])
+ break;
+
+ BUG_ON(i >= dev->num_tx_queues);
+
+ return i;
+}
+
+
+static ssize_t show_xps_map(struct netdev_queue *queue,
+ struct netdev_queue_attribute *attribute, char *buf)
+{
+ struct net_device *dev = queue->dev;
+ struct xps_dev_maps *dev_maps;
+ cpumask_var_t mask;
+ unsigned long index;
+ size_t len = 0;
+ int i;
+
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ index = get_netdev_queue_index(queue);
+
+ rcu_read_lock();
+ dev_maps = rcu_dereference(dev->xps_maps);
+ if (dev_maps) {
+ for_each_possible_cpu(i) {
+ struct xps_map *map =
+ rcu_dereference(dev_maps->cpu_map[i]);
+ if (map) {
+ int j;
+ for (j = 0; j < map->len; j++) {
+ if (map->queues[j] == index) {
+ cpumask_set_cpu(i, mask);
+ break;
+ }
+ }
+ }
+ }
+ }
+ rcu_read_unlock();
+
+ len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
+ if (PAGE_SIZE - len < 3) {
+ free_cpumask_var(mask);
+ return -EINVAL;
+ }
+
+ free_cpumask_var(mask);
+ len += sprintf(buf + len, "\n");
+ return len;
+}
+
+static void xps_map_release(struct rcu_head *rcu)
+{
+ struct xps_map *map = container_of(rcu, struct xps_map, rcu);
+
+ kfree(map);
+}
+
+static void xps_dev_maps_release(struct rcu_head *rcu)
+{
+ struct xps_dev_maps *dev_maps =
+ container_of(rcu, struct xps_dev_maps, rcu);
+
+ kfree(dev_maps);
+}
+
+static DEFINE_MUTEX(xps_map_mutex);
+#define xmap_dereference(P) \
+ rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
+
+static ssize_t store_xps_map(struct netdev_queue *queue,
+ struct netdev_queue_attribute *attribute,
+ const char *buf, size_t len)
+{
+ struct net_device *dev = queue->dev;
+ cpumask_var_t mask;
+ int err, i, cpu, pos, map_len, alloc_len, need_set;
+ unsigned long index;
+ struct xps_map *map, *new_map;
+ struct xps_dev_maps *dev_maps, *new_dev_maps;
+ int nonempty = 0;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ index = get_netdev_queue_index(queue);
+
+ err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
+ if (err) {
+ free_cpumask_var(mask);
+ return err;
+ }
+
+ new_dev_maps = kzalloc(max_t(unsigned,
+ XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
+ if (!new_dev_maps) {
+ free_cpumask_var(mask);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&xps_map_mutex);
+
+ dev_maps = xmap_dereference(dev->xps_maps);
+
+ for_each_possible_cpu(cpu) {
+ map = dev_maps ?
+ xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+ new_map = map;
+ if (map) {
+ for (pos = 0; pos < map->len; pos++)
+ if (map->queues[pos] == index)
+ break;
+ map_len = map->len;
+ alloc_len = map->alloc_len;
+ } else
+ pos = map_len = alloc_len = 0;
+
+ need_set = cpu_isset(cpu, *mask) && cpu_online(cpu);
+
+ if (need_set && pos >= map_len) {
+ /* Need to add queue to this CPU's map */
+ if (map_len >= alloc_len) {
+ alloc_len = alloc_len ?
+ 2 * alloc_len : XPS_MIN_MAP_ALLOC;
+ new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
+ GFP_KERNEL,
+ cpu_to_node(cpu));
+ if (!new_map)
+ goto error;
+ new_map->alloc_len = alloc_len;
+ for (i = 0; i < map_len; i++)
+ new_map->queues[i] = map->queues[i];
+ new_map->len = map_len;
+ }
+ new_map->queues[new_map->len++] = index;
+ } else if (!need_set && pos < map_len) {
+ /* Need to remove queue from this CPU's map */
+ if (map_len > 1)
+ new_map->queues[pos] =
+ new_map->queues[--new_map->len];
+ else
+ new_map = NULL;
+ }
+ RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
+ }
+
+ /* Cleanup old maps */
+ for_each_possible_cpu(cpu) {
+ map = dev_maps ?
+ xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+ if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
+ call_rcu(&map->rcu, xps_map_release);
+ if (new_dev_maps->cpu_map[cpu])
+ nonempty = 1;
+ }
+
+ if (nonempty)
+ rcu_assign_pointer(dev->xps_maps, new_dev_maps);
+ else {
+ kfree(new_dev_maps);
+ rcu_assign_pointer(dev->xps_maps, NULL);
+ }
+
+ if (dev_maps)
+ call_rcu(&dev_maps->rcu, xps_dev_maps_release);
+
+ mutex_unlock(&xps_map_mutex);
+
+ free_cpumask_var(mask);
+ return len;
+
+error:
+ mutex_unlock(&xps_map_mutex);
+
+ if (new_dev_maps)
+ for_each_possible_cpu(i)
+ kfree(rcu_dereference_protected(
+ new_dev_maps->cpu_map[i],
+ 1));
+ kfree(new_dev_maps);
+ free_cpumask_var(mask);
+ return -ENOMEM;
+}
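
store_xps_map() is copy-update-publish: it builds a fresh xps_dev_maps, publishes it with rcu_assign_pointer(), and retires the old pieces through call_rcu() so in-flight readers in get_xps_queue() finish safely. Within a map, removal doesn't preserve order; the last queue id just fills the hole, as it also does in netdev_queue_release() below. The O(1) swap-remove idiom in isolation, as a runnable userspace demo:

	#include <stdio.h>

	/* Overwrite the removed slot with the last element and shrink,
	 * exactly what new_map->queues[pos] = new_map->queues[--new_map->len]
	 * does above.
	 */
	static void swap_remove(unsigned short *q, unsigned int *len, unsigned int pos)
	{
		q[pos] = q[--(*len)];
	}

	int main(void)
	{
		unsigned short q[] = { 0, 3, 5, 7 };
		unsigned int len = 4;

		swap_remove(q, &len, 1);	/* drop queue id 3 */
		for (unsigned int i = 0; i < len; i++)
			printf("%hu ", q[i]);	/* prints: 0 7 5 */
		printf("\n");
		return 0;
	}
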
+
+static struct netdev_queue_attribute xps_cpus_attribute =
+ __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
+
+static struct attribute *netdev_queue_default_attrs[] = {
+ &xps_cpus_attribute.attr,
+ NULL
+};
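
With xps_cpus registered as S_IRUGO | S_IWUSR, each TX queue gets a root-writable mask under the "queues" kset created in register_queue_kobjects() below, so the path should be /sys/class/net/<dev>/queues/tx-<n>/xps_cpus, and the store side parses a hex CPU mask via bitmap_parse(). A minimal userspace sketch (the interface name and mask are made up):

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical example: steer eth0's tx-0 to CPUs 0-3 ("f"). */
		FILE *f = fopen("/sys/class/net/eth0/queues/tx-0/xps_cpus", "w");

		if (!f) {
			perror("xps_cpus");
			return 1;
		}
		fputs("f\n", f);	/* hex cpumask, as bitmap_parse() expects */
		fclose(f);
		return 0;
	}
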
+
+static void netdev_queue_release(struct kobject *kobj)
+{
+ struct netdev_queue *queue = to_netdev_queue(kobj);
+ struct net_device *dev = queue->dev;
+ struct xps_dev_maps *dev_maps;
+ struct xps_map *map;
+ unsigned long index;
+ int i, pos, nonempty = 0;
+
+ index = get_netdev_queue_index(queue);
+
+ mutex_lock(&xps_map_mutex);
+ dev_maps = xmap_dereference(dev->xps_maps);
+
+ if (dev_maps) {
+ for_each_possible_cpu(i) {
+ map = xmap_dereference(dev_maps->cpu_map[i]);
+ if (!map)
+ continue;
+
+ for (pos = 0; pos < map->len; pos++)
+ if (map->queues[pos] == index)
+ break;
+
+ if (pos < map->len) {
+ if (map->len > 1)
+ map->queues[pos] =
+ map->queues[--map->len];
+ else {
+ RCU_INIT_POINTER(dev_maps->cpu_map[i],
+ NULL);
+ call_rcu(&map->rcu, xps_map_release);
+ map = NULL;
+ }
+ }
+ if (map)
+ nonempty = 1;
+ }
+
+ if (!nonempty) {
+ RCU_INIT_POINTER(dev->xps_maps, NULL);
+ call_rcu(&dev_maps->rcu, xps_dev_maps_release);
+ }
+ }
+
+ mutex_unlock(&xps_map_mutex);
+
+ memset(kobj, 0, sizeof(*kobj));
+ dev_put(queue->dev);
+}
+
+static struct kobj_type netdev_queue_ktype = {
+ .sysfs_ops = &netdev_queue_sysfs_ops,
+ .release = netdev_queue_release,
+ .default_attrs = netdev_queue_default_attrs,
+};
+
+static int netdev_queue_add_kobject(struct net_device *net, int index)
+{
+ struct netdev_queue *queue = net->_tx + index;
+ struct kobject *kobj = &queue->kobj;
+ int error = 0;
+
+ kobj->kset = net->queues_kset;
+ error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
+ "tx-%u", index);
+ if (error) {
+ kobject_put(kobj);
+ return error;
+ }
+
+ kobject_uevent(kobj, KOBJ_ADD);
+ dev_hold(queue->dev);
+
+ return error;
+}
+#endif /* CONFIG_XPS */
+
+int
+netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
+{
+#ifdef CONFIG_XPS
+ int i;
+ int error = 0;
+
+ for (i = old_num; i < new_num; i++) {
+ error = netdev_queue_add_kobject(net, i);
+ if (error) {
+ new_num = old_num;
+ break;
+ }
+ }
+
+ while (--i >= new_num)
+ kobject_put(&net->_tx[i].kobj);
+
+ return error;
+#else
+ return 0;
+#endif
+}
+
+static int register_queue_kobjects(struct net_device *net)
+{
+ int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
+
+#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
net->queues_kset = kset_create_and_add("queues",
NULL, &net->dev.kobj);
if (!net->queues_kset)
return -ENOMEM;
- return net_rx_queue_update_kobjects(net, 0, net->real_num_rx_queues);
+#endif
+
+#ifdef CONFIG_RPS
+ real_rx = net->real_num_rx_queues;
+#endif
+ real_tx = net->real_num_tx_queues;
+
+ error = net_rx_queue_update_kobjects(net, 0, real_rx);
+ if (error)
+ goto error;
+ rxq = real_rx;
+
+ error = netdev_queue_update_kobjects(net, 0, real_tx);
+ if (error)
+ goto error;
+ txq = real_tx;
+
+ return 0;
+
+error:
+ netdev_queue_update_kobjects(net, txq, 0);
+ net_rx_queue_update_kobjects(net, rxq, 0);
+ return error;
}
-static void rx_queue_remove_kobjects(struct net_device *net)
+static void remove_queue_kobjects(struct net_device *net)
{
- net_rx_queue_update_kobjects(net, net->real_num_rx_queues, 0);
+ int real_rx = 0, real_tx = 0;
+
+#ifdef CONFIG_RPS
+ real_rx = net->real_num_rx_queues;
+#endif
+ real_tx = net->real_num_tx_queues;
+
+ net_rx_queue_update_kobjects(net, real_rx, 0);
+ netdev_queue_update_kobjects(net, real_tx, 0);
+#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
kset_unregister(net->queues_kset);
+#endif
}
-#endif /* CONFIG_RPS */
static const void *net_current_ns(void)
{
@@ -885,9 +1276,7 @@ void netdev_unregister_kobject(struct net_device * net)
kobject_get(&dev->kobj);
-#ifdef CONFIG_RPS
- rx_queue_remove_kobjects(net);
-#endif
+ remove_queue_kobjects(net);
device_del(dev);
}
@@ -926,13 +1315,11 @@ int netdev_register_kobject(struct net_device *net)
if (error)
return error;
-#ifdef CONFIG_RPS
- error = rx_queue_register_kobjects(net);
+ error = register_queue_kobjects(net);
if (error) {
device_del(dev);
return error;
}
-#endif
return error;
}
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h
index 778e1571548d..bd7751ec1c4d 100644
--- a/net/core/net-sysfs.h
+++ b/net/core/net-sysfs.h
@@ -4,8 +4,8 @@
int netdev_kobject_init(void);
int netdev_register_kobject(struct net_device *);
void netdev_unregister_kobject(struct net_device *);
-#ifdef CONFIG_RPS
int net_rx_queue_update_kobjects(struct net_device *, int old_num, int new_num);
-#endif
+int netdev_queue_update_kobjects(struct net_device *net,
+ int old_num, int new_num);
#endif
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 4e98ffac3af0..ee38acb6d463 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -76,8 +76,7 @@ static void queue_process(struct work_struct *work)
local_irq_save(flags);
__netif_tx_lock(txq, smp_processor_id());
- if (netif_tx_queue_stopped(txq) ||
- netif_tx_queue_frozen(txq) ||
+ if (netif_tx_queue_frozen_or_stopped(txq) ||
ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
skb_queue_head(&npinfo->txq, skb);
__netif_tx_unlock(txq);
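
netpoll here, and pktgen/sch_generic/sch_teql below, all collapse the stopped-or-frozen double test into one helper added in include/linux/netdevice.h (outside this net/-limited diffstat). Recalled shape, so treat as a sketch; the point is a single read of the queue state word covering both bits:

	static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
	{
		return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
	}
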
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2e57830cbeb2..2953b2abc971 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3527,7 +3527,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
__netif_tx_lock_bh(txq);
- if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) {
+ if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) {
ret = NETDEV_TX_BUSY;
pkt_dev->last_ok = 0;
goto unlock;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index bf69e5871b1a..750db57f3bb3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1107,6 +1107,28 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
return -EINVAL;
}
+ if (tb[IFLA_AF_SPEC]) {
+ struct nlattr *af;
+ int rem, err;
+
+ nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
+ const struct rtnl_af_ops *af_ops;
+
+ if (!(af_ops = rtnl_af_lookup(nla_type(af))))
+ return -EAFNOSUPPORT;
+
+ if (!af_ops->set_link_af)
+ return -EOPNOTSUPP;
+
+ if (af_ops->validate_link_af) {
+ err = af_ops->validate_link_af(dev,
+ tb[IFLA_AF_SPEC]);
+ if (err < 0)
+ return err;
+ }
+ }
+ }
+
return 0;
}
@@ -1356,12 +1378,9 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
const struct rtnl_af_ops *af_ops;
if (!(af_ops = rtnl_af_lookup(nla_type(af))))
- continue;
-
- if (!af_ops->parse_link_af)
- continue;
+ BUG();
- err = af_ops->parse_link_af(dev, af);
+ err = af_ops->set_link_af(dev, af);
if (err < 0)
goto errout;
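
This is a two-phase update: by the time do_setlink() reaches the IFLA_AF_SPEC attributes it may already have committed other device changes, so failing there would leave a half-applied request. validate_linkmsg() now rejects anything that could fail up front, which is why the commit path is entitled to BUG() on a lookup that validation already proved succeeds. The reshaped ops table, as recalled from include/net/rtnetlink.h in this change (a sketch, details may differ):

	struct rtnl_af_ops {
		struct list_head	list;
		int			family;

		int			(*fill_link_af)(struct sk_buff *skb,
							const struct net_device *dev);
		size_t			(*get_link_af_size)(const struct net_device *dev);

		int			(*validate_link_af)(const struct net_device *dev,
							    const struct nlattr *attr);
		int			(*set_link_af)(struct net_device *dev,
						       const struct nlattr *attr);
	};
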
diff --git a/net/core/scm.c b/net/core/scm.c
index 413cab89017d..bbe454450801 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -79,10 +79,11 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
return -ENOMEM;
*fplp = fpl;
fpl->count = 0;
+ fpl->max = SCM_MAX_FD;
}
fpp = &fpl->fp[fpl->count];
- if (fpl->count + num > SCM_MAX_FD)
+ if (fpl->count + num > fpl->max)
return -EINVAL;
/*
@@ -331,11 +332,12 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
if (!fpl)
return NULL;
- new_fpl = kmalloc(sizeof(*fpl), GFP_KERNEL);
+ new_fpl = kmemdup(fpl, offsetof(struct scm_fp_list, fp[fpl->count]),
+ GFP_KERNEL);
if (new_fpl) {
- for (i=fpl->count-1; i>=0; i--)
+ for (i = 0; i < fpl->count; i++)
get_file(fpl->fp[i]);
- memcpy(new_fpl, fpl, sizeof(*fpl));
+ new_fpl->max = new_fpl->count;
}
return new_fpl;
}
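
scm_fp_dup() previously copied the whole fixed-size struct; kmemdup() sized with offsetof(struct scm_fp_list, fp[fpl->count]) copies only the header plus the slots actually in use, and capping max to count keeps callers from appending into space that was never duplicated. A runnable userspace analogue, with a hypothetical struct standing in for scm_fp_list:

	#include <stdio.h>
	#include <stddef.h>
	#include <string.h>
	#include <stdlib.h>

	struct fp_list {
		short count, max;
		void *fp[8];
	};

	int main(void)
	{
		struct fp_list src = { .count = 2, .max = 8 };
		/* portable spelling of offsetof(struct fp_list, fp[src.count]) */
		size_t sz = offsetof(struct fp_list, fp) + src.count * sizeof(src.fp[0]);
		struct fp_list *dup = malloc(sizeof(*dup));

		if (!dup)
			return 1;
		memcpy(dup, &src, sz);	/* kmemdup() analogue: header + used slots */
		dup->max = dup->count;	/* dup may not grow past what was copied */
		printf("copied %zu of %zu bytes\n", sz, sizeof(src));
		free(dup);
		return 0;
	}
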
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 71afc26c2df8..d9f71bae45c4 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1289,14 +1289,14 @@ static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
[IFLA_INET_CONF] = { .type = NLA_NESTED },
};
-static int inet_parse_link_af(struct net_device *dev, const struct nlattr *nla)
+static int inet_validate_link_af(const struct net_device *dev,
+ const struct nlattr *nla)
{
- struct in_device *in_dev = __in_dev_get_rcu(dev);
struct nlattr *a, *tb[IFLA_INET_MAX+1];
int err, rem;
- if (!in_dev)
- return -EOPNOTSUPP;
+ if (dev && !__in_dev_get_rcu(dev))
+ return -EAFNOSUPPORT;
err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
if (err < 0)
@@ -1314,6 +1314,21 @@ static int inet_parse_link_af(struct net_device *dev, const struct nlattr *nla)
}
}
+ return 0;
+}
+
+static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
+{
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
+ struct nlattr *a, *tb[IFLA_INET_MAX+1];
+ int rem;
+
+ if (!in_dev)
+ return -EAFNOSUPPORT;
+
+ if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0)
+ BUG();
+
if (tb[IFLA_INET_CONF]) {
nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
@@ -1689,7 +1704,8 @@ static struct rtnl_af_ops inet_af_ops = {
.family = AF_INET,
.fill_link_af = inet_fill_link_af,
.get_link_af_size = inet_get_link_af_size,
- .parse_link_af = inet_parse_link_af,
+ .validate_link_af = inet_validate_link_af,
+ .set_link_af = inet_set_link_af,
};
void __init devinet_init(void)
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 3a6e1ec5e9ae..2b097752426b 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1191,13 +1191,13 @@ static int __init ic_dynamic(void)
(ic_proto_enabled & IC_USE_DHCP) &&
ic_dhcp_msgtype != DHCPACK) {
ic_got_reply = 0;
- printk(",");
+ printk(KERN_CONT ",");
continue;
}
#endif /* IPCONFIG_DHCP */
if (ic_got_reply) {
- printk(" OK\n");
+ printk(KERN_CONT " OK\n");
break;
}
@@ -1205,7 +1205,7 @@ static int __init ic_dynamic(void)
continue;
if (! --retries) {
- printk(" timed out!\n");
+ printk(KERN_CONT " timed out!\n");
break;
}
@@ -1215,7 +1215,7 @@ static int __init ic_dynamic(void)
if (timeout > CONF_TIMEOUT_MAX)
timeout = CONF_TIMEOUT_MAX;
- printk(".");
+ printk(KERN_CONT ".");
}
#ifdef IPCONFIG_BOOTP
@@ -1236,7 +1236,7 @@ static int __init ic_dynamic(void)
((ic_got_reply & IC_RARP) ? "RARP"
: (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
&ic_servaddr);
- printk("my address is %pI4\n", &ic_myaddr);
+ printk(KERN_CONT "my address is %pI4\n", &ic_myaddr);
return 0;
}
@@ -1468,19 +1468,19 @@ static int __init ip_auto_config(void)
/*
* Clue in the operator.
*/
- printk("IP-Config: Complete:");
- printk("\n device=%s", ic_dev->name);
- printk(", addr=%pI4", &ic_myaddr);
- printk(", mask=%pI4", &ic_netmask);
- printk(", gw=%pI4", &ic_gateway);
- printk(",\n host=%s, domain=%s, nis-domain=%s",
+ printk("IP-Config: Complete:\n");
+ printk(" device=%s", ic_dev->name);
+ printk(KERN_CONT ", addr=%pI4", &ic_myaddr);
+ printk(KERN_CONT ", mask=%pI4", &ic_netmask);
+ printk(KERN_CONT ", gw=%pI4", &ic_gateway);
+ printk(KERN_CONT ",\n host=%s, domain=%s, nis-domain=%s",
utsname()->nodename, ic_domain, utsname()->domainname);
- printk(",\n bootserver=%pI4", &ic_servaddr);
- printk(", rootserver=%pI4", &root_server_addr);
- printk(", rootpath=%s", root_server_path);
+ printk(KERN_CONT ",\n bootserver=%pI4", &ic_servaddr);
+ printk(KERN_CONT ", rootserver=%pI4", &root_server_addr);
+ printk(KERN_CONT ", rootpath=%s", root_server_path);
if (ic_dev_mtu)
- printk(", mtu=%d", ic_dev_mtu);
- printk("\n");
+ printk(KERN_CONT ", mtu=%d", ic_dev_mtu);
+ printk(KERN_CONT "\n");
#endif /* !SILENT */
return 0;
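
All of these printk() calls are continuations of one logical console line. A fragment that doesn't announce itself as a continuation starts a new record at the default loglevel, so the progress dots and " OK" ended up on lines of their own; KERN_CONT glues the fragments back together. Minimal contrast (a sketch of the pattern, not code from this file):

	/* Broken: the second call opens a new record, splitting the line. */
	printk("IP-Config: trying");
	printk(".");

	/* Fixed: the continuation is marked explicitly. */
	printk("IP-Config: trying");
	printk(KERN_CONT ".");
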
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bb8f547fc7d2..5f29b2e20e23 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -822,8 +822,11 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
&md5);
tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
- if (tcp_packets_in_flight(tp) == 0)
+ if (tcp_packets_in_flight(tp) == 0) {
tcp_ca_event(sk, CA_EVENT_TX_START);
+ skb->ooo_okay = 1;
+ } else
+ skb->ooo_okay = 0;
skb_push(skb, tcp_header_size);
skb_reset_transport_header(skb);
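
skb->ooo_okay is the other half of the XPS series: when tcp_packets_in_flight() is zero there is nothing unacked that a rerouted packet could overtake, so the skb is flagged as safe to move to a different TX queue, and dev_pick_tx() (in the net/core/dev.c hunk above) only re-runs queue selection when the cached mapping is stale or this bit is set. The field itself lands in struct sk_buff outside this net/-limited diff; recalled shape, treat as a sketch:

	/* In struct sk_buff (include/linux/skbuff.h), per this series: */
	__u8	ooo_okay:1;	/* queue mapping may change without reordering */
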
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 4cf760598c2a..1023ad0d2b15 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3956,11 +3956,6 @@ static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
return 0;
}
-static int inet6_parse_link_af(struct net_device *dev, const struct nlattr *nla)
-{
- return -EOPNOTSUPP;
-}
-
static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
u32 pid, u32 seq, int event, unsigned int flags)
{
@@ -4670,7 +4665,6 @@ static struct rtnl_af_ops inet6_ops = {
.family = AF_INET6,
.fill_link_af = inet6_fill_link_af,
.get_link_af_size = inet6_get_link_af_size,
- .parse_link_af = inet6_parse_link_af,
};
/*
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 8a1628023bd1..861d252bd1ba 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -60,18 +60,16 @@ EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
const u32 rnd, const u16 synq_hsize)
{
- u32 a = (__force u32)raddr->s6_addr32[0];
- u32 b = (__force u32)raddr->s6_addr32[1];
- u32 c = (__force u32)raddr->s6_addr32[2];
-
- a += JHASH_GOLDEN_RATIO;
- b += JHASH_GOLDEN_RATIO;
- c += rnd;
- __jhash_mix(a, b, c);
-
- a += (__force u32)raddr->s6_addr32[3];
- b += (__force u32)rport;
- __jhash_mix(a, b, c);
+ u32 c;
+
+ c = jhash_3words((__force u32)raddr->s6_addr32[0],
+ (__force u32)raddr->s6_addr32[1],
+ (__force u32)raddr->s6_addr32[2],
+ rnd);
+
+ c = jhash_2words((__force u32)raddr->s6_addr32[3],
+ (__force u32)rport,
+ c);
return c & (synq_hsize - 1);
}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2a59610c2a58..b1155554bb18 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -58,8 +58,6 @@ MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
-#define IPV6_TLV_TEL_DST_SIZE 8
-
#ifdef IP6_TNL_DEBUG
#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
#else
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 9c5074528a71..49f986d626a0 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -82,7 +82,7 @@ static void *__mld2_query_bugs[] __attribute__((__unused__)) = {
static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
/* Big mc list lock for all the sockets */
-static DEFINE_RWLOCK(ipv6_sk_mc_lock);
+static DEFINE_SPINLOCK(ipv6_sk_mc_lock);
static void igmp6_join_group(struct ifmcaddr6 *ma);
static void igmp6_leave_group(struct ifmcaddr6 *ma);
@@ -123,6 +123,11 @@ int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
* socket join on multicast group
*/
+#define for_each_pmc_rcu(np, pmc) \
+ for (pmc = rcu_dereference(np->ipv6_mc_list); \
+ pmc != NULL; \
+ pmc = rcu_dereference(pmc->next))
+
int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
struct net_device *dev = NULL;
@@ -134,15 +139,15 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
if (!ipv6_addr_is_multicast(addr))
return -EINVAL;
- read_lock_bh(&ipv6_sk_mc_lock);
- for (mc_lst=np->ipv6_mc_list; mc_lst; mc_lst=mc_lst->next) {
+ rcu_read_lock();
+ for_each_pmc_rcu(np, mc_lst) {
if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
ipv6_addr_equal(&mc_lst->addr, addr)) {
- read_unlock_bh(&ipv6_sk_mc_lock);
+ rcu_read_unlock();
return -EADDRINUSE;
}
}
- read_unlock_bh(&ipv6_sk_mc_lock);
+ rcu_read_unlock();
mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
@@ -186,33 +191,41 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
return err;
}
- write_lock_bh(&ipv6_sk_mc_lock);
+ spin_lock(&ipv6_sk_mc_lock);
mc_lst->next = np->ipv6_mc_list;
- np->ipv6_mc_list = mc_lst;
- write_unlock_bh(&ipv6_sk_mc_lock);
+ rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
+ spin_unlock(&ipv6_sk_mc_lock);
rcu_read_unlock();
return 0;
}
+static void ipv6_mc_socklist_reclaim(struct rcu_head *head)
+{
+ kfree(container_of(head, struct ipv6_mc_socklist, rcu));
+}
/*
* socket leave on multicast group
*/
int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
struct ipv6_pinfo *np = inet6_sk(sk);
- struct ipv6_mc_socklist *mc_lst, **lnk;
+ struct ipv6_mc_socklist *mc_lst;
+ struct ipv6_mc_socklist __rcu **lnk;
struct net *net = sock_net(sk);
- write_lock_bh(&ipv6_sk_mc_lock);
- for (lnk = &np->ipv6_mc_list; (mc_lst = *lnk) !=NULL ; lnk = &mc_lst->next) {
+ spin_lock(&ipv6_sk_mc_lock);
+ for (lnk = &np->ipv6_mc_list;
+ (mc_lst = rcu_dereference_protected(*lnk,
+ lockdep_is_held(&ipv6_sk_mc_lock))) !=NULL ;
+ lnk = &mc_lst->next) {
if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
ipv6_addr_equal(&mc_lst->addr, addr)) {
struct net_device *dev;
*lnk = mc_lst->next;
- write_unlock_bh(&ipv6_sk_mc_lock);
+ spin_unlock(&ipv6_sk_mc_lock);
rcu_read_lock();
dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
@@ -225,11 +238,12 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
} else
(void) ip6_mc_leave_src(sk, mc_lst, NULL);
rcu_read_unlock();
- sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
+ atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
+ call_rcu(&mc_lst->rcu, ipv6_mc_socklist_reclaim);
return 0;
}
}
- write_unlock_bh(&ipv6_sk_mc_lock);
+ spin_unlock(&ipv6_sk_mc_lock);
return -EADDRNOTAVAIL;
}
@@ -272,12 +286,13 @@ void ipv6_sock_mc_close(struct sock *sk)
struct ipv6_mc_socklist *mc_lst;
struct net *net = sock_net(sk);
- write_lock_bh(&ipv6_sk_mc_lock);
- while ((mc_lst = np->ipv6_mc_list) != NULL) {
+ spin_lock(&ipv6_sk_mc_lock);
+ while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
+ lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
struct net_device *dev;
np->ipv6_mc_list = mc_lst->next;
- write_unlock_bh(&ipv6_sk_mc_lock);
+ spin_unlock(&ipv6_sk_mc_lock);
rcu_read_lock();
dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
@@ -290,11 +305,13 @@ void ipv6_sock_mc_close(struct sock *sk)
} else
(void) ip6_mc_leave_src(sk, mc_lst, NULL);
rcu_read_unlock();
- sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
- write_lock_bh(&ipv6_sk_mc_lock);
+ atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
+ call_rcu(&mc_lst->rcu, ipv6_mc_socklist_reclaim);
+
+ spin_lock(&ipv6_sk_mc_lock);
}
- write_unlock_bh(&ipv6_sk_mc_lock);
+ spin_unlock(&ipv6_sk_mc_lock);
}
int ip6_mc_source(int add, int omode, struct sock *sk,
@@ -328,8 +345,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
err = -EADDRNOTAVAIL;
- read_lock(&ipv6_sk_mc_lock);
- for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
+ for_each_pmc_rcu(inet6, pmc) {
if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
continue;
if (ipv6_addr_equal(&pmc->addr, group))
@@ -428,7 +444,6 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
done:
if (pmclocked)
write_unlock(&pmc->sflock);
- read_unlock(&ipv6_sk_mc_lock);
read_unlock_bh(&idev->lock);
rcu_read_unlock();
if (leavegroup)
@@ -466,14 +481,13 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
dev = idev->dev;
err = 0;
- read_lock(&ipv6_sk_mc_lock);
if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
leavegroup = 1;
goto done;
}
- for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
+ for_each_pmc_rcu(inet6, pmc) {
if (pmc->ifindex != gsf->gf_interface)
continue;
if (ipv6_addr_equal(&pmc->addr, group))
@@ -521,7 +535,6 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
write_unlock(&pmc->sflock);
err = 0;
done:
- read_unlock(&ipv6_sk_mc_lock);
read_unlock_bh(&idev->lock);
rcu_read_unlock();
if (leavegroup)
@@ -562,7 +575,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
* so reading the list is safe.
*/
- for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
+ for_each_pmc_rcu(inet6, pmc) {
if (pmc->ifindex != gsf->gf_interface)
continue;
if (ipv6_addr_equal(group, &pmc->addr))
@@ -612,13 +625,13 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
struct ip6_sf_socklist *psl;
int rv = 1;
- read_lock(&ipv6_sk_mc_lock);
- for (mc = np->ipv6_mc_list; mc; mc = mc->next) {
+ rcu_read_lock();
+ for_each_pmc_rcu(np, mc) {
if (ipv6_addr_equal(&mc->addr, mc_addr))
break;
}
if (!mc) {
- read_unlock(&ipv6_sk_mc_lock);
+ rcu_read_unlock();
return 1;
}
read_lock(&mc->sflock);
@@ -638,7 +651,7 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
rv = 0;
}
read_unlock(&mc->sflock);
- read_unlock(&ipv6_sk_mc_lock);
+ rcu_read_unlock();
return rv;
}
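
The conversion pattern throughout this file: readers walk np->ipv6_mc_list under rcu_read_lock() via for_each_pmc_rcu(), writers (join/drop/close, all process context) serialize on what is now a plain spinlock, and freed entries are deferred through call_rcu() so a reader racing with removal never touches freed memory. The generic shape, with head/new/lnk/node standing in for the real fields (a sketch, not the exact kernel code):

	/* Publish a new head node (writer, under ipv6_sk_mc_lock): */
	new->next = head;
	rcu_assign_pointer(head, new);		/* pairs with readers' rcu_dereference() */

	/* Unlink and retire a node (writer, under ipv6_sk_mc_lock): */
	*lnk = node->next;
	call_rcu(&node->rcu, reclaim_cb);	/* kfree() only after a grace period */
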
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 0f2766453759..07beeb06f752 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -104,26 +104,22 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
const struct in6_addr *daddr, u32 rnd)
{
- u32 a, b, c;
-
- a = (__force u32)saddr->s6_addr32[0];
- b = (__force u32)saddr->s6_addr32[1];
- c = (__force u32)saddr->s6_addr32[2];
-
- a += JHASH_GOLDEN_RATIO;
- b += JHASH_GOLDEN_RATIO;
- c += rnd;
- __jhash_mix(a, b, c);
-
- a += (__force u32)saddr->s6_addr32[3];
- b += (__force u32)daddr->s6_addr32[0];
- c += (__force u32)daddr->s6_addr32[1];
- __jhash_mix(a, b, c);
-
- a += (__force u32)daddr->s6_addr32[2];
- b += (__force u32)daddr->s6_addr32[3];
- c += (__force u32)id;
- __jhash_mix(a, b, c);
+ u32 c;
+
+ c = jhash_3words((__force u32)saddr->s6_addr32[0],
+ (__force u32)saddr->s6_addr32[1],
+ (__force u32)saddr->s6_addr32[2],
+ rnd);
+
+ c = jhash_3words((__force u32)saddr->s6_addr32[3],
+ (__force u32)daddr->s6_addr32[0],
+ (__force u32)daddr->s6_addr32[1],
+ c);
+
+ c = jhash_3words((__force u32)daddr->s6_addr32[2],
+ (__force u32)daddr->s6_addr32[3],
+ (__force u32)id,
+ c);
return c & (INETFRAGS_HASHSZ - 1);
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c346ccf66ae1..a0c4ad109c63 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2453,8 +2453,6 @@ static int ip6_route_dev_notify(struct notifier_block *this,
#ifdef CONFIG_PROC_FS
-#define RT6_INFO_LEN (32 + 4 + 32 + 4 + 32 + 40 + 5 + 1)
-
struct rt6_proc_arg
{
char *buffer;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5dbb3cd96e59..7f0bd8952646 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -60,8 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
/* check the reason of requeuing without tx lock first */
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
- if (!netif_tx_queue_stopped(txq) &&
- !netif_tx_queue_frozen(txq)) {
+ if (!netif_tx_queue_frozen_or_stopped(txq)) {
q->gso_skb = NULL;
q->q.qlen--;
} else
@@ -122,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
spin_unlock(root_lock);
HARD_TX_LOCK(dev, txq, smp_processor_id());
- if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+ if (!netif_tx_queue_frozen_or_stopped(txq))
ret = dev_hard_start_xmit(skb, dev, txq);
HARD_TX_UNLOCK(dev, txq);
@@ -144,8 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
ret = dev_requeue_skb(skb, q);
}
- if (ret && (netif_tx_queue_stopped(txq) ||
- netif_tx_queue_frozen(txq)))
+ if (ret && netif_tx_queue_frozen_or_stopped(txq))
ret = 0;
return ret;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 401af9596709..106479a7c94a 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -309,8 +309,7 @@ restart:
if (__netif_tx_trylock(slave_txq)) {
unsigned int length = qdisc_pkt_len(skb);
- if (!netif_tx_queue_stopped(slave_txq) &&
- !netif_tx_queue_frozen(slave_txq) &&
+ if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
txq_trans_update(slave_txq);
__netif_tx_unlock(slave_txq);
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 2351aceb296d..ad96ee90fe27 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1415,47 +1415,43 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
rc = x25_route_ioctl(cmd, argp);
break;
case SIOCX25GSUBSCRIP:
- lock_kernel();
rc = x25_subscr_ioctl(cmd, argp);
- unlock_kernel();
break;
case SIOCX25SSUBSCRIP:
rc = -EPERM;
if (!capable(CAP_NET_ADMIN))
break;
- lock_kernel();
rc = x25_subscr_ioctl(cmd, argp);
- unlock_kernel();
break;
case SIOCX25GFACILITIES: {
- struct x25_facilities fac = x25->facilities;
- lock_kernel();
- rc = copy_to_user(argp, &fac,
- sizeof(fac)) ? -EFAULT : 0;
- unlock_kernel();
+ lock_sock(sk);
+ rc = copy_to_user(argp, &x25->facilities,
+ sizeof(x25->facilities))
+ ? -EFAULT : 0;
+ release_sock(sk);
break;
}
case SIOCX25SFACILITIES: {
struct x25_facilities facilities;
rc = -EFAULT;
- lock_kernel();
if (copy_from_user(&facilities, argp,
sizeof(facilities)))
break;
rc = -EINVAL;
+ lock_sock(sk);
if (sk->sk_state != TCP_LISTEN &&
sk->sk_state != TCP_CLOSE)
- break;
+ goto out_fac_release;
if (facilities.pacsize_in < X25_PS16 ||
facilities.pacsize_in > X25_PS4096)
- break;
+ goto out_fac_release;
if (facilities.pacsize_out < X25_PS16 ||
facilities.pacsize_out > X25_PS4096)
- break;
+ goto out_fac_release;
if (facilities.winsize_in < 1 ||
facilities.winsize_in > 127)
- break;
+ goto out_fac_release;
if (facilities.throughput) {
int out = facilities.throughput & 0xf0;
int in = facilities.throughput & 0x0f;
@@ -1463,27 +1459,28 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
facilities.throughput |=
X25_DEFAULT_THROUGHPUT << 4;
else if (out < 0x30 || out > 0xD0)
- break;
+ goto out_fac_release;
if (!in)
facilities.throughput |=
X25_DEFAULT_THROUGHPUT;
else if (in < 0x03 || in > 0x0D)
- break;
+ goto out_fac_release;
}
if (facilities.reverse &&
(facilities.reverse & 0x81) != 0x81)
- break;
+ goto out_fac_release;
x25->facilities = facilities;
rc = 0;
- unlock_kernel();
+out_fac_release:
+ release_sock(sk);
break;
}
case SIOCX25GDTEFACILITIES: {
- lock_kernel();
+ lock_sock(sk);
rc = copy_to_user(argp, &x25->dte_facilities,
sizeof(x25->dte_facilities));
- unlock_kernel();
+ release_sock(sk);
if (rc)
rc = -EFAULT;
break;
@@ -1492,33 +1489,34 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case SIOCX25SDTEFACILITIES: {
struct x25_dte_facilities dtefacs;
rc = -EFAULT;
- lock_kernel();
if (copy_from_user(&dtefacs, argp, sizeof(dtefacs)))
break;
rc = -EINVAL;
+ lock_sock(sk);
if (sk->sk_state != TCP_LISTEN &&
sk->sk_state != TCP_CLOSE)
- break;
+ goto out_dtefac_release;
if (dtefacs.calling_len > X25_MAX_AE_LEN)
- break;
+ goto out_dtefac_release;
if (dtefacs.calling_ae == NULL)
- break;
+ goto out_dtefac_release;
if (dtefacs.called_len > X25_MAX_AE_LEN)
- break;
+ goto out_dtefac_release;
if (dtefacs.called_ae == NULL)
- break;
+ goto out_dtefac_release;
x25->dte_facilities = dtefacs;
rc = 0;
- unlock_kernel();
+out_dtefac_release:
+ release_sock(sk);
break;
}
case SIOCX25GCALLUSERDATA: {
- struct x25_calluserdata cud = x25->calluserdata;
- lock_kernel();
- rc = copy_to_user(argp, &cud,
- sizeof(cud)) ? -EFAULT : 0;
- unlock_kernel();
+ lock_sock(sk);
+ rc = copy_to_user(argp, &x25->calluserdata,
+ sizeof(x25->calluserdata))
+ ? -EFAULT : 0;
+ release_sock(sk);
break;
}
@@ -1526,37 +1524,36 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
struct x25_calluserdata calluserdata;
rc = -EFAULT;
- lock_kernel();
if (copy_from_user(&calluserdata, argp,
sizeof(calluserdata)))
break;
rc = -EINVAL;
if (calluserdata.cudlength > X25_MAX_CUD_LEN)
break;
+ lock_sock(sk);
x25->calluserdata = calluserdata;
- unlock_kernel();
+ release_sock(sk);
rc = 0;
break;
}
case SIOCX25GCAUSEDIAG: {
- struct x25_causediag causediag;
- lock_kernel();
- causediag = x25->causediag;
- rc = copy_to_user(argp, &causediag,
- sizeof(causediag)) ? -EFAULT : 0;
- unlock_kernel();
+ lock_sock(sk);
+ rc = copy_to_user(argp, &x25->causediag,
+ sizeof(x25->causediag))
+ ? -EFAULT : 0;
+ release_sock(sk);
break;
}
case SIOCX25SCAUSEDIAG: {
struct x25_causediag causediag;
rc = -EFAULT;
- lock_kernel();
if (copy_from_user(&causediag, argp, sizeof(causediag)))
break;
+ lock_sock(sk);
x25->causediag = causediag;
- unlock_kernel();
+ release_sock(sk);
rc = 0;
break;
@@ -1565,19 +1562,20 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case SIOCX25SCUDMATCHLEN: {
struct x25_subaddr sub_addr;
rc = -EINVAL;
- lock_kernel();
+ lock_sock(sk);
if(sk->sk_state != TCP_CLOSE)
- break;
+ goto out_cud_release;
rc = -EFAULT;
if (copy_from_user(&sub_addr, argp,
sizeof(sub_addr)))
- break;
+ goto out_cud_release;
rc = -EINVAL;
if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
- break;
+ goto out_cud_release;
x25->cudmatchlength = sub_addr.cudmatchlength;
- unlock_kernel();
rc = 0;
+out_cud_release:
+ release_sock(sk);
break;
}
@@ -1646,16 +1644,20 @@ static int compat_x25_subscr_ioctl(unsigned int cmd,
dev_put(dev);
if (cmd == SIOCX25GSUBSCRIP) {
+ read_lock_bh(&x25_neigh_list_lock);
x25_subscr.extended = nb->extended;
x25_subscr.global_facil_mask = nb->global_facil_mask;
+ read_unlock_bh(&x25_neigh_list_lock);
rc = copy_to_user(x25_subscr32, &x25_subscr,
sizeof(*x25_subscr32)) ? -EFAULT : 0;
} else {
rc = -EINVAL;
if (x25_subscr.extended == 0 || x25_subscr.extended == 1) {
rc = 0;
+ write_lock_bh(&x25_neigh_list_lock);
nb->extended = x25_subscr.extended;
nb->global_facil_mask = x25_subscr.global_facil_mask;
+ write_unlock_bh(&x25_neigh_list_lock);
}
}
x25_neigh_put(nb);
@@ -1711,17 +1713,13 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
rc = x25_route_ioctl(cmd, argp);
break;
case SIOCX25GSUBSCRIP:
- lock_kernel();
rc = compat_x25_subscr_ioctl(cmd, argp);
- unlock_kernel();
break;
case SIOCX25SSUBSCRIP:
rc = -EPERM;
if (!capable(CAP_NET_ADMIN))
break;
- lock_kernel();
rc = compat_x25_subscr_ioctl(cmd, argp);
- unlock_kernel();
break;
case SIOCX25GFACILITIES:
case SIOCX25SFACILITIES:
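
The shape of the BKL removal in every SIOCX25S* case above: user copies happen with no lock held (copy_from_user() may fault and sleep), then lock_sock() covers only the state check plus the actual x25 state mutation, and every early error exit funnels through one release label so the lock is never leaked. Condensed sketch (some_field is a stand-in):

	rc = -EFAULT;
	if (copy_from_user(&val, argp, sizeof(val)))
		break;				/* no lock held yet */
	rc = -EINVAL;
	lock_sock(sk);
	if (sk->sk_state != TCP_LISTEN && sk->sk_state != TCP_CLOSE)
		goto out_release;
	x25->some_field = val;			/* hypothetical field */
	rc = 0;
	out_release:
		release_sock(sk);
		break;
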
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 73e7b954ad28..4c81f6abb65b 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -31,8 +31,8 @@
#include <linux/init.h>
#include <net/x25.h>
-static LIST_HEAD(x25_neigh_list);
-static DEFINE_RWLOCK(x25_neigh_list_lock);
+LIST_HEAD(x25_neigh_list);
+DEFINE_RWLOCK(x25_neigh_list_lock);
static void x25_t20timer_expiry(unsigned long);
@@ -360,16 +360,20 @@ int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
dev_put(dev);
if (cmd == SIOCX25GSUBSCRIP) {
+ read_lock_bh(&x25_neigh_list_lock);
x25_subscr.extended = nb->extended;
x25_subscr.global_facil_mask = nb->global_facil_mask;
+ read_unlock_bh(&x25_neigh_list_lock);
rc = copy_to_user(arg, &x25_subscr,
sizeof(x25_subscr)) ? -EFAULT : 0;
} else {
rc = -EINVAL;
if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
rc = 0;
+ write_lock_bh(&x25_neigh_list_lock);
nb->extended = x25_subscr.extended;
nb->global_facil_mask = x25_subscr.global_facil_mask;
+ write_unlock_bh(&x25_neigh_list_lock);
}
}
x25_neigh_put(nb);
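
Dropping static from x25_neigh_list and x25_neigh_list_lock is what lets compat_x25_subscr_ioctl() in af_x25.c take the lock directly; the matching extern declarations would land in include/net/x25.h, which is outside this net/-limited diff. Presumably:

	/* include/net/x25.h (assumed; not shown in this diff): */
	extern struct list_head x25_neigh_list;
	extern rwlock_t x25_neigh_list_lock;
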