Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c             | 19
-rw-r--r--  net/core/net-sysfs.c       | 27
-rw-r--r--  net/core/net_namespace.c   | 10
-rw-r--r--  net/core/scm.c             |  2
-rw-r--r--  net/core/skbuff.c          | 16
-rw-r--r--  net/core/sock.c            | 13
-rw-r--r--  net/core/stream.c          |  1
-rw-r--r--  net/core/sysctl_net_core.c | 51
8 files changed, 83 insertions(+), 56 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 7307a0c15c9f..18dc8d75ead9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1870,14 +1870,6 @@ static void __move_netdevice_notifier_net(struct net *src_net,
__register_netdevice_notifier_net(dst_net, nb, true);
}
-void move_netdevice_notifier_net(struct net *src_net, struct net *dst_net,
- struct notifier_block *nb)
-{
- rtnl_lock();
- __move_netdevice_notifier_net(src_net, dst_net, nb);
- rtnl_unlock();
-}
-
int register_netdevice_notifier_dev_net(struct net_device *dev,
struct notifier_block *nb,
struct netdev_net_notifier *nn)
@@ -5027,7 +5019,7 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
WARN_ON(refcount_read(&skb->users));
if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
- trace_consume_skb(skb);
+ trace_consume_skb(skb, net_tx_action);
else
trace_kfree_skb(skb, net_tx_action,
SKB_DROP_REASON_NOT_SPECIFIED);
@@ -8321,9 +8313,8 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
}
}
if (dev->flags != old_flags) {
- pr_info("device %s %s promiscuous mode\n",
- dev->name,
- dev->flags & IFF_PROMISC ? "entered" : "left");
+ netdev_info(dev, "%s promiscuous mode\n",
+ dev->flags & IFF_PROMISC ? "entered" : "left");
if (audit_enabled) {
current_uid_gid(&uid, &gid);
audit_log(audit_context(), GFP_ATOMIC,
@@ -8391,6 +8382,8 @@ static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
}
}
if (dev->flags ^ old_flags) {
+ netdev_info(dev, "%s allmulticast mode\n",
+ dev->flags & IFF_ALLMULTI ? "entered" : "left");
dev_change_rx_flags(dev, IFF_ALLMULTI);
dev_set_rx_mode(dev);
if (notify)
@@ -10381,7 +10374,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
for (i = 0; i < n; i++)
- dst[i] = atomic_long_read(&src[i]);
+ dst[i] = (unsigned long)atomic_long_read(&src[i]);
/* zero out counters that only exist in rtnl_link_stats64 */
memset((char *)stats64 + n * sizeof(u64), 0,
sizeof(*stats64) - n * sizeof(u64));
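
A note on that last dev.c hunk: atomic_long_read() returns a signed long, so assigning it straight into a u64 slot sign-extends on 32-bit builds once a counter's top bit is set; the (unsigned long) cast forces zero-extension instead. A minimal userspace sketch (not kernel code) of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Pretend this is a 32-bit build where long is 32 bits and a
	 * packet counter has wrapped past LONG_MAX. */
	long counter = -1;
	uint64_t sign_extended = counter;                /* 0xffffffffffffffff */
	uint64_t zero_extended = (unsigned long)counter; /* 0x00000000ffffffff on 32-bit */

	printf("%#llx vs %#llx\n",
	       (unsigned long long)sign_extended,
	       (unsigned long long)zero_extended);
	return 0;
}

On a 64-bit build both values print the same; the cast only changes behavior where long is narrower than u64, which is exactly the case the patch guards.
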
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 4b361ac6a252..15e3f4606b5f 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1052,7 +1052,7 @@ static void rx_queue_get_ownership(const struct kobject *kobj,
net_ns_get_ownership(net, uid, gid);
}
-static struct kobj_type rx_queue_ktype __ro_after_init = {
+static const struct kobj_type rx_queue_ktype = {
.sysfs_ops = &rx_queue_sysfs_ops,
.release = rx_queue_release,
.default_groups = rx_queue_default_groups,
@@ -1060,6 +1060,18 @@ static struct kobj_type rx_queue_ktype __ro_after_init = {
.get_ownership = rx_queue_get_ownership,
};
+static int rx_queue_default_mask(struct net_device *dev,
+ struct netdev_rx_queue *queue)
+{
+#if IS_ENABLED(CONFIG_RPS) && IS_ENABLED(CONFIG_SYSCTL)
+ struct cpumask *rps_default_mask = READ_ONCE(dev_net(dev)->core.rps_default_mask);
+
+ if (rps_default_mask && !cpumask_empty(rps_default_mask))
+ return netdev_rx_queue_set_rps_mask(queue, rps_default_mask);
+#endif
+ return 0;
+}
+
static int rx_queue_add_kobject(struct net_device *dev, int index)
{
struct netdev_rx_queue *queue = dev->_rx + index;
@@ -1083,13 +1095,10 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
goto err;
}
-#if IS_ENABLED(CONFIG_RPS) && IS_ENABLED(CONFIG_SYSCTL)
- if (!cpumask_empty(&rps_default_mask)) {
- error = netdev_rx_queue_set_rps_mask(queue, &rps_default_mask);
- if (error)
- goto err;
- }
-#endif
+ error = rx_queue_default_mask(dev, queue);
+ if (error)
+ goto err;
+
kobject_uevent(kobj, KOBJ_ADD);
return error;
@@ -1662,7 +1671,7 @@ static void netdev_queue_get_ownership(const struct kobject *kobj,
net_ns_get_ownership(net, uid, gid);
}
-static struct kobj_type netdev_queue_ktype __ro_after_init = {
+static const struct kobj_type netdev_queue_ktype = {
.sysfs_ops = &netdev_queue_sysfs_ops,
.release = netdev_queue_release,
.default_groups = netdev_queue_default_groups,
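
The READ_ONCE() in the new rx_queue_default_mask() helper pairs with the WRITE_ONCE() publisher added in sysctl_net_core.c further down: writers are serialized by rtnl_lock, so a reader racing with the first write sees either NULL or a fully allocated mask, never a half-published pointer. A loose userspace analog of that single-writer publish pattern, sketched with C11 atomics (illustrative names; acquire/release stands in for the kernel's READ_ONCE/WRITE_ONCE plus lock-side serialization):

#include <stdatomic.h>
#include <stdlib.h>

struct mask { unsigned long bits[4]; };

/* Plays the role of net->core.rps_default_mask. */
static _Atomic(struct mask *) default_mask;

/* Writer side (serialized externally, like rtnl_lock): allocate once,
 * publish with release semantics, fill in under the same lock. */
static struct mask *mask_cow_alloc(void)
{
	struct mask *m = atomic_load_explicit(&default_mask, memory_order_acquire);

	if (m)
		return m;
	m = calloc(1, sizeof(*m));
	if (m)
		atomic_store_explicit(&default_mask, m, memory_order_release);
	return m;
}

/* Reader side: one load; NULL means "no default configured yet". */
static const struct mask *mask_peek(void)
{
	return atomic_load_explicit(&default_mask, memory_order_acquire);
}

int main(void)
{
	struct mask *m = mask_cow_alloc();

	if (m)
		m->bits[0] = 0xf;	/* fill under the writer lock */
	return mask_peek() == NULL;
}
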
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 078a0a420c8a..7b69cf882b8e 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -304,6 +304,12 @@ struct net *get_net_ns_by_id(const struct net *net, int id)
}
EXPORT_SYMBOL_GPL(get_net_ns_by_id);
+/* init code that must occur even if setup_net() is not called. */
+static __net_init void preinit_net(struct net *net)
+{
+ ref_tracker_dir_init(&net->notrefcnt_tracker, 128);
+}
+
/*
* setup_net runs the initializers for the network namespace object.
*/
@@ -316,7 +322,6 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
refcount_set(&net->ns.count, 1);
ref_tracker_dir_init(&net->refcnt_tracker, 128);
- ref_tracker_dir_init(&net->notrefcnt_tracker, 128);
refcount_set(&net->passive, 1);
get_random_bytes(&net->hash_mix, sizeof(u32));
@@ -472,6 +477,8 @@ struct net *copy_net_ns(unsigned long flags,
rv = -ENOMEM;
goto dec_ucounts;
}
+
+ preinit_net(net);
refcount_set(&net->passive, 1);
net->ucounts = ucounts;
get_user_ns(user_ns);
@@ -1118,6 +1125,7 @@ void __init net_ns_init(void)
init_net.key_domain = &init_net_key_domain;
#endif
down_write(&pernet_ops_rwsem);
+ preinit_net(&init_net);
if (setup_net(&init_net, &init_user_ns))
panic("Could not setup the initial network namespace");
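
The point of splitting preinit_net() out of setup_net() is ordering: the notrefcnt tracker must be valid on every path, including namespaces that are allocated but never fully set up, so teardown can unconditionally exit the tracker. Condensed from the copy_net_ns() hunk above (error handling elided; not a verbatim quote), the call order is now:

	net = net_alloc();
	if (!net)
		goto dec_ucounts;
	preinit_net(net);               /* tracker valid from here on */
	refcount_set(&net->passive, 1);
	...
	rv = setup_net(net, user_ns);   /* may fail; teardown stays safe */
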
diff --git a/net/core/scm.c b/net/core/scm.c
index 5c356f0dee30..acb7d776fa6e 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -229,6 +229,8 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
if (msg->msg_control_is_user) {
struct cmsghdr __user *cm = msg->msg_control_user;
+ check_object_size(data, cmlen - sizeof(*cm), true);
+
if (!user_write_access_begin(cm, cmlen))
goto efault;
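
Context for the scm.c hunk: inside a user_write_access_begin() section the data is copied with unsafe_* accessors, which bypass the hardened-usercopy validation that a plain copy_to_user() would perform, so the object-size check is added explicitly before the raw copy. A rough userspace analog of checking the source span first (illustrative only, not the kernel API):

#include <assert.h>
#include <string.h>

/* Stand-in for check_object_size(): refuse copies that would read past
 * the end of the source object, then do the unchecked copy. */
static void checked_copy(void *dst, const void *src, size_t src_size, size_t len)
{
	assert(len <= src_size);	/* the hardened kernel check aborts here */
	memcpy(dst, src, len);
}

int main(void)
{
	char src[8] = "payload";
	char dst[8];

	checked_copy(dst, src, sizeof(src), sizeof(src));	/* ok */
	/* checked_copy(dst, src, sizeof(src), 16); would trip the assert */
	return 0;
}
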
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 13ea10cf8544..eb7d33b41e71 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -991,7 +991,7 @@ bool __kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
DEBUG_NET_WARN_ON_ONCE(reason <= 0 || reason >= SKB_DROP_REASON_MAX);
if (reason == SKB_CONSUMED)
- trace_consume_skb(skb);
+ trace_consume_skb(skb, __builtin_return_address(0));
else
trace_kfree_skb(skb, __builtin_return_address(0), reason);
return true;
@@ -1189,7 +1189,7 @@ void consume_skb(struct sk_buff *skb)
if (!skb_unref(skb))
return;
- trace_consume_skb(skb);
+ trace_consume_skb(skb, __builtin_return_address(0));
__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
@@ -1204,7 +1204,7 @@ EXPORT_SYMBOL(consume_skb);
*/
void __consume_stateless_skb(struct sk_buff *skb)
{
- trace_consume_skb(skb);
+ trace_consume_skb(skb, __builtin_return_address(0));
skb_release_data(skb, SKB_CONSUMED);
kfree_skbmem(skb);
}
@@ -1260,7 +1260,7 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
return;
/* if reaching here SKB is ready to free */
- trace_consume_skb(skb);
+ trace_consume_skb(skb, __builtin_return_address(0));
/* if SKB is a clone, don't handle this case */
if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
@@ -1406,14 +1406,18 @@ EXPORT_SYMBOL_GPL(skb_morph);
int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
{
- unsigned long max_pg, num_pg, new_pg, old_pg;
+ unsigned long max_pg, num_pg, new_pg, old_pg, rlim;
struct user_struct *user;
if (capable(CAP_IPC_LOCK) || !size)
return 0;
+ rlim = rlimit(RLIMIT_MEMLOCK);
+ if (rlim == RLIM_INFINITY)
+ return 0;
+
num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */
- max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ max_pg = rlim >> PAGE_SHIFT;
user = mmp->user ? : current_user();
old_pg = atomic_long_read(&user->locked_vm);
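
Two independent changes in skbuff.c: every consume-path tracepoint now passes its callsite via __builtin_return_address(0), giving trace_consume_skb the same location information trace_kfree_skb already reported; and mm_account_pinned_pages() now skips locked_vm accounting entirely when RLIMIT_MEMLOCK is unlimited, rather than charging against RLIM_INFINITY >> PAGE_SHIFT. A userspace sketch of that rlimit short-circuit (POSIX getrlimit; not the kernel helper):

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_MEMLOCK, &rl))
		return 1;

	/* Mirrors the patch: an unlimited rlimit means no accounting needed. */
	if (rl.rlim_cur == RLIM_INFINITY) {
		puts("RLIMIT_MEMLOCK unlimited: skip pinned-page accounting");
		return 0;
	}
	printf("accounting against %llu bytes of lockable memory\n",
	       (unsigned long long)rl.rlim_cur);
	return 0;
}
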
diff --git a/net/core/sock.c b/net/core/sock.c
index afbb02984d5f..341c565dbc26 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2340,17 +2340,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
smp_wmb();
refcount_set(&newsk->sk_refcnt, 2);
- /* Increment the counter in the same struct proto as the master
- * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
- * is the same as sk->sk_prot->socks, as this field was copied
- * with memcpy).
- *
- * This _changes_ the previous behaviour, where
- * tcp_create_openreq_child always was incrementing the
- * equivalent to tcp_prot->socks (inet_sock_nr), so this have
- * to be taken into account in all callers. -acme
- */
- sk_refcnt_debug_inc(newsk);
sk_set_socket(newsk, NULL);
sk_tx_queue_clear(newsk);
RCU_INIT_POINTER(newsk->sk_wq, NULL);
@@ -3710,8 +3699,6 @@ void sk_common_release(struct sock *sk)
xfrm_sk_free_policy(sk);
- sk_refcnt_debug_release(sk);
-
sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);
diff --git a/net/core/stream.c b/net/core/stream.c
index cd06750dd329..434446ab14c5 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -209,7 +209,6 @@ void sk_stream_kill_queues(struct sock *sk)
sk_mem_reclaim_final(sk);
WARN_ON_ONCE(sk->sk_wmem_queued);
- WARN_ON_ONCE(sk->sk_forward_alloc);
/* It is _impossible_ for the backlog to contain anything
* when we get here. All user references to this socket
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 7130e6d9e263..74842b453407 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -74,24 +74,47 @@ static void dump_cpumask(void *buffer, size_t *lenp, loff_t *ppos,
#endif
#ifdef CONFIG_RPS
-struct cpumask rps_default_mask;
+
+static struct cpumask *rps_default_mask_cow_alloc(struct net *net)
+{
+ struct cpumask *rps_default_mask;
+
+ if (net->core.rps_default_mask)
+ return net->core.rps_default_mask;
+
+ rps_default_mask = kzalloc(cpumask_size(), GFP_KERNEL);
+ if (!rps_default_mask)
+ return NULL;
+
+ /* pairs with READ_ONCE in rx_queue_default_mask() */
+ WRITE_ONCE(net->core.rps_default_mask, rps_default_mask);
+ return rps_default_mask;
+}
static int rps_default_mask_sysctl(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
+ struct net *net = (struct net *)table->data;
int err = 0;
rtnl_lock();
if (write) {
- err = cpumask_parse(buffer, &rps_default_mask);
+ struct cpumask *rps_default_mask = rps_default_mask_cow_alloc(net);
+
+ err = -ENOMEM;
+ if (!rps_default_mask)
+ goto done;
+
+ err = cpumask_parse(buffer, rps_default_mask);
if (err)
goto done;
- err = rps_cpumask_housekeeping(&rps_default_mask);
+ err = rps_cpumask_housekeeping(rps_default_mask);
if (err)
goto done;
} else {
- dump_cpumask(buffer, lenp, ppos, &rps_default_mask);
+ dump_cpumask(buffer, lenp, ppos,
+ net->core.rps_default_mask ? : cpu_none_mask);
}
done:
@@ -508,11 +531,6 @@ static struct ctl_table net_core_table[] = {
.mode = 0644,
.proc_handler = rps_sock_flow_sysctl
},
- {
- .procname = "rps_default_mask",
- .mode = 0644,
- .proc_handler = rps_default_mask_sysctl
- },
#endif
#ifdef CONFIG_NET_FLOW_LIMIT
{
@@ -639,6 +657,14 @@ static struct ctl_table net_core_table[] = {
};
static struct ctl_table netns_core_table[] = {
+#if IS_ENABLED(CONFIG_RPS)
+ {
+ .procname = "rps_default_mask",
+ .data = &init_net,
+ .mode = 0644,
+ .proc_handler = rps_default_mask_sysctl
+ },
+#endif
{
.procname = "somaxconn",
.data = &init_net.core.sysctl_somaxconn,
@@ -706,6 +732,9 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
tbl = net->core.sysctl_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->core.sysctl_hdr);
BUG_ON(tbl == netns_core_table);
+#if IS_ENABLED(CONFIG_RPS)
+ kfree(net->core.rps_default_mask);
+#endif
kfree(tbl);
}
@@ -716,10 +745,6 @@ static __net_initdata struct pernet_operations sysctl_core_ops = {
static __init int sysctl_core_init(void)
{
-#if IS_ENABLED(CONFIG_RPS)
- cpumask_copy(&rps_default_mask, cpu_none_mask);
-#endif
-
register_net_sysctl(&init_net, "net/core", net_core_table);
return register_pernet_subsys(&sysctl_core_ops);
}
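
Moving the rps_default_mask entry from the global net_core_table into netns_core_table is what makes it per-namespace: the netns table is duplicated for each new namespace and its .data pointers adjusted to point into that namespace's struct net (see sysctl_core_net_init(), not shown in this diff), so the handler's table->data cast yields the right namespace. The mask itself is now lazily allocated on first write, readers fall back to cpu_none_mask, and the allocation is freed in sysctl_core_net_exit(). A self-contained userspace sketch of that lifecycle (illustrative names, not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct ns { unsigned long *default_mask; };	/* ~ net->core.rps_default_mask */
static const unsigned long empty_mask;		/* ~ cpu_none_mask */

static unsigned long *mask_cow_alloc(struct ns *ns)
{
	if (!ns->default_mask)
		ns->default_mask = calloc(1, sizeof(unsigned long));
	return ns->default_mask;		/* NULL on allocation failure */
}

static const unsigned long *mask_read(const struct ns *ns)
{
	return ns->default_mask ? ns->default_mask : &empty_mask;
}

int main(void)
{
	struct ns ns = { 0 };
	unsigned long *m;

	printf("before write: %#lx\n", *mask_read(&ns));	/* empty fallback */
	m = mask_cow_alloc(&ns);
	if (m)
		*m = 0xf;		/* ~ writing the sysctl in this netns */
	printf("after write:  %#lx\n", *mask_read(&ns));
	free(ns.default_mask);		/* ~ sysctl_core_net_exit() */
	return 0;
}
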