author     Florian Westphal <fw@strlen.de>           2014-06-08 13:41:23 +0400
committer  Pablo Neira Ayuso <pablo@netfilter.org>   2014-06-16 14:51:36 +0400
commit     cd5f336f1780cb20e83146cde64d3d5779e175e6 (patch)
tree       1256bde8377eda4f1ca29a232eaaa0e8a3d13e22 /net
parent     266155b2de8fb721ae353688529b2f8bcdde2f90 (diff)
netfilter: ctnetlink: fix refcnt leak in dying/unconfirmed list dumper
'last' keeps track of the ct that had its refcnt bumped during the previous
dump cycle. Thus it must not be overwritten until end-of-function.

Another (unrelated, theoretical) issue: don't attempt to bump the refcnt of a
conntrack whose reference count is already 0. Such a conntrack is being
destroyed right now; its memory is freed once we release the percpu dying
spinlock.

Fixes: b7779d06 ('netfilter: conntrack: spinlock per cpu to protect special lists.')
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
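To make the refcnt leak concrete: cb->args[1] is the resume cookie of the
netlink dump. A previous dump cycle stored a conntrack pointer there after
bumping its refcount, and that reference is only dropped via 'last' at the end
of the function. Below is a small userspace simulation of that control flow
(plain C; fake_ct, resume_cookie, put_ref and dump_cycle are made-up names for
illustration, not kernel symbols). Reloading 'last' from the cookie inside the
per-CPU loop loses the pointer once the cookie is consumed, so the reference
taken in the previous cycle is never put:

/* Illustrative only -- mirrors the control flow, not the kernel code. */
#include <stdio.h>

struct fake_ct { int refcnt; };

static unsigned long resume_cookie;	/* plays the role of cb->args[1] */

static void put_ref(struct fake_ct *ct)
{
	if (ct)
		ct->refcnt--;		/* stands in for nf_ct_put() */
}

static void dump_cycle(int buggy)
{
	/* fixed variant: read the cookie exactly once, up front */
	struct fake_ct *last = buggy ? NULL : (struct fake_ct *)resume_cookie;

	for (int cpu = 0; cpu < 2; cpu++) {
		if (buggy)			/* buggy variant: reload per CPU */
			last = (struct fake_ct *)resume_cookie;
		if (cpu == 0)			/* pretend we resumed past 'last' here */
			resume_cookie = 0;	/* cookie consumed, as cb->args[1] = 0 */
	}
	put_ref(last);	/* with the per-CPU reload, last is NULL by now */
}

int main(void)
{
	struct fake_ct ct = { .refcnt = 1 };	/* ref taken by the previous dump cycle */

	resume_cookie = (unsigned long)&ct;
	dump_cycle(1);
	printf("buggy: refcnt = %d (leaked)\n", ct.refcnt);

	ct.refcnt = 1;
	resume_cookie = (unsigned long)&ct;
	dump_cycle(0);
	printf("fixed: refcnt = %d (released)\n", ct.refcnt);
	return 0;
}

Hoisting the single read of cb->args[1] to the top of the function, as the
patch does, keeps 'last' valid until the final put.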
Diffstat (limited to 'net')
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index ef0eedd70541..70123f48b6c9 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1150,7 +1150,7 @@ static int ctnetlink_done_list(struct netlink_callback *cb)
 static int
 ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
 {
-	struct nf_conn *ct, *last = NULL;
+	struct nf_conn *ct, *last;
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_node *n;
 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
@@ -1163,6 +1163,8 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
 	if (cb->args[2])
 		return 0;
 
+	last = (struct nf_conn *)cb->args[1];
+
 	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
 		struct ct_pcpu *pcpu;
@@ -1171,7 +1173,6 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
 		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
 		spin_lock_bh(&pcpu->lock);
-		last = (struct nf_conn *)cb->args[1];
 		list = dying ? &pcpu->dying : &pcpu->unconfirmed;
 restart:
 		hlist_nulls_for_each_entry(h, n, list, hnnode) {
@@ -1190,7 +1191,8 @@ restart:
 						  ct);
 			rcu_read_unlock();
 			if (res < 0) {
-				nf_conntrack_get(&ct->ct_general);
+				if (!atomic_inc_not_zero(&ct->ct_general.use))
+					continue;
 				cb->args[0] = cpu;
 				cb->args[1] = (unsigned long)ct;
 				spin_unlock_bh(&pcpu->lock);
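The guard added at the end of the hunk above is the usual "take a reference
only if the count is still non-zero" pattern: a conntrack whose use count has
already dropped to 0 is being destroyed and will be freed once the percpu
dying lock is released, so the dumper must skip it rather than resurrect it.
A minimal userspace sketch of the same semantics with C11 atomics (struct
object and try_get_ref are illustrative names, not kernel APIs):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object {
	atomic_int refcnt;	/* 0 means "already being destroyed" */
};

/* Mirrors what atomic_inc_not_zero() provides: take a reference only
 * while the count is non-zero; report failure if the object is dying. */
static bool try_get_ref(struct object *obj)
{
	int old = atomic_load(&obj->refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&obj->refcnt, &old, old + 1))
			return true;	/* reference taken */
		/* CAS failed and refreshed 'old'; retry */
	}
	return false;			/* refcount already hit zero, hands off */
}

int main(void)
{
	struct object live  = { .refcnt = 1 };
	struct object dying = { .refcnt = 0 };

	printf("live:  %s\n", try_get_ref(&live)  ? "got ref" : "skipped");
	printf("dying: %s\n", try_get_ref(&dying) ? "got ref" : "skipped");
	return 0;
}

When the increment fails, the dump loop simply moves on to the next entry,
which is exactly what the added 'continue' in the patch does.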