path: root/net/ipv4
author	Eric Dumazet <eric.dumazet@gmail.com>	2010-06-09 20:21:07 +0400
committer	David S. Miller <davem@davemloft.net>	2010-06-11 09:47:08 +0400
commit	592fcb9dfafaa02dd0edc207bf5d3a0ee7a1f8df (patch)
tree	3b7bbd5313c7f564cf8087658c36d32a87ca79c4 /net/ipv4
parent	96b52e61be1ad4d4f8de39b9deaf253da804ea3b (diff)
download	linux-592fcb9dfafaa02dd0edc207bf5d3a0ee7a1f8df.tar.xz
ip: ip_ra_control() rcu fix
commit 66018506e15b (ip: Router Alert RCU conversion) introduced RCU lookups to ip_call_ra_chain(). It missed a proper deinit phase:

When ip_ra_control() deletes an ip_ra_chain entry, it should make sure ip_call_ra_chain() users cannot start using the socket during the RCU grace period. It should also delay the sock_put() until after the grace period, or we risk a premature socket free and corruption, as raw sockets are not RCU protected yet.

This delay avoids using the expensive atomic_inc_not_zero() in ip_call_ra_chain().

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
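For context, here is a simplified sketch of the RCU reader side this fix protects. It is not the actual ip_call_ra_chain() source; deliver_to_ra_socket() is a hypothetical stand-in for the real delivery logic (the real function clones the skb and hands it to raw_rcv()). The point it illustrates is that the chain walk runs entirely under rcu_read_lock() with no per-packet refcounting, which is why the writer must both clear ra->sk and keep the entry and its socket alive for a full grace period.

#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <net/ip.h>		/* struct ip_ra_chain and the ip_ra_chain list head */
#include <net/sock.h>

/* Hypothetical helper standing in for the real skb delivery to the socket. */
static void deliver_to_ra_socket(struct sock *sk, struct sk_buff *skb);

static void ip_ra_deliver_sketch(struct sk_buff *skb)
{
	struct ip_ra_chain *ra;

	rcu_read_lock();
	for (ra = rcu_dereference(ip_ra_chain); ra; ra = rcu_dereference(ra->next)) {
		struct sock *sk = ra->sk;

		/* ra->sk == NULL means ip_ra_control() is unlinking this entry. */
		if (sk)
			deliver_to_ra_socket(sk, skb);
	}
	rcu_read_unlock();
}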
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/ip_sockglue.c | 19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 08b9519a24f4..47fff528ff39 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -241,9 +241,13 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
 struct ip_ra_chain *ip_ra_chain;
 static DEFINE_SPINLOCK(ip_ra_lock);
 
-static void ip_ra_free_rcu(struct rcu_head *head)
+
+static void ip_ra_destroy_rcu(struct rcu_head *head)
 {
-	kfree(container_of(head, struct ip_ra_chain, rcu));
+	struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
+
+	sock_put(ra->saved_sk);
+	kfree(ra);
 }
 
 int ip_ra_control(struct sock *sk, unsigned char on,
@@ -264,13 +268,20 @@ int ip_ra_control(struct sock *sk, unsigned char on,
 				kfree(new_ra);
 				return -EADDRINUSE;
 			}
+			/* dont let ip_call_ra_chain() use sk again */
+			ra->sk = NULL;
 			rcu_assign_pointer(*rap, ra->next);
 			spin_unlock_bh(&ip_ra_lock);
 
 			if (ra->destructor)
 				ra->destructor(sk);
-			sock_put(sk);
-			call_rcu(&ra->rcu, ip_ra_free_rcu);
+			/*
+			 * Delay sock_put(sk) and kfree(ra) after one rcu grace
+			 * period. This guarantee ip_call_ra_chain() dont need
+			 * to mess with socket refcounts.
+			 */
+			ra->saved_sk = sk;
+			call_rcu(&ra->rcu, ip_ra_destroy_rcu);
 			return 0;
 		}
 	}
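For contrast, a hedged sketch of the refcount-based alternative the changelog says this delay avoids: without deferring sock_put() past the grace period, the reader would have to pin each socket with an atomic operation per packet before touching it. ra_get_sock_sketch() is a hypothetical helper, not kernel code.

#include <linux/rcupdate.h>
#include <net/ip.h>
#include <net/sock.h>

/*
 * Hypothetical reader-side helper showing the cost the patch avoids:
 * if the final sock_put() could run concurrently with the RCU walk,
 * the reader would need atomic_inc_not_zero() on sk_refcnt per packet.
 */
static struct sock *ra_get_sock_sketch(struct ip_ra_chain *ra)
{
	struct sock *sk = ra->sk;

	if (sk && atomic_inc_not_zero(&sk->sk_refcnt))
		return sk;	/* caller must sock_put(sk) when done */
	return NULL;
}

With the committed approach, the entry and its socket are guaranteed to outlive any in-flight rcu_read_lock() section, so the packet fast path stays free of these atomic operations.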