author    Jeremy Kerr <jk@codeconstruct.com.au>    2023-10-09 10:56:45 +0300
committer Jakub Kicinski <kuba@kernel.org>         2023-10-11 05:43:22 +0300
commit    5093bbfc10ab6636b32728e35813cbd79feb063c (patch)
tree      3ca6638478d5e045256aa9d20e673a2225e1c0c7 /net/mctp
parent    8527ca7735ef4cdad32c45853b0138f46ab2df58 (diff)
download  linux-5093bbfc10ab6636b32728e35813cbd79feb063c.tar.xz
mctp: perform route lookups under a RCU read-side lock
Our current route lookups (mctp_route_lookup and mctp_route_lookup_null)
traverse the net's route list without the RCU read lock held. This means the
route lookup is subject to preemption, resulting in a potential grace period
expiry, and so an eventual kfree() while we still have the route pointer.

Add the proper read-side critical section locks around the route lookups,
preventing preemption and a possible parallel kfree().

The remaining net->mctp.routes accesses are already under a rcu_read_lock,
or protected by the RTNL for updates.

Based on an analysis from Sili Luo <rootlab@huawei.com>, where introducing a
delay in the route lookup could cause a UAF on simultaneous sendmsg() and
route deletion.

Reported-by: Sili Luo <rootlab@huawei.com>
Fixes: 889b7da23abf ("mctp: Add initial routing framework")
Cc: stable@vger.kernel.org
Signed-off-by: Jeremy Kerr <jk@codeconstruct.com.au>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/29c4b0e67dc1bf3571df3982de87df90cae9b631.1696837310.git.jk@codeconstruct.com.au
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
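For readers less familiar with the idiom, the fix follows the standard RCU
lookup-and-get pattern: hold the read-side critical section while walking the
list so a concurrent deleter's grace period cannot expire mid-traversal, and
pin the entry with refcount_inc_not_zero() before dropping the lock so it
stays valid afterwards. Below is a minimal kernel-style sketch of that
pattern; the demo_* names are hypothetical and are not the actual MCTP types.

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>

/* Hypothetical entry type, mirroring the refcounted-route shape used above. */
struct demo_entry {
	struct list_head list;
	refcount_t refs;
	int key;
};

/* Writers unlink entries under their own lock and free them via kfree_rcu(),
 * so a reader inside rcu_read_lock()/rcu_read_unlock() never sees freed memory.
 */
static struct demo_entry *demo_lookup(struct list_head *head, int key)
{
	struct demo_entry *tmp, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp, head, list) {
		if (tmp->key != key)
			continue;
		/* Pin the entry before leaving the critical section; a zero
		 * refcount means a writer is already tearing it down.
		 */
		if (refcount_inc_not_zero(&tmp->refs))
			found = tmp;
		break;
	}
	rcu_read_unlock();

	return found;	/* caller drops the reference when done */
}

Without the rcu_read_lock(), the walk can be preempted across a grace period
and the entry kfree()d while still in use, which is exactly the UAF window
described above.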
Diffstat (limited to 'net/mctp')
-rw-r--r--  net/mctp/route.c | 22
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/net/mctp/route.c b/net/mctp/route.c
index ab62fe447038..7a47a58aa54b 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -737,6 +737,8 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
 {
 	struct mctp_route *tmp, *rt = NULL;
 
+	rcu_read_lock();
+
 	list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
 		/* TODO: add metrics */
 		if (mctp_rt_match_eid(tmp, dnet, daddr)) {
@@ -747,21 +749,29 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
 		}
 	}
 
+	rcu_read_unlock();
+
 	return rt;
 }
 
 static struct mctp_route *mctp_route_lookup_null(struct net *net,
 						 struct net_device *dev)
 {
-	struct mctp_route *rt;
+	struct mctp_route *tmp, *rt = NULL;
 
-	list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
-		if (rt->dev->dev == dev && rt->type == RTN_LOCAL &&
-		    refcount_inc_not_zero(&rt->refs))
-			return rt;
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
+		if (tmp->dev->dev == dev && tmp->type == RTN_LOCAL &&
+		    refcount_inc_not_zero(&tmp->refs)) {
+			rt = tmp;
+			break;
+		}
 	}
 
-	return NULL;
+	rcu_read_unlock();
+
+	return rt;
 }
 
 static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
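For context on how a pinned route is consumed, here is a hypothetical caller
sketch. It assumes the existing mctp_route_release() refcount-drop helper and
the declarations in include/net/mctp.h, neither of which appears in this diff;
the demo_tx() name and error handling are illustrative only.

#include <linux/errno.h>
#include <net/mctp.h>

static int demo_tx(struct net *net, unsigned int dnet, mctp_eid_t daddr)
{
	struct mctp_route *rt;

	/* The lookup returns with a reference held (see the pattern above),
	 * so the route cannot be freed while we use it.
	 */
	rt = mctp_route_lookup(net, dnet, daddr);
	if (!rt)
		return -EHOSTUNREACH;

	/* ... build and queue packets via rt ... */

	mctp_route_release(rt);	/* balance the lookup's reference */
	return 0;
}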