author     Steffen Klassert <steffen.klassert@secunet.com>   2014-02-19 13:07:34 +0400
committer  Steffen Klassert <steffen.klassert@secunet.com>   2014-02-19 13:35:43 +0400
commit     1a1ccc96abb2ed9b8fbb71018e64b97324caef53 (patch)
tree       0888c68978167c36ce434344264afb19cf83a7c7 /net/xfrm
parent     d3623099d3509fa68fa28235366049dd3156c63a (diff)
download   linux-1a1ccc96abb2ed9b8fbb71018e64b97324caef53.tar.xz
xfrm: Remove caching of xfrm_policy_sk_bundles
We currently cache socket policy bundles at xfrm_policy_sk_bundles. These
cached bundles are never used. Instead we create and cache a new one
whenever xfrm_lookup() is called on a socket policy. Most protocols cache
the used routes to the socket, so let's remove the unused caching of
socket policy bundles in xfrm.

Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
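The pattern being removed boils down to this: every socket-policy lookup pushed the new bundle onto a global singly linked list under a spinlock, and the only consumer of that list was the garbage collector, which spliced the list off and freed it. Below is a minimal userspace sketch of that pattern, assuming a plain struct bundle and a pthread mutex stand in for struct dst_entry and the kernel spinlock; it only illustrates why the list is pure overhead, it is not kernel code.

/* Simplified model of the removed xfrm_policy_sk_bundles cache:
 * bundles are only ever added and later freed, never looked up again. */
#include <pthread.h>
#include <stdlib.h>

struct bundle {
	struct bundle *next;
	/* per-bundle state would live here */
};

static struct bundle *sk_bundles;	/* models xfrm_policy_sk_bundles */
static pthread_mutex_t sk_bundles_lock = PTHREAD_MUTEX_INITIALIZER;

/* What the removed hunk in xfrm_lookup() did: chain each new
 * socket-policy bundle onto the global list. */
void cache_bundle(struct bundle *b)
{
	pthread_mutex_lock(&sk_bundles_lock);
	b->next = sk_bundles;
	sk_bundles = b;
	pthread_mutex_unlock(&sk_bundles_lock);
}

/* What the removed __xfrm_garbage_collect() did: detach the whole
 * list under the lock, then free the entries outside of it. */
void garbage_collect(void)
{
	struct bundle *head, *next;

	pthread_mutex_lock(&sk_bundles_lock);
	head = sk_bundles;
	sk_bundles = NULL;
	pthread_mutex_unlock(&sk_bundles_lock);

	while (head) {
		next = head->next;
		free(head);
		head = next;
	}
}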
Diffstat (limited to 'net/xfrm')
-rw-r--r--   net/xfrm/xfrm_policy.c   28
1 file changed, 0 insertions, 28 deletions
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 2232c6f26aff..bb3669d973a7 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -39,8 +39,6 @@
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN 100
-static struct dst_entry *xfrm_policy_sk_bundles;
-
static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
__read_mostly;
@@ -2109,13 +2107,6 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
goto no_transform;
}
- dst_hold(&xdst->u.dst);
-
- spin_lock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
- xdst->u.dst.next = xfrm_policy_sk_bundles;
- xfrm_policy_sk_bundles = &xdst->u.dst;
- spin_unlock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
-
route = xdst->route;
}
}
@@ -2549,33 +2540,15 @@ static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
return dst;
}
-static void __xfrm_garbage_collect(struct net *net)
-{
- struct dst_entry *head, *next;
-
- spin_lock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
- head = xfrm_policy_sk_bundles;
- xfrm_policy_sk_bundles = NULL;
- spin_unlock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
-
- while (head) {
- next = head->next;
- dst_free(head);
- head = next;
- }
-}
-
void xfrm_garbage_collect(struct net *net)
{
flow_cache_flush(net);
- __xfrm_garbage_collect(net);
}
EXPORT_SYMBOL(xfrm_garbage_collect);
static void xfrm_garbage_collect_deferred(struct net *net)
{
flow_cache_flush_deferred(net);
- __xfrm_garbage_collect(net);
}
static void xfrm_init_pmtu(struct dst_entry *dst)
@@ -2944,7 +2917,6 @@ static int __net_init xfrm_net_init(struct net *net)
/* Initialize the per-net locks here */
spin_lock_init(&net->xfrm.xfrm_state_lock);
rwlock_init(&net->xfrm.xfrm_policy_lock);
- spin_lock_init(&net->xfrm.xfrm_policy_sk_bundle_lock);
mutex_init(&net->xfrm.xfrm_cfg_mutex);
flow_cache_init(net);
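For context, the replacement relies on the caching that already happens at the socket level rather than on a global list; in the kernel that state lives in sk->sk_dst_cache. The sketch below is a simplified userspace model of that idea (the struct names, the obsolete flag and route_lookup_slow() are hypothetical stand-ins, not the kernel API): each socket keeps its most recently used route and revalidates it on use, so a separate global cache of socket-policy bundles has nothing left to provide.

/* Userspace model of per-socket route caching. */
#include <stdlib.h>

struct route {
	int obsolete;			/* non-zero once the route is stale */
	/* routing state would live here */
};

struct socket_ctx {
	struct route *dst_cache;	/* models sk->sk_dst_cache */
};

/* Stand-in for a full routing/policy lookup. */
static struct route *route_lookup_slow(void)
{
	return calloc(1, sizeof(struct route));
}

/* Return the cached route if it is still valid, otherwise look a new
 * one up and cache it on the socket itself. */
struct route *socket_route(struct socket_ctx *sk)
{
	struct route *rt = sk->dst_cache;

	if (rt && !rt->obsolete)
		return rt;

	free(rt);			/* free(NULL) is a no-op */
	rt = route_lookup_slow();
	sk->dst_cache = rt;
	return rt;
}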