author	Masahide NAKAMURA <nakam@linux-ipv6.org>	2007-04-30 11:33:35 +0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-30 11:58:09 +0400
commit	157bfc25020f7eb731f94140e099307ade47299e (patch)
tree	422821e5233daf0d8347ac361f09be9f49b43de4
parent	34588b4c046c34773e5a1a962da7b78b05c4d1bd (diff)
download	linux-157bfc25020f7eb731f94140e099307ade47299e.tar.xz
[XFRM]: Restrict upper layer information by bundle.
On MIPv6 usage, XFRM sub policy is enabled.

When the main (IPsec) and sub (MIPv6) policy selectors have the same
address set but different upper layer information (i.e. protocol number
and its ports or type/code), multiple bundles should be created.
Currently, however, the bundle created for the first such flow is reused
for all flows covered by this case.

It is useful for the bundle to carry the upper layer information so that
it can be restructured correctly when it does not match the flow.

1. Bundle was created by two policies
   The selector from the other policy is added to xfrm_dst. If the flow
   does not match the selector, it goes to the slow path to restructure
   a new bundle from a single policy.

2. Bundle was created by one policy
   The originating flow cache entry is added to xfrm_dst. If the flow
   does not match the cache, it goes to the slow path to try searching
   for another policy.

Signed-off-by: Masahide NAKAMURA <nakam@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/net/flow.h	 6
-rw-r--r--	include/net/xfrm.h	10
-rw-r--r--	net/xfrm/xfrm_policy.c	55
3 files changed, 71 insertions(+), 0 deletions(-)
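The rule the patch builds on is "upper layer information": the protocol number together with its ports (or ICMP type/code). The stand-alone sketch below mirrors the flow_cache_uli_match() test added in include/net/flow.h so the reuse rule can be tried outside the kernel; struct toy_flow and toy_uli_match() are hypothetical, simplified stand-ins for struct flowi and the real helper (addresses are omitted), not part of the patch.

/*
 * Illustration only: a user-space sketch of the "upper layer information"
 * comparison that the patch introduces as flow_cache_uli_match().
 * struct toy_flow is a simplified, hypothetical stand-in for the kernel's
 * struct flowi; addresses are omitted because the interesting case is
 * "same addresses, different upper layer information".
 */
#include <stdio.h>
#include <string.h>

struct toy_flow {
	unsigned char proto;					/* e.g. 6 = TCP, 58 = ICMPv6 */
	union {
		struct { unsigned short sport, dport; } ports;	/* TCP/UDP */
		struct { unsigned char type, code; } icmpt;	/* ICMP(v6) */
	} uli_u;
};

/* Same idea as flow_cache_uli_match(): the protocol and the whole
 * upper-layer union must be identical for the flows to match. */
static int toy_uli_match(const struct toy_flow *fl1, const struct toy_flow *fl2)
{
	return fl1->proto == fl2->proto &&
	       !memcmp(&fl1->uli_u, &fl2->uli_u, sizeof(fl1->uli_u));
}

int main(void)
{
	/* Two TCP flows, same address pair assumed, different dest ports. */
	struct toy_flow a = { .proto = 6, .uli_u = { .ports = { 1024,  80 } } };
	struct toy_flow b = { .proto = 6, .uli_u = { .ports = { 1024, 443 } } };

	/* No match: the bundle cached for flow a must not be reused for b;
	 * the lookup falls back to the slow path and rebuilds a bundle. */
	printf("uli match: %d\n", toy_uli_match(&a, &b));	/* prints 0 */
	return 0;
}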
diff --git a/include/net/flow.h b/include/net/flow.h
index ce4b10d8b412..f3cc1f812619 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -97,4 +97,10 @@ extern void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
extern void flow_cache_flush(void);
extern atomic_t flow_cache_genid;
+static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
+{
+	return (fl1->proto == fl2->proto &&
+		!memcmp(&fl1->uli_u, &fl2->uli_u, sizeof(fl1->uli_u)));
+}
+
#endif
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 9561bf817b02..66c2d3eec03c 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -603,6 +603,10 @@ struct xfrm_dst
struct rt6_info rt6;
} u;
struct dst_entry *route;
+#ifdef CONFIG_XFRM_SUB_POLICY
+	struct flowi *origin;
+	struct xfrm_selector *partner;
+#endif
u32 genid;
u32 route_mtu_cached;
u32 child_mtu_cached;
@@ -615,6 +619,12 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
dst_release(xdst->route);
if (likely(xdst->u.dst.xfrm))
xfrm_state_put(xdst->u.dst.xfrm);
+#ifdef CONFIG_XFRM_SUB_POLICY
+	kfree(xdst->origin);
+	xdst->origin = NULL;
+	kfree(xdst->partner);
+	xdst->partner = NULL;
+#endif
}
extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index dbf9d96a2f0b..263e34e45265 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1344,6 +1344,40 @@ xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
return err;
}
+static int inline
+xfrm_dst_alloc_copy(void **target, void *src, int size)
+{
+	if (!*target) {
+		*target = kmalloc(size, GFP_ATOMIC);
+		if (!*target)
+			return -ENOMEM;
+	}
+	memcpy(*target, src, size);
+	return 0;
+}
+
+static int inline
+xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
+{
+#ifdef CONFIG_XFRM_SUB_POLICY
+	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
+				   sel, sizeof(*sel));
+#else
+	return 0;
+#endif
+}
+
+static int inline
+xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
+{
+#ifdef CONFIG_XFRM_SUB_POLICY
+	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
+#else
+	return 0;
+#endif
+}
static int stale_bundle(struct dst_entry *dst);
@@ -1532,6 +1566,18 @@ restart:
err = -EHOSTUNREACH;
goto error;
}
+
+	if (npols > 1)
+		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
+	else
+		err = xfrm_dst_update_origin(dst, fl);
+	if (unlikely(err)) {
+		write_unlock_bh(&policy->lock);
+		if (dst)
+			dst_free(dst);
+		goto error;
+	}
+
dst->next = policy->bundles;
policy->bundles = dst;
dst_hold(dst);
@@ -1947,6 +1993,15 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
(dst->dev && !netif_running(dst->dev)))
return 0;
+#ifdef CONFIG_XFRM_SUB_POLICY
+	if (fl) {
+		if (first->origin && !flow_cache_uli_match(first->origin, fl))
+			return 0;
+		if (first->partner &&
+		    !xfrm_selector_match(first->partner, fl, family))
+			return 0;
+	}
+#endif
last = NULL;
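Taken together, the xfrm_policy.c hunks record one piece of state per bundle at creation time (the second policy's selector when npols > 1, otherwise the originating flow) and consult it in xfrm_bundle_ok() before reuse. The user-space sketch below models that record-then-check flow under simplified matching (protocol plus destination port only); the toy_* names are hypothetical stand-ins, not kernel API.

/*
 * Illustration only: a condensed model of the two cases the patch stores
 * per bundle and of the staleness check added to xfrm_bundle_ok().
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_flow { unsigned char proto; unsigned short sport, dport; };
struct toy_selector { unsigned char proto; unsigned short dport; };

struct toy_bundle {
	struct toy_flow *origin;	/* case 2: built from one policy */
	struct toy_selector *partner;	/* case 1: built from two policies */
};

/* Mirrors xfrm_dst_update_parent()/xfrm_dst_update_origin(): remember
 * either the second policy's selector or the originating flow. */
static int toy_bundle_record(struct toy_bundle *b, int npols,
			     const struct toy_selector *sel,
			     const struct toy_flow *fl)
{
	if (npols > 1) {
		b->partner = malloc(sizeof(*b->partner));
		if (!b->partner)
			return -1;
		memcpy(b->partner, sel, sizeof(*sel));
	} else {
		b->origin = malloc(sizeof(*b->origin));
		if (!b->origin)
			return -1;
		memcpy(b->origin, fl, sizeof(*fl));
	}
	return 0;
}

/* Mirrors the added xfrm_bundle_ok() check: reuse the bundle only if the
 * recorded origin flow or partner selector still matches this flow;
 * otherwise the caller takes the slow path and rebuilds the bundle. */
static bool toy_bundle_usable(const struct toy_bundle *b, const struct toy_flow *fl)
{
	if (b->origin && (b->origin->proto != fl->proto ||
			  b->origin->sport != fl->sport ||
			  b->origin->dport != fl->dport))
		return false;
	if (b->partner && (b->partner->proto != fl->proto ||
			   b->partner->dport != fl->dport))
		return false;
	return true;
}

int main(void)
{
	struct toy_bundle b = { NULL, NULL };
	struct toy_flow first = { 6, 1024, 80 }, second = { 6, 1024, 443 };

	/* Bundle built for the first flow by a single policy (case 2). */
	if (toy_bundle_record(&b, 1, NULL, &first))
		return 1;

	/* The second flow shares addresses but not ports, so the cached
	 * bundle is rejected and a new one would be built on the slow path. */
	printf("first reusable:  %d\n", toy_bundle_usable(&b, &first));		/* 1 */
	printf("second reusable: %d\n", toy_bundle_usable(&b, &second));	/* 0 */

	free(b.origin);
	free(b.partner);
	return 0;
}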