author    Linus Torvalds <torvalds@linux-foundation.org>  2018-12-28 00:04:52 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-12-28 00:04:52 +0300
commit    e0c38a4d1f196a4b17d2eba36afff8f656a4f1de (patch)
tree      b26a69fabef0160adb127416a9744217700feeb7 /lib
parent    7f9f852c75e7d776b078813586c76a2bc7dca993 (diff)
parent    90cadbbf341dd5b2df991c33a6bd6341f3a53788 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) New ipset extensions for matching on destination MAC addresses, from Stefano Brivio.
 2) Add ipv4 ttl and tos, plus ipv6 flow label and hop limit offloads to nfp driver. From Stefano Brivio.
 3) Implement GRO for plain UDP sockets, from Paolo Abeni.
 4) Lots of work from Michał Mirosław to eliminate the VLAN_TAG_PRESENT bit so that we could support the entire vlan_tci value.
 5) Rework the IPSEC policy lookups to better optimize more usecases, from Florian Westphal.
 6) Infrastructure changes eliminating direct manipulation of SKB lists wherever possible, and to always use the appropriate SKB list helpers. This work is still ongoing...
 7) Lots of PHY driver and state machine improvements and simplifications, from Heiner Kallweit.
 8) Various TSO deferral refinements, from Eric Dumazet.
 9) Add ntuple filter support to aquantia driver, from Dmitry Bogdanov.
10) Batch dropping of XDP packets in tuntap, from Jason Wang.
11) Lots of cleanups and improvements to the r8169 driver from Heiner Kallweit, including support for ->xmit_more. This driver has been getting some much needed love since he started working on it.
12) Lots of new forwarding selftests from Petr Machata.
13) Enable VXLAN learning in mlxsw driver, from Ido Schimmel.
14) Packed ring support for virtio, from Tiwei Bie.
15) Add new Aquantia AQtion USB driver, from Dmitry Bezrukov.
16) Add XDP support to dpaa2-eth driver, from Ioana Ciocoi Radulescu.
17) Implement coalescing on TCP backlog queue, from Eric Dumazet.
18) Implement carrier change in tun driver, from Nicolas Dichtel.
19) Support msg_zerocopy in UDP, from Willem de Bruijn.
20) Significantly improve garbage collection of neighbor objects when the table has many PERMANENT entries, from David Ahern.
21) Remove egdev usage from nfp and mlx5, and remove the facility completely from the tree as it no longer has any users. From Oz Shlomo and others.
22) Add a NETDEV_PRE_CHANGEADDR so that drivers can veto the change and therefore abort the operation before the commit phase (which is the NETDEV_CHANGEADDR event). From Petr Machata.
23) Add indirect call wrappers to avoid retpoline overhead, and use them in the GRO code paths. From Paolo Abeni.
24) Add support for netlink FDB get operations, from Roopa Prabhu.
25) Support bloom filter in mlxsw driver, from Nir Dotan.
26) Add SKB extension infrastructure. This consolidates the handling of the auxiliary SKB data used by IPSEC and bridge netfilter, and is designed to support the needs of MPTCP which could be integrated in the future.
27) Lots of XDP TX optimizations in mlx5 from Tariq Toukan.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1845 commits)
  net: dccp: fix kernel crash on module load
  drivers/net: appletalk/cops: remove redundant if statement and mask
  bnx2x: Fix NULL pointer dereference in bnx2x_del_all_vlans() on some hw
  net/net_namespace: Check the return value of register_pernet_subsys()
  net/netlink_compat: Fix a missing check of nla_parse_nested
  ieee802154: lowpan_header_create check must check daddr
  net/mlx4_core: drop useless LIST_HEAD
  mlxsw: spectrum: drop useless LIST_HEAD
  net/mlx5e: drop useless LIST_HEAD
  iptunnel: Set tun_flags in the iptunnel_metadata_reply from src
  net/mlx5e: fix semicolon.cocci warnings
  staging: octeon: fix build failure with XFRM enabled
  net: Revert recent Spectre-v1 patches.
  can: af_can: Fix Spectre v1 vulnerability
  packet: validate address length if non-zero
  nfc: af_nfc: Fix Spectre v1 vulnerability
  phonet: af_phonet: Fix Spectre v1 vulnerability
  net: core: Fix Spectre v1 vulnerability
  net: minor cleanup in skb_ext_add()
  net: drop the unused helper skb_ext_get()
  ...
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig              3
-rw-r--r--  lib/Kconfig.debug       10
-rw-r--r--  lib/Makefile             2
-rw-r--r--  lib/cordic.c            23
-rw-r--r--  lib/objagg.c           501
-rw-r--r--  lib/rhashtable.c         8
-rw-r--r--  lib/test_bpf.c          14
-rw-r--r--  lib/test_objagg.c      836
-rw-r--r--  lib/test_rhashtable.c   32
9 files changed, 1391 insertions(+), 38 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index a9965f4af4dd..7dbbcfe9cd90 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -624,3 +624,6 @@ config GENERIC_LIB_CMPDI2
config GENERIC_LIB_UCMPDI2
bool
+
+config OBJAGG
+ tristate "objagg" if COMPILE_TEST
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1af29b8224fd..b3c91b9e32f8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1976,6 +1976,16 @@ config TEST_MEMCAT_P
If unsure, say N.
+config TEST_OBJAGG
+ tristate "Perform selftest on object aggreration manager"
+ default n
+ depends on OBJAGG
+ help
+ Enable this option to test the object aggregation manager on boot
+ (or module load).
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config MEMTEST
diff --git a/lib/Makefile b/lib/Makefile
index db06d1237898..f5262d30bfe6 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_TEST_PARMAN) += test_parman.o
obj-$(CONFIG_TEST_KMOD) += test_kmod.o
obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
+obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -274,3 +275,4 @@ obj-$(CONFIG_GENERIC_LIB_LSHRDI3) += lshrdi3.o
obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o
obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
+obj-$(CONFIG_OBJAGG) += objagg.o
diff --git a/lib/cordic.c b/lib/cordic.c
index 6cf477839ebd..8ef27c12956f 100644
--- a/lib/cordic.c
+++ b/lib/cordic.c
@@ -16,15 +16,6 @@
#include <linux/module.h>
#include <linux/cordic.h>
-#define CORDIC_ANGLE_GEN 39797
-#define CORDIC_PRECISION_SHIFT 16
-#define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2)
-
-#define FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT))
-#define FLOAT(X) (((X) >= 0) \
- ? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
- : -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))
-
static const s32 arctan_table[] = {
2949120,
1740967,
@@ -64,16 +55,16 @@ struct cordic_iq cordic_calc_iq(s32 theta)
coord.q = 0;
angle = 0;
- theta = FIXED(theta);
+ theta = CORDIC_FIXED(theta);
signtheta = (theta < 0) ? -1 : 1;
- theta = ((theta + FIXED(180) * signtheta) % FIXED(360)) -
- FIXED(180) * signtheta;
+ theta = ((theta + CORDIC_FIXED(180) * signtheta) % CORDIC_FIXED(360)) -
+ CORDIC_FIXED(180) * signtheta;
- if (FLOAT(theta) > 90) {
- theta -= FIXED(180);
+ if (CORDIC_FLOAT(theta) > 90) {
+ theta -= CORDIC_FIXED(180);
signx = -1;
- } else if (FLOAT(theta) < -90) {
- theta += FIXED(180);
+ } else if (CORDIC_FLOAT(theta) < -90) {
+ theta += CORDIC_FIXED(180);
signx = -1;
}
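
For context, the macros deleted above are the Q16 fixed-point helpers the CORDIC routine relies on; the renamed CORDIC_FIXED()/CORDIC_FLOAT() variants used by the new code are presumably provided by include/linux/cordic.h after this change. A minimal sketch of the representation, reconstructed from the removed definitions (comments are illustrative, not part of the patch):

/* Sketch only: mirrors the deleted FIXED()/FLOAT() macros above, assuming
 * the CORDIC_* variants keep the same Q16 precision.
 */
#define CORDIC_PRECISION_SHIFT	16

/* degrees (integer) -> Q16 fixed point */
#define CORDIC_FIXED(X)	((s32)((X) << CORDIC_PRECISION_SHIFT))

/* Q16 fixed point -> degrees, rounded to the nearest integer */
#define CORDIC_FLOAT(X)	(((X) >= 0) \
		? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
		: -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))

/* Example: CORDIC_FIXED(45) == 45 << 16 == 2949120, which is exactly the
 * first arctan_table[] entry below (atan(2^0) == 45 degrees in Q16).
 */
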
diff --git a/lib/objagg.c b/lib/objagg.c
new file mode 100644
index 000000000000..c9b457a91153
--- /dev/null
+++ b/lib/objagg.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rhashtable.h>
+#include <linux/list.h>
+#include <linux/sort.h>
+#include <linux/objagg.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/objagg.h>
+
+struct objagg {
+ const struct objagg_ops *ops;
+ void *priv;
+ struct rhashtable obj_ht;
+ struct rhashtable_params ht_params;
+ struct list_head obj_list;
+ unsigned int obj_count;
+};
+
+struct objagg_obj {
+ struct rhash_head ht_node; /* member of objagg->obj_ht */
+ struct list_head list; /* member of objagg->obj_list */
+ struct objagg_obj *parent; /* if the object is nested, this
+ * holds a pointer to the parent, otherwise NULL
+ */
+ union {
+ void *delta_priv; /* user delta private */
+ void *root_priv; /* user root private */
+ };
+ unsigned int refcount; /* counts number of users of this object
+ * including nested objects
+ */
+ struct objagg_obj_stats stats;
+ unsigned long obj[0];
+};
+
+static unsigned int objagg_obj_ref_inc(struct objagg_obj *objagg_obj)
+{
+ return ++objagg_obj->refcount;
+}
+
+static unsigned int objagg_obj_ref_dec(struct objagg_obj *objagg_obj)
+{
+ return --objagg_obj->refcount;
+}
+
+static void objagg_obj_stats_inc(struct objagg_obj *objagg_obj)
+{
+ objagg_obj->stats.user_count++;
+ objagg_obj->stats.delta_user_count++;
+ if (objagg_obj->parent)
+ objagg_obj->parent->stats.delta_user_count++;
+}
+
+static void objagg_obj_stats_dec(struct objagg_obj *objagg_obj)
+{
+ objagg_obj->stats.user_count--;
+ objagg_obj->stats.delta_user_count--;
+ if (objagg_obj->parent)
+ objagg_obj->parent->stats.delta_user_count--;
+}
+
+static bool objagg_obj_is_root(const struct objagg_obj *objagg_obj)
+{
+ /* Nesting is not supported, so we can use ->parent
+ * to figure out if the object is root.
+ */
+ return !objagg_obj->parent;
+}
+
+/**
+ * objagg_obj_root_priv - obtains root private for an object
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Either the object is root itself when the private is returned
+ * directly, or the parent is root and its private is returned
+ * instead.
+ *
+ * Returns a user private root pointer.
+ */
+const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj)
+{
+ if (objagg_obj_is_root(objagg_obj))
+ return objagg_obj->root_priv;
+ WARN_ON(!objagg_obj_is_root(objagg_obj->parent));
+ return objagg_obj->parent->root_priv;
+}
+EXPORT_SYMBOL(objagg_obj_root_priv);
+
+/**
+ * objagg_obj_delta_priv - obtains delta private for an object
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Returns user private delta pointer or NULL in case the passed
+ * object is root.
+ */
+const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj)
+{
+ if (objagg_obj_is_root(objagg_obj))
+ return NULL;
+ return objagg_obj->delta_priv;
+}
+EXPORT_SYMBOL(objagg_obj_delta_priv);
+
+/**
+ * objagg_obj_raw - obtains object user private pointer
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Returns user private pointer as was passed to objagg_obj_get() by "obj" arg.
+ */
+const void *objagg_obj_raw(const struct objagg_obj *objagg_obj)
+{
+ return objagg_obj->obj;
+}
+EXPORT_SYMBOL(objagg_obj_raw);
+
+static struct objagg_obj *objagg_obj_lookup(struct objagg *objagg, void *obj)
+{
+ return rhashtable_lookup_fast(&objagg->obj_ht, obj, objagg->ht_params);
+}
+
+static int objagg_obj_parent_assign(struct objagg *objagg,
+ struct objagg_obj *objagg_obj,
+ struct objagg_obj *parent)
+{
+ void *delta_priv;
+
+ delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj,
+ objagg_obj->obj);
+ if (IS_ERR(delta_priv))
+ return PTR_ERR(delta_priv);
+
+ /* User returned a delta private, which means that
+ * our object can be aggregated into the parent.
+ */
+ objagg_obj->parent = parent;
+ objagg_obj->delta_priv = delta_priv;
+ objagg_obj_ref_inc(objagg_obj->parent);
+ trace_objagg_obj_parent_assign(objagg, objagg_obj,
+ parent,
+ parent->refcount);
+ return 0;
+}
+
+static int objagg_obj_parent_lookup_assign(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ struct objagg_obj *objagg_obj_cur;
+ int err;
+
+ list_for_each_entry(objagg_obj_cur, &objagg->obj_list, list) {
+ /* Nesting is not supported. In case the object
+ * is not root, it cannot be assigned as parent.
+ */
+ if (!objagg_obj_is_root(objagg_obj_cur))
+ continue;
+ err = objagg_obj_parent_assign(objagg, objagg_obj,
+ objagg_obj_cur);
+ if (!err)
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static void __objagg_obj_put(struct objagg *objagg,
+ struct objagg_obj *objagg_obj);
+
+static void objagg_obj_parent_unassign(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ trace_objagg_obj_parent_unassign(objagg, objagg_obj,
+ objagg_obj->parent,
+ objagg_obj->parent->refcount);
+ objagg->ops->delta_destroy(objagg->priv, objagg_obj->delta_priv);
+ __objagg_obj_put(objagg, objagg_obj->parent);
+}
+
+static int objagg_obj_root_create(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ objagg_obj->root_priv = objagg->ops->root_create(objagg->priv,
+ objagg_obj->obj);
+ if (IS_ERR(objagg_obj->root_priv))
+ return PTR_ERR(objagg_obj->root_priv);
+
+ trace_objagg_obj_root_create(objagg, objagg_obj);
+ return 0;
+}
+
+static void objagg_obj_root_destroy(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ trace_objagg_obj_root_destroy(objagg, objagg_obj);
+ objagg->ops->root_destroy(objagg->priv, objagg_obj->root_priv);
+}
+
+static int objagg_obj_init(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ int err;
+
+ /* Try to find if the object can be aggregated under an existing one. */
+ err = objagg_obj_parent_lookup_assign(objagg, objagg_obj);
+ if (!err)
+ return 0;
+ /* If aggregation is not possible, make the object a root. */
+ return objagg_obj_root_create(objagg, objagg_obj);
+}
+
+static void objagg_obj_fini(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ if (!objagg_obj_is_root(objagg_obj))
+ objagg_obj_parent_unassign(objagg, objagg_obj);
+ else
+ objagg_obj_root_destroy(objagg, objagg_obj);
+}
+
+static struct objagg_obj *objagg_obj_create(struct objagg *objagg, void *obj)
+{
+ struct objagg_obj *objagg_obj;
+ int err;
+
+ objagg_obj = kzalloc(sizeof(*objagg_obj) + objagg->ops->obj_size,
+ GFP_KERNEL);
+ if (!objagg_obj)
+ return ERR_PTR(-ENOMEM);
+ objagg_obj_ref_inc(objagg_obj);
+ memcpy(objagg_obj->obj, obj, objagg->ops->obj_size);
+
+ err = objagg_obj_init(objagg, objagg_obj);
+ if (err)
+ goto err_obj_init;
+
+ err = rhashtable_insert_fast(&objagg->obj_ht, &objagg_obj->ht_node,
+ objagg->ht_params);
+ if (err)
+ goto err_ht_insert;
+ list_add(&objagg_obj->list, &objagg->obj_list);
+ objagg->obj_count++;
+ trace_objagg_obj_create(objagg, objagg_obj);
+
+ return objagg_obj;
+
+err_ht_insert:
+ objagg_obj_fini(objagg, objagg_obj);
+err_obj_init:
+ kfree(objagg_obj);
+ return ERR_PTR(err);
+}
+
+static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj)
+{
+ struct objagg_obj *objagg_obj;
+
+ /* First, try to find the object exactly as user passed it,
+ * perhaps it is already in use.
+ */
+ objagg_obj = objagg_obj_lookup(objagg, obj);
+ if (objagg_obj) {
+ objagg_obj_ref_inc(objagg_obj);
+ return objagg_obj;
+ }
+
+ return objagg_obj_create(objagg, obj);
+}
+
+/**
+ * objagg_obj_get - gets an object within objagg instance
+ * @objagg: objagg instance
+ * @obj: user-specific private object pointer
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Size of the "obj" memory is specified in "objagg->ops".
+ *
+ * There are 3 main options this function wraps:
+ * 1) The object according to "obj" already exists. In that case
+ * the reference counter is incremented and the object is returned.
+ * 2) The object does not exist, but it can be aggregated within
+ * another object. In that case, user ops->delta_create() is called
+ * to obtain delta data and a new object is created with returned
+ * user-delta private pointer.
+ * 3) The object does not exist and cannot be aggregated into
+ * any of the existing objects. In that case, user ops->root_create()
+ * is called to create the root and a new object is created with
+ * returned user-root private pointer.
+ *
+ * Returns a pointer to the objagg object instance in case of success,
+ * otherwise it returns an error pointer using the ERR_PTR() macro.
+ */
+struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj)
+{
+ struct objagg_obj *objagg_obj;
+
+ objagg_obj = __objagg_obj_get(objagg, obj);
+ if (IS_ERR(objagg_obj))
+ return objagg_obj;
+ objagg_obj_stats_inc(objagg_obj);
+ trace_objagg_obj_get(objagg, objagg_obj, objagg_obj->refcount);
+ return objagg_obj;
+}
+EXPORT_SYMBOL(objagg_obj_get);
+
+static void objagg_obj_destroy(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ trace_objagg_obj_destroy(objagg, objagg_obj);
+ --objagg->obj_count;
+ list_del(&objagg_obj->list);
+ rhashtable_remove_fast(&objagg->obj_ht, &objagg_obj->ht_node,
+ objagg->ht_params);
+ objagg_obj_fini(objagg, objagg_obj);
+ kfree(objagg_obj);
+}
+
+static void __objagg_obj_put(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ if (!objagg_obj_ref_dec(objagg_obj))
+ objagg_obj_destroy(objagg, objagg_obj);
+}
+
+/**
+ * objagg_obj_put - puts an object within objagg instance
+ * @objagg: objagg instance
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Symmetric to objagg_obj_get().
+ */
+void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj)
+{
+ trace_objagg_obj_put(objagg, objagg_obj, objagg_obj->refcount);
+ objagg_obj_stats_dec(objagg_obj);
+ __objagg_obj_put(objagg, objagg_obj);
+}
+EXPORT_SYMBOL(objagg_obj_put);
+
+/**
+ * objagg_create - creates a new objagg instance
+ * @ops: user-specific callbacks
+ * @priv: pointer to a private data passed to the ops
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * The purpose of the library is to provide an infrastructure to
+ * aggregate user-specified objects. The library does not care about the
+ * type of the object. The user fills up ops which take care of the
+ * specific user object manipulation.
+ *
+ * As a very simple example, consider integer numbers: take number 8 as
+ * a root object. It can aggregate number 9 with delta 1, number 10 with
+ * delta 2, etc. This example is implemented as part of the testing
+ * module in the test_objagg.c file.
+ *
+ * Each objagg instance contains multiple trees. Each tree node is
+ * represented by "an object". In the current implementation there can be
+ * only root and leaf nodes. Leaf nodes are called deltas.
+ * But in general, this can be easily extended for intermediate nodes.
+ * In that extension, a delta would be associated with all non-root
+ * nodes.
+ *
+ * Returns a pointer to the newly created objagg instance in case of success,
+ * otherwise it returns an error pointer using the ERR_PTR() macro.
+ */
+struct objagg *objagg_create(const struct objagg_ops *ops, void *priv)
+{
+ struct objagg *objagg;
+ int err;
+
+ if (WARN_ON(!ops || !ops->root_create || !ops->root_destroy ||
+ !ops->delta_create || !ops->delta_destroy))
+ return ERR_PTR(-EINVAL);
+ objagg = kzalloc(sizeof(*objagg), GFP_KERNEL);
+ if (!objagg)
+ return ERR_PTR(-ENOMEM);
+ objagg->ops = ops;
+ objagg->priv = priv;
+ INIT_LIST_HEAD(&objagg->obj_list);
+
+ objagg->ht_params.key_len = ops->obj_size;
+ objagg->ht_params.key_offset = offsetof(struct objagg_obj, obj);
+ objagg->ht_params.head_offset = offsetof(struct objagg_obj, ht_node);
+
+ err = rhashtable_init(&objagg->obj_ht, &objagg->ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ trace_objagg_create(objagg);
+ return objagg;
+
+err_rhashtable_init:
+ kfree(objagg);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(objagg_create);
+
+/**
+ * objagg_destroy - destroys an objagg instance
+ * @objagg: objagg instance
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void objagg_destroy(struct objagg *objagg)
+{
+ trace_objagg_destroy(objagg);
+ WARN_ON(!list_empty(&objagg->obj_list));
+ rhashtable_destroy(&objagg->obj_ht);
+ kfree(objagg);
+}
+EXPORT_SYMBOL(objagg_destroy);
+
+static int objagg_stats_info_sort_cmp_func(const void *a, const void *b)
+{
+ const struct objagg_obj_stats_info *stats_info1 = a;
+ const struct objagg_obj_stats_info *stats_info2 = b;
+
+ if (stats_info1->is_root != stats_info2->is_root)
+ return stats_info2->is_root - stats_info1->is_root;
+ if (stats_info1->stats.delta_user_count !=
+ stats_info2->stats.delta_user_count)
+ return stats_info2->stats.delta_user_count -
+ stats_info1->stats.delta_user_count;
+ return stats_info2->stats.user_count - stats_info1->stats.user_count;
+}
+
+/**
+ * objagg_stats_get - obtains stats of the objagg instance
+ * @objagg: objagg instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * The returned structure contains statistics of all objects
+ * currently in use, ordered by the following rules:
+ * 1) Root objects are always on lower indexes than the rest.
+ * 2) Objects with higher delta user count are always on lower
+ * indexes.
+ * 3) In case more objects have the same delta user count,
+ * the objects are ordered by user count.
+ *
+ * Returns a pointer to the stats instance in case of success,
+ * otherwise it returns an error pointer using the ERR_PTR() macro.
+ */
+const struct objagg_stats *objagg_stats_get(struct objagg *objagg)
+{
+ struct objagg_stats *objagg_stats;
+ struct objagg_obj *objagg_obj;
+ size_t alloc_size;
+ int i;
+
+ alloc_size = sizeof(*objagg_stats) +
+ sizeof(objagg_stats->stats_info[0]) * objagg->obj_count;
+ objagg_stats = kzalloc(alloc_size, GFP_KERNEL);
+ if (!objagg_stats)
+ return ERR_PTR(-ENOMEM);
+
+ i = 0;
+ list_for_each_entry(objagg_obj, &objagg->obj_list, list) {
+ memcpy(&objagg_stats->stats_info[i].stats, &objagg_obj->stats,
+ sizeof(objagg_stats->stats_info[0].stats));
+ objagg_stats->stats_info[i].objagg_obj = objagg_obj;
+ objagg_stats->stats_info[i].is_root =
+ objagg_obj_is_root(objagg_obj);
+ i++;
+ }
+ objagg_stats->stats_info_count = i;
+
+ sort(objagg_stats->stats_info, objagg_stats->stats_info_count,
+ sizeof(struct objagg_obj_stats_info),
+ objagg_stats_info_sort_cmp_func, NULL);
+
+ return objagg_stats;
+}
+EXPORT_SYMBOL(objagg_stats_get);
+
+/**
+ * objagg_stats_put - puts stats of the objagg instance
+ * @objagg_stats: objagg instance stats
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void objagg_stats_put(const struct objagg_stats *objagg_stats)
+{
+ kfree(objagg_stats);
+}
+EXPORT_SYMBOL(objagg_stats_put);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Object aggregation manager");
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 30526afa8343..852ffa5160f1 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -1179,8 +1179,7 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
unsigned int hash)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
- static struct rhash_head __rcu *rhnull =
- (struct rhash_head __rcu *)NULLS_MARKER(0);
+ static struct rhash_head __rcu *rhnull;
unsigned int index = hash & ((1 << tbl->nest) - 1);
unsigned int size = tbl->size >> tbl->nest;
unsigned int subhash = hash;
@@ -1198,8 +1197,11 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
subhash >>= shift;
}
- if (!ntbl)
+ if (!ntbl) {
+ if (!rhnull)
+ INIT_RHT_NULLS_HEAD(rhnull);
return &rhnull;
+ }
return &ntbl[subhash].bucket;
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index aa22bcaec1dc..f3e570722a7e 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -39,6 +39,7 @@
#define SKB_HASH 0x1234aaab
#define SKB_QUEUE_MAP 123
#define SKB_VLAN_TCI 0xffff
+#define SKB_VLAN_PRESENT 1
#define SKB_DEV_IFINDEX 577
#define SKB_DEV_TYPE 588
@@ -725,8 +726,8 @@ static struct bpf_test tests[] = {
CLASSIC,
{ },
{
- { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT },
- { 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }
+ { 1, SKB_VLAN_TCI },
+ { 10, SKB_VLAN_TCI }
},
},
{
@@ -739,8 +740,8 @@ static struct bpf_test tests[] = {
CLASSIC,
{ },
{
- { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
- { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
+ { 1, SKB_VLAN_PRESENT },
+ { 10, SKB_VLAN_PRESENT }
},
},
{
@@ -5289,8 +5290,8 @@ static struct bpf_test tests[] = {
#endif
{ },
{
- { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
- { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
+ { 1, SKB_VLAN_PRESENT },
+ { 10, SKB_VLAN_PRESENT }
},
.fill_helper = bpf_fill_maxinsns6,
.expected_errcode = -ENOTSUPP,
@@ -6493,6 +6494,7 @@ static struct sk_buff *populate_skb(char *buf, int size)
skb->hash = SKB_HASH;
skb->queue_mapping = SKB_QUEUE_MAP;
skb->vlan_tci = SKB_VLAN_TCI;
+ skb->vlan_present = SKB_VLAN_PRESENT;
skb->vlan_proto = htons(ETH_P_IP);
dev_net_set(&dev, &init_net);
skb->dev = &dev;
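
The test_bpf.c changes above track the VLAN_TAG_PRESENT removal mentioned in item 4 of the merge message: tag presence now lives in skb->vlan_present instead of being encoded as a bit inside vlan_tci, so the full 16-bit TCI is usable. A short sketch of how code elsewhere typically sets and queries these fields, using the existing VLAN helpers only for illustration (not part of this patch):

#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>

static void example_vlan_tag(struct sk_buff *skb)
{
	/* Record an accelerated VLAN tag; after this series the whole
	 * 16-bit TCI fits, since no bit is reserved for "present".
	 */
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 0xffff);

	if (skb_vlan_tag_present(skb))
		pr_info("tci=0x%x\n", skb_vlan_tag_get(skb));
}
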
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
new file mode 100644
index 000000000000..ab57144bb0cd
--- /dev/null
+++ b/lib/test_objagg.c
@@ -0,0 +1,836 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/objagg.h>
+
+struct tokey {
+ unsigned int id;
+};
+
+#define NUM_KEYS 32
+
+static int key_id_index(unsigned int key_id)
+{
+ if (key_id >= NUM_KEYS) {
+ WARN_ON(1);
+ return 0;
+ }
+ return key_id;
+}
+
+#define BUF_LEN 128
+
+struct world {
+ unsigned int root_count;
+ unsigned int delta_count;
+ char next_root_buf[BUF_LEN];
+ struct objagg_obj *objagg_objs[NUM_KEYS];
+ unsigned int key_refs[NUM_KEYS];
+};
+
+struct root {
+ struct tokey key;
+ char buf[BUF_LEN];
+};
+
+struct delta {
+ unsigned int key_id_diff;
+};
+
+static struct objagg_obj *world_obj_get(struct world *world,
+ struct objagg *objagg,
+ unsigned int key_id)
+{
+ struct objagg_obj *objagg_obj;
+ struct tokey key;
+ int err;
+
+ key.id = key_id;
+ objagg_obj = objagg_obj_get(objagg, &key);
+ if (IS_ERR(objagg_obj)) {
+ pr_err("Key %u: Failed to get object.\n", key_id);
+ return objagg_obj;
+ }
+ if (!world->key_refs[key_id_index(key_id)]) {
+ world->objagg_objs[key_id_index(key_id)] = objagg_obj;
+ } else if (world->objagg_objs[key_id_index(key_id)] != objagg_obj) {
+ pr_err("Key %u: God another object for the same key.\n",
+ key_id);
+ err = -EINVAL;
+ goto err_key_id_check;
+ }
+ world->key_refs[key_id_index(key_id)]++;
+ return objagg_obj;
+
+err_key_id_check:
+ objagg_obj_put(objagg, objagg_obj);
+ return ERR_PTR(err);
+}
+
+static void world_obj_put(struct world *world, struct objagg *objagg,
+ unsigned int key_id)
+{
+ struct objagg_obj *objagg_obj;
+
+ if (!world->key_refs[key_id_index(key_id)])
+ return;
+ objagg_obj = world->objagg_objs[key_id_index(key_id)];
+ objagg_obj_put(objagg, objagg_obj);
+ world->key_refs[key_id_index(key_id)]--;
+}
+
+#define MAX_KEY_ID_DIFF 5
+
+static void *delta_create(void *priv, void *parent_obj, void *obj)
+{
+ struct tokey *parent_key = parent_obj;
+ struct world *world = priv;
+ struct tokey *key = obj;
+ int diff = key->id - parent_key->id;
+ struct delta *delta;
+
+ if (diff < 0 || diff > MAX_KEY_ID_DIFF)
+ return ERR_PTR(-EINVAL);
+
+ delta = kzalloc(sizeof(*delta), GFP_KERNEL);
+ if (!delta)
+ return ERR_PTR(-ENOMEM);
+ delta->key_id_diff = diff;
+ world->delta_count++;
+ return delta;
+}
+
+static void delta_destroy(void *priv, void *delta_priv)
+{
+ struct delta *delta = delta_priv;
+ struct world *world = priv;
+
+ world->delta_count--;
+ kfree(delta);
+}
+
+static void *root_create(void *priv, void *obj)
+{
+ struct world *world = priv;
+ struct tokey *key = obj;
+ struct root *root;
+
+ root = kzalloc(sizeof(*root), GFP_KERNEL);
+ if (!root)
+ return ERR_PTR(-ENOMEM);
+ memcpy(&root->key, key, sizeof(root->key));
+ memcpy(root->buf, world->next_root_buf, sizeof(root->buf));
+ world->root_count++;
+ return root;
+}
+
+static void root_destroy(void *priv, void *root_priv)
+{
+ struct root *root = root_priv;
+ struct world *world = priv;
+
+ world->root_count--;
+ kfree(root);
+}
+
+static int test_nodelta_obj_get(struct world *world, struct objagg *objagg,
+ unsigned int key_id, bool should_create_root)
+{
+ unsigned int orig_root_count = world->root_count;
+ struct objagg_obj *objagg_obj;
+ const struct root *root;
+ int err;
+
+ if (should_create_root)
+ prandom_bytes(world->next_root_buf,
+ sizeof(world->next_root_buf));
+
+ objagg_obj = world_obj_get(world, objagg, key_id);
+ if (IS_ERR(objagg_obj)) {
+ pr_err("Key %u: Failed to get object.\n", key_id);
+ return PTR_ERR(objagg_obj);
+ }
+ if (should_create_root) {
+ if (world->root_count != orig_root_count + 1) {
+ pr_err("Key %u: Root was not created\n", key_id);
+ err = -EINVAL;
+ goto err_check_root_count;
+ }
+ } else {
+ if (world->root_count != orig_root_count) {
+ pr_err("Key %u: Root was incorrectly created\n",
+ key_id);
+ err = -EINVAL;
+ goto err_check_root_count;
+ }
+ }
+ root = objagg_obj_root_priv(objagg_obj);
+ if (root->key.id != key_id) {
+ pr_err("Key %u: Root has unexpected key id\n", key_id);
+ err = -EINVAL;
+ goto err_check_key_id;
+ }
+ if (should_create_root &&
+ memcmp(world->next_root_buf, root->buf, sizeof(root->buf))) {
+ pr_err("Key %u: Buffer does not match the expected content\n",
+ key_id);
+ err = -EINVAL;
+ goto err_check_buf;
+ }
+ return 0;
+
+err_check_buf:
+err_check_key_id:
+err_check_root_count:
+ objagg_obj_put(objagg, objagg_obj);
+ return err;
+}
+
+static int test_nodelta_obj_put(struct world *world, struct objagg *objagg,
+ unsigned int key_id, bool should_destroy_root)
+{
+ unsigned int orig_root_count = world->root_count;
+
+ world_obj_put(world, objagg, key_id);
+
+ if (should_destroy_root) {
+ if (world->root_count != orig_root_count - 1) {
+ pr_err("Key %u: Root was not destroyed\n", key_id);
+ return -EINVAL;
+ }
+ } else {
+ if (world->root_count != orig_root_count) {
+ pr_err("Key %u: Root was incorrectly destroyed\n",
+ key_id);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int check_stats_zero(struct objagg *objagg)
+{
+ const struct objagg_stats *stats;
+ int err = 0;
+
+ stats = objagg_stats_get(objagg);
+ if (IS_ERR(stats))
+ return PTR_ERR(stats);
+
+ if (stats->stats_info_count != 0) {
+ pr_err("Stats: Object count is not zero while it should be\n");
+ err = -EINVAL;
+ }
+
+ objagg_stats_put(stats);
+ return err;
+}
+
+static int check_stats_nodelta(struct objagg *objagg)
+{
+ const struct objagg_stats *stats;
+ int i;
+ int err;
+
+ stats = objagg_stats_get(objagg);
+ if (IS_ERR(stats))
+ return PTR_ERR(stats);
+
+ if (stats->stats_info_count != NUM_KEYS) {
+ pr_err("Stats: Unexpected object count (%u expected, %u returned)\n",
+ NUM_KEYS, stats->stats_info_count);
+ err = -EINVAL;
+ goto stats_put;
+ }
+
+ for (i = 0; i < stats->stats_info_count; i++) {
+ if (stats->stats_info[i].stats.user_count != 2) {
+ pr_err("Stats: incorrect user count\n");
+ err = -EINVAL;
+ goto stats_put;
+ }
+ if (stats->stats_info[i].stats.delta_user_count != 2) {
+ pr_err("Stats: incorrect delta user count\n");
+ err = -EINVAL;
+ goto stats_put;
+ }
+ }
+ err = 0;
+
+stats_put:
+ objagg_stats_put(stats);
+ return err;
+}
+
+static void *delta_create_dummy(void *priv, void *parent_obj, void *obj)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static void delta_destroy_dummy(void *priv, void *delta_priv)
+{
+}
+
+static const struct objagg_ops nodelta_ops = {
+ .obj_size = sizeof(struct tokey),
+ .delta_create = delta_create_dummy,
+ .delta_destroy = delta_destroy_dummy,
+ .root_create = root_create,
+ .root_destroy = root_destroy,
+};
+
+static int test_nodelta(void)
+{
+ struct world world = {};
+ struct objagg *objagg;
+ int i;
+ int err;
+
+ objagg = objagg_create(&nodelta_ops, &world);
+ if (IS_ERR(objagg))
+ return PTR_ERR(objagg);
+
+ err = check_stats_zero(objagg);
+ if (err)
+ goto err_stats_first_zero;
+
+ /* First round of gets, the root objects should be created */
+ for (i = 0; i < NUM_KEYS; i++) {
+ err = test_nodelta_obj_get(&world, objagg, i, true);
+ if (err)
+ goto err_obj_first_get;
+ }
+
+ /* Do the second round of gets, all roots are already created,
+ * make sure that no new root is created
+ */
+ for (i = 0; i < NUM_KEYS; i++) {
+ err = test_nodelta_obj_get(&world, objagg, i, false);
+ if (err)
+ goto err_obj_second_get;
+ }
+
+ err = check_stats_nodelta(objagg);
+ if (err)
+ goto err_stats_nodelta;
+
+ for (i = NUM_KEYS - 1; i >= 0; i--) {
+ err = test_nodelta_obj_put(&world, objagg, i, false);
+ if (err)
+ goto err_obj_first_put;
+ }
+ for (i = NUM_KEYS - 1; i >= 0; i--) {
+ err = test_nodelta_obj_put(&world, objagg, i, true);
+ if (err)
+ goto err_obj_second_put;
+ }
+
+ err = check_stats_zero(objagg);
+ if (err)
+ goto err_stats_second_zero;
+
+ objagg_destroy(objagg);
+ return 0;
+
+err_stats_nodelta:
+err_obj_first_put:
+err_obj_second_get:
+ for (i--; i >= 0; i--)
+ world_obj_put(&world, objagg, i);
+
+ i = NUM_KEYS;
+err_obj_first_get:
+err_obj_second_put:
+ for (i--; i >= 0; i--)
+ world_obj_put(&world, objagg, i);
+err_stats_first_zero:
+err_stats_second_zero:
+ objagg_destroy(objagg);
+ return err;
+}
+
+static const struct objagg_ops delta_ops = {
+ .obj_size = sizeof(struct tokey),
+ .delta_create = delta_create,
+ .delta_destroy = delta_destroy,
+ .root_create = root_create,
+ .root_destroy = root_destroy,
+};
+
+enum action {
+ ACTION_GET,
+ ACTION_PUT,
+};
+
+enum expect_delta {
+ EXPECT_DELTA_SAME,
+ EXPECT_DELTA_INC,
+ EXPECT_DELTA_DEC,
+};
+
+enum expect_root {
+ EXPECT_ROOT_SAME,
+ EXPECT_ROOT_INC,
+ EXPECT_ROOT_DEC,
+};
+
+struct expect_stats_info {
+ struct objagg_obj_stats stats;
+ bool is_root;
+ unsigned int key_id;
+};
+
+struct expect_stats {
+ unsigned int info_count;
+ struct expect_stats_info info[NUM_KEYS];
+};
+
+struct action_item {
+ unsigned int key_id;
+ enum action action;
+ enum expect_delta expect_delta;
+ enum expect_root expect_root;
+ struct expect_stats expect_stats;
+};
+
+#define EXPECT_STATS(count, ...) \
+{ \
+ .info_count = count, \
+ .info = { __VA_ARGS__ } \
+}
+
+#define ROOT(key_id, user_count, delta_user_count) \
+ {{user_count, delta_user_count}, true, key_id}
+
+#define DELTA(key_id, user_count) \
+ {{user_count, user_count}, false, key_id}
+
+static const struct action_item action_items[] = {
+ {
+ 1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+ EXPECT_STATS(1, ROOT(1, 1, 1)),
+ }, /* r: 1 d: */
+ {
+ 7, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+ EXPECT_STATS(2, ROOT(1, 1, 1), ROOT(7, 1, 1)),
+ }, /* r: 1, 7 d: */
+ {
+ 3, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(3, ROOT(1, 1, 2), ROOT(7, 1, 1),
+ DELTA(3, 1)),
+ }, /* r: 1, 7 d: 3^1 */
+ {
+ 5, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(4, ROOT(1, 1, 3), ROOT(7, 1, 1),
+ DELTA(3, 1), DELTA(5, 1)),
+ }, /* r: 1, 7 d: 3^1, 5^1 */
+ {
+ 3, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(4, ROOT(1, 1, 4), ROOT(7, 1, 1),
+ DELTA(3, 2), DELTA(5, 1)),
+ }, /* r: 1, 7 d: 3^1, 3^1, 5^1 */
+ {
+ 1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(4, ROOT(1, 2, 5), ROOT(7, 1, 1),
+ DELTA(3, 2), DELTA(5, 1)),
+ }, /* r: 1, 1, 7 d: 3^1, 3^1, 5^1 */
+ {
+ 30, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+ EXPECT_STATS(5, ROOT(1, 2, 5), ROOT(7, 1, 1), ROOT(30, 1, 1),
+ DELTA(3, 2), DELTA(5, 1)),
+ }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1 */
+ {
+ 8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 2), ROOT(30, 1, 1),
+ DELTA(3, 2), DELTA(5, 1), DELTA(8, 1)),
+ }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7 */
+ {
+ 8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 3), ROOT(30, 1, 1),
+ DELTA(3, 2), DELTA(8, 2), DELTA(5, 1)),
+ }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7, 8^7 */
+ {
+ 3, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(6, ROOT(1, 2, 4), ROOT(7, 1, 3), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(3, 1), DELTA(5, 1)),
+ }, /* r: 1, 1, 7, 30 d: 3^1, 5^1, 8^7, 8^7 */
+ {
+ 3, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(1, 2, 3), ROOT(7, 1, 3), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(5, 1)),
+ }, /* r: 1, 1, 7, 30 d: 5^1, 8^7, 8^7 */
+ {
+ 1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(1, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(5, 1)),
+ }, /* r: 1, 7, 30 d: 5^1, 8^7, 8^7 */
+ {
+ 1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(1, 0, 1),
+ DELTA(8, 2), DELTA(5, 1)),
+ }, /* r: 7, 30 d: 5^1, 8^7, 8^7 */
+ {
+ 5, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
+ EXPECT_STATS(3, ROOT(7, 1, 3), ROOT(30, 1, 1),
+ DELTA(8, 2)),
+ }, /* r: 7, 30 d: 8^7, 8^7 */
+ {
+ 5, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+ EXPECT_STATS(4, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(5, 1, 1),
+ DELTA(8, 2)),
+ }, /* r: 7, 30, 5 d: 8^7, 8^7 */
+ {
+ 6, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */
+ {
+ 8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 4), ROOT(5, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 3), DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 8^7, 8^7, 8^7, 6^5 */
+ {
+ 8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */
+ {
+ 8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 2), ROOT(5, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 1), DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 8^7, 6^5 */
+ {
+ 8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(4, ROOT(5, 1, 2), ROOT(7, 1, 1), ROOT(30, 1, 1),
+ DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 6^5 */
+ {
+ 8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(5, 1, 3), ROOT(7, 1, 1), ROOT(30, 1, 1),
+ DELTA(6, 1), DELTA(8, 1)),
+ }, /* r: 7, 30, 5 d: 6^5, 8^5 */
+ {
+ 7, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
+ EXPECT_STATS(4, ROOT(5, 1, 3), ROOT(30, 1, 1),
+ DELTA(6, 1), DELTA(8, 1)),
+ }, /* r: 30, 5 d: 6^5, 8^5 */
+ {
+ 30, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
+ EXPECT_STATS(3, ROOT(5, 1, 3),
+ DELTA(6, 1), DELTA(8, 1)),
+ }, /* r: 5 d: 6^5, 8^5 */
+ {
+ 5, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(3, ROOT(5, 0, 2),
+ DELTA(6, 1), DELTA(8, 1)),
+ }, /* r: d: 6^5, 8^5 */
+ {
+ 6, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(2, ROOT(5, 0, 1),
+ DELTA(8, 1)),
+ }, /* r: d: 6^5 */
+ {
+ 8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
+ EXPECT_STATS(0, ),
+ }, /* r: d: */
+};
+
+static int check_expect(struct world *world,
+ const struct action_item *action_item,
+ unsigned int orig_delta_count,
+ unsigned int orig_root_count)
+{
+ unsigned int key_id = action_item->key_id;
+
+ switch (action_item->expect_delta) {
+ case EXPECT_DELTA_SAME:
+ if (orig_delta_count != world->delta_count) {
+ pr_err("Key %u: Delta count changed while expected to remain the same.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ case EXPECT_DELTA_INC:
+ if (WARN_ON(action_item->action == ACTION_PUT))
+ return -EINVAL;
+ if (orig_delta_count + 1 != world->delta_count) {
+ pr_err("Key %u: Delta count was not incremented.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ case EXPECT_DELTA_DEC:
+ if (WARN_ON(action_item->action == ACTION_GET))
+ return -EINVAL;
+ if (orig_delta_count - 1 != world->delta_count) {
+ pr_err("Key %u: Delta count was not decremented.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ }
+
+ switch (action_item->expect_root) {
+ case EXPECT_ROOT_SAME:
+ if (orig_root_count != world->root_count) {
+ pr_err("Key %u: Root count changed while expected to remain the same.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ case EXPECT_ROOT_INC:
+ if (WARN_ON(action_item->action == ACTION_PUT))
+ return -EINVAL;
+ if (orig_root_count + 1 != world->root_count) {
+ pr_err("Key %u: Root count was not incremented.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ case EXPECT_ROOT_DEC:
+ if (WARN_ON(action_item->action == ACTION_GET))
+ return -EINVAL;
+ if (orig_root_count - 1 != world->root_count) {
+ pr_err("Key %u: Root count was not decremented.\n",
+ key_id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static unsigned int obj_to_key_id(struct objagg_obj *objagg_obj)
+{
+ const struct tokey *root_key;
+ const struct delta *delta;
+ unsigned int key_id;
+
+ root_key = objagg_obj_root_priv(objagg_obj);
+ key_id = root_key->id;
+ delta = objagg_obj_delta_priv(objagg_obj);
+ if (delta)
+ key_id += delta->key_id_diff;
+ return key_id;
+}
+
+static int
+check_expect_stats_nums(const struct objagg_obj_stats_info *stats_info,
+ const struct expect_stats_info *expect_stats_info,
+ const char **errmsg)
+{
+ if (stats_info->is_root != expect_stats_info->is_root) {
+ if (errmsg)
+ *errmsg = "Incorrect root/delta indication";
+ return -EINVAL;
+ }
+ if (stats_info->stats.user_count !=
+ expect_stats_info->stats.user_count) {
+ if (errmsg)
+ *errmsg = "Incorrect user count";
+ return -EINVAL;
+ }
+ if (stats_info->stats.delta_user_count !=
+ expect_stats_info->stats.delta_user_count) {
+ if (errmsg)
+ *errmsg = "Incorrect delta user count";
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+check_expect_stats_key_id(const struct objagg_obj_stats_info *stats_info,
+ const struct expect_stats_info *expect_stats_info,
+ const char **errmsg)
+{
+ if (obj_to_key_id(stats_info->objagg_obj) !=
+ expect_stats_info->key_id) {
+ if (errmsg)
+ *errmsg = "incorrect key id";
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int check_expect_stats_neigh(const struct objagg_stats *stats,
+ const struct expect_stats *expect_stats,
+ int pos)
+{
+ int i;
+ int err;
+
+ for (i = pos - 1; i >= 0; i--) {
+ err = check_expect_stats_nums(&stats->stats_info[i],
+ &expect_stats->info[pos], NULL);
+ if (err)
+ break;
+ err = check_expect_stats_key_id(&stats->stats_info[i],
+ &expect_stats->info[pos], NULL);
+ if (!err)
+ return 0;
+ }
+ for (i = pos + 1; i < stats->stats_info_count; i++) {
+ err = check_expect_stats_nums(&stats->stats_info[i],
+ &expect_stats->info[pos], NULL);
+ if (err)
+ break;
+ err = check_expect_stats_key_id(&stats->stats_info[i],
+ &expect_stats->info[pos], NULL);
+ if (!err)
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int __check_expect_stats(const struct objagg_stats *stats,
+ const struct expect_stats *expect_stats,
+ const char **errmsg)
+{
+ int i;
+ int err;
+
+ if (stats->stats_info_count != expect_stats->info_count) {
+ *errmsg = "Unexpected object count";
+ return -EINVAL;
+ }
+
+ for (i = 0; i < stats->stats_info_count; i++) {
+ err = check_expect_stats_nums(&stats->stats_info[i],
+ &expect_stats->info[i], errmsg);
+ if (err)
+ return err;
+ err = check_expect_stats_key_id(&stats->stats_info[i],
+ &expect_stats->info[i], errmsg);
+ if (err) {
+ /* It is possible that one of the neighbor stats with the
+ * same numbers has the correct key id, so check it
+ */
+ err = check_expect_stats_neigh(stats, expect_stats, i);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int check_expect_stats(struct objagg *objagg,
+ const struct expect_stats *expect_stats,
+ const char **errmsg)
+{
+ const struct objagg_stats *stats;
+ int err;
+
+ stats = objagg_stats_get(objagg);
+ if (IS_ERR(stats))
+ return PTR_ERR(stats);
+ err = __check_expect_stats(stats, expect_stats, errmsg);
+ objagg_stats_put(stats);
+ return err;
+}
+
+static int test_delta_action_item(struct world *world,
+ struct objagg *objagg,
+ const struct action_item *action_item,
+ bool inverse)
+{
+ unsigned int orig_delta_count = world->delta_count;
+ unsigned int orig_root_count = world->root_count;
+ unsigned int key_id = action_item->key_id;
+ enum action action = action_item->action;
+ struct objagg_obj *objagg_obj;
+ const char *errmsg;
+ int err;
+
+ if (inverse)
+ action = action == ACTION_GET ? ACTION_PUT : ACTION_GET;
+
+ switch (action) {
+ case ACTION_GET:
+ objagg_obj = world_obj_get(world, objagg, key_id);
+ if (IS_ERR(objagg_obj))
+ return PTR_ERR(objagg_obj);
+ break;
+ case ACTION_PUT:
+ world_obj_put(world, objagg, key_id);
+ break;
+ }
+
+ if (inverse)
+ return 0;
+ err = check_expect(world, action_item,
+ orig_delta_count, orig_root_count);
+ if (err)
+ goto errout;
+
+ errmsg = NULL;
+ err = check_expect_stats(objagg, &action_item->expect_stats, &errmsg);
+ if (err) {
+ pr_err("Key %u: Stats: %s\n", action_item->key_id, errmsg);
+ goto errout;
+ }
+
+ return 0;
+
+errout:
+ /* This can only happen when the action is not inverted.
+ * So in case of an error, clean up by doing the inverse action.
+ */
+ test_delta_action_item(world, objagg, action_item, true);
+ return err;
+}
+
+static int test_delta(void)
+{
+ struct world world = {};
+ struct objagg *objagg;
+ int i;
+ int err;
+
+ objagg = objagg_create(&delta_ops, &world);
+ if (IS_ERR(objagg))
+ return PTR_ERR(objagg);
+
+ for (i = 0; i < ARRAY_SIZE(action_items); i++) {
+ err = test_delta_action_item(&world, objagg,
+ &action_items[i], false);
+ if (err)
+ goto err_do_action_item;
+ }
+
+ objagg_destroy(objagg);
+ return 0;
+
+err_do_action_item:
+ for (i--; i >= 0; i--)
+ test_delta_action_item(&world, objagg, &action_items[i], true);
+
+ objagg_destroy(objagg);
+ return err;
+}
+
+static int __init test_objagg_init(void)
+{
+ int err;
+
+ err = test_nodelta();
+ if (err)
+ return err;
+ return test_delta();
+}
+
+static void __exit test_objagg_exit(void)
+{
+}
+
+module_init(test_objagg_init);
+module_exit(test_objagg_exit);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Test module for objagg");
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 82ac39ce5310..6a8ac7626797 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -20,11 +20,11 @@
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
-#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
+#include <linux/wait.h>
#define MAX_ENTRIES 1000000
#define TEST_INSERT_FAIL INT_MAX
@@ -112,8 +112,8 @@ static struct rhashtable_params test_rht_params_dup = {
.automatic_shrinking = false,
};
-static struct semaphore prestart_sem;
-static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
+static atomic_t startup_count;
+static DECLARE_WAIT_QUEUE_HEAD(startup_wait);
static int insert_retry(struct rhashtable *ht, struct test_obj *obj,
const struct rhashtable_params params)
@@ -634,9 +634,12 @@ static int threadfunc(void *data)
int i, step, err = 0, insert_retries = 0;
struct thread_data *tdata = data;
- up(&prestart_sem);
- if (down_interruptible(&startup_sem))
- pr_err(" thread[%d]: down_interruptible failed\n", tdata->id);
+ if (atomic_dec_and_test(&startup_count))
+ wake_up(&startup_wait);
+ if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == -1)) {
+ pr_err(" thread[%d]: interrupted\n", tdata->id);
+ goto out;
+ }
for (i = 0; i < tdata->entries; i++) {
tdata->objs[i].value.id = i;
@@ -755,7 +758,7 @@ static int __init test_rht_init(void)
pr_info("Testing concurrent rhashtable access from %d threads\n",
tcount);
- sema_init(&prestart_sem, 1 - tcount);
+ atomic_set(&startup_count, tcount);
tdata = vzalloc(array_size(tcount, sizeof(struct thread_data)));
if (!tdata)
return -ENOMEM;
@@ -781,15 +784,18 @@ static int __init test_rht_init(void)
tdata[i].objs = objs + i * entries;
tdata[i].task = kthread_run(threadfunc, &tdata[i],
"rhashtable_thrad[%d]", i);
- if (IS_ERR(tdata[i].task))
+ if (IS_ERR(tdata[i].task)) {
pr_err(" kthread_run failed for thread %d\n", i);
- else
+ atomic_dec(&startup_count);
+ } else {
started_threads++;
+ }
}
- if (down_interruptible(&prestart_sem))
- pr_err(" down interruptible failed\n");
- for (i = 0; i < tcount; i++)
- up(&startup_sem);
+ if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == 0))
+ pr_err(" wait_event interruptible failed\n");
+ /* count is 0 now, set it to -1 and wake up all threads together */
+ atomic_dec(&startup_count);
+ wake_up_all(&startup_wait);
for (i = 0; i < tcount; i++) {
if (IS_ERR(tdata[i].task))
continue;
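
The test_rhashtable.c hunk above replaces the two semaphores with an atomic countdown plus a wait queue to rendezvous the worker threads at startup: each thread decrements the counter, the last one wakes the setup code, and the setup code then sets the counter to -1 and wakes everybody at once. A stripped-down sketch of that pattern with hypothetical names (worker, start_workers); error handling of kthread_run() is omitted, whereas the real test decrements startup_count for threads that fail to start:

/* Sketch of the startup rendezvous used above; names are illustrative. */
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static atomic_t startup_count;
static DECLARE_WAIT_QUEUE_HEAD(startup_wait);

static int worker(void *data)
{
	/* Announce readiness; the last worker to arrive wakes the setup code. */
	if (atomic_dec_and_test(&startup_count))
		wake_up(&startup_wait);
	/* Block until setup flips the counter from 0 to -1. */
	if (wait_event_interruptible(startup_wait,
				     atomic_read(&startup_count) == -1))
		return -EINTR;
	/* ... run the actual per-thread test work ... */
	return 0;
}

static int start_workers(int nthreads)
{
	int i;

	atomic_set(&startup_count, nthreads);
	for (i = 0; i < nthreads; i++)
		kthread_run(worker, NULL, "worker[%d]", i);

	/* Wait for all workers to check in, then release them together. */
	if (wait_event_interruptible(startup_wait,
				     atomic_read(&startup_count) == 0))
		return -EINTR;
	atomic_dec(&startup_count);	/* 0 -> -1 */
	wake_up_all(&startup_wait);
	return 0;
}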