summaryrefslogtreecommitdiff
path: root/net/tipc/link.c
diff options
context:
space:
mode:
authorJon Paul Maloy <jon.maloy@ericsson.com>2015-11-19 22:30:44 +0300
committerDavid S. Miller <davem@davemloft.net>2015-11-20 22:06:10 +0300
commit5405ff6e15f40f2f53e37d2dcd7de521e2b7a96f (patch)
tree226f40f32f063d27a8d9a6abe6708d550721f1fd /net/tipc/link.c
parent2312bf61ae365fdd6b9bfb24558a417859759447 (diff)
downloadlinux-5405ff6e15f40f2f53e37d2dcd7de521e2b7a96f.tar.xz
tipc: convert node lock to rwlock
According to the node FSM a node in state SELF_UP_PEER_UP cannot change state inside a lock context, except when a TUNNEL_PROTOCOL (SYNCH or FAILOVER) packet arrives. However, the node's individual links may still change state. Since each link now is protected by its own spinlock, we finally have the conditions in place to convert the node spinlock to an rwlock_t. If the node state and arriving packet type are right, we can let the link directly receive the packet under protection of its own spinlock and the node lock in read mode. In all other cases we use the node lock in write mode. This enables full concurrent execution between parallel links during steady-state traffic situations, i.e., 99+ % of the time. This commit implements this change. Reviewed-by: Ying Xue <ying.xue@windriver.com> Signed-off-by: Jon Maloy <jon.maloy@ericsson.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc/link.c')
-rw-r--r--net/tipc/link.c32
1 file changed, 16 insertions, 16 deletions
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b5e895c6f1aa..1dda46e5dd83 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1547,7 +1547,7 @@ static struct tipc_node *tipc_link_find_owner(struct net *net,
*bearer_id = 0;
rcu_read_lock();
list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
- tipc_node_lock(n_ptr);
+ tipc_node_read_lock(n_ptr);
for (i = 0; i < MAX_BEARERS; i++) {
l_ptr = n_ptr->links[i].link;
if (l_ptr && !strcmp(l_ptr->name, link_name)) {
@@ -1556,7 +1556,7 @@ static struct tipc_node *tipc_link_find_owner(struct net *net,
break;
}
}
- tipc_node_unlock(n_ptr);
+ tipc_node_read_unlock(n_ptr);
if (found_node)
break;
}
@@ -1658,7 +1658,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
if (!node)
return -EINVAL;
- tipc_node_lock(node);
+ tipc_node_read_lock(node);
link = node->links[bearer_id].link;
if (!link) {
@@ -1699,7 +1699,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
}
out:
- tipc_node_unlock(node);
+ tipc_node_read_unlock(node);
return res;
}
@@ -1898,10 +1898,10 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
list_for_each_entry_continue_rcu(node, &tn->node_list,
list) {
- tipc_node_lock(node);
+ tipc_node_read_lock(node);
err = __tipc_nl_add_node_links(net, &msg, node,
&prev_link);
- tipc_node_unlock(node);
+ tipc_node_read_unlock(node);
if (err)
goto out;
@@ -1913,10 +1913,10 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
goto out;
list_for_each_entry_rcu(node, &tn->node_list, list) {
- tipc_node_lock(node);
+ tipc_node_read_lock(node);
err = __tipc_nl_add_node_links(net, &msg, node,
&prev_link);
- tipc_node_unlock(node);
+ tipc_node_read_unlock(node);
if (err)
goto out;
@@ -1967,16 +1967,16 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
if (!node)
return -EINVAL;
- tipc_node_lock(node);
+ tipc_node_read_lock(node);
link = node->links[bearer_id].link;
if (!link) {
- tipc_node_unlock(node);
+ tipc_node_read_unlock(node);
nlmsg_free(msg.skb);
return -EINVAL;
}
err = __tipc_nl_add_link(net, &msg, link, 0);
- tipc_node_unlock(node);
+ tipc_node_read_unlock(node);
if (err) {
nlmsg_free(msg.skb);
return err;
@@ -2021,18 +2021,18 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
node = tipc_link_find_owner(net, link_name, &bearer_id);
if (!node)
return -EINVAL;
+
le = &node->links[bearer_id];
- tipc_node_lock(node);
+ tipc_node_read_lock(node);
spin_lock_bh(&le->lock);
link = le->link;
if (!link) {
- tipc_node_unlock(node);
+ spin_unlock_bh(&le->lock);
+ tipc_node_read_unlock(node);
return -EINVAL;
}
-
link_reset_statistics(link);
spin_unlock_bh(&le->lock);
- tipc_node_unlock(node);
-
+ tipc_node_read_unlock(node);
return 0;
}