author    Kuniyuki Iwashima <kuniyu@amazon.com>    2022-06-20 02:29:27 +0300
committer David S. Miller <davem@davemloft.net>   2022-06-20 11:10:13 +0300
commit    f289c02bf41b55fbfccf21d72c4ac44cd4a7a107 (patch)
tree      8a90d3251bf1e6c02a45be0667b0147aa5c5765a
parent    5da39e31b1b0eb62b8ed369ad9615da850239e9e (diff)
download  linux-f289c02bf41b55fbfccf21d72c4ac44cd4a7a107.tar.xz
raw: Use helpers for the hlist_nulls variant.
hlist_nulls_add_head_rcu() and hlist_nulls_for_each_entry() have
dedicated macros for sk.

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
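For context, the sk-specific wrappers this patch switches to are defined in include/net/sock.h; a paraphrased sketch (not the verbatim in-tree code) looks roughly like this:

    /* Sketch of the sk-specific nulls helpers (paraphrased from
     * include/net/sock.h): they hard-code the sk->sk_nulls_node member
     * so call sites don't have to repeat it.
     */
    static inline void __sk_nulls_add_node_rcu(struct sock *sk,
                                               struct hlist_nulls_head *list)
    {
            hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
    }

    #define sk_nulls_for_each(__sk, node, list) \
            hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)

With these helpers the sk_nulls_node member is spelled in one place, which is what the hunks below convert the raw socket hash-table code to use.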
-rw-r--r--  net/ipv4/raw.c       8
-rw-r--r--  net/ipv4/raw_diag.c  4
-rw-r--r--  net/ipv6/raw.c       4
3 files changed, 8 insertions, 8 deletions
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index b3b255db9021..959bea12dc48 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -96,7 +96,7 @@ int raw_hash_sk(struct sock *sk)
hlist = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)];
write_lock_bh(&h->lock);
- hlist_nulls_add_head_rcu(&sk->sk_nulls_node, hlist);
+ __sk_nulls_add_node_rcu(sk, hlist);
sock_set_flag(sk, SOCK_RCU_FREE);
write_unlock_bh(&h->lock);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
@@ -172,7 +172,7 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
hlist = &raw_v4_hashinfo.ht[hash];
rcu_read_lock();
- hlist_nulls_for_each_entry(sk, hnode, hlist, sk_nulls_node) {
+ sk_nulls_for_each(sk, hnode, hlist) {
if (!raw_v4_match(net, sk, iph->protocol,
iph->saddr, iph->daddr, dif, sdif))
continue;
@@ -275,7 +275,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
hlist = &raw_v4_hashinfo.ht[hash];
rcu_read_lock();
- hlist_nulls_for_each_entry(sk, hnode, hlist, sk_nulls_node) {
+ sk_nulls_for_each(sk, hnode, hlist) {
iph = (const struct iphdr *)skb->data;
if (!raw_v4_match(net, sk, iph->protocol,
iph->saddr, iph->daddr, dif, sdif))
@@ -954,7 +954,7 @@ static struct sock *raw_get_first(struct seq_file *seq, int bucket)
for (state->bucket = bucket; state->bucket < RAW_HTABLE_SIZE;
++state->bucket) {
hlist = &h->ht[state->bucket];
- hlist_nulls_for_each_entry(sk, hnode, hlist, sk_nulls_node) {
+ sk_nulls_for_each(sk, hnode, hlist) {
if (sock_net(sk) == seq_file_net(seq))
return sk;
}
diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c
index 5f208e840d85..ac4b6525d3c6 100644
--- a/net/ipv4/raw_diag.c
+++ b/net/ipv4/raw_diag.c
@@ -68,7 +68,7 @@ static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2
rcu_read_lock();
for (slot = 0; slot < RAW_HTABLE_SIZE; slot++) {
hlist = &hashinfo->ht[slot];
- hlist_nulls_for_each_entry(sk, hnode, hlist, sk_nulls_node) {
+ sk_nulls_for_each(sk, hnode, hlist) {
if (raw_lookup(net, sk, r)) {
/*
* Grab it and keep until we fill
@@ -161,7 +161,7 @@ static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
num = 0;
hlist = &hashinfo->ht[slot];
- hlist_nulls_for_each_entry(sk, hnode, hlist, sk_nulls_node) {
+ sk_nulls_for_each(sk, hnode, hlist) {
struct inet_sock *inet = inet_sk(sk);
if (!net_eq(sock_net(sk), net))
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index f6119998700e..46b560aacc11 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -155,7 +155,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
hash = nexthdr & (RAW_HTABLE_SIZE - 1);
hlist = &raw_v6_hashinfo.ht[hash];
rcu_read_lock();
- hlist_nulls_for_each_entry(sk, hnode, hlist, sk_nulls_node) {
+ sk_nulls_for_each(sk, hnode, hlist) {
int filtered;
if (!raw_v6_match(net, sk, nexthdr, daddr, saddr,
@@ -342,7 +342,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
hash = nexthdr & (RAW_HTABLE_SIZE - 1);
hlist = &raw_v6_hashinfo.ht[hash];
rcu_read_lock();
- hlist_nulls_for_each_entry(sk, hnode, hlist, sk_nulls_node) {
+ sk_nulls_for_each(sk, hnode, hlist) {
/* Note: ipv6_hdr(skb) != skb->data */
const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
saddr = &ip6h->saddr;