Diffstat (limited to 'include/net/sock.h')
-rw-r--r--  include/net/sock.h | 39 ++++++++++++++++++++++++++-------------
1 file changed, 26 insertions(+), 13 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index bdc4323ce53c..636810ddcd9b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -226,7 +226,7 @@ struct sock_common {
struct hlist_nulls_node skc_nulls_node;
};
unsigned short skc_tx_queue_mapping;
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
unsigned short skc_rx_queue_mapping;
#endif
union {
@@ -356,7 +356,7 @@ struct sock {
#define sk_nulls_node __sk_common.skc_nulls_node
#define sk_refcnt __sk_common.skc_refcnt
#define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
#define sk_rx_queue_mapping __sk_common.skc_rx_queue_mapping
#endif
@@ -1174,6 +1174,8 @@ struct proto {
int (*backlog_rcv) (struct sock *sk,
struct sk_buff *skb);
+ bool (*bpf_bypass_getsockopt)(int level,
+ int optname);
void (*release_cb)(struct sock *sk);
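The new bpf_bypass_getsockopt() hook lets a protocol ask the cgroup BPF getsockopt path to leave certain options alone. Below is a minimal sketch of how a protocol could wire this up; the TCP_ZEROCOPY_RECEIVE check and the trimmed tcp_prot initializer are illustrative, not the exact in-tree code.

static bool tcp_bpf_bypass_getsockopt(int level, int optname)
{
	/* Illustrative: options that hand kernel pointers back to
	 * userspace are natural candidates for bypassing the BPF hook.
	 */
	if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE)
		return true;

	return false;
}

struct proto tcp_prot = {
	.name			= "TCP",
	/* ... other callbacks elided ... */
	.bpf_bypass_getsockopt	= tcp_bpf_bypass_getsockopt,
};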
@@ -1350,14 +1352,18 @@ sk_memory_allocated_sub(struct sock *sk, int amt)
atomic_long_sub(amt, sk->sk_prot->memory_allocated);
}
+#define SK_ALLOC_PERCPU_COUNTER_BATCH 16
+
static inline void sk_sockets_allocated_dec(struct sock *sk)
{
- percpu_counter_dec(sk->sk_prot->sockets_allocated);
+ percpu_counter_add_batch(sk->sk_prot->sockets_allocated, -1,
+ SK_ALLOC_PERCPU_COUNTER_BATCH);
}
static inline void sk_sockets_allocated_inc(struct sock *sk)
{
- percpu_counter_inc(sk->sk_prot->sockets_allocated);
+ percpu_counter_add_batch(sk->sk_prot->sockets_allocated, 1,
+ SK_ALLOC_PERCPU_COUNTER_BATCH);
}
static inline u64
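The sockets_allocated counter now uses percpu_counter_add_batch() with a batch of 16, so each CPU can absorb up to SK_ALLOC_PERCPU_COUNTER_BATCH increments or decrements locally before touching the shared count. The sketch below is a simplified, userspace-style model of that batching idea, not the kernel's percpu_counter implementation; every name in it is made up.

#define NR_FAKE_CPUS	4

struct fake_percpu_counter {
	long count;			/* shared, approximate total */
	long local[NR_FAKE_CPUS];	/* per-CPU drift not yet folded in */
};

static void fake_percpu_counter_add_batch(struct fake_percpu_counter *fbc,
					  int cpu, long amount, long batch)
{
	long delta = fbc->local[cpu] + amount;

	if (delta >= batch || delta <= -batch) {
		/* Drift grew large enough: fold it into the shared counter
		 * (the real code serializes this step).
		 */
		fbc->count += delta;
		fbc->local[cpu] = 0;
	} else {
		/* Stay CPU-local; the shared cache line is not touched. */
		fbc->local[cpu] = delta;
	}
}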
@@ -1834,7 +1840,7 @@ static inline int sk_tx_queue_get(const struct sock *sk)
static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
{
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
if (skb_rx_queue_recorded(skb)) {
u16 rx_queue = skb_get_rx_queue(skb);
@@ -1848,20 +1854,20 @@ static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
static inline void sk_rx_queue_clear(struct sock *sk)
{
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
#endif
}
-#ifdef CONFIG_XPS
static inline int sk_rx_queue_get(const struct sock *sk)
{
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
return sk->sk_rx_queue_mapping;
+#endif
return -1;
}
-#endif
static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
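With the switch from CONFIG_XPS to CONFIG_SOCK_RX_QUEUE_MAPPING, sk_rx_queue_get() is also defined unconditionally and simply returns -1 when no mapping is available, so callers no longer need their own #ifdefs. A rough sketch of the usual flow follows; example_rx() and queue_index are hypothetical driver-side names.

static void example_rx(struct sock *sk, struct sk_buff *skb, u16 queue_index)
{
	int rxq;

	/* Driver RX path: remember which hardware queue the packet hit. */
	skb_record_rx_queue(skb, queue_index);

	/* Protocol receive path: copy the skb's queue onto the socket. */
	sk_rx_queue_set(sk, skb);

	/* Consumer side: compiles with or without the config option;
	 * -1 means "no mapping recorded".
	 */
	rxq = sk_rx_queue_get(sk);
	if (rxq >= 0) {
		/* e.g. steer follow-up work towards the same queue */
	}
}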
@@ -1921,10 +1927,13 @@ static inline void sk_set_txhash(struct sock *sk)
sk->sk_txhash = net_tx_rndhash();
}
-static inline void sk_rethink_txhash(struct sock *sk)
+static inline bool sk_rethink_txhash(struct sock *sk)
{
- if (sk->sk_txhash)
+ if (sk->sk_txhash) {
sk_set_txhash(sk);
+ return true;
+ }
+ return false;
}
static inline struct dst_entry *
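sk_rethink_txhash() now reports whether a new transmit hash was actually chosen (it only rehashes when sk_txhash is already set), which lets callers make follow-up work conditional. A hedged example; example_path_reselect() and its pairing with sk_dst_reset() are hypothetical.

static void example_path_reselect(struct sock *sk)
{
	/* Only drop the cached route when the flow may really move. */
	if (sk_rethink_txhash(sk))
		sk_dst_reset(sk);
}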
@@ -1947,12 +1956,10 @@ sk_dst_get(struct sock *sk)
return dst;
}
-static inline void dst_negative_advice(struct sock *sk)
+static inline void __dst_negative_advice(struct sock *sk)
{
struct dst_entry *ndst, *dst = __sk_dst_get(sk);
- sk_rethink_txhash(sk);
-
if (dst && dst->ops->negative_advice) {
ndst = dst->ops->negative_advice(dst);
@@ -1964,6 +1971,12 @@ static inline void dst_negative_advice(struct sock *sk)
}
}
+static inline void dst_negative_advice(struct sock *sk)
+{
+ sk_rethink_txhash(sk);
+ __dst_negative_advice(sk);
+}
+
static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
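The last two hunks split the old helper in two: __dst_negative_advice() only consults the dst, while dst_negative_advice() keeps the previous behaviour of rehashing first and then advising. A short illustrative sketch of the resulting choice; both example_* helpers are hypothetical.

static void example_handle_timeout(struct sock *sk)
{
	/* Same net effect as before the split: rehash, then advise. */
	dst_negative_advice(sk);
}

static void example_advise_only(struct sock *sk)
{
	/* New option: consult the dst without touching the tx hash. */
	__dst_negative_advice(sk);
}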