Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/uaccess.h                | 12
-rw-r--r--  include/linux/bpf_verifier.h                 | 12
-rw-r--r--  include/linux/filter.h                       | 10
-rw-r--r--  include/linux/phy.h                          | 13
-rw-r--r--  include/linux/phy/phy.h                      |  2
-rw-r--r--  include/linux/ptr_ring.h                     |  2
-rw-r--r--  include/linux/regset.h                       |  4
-rw-r--r--  include/linux/uaccess.h                      | 11
-rw-r--r--  include/net/checksum.h                       |  4
-rw-r--r--  include/net/ip_tunnels.h                     | 20
-rw-r--r--  include/net/netfilter/nf_conntrack_count.h   | 19
-rw-r--r--  include/net/sock.h                           | 38
12 files changed, 98 insertions, 49 deletions
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 6b2e63df2739..d82c78a79da5 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -35,7 +35,7 @@ static inline void set_fs(mm_segment_t fs)
#define segment_eq(a, b) ((a).seg == (b).seg)
#endif
-#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))
+#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size))
/*
* The architecture should really override this if possible, at least
@@ -78,7 +78,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
({ \
void __user *__p = (ptr); \
might_fault(); \
- access_ok(VERIFY_WRITE, __p, sizeof(*ptr)) ? \
+ access_ok(__p, sizeof(*ptr)) ? \
__put_user((x), ((__typeof__(*(ptr)) __user *)__p)) : \
-EFAULT; \
})
@@ -140,7 +140,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
({ \
const void __user *__p = (ptr); \
might_fault(); \
- access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \
+ access_ok(__p, sizeof(*ptr)) ? \
__get_user((x), (__typeof__(*(ptr)) __user *)__p) :\
((x) = (__typeof__(*(ptr)))0,-EFAULT); \
})
@@ -175,7 +175,7 @@ __strncpy_from_user(char *dst, const char __user *src, long count)
static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
- if (!access_ok(VERIFY_READ, src, 1))
+ if (!access_ok(src, 1))
return -EFAULT;
return __strncpy_from_user(dst, src, count);
}
@@ -196,7 +196,7 @@ strncpy_from_user(char *dst, const char __user *src, long count)
*/
static inline long strnlen_user(const char __user *src, long n)
{
- if (!access_ok(VERIFY_READ, src, 1))
+ if (!access_ok(src, 1))
return 0;
return __strnlen_user(src, n);
}
@@ -217,7 +217,7 @@ static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
might_fault();
- if (!access_ok(VERIFY_WRITE, to, n))
+ if (!access_ok(to, n))
return n;
return __clear_user(to, n);
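
The hunks above drop the unused VERIFY_READ/VERIFY_WRITE argument from access_ok(), so callers pass only the user pointer and length. A minimal caller sketch under the new convention (the function and its name are hypothetical, not part of this diff):

    /* Hypothetical ioctl-style helper using the two-argument access_ok(). */
    #include <linux/uaccess.h>

    static long demo_zero_user_buf(void __user *ubuf, size_t len)
    {
            /* Old form was access_ok(VERIFY_WRITE, ubuf, len); the
             * read/write distinction is gone in the new form.
             */
            if (!access_ok(ubuf, len))
                    return -EFAULT;

            return clear_user(ubuf, len) ? -EFAULT : 0;
    }
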
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index c233efc106c6..27b74947cd2b 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -148,6 +148,7 @@ struct bpf_verifier_state {
/* call stack tracking */
struct bpf_func_state *frame[MAX_CALL_FRAMES];
u32 curframe;
+ bool speculative;
};
#define bpf_get_spilled_reg(slot, frame) \
@@ -167,15 +168,24 @@ struct bpf_verifier_state_list {
struct bpf_verifier_state_list *next;
};
+/* Possible states for alu_state member. */
+#define BPF_ALU_SANITIZE_SRC 1U
+#define BPF_ALU_SANITIZE_DST 2U
+#define BPF_ALU_NEG_VALUE (1U << 2)
+#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
+ BPF_ALU_SANITIZE_DST)
+
struct bpf_insn_aux_data {
union {
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
unsigned long map_state; /* pointer/poison value for maps */
s32 call_imm; /* saved imm field of call insn */
+ u32 alu_limit; /* limit for add/sub register with pointer */
};
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
int sanitize_stack_off; /* stack slot to be cleared */
bool seen; /* this insn was processed by the verifier */
+ u8 alu_state; /* used in combination with alu_limit */
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -212,6 +222,8 @@ struct bpf_subprog_info {
* one verifier_env per bpf_check() call
*/
struct bpf_verifier_env {
+ u32 insn_idx;
+ u32 prev_insn_idx;
struct bpf_prog *prog; /* eBPF program being verified */
const struct bpf_verifier_ops *ops;
struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
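
The bpf_verifier.h additions (speculative state, alu_state, alu_limit) give the verifier per-instruction bookkeeping for sanitizing pointer arithmetic against speculative out-of-bounds access. A sketch of how those fields might be filled in, loosely modeled on the verifier's behavior; the function name and parameters are assumptions, the real logic lives in kernel/bpf/verifier.c:

    #include <linux/bpf_verifier.h>
    #include <linux/errno.h>

    static int demo_mark_alu_sanitize(struct bpf_insn_aux_data *aux,
                                      u32 alu_limit, bool off_is_neg,
                                      bool ptr_is_dst)
    {
            u32 alu_state = ptr_is_dst ? BPF_ALU_SANITIZE_SRC :
                                         BPF_ALU_SANITIZE_DST;

            if (off_is_neg)
                    alu_state |= BPF_ALU_NEG_VALUE;

            /* A conflicting state recorded on an earlier verification path
             * means the instruction cannot be rewritten safely for both.
             */
            if (aux->alu_state &&
                (aux->alu_state != alu_state || aux->alu_limit != alu_limit))
                    return -EACCES;

            aux->alu_state = alu_state;
            aux->alu_limit = alu_limit;
            return 0;
    }
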
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 8c8544b375eb..ad106d845b22 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -53,14 +53,10 @@ struct sock_reuseport;
#define BPF_REG_D BPF_REG_8 /* data, callee-saved */
#define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */
-/* Kernel hidden auxiliary/helper register for hardening step.
- * Only used by eBPF JITs. It's nothing more than a temporary
- * register that JITs use internally, only that here it's part
- * of eBPF instructions that have been rewritten for blinding
- * constants. See JIT pre-step in bpf_jit_blind_constants().
- */
+/* Kernel hidden auxiliary/helper register. */
#define BPF_REG_AX MAX_BPF_REG
-#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
+#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1)
+#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG
/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL 0xf0
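
The filter.h hunk shortens the BPF_REG_AX comment and introduces MAX_BPF_EXT_REG, reflecting that the auxiliary register is no longer a JIT-only blinding scratch register but is also usable by verifier-driven instruction rewrites. An illustrative (not authoritative) patchlet in that style, masking an offset register with BPF_REG_AX as scratch; the function and the exact instruction sequence are assumptions for illustration:

    #include <linux/filter.h>

    static void demo_emit_mask(struct bpf_insn *patch, u32 alu_limit, u8 off_reg)
    {
            /* Clamp off_reg to [0, alu_limit] in a branch-free way. */
            patch[0] = BPF_MOV32_IMM(BPF_REG_AX, alu_limit);
            patch[1] = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
            patch[2] = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
            patch[3] = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
            patch[4] = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
            patch[5] = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
    }
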
diff --git a/include/linux/phy.h b/include/linux/phy.h
index da039f211c22..3b051f761450 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1,6 +1,6 @@
/*
* Framework and drivers for configuring and reading different PHYs
- * Based on code in sungem_phy.c and gianfar_phy.c
+ * Based on code in sungem_phy.c and (long-removed) gianfar_phy.c
*
* Author: Andy Fleming
*
@@ -110,9 +110,9 @@ typedef enum {
* @speeds: buffer to store supported speeds in.
* @size: size of speeds buffer.
*
- * Description: Returns the number of supported speeds, and
- * fills the speeds * buffer with the supported speeds. If speeds buffer is
- * too small to contain * all currently supported speeds, will return as
+ * Description: Returns the number of supported speeds, and fills
+ * the speeds buffer with the supported speeds. If speeds buffer is
+ * too small to contain all currently supported speeds, will return as
* many speeds as can fit.
*/
unsigned int phy_supported_speeds(struct phy_device *phy,
@@ -120,7 +120,10 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
unsigned int size);
/**
- * It maps 'enum phy_interface_t' found in include/linux/phy.h
+ * phy_modes - map phy_interface_t enum to device tree binding of phy-mode
+ * @interface: enum phy_interface_t value
+ *
+ * Description: maps 'enum phy_interface_t' defined in this file
* into the device tree binding of 'phy-mode', so that Ethernet
* device driver can get phy interface from device tree.
*/
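
The phy.h changes only tidy the kernel-doc for phy_supported_speeds() and phy_modes(). A small usage sketch of the two documented helpers (the function name is hypothetical):

    #include <linux/kernel.h>
    #include <linux/phy.h>

    static void demo_dump_speeds(struct phy_device *phydev)
    {
            unsigned int speeds[10];
            unsigned int n, i;

            n = phy_supported_speeds(phydev, speeds, ARRAY_SIZE(speeds));
            for (i = 0; i < n; i++)
                    pr_info("%s: supports %u Mbps\n",
                            phy_modes(phydev->interface), speeds[i]);
    }
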
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 1fdefadf150a..e8e118d70fd7 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -110,6 +110,7 @@ struct phy_ops {
/**
* struct phy_attrs - represents phy attributes
* @bus_width: Data path width implemented by PHY
+ * @mode: PHY mode
*/
struct phy_attrs {
u32 bus_width;
@@ -121,7 +122,6 @@ struct phy_attrs {
* @dev: phy device
* @id: id of the phy device
* @ops: function pointers for performing phy operations
- * @init_data: list of PHY consumers (non-dt only)
* @mutex: mutex to protect phy_ops
* @init_count: used to protect when the PHY is used by multiple consumers
* @power_count: used to protect when the PHY is used by multiple consumers
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 6894976b54e3..186cd8e970c7 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -573,6 +573,8 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
else if (destroy)
destroy(ptr);
+ if (producer >= size)
+ producer = 0;
__ptr_ring_set_size(r, size);
r->producer = producer;
r->consumer_head = 0;
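
The ptr_ring.h fix wraps the producer index back to 0 when a resize copies as many entries as the new, smaller ring can hold, so r->producer never points past the end of the new array. A sketch of the affected path from the caller's side, assuming a hypothetical destroy callback:

    #include <linux/ptr_ring.h>

    static int demo_shrink_ring(struct ptr_ring *r, void (*destroy)(void *))
    {
            /* Resize to 8 slots; entries that no longer fit are passed to
             * destroy().  The hunk above keeps the producer index valid
             * when the copy fills the smaller ring completely.
             */
            return ptr_ring_resize(r, 8, GFP_KERNEL, destroy);
    }
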
diff --git a/include/linux/regset.h b/include/linux/regset.h
index 494cedaafdf2..a85c1707285c 100644
--- a/include/linux/regset.h
+++ b/include/linux/regset.h
@@ -376,7 +376,7 @@ static inline int copy_regset_to_user(struct task_struct *target,
if (!regset->get)
return -EOPNOTSUPP;
- if (!access_ok(VERIFY_WRITE, data, size))
+ if (!access_ok(data, size))
return -EFAULT;
return regset->get(target, regset, offset, size, NULL, data);
@@ -402,7 +402,7 @@ static inline int copy_regset_from_user(struct task_struct *target,
if (!regset->set)
return -EOPNOTSUPP;
- if (!access_ok(VERIFY_READ, data, size))
+ if (!access_ok(data, size))
return -EFAULT;
return regset->set(target, regset, offset, size, NULL, data);
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index efe79c1cdd47..37b226e8df13 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -6,9 +6,6 @@
#include <linux/thread_info.h>
#include <linux/kasan-checks.h>
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
#include <asm/uaccess.h>
@@ -111,7 +108,7 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
might_fault();
- if (likely(access_ok(VERIFY_READ, from, n))) {
+ if (likely(access_ok(from, n))) {
kasan_check_write(to, n);
res = raw_copy_from_user(to, from, n);
}
@@ -129,7 +126,7 @@ static inline unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
- if (access_ok(VERIFY_WRITE, to, n)) {
+ if (access_ok(to, n)) {
kasan_check_read(from, n);
n = raw_copy_to_user(to, from, n);
}
@@ -160,7 +157,7 @@ static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
might_fault();
- if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
+ if (access_ok(to, n) && access_ok(from, n))
n = raw_copy_in_user(to, from, n);
return n;
}
@@ -267,7 +264,7 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
probe_kernel_read(&retval, addr, sizeof(retval))
#ifndef user_access_begin
-#define user_access_begin() do { } while (0)
+#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
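
With this change user_access_begin() takes the pointer and length and evaluates to the access_ok() result, so callers must check it before issuing unsafe accesses. A sketch of the resulting pattern (the function itself is hypothetical):

    #include <linux/types.h>
    #include <linux/uaccess.h>

    static int demo_put_two(u32 __user *uptr, u32 a, u32 b)
    {
            if (!user_access_begin(uptr, 2 * sizeof(u32)))
                    return -EFAULT;
            unsafe_put_user(a, &uptr[0], efault);
            unsafe_put_user(b, &uptr[1], efault);
            user_access_end();
            return 0;
    efault:
            user_access_end();
            return -EFAULT;
    }
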
diff --git a/include/net/checksum.h b/include/net/checksum.h
index aef2b2bb6603..0f319e13be2c 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -30,7 +30,7 @@ static inline
__wsum csum_and_copy_from_user (const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
{
- if (access_ok(VERIFY_READ, src, len))
+ if (access_ok(src, len))
return csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
if (len)
@@ -46,7 +46,7 @@ static __inline__ __wsum csum_and_copy_to_user
{
sum = csum_partial(src, len, sum);
- if (access_ok(VERIFY_WRITE, dst, len)) {
+ if (access_ok(dst, len)) {
if (copy_to_user(dst, src, len) == 0)
return sum;
}
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index cbcf35ce1b14..34f019650941 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -308,6 +308,26 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
int ip_tunnel_encap_setup(struct ip_tunnel *t,
struct ip_tunnel_encap *ipencap);
+static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+{
+ int nhlen;
+
+ switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ nhlen = sizeof(struct ipv6hdr);
+ break;
+#endif
+ case htons(ETH_P_IP):
+ nhlen = sizeof(struct iphdr);
+ break;
+ default:
+ nhlen = 0;
+ }
+
+ return pskb_network_may_pull(skb, nhlen);
+}
+
static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
{
const struct ip_tunnel_encap_ops *ops;
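
pskb_inet_may_pull() lets tunnel transmit paths validate that the inner IPv4/IPv6 header is present in the linear area before dereferencing it. A hedged sketch of a caller; the xmit function is hypothetical and real drivers would hand the packet to the tunnel core after the check:

    #include <linux/netdevice.h>
    #include <net/ip.h>
    #include <net/ip_tunnels.h>

    static netdev_tx_t demo_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            if (!pskb_inet_may_pull(skb)) {
                    dev->stats.tx_errors++;
                    kfree_skb(skb);
                    return NETDEV_TX_OK;
            }

            /* The inner network header can now be read safely. */
            if (skb->protocol == htons(ETH_P_IP))
                    pr_debug("inner ttl %u\n", ip_hdr(skb)->ttl);

            return NETDEV_TX_OK;
    }
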
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
index 4b2b2baf8ab4..f32fc8289473 100644
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -5,17 +5,10 @@
struct nf_conncount_data;
-enum nf_conncount_list_add {
- NF_CONNCOUNT_ADDED, /* list add was ok */
- NF_CONNCOUNT_ERR, /* -ENOMEM, must drop skb */
- NF_CONNCOUNT_SKIP, /* list is already reclaimed by gc */
-};
-
struct nf_conncount_list {
spinlock_t list_lock;
struct list_head head; /* connections with the same filtering key */
unsigned int count; /* length of list */
- bool dead;
};
struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
@@ -29,18 +22,12 @@ unsigned int nf_conncount_count(struct net *net,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone);
-void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone,
- bool *addit);
+int nf_conncount_add(struct net *net, struct nf_conncount_list *list,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone);
void nf_conncount_list_init(struct nf_conncount_list *list);
-enum nf_conncount_list_add
-nf_conncount_add(struct nf_conncount_list *list,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone);
-
bool nf_conncount_gc_list(struct net *net,
struct nf_conncount_list *list);
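
The conncount API rework folds the old lookup/add pair into a single nf_conncount_add() that takes the net namespace and returns an int. A caller sketch under the assumption that 0 means the connection was added and a negative value signals failure (the real user is the connlimit match, which this diff does not show):

    #include <net/netfilter/nf_conntrack_count.h>

    static bool demo_over_limit(struct net *net, struct nf_conncount_list *list,
                                const struct nf_conntrack_tuple *tuple,
                                const struct nf_conntrack_zone *zone,
                                unsigned int limit)
    {
            if (nf_conncount_add(net, list, tuple, zone) != 0)
                    return true;    /* treat allocation failure as over limit */

            return list->count > limit;
    }
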
diff --git a/include/net/sock.h b/include/net/sock.h
index a6235c286ef9..2b229f7be8eb 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -298,6 +298,7 @@ struct sock_common {
* @sk_filter: socket filtering instructions
* @sk_timer: sock cleanup timer
* @sk_stamp: time stamp of last packet received
+ * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
* @sk_tsflags: SO_TIMESTAMPING socket options
* @sk_tskey: counter to disambiguate concurrent tstamp requests
* @sk_zckey: counter to order MSG_ZEROCOPY notifications
@@ -474,6 +475,9 @@ struct sock {
const struct cred *sk_peer_cred;
long sk_rcvtimeo;
ktime_t sk_stamp;
+#if BITS_PER_LONG==32
+ seqlock_t sk_stamp_seq;
+#endif
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
@@ -2297,6 +2301,34 @@ static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
atomic_add(segs, &sk->sk_drops);
}
+static inline ktime_t sock_read_timestamp(struct sock *sk)
+{
+#if BITS_PER_LONG==32
+ unsigned int seq;
+ ktime_t kt;
+
+ do {
+ seq = read_seqbegin(&sk->sk_stamp_seq);
+ kt = sk->sk_stamp;
+ } while (read_seqretry(&sk->sk_stamp_seq, seq));
+
+ return kt;
+#else
+ return sk->sk_stamp;
+#endif
+}
+
+static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
+{
+#if BITS_PER_LONG==32
+ write_seqlock(&sk->sk_stamp_seq);
+ sk->sk_stamp = kt;
+ write_sequnlock(&sk->sk_stamp_seq);
+#else
+ sk->sk_stamp = kt;
+#endif
+}
+
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
@@ -2321,7 +2353,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
else
- sk->sk_stamp = kt;
+ sock_write_timestamp(sk, kt);
if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
__sock_recv_wifi_status(msg, sk, skb);
@@ -2342,9 +2374,9 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
__sock_recv_ts_and_drops(msg, sk, skb);
else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
- sk->sk_stamp = skb->tstamp;
+ sock_write_timestamp(sk, skb->tstamp);
else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
- sk->sk_stamp = 0;
+ sock_write_timestamp(sk, 0);
}
void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
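
On 32-bit architectures the new sk_stamp_seq seqlock keeps readers from observing a torn 64-bit sk_stamp, and all writers in this diff go through sock_write_timestamp(). A reader-side sketch (the helper name is hypothetical) showing the accessor that callers should now use instead of touching sk->sk_stamp directly:

    #include <net/sock.h>

    static ktime_t demo_last_rx_stamp(struct sock *sk)
    {
            ktime_t kt = sock_read_timestamp(sk);

            if (kt == SK_DEFAULT_STAMP)
                    kt = 0;         /* no packet has been timestamped yet */
            return kt;
    }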