Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acpi_bus.h | 6
-rw-r--r--  include/crypto/drbg.h | 13
-rw-r--r--  include/drm/drm_pciids.h | 7
-rw-r--r--  include/linux/blk-mq.h | 7
-rw-r--r--  include/linux/bpf.h | 136
-rw-r--r--  include/linux/brcmphy.h | 137
-rw-r--r--  include/linux/ccp.h | 12
-rw-r--r--  include/linux/com20020.h | 29
-rw-r--r--  include/linux/cpuset.h | 4
-rw-r--r--  include/linux/cycx_x25.h | 125
-rw-r--r--  include/linux/dcache.h | 1
-rw-r--r--  include/linux/dynamic_queue_limits.h | 12
-rw-r--r--  include/linux/etherdevice.h | 1
-rw-r--r--  include/linux/ethtool.h | 4
-rw-r--r--  include/linux/f2fs_fs.h | 16
-rw-r--r--  include/linux/filter.h | 196
-rw-r--r--  include/linux/ftrace.h | 14
-rw-r--r--  include/linux/gpio/consumer.h | 109
-rw-r--r--  include/linux/hash.h | 4
-rw-r--r--  include/linux/i2c.h | 16
-rw-r--r--  include/linux/i82593.h | 229
-rw-r--r--  include/linux/if_macvlan.h | 1
-rw-r--r--  include/linux/igmp.h | 1
-rw-r--r--  include/linux/iio/trigger.h | 4
-rw-r--r--  include/linux/jbd2.h | 30
-rw-r--r--  include/linux/jiffies.h | 12
-rw-r--r--  include/linux/leds.h | 3
-rw-r--r--  include/linux/mlx4/device.h | 19
-rw-r--r--  include/linux/mlx4/qp.h | 12
-rw-r--r--  include/linux/mlx5/device.h | 152
-rw-r--r--  include/linux/mlx5/driver.h | 118
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 349
-rw-r--r--  include/linux/mlx5/qp.h | 3
-rw-r--r--  include/linux/mtd/nand.h | 2
-rw-r--r--  include/linux/netdevice.h | 211
-rw-r--r--  include/linux/netfilter.h | 5
-rw-r--r--  include/linux/netfilter/ipset/ip_set.h | 60
-rw-r--r--  include/linux/netfilter/ipset/ip_set_list.h | 1
-rw-r--r--  include/linux/netfilter_bridge.h | 50
-rw-r--r--  include/linux/nfs_page.h | 1
-rw-r--r--  include/linux/pci.h | 6
-rw-r--r--  include/linux/percpu-refcount.h | 1
-rw-r--r--  include/linux/phonedev.h | 25
-rw-r--r--  include/linux/phy.h | 27
-rw-r--r--  include/linux/phy_fixed.h | 17
-rw-r--r--  include/linux/platform_data/mtd-nand-omap2.h | 13
-rw-r--r--  include/linux/pm_domain.h | 2
-rw-r--r--  include/linux/random.h | 4
-rw-r--r--  include/linux/regulator/driver.h | 2
-rw-r--r--  include/linux/regulator/machine.h | 1
-rw-r--r--  include/linux/rhashtable.h | 2
-rw-r--r--  include/linux/rtnetlink.h | 10
-rw-r--r--  include/linux/sched.h | 47
-rw-r--r--  include/linux/seqno-fence.h | 1
-rw-r--r--  include/linux/skbuff.h | 341
-rw-r--r--  include/linux/spi/spi.h | 7
-rw-r--r--  include/linux/syscalls.h | 3
-rw-r--r--  include/linux/tcp.h | 2
-rw-r--r--  include/linux/tick.h | 7
-rw-r--r--  include/linux/udp.h | 16
-rw-r--r--  include/linux/uio.h | 2
-rw-r--r--  include/linux/vga_switcheroo.h | 2
-rw-r--r--  include/linux/vgaarb.h | 2
-rw-r--r--  include/linux/workqueue.h | 2
-rw-r--r--  include/media/videobuf2-core.h | 6
-rw-r--r--  include/net/addrconf.h | 3
-rw-r--r--  include/net/ah.h | 3
-rw-r--r--  include/net/checksum.h | 4
-rw-r--r--  include/net/codel.h | 2
-rw-r--r--  include/net/dsa.h | 95
-rw-r--r--  include/net/dst.h | 16
-rw-r--r--  include/net/flow_keys.h | 16
-rw-r--r--  include/net/gen_stats.h | 15
-rw-r--r--  include/net/genetlink.h | 8
-rw-r--r--  include/net/gue.h | 23
-rw-r--r--  include/net/if_inet6.h | 1
-rw-r--r--  include/net/inet_connection_sock.h | 9
-rw-r--r--  include/net/inetpeer.h | 1
-rw-r--r--  include/net/ip.h | 29
-rw-r--r--  include/net/ip6_checksum.h | 8
-rw-r--r--  include/net/ip6_fib.h | 5
-rw-r--r--  include/net/ip_fib.h | 5
-rw-r--r--  include/net/ip_tunnels.h | 19
-rw-r--r--  include/net/ip_vs.h | 223
-rw-r--r--  include/net/ipv6.h | 4
-rw-r--r--  include/net/mld.h | 5
-rw-r--r--  include/net/neighbour.h | 2
-rw-r--r--  include/net/net_namespace.h | 20
-rw-r--r--  include/net/netfilter/br_netfilter.h | 6
-rw-r--r--  include/net/netfilter/ipv4/nf_nat_masquerade.h | 14
-rw-r--r--  include/net/netfilter/ipv4/nf_reject.h | 119
-rw-r--r--  include/net/netfilter/ipv6/nf_nat_masquerade.h | 10
-rw-r--r--  include/net/netfilter/ipv6/nf_reject.h | 2
-rw-r--r--  include/net/netfilter/nf_nat.h | 10
-rw-r--r--  include/net/netfilter/nf_nat_l3proto.h | 75
-rw-r--r--  include/net/netfilter/nf_tables.h | 2
-rw-r--r--  include/net/netfilter/nft_masq.h | 16
-rw-r--r--  include/net/netfilter/nft_reject.h | 9
-rw-r--r--  include/net/netns/ipv4.h | 1
-rw-r--r--  include/net/netns/xfrm.h | 14
-rw-r--r--  include/net/pkt_cls.h | 8
-rw-r--r--  include/net/pkt_sched.h | 4
-rw-r--r--  include/net/sch_generic.h | 122
-rw-r--r--  include/net/sctp/sctp.h | 13
-rw-r--r--  include/net/snmp.h | 8
-rw-r--r--  include/net/sock.h | 20
-rw-r--r--  include/net/tcp.h | 85
-rw-r--r--  include/net/udp.h | 21
-rw-r--r--  include/net/udp_tunnel.h | 85
-rw-r--r--  include/net/wimax.h | 2
-rw-r--r--  include/net/xfrm.h | 1
-rw-r--r--  include/rdma/ib_umem.h | 1
-rw-r--r--  include/rxrpc/types.h | 41
-rw-r--r--  include/scsi/scsi_tcq.h | 2
-rw-r--r--  include/sound/soc.h | 2
-rw-r--r--  include/trace/events/irq.h | 4
-rw-r--r--  include/uapi/asm-generic/unistd.h | 4
-rw-r--r--  include/uapi/drm/radeon_drm.h | 1
-rw-r--r--  include/uapi/linux/Kbuild | 3
-rw-r--r--  include/uapi/linux/bpf.h | 155
-rw-r--r--  include/uapi/linux/ethtool.h | 28
-rw-r--r--  include/uapi/linux/fou.h | 39
-rw-r--r--  include/uapi/linux/if_ether.h | 1
-rw-r--r--  include/uapi/linux/if_link.h | 24
-rw-r--r--  include/uapi/linux/if_tunnel.h | 17
-rw-r--r--  include/uapi/linux/inet_diag.h | 13
-rw-r--r--  include/uapi/linux/input.h | 1
-rw-r--r--  include/uapi/linux/ip_vs.h | 3
-rw-r--r--  include/uapi/linux/netfilter/ipset/ip_set.h | 12
-rw-r--r--  include/uapi/linux/netfilter/nf_nat.h | 5
-rw-r--r--  include/uapi/linux/netfilter/nf_tables.h | 59
-rw-r--r--  include/uapi/linux/netfilter/nfnetlink_acct.h | 8
-rw-r--r--  include/uapi/linux/netfilter/xt_set.h | 10
-rw-r--r--  include/uapi/linux/netfilter_arp/arpt_mangle.h | 2
-rw-r--r--  include/uapi/linux/openvswitch.h | 26
-rw-r--r--  include/uapi/linux/usbip.h | 26
-rw-r--r--  include/uapi/linux/xattr.h | 2
-rw-r--r--  include/uapi/linux/xfrm.h | 7
-rw-r--r--  include/xen/interface/features.h | 3
139 files changed, 2913 insertions, 1409 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index bcfd808b1098..57ee0528aacb 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -118,6 +118,7 @@ struct acpi_device;
struct acpi_hotplug_profile {
struct kobject kobj;
int (*scan_dependent)(struct acpi_device *adev);
+ void (*notify_online)(struct acpi_device *adev);
bool enabled:1;
bool demand_offline:1;
};
@@ -204,10 +205,9 @@ struct acpi_device_flags {
u32 match_driver:1;
u32 initialized:1;
u32 visited:1;
- u32 no_hotplug:1;
u32 hotplug_notify:1;
u32 is_dock_station:1;
- u32 reserved:22;
+ u32 reserved:23;
};
/* File System */
@@ -246,7 +246,6 @@ struct acpi_device_pnp {
acpi_device_name device_name; /* Driver-determined */
acpi_device_class device_class; /* " */
union acpi_object *str_obj; /* unicode string for _STR method */
- unsigned long sun; /* _SUN */
};
#define acpi_device_bid(d) ((d)->pnp.bus_id)
@@ -412,7 +411,6 @@ void acpi_bus_private_data_handler(acpi_handle, void *);
int acpi_bus_get_private_data(acpi_handle, void **);
int acpi_bus_attach_private_data(acpi_handle, void *);
void acpi_bus_detach_private_data(acpi_handle);
-void acpi_bus_no_hotplug(acpi_handle handle);
extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
extern int register_acpi_notifier(struct notifier_block *);
extern int unregister_acpi_notifier(struct notifier_block *);
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 831d786976c5..882675e7c055 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -162,12 +162,25 @@ static inline size_t drbg_max_request_bytes(struct drbg_state *drbg)
static inline size_t drbg_max_addtl(struct drbg_state *drbg)
{
+#if (__BITS_PER_LONG == 32)
+ /*
+ * SP800-90A allows smaller maximum numbers to be returned -- we
+ * return SIZE_MAX - 1 to allow the verification of the enforcement
+ * of this value in drbg_healthcheck_sanity.
+ */
+ return (SIZE_MAX - 1);
+#else
return (1UL<<(drbg->core->max_addtllen));
+#endif
}
static inline size_t drbg_max_requests(struct drbg_state *drbg)
{
+#if (__BITS_PER_LONG == 32)
+ return SIZE_MAX;
+#else
return (1UL<<(drbg->core->max_req));
+#endif
}
/*
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 6dfd64b3a604..e973540cd15b 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -17,6 +17,7 @@
{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
@@ -164,8 +165,11 @@
{0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
@@ -175,6 +179,8 @@
{0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
@@ -297,6 +303,7 @@
{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index eb726b9c5762..a1e31f274fcd 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -127,10 +127,9 @@ enum {
BLK_MQ_RQ_QUEUE_ERROR = 2, /* end IO with error */
BLK_MQ_F_SHOULD_MERGE = 1 << 0,
- BLK_MQ_F_SHOULD_SORT = 1 << 1,
- BLK_MQ_F_TAG_SHARED = 1 << 2,
- BLK_MQ_F_SG_MERGE = 1 << 3,
- BLK_MQ_F_SYSFS_UP = 1 << 4,
+ BLK_MQ_F_TAG_SHARED = 1 << 1,
+ BLK_MQ_F_SG_MERGE = 1 << 2,
+ BLK_MQ_F_SYSFS_UP = 1 << 3,
BLK_MQ_S_STOPPED = 0,
BLK_MQ_S_TAG_ACTIVE = 1,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
new file mode 100644
index 000000000000..3cf91754a957
--- /dev/null
+++ b/include/linux/bpf.h
@@ -0,0 +1,136 @@
+/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _LINUX_BPF_H
+#define _LINUX_BPF_H 1
+
+#include <uapi/linux/bpf.h>
+#include <linux/workqueue.h>
+#include <linux/file.h>
+
+struct bpf_map;
+
+/* map is generic key/value storage optionally accesible by eBPF programs */
+struct bpf_map_ops {
+ /* funcs callable from userspace (via syscall) */
+ struct bpf_map *(*map_alloc)(union bpf_attr *attr);
+ void (*map_free)(struct bpf_map *);
+ int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
+
+ /* funcs callable from userspace and from eBPF programs */
+ void *(*map_lookup_elem)(struct bpf_map *map, void *key);
+ int (*map_update_elem)(struct bpf_map *map, void *key, void *value);
+ int (*map_delete_elem)(struct bpf_map *map, void *key);
+};
+
+struct bpf_map {
+ atomic_t refcnt;
+ enum bpf_map_type map_type;
+ u32 key_size;
+ u32 value_size;
+ u32 max_entries;
+ struct bpf_map_ops *ops;
+ struct work_struct work;
+};
+
+struct bpf_map_type_list {
+ struct list_head list_node;
+ struct bpf_map_ops *ops;
+ enum bpf_map_type type;
+};
+
+void bpf_register_map_type(struct bpf_map_type_list *tl);
+void bpf_map_put(struct bpf_map *map);
+struct bpf_map *bpf_map_get(struct fd f);
+
+/* function argument constraints */
+enum bpf_arg_type {
+ ARG_ANYTHING = 0, /* any argument is ok */
+
+ /* the following constraints used to prototype
+ * bpf_map_lookup/update/delete_elem() functions
+ */
+ ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
+ ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
+ ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
+
+ /* the following constraints used to prototype bpf_memcmp() and other
+ * functions that access data on eBPF program stack
+ */
+ ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
+ ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
+};
+
+/* type of values returned from helper functions */
+enum bpf_return_type {
+ RET_INTEGER, /* function returns integer */
+ RET_VOID, /* function doesn't return anything */
+ RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
+};
+
+/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
+ * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
+ * instructions after verifying
+ */
+struct bpf_func_proto {
+ u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+ bool gpl_only;
+ enum bpf_return_type ret_type;
+ enum bpf_arg_type arg1_type;
+ enum bpf_arg_type arg2_type;
+ enum bpf_arg_type arg3_type;
+ enum bpf_arg_type arg4_type;
+ enum bpf_arg_type arg5_type;
+};
+
+/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
+ * the first argument to eBPF programs.
+ * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
+ */
+struct bpf_context;
+
+enum bpf_access_type {
+ BPF_READ = 1,
+ BPF_WRITE = 2
+};
+
+struct bpf_verifier_ops {
+ /* return eBPF function prototype for verification */
+ const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
+
+ /* return true if 'size' wide access at offset 'off' within bpf_context
+ * with 'type' (read or write) is allowed
+ */
+ bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
+};
+
+struct bpf_prog_type_list {
+ struct list_head list_node;
+ struct bpf_verifier_ops *ops;
+ enum bpf_prog_type type;
+};
+
+void bpf_register_prog_type(struct bpf_prog_type_list *tl);
+
+struct bpf_prog;
+
+struct bpf_prog_aux {
+ atomic_t refcnt;
+ bool is_gpl_compatible;
+ enum bpf_prog_type prog_type;
+ struct bpf_verifier_ops *ops;
+ struct bpf_map **used_maps;
+ u32 used_map_cnt;
+ struct bpf_prog *prog;
+ struct work_struct work;
+};
+
+void bpf_prog_put(struct bpf_prog *prog);
+struct bpf_prog *bpf_prog_get(u32 ufd);
+/* verify correctness of eBPF program */
+int bpf_check(struct bpf_prog *fp, union bpf_attr *attr);
+
+#endif /* _LINUX_BPF_H */
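
[Editor's note: the new bpf.h above only declares the map plumbing. The sketch below shows how a map backend would typically hook into it; BPF_MAP_TYPE_EXAMPLE and the example_map_* callbacks are hypothetical placeholders, not part of this patch.]

/* Hypothetical sketch: registering a map backend through the
 * bpf_map_ops / bpf_map_type_list interface declared in bpf.h above.
 * The example_map_* callbacks are assumed to be defined elsewhere.
 */
static struct bpf_map_ops example_map_ops = {
	.map_alloc        = example_map_alloc,
	.map_free         = example_map_free,
	.map_get_next_key = example_map_get_next_key,
	.map_lookup_elem  = example_map_lookup_elem,
	.map_update_elem  = example_map_update_elem,
	.map_delete_elem  = example_map_delete_elem,
};

static struct bpf_map_type_list example_map_type __read_mostly = {
	.ops  = &example_map_ops,
	.type = BPF_MAP_TYPE_EXAMPLE,	/* hypothetical enum value */
};

static int __init register_example_map(void)
{
	bpf_register_map_type(&example_map_type);
	return 0;
}
late_initcall(register_example_map);
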
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 61219b9b3445..7ccd928cc1f2 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -13,7 +13,11 @@
#define PHY_ID_BCM5461 0x002060c0
#define PHY_ID_BCM57780 0x03625d90
+#define PHY_ID_BCM7250 0xae025280
+#define PHY_ID_BCM7364 0xae025260
#define PHY_ID_BCM7366 0x600d8490
+#define PHY_ID_BCM7425 0x03625e60
+#define PHY_ID_BCM7429 0x600d8730
#define PHY_ID_BCM7439 0x600d8480
#define PHY_ID_BCM7445 0x600d8510
@@ -21,9 +25,9 @@
#define PHY_BCM_OUI_1 0x00206000
#define PHY_BCM_OUI_2 0x0143bc00
#define PHY_BCM_OUI_3 0x03625c00
-#define PHY_BCM_OUI_4 0x600d0000
+#define PHY_BCM_OUI_4 0x600d8400
#define PHY_BCM_OUI_5 0x03625e00
-
+#define PHY_BCM_OUI_6 0xae025000
#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
@@ -38,7 +42,8 @@
#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
/* Broadcom BCM7xxx specific workarounds */
-#define PHY_BRCM_100MBPS_WAR 0x00010000
+#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff)
+#define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff)
#define PHY_BCM_FLAGS_VALID 0x80000000
/* Broadcom BCM54XX register definitions, common to most Broadcom PHYs */
@@ -92,4 +97,130 @@
#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
+/*
+ * Broadcom LED source encodings. These are used in BCM5461, BCM5481,
+ * BCM5482, and possibly some others.
+ */
+#define BCM_LED_SRC_LINKSPD1 0x0
+#define BCM_LED_SRC_LINKSPD2 0x1
+#define BCM_LED_SRC_XMITLED 0x2
+#define BCM_LED_SRC_ACTIVITYLED 0x3
+#define BCM_LED_SRC_FDXLED 0x4
+#define BCM_LED_SRC_SLAVE 0x5
+#define BCM_LED_SRC_INTR 0x6
+#define BCM_LED_SRC_QUALITY 0x7
+#define BCM_LED_SRC_RCVLED 0x8
+#define BCM_LED_SRC_MULTICOLOR1 0xa
+#define BCM_LED_SRC_OPENSHORT 0xb
+#define BCM_LED_SRC_OFF 0xe /* Tied high */
+#define BCM_LED_SRC_ON 0xf /* Tied low */
+
+
+/*
+ * BCM5482: Shadow registers
+ * Shadow values go into bits [14:10] of register 0x1c to select a shadow
+ * register to access.
+ */
+/* 00101: Spare Control Register 3 */
+#define BCM54XX_SHD_SCR3 0x05
+#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
+#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002
+#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004
+
+/* 01010: Auto Power-Down */
+#define BCM54XX_SHD_APD 0x0a
+#define BCM54XX_SHD_APD_EN 0x0020
+
+#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
+ /* LED3 / ~LINKSPD[2] selector */
+#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
+ /* LED1 / ~LINKSPD[1] selector */
+#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
+#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */
+#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
+#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
+#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */
+#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */
+#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */
+
+
+/*
+ * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17)
+ */
+#define MII_BCM54XX_EXP_AADJ1CH0 0x001f
+#define MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN 0x0200
+#define MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF 0x0100
+#define MII_BCM54XX_EXP_AADJ1CH3 0x601f
+#define MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ 0x0002
+#define MII_BCM54XX_EXP_EXP08 0x0F08
+#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001
+#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200
+#define MII_BCM54XX_EXP_EXP75 0x0f75
+#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c
+#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001
+#define MII_BCM54XX_EXP_EXP96 0x0f96
+#define MII_BCM54XX_EXP_EXP96_MYST 0x0010
+#define MII_BCM54XX_EXP_EXP97 0x0f97
+#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c
+
+/*
+ * BCM5482: Secondary SerDes registers
+ */
+#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */
+#define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */
+#define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */
+#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
+#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
+
+
+/*****************************************************************************/
+/* Fast Ethernet Transceiver definitions. */
+/*****************************************************************************/
+
+#define MII_BRCM_FET_INTREG 0x1a /* Interrupt register */
+#define MII_BRCM_FET_IR_MASK 0x0100 /* Mask all interrupts */
+#define MII_BRCM_FET_IR_LINK_EN 0x0200 /* Link status change enable */
+#define MII_BRCM_FET_IR_SPEED_EN 0x0400 /* Link speed change enable */
+#define MII_BRCM_FET_IR_DUPLEX_EN 0x0800 /* Duplex mode change enable */
+#define MII_BRCM_FET_IR_ENABLE 0x4000 /* Interrupt enable */
+
+#define MII_BRCM_FET_BRCMTEST 0x1f /* Brcm test register */
+#define MII_BRCM_FET_BT_SRE 0x0080 /* Shadow register enable */
+
+
+/*** Shadow register definitions ***/
+
+#define MII_BRCM_FET_SHDW_MISCCTRL 0x10 /* Shadow misc ctrl */
+#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */
+
+#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */
+#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003
+#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001
+
+#define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */
+#define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */
+
+/*
+ * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T
+ * 0x1c shadow registers.
+ */
+static inline int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow)
+{
+ phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
+ return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD));
+}
+
+static inline int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow,
+ u16 val)
+{
+ return phy_write(phydev, MII_BCM54XX_SHD,
+ MII_BCM54XX_SHD_WRITE |
+ MII_BCM54XX_SHD_VAL(shadow) |
+ MII_BCM54XX_SHD_DATA(val));
+}
+
+#define BRCM_CL45VEN_EEE_CONTROL 0x803d
+#define LPI_FEATURE_EN 0x8000
+#define LPI_FEATURE_EN_DIG1000X 0x4000
+
#endif /* _LINUX_BRCMPHY_H */
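
[Editor's note: for illustration only, not part of the patch. The new bcm54xx_shadow_read()/bcm54xx_shadow_write() helpers are intended for read-modify-write sequences like the one below, here enabling Auto Power-Down via the 0x1c shadow bank; the function name is hypothetical.]

static int bcm54xx_example_enable_apd(struct phy_device *phydev)
{
	/* read the Auto Power-Down shadow register, set the enable bit */
	int val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);

	return bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD,
				    val | BCM54XX_SHD_APD_EN);
}
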
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index ebcc9d146219..7f437036baa4 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -27,6 +27,13 @@ struct ccp_cmd;
defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE)
/**
+ * ccp_present - check if a CCP device is present
+ *
+ * Returns zero if a CCP device is present, -ENODEV otherwise.
+ */
+int ccp_present(void);
+
+/**
* ccp_enqueue_cmd - queue an operation for processing by the CCP
*
* @cmd: ccp_cmd struct to be processed
@@ -53,6 +60,11 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd);
#else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */
+static inline int ccp_present(void)
+{
+ return -ENODEV;
+}
+
static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
return -ENODEV;
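
[Editor's note: a minimal sketch of how a consumer might use the new ccp_present() hook before committing to the hardware path; the init function is hypothetical.]

static int __init example_ccp_client_init(void)
{
	if (ccp_present() != 0)
		return -ENODEV;	/* no CCP device: fall back to a software path */

	/* safe to allocate ccp_cmd structures and call ccp_enqueue_cmd() */
	return 0;
}
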
diff --git a/include/linux/com20020.h b/include/linux/com20020.h
index 5dcfb944b6ce..85898995b234 100644
--- a/include/linux/com20020.h
+++ b/include/linux/com20020.h
@@ -41,6 +41,35 @@ extern const struct net_device_ops com20020_netdev_ops;
#define BUS_ALIGN 1
#endif
+#define PLX_PCI_MAX_CARDS 2
+
+struct com20020_pci_channel_map {
+ u32 bar;
+ u32 offset;
+ u32 size; /* 0x00 - auto, e.g. length of entire bar */
+};
+
+struct com20020_pci_card_info {
+ const char *name;
+ int devcount;
+
+ struct com20020_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CARDS];
+
+ unsigned int flags;
+};
+
+struct com20020_priv {
+ struct com20020_pci_card_info *ci;
+ struct list_head list_dev;
+};
+
+struct com20020_dev {
+ struct list_head list;
+ struct net_device *dev;
+
+ struct com20020_priv *pci_priv;
+ int index;
+};
#define _INTMASK (ioaddr+BUS_ALIGN*0) /* writable */
#define _STATUS (ioaddr+BUS_ALIGN*0) /* readable */
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index ade2390ffe92..6e39c9bb0dae 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -93,12 +93,12 @@ extern int cpuset_slab_spread_node(void);
static inline int cpuset_do_page_mem_spread(void)
{
- return current->flags & PF_SPREAD_PAGE;
+ return task_spread_page(current);
}
static inline int cpuset_do_slab_mem_spread(void)
{
- return current->flags & PF_SPREAD_SLAB;
+ return task_spread_slab(current);
}
extern int current_cpuset_is_being_rebound(void);
diff --git a/include/linux/cycx_x25.h b/include/linux/cycx_x25.h
deleted file mode 100644
index 362bf19d6cf1..000000000000
--- a/include/linux/cycx_x25.h
+++ /dev/null
@@ -1,125 +0,0 @@
-#ifndef _CYCX_X25_H
-#define _CYCX_X25_H
-/*
-* cycx_x25.h Cyclom X.25 firmware API definitions.
-*
-* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
-*
-* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo
-*
-* Based on sdla_x25.h by Gene Kozin <74604.152@compuserve.com>
-*
-* This program is free software; you can redistribute it and/or
-* modify it under the terms of the GNU General Public License
-* as published by the Free Software Foundation; either version
-* 2 of the License, or (at your option) any later version.
-* ============================================================================
-* 2000/04/02 acme dprintk and cycx_debug
-* 1999/01/03 acme judicious use of data types
-* 1999/01/02 acme #define X25_ACK_N3 0x4411
-* 1998/12/28 acme cleanup: lot'o'things removed
-* commands listed,
-* TX25Cmd & TX25Config structs
-* typedef'ed
-*/
-#ifndef PACKED
-#define PACKED __attribute__((packed))
-#endif
-
-/* X.25 shared memory layout. */
-#define X25_MBOX_OFFS 0x300 /* general mailbox block */
-#define X25_RXMBOX_OFFS 0x340 /* receive mailbox */
-
-/* Debug */
-#define dprintk(level, format, a...) if (cycx_debug >= level) printk(format, ##a)
-
-extern unsigned int cycx_debug;
-
-/* Data Structures */
-/* X.25 Command Block. */
-struct cycx_x25_cmd {
- u16 command;
- u16 link; /* values: 0 or 1 */
- u16 len; /* values: 0 thru 0x205 (517) */
- u32 buf;
-} PACKED;
-
-/* Defines for the 'command' field. */
-#define X25_CONNECT_REQUEST 0x4401
-#define X25_CONNECT_RESPONSE 0x4402
-#define X25_DISCONNECT_REQUEST 0x4403
-#define X25_DISCONNECT_RESPONSE 0x4404
-#define X25_DATA_REQUEST 0x4405
-#define X25_ACK_TO_VC 0x4406
-#define X25_INTERRUPT_RESPONSE 0x4407
-#define X25_CONFIG 0x4408
-#define X25_CONNECT_INDICATION 0x4409
-#define X25_CONNECT_CONFIRM 0x440A
-#define X25_DISCONNECT_INDICATION 0x440B
-#define X25_DISCONNECT_CONFIRM 0x440C
-#define X25_DATA_INDICATION 0x440E
-#define X25_INTERRUPT_INDICATION 0x440F
-#define X25_ACK_FROM_VC 0x4410
-#define X25_ACK_N3 0x4411
-#define X25_CONNECT_COLLISION 0x4413
-#define X25_N3WIN 0x4414
-#define X25_LINE_ON 0x4415
-#define X25_LINE_OFF 0x4416
-#define X25_RESET_REQUEST 0x4417
-#define X25_LOG 0x4500
-#define X25_STATISTIC 0x4600
-#define X25_TRACE 0x4700
-#define X25_N2TRACEXC 0x4702
-#define X25_N3TRACEXC 0x4703
-
-/**
- * struct cycx_x25_config - cyclom2x x25 firmware configuration
- * @link - link number
- * @speed - line speed
- * @clock - internal/external
- * @n2 - # of level 2 retransm.(values: 1 thru FF)
- * @n2win - level 2 window (values: 1 thru 7)
- * @n3win - level 3 window (values: 1 thru 7)
- * @nvc - # of logical channels (values: 1 thru 64)
- * @pktlen - level 3 packet length - log base 2 of size
- * @locaddr - my address
- * @remaddr - remote address
- * @t1 - time, in seconds
- * @t2 - time, in seconds
- * @t21 - time, in seconds
- * @npvc - # of permanent virt. circuits (1 thru nvc)
- * @t23 - time, in seconds
- * @flags - see dosx25.doc, in portuguese, for details
- */
-struct cycx_x25_config {
- u8 link;
- u8 speed;
- u8 clock;
- u8 n2;
- u8 n2win;
- u8 n3win;
- u8 nvc;
- u8 pktlen;
- u8 locaddr;
- u8 remaddr;
- u16 t1;
- u16 t2;
- u8 t21;
- u8 npvc;
- u8 t23;
- u8 flags;
-} PACKED;
-
-struct cycx_x25_stats {
- u16 rx_crc_errors;
- u16 rx_over_errors;
- u16 n2_tx_frames;
- u16 n2_rx_frames;
- u16 tx_timeouts;
- u16 rx_timeouts;
- u16 n3_tx_packets;
- u16 n3_rx_packets;
- u16 tx_aborts;
- u16 rx_aborts;
-} PACKED;
-#endif /* _CYCX_X25_H */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index e4ae2ad48d07..75a227cc7ce2 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -55,6 +55,7 @@ struct qstr {
#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
#define hashlen_hash(hashlen) ((u32) (hashlen))
#define hashlen_len(hashlen) ((u32)((hashlen) >> 32))
+#define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash))
struct dentry_stat_t {
long nr_dentry;
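
[Editor's note: a quick illustration of the new hashlen_create() helper together with the existing accessors; this self-test is hypothetical, not from the patch. The length lands in the upper 32 bits, the hash in the lower 32.]

static inline void hashlen_example(void)
{
	u64 hashlen = hashlen_create(0x12345678, 11);

	/* hashlen_hash()/hashlen_len() undo the packing done above */
	WARN_ON(hashlen_hash(hashlen) != 0x12345678);
	WARN_ON(hashlen_len(hashlen) != 11);
}
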
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index 5621547d631b..a4be70398ce1 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -73,14 +73,22 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
{
BUG_ON(count > DQL_MAX_OBJECT);
- dql->num_queued += count;
dql->last_obj_cnt = count;
+
+ /* We want to force a write first, so that cpu do not attempt
+ * to get cache line containing last_obj_cnt, num_queued, adj_limit
+ * in Shared state, but directly does a Request For Ownership
+ * It is only a hint, we use barrier() only.
+ */
+ barrier();
+
+ dql->num_queued += count;
}
/* Returns how many objects can be queued, < 0 indicates over limit. */
static inline int dql_avail(const struct dql *dql)
{
- return dql->adj_limit - dql->num_queued;
+ return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued);
}
/* Record number of completed objects and recalculate the limit. */
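
[Editor's note: the barrier() comment above concerns the usual producer/consumer split sketched below. The transmit path accounts bytes with dql_queued() and checks dql_avail(); the completion path calls dql_completed(). Names here are illustrative.]

static bool example_tx_account(struct dql *dql, unsigned int bytes)
{
	dql_queued(dql, bytes);		/* writes last_obj_cnt before num_queued */

	/* negative means we are over adj_limit: caller should stop the queue */
	return dql_avail(dql) < 0;
}
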
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 9c5529dc6d07..733980fce8e3 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -29,6 +29,7 @@
#include <asm/bitsperlong.h>
#ifdef __KERNEL__
+u32 eth_get_headlen(void *data, unsigned int max_len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index e658229fee39..c1a2d60dfb82 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -257,6 +257,10 @@ struct ethtool_ops {
struct ethtool_eeprom *, u8 *);
int (*get_eee)(struct net_device *, struct ethtool_eee *);
int (*set_eee)(struct net_device *, struct ethtool_eee *);
+ int (*get_tunable)(struct net_device *,
+ const struct ethtool_tunable *, void *);
+ int (*set_tunable)(struct net_device *,
+ const struct ethtool_tunable *, const void *);
};
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 6ff0b0b42d47..08ed2b0a96e6 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -24,6 +24,9 @@
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
+/* 0, 1(node nid), 2(meta nid) are reserved node id */
+#define F2FS_RESERVED_NODE_NUM 3
+
#define F2FS_ROOT_INO(sbi) (sbi->root_ino_num)
#define F2FS_NODE_INO(sbi) (sbi->node_ino_num)
#define F2FS_META_INO(sbi) (sbi->meta_ino_num)
@@ -87,6 +90,8 @@ struct f2fs_super_block {
#define CP_ORPHAN_PRESENT_FLAG 0x00000002
#define CP_UMOUNT_FLAG 0x00000001
+#define F2FS_CP_PACKS 2 /* # of checkpoint packs */
+
struct f2fs_checkpoint {
__le64 checkpoint_ver; /* checkpoint block version number */
__le64 user_block_count; /* # of user blocks */
@@ -123,6 +128,9 @@ struct f2fs_checkpoint {
*/
#define F2FS_ORPHANS_PER_BLOCK 1020
+#define GET_ORPHAN_BLOCKS(n) ((n + F2FS_ORPHANS_PER_BLOCK - 1) / \
+ F2FS_ORPHANS_PER_BLOCK)
+
struct f2fs_orphan_block {
__le32 ino[F2FS_ORPHANS_PER_BLOCK]; /* inode numbers */
__le32 reserved; /* reserved */
@@ -144,6 +152,7 @@ struct f2fs_extent {
#define F2FS_NAME_LEN 255
#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */
#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
+#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
#define ADDRS_PER_INODE(fi) addrs_per_inode(fi)
#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
@@ -163,8 +172,9 @@ struct f2fs_extent {
#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
F2FS_INLINE_XATTR_ADDRS - 1))
-#define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) \
- - sizeof(__le32) * (DEF_ADDRS_PER_INODE + 5 - 1))
+#define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) -\
+ sizeof(__le32) * (DEF_ADDRS_PER_INODE + \
+ DEF_NIDS_PER_INODE - 1))
struct f2fs_inode {
__le16 i_mode; /* file mode */
@@ -194,7 +204,7 @@ struct f2fs_inode {
__le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
- __le32 i_nid[5]; /* direct(2), indirect(2),
+ __le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2),
double_indirect(1) node id */
} __packed;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a5227ab8ccb1..ca95abd2bed1 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -4,58 +4,24 @@
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__
+#include <stdarg.h>
+
#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
+#include <linux/linkage.h>
+#include <linux/printk.h>
#include <linux/workqueue.h>
-#include <uapi/linux/filter.h>
-/* Internally used and optimized filter representation with extended
- * instruction set based on top of classic BPF.
- */
+#include <asm/cacheflush.h>
-/* instruction classes */
-#define BPF_ALU64 0x07 /* alu mode in double word width */
-
-/* ld/ldx fields */
-#define BPF_DW 0x18 /* double word */
-#define BPF_XADD 0xc0 /* exclusive add */
-
-/* alu/jmp fields */
-#define BPF_MOV 0xb0 /* mov reg to reg */
-#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
-
-/* change endianness of a register */
-#define BPF_END 0xd0 /* flags for endianness conversion: */
-#define BPF_TO_LE 0x00 /* convert to little-endian */
-#define BPF_TO_BE 0x08 /* convert to big-endian */
-#define BPF_FROM_LE BPF_TO_LE
-#define BPF_FROM_BE BPF_TO_BE
-
-#define BPF_JNE 0x50 /* jump != */
-#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
-#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
-#define BPF_CALL 0x80 /* function call */
-#define BPF_EXIT 0x90 /* function return */
-
-/* Register numbers */
-enum {
- BPF_REG_0 = 0,
- BPF_REG_1,
- BPF_REG_2,
- BPF_REG_3,
- BPF_REG_4,
- BPF_REG_5,
- BPF_REG_6,
- BPF_REG_7,
- BPF_REG_8,
- BPF_REG_9,
- BPF_REG_10,
- __MAX_BPF_REG,
-};
+#include <uapi/linux/filter.h>
+#include <uapi/linux/bpf.h>
-/* BPF has 10 general purpose 64-bit registers and stack frame. */
-#define MAX_BPF_REG __MAX_BPF_REG
+struct sk_buff;
+struct sock;
+struct seccomp_data;
+struct bpf_prog_aux;
/* ArgX, context and stack frame pointer register positions. Note,
* Arg1, Arg2, Arg3, etc are used as argument mappings of function
@@ -161,6 +127,30 @@ enum {
.off = 0, \
.imm = IMM })
+/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
+#define BPF_LD_IMM64(DST, IMM) \
+ BPF_LD_IMM64_RAW(DST, 0, IMM)
+
+#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_LD | BPF_DW | BPF_IMM, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = (__u32) (IMM) }), \
+ ((struct bpf_insn) { \
+ .code = 0, /* zero is reserved opcode */ \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = ((__u64) (IMM)) >> 32 })
+
+#define BPF_PSEUDO_MAP_FD 1
+
+/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
+#define BPF_LD_MAP_FD(DST, MAP_FD) \
+ BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
+
/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
@@ -299,14 +289,6 @@ enum {
#define SK_RUN_FILTER(filter, ctx) \
(*filter->prog->bpf_func)(ctx, filter->prog->insnsi)
-struct bpf_insn {
- __u8 code; /* opcode */
- __u8 dst_reg:4; /* dest register */
- __u8 src_reg:4; /* source register */
- __s16 off; /* signed offset */
- __s32 imm; /* signed immediate constant */
-};
-
#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
@@ -320,20 +302,23 @@ struct sock_fprog_kern {
struct sock_filter *filter;
};
-struct sk_buff;
-struct sock;
-struct seccomp_data;
+struct bpf_binary_header {
+ unsigned int pages;
+ u8 image[];
+};
struct bpf_prog {
- u32 jited:1, /* Is our filter JIT'ed? */
- len:31; /* Number of filter blocks */
+ u16 pages; /* Number of allocated pages */
+ bool jited; /* Is our filter JIT'ed? */
+ u32 len; /* Number of filter blocks */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
+ struct bpf_prog_aux *aux; /* Auxiliary fields */
unsigned int (*bpf_func)(const struct sk_buff *skb,
const struct bpf_insn *filter);
+ /* Instructions for interpreter */
union {
struct sock_filter insns[0];
struct bpf_insn insnsi[0];
- struct work_struct work;
};
};
@@ -353,6 +338,26 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
+#ifdef CONFIG_DEBUG_SET_MODULE_RONX
+static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
+{
+ set_memory_ro((unsigned long)fp, fp->pages);
+}
+
+static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
+{
+ set_memory_rw((unsigned long)fp, fp->pages);
+}
+#else
+static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
+{
+}
+
+static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
+{
+}
+#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
+
int sk_filter(struct sock *sk, struct sk_buff *skb);
void bpf_prog_select_runtime(struct bpf_prog *fp);
@@ -361,6 +366,17 @@ void bpf_prog_free(struct bpf_prog *fp);
int bpf_convert_filter(struct sock_filter *prog, int len,
struct bpf_insn *new_prog, int *new_len);
+struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
+struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
+ gfp_t gfp_extra_flags);
+void __bpf_prog_free(struct bpf_prog *fp);
+
+static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
+{
+ bpf_prog_unlock_ro(fp);
+ __bpf_prog_free(fp);
+}
+
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
void bpf_prog_destroy(struct bpf_prog *fp);
@@ -377,6 +393,38 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct bpf_prog *fp);
+#ifdef CONFIG_BPF_JIT
+typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
+
+struct bpf_binary_header *
+bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
+ unsigned int alignment,
+ bpf_jit_fill_hole_t bpf_fill_ill_insns);
+void bpf_jit_binary_free(struct bpf_binary_header *hdr);
+
+void bpf_jit_compile(struct bpf_prog *fp);
+void bpf_jit_free(struct bpf_prog *fp);
+
+static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
+ u32 pass, void *image)
+{
+ pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
+ flen, proglen, pass, image);
+ if (image)
+ print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
+ 16, 1, image, proglen, false);
+}
+#else
+static inline void bpf_jit_compile(struct bpf_prog *fp)
+{
+}
+
+static inline void bpf_jit_free(struct bpf_prog *fp)
+{
+ bpf_prog_unlock_free(fp);
+}
+#endif /* CONFIG_BPF_JIT */
+
#define BPF_ANC BIT(15)
static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
@@ -424,36 +472,6 @@ static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
return bpf_internal_load_pointer_neg_helper(skb, k, size);
}
-#ifdef CONFIG_BPF_JIT
-#include <stdarg.h>
-#include <linux/linkage.h>
-#include <linux/printk.h>
-
-void bpf_jit_compile(struct bpf_prog *fp);
-void bpf_jit_free(struct bpf_prog *fp);
-
-static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
- u32 pass, void *image)
-{
- pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
- flen, proglen, pass, image);
- if (image)
- print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
- 16, 1, image, proglen, false);
-}
-#else
-#include <linux/slab.h>
-
-static inline void bpf_jit_compile(struct bpf_prog *fp)
-{
-}
-
-static inline void bpf_jit_free(struct bpf_prog *fp)
-{
- kfree(fp);
-}
-#endif /* CONFIG_BPF_JIT */
-
static inline int bpf_tell_extensions(void)
{
return SKF_AD_MAX;
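
[Editor's note: the new BPF_LD_IMM64/BPF_LD_MAP_FD macros expand to two bpf_insn slots. Below is a sketch of emitting them; map_fd is an assumed process-local map file descriptor, and BPF_MOV64_IMM()/BPF_EXIT_INSN() are the pre-existing helper macros in this header.]

static void example_emit(struct bpf_insn *insn, int map_fd)
{
	struct bpf_insn prog[] = {
		BPF_LD_MAP_FD(BPF_REG_1, map_fd),  /* r1 = pseudo map fd (2 slots) */
		BPF_MOV64_IMM(BPF_REG_0, 0),       /* r0 = 0 */
		BPF_EXIT_INSN(),                   /* return r0 */
	};

	memcpy(insn, prog, sizeof(prog));
}
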
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 6bb5e3f2a3b4..f0b0edbf55a9 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -102,6 +102,15 @@ enum {
FTRACE_OPS_FL_DELETED = 1 << 8,
};
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* The hash used to know what functions callbacks trace */
+struct ftrace_ops_hash {
+ struct ftrace_hash *notrace_hash;
+ struct ftrace_hash *filter_hash;
+ struct mutex regex_lock;
+};
+#endif
+
/*
* Note, ftrace_ops can be referenced outside of RCU protection.
* (Although, for perf, the control ops prevent that). If ftrace_ops is
@@ -121,10 +130,9 @@ struct ftrace_ops {
int __percpu *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
int nr_trampolines;
- struct ftrace_hash *notrace_hash;
- struct ftrace_hash *filter_hash;
+ struct ftrace_ops_hash local_hash;
+ struct ftrace_ops_hash *func_hash;
struct ftrace_hash *tramp_hash;
- struct mutex regex_lock;
unsigned long trampoline;
#endif
};
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index b7ce0c64c6f3..12f146fa6604 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -16,8 +16,6 @@ struct device;
*/
struct gpio_desc;
-#ifdef CONFIG_GPIOLIB
-
#define GPIOD_FLAGS_BIT_DIR_SET BIT(0)
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
@@ -34,64 +32,38 @@ enum gpiod_flags {
GPIOD_FLAGS_BIT_DIR_VAL,
};
+#ifdef CONFIG_GPIOLIB
+
/* Acquire and dispose GPIOs */
struct gpio_desc *__must_check __gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
-#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
-#define gpiod_get(varargs...) __gpiod_get(varargs, 0)
struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags);
-#define __gpiod_get_index(dev, con_id, index, flags, ...) \
- __gpiod_get_index(dev, con_id, index, flags)
-#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, 0)
struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
-#define __gpiod_get_optional(dev, con_id, flags, ...) \
- __gpiod_get_optional(dev, con_id, flags)
-#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, 0)
struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
const char *con_id,
unsigned int index,
enum gpiod_flags flags);
-#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \
- __gpiod_get_index_optional(dev, con_id, index, flags)
-#define gpiod_get_index_optional(varargs...) \
- __gpiod_get_index_optional(varargs, 0)
-
void gpiod_put(struct gpio_desc *desc);
struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
-#define __devm_gpiod_get(dev, con_id, flags, ...) \
- __devm_gpiod_get(dev, con_id, flags)
-#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, 0)
struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags);
-#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \
- __devm_gpiod_get_index(dev, con_id, index, flags)
-#define devm_gpiod_get_index(varargs...) __devm_gpiod_get_index(varargs, 0)
struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
-#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \
- __devm_gpiod_get_optional(dev, con_id, flags)
-#define devm_gpiod_get_optional(varargs...) \
- __devm_gpiod_get_optional(varargs, 0)
struct gpio_desc *__must_check
__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags);
-#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \
- __devm_gpiod_get_index_optional(dev, con_id, index, flags)
-#define devm_gpiod_get_index_optional(varargs...) \
- __devm_gpiod_get_index_optional(varargs, 0)
-
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
int gpiod_get_direction(const struct gpio_desc *desc);
@@ -124,27 +96,31 @@ int desc_to_gpio(const struct gpio_desc *desc);
#else /* CONFIG_GPIOLIB */
-static inline struct gpio_desc *__must_check gpiod_get(struct device *dev,
- const char *con_id)
+static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
-static inline struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
- const char *con_id,
- unsigned int idx)
+static inline struct gpio_desc *__must_check
+__gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-gpiod_get_optional(struct device *dev, const char *con_id)
+__gpiod_get_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-gpiod_get_index_optional(struct device *dev, const char *con_id,
- unsigned int index)
+__gpiod_get_index_optional(struct device *dev, const char *con_id,
+ unsigned int index, enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
@@ -157,28 +133,33 @@ static inline void gpiod_put(struct gpio_desc *desc)
WARN_ON(1);
}
-static inline struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
- const char *con_id)
+static inline struct gpio_desc *__must_check
+__devm_gpiod_get(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline
-struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
- const char *con_id,
- unsigned int idx)
+struct gpio_desc *__must_check
+__devm_gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-devm_gpiod_get_optional(struct device *dev, const char *con_id)
+__devm_gpiod_get_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
- unsigned int index)
+__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
+ unsigned int index, enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
@@ -303,9 +284,43 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
return -EINVAL;
}
-
#endif /* CONFIG_GPIOLIB */
+/*
+ * Vararg-hacks! This is done to transition the kernel to always pass
+ * the options flags argument to the below functions. During a transition
+ * phase these vararg macros make both old-and-newstyle code compile,
+ * but when all calls to the elder API are removed, these should go away
+ * and the __gpiod_get() etc functions above be renamed just gpiod_get()
+ * etc.
+ */
+#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
+#define gpiod_get(varargs...) __gpiod_get(varargs, 0)
+#define __gpiod_get_index(dev, con_id, index, flags, ...) \
+ __gpiod_get_index(dev, con_id, index, flags)
+#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, 0)
+#define __gpiod_get_optional(dev, con_id, flags, ...) \
+ __gpiod_get_optional(dev, con_id, flags)
+#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, 0)
+#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \
+ __gpiod_get_index_optional(dev, con_id, index, flags)
+#define gpiod_get_index_optional(varargs...) \
+ __gpiod_get_index_optional(varargs, 0)
+#define __devm_gpiod_get(dev, con_id, flags, ...) \
+ __devm_gpiod_get(dev, con_id, flags)
+#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, 0)
+#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \
+ __devm_gpiod_get_index(dev, con_id, index, flags)
+#define devm_gpiod_get_index(varargs...) __devm_gpiod_get_index(varargs, 0)
+#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \
+ __devm_gpiod_get_optional(dev, con_id, flags)
+#define devm_gpiod_get_optional(varargs...) \
+ __devm_gpiod_get_optional(varargs, 0)
+#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \
+ __devm_gpiod_get_index_optional(dev, con_id, index, flags)
+#define devm_gpiod_get_index_optional(varargs...) \
+ __devm_gpiod_get_index_optional(varargs, 0)
+
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
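
[Editor's note: during the transition phase the vararg macros above let both call styles compile. A hedged example with hypothetical consumer names "reset" and "enable":]

static int example_probe(struct device *dev)
{
	/* old two-argument style: the macro appends a trailing 0 (GPIOD_ASIS) */
	struct gpio_desc *reset = gpiod_get(dev, "reset");
	/* new three-argument style: flags are passed through unchanged */
	struct gpio_desc *enable = gpiod_get(dev, "enable", GPIOD_OUT_LOW);

	if (IS_ERR(reset) || IS_ERR(enable))
		return -ENODEV;
	return 0;
}
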
diff --git a/include/linux/hash.h b/include/linux/hash.h
index bd1754c7ecef..d0494c399392 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -37,6 +37,9 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits)
{
u64 hash = val;
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+ hash = hash * GOLDEN_RATIO_PRIME_64;
+#else
/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
u64 n = hash;
n <<= 18;
@@ -51,6 +54,7 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits)
hash += n;
n <<= 2;
hash += n;
+#endif
/* High bits are more random, so use them. */
return hash >> (64 - bits);
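
[Editor's note: either branch of hash_64() above, the single multiply by GOLDEN_RATIO_PRIME_64 or the shift-and-add chain, feeds callers like this hypothetical bucket lookup, which keeps only the top 'bits' bits of the product.]

#define EXAMPLE_HASH_BITS	8

static struct hlist_head example_table[1 << EXAMPLE_HASH_BITS];

static struct hlist_head *example_bucket(u64 key)
{
	return &example_table[hash_64(key, EXAMPLE_HASH_BITS)];
}
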
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index a95efeb53a8b..b556e0ab946f 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -577,20 +577,4 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node
}
#endif /* CONFIG_OF */
-#ifdef CONFIG_ACPI
-void acpi_i2c_register_devices(struct i2c_adapter *adap);
-#else
-static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { }
-#endif /* CONFIG_ACPI */
-
-#ifdef CONFIG_ACPI_I2C_OPREGION
-int acpi_i2c_install_space_handler(struct i2c_adapter *adapter);
-void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter);
-#else
-static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
-{ }
-static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
-{ return 0; }
-#endif /* CONFIG_ACPI_I2C_OPREGION */
-
#endif /* _LINUX_I2C_H */
diff --git a/include/linux/i82593.h b/include/linux/i82593.h
deleted file mode 100644
index afac5c7a323d..000000000000
--- a/include/linux/i82593.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Definitions for Intel 82593 CSMA/CD Core LAN Controller
- * The definitions are taken from the 1992 users manual with Intel
- * order number 297125-001.
- *
- * /usr/src/pc/RCS/i82593.h,v 1.1 1996/07/17 15:23:12 root Exp
- *
- * Copyright 1994, Anders Klemets <klemets@it.kth.se>
- *
- * HISTORY
- * i82593.h,v
- * Revision 1.4 2005/11/4 09:15:00 baroniunas
- * Modified copyright with permission of author as follows:
- *
- * "If I82539.H is the only file with my copyright statement
- * that is included in the Source Forge project, then you have
- * my approval to change the copyright statement to be a GPL
- * license, in the way you proposed on October 10."
- *
- * Revision 1.1 1996/07/17 15:23:12 root
- * Initial revision
- *
- * Revision 1.3 1995/04/05 15:13:58 adj
- * Initial alpha release
- *
- * Revision 1.2 1994/06/16 23:57:31 klemets
- * Mirrored all the fields in the configuration block.
- *
- * Revision 1.1 1994/06/02 20:25:34 klemets
- * Initial revision
- *
- *
- */
-#ifndef _I82593_H
-#define _I82593_H
-
-/* Intel 82593 CSMA/CD Core LAN Controller */
-
-/* Port 0 Command Register definitions */
-
-/* Execution operations */
-#define OP0_NOP 0 /* CHNL = 0 */
-#define OP0_SWIT_TO_PORT_1 0 /* CHNL = 1 */
-#define OP0_IA_SETUP 1
-#define OP0_CONFIGURE 2
-#define OP0_MC_SETUP 3
-#define OP0_TRANSMIT 4
-#define OP0_TDR 5
-#define OP0_DUMP 6
-#define OP0_DIAGNOSE 7
-#define OP0_TRANSMIT_NO_CRC 9
-#define OP0_RETRANSMIT 12
-#define OP0_ABORT 13
-/* Reception operations */
-#define OP0_RCV_ENABLE 8
-#define OP0_RCV_DISABLE 10
-#define OP0_STOP_RCV 11
-/* Status pointer control operations */
-#define OP0_FIX_PTR 15 /* CHNL = 1 */
-#define OP0_RLS_PTR 15 /* CHNL = 0 */
-#define OP0_RESET 14
-
-#define CR0_CHNL (1 << 4) /* 0=Channel 0, 1=Channel 1 */
-#define CR0_STATUS_0 0x00
-#define CR0_STATUS_1 0x20
-#define CR0_STATUS_2 0x40
-#define CR0_STATUS_3 0x60
-#define CR0_INT_ACK (1 << 7) /* 0=No ack, 1=acknowledge */
-
-/* Port 0 Status Register definitions */
-
-#define SR0_NO_RESULT 0 /* dummy */
-#define SR0_EVENT_MASK 0x0f
-#define SR0_IA_SETUP_DONE 1
-#define SR0_CONFIGURE_DONE 2
-#define SR0_MC_SETUP_DONE 3
-#define SR0_TRANSMIT_DONE 4
-#define SR0_TDR_DONE 5
-#define SR0_DUMP_DONE 6
-#define SR0_DIAGNOSE_PASSED 7
-#define SR0_TRANSMIT_NO_CRC_DONE 9
-#define SR0_RETRANSMIT_DONE 12
-#define SR0_EXECUTION_ABORTED 13
-#define SR0_END_OF_FRAME 8
-#define SR0_RECEPTION_ABORTED 10
-#define SR0_DIAGNOSE_FAILED 15
-#define SR0_STOP_REG_HIT 11
-
-#define SR0_CHNL (1 << 4)
-#define SR0_EXECUTION (1 << 5)
-#define SR0_RECEPTION (1 << 6)
-#define SR0_INTERRUPT (1 << 7)
-#define SR0_BOTH_RX_TX (SR0_EXECUTION | SR0_RECEPTION)
-
-#define SR3_EXEC_STATE_MASK 0x03
-#define SR3_EXEC_IDLE 0
-#define SR3_TX_ABORT_IN_PROGRESS 1
-#define SR3_EXEC_ACTIVE 2
-#define SR3_ABORT_IN_PROGRESS 3
-#define SR3_EXEC_CHNL (1 << 2)
-#define SR3_STP_ON_NO_RSRC (1 << 3)
-#define SR3_RCVING_NO_RSRC (1 << 4)
-#define SR3_RCV_STATE_MASK 0x60
-#define SR3_RCV_IDLE 0x00
-#define SR3_RCV_READY 0x20
-#define SR3_RCV_ACTIVE 0x40
-#define SR3_RCV_STOP_IN_PROG 0x60
-#define SR3_RCV_CHNL (1 << 7)
-
-/* Port 1 Command Register definitions */
-
-#define OP1_NOP 0
-#define OP1_SWIT_TO_PORT_0 1
-#define OP1_INT_DISABLE 2
-#define OP1_INT_ENABLE 3
-#define OP1_SET_TS 5
-#define OP1_RST_TS 7
-#define OP1_POWER_DOWN 8
-#define OP1_RESET_RING_MNGMT 11
-#define OP1_RESET 14
-#define OP1_SEL_RST 15
-
-#define CR1_STATUS_4 0x00
-#define CR1_STATUS_5 0x20
-#define CR1_STATUS_6 0x40
-#define CR1_STOP_REG_UPDATE (1 << 7)
-
-/* Receive frame status bits */
-
-#define RX_RCLD (1 << 0)
-#define RX_IA_MATCH (1 << 1)
-#define RX_NO_AD_MATCH (1 << 2)
-#define RX_NO_SFD (1 << 3)
-#define RX_SRT_FRM (1 << 7)
-#define RX_OVRRUN (1 << 8)
-#define RX_ALG_ERR (1 << 10)
-#define RX_CRC_ERR (1 << 11)
-#define RX_LEN_ERR (1 << 12)
-#define RX_RCV_OK (1 << 13)
-#define RX_TYP_LEN (1 << 15)
-
-/* Transmit status bits */
-
-#define TX_NCOL_MASK 0x0f
-#define TX_FRTL (1 << 4)
-#define TX_MAX_COL (1 << 5)
-#define TX_HRT_BEAT (1 << 6)
-#define TX_DEFER (1 << 7)
-#define TX_UND_RUN (1 << 8)
-#define TX_LOST_CTS (1 << 9)
-#define TX_LOST_CRS (1 << 10)
-#define TX_LTCOL (1 << 11)
-#define TX_OK (1 << 13)
-#define TX_COLL (1 << 15)
-
-struct i82593_conf_block {
- u_char fifo_limit : 4,
- forgnesi : 1,
- fifo_32 : 1,
- d6mod : 1,
- throttle_enb : 1;
- u_char throttle : 6,
- cntrxint : 1,
- contin : 1;
- u_char addr_len : 3,
- acloc : 1,
- preamb_len : 2,
- loopback : 2;
- u_char lin_prio : 3,
- tbofstop : 1,
- exp_prio : 3,
- bof_met : 1;
- u_char : 4,
- ifrm_spc : 4;
- u_char : 5,
- slottim_low : 3;
- u_char slottim_hi : 3,
- : 1,
- max_retr : 4;
- u_char prmisc : 1,
- bc_dis : 1,
- : 1,
- crs_1 : 1,
- nocrc_ins : 1,
- crc_1632 : 1,
- : 1,
- crs_cdt : 1;
- u_char cs_filter : 3,
- crs_src : 1,
- cd_filter : 3,
- : 1;
- u_char : 2,
- min_fr_len : 6;
- u_char lng_typ : 1,
- lng_fld : 1,
- rxcrc_xf : 1,
- artx : 1,
- sarec : 1,
- tx_jabber : 1, /* why is this called max_len in the manual? */
- hash_1 : 1,
- lbpkpol : 1;
- u_char : 6,
- fdx : 1,
- : 1;
- u_char dummy_6 : 6, /* supposed to be ones */
- mult_ia : 1,
- dis_bof : 1;
- u_char dummy_1 : 1, /* supposed to be one */
- tx_ifs_retrig : 2,
- mc_all : 1,
- rcv_mon : 2,
- frag_acpt : 1,
- tstrttrs : 1;
- u_char fretx : 1,
- runt_eop : 1,
- hw_sw_pin : 1,
- big_endn : 1,
- syncrqs : 1,
- sttlen : 1,
- tx_eop : 1,
- rx_eop : 1;
- u_char rbuf_size : 5,
- rcvstop : 1,
- : 2;
-};
-
-#define I82593_MAX_MULTICAST_ADDRESSES 128 /* Hardware hashed filter */
-
-#endif /* _I82593_H */
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 6b2c7cf352a5..6f6929ea8a0c 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -60,6 +60,7 @@ struct macvlan_dev {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
+ unsigned int macaddr_count;
};
static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index f47550d75f85..2c677afeea47 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -39,6 +39,7 @@ static inline struct igmpv3_query *
extern int sysctl_igmp_max_memberships;
extern int sysctl_igmp_max_msf;
+extern int sysctl_igmp_qrv;
struct ip_sf_socklist {
unsigned int sl_max;
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 4b79ffe7b188..fa76c79a52a1 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -84,10 +84,12 @@ static inline void iio_trigger_put(struct iio_trigger *trig)
put_device(&trig->dev);
}
-static inline void iio_trigger_get(struct iio_trigger *trig)
+static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
{
get_device(&trig->dev);
__module_get(trig->ops->owner);
+
+ return trig;
}
/**
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index d5b50a19463c..0dae71e9971c 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -159,7 +159,11 @@ typedef struct journal_header_s
* journal_block_tag (in the descriptor). The other h_chksum* fields are
* not used.
*
- * Checksum v1 and v2 are mutually exclusive features.
+ * If FEATURE_INCOMPAT_CSUM_V3 is set, the descriptor block uses
+ * journal_block_tag3_t to store a full 32-bit checksum. Everything else
+ * is the same as v2.
+ *
+ * Checksum v1, v2, and v3 are mutually exclusive features.
*/
struct commit_header {
__be32 h_magic;
@@ -179,6 +183,14 @@ struct commit_header {
* raw struct shouldn't be used for pointer math or sizeof() - use
* journal_tag_bytes(journal) instead to compute this.
*/
+typedef struct journal_block_tag3_s
+{
+ __be32 t_blocknr; /* The on-disk block number */
+ __be32 t_flags; /* See below */
+ __be32 t_blocknr_high; /* most-significant high 32bits. */
+ __be32 t_checksum; /* crc32c(uuid+seq+block) */
+} journal_block_tag3_t;
+
typedef struct journal_block_tag_s
{
__be32 t_blocknr; /* The on-disk block number */
@@ -187,9 +199,6 @@ typedef struct journal_block_tag_s
__be32 t_blocknr_high; /* most-significant high 32bits. */
} journal_block_tag_t;
-#define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
-#define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t))
-
/* Tail of descriptor block, for checksumming */
struct jbd2_journal_block_tail {
__be32 t_checksum; /* crc32c(uuid+descr_block) */
@@ -284,6 +293,7 @@ typedef struct journal_superblock_s
#define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002
#define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004
#define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008
+#define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010
/* Features known to this kernel version: */
#define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM
@@ -291,7 +301,8 @@ typedef struct journal_superblock_s
#define JBD2_KNOWN_INCOMPAT_FEATURES (JBD2_FEATURE_INCOMPAT_REVOKE | \
JBD2_FEATURE_INCOMPAT_64BIT | \
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
- JBD2_FEATURE_INCOMPAT_CSUM_V2)
+ JBD2_FEATURE_INCOMPAT_CSUM_V2 | \
+ JBD2_FEATURE_INCOMPAT_CSUM_V3)
#ifdef __KERNEL__
@@ -1296,6 +1307,15 @@ static inline int tid_geq(tid_t x, tid_t y)
extern int jbd2_journal_blocks_per_page(struct inode *inode);
extern size_t journal_tag_bytes(journal_t *journal);
+static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
+{
+ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) ||
+ JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
+ return 1;
+
+ return 0;
+}
+
/*
* We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
* transaction control blocks.
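
A short standalone sketch (illustrative only, not part of the patch) of what the new CSUM_V3 feature bit means for descriptor blocks: with v3 the tag format above (journal_block_tag3_t) carries a full 32-bit crc32c per block, whereas the older tag only has room for a truncated checksum. The tag3 layout is copied from this header into plain userspace types; the v2 truncation detail is an inference from the comment that v3 exists to store the full value.

#include <stdint.h>
#include <stdio.h>

#define JBD2_FEATURE_INCOMPAT_CSUM_V2	0x00000008
#define JBD2_FEATURE_INCOMPAT_CSUM_V3	0x00000010

/* Same layout as journal_block_tag3_t above, in userspace types. */
typedef struct {
	uint32_t t_blocknr;
	uint32_t t_flags;
	uint32_t t_blocknr_high;
	uint32_t t_checksum;	/* full crc32c(uuid+seq+block) */
} tag3_t;

static const char *descriptor_tag_format(uint32_t incompat)
{
	if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V3)
		return "journal_block_tag3_t (full 32-bit checksum)";
	if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2)
		return "journal_block_tag_t (truncated checksum)";
	return "journal_block_tag_t (no per-block checksum)";
}

int main(void)
{
	printf("sizeof(tag3) = %zu bytes\n", sizeof(tag3_t));
	printf("v3 journal uses: %s\n",
	       descriptor_tag_format(JBD2_FEATURE_INCOMPAT_CSUM_V3));
	return 0;
}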
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 1f44466c1e9d..c367cbdf73ab 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -258,23 +258,11 @@ extern unsigned long preset_lpj;
#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
#endif
#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
-#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
TICK_NSEC -1) / (u64)TICK_NSEC))
#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
TICK_NSEC -1) / (u64)TICK_NSEC))
-#define USEC_CONVERSION \
- ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
- TICK_NSEC -1) / (u64)TICK_NSEC))
-/*
- * USEC_ROUND is used in the timeval to jiffie conversion. See there
- * for more details. It is the scaled resolution rounding value. Note
- * that it is a 64-bit value. Since, when it is applied, we are already
- * in jiffies (albit scaled), it is nothing but the bits we will shift
- * off.
- */
-#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
/*
* The maximum jiffie value is (MAX_INT >> 1). Here we translate that
* into seconds. The 64-bit case will overflow if we are not careful,
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 6a599dce7f9d..e43686472197 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
+#include <linux/timer.h>
#include <linux/workqueue.h>
struct device;
@@ -68,7 +69,7 @@ struct led_classdev {
const char *default_trigger; /* Trigger to use */
unsigned long blink_delay_on, blink_delay_off;
- struct delayed_work blink_work;
+ struct timer_list blink_timer;
int blink_brightness;
struct work_struct set_brightness_work;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 071f6b234604..b2f8ab9a57c4 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -38,6 +38,7 @@
#include <linux/completion.h>
#include <linux/radix-tree.h>
#include <linux/cpu_rmap.h>
+#include <linux/crash_dump.h>
#include <linux/atomic.h>
@@ -184,19 +185,24 @@ enum {
MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 9,
MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10,
MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11,
+ MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12,
+ MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13
};
enum {
MLX4_DEV_CAP_64B_EQE_ENABLED = 1LL << 0,
- MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1
+ MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1,
+ MLX4_DEV_CAP_CQE_STRIDE_ENABLED = 1LL << 2,
+ MLX4_DEV_CAP_EQE_STRIDE_ENABLED = 1LL << 3
};
enum {
- MLX4_USER_DEV_CAP_64B_CQE = 1L << 0
+ MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0
};
enum {
- MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0
+ MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0,
+ MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1
};
@@ -209,6 +215,7 @@ enum {
MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9,
MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10,
MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11,
+ MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28,
};
enum mlx4_event {
@@ -700,6 +707,7 @@ struct mlx4_dev {
u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
struct mlx4_vf_dev *dev_vfs;
+ int nvfs[MLX4_MAX_PORTS + 1];
};
struct mlx4_eqe {
@@ -1196,6 +1204,9 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id);
int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+ int port, int qpn, u16 prio, u64 *reg_id);
+
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
int i, int val);
@@ -1275,7 +1286,7 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
/* Returns true if running in low memory profile (kdump kernel) */
static inline bool mlx4_low_memory_profile(void)
{
- return reset_devices;
+ return is_kdump_kernel();
}
#endif /* MLX4_DEVICE_H */
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 7040dc98ff8b..5f4e36cf0091 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -56,7 +56,8 @@ enum mlx4_qp_optpar {
MLX4_QP_OPTPAR_RNR_RETRY = 1 << 13,
MLX4_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16,
- MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20
+ MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20,
+ MLX4_QP_OPTPAR_VLAN_STRIPPING = 1 << 21,
};
enum mlx4_qp_state {
@@ -423,13 +424,20 @@ struct mlx4_wqe_inline_seg {
enum mlx4_update_qp_attr {
MLX4_UPDATE_QP_SMAC = 1 << 0,
+ MLX4_UPDATE_QP_VSD = 1 << 2,
+ MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1
+};
+
+enum mlx4_update_qp_params_flags {
+ MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 0,
};
struct mlx4_update_qp_params {
u8 smac_index;
+ u32 flags;
};
-int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
enum mlx4_update_qp_attr attr,
struct mlx4_update_qp_params *params);
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 334947151dfc..1d67fd32e71c 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -44,6 +44,50 @@
#error Host endianness not defined
#endif
+/* helper macros */
+#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
+#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
+#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld)))
+#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
+#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
+#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
+#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
+#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
+#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
+
+#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
+#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
+#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
+
+/* insert a value to a struct */
+#define MLX5_SET(typ, p, fld, v) do { \
+ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
+ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
+ (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
+ << __mlx5_dw_bit_off(typ, fld))); \
+} while (0)
+
+#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
+__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
+__mlx5_mask(typ, fld))
+
+#define MLX5_GET_PR(typ, p, fld) ({ \
+ u32 ___t = MLX5_GET(typ, p, fld); \
+ pr_debug(#fld " = 0x%x\n", ___t); \
+ ___t; \
+})
+
+#define MLX5_SET64(typ, p, fld, v) do { \
+ BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
+ BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
+ *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
+} while (0)
+
+#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
+
enum {
MLX5_MAX_COMMANDS = 32,
MLX5_CMD_DATA_BLOCK_SIZE = 512,
@@ -71,6 +115,11 @@ enum {
};
enum {
+ MLX5_MIN_PKEY_TABLE_SIZE = 128,
+ MLX5_MAX_LOG_PKEY_TABLE = 5,
+};
+
+enum {
MLX5_PERM_LOCAL_READ = 1 << 2,
MLX5_PERM_LOCAL_WRITE = 1 << 3,
MLX5_PERM_REMOTE_READ = 1 << 4,
@@ -184,10 +233,10 @@ enum {
MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32,
+ MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38,
MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39,
MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
- MLX5_DEV_CAP_FLAG_DCT = 1LL << 41,
MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
};
@@ -243,10 +292,14 @@ enum {
};
enum {
- MLX5_CAP_OFF_DCT = 41,
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
+enum {
+ HCA_CAP_OPMOD_GET_MAX = 0,
+ HCA_CAP_OPMOD_GET_CUR = 1,
+};
+
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
@@ -274,101 +327,6 @@ struct mlx5_cmd_query_adapter_mbox_out {
u8 vsd_psid[16];
};
-struct mlx5_hca_cap {
- u8 rsvd1[16];
- u8 log_max_srq_sz;
- u8 log_max_qp_sz;
- u8 rsvd2;
- u8 log_max_qp;
- u8 log_max_strq_sz;
- u8 log_max_srqs;
- u8 rsvd4[2];
- u8 rsvd5;
- u8 log_max_cq_sz;
- u8 rsvd6;
- u8 log_max_cq;
- u8 log_max_eq_sz;
- u8 log_max_mkey;
- u8 rsvd7;
- u8 log_max_eq;
- u8 max_indirection;
- u8 log_max_mrw_sz;
- u8 log_max_bsf_list_sz;
- u8 log_max_klm_list_sz;
- u8 rsvd_8_0;
- u8 log_max_ra_req_dc;
- u8 rsvd_8_1;
- u8 log_max_ra_res_dc;
- u8 rsvd9;
- u8 log_max_ra_req_qp;
- u8 rsvd10;
- u8 log_max_ra_res_qp;
- u8 rsvd11[4];
- __be16 max_qp_count;
- __be16 rsvd12;
- u8 rsvd13;
- u8 local_ca_ack_delay;
- u8 rsvd14;
- u8 num_ports;
- u8 log_max_msg;
- u8 rsvd15[3];
- __be16 stat_rate_support;
- u8 rsvd16[2];
- __be64 flags;
- u8 rsvd17;
- u8 uar_sz;
- u8 rsvd18;
- u8 log_pg_sz;
- __be16 bf_log_bf_reg_size;
- u8 rsvd19[4];
- __be16 max_desc_sz_sq;
- u8 rsvd20[2];
- __be16 max_desc_sz_rq;
- u8 rsvd21[2];
- __be16 max_desc_sz_sq_dc;
- __be32 max_qp_mcg;
- u8 rsvd22[3];
- u8 log_max_mcg;
- u8 rsvd23;
- u8 log_max_pd;
- u8 rsvd24;
- u8 log_max_xrcd;
- u8 rsvd25[42];
- __be16 log_uar_page_sz;
- u8 rsvd26[28];
- u8 log_max_atomic_size_qp;
- u8 rsvd27[2];
- u8 log_max_atomic_size_dc;
- u8 rsvd28[76];
-};
-
-
-struct mlx5_cmd_query_hca_cap_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-
-struct mlx5_cmd_query_hca_cap_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
- struct mlx5_hca_cap hca_cap;
-};
-
-
-struct mlx5_cmd_set_hca_cap_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
- struct mlx5_hca_cap hca_cap;
-};
-
-
-struct mlx5_cmd_set_hca_cap_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
-};
-
-
struct mlx5_cmd_init_hca_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd0[2];
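
The MLX5_SET()/MLX5_GET() helpers added at the top of this file pack values into big-endian 32-bit dwords at bit offsets derived from the mlx5_ifc _bits structs. A minimal userspace re-implementation of the same dword/shift/mask arithmetic is shown below; it is a sketch only, with the field positions hard-coded instead of being derived from a _bits layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t get_be32(const uint8_t *p, unsigned int dw)
{
	const uint8_t *b = p + dw * 4;

	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | b[3];
}

static void put_be32(uint8_t *p, unsigned int dw, uint32_t v)
{
	uint8_t *b = p + dw * 4;

	b[0] = v >> 24; b[1] = v >> 16; b[2] = v >> 8; b[3] = v;
}

/* Insert a 'bits'-wide value at absolute bit offset 'off', using the same
 * big-endian bit numbering as __mlx5_dw_off()/__mlx5_dw_bit_off(). */
static void set_field(uint8_t *buf, unsigned int off, unsigned int bits,
		      uint32_t val)
{
	unsigned int dw = off / 32;
	unsigned int shift = 32 - bits - (off & 0x1f);
	uint32_t mask = (bits == 32) ? 0xffffffff : ((1u << bits) - 1);
	uint32_t cur = get_be32(buf, dw);

	cur = (cur & ~(mask << shift)) | ((val & mask) << shift);
	put_be32(buf, dw, cur);
}

static uint32_t get_field(const uint8_t *buf, unsigned int off,
			  unsigned int bits)
{
	unsigned int dw = off / 32;
	unsigned int shift = 32 - bits - (off & 0x1f);
	uint32_t mask = (bits == 32) ? 0xffffffff : ((1u << bits) - 1);

	return (get_be32(buf, dw) >> shift) & mask;
}

int main(void)
{
	uint8_t mbox[16];

	memset(mbox, 0, sizeof(mbox));
	/* 16-bit "opcode" in bits 0..15, 16-bit "op_mod" in bits 48..63 */
	set_field(mbox, 0, 16, 0x100);
	set_field(mbox, 48, 16, 1);
	printf("opcode=0x%x op_mod=%u\n",
	       (unsigned int)get_field(mbox, 0, 16),
	       (unsigned int)get_field(mbox, 48, 16));
	return 0;
}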
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index b88e9b46d957..246310dc8bef 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -44,6 +44,7 @@
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
+#include <linux/mlx5/mlx5_ifc.h>
enum {
MLX5_BOARD_ID_LEN = 64,
@@ -99,81 +100,6 @@ enum {
};
enum {
- MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
- MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
- MLX5_CMD_OP_INIT_HCA = 0x102,
- MLX5_CMD_OP_TEARDOWN_HCA = 0x103,
- MLX5_CMD_OP_ENABLE_HCA = 0x104,
- MLX5_CMD_OP_DISABLE_HCA = 0x105,
- MLX5_CMD_OP_QUERY_PAGES = 0x107,
- MLX5_CMD_OP_MANAGE_PAGES = 0x108,
- MLX5_CMD_OP_SET_HCA_CAP = 0x109,
-
- MLX5_CMD_OP_CREATE_MKEY = 0x200,
- MLX5_CMD_OP_QUERY_MKEY = 0x201,
- MLX5_CMD_OP_DESTROY_MKEY = 0x202,
- MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
-
- MLX5_CMD_OP_CREATE_EQ = 0x301,
- MLX5_CMD_OP_DESTROY_EQ = 0x302,
- MLX5_CMD_OP_QUERY_EQ = 0x303,
-
- MLX5_CMD_OP_CREATE_CQ = 0x400,
- MLX5_CMD_OP_DESTROY_CQ = 0x401,
- MLX5_CMD_OP_QUERY_CQ = 0x402,
- MLX5_CMD_OP_MODIFY_CQ = 0x403,
-
- MLX5_CMD_OP_CREATE_QP = 0x500,
- MLX5_CMD_OP_DESTROY_QP = 0x501,
- MLX5_CMD_OP_RST2INIT_QP = 0x502,
- MLX5_CMD_OP_INIT2RTR_QP = 0x503,
- MLX5_CMD_OP_RTR2RTS_QP = 0x504,
- MLX5_CMD_OP_RTS2RTS_QP = 0x505,
- MLX5_CMD_OP_SQERR2RTS_QP = 0x506,
- MLX5_CMD_OP_2ERR_QP = 0x507,
- MLX5_CMD_OP_RTS2SQD_QP = 0x508,
- MLX5_CMD_OP_SQD2RTS_QP = 0x509,
- MLX5_CMD_OP_2RST_QP = 0x50a,
- MLX5_CMD_OP_QUERY_QP = 0x50b,
- MLX5_CMD_OP_CONF_SQP = 0x50c,
- MLX5_CMD_OP_MAD_IFC = 0x50d,
- MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
- MLX5_CMD_OP_SUSPEND_QP = 0x50f,
- MLX5_CMD_OP_UNSUSPEND_QP = 0x510,
- MLX5_CMD_OP_SQD2SQD_QP = 0x511,
- MLX5_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512,
- MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513,
- MLX5_CMD_OP_QUERY_QP_COUNTER_SET = 0x514,
-
- MLX5_CMD_OP_CREATE_PSV = 0x600,
- MLX5_CMD_OP_DESTROY_PSV = 0x601,
- MLX5_CMD_OP_QUERY_PSV = 0x602,
- MLX5_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603,
- MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604,
-
- MLX5_CMD_OP_CREATE_SRQ = 0x700,
- MLX5_CMD_OP_DESTROY_SRQ = 0x701,
- MLX5_CMD_OP_QUERY_SRQ = 0x702,
- MLX5_CMD_OP_ARM_RQ = 0x703,
- MLX5_CMD_OP_RESIZE_SRQ = 0x704,
-
- MLX5_CMD_OP_ALLOC_PD = 0x800,
- MLX5_CMD_OP_DEALLOC_PD = 0x801,
- MLX5_CMD_OP_ALLOC_UAR = 0x802,
- MLX5_CMD_OP_DEALLOC_UAR = 0x803,
-
- MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
- MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
-
-
- MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
- MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
-
- MLX5_CMD_OP_ACCESS_REG = 0x805,
- MLX5_CMD_OP_MAX = 0x810,
-};
-
-enum {
MLX5_REG_PCAP = 0x5001,
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
@@ -335,23 +261,30 @@ struct mlx5_port_caps {
int pkey_table_len;
};
-struct mlx5_caps {
+struct mlx5_general_caps {
u8 log_max_eq;
u8 log_max_cq;
u8 log_max_qp;
u8 log_max_mkey;
u8 log_max_pd;
u8 log_max_srq;
+ u8 log_max_strq;
+ u8 log_max_mrw_sz;
+ u8 log_max_bsf_list_size;
+ u8 log_max_klm_list_size;
u32 max_cqes;
int max_wqes;
+ u32 max_eqes;
+ u32 max_indirection;
int max_sq_desc_sz;
int max_rq_desc_sz;
+ int max_dc_sq_desc_sz;
u64 flags;
u16 stat_rate_support;
int log_max_msg;
int num_ports;
- int max_ra_res_qp;
- int max_ra_req_qp;
+ u8 log_max_ra_res_qp;
+ u8 log_max_ra_req_qp;
int max_srq_wqes;
int bf_reg_size;
int bf_regs_per_page;
@@ -363,6 +296,19 @@ struct mlx5_caps {
u8 log_max_mcg;
u32 max_qp_mcg;
int min_page_sz;
+ int pd_cap;
+ u32 max_qp_counters;
+ u32 pkey_table_size;
+ u8 log_max_ra_req_dc;
+ u8 log_max_ra_res_dc;
+ u32 uar_sz;
+ u8 min_log_pg_sz;
+ u8 log_max_xrcd;
+ u16 log_uar_page_sz;
+};
+
+struct mlx5_caps {
+ struct mlx5_general_caps gen;
};
struct mlx5_cmd_mailbox {
@@ -429,6 +375,16 @@ struct mlx5_core_mr {
u32 pd;
};
+enum mlx5_res_type {
+ MLX5_RES_QP,
+};
+
+struct mlx5_core_rsc_common {
+ enum mlx5_res_type res;
+ atomic_t refcount;
+ struct completion free;
+};
+
struct mlx5_core_srq {
u32 srqn;
int max;
@@ -695,6 +651,9 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
+int mlx5_cmd_status_to_err_v2(void *ptr);
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
+ u16 opmod);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@@ -751,7 +710,7 @@ int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
-void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type);
+void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
@@ -788,6 +747,7 @@ void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
+void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
new file mode 100644
index 000000000000..5f48b8f592c5
--- /dev/null
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 2014, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_IFC_H
+#define MLX5_IFC_H
+
+enum {
+ MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
+ MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
+ MLX5_CMD_OP_INIT_HCA = 0x102,
+ MLX5_CMD_OP_TEARDOWN_HCA = 0x103,
+ MLX5_CMD_OP_ENABLE_HCA = 0x104,
+ MLX5_CMD_OP_DISABLE_HCA = 0x105,
+ MLX5_CMD_OP_QUERY_PAGES = 0x107,
+ MLX5_CMD_OP_MANAGE_PAGES = 0x108,
+ MLX5_CMD_OP_SET_HCA_CAP = 0x109,
+ MLX5_CMD_OP_CREATE_MKEY = 0x200,
+ MLX5_CMD_OP_QUERY_MKEY = 0x201,
+ MLX5_CMD_OP_DESTROY_MKEY = 0x202,
+ MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
+ MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204,
+ MLX5_CMD_OP_CREATE_EQ = 0x301,
+ MLX5_CMD_OP_DESTROY_EQ = 0x302,
+ MLX5_CMD_OP_QUERY_EQ = 0x303,
+ MLX5_CMD_OP_GEN_EQE = 0x304,
+ MLX5_CMD_OP_CREATE_CQ = 0x400,
+ MLX5_CMD_OP_DESTROY_CQ = 0x401,
+ MLX5_CMD_OP_QUERY_CQ = 0x402,
+ MLX5_CMD_OP_MODIFY_CQ = 0x403,
+ MLX5_CMD_OP_CREATE_QP = 0x500,
+ MLX5_CMD_OP_DESTROY_QP = 0x501,
+ MLX5_CMD_OP_RST2INIT_QP = 0x502,
+ MLX5_CMD_OP_INIT2RTR_QP = 0x503,
+ MLX5_CMD_OP_RTR2RTS_QP = 0x504,
+ MLX5_CMD_OP_RTS2RTS_QP = 0x505,
+ MLX5_CMD_OP_SQERR2RTS_QP = 0x506,
+ MLX5_CMD_OP_2ERR_QP = 0x507,
+ MLX5_CMD_OP_2RST_QP = 0x50a,
+ MLX5_CMD_OP_QUERY_QP = 0x50b,
+ MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
+ MLX5_CMD_OP_CREATE_PSV = 0x600,
+ MLX5_CMD_OP_DESTROY_PSV = 0x601,
+ MLX5_CMD_OP_CREATE_SRQ = 0x700,
+ MLX5_CMD_OP_DESTROY_SRQ = 0x701,
+ MLX5_CMD_OP_QUERY_SRQ = 0x702,
+ MLX5_CMD_OP_ARM_RQ = 0x703,
+ MLX5_CMD_OP_RESIZE_SRQ = 0x704,
+ MLX5_CMD_OP_CREATE_DCT = 0x710,
+ MLX5_CMD_OP_DESTROY_DCT = 0x711,
+ MLX5_CMD_OP_DRAIN_DCT = 0x712,
+ MLX5_CMD_OP_QUERY_DCT = 0x713,
+ MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714,
+ MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
+ MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
+ MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
+ MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755,
+ MLX5_CMD_OP_QUERY_RCOE_ADDRESS = 0x760,
+ MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761,
+ MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
+ MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
+ MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
+ MLX5_CMD_OP_ALLOC_PD = 0x800,
+ MLX5_CMD_OP_DEALLOC_PD = 0x801,
+ MLX5_CMD_OP_ALLOC_UAR = 0x802,
+ MLX5_CMD_OP_DEALLOC_UAR = 0x803,
+ MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
+ MLX5_CMD_OP_ACCESS_REG = 0x805,
+ MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
+ MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
+ MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
+ MLX5_CMD_OP_MAD_IFC = 0x50d,
+ MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
+ MLX5_CMD_OP_SET_MAD_DEMUX = 0x80c,
+ MLX5_CMD_OP_NOP = 0x80d,
+ MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
+ MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
+ MLX5_CMD_OP_SET_BURST_SIZE = 0x812,
+ MLX5_CMD_OP_QUERY_BURST_SZIE = 0x813,
+ MLX5_CMD_OP_ACTIVATE_TRACER = 0x814,
+ MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815,
+ MLX5_CMD_OP_CREATE_SNIFFER_RULE = 0x820,
+ MLX5_CMD_OP_DESTROY_SNIFFER_RULE = 0x821,
+ MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x822,
+ MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x823,
+ MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x824,
+ MLX5_CMD_OP_CREATE_TIR = 0x900,
+ MLX5_CMD_OP_MODIFY_TIR = 0x901,
+ MLX5_CMD_OP_DESTROY_TIR = 0x902,
+ MLX5_CMD_OP_QUERY_TIR = 0x903,
+ MLX5_CMD_OP_CREATE_TIS = 0x912,
+ MLX5_CMD_OP_MODIFY_TIS = 0x913,
+ MLX5_CMD_OP_DESTROY_TIS = 0x914,
+ MLX5_CMD_OP_QUERY_TIS = 0x915,
+ MLX5_CMD_OP_CREATE_SQ = 0x904,
+ MLX5_CMD_OP_MODIFY_SQ = 0x905,
+ MLX5_CMD_OP_DESTROY_SQ = 0x906,
+ MLX5_CMD_OP_QUERY_SQ = 0x907,
+ MLX5_CMD_OP_CREATE_RQ = 0x908,
+ MLX5_CMD_OP_MODIFY_RQ = 0x909,
+ MLX5_CMD_OP_DESTROY_RQ = 0x90a,
+ MLX5_CMD_OP_QUERY_RQ = 0x90b,
+ MLX5_CMD_OP_CREATE_RMP = 0x90c,
+ MLX5_CMD_OP_MODIFY_RMP = 0x90d,
+ MLX5_CMD_OP_DESTROY_RMP = 0x90e,
+ MLX5_CMD_OP_QUERY_RMP = 0x90f,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x910,
+ MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x911,
+ MLX5_CMD_OP_MAX = 0x911
+};
+
+struct mlx5_ifc_cmd_hca_cap_bits {
+ u8 reserved_0[0x80];
+
+ u8 log_max_srq_sz[0x8];
+ u8 log_max_qp_sz[0x8];
+ u8 reserved_1[0xb];
+ u8 log_max_qp[0x5];
+
+ u8 log_max_strq_sz[0x8];
+ u8 reserved_2[0x3];
+ u8 log_max_srqs[0x5];
+ u8 reserved_3[0x10];
+
+ u8 reserved_4[0x8];
+ u8 log_max_cq_sz[0x8];
+ u8 reserved_5[0xb];
+ u8 log_max_cq[0x5];
+
+ u8 log_max_eq_sz[0x8];
+ u8 reserved_6[0x2];
+ u8 log_max_mkey[0x6];
+ u8 reserved_7[0xc];
+ u8 log_max_eq[0x4];
+
+ u8 max_indirection[0x8];
+ u8 reserved_8[0x1];
+ u8 log_max_mrw_sz[0x7];
+ u8 reserved_9[0x2];
+ u8 log_max_bsf_list_size[0x6];
+ u8 reserved_10[0x2];
+ u8 log_max_klm_list_size[0x6];
+
+ u8 reserved_11[0xa];
+ u8 log_max_ra_req_dc[0x6];
+ u8 reserved_12[0xa];
+ u8 log_max_ra_res_dc[0x6];
+
+ u8 reserved_13[0xa];
+ u8 log_max_ra_req_qp[0x6];
+ u8 reserved_14[0xa];
+ u8 log_max_ra_res_qp[0x6];
+
+ u8 pad_cap[0x1];
+ u8 cc_query_allowed[0x1];
+ u8 cc_modify_allowed[0x1];
+ u8 reserved_15[0x1d];
+
+ u8 reserved_16[0x6];
+ u8 max_qp_cnt[0xa];
+ u8 pkey_table_size[0x10];
+
+ u8 eswitch_owner[0x1];
+ u8 reserved_17[0xa];
+ u8 local_ca_ack_delay[0x5];
+ u8 reserved_18[0x8];
+ u8 num_ports[0x8];
+
+ u8 reserved_19[0x3];
+ u8 log_max_msg[0x5];
+ u8 reserved_20[0x18];
+
+ u8 stat_rate_support[0x10];
+ u8 reserved_21[0x10];
+
+ u8 reserved_22[0x10];
+ u8 cmdif_checksum[0x2];
+ u8 sigerr_cqe[0x1];
+ u8 reserved_23[0x1];
+ u8 wq_signature[0x1];
+ u8 sctr_data_cqe[0x1];
+ u8 reserved_24[0x1];
+ u8 sho[0x1];
+ u8 tph[0x1];
+ u8 rf[0x1];
+ u8 dc[0x1];
+ u8 reserved_25[0x2];
+ u8 roce[0x1];
+ u8 atomic[0x1];
+ u8 rsz_srq[0x1];
+
+ u8 cq_oi[0x1];
+ u8 cq_resize[0x1];
+ u8 cq_moderation[0x1];
+ u8 sniffer_rule_flow[0x1];
+ u8 sniffer_rule_vport[0x1];
+ u8 sniffer_rule_phy[0x1];
+ u8 reserved_26[0x1];
+ u8 pg[0x1];
+ u8 block_lb_mc[0x1];
+ u8 reserved_27[0x3];
+ u8 cd[0x1];
+ u8 reserved_28[0x1];
+ u8 apm[0x1];
+ u8 reserved_29[0x7];
+ u8 qkv[0x1];
+ u8 pkv[0x1];
+ u8 reserved_30[0x4];
+ u8 xrc[0x1];
+ u8 ud[0x1];
+ u8 uc[0x1];
+ u8 rc[0x1];
+
+ u8 reserved_31[0xa];
+ u8 uar_sz[0x6];
+ u8 reserved_32[0x8];
+ u8 log_pg_sz[0x8];
+
+ u8 bf[0x1];
+ u8 reserved_33[0xa];
+ u8 log_bf_reg_size[0x5];
+ u8 reserved_34[0x10];
+
+ u8 reserved_35[0x10];
+ u8 max_wqe_sz_sq[0x10];
+
+ u8 reserved_36[0x10];
+ u8 max_wqe_sz_rq[0x10];
+
+ u8 reserved_37[0x10];
+ u8 max_wqe_sz_sq_dc[0x10];
+
+ u8 reserved_38[0x7];
+ u8 max_qp_mcg[0x19];
+
+ u8 reserved_39[0x18];
+ u8 log_max_mcg[0x8];
+
+ u8 reserved_40[0xb];
+ u8 log_max_pd[0x5];
+ u8 reserved_41[0xb];
+ u8 log_max_xrcd[0x5];
+
+ u8 reserved_42[0x20];
+
+ u8 reserved_43[0x3];
+ u8 log_max_rq[0x5];
+ u8 reserved_44[0x3];
+ u8 log_max_sq[0x5];
+ u8 reserved_45[0x3];
+ u8 log_max_tir[0x5];
+ u8 reserved_46[0x3];
+ u8 log_max_tis[0x5];
+
+ u8 reserved_47[0x13];
+ u8 log_max_rq_per_tir[0x5];
+ u8 reserved_48[0x3];
+ u8 log_max_tis_per_sq[0x5];
+
+ u8 reserved_49[0xe0];
+
+ u8 reserved_50[0x10];
+ u8 log_uar_page_sz[0x10];
+
+ u8 reserved_51[0x100];
+
+ u8 reserved_52[0x1f];
+ u8 cqe_zip[0x1];
+
+ u8 cqe_zip_timeout[0x10];
+ u8 cqe_zip_max_num[0x10];
+
+ u8 reserved_53[0x220];
+};
+
+struct mlx5_ifc_set_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct;
+};
+
+struct mlx5_ifc_query_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 capability_struct[256][0x8];
+};
+
+struct mlx5_ifc_set_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+#endif /* MLX5_IFC_H */
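
Combined with MLX5_ST_SZ_DW()/MLX5_SET() from device.h and mlx5_cmd_exec() from driver.h, these layouts let command mailboxes be built without hand-written mailbox structs. A rough sketch of a caller follows; it is not the driver's actual code, error handling is trimmed, and the output buffer is assumed to be sized by the caller.

/* Sketch: query the currently enabled HCA caps using the ifc layouts. */
static int query_cur_hca_caps(struct mlx5_core_dev *dev, void *out, int out_sz)
{
	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};

	/* opcode and op_mod occupy the first two dwords of every command */
	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_CUR);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
}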
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 9709b30e2d69..7c4c0f1f5805 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -342,10 +342,9 @@ struct mlx5_stride_block_ctrl_seg {
};
struct mlx5_core_qp {
+ struct mlx5_core_rsc_common common; /* must be first */
void (*event) (struct mlx5_core_qp *, int);
int qpn;
- atomic_t refcount;
- struct completion free;
struct mlx5_rsc_debug *dbg;
int pid;
};
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 3083c53e0270..c300db3ae285 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -949,7 +949,7 @@ static inline int jedec_feature(struct nand_chip *chip)
: 0;
}
-/**
+/*
* struct nand_sdr_timings - SDR NAND chip timings
*
* This struct defines the timing requirements of a SDR NAND chip.
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 38377392d082..22d54b9b700d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -543,7 +543,7 @@ struct netdev_queue {
* read mostly part
*/
struct net_device *dev;
- struct Qdisc *qdisc;
+ struct Qdisc __rcu *qdisc;
struct Qdisc *qdisc_sleeping;
#ifdef CONFIG_SYSFS
struct kobject kobj;
@@ -1747,6 +1747,12 @@ struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
return &dev->_tx[index];
}
+static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
+ const struct sk_buff *skb)
+{
+ return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+}
+
static inline void netdev_for_each_tx_queue(struct net_device *dev,
void (*f)(struct net_device *,
struct netdev_queue *,
@@ -1781,24 +1787,13 @@ void dev_net_set(struct net_device *dev, struct net *net)
#endif
}
-static inline bool netdev_uses_dsa_tags(struct net_device *dev)
-{
-#ifdef CONFIG_NET_DSA_TAG_DSA
- if (dev->dsa_ptr != NULL)
- return dsa_uses_dsa_tags(dev->dsa_ptr);
-#endif
-
- return 0;
-}
-
-static inline bool netdev_uses_trailer_tags(struct net_device *dev)
+static inline bool netdev_uses_dsa(struct net_device *dev)
{
-#ifdef CONFIG_NET_DSA_TAG_TRAILER
+#if IS_ENABLED(CONFIG_NET_DSA)
if (dev->dsa_ptr != NULL)
- return dsa_uses_trailer_tags(dev->dsa_ptr);
+ return dsa_uses_tagged_protocol(dev->dsa_ptr);
#endif
-
- return 0;
+ return false;
}
/**
@@ -1879,11 +1874,20 @@ struct napi_gro_cb {
/* jiffies when first packet was created/queued */
unsigned long age;
- /* Used in ipv6_gro_receive() */
+ /* Used in ipv6_gro_receive() and foo-over-udp */
u16 proto;
/* Used in udp_gro_receive */
- u16 udp_mark;
+ u8 udp_mark:1;
+
+ /* GRO checksum is valid */
+ u8 csum_valid:1;
+
+ /* Number of checksums via CHECKSUM_UNNECESSARY */
+ u8 csum_cnt:3;
+
+ /* Used in foo-over-udp, set in udp[46]_gro_receive */
+ u8 is_ipv6:1;
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
@@ -1910,7 +1914,6 @@ struct packet_type {
struct offload_callbacks {
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
netdev_features_t features);
- int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb, int nhoff);
@@ -1924,6 +1927,7 @@ struct packet_offload {
struct udp_offload {
__be16 port;
+ u8 ipproto;
struct offload_callbacks callbacks;
};
@@ -1982,6 +1986,7 @@ struct pcpu_sw_netstats {
#define NETDEV_CHANGEUPPER 0x0015
#define NETDEV_RESEND_IGMP 0x0016
#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
+#define NETDEV_CHANGEINFODATA 0x0018
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2074,8 +2079,8 @@ void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);
-struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
- unsigned short mask);
+struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
+ unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
@@ -2153,11 +2158,97 @@ static inline void *skb_gro_network_header(struct sk_buff *skb)
static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
- if (skb->ip_summed == CHECKSUM_COMPLETE)
+ if (NAPI_GRO_CB(skb)->csum_valid)
NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
csum_partial(start, len, 0));
}
+/* GRO checksum functions. These are logical equivalents of the normal
+ * checksum functions (in skbuff.h) except that they operate on the GRO
+ * offsets and fields in sk_buff.
+ */
+
+__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
+
+static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
+ bool zero_okay,
+ __sum16 check)
+{
+ return (skb->ip_summed != CHECKSUM_PARTIAL &&
+ NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ (!zero_okay || check));
+}
+
+static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
+ __wsum psum)
+{
+ if (NAPI_GRO_CB(skb)->csum_valid &&
+ !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
+ return 0;
+
+ NAPI_GRO_CB(skb)->csum = psum;
+
+ return __skb_gro_checksum_complete(skb);
+}
+
+static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
+{
+ if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
+ /* Consume a checksum from CHECKSUM_UNNECESSARY */
+ NAPI_GRO_CB(skb)->csum_cnt--;
+ } else {
+ /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
+ * verified a new top-level checksum or an encapsulated one
+ * during GRO. This saves work if we fall back to the normal path.
+ */
+ __skb_incr_checksum_unnecessary(skb);
+ }
+}
+
+#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
+ compute_pseudo) \
+({ \
+ __sum16 __ret = 0; \
+ if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
+ __ret = __skb_gro_checksum_validate_complete(skb, \
+ compute_pseudo(skb, proto)); \
+ if (__ret) \
+ __skb_mark_checksum_bad(skb); \
+ else \
+ skb_gro_incr_csum_unnecessary(skb); \
+ __ret; \
+})
+
+#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
+ __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
+
+#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
+ compute_pseudo) \
+ __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
+
+#define skb_gro_checksum_simple_validate(skb) \
+ __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
+
+static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ !NAPI_GRO_CB(skb)->csum_valid);
+}
+
+static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
+ __sum16 check, __wsum pseudo)
+{
+ NAPI_GRO_CB(skb)->csum = ~pseudo;
+ NAPI_GRO_CB(skb)->csum_valid = 1;
+}
+
+#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
+do { \
+ if (__skb_gro_checksum_convert_check(skb)) \
+ __skb_gro_checksum_convert(skb, check, \
+ compute_pseudo(skb, proto)); \
+} while (0)
+
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
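
The validation macros above are intended to be called from a protocol's gro_receive handler. The fragment below is a hedged sketch of that call pattern, not code from the patch: foo4_gro_receive is a made-up handler name, and inet_gro_compute_pseudo is assumed to be the IPv4 pseudo-header helper operating on the GRO offsets.

static struct sk_buff **foo4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	/* One call validates the transport checksum at the current GRO
	 * offset: it either consumes a CHECKSUM_UNNECESSARY level
	 * (csum_cnt) or verifies against the CHECKSUM_COMPLETE value
	 * (csum_valid), falling back to __skb_gro_checksum_complete(). */
	if (skb_gro_checksum_validate(skb, IPPROTO_UDP,
				      inet_gro_compute_pseudo))
		goto flush;

	/* ...protocol-specific aggregation would go here... */
	return NULL;

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}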
@@ -2261,12 +2352,7 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
void __netif_schedule(struct Qdisc *q);
-
-static inline void netif_schedule_queue(struct netdev_queue *txq)
-{
- if (!(txq->state & QUEUE_STATE_ANY_XOFF))
- __netif_schedule(txq->qdisc);
-}
+void netif_schedule_queue(struct netdev_queue *txq);
static inline void netif_tx_schedule_all(struct net_device *dev)
{
@@ -2302,11 +2388,7 @@ static inline void netif_tx_start_all_queues(struct net_device *dev)
}
}
-static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
-{
- if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
- __netif_schedule(dev_queue->qdisc);
-}
+void netif_tx_wake_queue(struct netdev_queue *dev_queue);
/**
* netif_wake_queue - restart transmit
@@ -2578,19 +2660,7 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev,
return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
-/**
- * netif_wake_subqueue - allow sending packets on subqueue
- * @dev: network device
- * @queue_index: sub queue index
- *
- * Resume individual transmit queue of a device with multiple transmit queues.
- */
-static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
-{
- struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
- if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
- __netif_schedule(txq->qdisc);
-}
+void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
@@ -2754,8 +2824,9 @@ int dev_set_mac_address(struct net_device *, struct sockaddr *);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_port_id *ppid);
-int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
- struct netdev_queue *txq);
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
+struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ struct netdev_queue *txq, int *ret);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
@@ -3176,7 +3247,7 @@ static inline int __dev_uc_sync(struct net_device *dev,
}
/**
- * __dev_uc_unsync - Remove synchonized addresses from device
+ * __dev_uc_unsync - Remove synchronized addresses from device
* @dev: device to sync
* @unsync: function to call if address should be removed
*
@@ -3220,7 +3291,7 @@ static inline int __dev_mc_sync(struct net_device *dev,
}
/**
- * __dev_mc_unsync - Remove synchonized addresses from device
+ * __dev_mc_unsync - Remove synchronized addresses from device
* @dev: device to sync
* @unsync: function to call if address should be removed
*
@@ -3357,6 +3428,27 @@ int __init dev_proc_init(void);
#define dev_proc_init() 0
#endif
+static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
+ struct sk_buff *skb, struct net_device *dev,
+ bool more)
+{
+ skb->xmit_more = more ? 1 : 0;
+ return ops->ndo_start_xmit(skb, dev);
+}
+
+static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ struct netdev_queue *txq, bool more)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ int rc;
+
+ rc = __netdev_start_xmit(ops, skb, dev, more);
+ if (rc == NETDEV_TX_OK)
+ txq_trans_update(txq);
+
+ return rc;
+}
+
int netdev_class_create_file_ns(struct class_attribute *class_attr,
const void *ns);
void netdev_class_remove_file_ns(struct class_attribute *class_attr,
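
skb->xmit_more, which __netdev_start_xmit() above sets from the 'more' hint, lets a driver defer expensive doorbell writes while the stack keeps feeding packets. A hypothetical driver fragment illustrating the idea (foo_* helpers and txring_txq() are made-up names, not a real driver API):

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_ring *ring = foo_select_ring(dev, skb);

	foo_post_descriptor(ring, skb);

	/* Ring the doorbell only when no further packets are queued behind
	 * this one, or when the queue has been stopped. */
	if (!skb->xmit_more || netif_xmit_stopped(txring_txq(ring)))
		foo_ring_doorbell(ring);

	return NETDEV_TX_OK;
}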
@@ -3523,22 +3615,22 @@ static inline const char *netdev_reg_state(const struct net_device *dev)
}
__printf(3, 4)
-int netdev_printk(const char *level, const struct net_device *dev,
- const char *format, ...);
+void netdev_printk(const char *level, const struct net_device *dev,
+ const char *format, ...);
__printf(2, 3)
-int netdev_emerg(const struct net_device *dev, const char *format, ...);
+void netdev_emerg(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
-int netdev_alert(const struct net_device *dev, const char *format, ...);
+void netdev_alert(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
-int netdev_crit(const struct net_device *dev, const char *format, ...);
+void netdev_crit(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
-int netdev_err(const struct net_device *dev, const char *format, ...);
+void netdev_err(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
-int netdev_warn(const struct net_device *dev, const char *format, ...);
+void netdev_warn(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
-int netdev_notice(const struct net_device *dev, const char *format, ...);
+void netdev_notice(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
-int netdev_info(const struct net_device *dev, const char *format, ...);
+void netdev_info(const struct net_device *dev, const char *format, ...);
#define MODULE_ALIAS_NETDEV(device) \
MODULE_ALIAS("netdev-" device)
@@ -3556,7 +3648,6 @@ do { \
({ \
if (0) \
netdev_printk(KERN_DEBUG, __dev, format, ##args); \
- 0; \
})
#endif
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 2077489f9887..2517ece98820 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -9,6 +9,7 @@
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
+#include <linux/static_key.h>
#include <uapi/linux/netfilter.h>
#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
@@ -99,9 +100,9 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-#if defined(CONFIG_JUMP_LABEL)
-#include <linux/static_key.h>
+#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+
static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
{
if (__builtin_constant_p(pf) &&
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 96afc29184be..f1606fa6132d 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -57,6 +57,8 @@ enum ip_set_extension {
IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER),
IPSET_EXT_BIT_COMMENT = 2,
IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT),
+ IPSET_EXT_BIT_SKBINFO = 3,
+ IPSET_EXT_SKBINFO = (1 << IPSET_EXT_BIT_SKBINFO),
/* Mark set with an extension which needs to call destroy */
IPSET_EXT_BIT_DESTROY = 7,
IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY),
@@ -65,12 +67,14 @@ enum ip_set_extension {
#define SET_WITH_TIMEOUT(s) ((s)->extensions & IPSET_EXT_TIMEOUT)
#define SET_WITH_COUNTER(s) ((s)->extensions & IPSET_EXT_COUNTER)
#define SET_WITH_COMMENT(s) ((s)->extensions & IPSET_EXT_COMMENT)
+#define SET_WITH_SKBINFO(s) ((s)->extensions & IPSET_EXT_SKBINFO)
#define SET_WITH_FORCEADD(s) ((s)->flags & IPSET_CREATE_FLAG_FORCEADD)
/* Extension id, in size order */
enum ip_set_ext_id {
IPSET_EXT_ID_COUNTER = 0,
IPSET_EXT_ID_TIMEOUT,
+ IPSET_EXT_ID_SKBINFO,
IPSET_EXT_ID_COMMENT,
IPSET_EXT_ID_MAX,
};
@@ -92,6 +96,10 @@ struct ip_set_ext {
u64 packets;
u64 bytes;
u32 timeout;
+ u32 skbmark;
+ u32 skbmarkmask;
+ u32 skbprio;
+ u16 skbqueue;
char *comment;
};
@@ -104,6 +112,13 @@ struct ip_set_comment {
char *str;
};
+struct ip_set_skbinfo {
+ u32 skbmark;
+ u32 skbmarkmask;
+ u32 skbprio;
+ u16 skbqueue;
+};
+
struct ip_set;
#define ext_timeout(e, s) \
@@ -112,7 +127,8 @@ struct ip_set;
(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])
#define ext_comment(e, s) \
(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])
-
+#define ext_skbinfo(e, s) \
+(struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO])
typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
const struct ip_set_ext *ext,
@@ -256,6 +272,8 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
if (SET_WITH_COMMENT(set))
cadt_flags |= IPSET_FLAG_WITH_COMMENT;
+ if (SET_WITH_SKBINFO(set))
+ cadt_flags |= IPSET_FLAG_WITH_SKBINFO;
if (SET_WITH_FORCEADD(set))
cadt_flags |= IPSET_FLAG_WITH_FORCEADD;
@@ -304,6 +322,43 @@ ip_set_update_counter(struct ip_set_counter *counter,
}
}
+static inline void
+ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
+ const struct ip_set_ext *ext,
+ struct ip_set_ext *mext, u32 flags)
+{
+ mext->skbmark = skbinfo->skbmark;
+ mext->skbmarkmask = skbinfo->skbmarkmask;
+ mext->skbprio = skbinfo->skbprio;
+ mext->skbqueue = skbinfo->skbqueue;
+}
+static inline bool
+ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
+{
+ /* Send nonzero parameters only */
+ return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
+ nla_put_net64(skb, IPSET_ATTR_SKBMARK,
+ cpu_to_be64((u64)skbinfo->skbmark << 32 |
+ skbinfo->skbmarkmask))) ||
+ (skbinfo->skbprio &&
+ nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
+ cpu_to_be32(skbinfo->skbprio))) ||
+ (skbinfo->skbqueue &&
+ nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
+ cpu_to_be16(skbinfo->skbqueue)));
+
+}
+
+static inline void
+ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
+ const struct ip_set_ext *ext)
+{
+ skbinfo->skbmark = ext->skbmark;
+ skbinfo->skbmarkmask = ext->skbmarkmask;
+ skbinfo->skbprio = ext->skbprio;
+ skbinfo->skbqueue = ext->skbqueue;
+}
+
static inline bool
ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter)
{
@@ -497,6 +552,9 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
if (SET_WITH_COMMENT(set) &&
ip_set_put_comment(skb, ext_comment(e, set)))
return -EMSGSIZE;
+ if (SET_WITH_SKBINFO(set) &&
+ ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
+ return -EMSGSIZE;
return 0;
}
diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h
index 68c2aea897f5..fe2622a00151 100644
--- a/include/linux/netfilter/ipset/ip_set_list.h
+++ b/include/linux/netfilter/ipset/ip_set_list.h
@@ -6,5 +6,6 @@
#define IP_SET_LIST_DEFAULT_SIZE 8
#define IP_SET_LIST_MIN_SIZE 4
+#define IP_SET_LIST_MAX_SIZE 65536
#endif /* __IP_SET_LIST_H */
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 8ab1c278b66d..c755e4971fa3 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -15,7 +15,7 @@ enum nf_br_hook_priorities {
NF_BR_PRI_LAST = INT_MAX,
};
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
#define BRNF_PKT_TYPE 0x01
#define BRNF_BRIDGED_DNAT 0x02
@@ -24,16 +24,6 @@ enum nf_br_hook_priorities {
#define BRNF_8021Q 0x10
#define BRNF_PPPoE 0x20
-/* Only used in br_forward.c */
-int nf_bridge_copy_header(struct sk_buff *skb);
-static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
-{
- if (skb->nf_bridge &&
- skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT))
- return nf_bridge_copy_header(skb);
- return 0;
-}
-
static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
{
switch (skb->protocol) {
@@ -46,6 +36,44 @@ static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
}
}
+static inline void nf_bridge_update_protocol(struct sk_buff *skb)
+{
+ if (skb->nf_bridge->mask & BRNF_8021Q)
+ skb->protocol = htons(ETH_P_8021Q);
+ else if (skb->nf_bridge->mask & BRNF_PPPoE)
+ skb->protocol = htons(ETH_P_PPP_SES);
+}
+
+/* Fill in the header for fragmented IP packets handled by
+ * the IPv4 connection tracking code.
+ *
+ * Only used in br_forward.c
+ */
+static inline int nf_bridge_copy_header(struct sk_buff *skb)
+{
+ int err;
+ unsigned int header_size;
+
+ nf_bridge_update_protocol(skb);
+ header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
+ err = skb_cow_head(skb, header_size);
+ if (err)
+ return err;
+
+ skb_copy_to_linear_data_offset(skb, -header_size,
+ skb->nf_bridge->data, header_size);
+ __skb_push(skb, nf_bridge_encap_header_len(skb));
+ return 0;
+}
+
+static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
+{
+ if (skb->nf_bridge &&
+ skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT))
+ return nf_bridge_copy_header(skb);
+ return 0;
+}
+
static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
{
if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE))
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 6ad2bbcad405..6c3e06ee2fb7 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -123,6 +123,7 @@ extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
extern void nfs_unlock_and_release_request(struct nfs_page *);
extern int nfs_page_group_lock(struct nfs_page *, bool);
+extern void nfs_page_group_lock_wait(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 61978a460841..96453f9bc8ba 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -303,6 +303,7 @@ struct pci_dev {
D3cold, not set for devices
powered on/off by the
corresponding bridge */
+ unsigned int ignore_hotplug:1; /* Ignore hotplug events */
unsigned int d3_delay; /* D3->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
@@ -1021,6 +1022,11 @@ bool pci_dev_run_wake(struct pci_dev *dev);
bool pci_check_pme_status(struct pci_dev *dev);
void pci_pme_wakeup_bus(struct pci_bus *bus);
+static inline void pci_ignore_hotplug(struct pci_dev *dev)
+{
+ dev->ignore_hotplug = 1;
+}
+
static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
bool enable)
{
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 3dfbf237cd8f..ef5894ca8e50 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -71,6 +71,7 @@ void percpu_ref_reinit(struct percpu_ref *ref);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
+void __percpu_ref_kill_expedited(struct percpu_ref *ref);
/**
* percpu_ref_kill - drop the initial ref
diff --git a/include/linux/phonedev.h b/include/linux/phonedev.h
deleted file mode 100644
index 4269de99e320..000000000000
--- a/include/linux/phonedev.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __LINUX_PHONEDEV_H
-#define __LINUX_PHONEDEV_H
-
-#include <linux/types.h>
-
-#ifdef __KERNEL__
-
-#include <linux/poll.h>
-
-struct phone_device {
- struct phone_device *next;
- const struct file_operations *f_op;
- int (*open) (struct phone_device *, struct file *);
- int board; /* Device private index */
- int minor;
-};
-
-extern int phonedev_init(void);
-#define PHONE_MAJOR 100
-extern int phone_register_device(struct phone_device *, int unit);
-#define PHONE_UNIT_ANY -1
-extern void phone_unregister_device(struct phone_device *);
-
-#endif
-#endif
diff --git a/include/linux/phy.h b/include/linux/phy.h
index ed39956b5613..d090cfcaa167 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -598,6 +598,19 @@ static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
}
/**
+ * phy_read_mmd_indirect - reads data from the MMD registers
+ * @phydev: The PHY device
+ * @prtad: MMD Address
+ * @devad: MMD DEVAD
+ * @addr: PHY address on the MII bus
+ *
+ * Description: reads data from the MMD registers (clause 22 indirect
+ * access to clause 45 registers) of the specified phy address.
+ */
+int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
+ int devad, int addr);
+
+/**
* phy_read - Convenience function for reading a given PHY register
* @phydev: the phy_device struct
* @regnum: register number to read
@@ -668,6 +681,20 @@ static inline int phy_write_mmd(struct phy_device *phydev, int devad,
return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
}
+/**
+ * phy_write_mmd_indirect - writes data to the MMD registers
+ * @phydev: The PHY device
+ * @prtad: MMD Address
+ * @devad: MMD DEVAD
+ * @addr: PHY address on the MII bus
+ * @data: data to write in the MMD register
+ *
+ * Description: writes @data to the MMD registers (clause 22 indirect
+ * access to clause 45 registers) of the specified phy address.
+ */
+void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
+ int devad, int addr, u32 data);
+
struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
bool is_c45,
struct phy_c45_device_ids *c45_ids);
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index ae612acebb53..941138664c1d 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -18,6 +18,9 @@ extern int fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
struct device_node *np);
extern void fixed_phy_del(int phy_addr);
+extern int fixed_phy_set_link_update(struct phy_device *phydev,
+ int (*link_update)(struct net_device *,
+ struct fixed_phy_status *));
#else
static inline int fixed_phy_add(unsigned int irq, int phy_id,
struct fixed_phy_status *status)
@@ -34,14 +37,12 @@ static inline int fixed_phy_del(int phy_addr)
{
return -ENODEV;
}
-#endif /* CONFIG_FIXED_PHY */
-
-/*
- * This function issued only by fixed_phy-aware drivers, no need
- * protect it with #ifdef
- */
-extern int fixed_phy_set_link_update(struct phy_device *phydev,
+static inline int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
- struct fixed_phy_status *));
+ struct fixed_phy_status *))
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_FIXED_PHY */
#endif /* __PHY_FIXED_H */
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h
index 660c029d694f..16ec262dfcc8 100644
--- a/include/linux/platform_data/mtd-nand-omap2.h
+++ b/include/linux/platform_data/mtd-nand-omap2.h
@@ -21,8 +21,17 @@ enum nand_io {
};
enum omap_ecc {
- /* 1-bit ECC calculation by GPMC, Error detection by Software */
- OMAP_ECC_HAM1_CODE_HW = 0,
+ /*
+ * 1-bit ECC: calculation and correction by SW
+ * ECC stored at end of spare area
+ */
+ OMAP_ECC_HAM1_CODE_SW = 0,
+
+ /*
+ * 1-bit ECC: calculation by GPMC, Error detection by Software
+ * ECC layout compatible with ROM code layout
+ */
+ OMAP_ECC_HAM1_CODE_HW,
/* 4-bit ECC calculation by GPMC, Error detection by Software */
OMAP_ECC_BCH4_CODE_HW_DETECTION_SW,
/* 4-bit ECC calculation by GPMC, Error detection by ELM */
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 7c1d252b20c0..ebc4c76ffb73 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -60,7 +60,7 @@ struct generic_pm_domain {
struct mutex lock;
struct dev_power_governor *gov;
struct work_struct power_off_work;
- char *name;
+ const char *name;
unsigned int in_progress; /* Number of devices being suspended now */
atomic_t sd_count; /* Number of subdomains with power "on" */
enum gpd_status status; /* Current state of the domain */
diff --git a/include/linux/random.h b/include/linux/random.h
index 57fbbffd77a0..b05856e16b75 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -26,7 +26,7 @@ unsigned int get_random_int(void);
unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
u32 prandom_u32(void);
-void prandom_bytes(void *buf, int nbytes);
+void prandom_bytes(void *buf, size_t nbytes);
void prandom_seed(u32 seed);
void prandom_reseed_late(void);
@@ -35,7 +35,7 @@ struct rnd_state {
};
u32 prandom_u32_state(struct rnd_state *state);
-void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
+void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
/**
* prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index bbe03a1924c0..4efa1ed8a2b0 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -218,6 +218,8 @@ enum regulator_type {
* @linear_min_sel: Minimal selector for starting linear mapping
* @fixed_uV: Fixed voltage of rails.
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
+ * @linear_ranges: A constant table of possible voltage ranges.
+ * @n_linear_ranges: Number of entries in the @linear_ranges table.
* @volt_table: Voltage mapping table (if table based mapping)
*
* @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 730e638c5589..0b08d05d470b 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -85,6 +85,7 @@ struct regulator_state {
* bootloader then it will be enabled when the constraints are
* applied.
* @apply_uV: Apply the voltage constraint when initialising.
+ * @ramp_disable: Disable ramp delay when initialising or when setting voltage.
*
* @input_uV: Input voltage for regulator when supplied by another regulator.
*
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 36826c0166c5..fb298e9d6d3a 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -44,6 +44,7 @@ struct rhashtable;
* @head_offset: Offset of rhash_head in struct to be hashed
* @hash_rnd: Seed to use while hashing
* @max_shift: Maximum number of shifts while expanding
+ * @min_shift: Minimum number of shifts while shrinking
* @hashfn: Function to hash key
* @obj_hashfn: Function to hash object
* @grow_decision: If defined, may return true if table should expand
@@ -57,6 +58,7 @@ struct rhashtable_params {
size_t head_offset;
u32 hash_rnd;
size_t max_shift;
+ size_t min_shift;
rht_hashfn_t hashfn;
rht_obj_hashfn_t obj_hashfn;
bool (*grow_decision)(const struct rhashtable *ht,
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 167bae7bdfa4..6cacbce1a06c 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -47,6 +47,16 @@ static inline int lockdep_rtnl_is_held(void)
rcu_dereference_check(p, lockdep_rtnl_is_held())
/**
+ * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking
+ * @p: The pointer to read, prior to dereference
+ *
+ * Do an rcu_dereference_bh(p), but check that the caller holds either
+ * rcu_read_lock_bh() or RTNL. Note: please prefer rtnl_dereference() or rcu_dereference_bh().
+ */
+#define rcu_dereference_bh_rtnl(p) \
+ rcu_dereference_bh_check(p, lockdep_rtnl_is_held())
+
+/**
* rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
* @p: The pointer to read, prior to dereferencing
*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5c2c885ee52b..b867a4dab38a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1903,8 +1903,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
-#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
-#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
@@ -1957,17 +1955,31 @@ static inline void memalloc_noio_restore(unsigned int flags)
}
/* Per-process atomic flags. */
-#define PFA_NO_NEW_PRIVS 0x00000001 /* May not gain new privileges. */
+#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
+#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
+#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
-static inline bool task_no_new_privs(struct task_struct *p)
-{
- return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
-}
-static inline void task_set_no_new_privs(struct task_struct *p)
-{
- set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
-}
+#define TASK_PFA_TEST(name, func) \
+ static inline bool task_##func(struct task_struct *p) \
+ { return test_bit(PFA_##name, &p->atomic_flags); }
+#define TASK_PFA_SET(name, func) \
+ static inline void task_set_##func(struct task_struct *p) \
+ { set_bit(PFA_##name, &p->atomic_flags); }
+#define TASK_PFA_CLEAR(name, func) \
+ static inline void task_clear_##func(struct task_struct *p) \
+ { clear_bit(PFA_##name, &p->atomic_flags); }
+
+TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
+TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
+
+TASK_PFA_TEST(SPREAD_PAGE, spread_page)
+TASK_PFA_SET(SPREAD_PAGE, spread_page)
+TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
+
+TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
+TASK_PFA_SET(SPREAD_SLAB, spread_slab)
+TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
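(Editorial note, not part of this patch: the PF_SPREAD_* bits move out of the non-atomic p->flags word into per-task atomic flags, and the TASK_PFA_* macros above generate the accessors. A minimal usage sketch, assuming kernel context and a hypothetical caller:)

static bool example_update_spread_flags(struct task_struct *p, bool spread)
{
	if (spread)
		task_set_spread_page(p);	/* set_bit(PFA_SPREAD_PAGE, &p->atomic_flags) */
	else
		task_clear_spread_page(p);	/* clear_bit(PFA_SPREAD_PAGE, &p->atomic_flags) */

	return task_spread_page(p);		/* test_bit(PFA_SPREAD_PAGE, &p->atomic_flags) */
}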
/*
* task->jobctl flags
@@ -2608,9 +2620,22 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct
task_thread_info(p)->task = p;
}
+/*
+ * Return the address of the last usable long on the stack.
+ *
+ * When the stack grows down, this is just above the thread
+ * info struct. Going any lower will corrupt the threadinfo.
+ *
+ * When the stack grows up, this is the highest address.
+ * Beyond that position, we corrupt data on the next page.
+ */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
+#ifdef CONFIG_STACK_GROWSUP
+ return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
+#else
return (unsigned long *)(task_thread_info(p) + 1);
+#endif
}
#endif
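(Editorial note: the usual consumer of end_of_stack() is stack-overrun detection, writing a canary at the last usable long and checking it later. A hedged sketch with hypothetical function names, assuming kernel context and the existing STACK_END_MAGIC constant:)

static inline void example_set_stack_end_magic(struct task_struct *tsk)
{
	unsigned long *stackend = end_of_stack(tsk);

	*stackend = STACK_END_MAGIC;	/* clobbered if the stack overruns its limit */
}

static inline bool example_stack_end_corrupted(struct task_struct *tsk)
{
	return *end_of_stack(tsk) != STACK_END_MAGIC;
}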
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
index 3d6003de4b0d..a1ba6a5ccdd6 100644
--- a/include/linux/seqno-fence.h
+++ b/include/linux/seqno-fence.h
@@ -62,6 +62,7 @@ to_seqno_fence(struct fence *fence)
* @context: the execution context this fence is a part of
* @seqno_ofs: the offset within @sync_buf
* @seqno: the sequence # to signal on
+ * @cond: fence wait condition
* @ops: the fence_ops for operations on this seqno fence
*
* This function initializes a struct seqno_fence with passed parameters,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index abde271c18ae..3a5ec7638627 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -47,11 +47,29 @@
*
* The hardware you're dealing with doesn't calculate the full checksum
* (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
- * for specific protocols e.g. TCP/UDP/SCTP, then, for such packets it will
- * set CHECKSUM_UNNECESSARY if their checksums are okay. skb->csum is still
- * undefined in this case though. It is a bad option, but, unfortunately,
- * nowadays most vendors do this. Apparently with the secret goal to sell
- * you new devices, when you will add new protocol to your host, f.e. IPv6 8)
+ * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
+ * if their checksums are okay. skb->csum is still undefined in this case
+ * though. It is a bad option, but, unfortunately, nowadays most vendors do
+ * this. Apparently with the secret goal to sell you new devices, when you
+ * will add new protocol to your host, f.e. IPv6 8)
+ *
+ * CHECKSUM_UNNECESSARY is applicable to following protocols:
+ * TCP: IPv6 and IPv4.
+ * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
+ * zero UDP checksum for either IPv4 or IPv6, the networking stack
+ * may perform further validation in this case.
+ * GRE: only if the checksum is present in the header.
+ * SCTP: indicates the CRC in SCTP header has been validated.
+ *
+ * skb->csum_level indicates one less than the number of consecutive checksums
+ * in the packet that have been verified as CHECKSUM_UNNECESSARY.
+ * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
+ * and the device is able to verify the checksums for UDP (possibly zero),
+ * GRE (checksum flag is set), and TCP, skb->csum_level would be set to
+ * two. If the device were only able to verify the UDP checksum and not
+ * GRE, either because it doesn't support GRE checksums or because the GRE
+ * checksum is bad, skb->csum_level would be set to zero (TCP checksum is
+ * not considered in this case).
*
* CHECKSUM_COMPLETE:
*
@@ -112,6 +130,9 @@
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3
+/* Maximum value in skb->csum_level */
+#define SKB_MAX_CSUM_LEVEL 3
+
#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X) \
((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
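(Editorial note: to make the csum_level rules above concrete, a hedged sketch of a driver receive path, assuming kernel context and a hypothetical device that reports outer and inner checksum results separately:)

static void example_rx_csum(struct sk_buff *skb, bool outer_ok, bool inner_ok)
{
	if (!outer_ok) {
		skb->ip_summed = CHECKSUM_NONE;	/* stack verifies everything itself */
		return;
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	/* csum_level counts verified checksums minus one, capped at SKB_MAX_CSUM_LEVEL */
	skb->csum_level = inner_ok ? 1 : 0;
}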
@@ -135,7 +156,7 @@ struct nf_conntrack {
};
#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
atomic_t use;
unsigned int mask;
@@ -318,9 +339,10 @@ struct skb_shared_info {
enum {
- SKB_FCLONE_UNAVAILABLE,
- SKB_FCLONE_ORIG,
- SKB_FCLONE_CLONE,
+ SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */
+ SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */
+ SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */
+ SKB_FCLONE_FREE, /* this companion fclone skb is available */
};
enum {
@@ -452,6 +474,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
* @tc_verd: traffic control verdict
* @hash: the packet hash
* @queue_mapping: Queue mapping for multiqueue devices
+ * @xmit_more: More SKBs are pending for this queue
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
* @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -505,82 +528,97 @@ struct sk_buff {
char cb[48] __aligned(8);
unsigned long _skb_refdst;
+ void (*destructor)(struct sk_buff *skb);
#ifdef CONFIG_XFRM
struct sec_path *sp;
#endif
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ struct nf_conntrack *nfct;
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ struct nf_bridge_info *nf_bridge;
+#endif
unsigned int len,
data_len;
__u16 mac_len,
hdr_len;
- union {
- __wsum csum;
- struct {
- __u16 csum_start;
- __u16 csum_offset;
- };
- };
- __u32 priority;
+
+ /* Following fields are _not_ copied in __copy_skb_header()
+ * Note that queue_mapping is here mostly to fill a hole.
+ */
kmemcheck_bitfield_begin(flags1);
- __u8 ignore_df:1,
- cloned:1,
- ip_summed:2,
+ __u16 queue_mapping;
+ __u8 cloned:1,
nohdr:1,
- nfctinfo:3;
- __u8 pkt_type:3,
fclone:2,
- ipvs_property:1,
peeked:1,
- nf_trace:1;
+ head_frag:1,
+ xmit_more:1;
+ /* one bit hole */
kmemcheck_bitfield_end(flags1);
- __be16 protocol;
-
- void (*destructor)(struct sk_buff *skb);
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
- struct nf_conntrack *nfct;
-#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
- struct nf_bridge_info *nf_bridge;
-#endif
-
- int skb_iif;
-
- __u32 hash;
- __be16 vlan_proto;
- __u16 vlan_tci;
+ /* fields enclosed in headers_start/headers_end are copied
+ * using a single memcpy() in __copy_skb_header()
+ */
+ __u32 headers_start[0];
-#ifdef CONFIG_NET_SCHED
- __u16 tc_index; /* traffic control index */
-#ifdef CONFIG_NET_CLS_ACT
- __u16 tc_verd; /* traffic control verdict */
-#endif
+/* if you move pkt_type around you also must adapt those constants */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_TYPE_MAX (7 << 5)
+#else
+#define PKT_TYPE_MAX 7
#endif
+#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
- __u16 queue_mapping;
- kmemcheck_bitfield_begin(flags2);
-#ifdef CONFIG_IPV6_NDISC_NODETYPE
- __u8 ndisc_nodetype:2;
-#endif
+ __u8 __pkt_type_offset[0];
+ __u8 pkt_type:3;
__u8 pfmemalloc:1;
+ __u8 ignore_df:1;
+ __u8 nfctinfo:3;
+
+ __u8 nf_trace:1;
+ __u8 ip_summed:2;
__u8 ooo_okay:1;
__u8 l4_hash:1;
__u8 sw_hash:1;
__u8 wifi_acked_valid:1;
__u8 wifi_acked:1;
+
__u8 no_fcs:1;
- __u8 head_frag:1;
- /* Encapsulation protocol and NIC drivers should use
- * this flag to indicate to each other if the skb contains
- * encapsulated packet or not and maybe use the inner packet
- * headers if needed
- */
+ /* Indicates the inner headers are valid in the skbuff. */
__u8 encapsulation:1;
__u8 encap_hdr_csum:1;
__u8 csum_valid:1;
__u8 csum_complete_sw:1;
- /* 2/4 bit hole (depending on ndisc_nodetype presence) */
- kmemcheck_bitfield_end(flags2);
+ __u8 csum_level:2;
+ __u8 csum_bad:1;
+
+#ifdef CONFIG_IPV6_NDISC_NODETYPE
+ __u8 ndisc_nodetype:2;
+#endif
+ __u8 ipvs_property:1;
+ __u8 inner_protocol_type:1;
+ /* 4 or 6 bit hole */
+
+#ifdef CONFIG_NET_SCHED
+ __u16 tc_index; /* traffic control index */
+#ifdef CONFIG_NET_CLS_ACT
+ __u16 tc_verd; /* traffic control verdict */
+#endif
+#endif
+ union {
+ __wsum csum;
+ struct {
+ __u16 csum_start;
+ __u16 csum_offset;
+ };
+ };
+ __u32 priority;
+ int skb_iif;
+ __u32 hash;
+ __be16 vlan_proto;
+ __u16 vlan_tci;
#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
union {
unsigned int napi_id;
@@ -596,13 +634,22 @@ struct sk_buff {
__u32 reserved_tailroom;
};
- __be16 inner_protocol;
+ union {
+ __be16 inner_protocol;
+ __u8 inner_ipproto;
+ };
+
__u16 inner_transport_header;
__u16 inner_network_header;
__u16 inner_mac_header;
+
+ __be16 protocol;
__u16 transport_header;
__u16 network_header;
__u16 mac_header;
+
+ __u32 headers_end[0];
+
/* These elements must be at the end, see alloc_skb() for details. */
sk_buff_data_t tail;
sk_buff_data_t end;
@@ -734,6 +781,37 @@ static inline struct sk_buff *alloc_skb(unsigned int size,
return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}
+struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
+ unsigned long data_len,
+ int max_page_order,
+ int *errcode,
+ gfp_t gfp_mask);
+
+/* Layout of fast clones : [skb1][skb2][fclone_ref] */
+struct sk_buff_fclones {
+ struct sk_buff skb1;
+
+ struct sk_buff skb2;
+
+ atomic_t fclone_ref;
+};
+
+/**
+ * skb_fclone_busy - check if fclone is busy
+ * @skb: buffer
+ *
+ * Returns true if skb is a fast clone, and its clone is not freed.
+ */
+static inline bool skb_fclone_busy(const struct sk_buff *skb)
+{
+ const struct sk_buff_fclones *fclones;
+
+ fclones = container_of(skb, struct sk_buff_fclones, skb1);
+
+ return skb->fclone == SKB_FCLONE_ORIG &&
+ fclones->skb2.fclone == SKB_FCLONE_CLONE;
+}
+
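(Editorial note: skb_fclone_busy() lets a sender check whether the companion fast clone of a queued skb is still owned elsewhere, for example still sitting in a qdisc, before reusing it. A hedged sketch with a hypothetical caller, assuming kernel context:)

static struct sk_buff *example_prepare_for_reuse(struct sk_buff *skb, gfp_t gfp)
{
	if (skb_fclone_busy(skb))
		return skb_copy(skb, gfp);	/* clone still in flight, take a private copy */

	return skb;				/* safe to modify the original in place */
}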
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
gfp_t priority)
{
@@ -1042,6 +1120,7 @@ static inline int skb_header_cloned(const struct sk_buff *skb)
* Drop a reference to the header part of the buffer. This is done
* by acquiring a payload reference. You must not read from the header
* part of skb->data after this.
+ * Note: check if you can use __skb_header_release() instead.
*/
static inline void skb_header_release(struct sk_buff *skb)
{
@@ -1051,6 +1130,20 @@ static inline void skb_header_release(struct sk_buff *skb)
}
/**
+ * __skb_header_release - release reference to header
+ * @skb: buffer to operate on
+ *
+ * Variant of skb_header_release() assuming skb is private to caller.
+ * We can avoid one atomic operation.
+ */
+static inline void __skb_header_release(struct sk_buff *skb)
+{
+ skb->nohdr = 1;
+ atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
+}
+
+
+/**
* skb_shared - is the buffer shared
* @skb: buffer to check
*
@@ -1675,6 +1768,23 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
skb->tail += len;
}
+#define ENCAP_TYPE_ETHER 0
+#define ENCAP_TYPE_IPPROTO 1
+
+static inline void skb_set_inner_protocol(struct sk_buff *skb,
+ __be16 protocol)
+{
+ skb->inner_protocol = protocol;
+ skb->inner_protocol_type = ENCAP_TYPE_ETHER;
+}
+
+static inline void skb_set_inner_ipproto(struct sk_buff *skb,
+ __u8 ipproto)
+{
+ skb->inner_ipproto = ipproto;
+ skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
+}
+
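(Editorial note: a hedged sketch of how a tunnel transmit path might record the encapsulated payload type; the function name and parameters are hypothetical. inner_protocol_type lets later GSO/offload code tell an ethertype apart from an IP protocol number:)

static void example_note_inner_payload(struct sk_buff *skb, bool ether_framed)
{
	if (ether_framed)
		skb_set_inner_protocol(skb, htons(ETH_P_TEB));	/* inner frame is Ethernet */
	else
		skb_set_inner_ipproto(skb, IPPROTO_IPIP);	/* inner payload is an IPv4 packet */
}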
static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
skb->inner_mac_header = skb->mac_header;
@@ -1860,18 +1970,6 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
return pskb_may_pull(skb, skb_network_offset(skb) + len);
}
-static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
-{
- /* Only continue with checksum unnecessary if device indicated
- * it is valid across encapsulation (skb->encapsulation was set).
- */
- if (skb->ip_summed == CHECKSUM_UNNECESSARY && !skb->encapsulation)
- skb->ip_summed = CHECKSUM_NONE;
-
- skb->encapsulation = 0;
- skb->csum_valid = 0;
-}
-
/*
* CPUs often take a performance hit when accessing unaligned memory
* locations. The actual performance hit varies, it can be small if the
@@ -2567,20 +2665,26 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum);
-static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
- int len, void *buffer)
+static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
+ int len, void *data, int hlen, void *buffer)
{
- int hlen = skb_headlen(skb);
-
if (hlen - offset >= len)
- return skb->data + offset;
+ return data + offset;
- if (skb_copy_bits(skb, offset, buffer, len) < 0)
+ if (!skb ||
+ skb_copy_bits(skb, offset, buffer, len) < 0)
return NULL;
return buffer;
}
+static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
+ int len, void *buffer)
+{
+ return __skb_header_pointer(skb, offset, len, skb->data,
+ skb_headlen(skb), buffer);
+}
+
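(Editorial note: the wrapper keeps the classic calling convention. A minimal usage sketch with a hypothetical helper, assuming kernel context; the caller-provided buffer is only used when the requested bytes are not in the linear area:)

static const struct udphdr *example_peek_udp(const struct sk_buff *skb,
					     int thoff, struct udphdr *buf)
{
	/* Returns a pointer into skb->data when possible, otherwise copies
	 * sizeof(*buf) bytes into *buf and returns buf; NULL if out of range.
	 */
	return skb_header_pointer(skb, thoff, sizeof(*buf), buf);
}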
/**
* skb_needs_linearize - check if we need to linearize a given skb
* depending on the given device features.
@@ -2671,6 +2775,8 @@ static inline ktime_t net_invalid_timestamp(void)
return ktime_set(0, 0);
}
+struct sk_buff *skb_clone_sk(struct sk_buff *skb);
+
#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
void skb_clone_tx_timestamp(struct sk_buff *skb);
@@ -2786,6 +2892,42 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
0 : __skb_checksum_complete(skb);
}
+static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (skb->csum_level == 0)
+ skb->ip_summed = CHECKSUM_NONE;
+ else
+ skb->csum_level--;
+ }
+}
+
+static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
+ skb->csum_level++;
+ } else if (skb->ip_summed == CHECKSUM_NONE) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = 0;
+ }
+}
+
+static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
+{
+ /* Mark current checksum as bad (typically called from GRO
+ * path). In the case that ip_summed is CHECKSUM_NONE
+ * this must be the first checksum encountered in the packet.
+ * When ip_summed is CHECKSUM_UNNECESSARY, this is the first
+ * checksum after the last one validated. For UDP, a zero
+ * checksum cannot be marked as bad.
+ */
+
+ if (skb->ip_summed == CHECKSUM_NONE ||
+ skb->ip_summed == CHECKSUM_UNNECESSARY)
+ skb->csum_bad = 1;
+}
+
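(Editorial note: these helpers go with the csum_level accounting and the removal of skb_pop_rcv_encapsulation() later in this file. A hedged sketch with hypothetical functions, assuming kernel context:)

static void example_outer_csum_verified(struct sk_buff *skb)
{
	/* e.g. a tunnel receive path that just validated one more checksum itself */
	__skb_incr_checksum_unnecessary(skb);
}

static void example_consume_outer_level(struct sk_buff *skb)
{
	/* account for the level that covered the outer header before handing
	 * the inner packet back to the stack
	 */
	__skb_decr_checksum_unnecessary(skb);
}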
/* Check if we need to perform checksum complete validation.
*
* Returns true if checksum complete is needed, false otherwise
@@ -2797,6 +2939,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
{
if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
skb->csum_valid = 1;
+ __skb_decr_checksum_unnecessary(skb);
return false;
}
@@ -2826,6 +2969,9 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
skb->csum_valid = 1;
return 0;
}
+ } else if (skb->csum_bad) {
+ /* ip_summed == CHECKSUM_NONE in this case */
+ return 1;
}
skb->csum = psum;
@@ -2883,6 +3029,26 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
#define skb_checksum_simple_validate(skb) \
__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
+static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
+{
+ return (skb->ip_summed == CHECKSUM_NONE &&
+ skb->csum_valid && !skb->csum_bad);
+}
+
+static inline void __skb_checksum_convert(struct sk_buff *skb,
+ __sum16 check, __wsum pseudo)
+{
+ skb->csum = ~pseudo;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+}
+
+#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \
+do { \
+ if (__skb_checksum_convert_check(skb)) \
+ __skb_checksum_convert(skb, check, \
+ compute_pseudo(skb, proto)); \
+} while (0)
+
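(Editorial note: a hedged sketch of a typical caller, assuming kernel context and the existing inet_compute_pseudo() helper from <net/ip.h>; the conversion only fires when ip_summed is CHECKSUM_NONE with csum_valid set and csum_bad clear, yielding a CHECKSUM_COMPLETE value that later consumers such as tunnels can reuse:)

static void example_udp4_try_convert(struct sk_buff *skb, const struct udphdr *uh)
{
	skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, inet_compute_pseudo);
}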
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
@@ -2896,7 +3062,7 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
atomic_inc(&nfct->use);
}
#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
@@ -2914,7 +3080,7 @@ static inline void nf_reset(struct sk_buff *skb)
nf_conntrack_put(skb->nfct);
skb->nfct = NULL;
#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
nf_bridge_put(skb->nf_bridge);
skb->nf_bridge = NULL;
#endif
@@ -2928,19 +3094,22 @@ static inline void nf_reset_trace(struct sk_buff *skb)
}
/* Note: This doesn't put any conntrack and bridge info in dst. */
-static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
+static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
+ bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
dst->nfct = src->nfct;
nf_conntrack_get(src->nfct);
- dst->nfctinfo = src->nfctinfo;
+ if (copy)
+ dst->nfctinfo = src->nfctinfo;
#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
dst->nf_bridge = src->nf_bridge;
nf_bridge_get(src->nf_bridge);
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
- dst->nf_trace = src->nf_trace;
+ if (copy)
+ dst->nf_trace = src->nf_trace;
#endif
}
@@ -2949,10 +3118,10 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(dst->nfct);
#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
nf_bridge_put(dst->nf_bridge);
#endif
- __nf_copy(dst, src);
+ __nf_copy(dst, src, true);
}
#ifdef CONFIG_NETWORK_SECMARK
@@ -3137,7 +3306,9 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
-u32 __skb_get_poff(const struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+ const struct flow_keys *keys, int hlen);
/**
* skb_head_is_locked - Determine if the skb->head is locked down
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index e713543336f1..46d188a9947c 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -253,6 +253,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* the device whose settings are being modified.
* @transfer: adds a message to the controller's transfer queue.
* @cleanup: frees controller-specific state
+ * @can_dma: determine whether this master supports DMA
* @queued: whether this master is providing an internal message queue
* @kworker: thread struct for message pump
* @kworker_task: pointer to task for message pump kworker thread
@@ -262,6 +263,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @cur_msg: the currently in-flight message
* @cur_msg_prepared: spi_prepare_message was called for the currently
* in-flight message
+ * @cur_msg_mapped: message has been mapped for DMA
* @xfer_completion: used by core transfer_one_message()
* @busy: message pump is busy
* @running: message pump is running
@@ -299,6 +301,10 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
* are not GPIOs (driven by the SPI controller itself).
+ * @dma_tx: DMA transmit channel
+ * @dma_rx: DMA receive channel
+ * @dummy_rx: dummy receive buffer for full-duplex devices
+ * @dummy_tx: dummy transmit buffer for full-duplex devices
*
* Each SPI master controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -632,6 +638,7 @@ struct spi_transfer {
* addresses for each transfer buffer
* @complete: called to report transaction completions
* @context: the argument to complete() when it's called
+ * @frame_length: the total number of bytes in the message
* @actual_length: the total number of bytes that were transferred in all
* successful segments
* @status: zero for success, else negative errno
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 0f86d85a9ce4..bda9b81357cc 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -65,6 +65,7 @@ struct old_linux_dirent;
struct perf_event_attr;
struct file_handle;
struct sigaltstack;
+union bpf_attr;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -875,5 +876,5 @@ asmlinkage long sys_seccomp(unsigned int op, unsigned int flags,
const char __user *uargs);
asmlinkage long sys_getrandom(char __user *buf, size_t count,
unsigned int flags);
-
+asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
#endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fa5258f322e7..e567f0dbf282 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -276,7 +276,7 @@ struct tcp_sock {
u32 retrans_stamp; /* Timestamp of the last retransmit,
* also used in SYN-SENT to remember stamp of
* the first SYN. */
- u32 undo_marker; /* tracking retrans started here. */
+ u32 undo_marker; /* snd_una upon a new recovery episode. */
int undo_retrans; /* number of undoable retransmissions. */
u32 total_retrans; /* Total retransmits for entire connection */
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 059052306831..9a82c7dc3fdd 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -183,13 +183,8 @@ static inline bool tick_nohz_full_cpu(int cpu)
extern void tick_nohz_init(void);
extern void __tick_nohz_full_check(void);
+extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_cpu(int cpu);
-
-static inline void tick_nohz_full_kick(void)
-{
- tick_nohz_full_kick_cpu(smp_processor_id());
-}
-
extern void tick_nohz_full_kick_all(void);
extern void __tick_nohz_task_switch(struct task_struct *tsk);
#else
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 247cfdcc4b08..ee3277593222 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -49,7 +49,11 @@ struct udp_sock {
unsigned int corkflag; /* Cork is required */
__u8 encap_type; /* Is this an Encapsulation socket? */
unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
- no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */
+ no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
+ convert_csum:1;/* On receive, convert checksum
+ * unnecessary to checksum complete
+ * if possible.
+ */
/*
* Following member retains the information to create a UDP header
* when the socket is uncorked.
@@ -98,6 +102,16 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
return udp_sk(sk)->no_check6_rx;
}
+static inline void udp_set_convert_csum(struct sock *sk, bool val)
+{
+ udp_sk(sk)->convert_csum = val;
+}
+
+static inline bool udp_get_convert_csum(struct sock *sk)
+{
+ return udp_sk(sk)->convert_csum;
+}
+
#define udp_portaddr_for_each_entry(__sk, node, list) \
hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node)
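(Editorial note: a hedged sketch of an encapsulation socket opting in, with a hypothetical setup function, assuming kernel context; with the bit set, the UDP receive path may convert a device-validated checksum into a CHECKSUM_COMPLETE value for the inner protocol:)

static bool example_setup_encap_sock(struct sock *sk)
{
	udp_set_convert_csum(sk, true);		/* sets udp_sk(sk)->convert_csum */

	return udp_get_convert_csum(sk);	/* true: rx may upgrade to CHECKSUM_COMPLETE */
}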
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 48d64e6ab292..290fbf0b6b8a 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -84,7 +84,7 @@ unsigned long iov_iter_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
unsigned long nr_segs, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
- unsigned maxpages, size_t *start);
+ size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index 502073a53dd3..b483abd34493 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -64,6 +64,7 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
+void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
#else
@@ -82,6 +83,7 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
+static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
#endif
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 2c02f3a8d2ba..c37bd4d06739 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -182,7 +182,6 @@ extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
* vga_get()...
*/
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
#ifdef CONFIG_VGA_ARB
extern struct pci_dev *vga_default_device(void);
extern void vga_set_default_device(struct pci_dev *pdev);
@@ -190,7 +189,6 @@ extern void vga_set_default_device(struct pci_dev *pdev);
static inline struct pci_dev *vga_default_device(void) { return NULL; };
static inline void vga_set_default_device(struct pci_dev *pdev) { };
#endif
-#endif
/**
* vga_conflicts
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index a0cc2e95ed1b..b996e6cde6bb 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -419,7 +419,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
1, (name))
#define create_singlethread_workqueue(name) \
- alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name))
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
extern void destroy_workqueue(struct workqueue_struct *wq);
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index fc910a622451..2fefcf491aa8 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -295,7 +295,7 @@ struct vb2_buffer {
* can return an error if hardware fails, in that case all
* buffers that have been already given by the @buf_queue
* callback are to be returned by the driver by calling
- * @vb2_buffer_done(VB2_BUF_STATE_DEQUEUED).
+ * @vb2_buffer_done(VB2_BUF_STATE_QUEUED).
* If you need a minimum number of buffers before you can
* start streaming, then set @min_buffers_needed in the
* vb2_queue structure. If that is non-zero then
@@ -380,6 +380,9 @@ struct v4l2_fh;
* @start_streaming_called: start_streaming() was called successfully and we
* started streaming.
* @error: a fatal error occurred on the queue
+ * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
+ * buffers. Only set for capture queues if qbuf has not yet been
+ * called since poll() needs to return POLLERR in that situation.
* @fileio: file io emulator internal data, used only if emulator is active
* @threadio: thread io internal data, used only if thread is active
*/
@@ -417,6 +420,7 @@ struct vb2_queue {
unsigned int streaming:1;
unsigned int start_streaming_called:1;
unsigned int error:1;
+ unsigned int waiting_for_buffers:1;
struct vb2_fileio_data *fileio;
struct vb2_threadio_data *threadio;
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index f679877bb601..d13573bb879e 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -202,8 +202,9 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
void ipv6_sock_ac_close(struct sock *sk);
-int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr);
+int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr);
int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
+void ipv6_ac_destroy_dev(struct inet6_dev *idev);
bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
diff --git a/include/net/ah.h b/include/net/ah.h
index ca95b98969dd..4e2dfa474a7e 100644
--- a/include/net/ah.h
+++ b/include/net/ah.h
@@ -3,9 +3,6 @@
#include <linux/skbuff.h>
-/* This is the maximum truncated ICV length that we know of. */
-#define MAX_AH_AUTH_LEN 64
-
struct crypto_ahash;
struct ah_data {
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 87cb1903640d..6465bae80a4f 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -122,9 +122,7 @@ static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
- __be32 diff[] = { ~from, to };
-
- *sum = csum_fold(csum_partial(diff, sizeof(diff), ~csum_unfold(*sum)));
+ *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from), to));
}
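(Editorial note: the rewrite performs the same RFC 1624 style incremental update without the temporary two-word array and csum_partial() call: un-fold the stored checksum, subtract the old word, add the new one, fold again. A hedged sketch of a typical caller, hypothetical function name, e.g. NAT rewriting an IPv4 destination address:)

static void example_rewrite_daddr(struct iphdr *iph, __be32 new_daddr)
{
	csum_replace4(&iph->check, iph->daddr, new_daddr);	/* keep header checksum valid */
	iph->daddr = new_daddr;
}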
/* Implements RFC 1624 (Incremental Internet Checksum)
diff --git a/include/net/codel.h b/include/net/codel.h
index fe0eab32ce76..aeee28081245 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -66,7 +66,7 @@ typedef s32 codel_tdiff_t;
static inline codel_time_t codel_get_time(void)
{
- u64 ns = ktime_to_ns(ktime_get());
+ u64 ns = ktime_get_ns();
return ns >> CODEL_SHIFT;
}
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 6efce384451e..58ad8c6492db 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -15,6 +15,17 @@
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+
+enum dsa_tag_protocol {
+ DSA_TAG_PROTO_NONE = 0,
+ DSA_TAG_PROTO_DSA,
+ DSA_TAG_PROTO_TRAILER,
+ DSA_TAG_PROTO_EDSA,
+ DSA_TAG_PROTO_BRCM,
+};
#define DSA_MAX_SWITCHES 4
#define DSA_MAX_PORTS 12
@@ -23,9 +34,15 @@ struct dsa_chip_data {
/*
* How to access the switch configuration registers.
*/
- struct device *mii_bus;
+ struct device *host_dev;
int sw_addr;
+ /* Device tree node pointer for this specific switch chip
+ * used during switch setup in case additional properties
+ * and resources need to be used
+ */
+ struct device_node *of_node;
+
/*
* The names of the switch's ports. Use "cpu" to
* designate the switch port that the cpu is connected to,
@@ -34,6 +51,7 @@ struct dsa_chip_data {
* or any other string to indicate this is a physical port.
*/
char *port_names[DSA_MAX_PORTS];
+ struct device_node *port_dn[DSA_MAX_PORTS];
/*
* An array (with nr_chips elements) of which element [a]
@@ -59,6 +77,8 @@ struct dsa_platform_data {
struct dsa_chip_data *chip;
};
+struct packet_type;
+
struct dsa_switch_tree {
/*
* Configuration data for the platform device that owns
@@ -71,7 +91,11 @@ struct dsa_switch_tree {
* protocol to use.
*/
struct net_device *master_netdev;
- __be16 tag_protocol;
+ int (*rcv)(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt,
+ struct net_device *orig_dev);
+ enum dsa_tag_protocol tag_protocol;
/*
* The switch and port to which the CPU is attached.
@@ -110,15 +134,16 @@ struct dsa_switch {
struct dsa_switch_driver *drv;
/*
- * Reference to mii bus to use.
+ * Reference to host device to use.
*/
- struct mii_bus *master_mii_bus;
+ struct device *master_dev;
/*
* Slave mii_bus and devices for the individual ports.
*/
u32 dsa_port_mask;
u32 phys_port_mask;
+ u32 phys_mii_mask;
struct mii_bus *slave_mii_bus;
struct net_device *ports[DSA_MAX_PORTS];
};
@@ -147,15 +172,16 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
struct dsa_switch_driver {
struct list_head list;
- __be16 tag_protocol;
+ enum dsa_tag_protocol tag_protocol;
int priv_size;
/*
* Probing and setup.
*/
- char *(*probe)(struct mii_bus *bus, int sw_addr);
+ char *(*probe)(struct device *host_dev, int sw_addr);
int (*setup)(struct dsa_switch *ds);
int (*set_addr)(struct dsa_switch *ds, u8 *addr);
+ u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
/*
* Access to the switch's PHY registers.
@@ -170,37 +196,64 @@ struct dsa_switch_driver {
void (*poll_link)(struct dsa_switch *ds);
/*
+ * Link state adjustment (called from libphy)
+ */
+ void (*adjust_link)(struct dsa_switch *ds, int port,
+ struct phy_device *phydev);
+ void (*fixed_link_update)(struct dsa_switch *ds, int port,
+ struct fixed_phy_status *st);
+
+ /*
* ethtool hardware statistics.
*/
void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data);
void (*get_ethtool_stats)(struct dsa_switch *ds,
int port, uint64_t *data);
int (*get_sset_count)(struct dsa_switch *ds);
+
+ /*
+ * ethtool Wake-on-LAN
+ */
+ void (*get_wol)(struct dsa_switch *ds, int port,
+ struct ethtool_wolinfo *w);
+ int (*set_wol)(struct dsa_switch *ds, int port,
+ struct ethtool_wolinfo *w);
+
+ /*
+ * Suspend and resume
+ */
+ int (*suspend)(struct dsa_switch *ds);
+ int (*resume)(struct dsa_switch *ds);
+
+ /*
+ * Port enable/disable
+ */
+ int (*port_enable)(struct dsa_switch *ds, int port,
+ struct phy_device *phy);
+ void (*port_disable)(struct dsa_switch *ds, int port,
+ struct phy_device *phy);
+
+ /*
+ * EEE settings
+ */
+ int (*set_eee)(struct dsa_switch *ds, int port,
+ struct phy_device *phydev,
+ struct ethtool_eee *e);
+ int (*get_eee)(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e);
};
void register_switch_driver(struct dsa_switch_driver *type);
void unregister_switch_driver(struct dsa_switch_driver *type);
+struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
static inline void *ds_to_priv(struct dsa_switch *ds)
{
return (void *)(ds + 1);
}
-/*
- * The original DSA tag format and some other tag formats have no
- * ethertype, which means that we need to add a little hack to the
- * networking receive path to make sure that received frames get
- * the right ->protocol assigned to them when one of those tag
- * formats is in use.
- */
-static inline bool dsa_uses_dsa_tags(struct dsa_switch_tree *dst)
-{
- return !!(dst->tag_protocol == htons(ETH_P_DSA));
-}
-
-static inline bool dsa_uses_trailer_tags(struct dsa_switch_tree *dst)
+static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
{
- return !!(dst->tag_protocol == htons(ETH_P_TRAILER));
+ return dst->rcv != NULL;
}
-
#endif
diff --git a/include/net/dst.h b/include/net/dst.h
index 71c60f42be48..a8ae4e760778 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -480,6 +480,7 @@ void dst_init(void);
/* Flags for xfrm_lookup flags argument. */
enum {
XFRM_LOOKUP_ICMP = 1 << 0,
+ XFRM_LOOKUP_QUEUE = 1 << 1,
};
struct flowi;
@@ -490,7 +491,16 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
int flags)
{
return dst_orig;
-}
+}
+
+static inline struct dst_entry *xfrm_lookup_route(struct net *net,
+ struct dst_entry *dst_orig,
+ const struct flowi *fl,
+ struct sock *sk,
+ int flags)
+{
+ return dst_orig;
+}
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
@@ -502,6 +512,10 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, struct sock *sk,
int flags);
+struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
+ const struct flowi *fl, struct sock *sk,
+ int flags);
+
/* skb attached with this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
index 6667a054763a..7ee2df083542 100644
--- a/include/net/flow_keys.h
+++ b/include/net/flow_keys.h
@@ -27,7 +27,19 @@ struct flow_keys {
u8 ip_proto;
};
-bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
-__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto);
+bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
+ void *data, __be16 proto, int nhoff, int hlen);
+static inline bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
+{
+ return __skb_flow_dissect(skb, flow, NULL, 0, 0, 0);
+}
+__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+ void *data, int hlen_proto);
+static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
+{
+ return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
+}
u32 flow_hash_from_keys(struct flow_keys *keys);
+unsigned int flow_get_hlen(const unsigned char *data, unsigned int max_len,
+ __be16 protocol);
#endif
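(Editorial note: the inline wrappers preserve the old entry points, while raw-buffer callers can pass data/hlen to the double-underscore variants directly. A hedged usage sketch with a hypothetical function, assuming kernel context:)

static u32 example_flow_hash(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return 0;			/* could not parse the headers */

	return flow_hash_from_keys(&keys);
}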
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index ea4271dceff0..cbafa3768d48 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -6,6 +6,11 @@
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
+struct gnet_stats_basic_cpu {
+ struct gnet_stats_basic_packed bstats;
+ struct u64_stats_sync syncp;
+};
+
struct gnet_dump {
spinlock_t * lock;
struct sk_buff * skb;
@@ -27,21 +32,29 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
spinlock_t *lock, struct gnet_dump *d);
int gnet_stats_copy_basic(struct gnet_dump *d,
+ struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b);
+void __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
const struct gnet_stats_basic_packed *b,
struct gnet_stats_rate_est64 *r);
-int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q);
+int gnet_stats_copy_queue(struct gnet_dump *d,
+ struct gnet_stats_queue __percpu *cpu_q,
+ struct gnet_stats_queue *q, __u32 qlen);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
int gnet_stats_finish_copy(struct gnet_dump *d);
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est,
spinlock_t *stats_lock, struct nlattr *opt);
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est64 *rate_est);
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est,
spinlock_t *stats_lock, struct nlattr *opt);
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 93695f0e22a5..af10c2cf8a1d 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -394,4 +394,12 @@ static inline int genl_set_err(struct genl_family *family, struct net *net,
return netlink_set_err(net->genl_sock, portid, group, code);
}
+static inline int genl_has_listeners(struct genl_family *family,
+ struct sock *sk, unsigned int group)
+{
+ if (WARN_ON_ONCE(group >= family->n_mcgrps))
+ return -EINVAL;
+ group = family->mcgrp_offset + group;
+ return netlink_has_listeners(sk, group);
+}
#endif /* __NET_GENERIC_NETLINK_H */
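(Editorial note: a hedged sketch of the intended use with a hypothetical function; the group index is family-relative and translated via mcgrp_offset inside the helper. The point is to skip building a notification when nobody has joined the group:)

static void example_maybe_notify(struct genl_family *family, struct net *net)
{
	if (!genl_has_listeners(family, net->genl_sock, 0))
		return;		/* nobody subscribed to group 0, skip the allocation */

	/* ... allocate, fill and genlmsg_multicast() the notification here ... */
}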
diff --git a/include/net/gue.h b/include/net/gue.h
new file mode 100644
index 000000000000..b6c332788084
--- /dev/null
+++ b/include/net/gue.h
@@ -0,0 +1,23 @@
+#ifndef __NET_GUE_H
+#define __NET_GUE_H
+
+struct guehdr {
+ union {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 hlen:4,
+ version:4;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u8 version:4,
+ hlen:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 next_hdr;
+ __u16 flags;
+ };
+ __u32 word;
+ };
+};
+
+#endif
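(Editorial note: for orientation, a hedged sketch of parsing this header with a hypothetical function, assuming kernel context and that skb->data already points at the GUE header; hlen counts 32-bit words of optional fields after the 4-byte base header, and next_hdr carries the inner IP protocol number:)

static int example_parse_gue(struct sk_buff *skb, u8 *inner_proto)
{
	struct guehdr *guehdr;
	size_t len;

	if (!pskb_may_pull(skb, sizeof(struct guehdr)))
		return -EINVAL;

	guehdr = (struct guehdr *)skb->data;
	if (guehdr->version != 0)
		return -EINVAL;

	len = sizeof(struct guehdr) + guehdr->hlen * 4;
	if (!pskb_may_pull(skb, len))
		return -EINVAL;

	guehdr = (struct guehdr *)skb->data;	/* pskb_may_pull() may have moved the data */
	*inner_proto = guehdr->next_hdr;
	__skb_pull(skb, len);			/* inner packet now starts at skb->data */
	return 0;
}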
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index d07b1a64b4e7..55a8d4056cc9 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -35,7 +35,6 @@ enum {
INET6_IFADDR_STATE_DAD,
INET6_IFADDR_STATE_POSTDAD,
INET6_IFADDR_STATE_ERRDAD,
- INET6_IFADDR_STATE_UP,
INET6_IFADDR_STATE_DEAD,
};
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 5fbe6568c3cf..848e85cb5c61 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -242,6 +242,15 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
#endif
}
+static inline unsigned long
+inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
+ unsigned long max_when)
+{
+ u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;
+
+ return (unsigned long)min_t(u64, when, max_when);
+}
+
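(Editorial note: a hedged sketch of the intended use, assuming kernel context and that <net/tcp.h> constants such as TCP_RTO_MAX and ICSK_TIME_RETRANS are available; the 64-bit shift avoids overflow when icsk_backoff is large, and the result is clamped to the caller's ceiling before rearming the timer:)

static void example_rearm_retransmit_timer(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned long when = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, when, TCP_RTO_MAX);
}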
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
struct request_sock *inet_csk_search_req(const struct sock *sk,
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 01d590ee5e7e..80479abddf73 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -61,7 +61,6 @@ struct inet_peer {
struct inet_peer_base {
struct inet_peer __rcu *root;
seqlock_t lock;
- u32 flush_seq;
int total;
};
diff --git a/include/net/ip.h b/include/net/ip.h
index db4a771b9ef3..0bb620702929 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -180,8 +180,10 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
- __be32 saddr, const struct ip_reply_arg *arg,
+void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
+ const struct ip_options *sopt,
+ __be32 daddr, __be32 saddr,
+ const struct ip_reply_arg *arg,
unsigned int len);
#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
@@ -229,8 +231,6 @@ static inline int inet_is_local_reserved_port(struct net *net, int port)
}
#endif
-extern int sysctl_ip_nonlocal_bind;
-
/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
@@ -364,6 +364,14 @@ static inline void inet_set_txhash(struct sock *sk)
sk->sk_txhash = flow_hash_from_keys(&keys);
}
+static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
+{
+ const struct iphdr *iph = skb_gro_network_header(skb);
+
+ return csum_tcpudp_nofold(iph->saddr, iph->daddr,
+ skb_gro_len(skb), proto, 0);
+}
+
/*
* Map a multicast IP onto multicast MAC for type ethernet.
*/
@@ -505,7 +513,14 @@ int ip_forward(struct sk_buff *skb);
void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
__be32 daddr, struct rtable *rt, int is_frag);
-int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
+
+int __ip_options_echo(struct ip_options *dopt, struct sk_buff *skb,
+ const struct ip_options *sopt);
+static inline int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
+{
+ return __ip_options_echo(dopt, skb, &IPCB(skb)->opt);
+}
+
void ip_options_fragment(struct sk_buff *skb);
int ip_options_compile(struct net *net, struct ip_options *opt,
struct sk_buff *skb);
@@ -542,6 +557,10 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
u32 info);
+bool icmp_global_allow(void);
+extern int sysctl_icmp_msgs_per_sec;
+extern int sysctl_icmp_msgs_burst;
+
#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index 55236cb71174..1a49b73f7f6e 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -48,6 +48,14 @@ static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto)
skb->len, proto, 0));
}
+static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
+{
+ const struct ipv6hdr *iph = skb_gro_network_header(skb);
+
+ return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
+ skb_gro_len(skb), proto, 0));
+}
+
static __inline__ __sum16 tcp_v6_check(int len,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 9bcb220bd4ad..cf485f9aa563 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -114,16 +114,13 @@ struct rt6_info {
u32 rt6i_flags;
struct rt6key rt6i_src;
struct rt6key rt6i_prefsrc;
- u32 rt6i_metric;
struct inet6_dev *rt6i_idev;
unsigned long _rt6i_peer;
- u32 rt6i_genid;
-
+ u32 rt6i_metric;
/* more non-fragment space at head required */
unsigned short rt6i_nfheader_len;
-
u8 rt6i_protocol;
};
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 9922093f575e..dc9d2a27c315 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -65,7 +65,8 @@ struct fnhe_hash_bucket {
struct fib_nh_exception __rcu *chain;
};
-#define FNHE_HASH_SIZE 2048
+#define FNHE_HASH_SHIFT 11
+#define FNHE_HASH_SIZE (1 << FNHE_HASH_SHIFT)
#define FNHE_RECLAIM_DEPTH 5
struct fib_nh {
@@ -87,7 +88,7 @@ struct fib_nh {
int nh_saddr_genid;
struct rtable __rcu * __percpu *nh_pcpu_rth_output;
struct rtable __rcu *nh_rth_input;
- struct fnhe_hash_bucket *nh_exceptions;
+ struct fnhe_hash_bucket __rcu *nh_exceptions;
};
/*
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 8dd8cab88b87..7f538ba6e267 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -10,6 +10,7 @@
#include <net/gro_cells.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
+#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#if IS_ENABLED(CONFIG_IPV6)
@@ -31,6 +32,13 @@ struct ip_tunnel_6rd_parm {
};
#endif
+struct ip_tunnel_encap {
+ __u16 type;
+ __u16 flags;
+ __be16 sport;
+ __be16 dport;
+};
+
struct ip_tunnel_prl_entry {
struct ip_tunnel_prl_entry __rcu *next;
__be32 addr;
@@ -56,13 +64,18 @@ struct ip_tunnel {
/* These four fields used only by GRE */
__u32 i_seqno; /* The last seen seqno */
__u32 o_seqno; /* The last output seqno */
- int hlen; /* Precalculated header length */
+ int tun_hlen; /* Precalculated header length */
int mlink;
struct ip_tunnel_dst __percpu *dst_cache;
struct ip_tunnel_parm parms;
+ int encap_hlen; /* Encap header length (FOU,GUE) */
+ struct ip_tunnel_encap encap;
+
+ int hlen; /* tun_hlen + encap_hlen */
+
/* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm ip6rd;
@@ -114,6 +127,8 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
+int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
+ u8 *protocol, struct flowi4 *fl4);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
@@ -131,6 +146,8 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p);
void ip_tunnel_setup(struct net_device *dev, int net_id);
void ip_tunnel_dst_reset_all(struct ip_tunnel *t);
+int ip_tunnel_encap_setup(struct ip_tunnel *t,
+ struct ip_tunnel_encap *ipencap);
/* Extract dsfield from inner protocol */
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 624a8a54806d..615b20b58545 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1,6 +1,5 @@
-/*
- * IP Virtual Server
- * data structure and functionality definitions
+/* IP Virtual Server
+ * data structure and functionality definitions
*/
#ifndef _NET_IP_VS_H
@@ -12,7 +11,7 @@
#include <linux/list.h> /* for struct list_head */
#include <linux/spinlock.h> /* for struct rwlock_t */
-#include <linux/atomic.h> /* for struct atomic_t */
+#include <linux/atomic.h> /* for struct atomic_t */
#include <linux/compiler.h>
#include <linux/timer.h>
#include <linux/bug.h>
@@ -30,15 +29,13 @@
#endif
#include <net/net_namespace.h> /* Netw namespace */
-/*
- * Generic access of ipvs struct
- */
+/* Generic access of ipvs struct */
static inline struct netns_ipvs *net_ipvs(struct net* net)
{
return net->ipvs;
}
-/*
- * Get net ptr from skb in traffic cases
+
+/* Get net ptr from skb in traffic cases
* use skb_sknet when call is from userland (ioctl or netlink)
*/
static inline struct net *skb_net(const struct sk_buff *skb)
@@ -90,8 +87,8 @@ static inline struct net *skb_sknet(const struct sk_buff *skb)
return &init_net;
#endif
}
-/*
- * This one needed for single_open_net since net is stored directly in
+
+/* This one needed for single_open_net since net is stored directly in
* private not as a struct i.e. seq_file_net can't be used.
*/
static inline struct net *seq_file_single_net(struct seq_file *seq)
@@ -108,7 +105,7 @@ extern int ip_vs_conn_tab_size;
struct ip_vs_iphdr {
__u32 len; /* IPv4 simply where L4 starts
- IPv6 where L4 Transport Header starts */
+ * IPv6 where L4 Transport Header starts */
__u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/
__s16 protocol;
__s32 flags;
@@ -304,16 +301,11 @@ static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
#define LeaveFunction(level) do {} while (0)
#endif
-
-/*
- * The port number of FTP service (in network order).
- */
+/* The port number of FTP service (in network order). */
#define FTPPORT cpu_to_be16(21)
#define FTPDATA cpu_to_be16(20)
-/*
- * TCP State Values
- */
+/* TCP State Values */
enum {
IP_VS_TCP_S_NONE = 0,
IP_VS_TCP_S_ESTABLISHED,
@@ -329,25 +321,19 @@ enum {
IP_VS_TCP_S_LAST
};
-/*
- * UDP State Values
- */
+/* UDP State Values */
enum {
IP_VS_UDP_S_NORMAL,
IP_VS_UDP_S_LAST,
};
-/*
- * ICMP State Values
- */
+/* ICMP State Values */
enum {
IP_VS_ICMP_S_NORMAL,
IP_VS_ICMP_S_LAST,
};
-/*
- * SCTP State Values
- */
+/* SCTP State Values */
enum ip_vs_sctp_states {
IP_VS_SCTP_S_NONE,
IP_VS_SCTP_S_INIT1,
@@ -366,21 +352,18 @@ enum ip_vs_sctp_states {
IP_VS_SCTP_S_LAST
};
-/*
- * Delta sequence info structure
- * Each ip_vs_conn has 2 (output AND input seq. changes).
- * Only used in the VS/NAT.
+/* Delta sequence info structure
+ * Each ip_vs_conn has 2 (output AND input seq. changes).
+ * Only used in the VS/NAT.
*/
struct ip_vs_seq {
__u32 init_seq; /* Add delta from this seq */
__u32 delta; /* Delta in sequence numbers */
__u32 previous_delta; /* Delta in sequence numbers
- before last resized pkt */
+ * before last resized pkt */
};
-/*
- * counters per cpu
- */
+/* counters per cpu */
struct ip_vs_counters {
__u32 conns; /* connections scheduled */
__u32 inpkts; /* incoming packets */
@@ -388,17 +371,13 @@ struct ip_vs_counters {
__u64 inbytes; /* incoming bytes */
__u64 outbytes; /* outgoing bytes */
};
-/*
- * Stats per cpu
- */
+/* Stats per cpu */
struct ip_vs_cpu_stats {
struct ip_vs_counters ustats;
struct u64_stats_sync syncp;
};
-/*
- * IPVS statistics objects
- */
+/* IPVS statistics objects */
struct ip_vs_estimator {
struct list_head list;
@@ -491,9 +470,7 @@ struct ip_vs_protocol {
void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
};
-/*
- * protocol data per netns
- */
+/* protocol data per netns */
struct ip_vs_proto_data {
struct ip_vs_proto_data *next;
struct ip_vs_protocol *pp;
@@ -520,9 +497,7 @@ struct ip_vs_conn_param {
__u8 pe_data_len;
};
-/*
- * IP_VS structure allocated for each dynamically scheduled connection
- */
+/* IP_VS structure allocated for each dynamically scheduled connection */
struct ip_vs_conn {
struct hlist_node c_list; /* hashed list heads */
/* Protocol, addresses and port numbers */
@@ -535,6 +510,7 @@ struct ip_vs_conn {
union nf_inet_addr daddr; /* destination address */
volatile __u32 flags; /* status flags */
__u16 protocol; /* Which protocol (TCP/UDP) */
+ __u16 daf; /* Address family of the dest */
#ifdef CONFIG_NET_NS
struct net *net; /* Name space */
#endif
@@ -560,17 +536,18 @@ struct ip_vs_conn {
struct ip_vs_dest *dest; /* real server */
atomic_t in_pkts; /* incoming packet counter */
- /* packet transmitter for different forwarding methods. If it
- mangles the packet, it must return NF_DROP or better NF_STOLEN,
- otherwise this must be changed to a sk_buff **.
- NF_ACCEPT can be returned when destination is local.
+ /* Packet transmitter for different forwarding methods. If it
+ * mangles the packet, it must return NF_DROP or better NF_STOLEN,
+ * otherwise this must be changed to a sk_buff **.
+ * NF_ACCEPT can be returned when destination is local.
*/
int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
/* Note: we can group the following members into a structure,
- in order to save more space, and the following members are
- only used in VS/NAT anyway */
+ * in order to save more space, and the following members are
+ * only used in VS/NAT anyway
+ */
struct ip_vs_app *app; /* bound ip_vs_app object */
void *app_data; /* Application private data */
struct ip_vs_seq in_seq; /* incoming seq. struct */
@@ -583,9 +560,7 @@ struct ip_vs_conn {
struct rcu_head rcu_head;
};
-/*
- * To save some memory in conn table when name space is disabled.
- */
+/* To save some memory in conn table when name space is disabled. */
static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
{
#ifdef CONFIG_NET_NS
@@ -594,6 +569,7 @@ static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
return &init_net;
#endif
}
+
static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net)
{
#ifdef CONFIG_NET_NS
@@ -611,13 +587,12 @@ static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp,
#endif
}
-/*
- * Extended internal versions of struct ip_vs_service_user and
- * ip_vs_dest_user for IPv6 support.
+/* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user
+ * for IPv6 support.
*
- * We need these to conveniently pass around service and destination
- * options, but unfortunately, we also need to keep the old definitions to
- * maintain userspace backwards compatibility for the setsockopt interface.
+ * We need these to conveniently pass around service and destination
+ * options, but unfortunately, we also need to keep the old definitions to
+ * maintain userspace backwards compatibility for the setsockopt interface.
*/
struct ip_vs_service_user_kern {
/* virtual service addresses */
@@ -648,12 +623,15 @@ struct ip_vs_dest_user_kern {
/* thresholds for active connections */
u32 u_threshold; /* upper threshold */
u32 l_threshold; /* lower threshold */
+
+ /* Address family of addr */
+ u16 af;
};
/*
- * The information about the virtual service offered to the net
- * and the forwarding entries
+ * The information about the virtual service offered to the net and the
+ * forwarding entries.
*/
struct ip_vs_service {
struct hlist_node s_list; /* for normal service table */
@@ -693,9 +671,8 @@ struct ip_vs_dest_dst {
struct rcu_head rcu_head;
};
-/*
- * The real server destination forwarding entry
- * with ip address, port number, and so on.
+/* The real server destination forwarding entry with ip address, port number,
+ * and so on.
*/
struct ip_vs_dest {
struct list_head n_list; /* for the dests in the service */
@@ -734,10 +711,7 @@ struct ip_vs_dest {
unsigned int in_rs_table:1; /* we are in rs_table */
};
-
-/*
- * The scheduler object
- */
+/* The scheduler object */
struct ip_vs_scheduler {
struct list_head n_list; /* d-linked list head */
char *name; /* scheduler name */
@@ -777,9 +751,7 @@ struct ip_vs_pe {
int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
};
-/*
- * The application module object (a.k.a. app incarnation)
- */
+/* The application module object (a.k.a. app incarnation) */
struct ip_vs_app {
struct list_head a_list; /* member in app list */
int type; /* IP_VS_APP_TYPE_xxx */
@@ -795,16 +767,14 @@ struct ip_vs_app {
atomic_t usecnt; /* usage counter */
struct rcu_head rcu_head;
- /*
- * output hook: Process packet in inout direction, diff set for TCP.
+ /* output hook: Process packet in inout direction, diff set for TCP.
* Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
* 2=Mangled but checksum was not updated
*/
int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *,
struct sk_buff *, int *diff);
- /*
- * input hook: Process packet in outin direction, diff set for TCP.
+ /* input hook: Process packet in outin direction, diff set for TCP.
* Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
* 2=Mangled but checksum was not updated
*/
@@ -863,9 +833,7 @@ struct ipvs_master_sync_state {
struct netns_ipvs {
int gen; /* Generation */
int enable; /* enable like nf_hooks do */
- /*
- * Hash table: for real service lookups
- */
+ /* Hash table: for real service lookups */
#define IP_VS_RTAB_BITS 4
#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
@@ -899,7 +867,7 @@ struct netns_ipvs {
struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
#endif
/* ip_vs_conn */
- atomic_t conn_count; /* connection counter */
+ atomic_t conn_count; /* connection counter */
/* ip_vs_ctl */
struct ip_vs_stats tot_stats; /* Statistics & est. */
@@ -986,6 +954,10 @@ struct netns_ipvs {
char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
/* net name space ptr */
struct net *net; /* Needed by timer routines */
+	/* Number of heterogeneous destinations, needed because heterogeneous
+	 * destinations are not supported when synchronization is enabled.
+ */
+ unsigned int mixed_address_family_dests;
};
#define DEFAULT_SYNC_THRESHOLD 3
@@ -1139,9 +1111,8 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
#endif
-/*
- * IPVS core functions
- * (from ip_vs_core.c)
+/* IPVS core functions
+ * (from ip_vs_core.c)
*/
const char *ip_vs_proto_name(unsigned int proto);
void ip_vs_init_hash_table(struct list_head *table, int rows);
@@ -1149,11 +1120,9 @@ void ip_vs_init_hash_table(struct list_head *table, int rows);
#define IP_VS_APP_TYPE_FTP 1
-/*
- * ip_vs_conn handling functions
- * (from ip_vs_conn.c)
+/* ip_vs_conn handling functions
+ * (from ip_vs_conn.c)
*/
-
enum {
IP_VS_DIR_INPUT = 0,
IP_VS_DIR_OUTPUT,
@@ -1210,7 +1179,7 @@ static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
void ip_vs_conn_put(struct ip_vs_conn *cp);
void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
-struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
+struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
const union nf_inet_addr *daddr,
__be16 dport, unsigned int flags,
struct ip_vs_dest *dest, __u32 fwmark);
@@ -1284,9 +1253,7 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
atomic_inc(&ctl_cp->n_control);
}
-/*
- * IPVS netns init & cleanup functions
- */
+/* IPVS netns init & cleanup functions */
int ip_vs_estimator_net_init(struct net *net);
int ip_vs_control_net_init(struct net *net);
int ip_vs_protocol_net_init(struct net *net);
@@ -1301,9 +1268,8 @@ void ip_vs_estimator_net_cleanup(struct net *net);
void ip_vs_sync_net_cleanup(struct net *net);
void ip_vs_service_net_cleanup(struct net *net);
-/*
- * IPVS application functions
- * (from ip_vs_app.c)
+/* IPVS application functions
+ * (from ip_vs_app.c)
*/
#define IP_VS_APP_MAX_PORTS 8
struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app);
@@ -1323,9 +1289,7 @@ int unregister_ip_vs_pe(struct ip_vs_pe *pe);
struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
-/*
- * Use a #define to avoid all of module.h just for these trivial ops
- */
+/* Use a #define to avoid all of module.h just for these trivial ops */
#define ip_vs_pe_get(pe) \
if (pe && pe->module) \
__module_get(pe->module);
@@ -1334,9 +1298,7 @@ struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
if (pe && pe->module) \
module_put(pe->module);
-/*
- * IPVS protocol functions (from ip_vs_proto.c)
- */
+/* IPVS protocol functions (from ip_vs_proto.c) */
int ip_vs_protocol_init(void);
void ip_vs_protocol_cleanup(void);
void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
@@ -1354,9 +1316,8 @@ extern struct ip_vs_protocol ip_vs_protocol_esp;
extern struct ip_vs_protocol ip_vs_protocol_ah;
extern struct ip_vs_protocol ip_vs_protocol_sctp;
-/*
- * Registering/unregistering scheduler functions
- * (from ip_vs_sched.c)
+/* Registering/unregistering scheduler functions
+ * (from ip_vs_sched.c)
*/
int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
@@ -1375,10 +1336,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
-
-/*
- * IPVS control data and functions (from ip_vs_ctl.c)
- */
+/* IPVS control data and functions (from ip_vs_ctl.c) */
extern struct ip_vs_stats ip_vs_stats;
extern int sysctl_ip_vs_sync_ver;
@@ -1396,8 +1354,9 @@ void ip_vs_unregister_nl_ioctl(void);
int ip_vs_control_init(void);
void ip_vs_control_cleanup(void);
struct ip_vs_dest *
-ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
- __be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
+ip_vs_find_dest(struct net *net, int svc_af, int dest_af,
+ const union nf_inet_addr *daddr, __be16 dport,
+ const union nf_inet_addr *vaddr, __be16 vport,
__u16 protocol, __u32 fwmark, __u32 flags);
void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
@@ -1418,26 +1377,21 @@ static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
kfree(dest);
}
-/*
- * IPVS sync daemon data and function prototypes
- * (from ip_vs_sync.c)
+/* IPVS sync daemon data and function prototypes
+ * (from ip_vs_sync.c)
*/
int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid);
int stop_sync_thread(struct net *net, int state);
void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
-/*
- * IPVS rate estimator prototypes (from ip_vs_est.c)
- */
+/* IPVS rate estimator prototypes (from ip_vs_est.c) */
void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
void ip_vs_zero_estimator(struct ip_vs_stats *stats);
void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
struct ip_vs_stats *stats);
-/*
- * Various IPVS packet transmitters (from ip_vs_xmit.c)
- */
+/* Various IPVS packet transmitters (from ip_vs_xmit.c) */
int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
@@ -1468,12 +1422,10 @@ int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
#endif
#ifdef CONFIG_SYSCTL
-/*
- * This is a simple mechanism to ignore packets when
- * we are loaded. Just set ip_vs_drop_rate to 'n' and
- * we start to drop 1/rate of the packets
+/* This is a simple mechanism to ignore packets when
+ * we are loaded. Just set ip_vs_drop_rate to 'n' and
+ * we start to drop 1/rate of the packets
*/
-
static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
{
if (!ipvs->drop_rate)
@@ -1487,9 +1439,7 @@ static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; }
#endif
-/*
- * ip_vs_fwd_tag returns the forwarding tag of the connection
- */
+/* ip_vs_fwd_tag returns the forwarding tag of the connection */
#define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK)
static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
@@ -1548,9 +1498,7 @@ static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
return csum_partial(diff, sizeof(diff), oldsum);
}
-/*
- * Forget current conntrack (unconfirmed) and attach notrack entry
- */
+/* Forget current conntrack (unconfirmed) and attach notrack entry */
static inline void ip_vs_notrack(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
@@ -1567,9 +1515,8 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
}
#ifdef CONFIG_IP_VS_NFCT
-/*
- * Netfilter connection tracking
- * (from ip_vs_nfct.c)
+/* Netfilter connection tracking
+ * (from ip_vs_nfct.c)
*/
static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
@@ -1608,14 +1555,12 @@ static inline int ip_vs_confirm_conntrack(struct sk_buff *skb)
static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
{
}
-/* CONFIG_IP_VS_NFCT */
-#endif
+#endif /* CONFIG_IP_VS_NFCT */
static inline int
ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{
- /*
- * We think the overhead of processing active connections is 256
+ /* We think the overhead of processing active connections is 256
* times higher than that of inactive connections in average. (This
* 256 times might not be accurate, we will change it later) We
* use the following formula to estimate the overhead now:
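
A stand-alone sketch of the weighting described in the comment above (the in-kernel expression continues past the end of this hunk, so the 256:1 formula below is an assumption taken from the comment, not copied from the file):

#include <stdio.h>

/* Estimate per-destination load assuming an active connection costs
 * roughly 256 times as much as an inactive one, as described above.
 */
static unsigned int conn_overhead(unsigned int activeconns,
				  unsigned int inactconns)
{
	return (activeconns << 8) + inactconns;
}

int main(void)
{
	printf("%u\n", conn_overhead(3, 100));	/* 3 * 256 + 100 = 868 */
	return 0;
}
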
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index a2db816e8461..97f472012438 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -121,6 +121,7 @@ struct frag_hdr {
/* sysctls */
extern int sysctl_mld_max_msf;
+extern int sysctl_mld_qrv;
#define _DEVINC(net, statname, modifier, idev, field) \
({ \
@@ -287,7 +288,8 @@ struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
struct ipv6_txoptions *opt);
-bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb);
+bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
+ const struct inet6_skb_parm *opt);
static inline bool ipv6_accept_ra(struct inet6_dev *idev)
{
diff --git a/include/net/mld.h b/include/net/mld.h
index faa1d161bf24..01d751303498 100644
--- a/include/net/mld.h
+++ b/include/net/mld.h
@@ -88,12 +88,15 @@ struct mld2_query {
#define MLDV2_QQIC_EXP(value) (((value) >> 4) & 0x07)
#define MLDV2_QQIC_MAN(value) ((value) & 0x0f)
+#define MLD_EXP_MIN_LIMIT 32768UL
+#define MLDV1_MRD_MAX_COMPAT (MLD_EXP_MIN_LIMIT - 1)
+
static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2)
{
/* RFC3810, 5.1.3. Maximum Response Code */
unsigned long ret, mc_mrc = ntohs(mlh2->mld2q_mrc);
- if (mc_mrc < 32768) {
+ if (mc_mrc < MLD_EXP_MIN_LIMIT) {
ret = mc_mrc;
} else {
unsigned long mc_man, mc_exp;
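
The exponent branch of mldv2_mrc() lies past the end of this hunk; a stand-alone sketch of the full RFC 3810, section 5.1.3 decoding, with the field masks written out per the RFC rather than taken from the header:

#include <stdio.h>

/* Decode an MLDv2 Maximum Response Code (host byte order) into a delay in
 * milliseconds.  Codes below 32768 are literal; larger codes carry a
 * 3-bit exponent and a 12-bit mantissa.
 */
static unsigned long mrc_to_msecs(unsigned int mrc)
{
	unsigned int exp, man;

	if (mrc < 32768)
		return mrc;
	exp = (mrc >> 12) & 0x7;
	man = mrc & 0xfff;
	return (man | 0x1000) << (exp + 3);
}

int main(void)
{
	printf("%lu\n", mrc_to_msecs(1000));	/* 1000, taken literally */
	printf("%lu\n", mrc_to_msecs(0x8000));	/* 0x1000 << 3 = 32768 */
	return 0;
}
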
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 47f425464f84..f60558d0254c 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -373,7 +373,7 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
return 0;
}
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
{
unsigned int seq, hh_alen;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 361d26077196..e0d64667a4b3 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -352,26 +352,12 @@ static inline void rt_genid_bump_ipv4(struct net *net)
atomic_inc(&net->ipv4.rt_genid);
}
-#if IS_ENABLED(CONFIG_IPV6)
-static inline int rt_genid_ipv6(struct net *net)
-{
- return atomic_read(&net->ipv6.rt_genid);
-}
-
-static inline void rt_genid_bump_ipv6(struct net *net)
-{
- atomic_inc(&net->ipv6.rt_genid);
-}
-#else
-static inline int rt_genid_ipv6(struct net *net)
-{
- return 0;
-}
-
+extern void (*__fib6_flush_trees)(struct net *net);
static inline void rt_genid_bump_ipv6(struct net *net)
{
+ if (__fib6_flush_trees)
+ __fib6_flush_trees(net);
}
-#endif
#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
static inline struct netns_ieee802154_lowpan *
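
rt_genid_bump_ipv6() now reaches the IPv6 FIB through a function pointer instead of a per-netns generation counter. A minimal sketch of that indirection pattern with placeholder names (the real assignment of __fib6_flush_trees is done by the IPv6 FIB code and is not part of this patch):

struct net;

/* Left NULL until the code that owns the trees installs its flush routine. */
static void (*flush_trees_hook)(struct net *net);

static void bump_ipv6_genid(struct net *net)
{
	/* Safe whether or not the hook has been installed. */
	if (flush_trees_hook)
		flush_trees_hook(net);
}

static void install_flush_hook(void (*fn)(struct net *net))
{
	flush_trees_hook = fn;
}
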
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
new file mode 100644
index 000000000000..2aa6048a55c1
--- /dev/null
+++ b/include/net/netfilter/br_netfilter.h
@@ -0,0 +1,6 @@
+#ifndef _BR_NETFILTER_H_
+#define _BR_NETFILTER_H_
+
+void br_netfilter_enable(void);
+
+#endif /* _BR_NETFILTER_H_ */
diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h
new file mode 100644
index 000000000000..a9c001c646da
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h
@@ -0,0 +1,14 @@
+#ifndef _NF_NAT_MASQUERADE_IPV4_H_
+#define _NF_NAT_MASQUERADE_IPV4_H_
+
+#include <net/netfilter/nf_nat.h>
+
+unsigned int
+nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
+ const struct nf_nat_range *range,
+ const struct net_device *out);
+
+void nf_nat_masquerade_ipv4_register_notifier(void);
+void nf_nat_masquerade_ipv4_unregister_notifier(void);
+
+#endif /* _NF_NAT_MASQUERADE_IPV4_H_ */
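
A hedged sketch of how a module built on this header might pair the notifier helpers at init/exit time; the MASQUERADE target or nft expression body itself is omitted and the module name is a placeholder:

#include <linux/module.h>
#include <net/netfilter/ipv4/nf_nat_masquerade.h>

/* Register for the address/interface notifier events while the module that
 * performs IPv4 masquerading is loaded.
 */
static int __init masq_sketch_init(void)
{
	nf_nat_masquerade_ipv4_register_notifier();
	return 0;
}

static void __exit masq_sketch_exit(void)
{
	nf_nat_masquerade_ipv4_unregister_notifier();
}

module_init(masq_sketch_init);
module_exit(masq_sketch_exit);
MODULE_LICENSE("GPL");
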
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index 931fbf812171..e8427193c777 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -1,128 +1,13 @@
#ifndef _IPV4_NF_REJECT_H
#define _IPV4_NF_REJECT_H
-#include <net/ip.h>
-#include <net/tcp.h>
-#include <net/route.h>
-#include <net/dst.h>
+#include <net/icmp.h>
static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
{
icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
}
-/* Send RST reply */
-static void nf_send_reset(struct sk_buff *oldskb, int hook)
-{
- struct sk_buff *nskb;
- const struct iphdr *oiph;
- struct iphdr *niph;
- const struct tcphdr *oth;
- struct tcphdr _otcph, *tcph;
-
- /* IP header checks: fragment. */
- if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
- return;
-
- oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
- sizeof(_otcph), &_otcph);
- if (oth == NULL)
- return;
-
- /* No RST for RST. */
- if (oth->rst)
- return;
-
- if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
- return;
-
- /* Check checksum */
- if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
- return;
- oiph = ip_hdr(oldskb);
-
- nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
- LL_MAX_HEADER, GFP_ATOMIC);
- if (!nskb)
- return;
-
- skb_reserve(nskb, LL_MAX_HEADER);
-
- skb_reset_network_header(nskb);
- niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
- niph->version = 4;
- niph->ihl = sizeof(struct iphdr) / 4;
- niph->tos = 0;
- niph->id = 0;
- niph->frag_off = htons(IP_DF);
- niph->protocol = IPPROTO_TCP;
- niph->check = 0;
- niph->saddr = oiph->daddr;
- niph->daddr = oiph->saddr;
-
- skb_reset_transport_header(nskb);
- tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
- memset(tcph, 0, sizeof(*tcph));
- tcph->source = oth->dest;
- tcph->dest = oth->source;
- tcph->doff = sizeof(struct tcphdr) / 4;
-
- if (oth->ack)
- tcph->seq = oth->ack_seq;
- else {
- tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
- oldskb->len - ip_hdrlen(oldskb) -
- (oth->doff << 2));
- tcph->ack = 1;
- }
-
- tcph->rst = 1;
- tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
- niph->daddr, 0);
- nskb->ip_summed = CHECKSUM_PARTIAL;
- nskb->csum_start = (unsigned char *)tcph - nskb->head;
- nskb->csum_offset = offsetof(struct tcphdr, check);
-
- /* ip_route_me_harder expects skb->dst to be set */
- skb_dst_set_noref(nskb, skb_dst(oldskb));
-
- nskb->protocol = htons(ETH_P_IP);
- if (ip_route_me_harder(nskb, RTN_UNSPEC))
- goto free_nskb;
-
- niph->ttl = ip4_dst_hoplimit(skb_dst(nskb));
-
- /* "Never happens" */
- if (nskb->len > dst_mtu(skb_dst(nskb)))
- goto free_nskb;
-
- nf_ct_attach(nskb, oldskb);
-
-#ifdef CONFIG_BRIDGE_NETFILTER
- /* If we use ip_local_out for bridged traffic, the MAC source on
- * the RST will be ours, instead of the destination's. This confuses
- * some routers/firewalls, and they drop the packet. So we need to
- * build the eth header using the original destination's MAC as the
- * source, and send the RST packet directly.
- */
- if (oldskb->nf_bridge) {
- struct ethhdr *oeth = eth_hdr(oldskb);
- nskb->dev = oldskb->nf_bridge->physindev;
- niph->tot_len = htons(nskb->len);
- ip_send_check(niph);
- if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
- oeth->h_source, oeth->h_dest, nskb->len) < 0)
- goto free_nskb;
- dev_queue_xmit(nskb);
- } else
-#endif
- ip_local_out(nskb);
-
- return;
-
- free_nskb:
- kfree_skb(nskb);
-}
-
+void nf_send_reset(struct sk_buff *oldskb, int hook);
#endif /* _IPV4_NF_REJECT_H */
diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h
new file mode 100644
index 000000000000..0a13396cd390
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h
@@ -0,0 +1,10 @@
+#ifndef _NF_NAT_MASQUERADE_IPV6_H_
+#define _NF_NAT_MASQUERADE_IPV6_H_
+
+unsigned int
+nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
+ const struct net_device *out);
+void nf_nat_masquerade_ipv6_register_notifier(void);
+void nf_nat_masquerade_ipv6_unregister_notifier(void);
+
+#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
index 710d17ed70b4..7a10cfcd8e33 100644
--- a/include/net/netfilter/ipv6/nf_reject.h
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -147,7 +147,7 @@ static void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
nf_ct_attach(nskb, oldskb);
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* If we use ip6_local_out for bridged traffic, the MAC source on
* the RST will be ours, instead of the destination's. This confuses
* some routers/firewalls, and they drop the packet. So we need to
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index a71dd333ac68..344b1ab19220 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -32,10 +32,8 @@ struct nf_conn_nat {
struct hlist_node bysource;
struct nf_conn *ct;
union nf_conntrack_nat_help help;
-#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
- defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE) || \
- defined(CONFIG_IP6_NF_TARGET_MASQUERADE) || \
- defined(CONFIG_IP6_NF_TARGET_MASQUERADE_MODULE)
+#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV4) || \
+ IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV6)
int masq_index;
#endif
};
@@ -68,8 +66,8 @@ static inline bool nf_nat_oif_changed(unsigned int hooknum,
struct nf_conn_nat *nat,
const struct net_device *out)
{
-#if IS_ENABLED(CONFIG_IP_NF_TARGET_MASQUERADE) || \
- IS_ENABLED(CONFIG_IP6_NF_TARGET_MASQUERADE)
+#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV4) || \
+ IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV6)
return nat->masq_index && hooknum == NF_INET_POST_ROUTING &&
CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL &&
nat->masq_index != out->ifindex;
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
index 5a2919b2e09a..340c013795a4 100644
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -42,8 +42,83 @@ const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto);
int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int hooknum);
+
+unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int hooknum, unsigned int hdrlen);
+unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
#endif /* _NF_NAT_L3PROTO_H */
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index c4d86198d3d6..3d7292392fac 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -241,6 +241,7 @@ void nft_unregister_set(struct nft_set_ops *ops);
* @dtype: data type (verdict or numeric type defined by userspace)
* @size: maximum set size
* @nelems: number of elements
+ * @policy: set parameterization (see enum nft_set_policies)
* @ops: set ops
* @flags: set flags
* @klen: key length
@@ -255,6 +256,7 @@ struct nft_set {
u32 dtype;
u32 size;
u32 nelems;
+ u16 policy;
/* runtime data below here */
const struct nft_set_ops *ops ____cacheline_aligned;
u16 flags;
diff --git a/include/net/netfilter/nft_masq.h b/include/net/netfilter/nft_masq.h
new file mode 100644
index 000000000000..c72729f954f4
--- /dev/null
+++ b/include/net/netfilter/nft_masq.h
@@ -0,0 +1,16 @@
+#ifndef _NFT_MASQ_H_
+#define _NFT_MASQ_H_
+
+struct nft_masq {
+ u32 flags;
+};
+
+extern const struct nla_policy nft_masq_policy[];
+
+int nft_masq_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+
+int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr);
+
+#endif /* _NFT_MASQ_H_ */
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
index 36b0da2d55bb..60fa1530006b 100644
--- a/include/net/netfilter/nft_reject.h
+++ b/include/net/netfilter/nft_reject.h
@@ -14,12 +14,7 @@ int nft_reject_init(const struct nft_ctx *ctx,
int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr);
-void nft_reject_ipv4_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
- const struct nft_pktinfo *pkt);
-
-void nft_reject_ipv6_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
- const struct nft_pktinfo *pkt);
+int nft_reject_icmp_code(u8 code);
+int nft_reject_icmpv6_code(u8 code);
#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index aec5e12f9f19..24945cefc4fd 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -76,6 +76,7 @@ struct netns_ipv4 {
int sysctl_tcp_ecn;
int sysctl_ip_no_pmtu_disc;
int sysctl_ip_fwd_use_pmtu;
+ int sysctl_ip_nonlocal_bind;
int sysctl_fwmark_reflect;
int sysctl_tcp_fwmark_accept;
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 3492434baf88..9da798256f0e 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -13,6 +13,19 @@ struct ctl_table_header;
struct xfrm_policy_hash {
struct hlist_head *table;
unsigned int hmask;
+ u8 dbits4;
+ u8 sbits4;
+ u8 dbits6;
+ u8 sbits6;
+};
+
+struct xfrm_policy_hthresh {
+ struct work_struct work;
+ seqlock_t lock;
+ u8 lbits4;
+ u8 rbits4;
+ u8 lbits6;
+ u8 rbits6;
};
struct netns_xfrm {
@@ -41,6 +54,7 @@ struct netns_xfrm {
struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX * 2];
unsigned int policy_count[XFRM_POLICY_MAX * 2];
struct work_struct policy_hash_work;
+ struct xfrm_policy_hthresh policy_hthresh;
struct sock *nlsk;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 6da46dcf1049..ef44ad9a6426 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -20,11 +20,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
- unsigned long old_cl;
-
- old_cl = *clp;
- *clp = cl;
- return old_cl;
+ return xchg(clp, cl);
}
static inline unsigned long
@@ -137,7 +133,7 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
struct nlattr **tb, struct nlattr *rate_tlv,
struct tcf_exts *exts, bool ovr);
-void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts);
+void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index ec030cd76616..e4b3c828c1c2 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -50,7 +50,7 @@ typedef long psched_tdiff_t;
static inline psched_time_t psched_get_time(void)
{
- return PSCHED_NS2TICKS(ktime_to_ns(ktime_get()));
+ return PSCHED_NS2TICKS(ktime_get_ns());
}
static inline psched_tdiff_t
@@ -99,7 +99,7 @@ void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
struct net_device *dev, struct netdev_queue *txq,
- spinlock_t *root_lock);
+ spinlock_t *root_lock, bool validate);
void __qdisc_run(struct Qdisc *q);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index a3cfb8ebeb53..d17ed6fb2f70 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -6,6 +6,8 @@
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
+#include <linux/percpu.h>
+#include <linux/dynamic_queue_limits.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
@@ -58,6 +60,7 @@ struct Qdisc {
* multiqueue device.
*/
#define TCQ_F_WARN_NONWC (1 << 16)
+#define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
@@ -83,9 +86,15 @@ struct Qdisc {
*/
unsigned long state;
struct sk_buff_head q;
- struct gnet_stats_basic_packed bstats;
+ union {
+ struct gnet_stats_basic_packed bstats;
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ } __packed;
unsigned int __state;
- struct gnet_stats_queue qstats;
+ union {
+ struct gnet_stats_queue qstats;
+ struct gnet_stats_queue __percpu *cpu_qstats;
+ } __packed;
struct rcu_head rcu_head;
int padded;
atomic_t refcnt;
@@ -111,6 +120,21 @@ static inline void qdisc_run_end(struct Qdisc *qdisc)
qdisc->__state &= ~__QDISC___STATE_RUNNING;
}
+static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
+{
+ return qdisc->flags & TCQ_F_ONETXQUEUE;
+}
+
+static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
+{
+#ifdef CONFIG_BQL
+ /* Non-BQL migrated drivers will return 0, too. */
+ return dql_avail(&txq->dql);
+#else
+ return 0;
+#endif
+}
+
static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
{
return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
@@ -143,7 +167,7 @@ struct Qdisc_class_ops {
void (*walk)(struct Qdisc *, struct qdisc_walker * arg);
/* Filter manipulation */
- struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long);
+ struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
u32 classid);
void (*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -212,8 +236,8 @@ struct tcf_proto_ops {
struct tcf_proto {
/* Fast access part */
- struct tcf_proto *next;
- void *root;
+ struct tcf_proto __rcu *next;
+ void __rcu *root;
int (*classify)(struct sk_buff *,
const struct tcf_proto *,
struct tcf_result *);
@@ -225,13 +249,15 @@ struct tcf_proto {
struct Qdisc *q;
void *data;
const struct tcf_proto_ops *ops;
+ struct rcu_head rcu;
};
struct qdisc_skb_cb {
unsigned int pkt_len;
u16 slave_dev_queue_mapping;
u16 _pad;
- unsigned char data[24];
+#define QDISC_CB_PRIV_LEN 20
+ unsigned char data[QDISC_CB_PRIV_LEN];
};
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
@@ -259,7 +285,9 @@ static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
- return qdisc->dev_queue->qdisc;
+ struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
+
+ return q;
}
static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
@@ -376,7 +404,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
const struct qdisc_size_table *stab);
void tcf_destroy(struct tcf_proto *tp);
-void tcf_destroy_chain(struct tcf_proto **fl);
+void tcf_destroy_chain(struct tcf_proto __rcu **fl);
/* Reset all TX qdiscs greater then index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
@@ -384,7 +412,7 @@ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
struct Qdisc *qdisc;
for (; i < dev->num_tx_queues; i++) {
- qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
if (qdisc) {
spin_lock_bh(qdisc_lock(qdisc));
qdisc_reset(qdisc);
@@ -402,13 +430,18 @@ static inline void qdisc_reset_all_tx(struct net_device *dev)
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
unsigned int i;
+
+ rcu_read_lock();
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- const struct Qdisc *q = txq->qdisc;
+ const struct Qdisc *q = rcu_dereference(txq->qdisc);
- if (q->q.qlen)
+ if (q->q.qlen) {
+ rcu_read_unlock();
return false;
+ }
}
+ rcu_read_unlock();
return true;
}
@@ -416,9 +449,10 @@ static inline bool qdisc_all_tx_empty(const struct net_device *dev)
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
unsigned int i;
+
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- if (txq->qdisc != txq->qdisc_sleeping)
+ if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
return true;
}
return false;
@@ -428,9 +462,10 @@ static inline bool qdisc_tx_changing(const struct net_device *dev)
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
unsigned int i;
+
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- if (txq->qdisc != &noop_qdisc)
+ if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
return false;
}
return true;
@@ -476,6 +511,10 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
+static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
+{
+ return q->flags & TCQ_F_CPUSTATS;
+}
static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
const struct sk_buff *skb)
@@ -484,17 +523,62 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}
+static inline void qdisc_bstats_update_cpu(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ struct gnet_stats_basic_cpu *bstats =
+ this_cpu_ptr(sch->cpu_bstats);
+
+ u64_stats_update_begin(&bstats->syncp);
+ bstats_update(&bstats->bstats, skb);
+ u64_stats_update_end(&bstats->syncp);
+}
+
static inline void qdisc_bstats_update(struct Qdisc *sch,
const struct sk_buff *skb)
{
bstats_update(&sch->bstats, skb);
}
+static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+}
+
+static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+}
+
+static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
+{
+ sch->qstats.drops += count;
+}
+
+static inline void qdisc_qstats_drop(struct Qdisc *sch)
+{
+ sch->qstats.drops++;
+}
+
+static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch)
+{
+ struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats);
+
+ qstats->drops++;
+}
+
+static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
+{
+ sch->qstats.overlimits++;
+}
+
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff_head *list)
{
__skb_queue_tail(list, skb);
- sch->qstats.backlog += qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_inc(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -510,7 +594,7 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
struct sk_buff *skb = __skb_dequeue(list);
if (likely(skb != NULL)) {
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
qdisc_bstats_update(sch, skb);
}
@@ -529,7 +613,7 @@ static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
if (likely(skb != NULL)) {
unsigned int len = qdisc_pkt_len(skb);
- sch->qstats.backlog -= len;
+ qdisc_qstats_backlog_dec(sch, skb);
kfree_skb(skb);
return len;
}
@@ -548,7 +632,7 @@ static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
struct sk_buff *skb = __skb_dequeue_tail(list);
if (likely(skb != NULL))
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
return skb;
}
@@ -630,14 +714,14 @@ static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
kfree_skb(skb);
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
return NET_XMIT_DROP;
}
static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
#ifdef CONFIG_NET_CLS_ACT
if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
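
A hedged sketch of how a qdisc built on these helpers might pick between the per-CPU and the lock-protected byte/packet counters; it is not taken from any in-tree qdisc and assumes a file that already includes <net/sch_generic.h>:

/* Account one transmitted packet, using the lock-free per-CPU counters
 * when the qdisc advertises TCQ_F_CPUSTATS and the classic fields
 * (protected by the qdisc lock) otherwise.
 */
static inline void sketch_count_packet(struct Qdisc *sch, struct sk_buff *skb)
{
	if (qdisc_is_percpu_stats(sch))
		qdisc_bstats_update_cpu(sch, skb);
	else
		qdisc_bstats_update(sch, skb);
}
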
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index f6e7397e799d..9fbd856e6713 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -320,6 +320,19 @@ static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc)
return asoc ? asoc->assoc_id : 0;
}
+static inline enum sctp_sstat_state
+sctp_assoc_to_state(const struct sctp_association *asoc)
+{
+ /* SCTP's uapi always had SCTP_EMPTY(=0) as a dummy state, but we
+ * got rid of it in kernel space. Therefore SCTP_CLOSED et al
+ * start at =1 in user space, but actually as =0 in kernel space.
+ * Now that we can not break user space and SCTP_EMPTY is exposed
+ * there, we need to fix it up with an ugly offset not to break
+ * applications. :(
+ */
+ return asoc->state + 1;
+}
+
/* Look up the association by its id. */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
diff --git a/include/net/snmp.h b/include/net/snmp.h
index f1f27fdbb0d5..8fd2f498782e 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -146,19 +146,15 @@ struct linux_xfrm_mib {
#define SNMP_ADD_STATS(mib, field, addend) \
this_cpu_add(mib->mibs[field], addend)
-/*
- * Use "__typeof__(*mib) *ptr" instead of "__typeof__(mib) ptr"
- * to make @ptr a non-percpu pointer.
- */
#define SNMP_UPD_PO_STATS(mib, basefield, addend) \
do { \
- __typeof__(*mib->mibs) *ptr = mib->mibs; \
+ __typeof__((mib->mibs) + 0) ptr = mib->mibs; \
this_cpu_inc(ptr[basefield##PKTS]); \
this_cpu_add(ptr[basefield##OCTETS], addend); \
} while (0)
#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
do { \
- __typeof__(*mib->mibs) *ptr = mib->mibs; \
+ __typeof__((mib->mibs) + 0) ptr = mib->mibs; \
__this_cpu_inc(ptr[basefield##PKTS]); \
__this_cpu_add(ptr[basefield##OCTETS], addend); \
} while (0)
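
Adding 0 forces ordinary C array-to-pointer decay inside __typeof__, so ptr is declared with the element-pointer type of mib->mibs instead of its array type. A small user-space illustration of the same trick:

#include <stdio.h>

int main(void)
{
	unsigned long mibs[8] = { 0 };

	/* __typeof__(mibs) would be unsigned long[8]; adding 0 decays the
	 * expression, so p is declared as unsigned long *.
	 */
	__typeof__(mibs + 0) p = mibs;

	p[3] += 42;
	printf("%lu\n", mibs[3]);	/* prints 42 */
	return 0;
}
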
diff --git a/include/net/sock.h b/include/net/sock.h
index 7f2ab72f321a..515a4d01e932 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1574,7 +1574,12 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
void sock_wfree(struct sk_buff *skb);
void skb_orphan_partial(struct sk_buff *skb);
void sock_rfree(struct sk_buff *skb);
+void sock_efree(struct sk_buff *skb);
+#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb);
+#else
+#define sock_edemux(skb) sock_efree(skb)
+#endif
int sock_setsockopt(struct socket *sock, int level, int op,
char __user *optval, unsigned int optlen);
@@ -2041,6 +2046,7 @@ void sk_stop_timer(struct sock *sk, struct timer_list *timer);
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
+struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
/*
* Recover an error report and clear atomically
@@ -2165,9 +2171,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
*/
if (sock_flag(sk, SOCK_RCVTSTAMP) ||
(sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
- (kt.tv64 &&
- (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE ||
- skb_shinfo(skb)->tx_flags & SKBTX_ANY_SW_TSTAMP)) ||
+ (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
(hwtstamps->hwtstamp.tv64 &&
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
@@ -2195,6 +2199,8 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
sk->sk_stamp = skb->tstamp;
}
+void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
+
/**
* sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
* @sk: socket sending this packet
@@ -2202,7 +2208,13 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
*
* Note : callers should take care of initial *tx_flags value (usually 0)
*/
-void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
+static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
+{
+ if (unlikely(sk->sk_tsflags))
+ __sock_tx_timestamp(sk, tx_flags);
+ if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
+ *tx_flags |= SKBTX_WIFI_STATUS;
+}
/**
* sk_eat_skb - Release a skb if it is no longer needed
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 590e01a476ac..ba2f9d03076b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -672,6 +672,12 @@ void tcp_send_window_probe(struct sock *sk);
*/
#define tcp_time_stamp ((__u32)(jiffies))
+static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
+{
+ return skb->skb_mstamp.stamp_jiffies;
+}
+
+
#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
#define TCPHDR_FIN 0x01
@@ -690,15 +696,18 @@ void tcp_send_window_probe(struct sock *sk);
* If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
*/
struct tcp_skb_cb {
- union {
- struct inet_skb_parm h4;
-#if IS_ENABLED(CONFIG_IPV6)
- struct inet6_skb_parm h6;
-#endif
- } header; /* For incoming frames */
__u32 seq; /* Starting sequence number */
__u32 end_seq; /* SEQ + FIN + SYN + datalen */
- __u32 when; /* used to compute rtt's */
+ union {
+ /* Note : tcp_tw_isn is used in input path only
+ * (isn chosen by tcp_timewait_state_process())
+ *
+ * tcp_gso_segs is used in write queue only,
+ * cf tcp_skb_pcount()
+ */
+ __u32 tcp_tw_isn;
+ __u32 tcp_gso_segs;
+ };
__u8 tcp_flags; /* TCP header flags. (tcp[13]) */
__u8 sacked; /* State flags for SACK/FACK. */
@@ -714,33 +723,32 @@ struct tcp_skb_cb {
__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
/* 1 byte hole */
__u32 ack_seq; /* Sequence number ACK'd */
+ union {
+ struct inet_skb_parm h4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_skb_parm h6;
+#endif
+ } header; /* For incoming frames */
};
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
-/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
- *
- * If we receive a SYN packet with these bits set, it means a network is
- * playing bad games with TOS bits. In order to avoid possible false congestion
- * notifications, we disable TCP ECN negociation.
+/* Due to TSO, an SKB can be composed of multiple actual
+ * packets. To keep these tracked properly, we use this.
*/
-static inline void
-TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb,
- struct net *net)
+static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
- const struct tcphdr *th = tcp_hdr(skb);
+ return TCP_SKB_CB(skb)->tcp_gso_segs;
+}
- if (net->ipv4.sysctl_tcp_ecn && th->ece && th->cwr &&
- INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield))
- inet_rsk(req)->ecn_ok = 1;
+static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
+{
+ TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}
-/* Due to TSO, an SKB can be composed of multiple actual
- * packets. To keep these tracked properly, we use this.
- */
-static inline int tcp_skb_pcount(const struct sk_buff *skb)
+static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
- return skb_shinfo(skb)->gso_segs;
+ TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}
/* This is valid iff tcp_skb_pcount() > 1. */
@@ -755,8 +763,17 @@ enum tcp_ca_event {
CA_EVENT_CWND_RESTART, /* congestion window restart */
CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */
CA_EVENT_LOSS, /* loss timeout */
- CA_EVENT_FAST_ACK, /* in sequence ack */
- CA_EVENT_SLOW_ACK, /* other ack */
+ CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
+ CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
+ CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */
+ CA_EVENT_NON_DELAYED_ACK,
+};
+
+/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
+enum tcp_ca_ack_event_flags {
+ CA_ACK_SLOWPATH = (1 << 0), /* In slow path processing */
+ CA_ACK_WIN_UPDATE = (1 << 1), /* ACK updated window */
+ CA_ACK_ECE = (1 << 2), /* ECE bit is set on ack */
};
/*
@@ -766,7 +783,10 @@ enum tcp_ca_event {
#define TCP_CA_MAX 128
#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
+/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
+/* Requires ECN/ECT set on all packets */
+#define TCP_CONG_NEEDS_ECN 0x2
struct tcp_congestion_ops {
struct list_head list;
@@ -785,6 +805,8 @@ struct tcp_congestion_ops {
void (*set_state)(struct sock *sk, u8 new_state);
/* call when cwnd event occurs (optional) */
void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
+ /* call when ack arrives (optional) */
+ void (*in_ack_event)(struct sock *sk, u32 flags);
/* new value of cwnd after loss (optional) */
u32 (*undo_cwnd)(struct sock *sk);
/* hook for packet ack accounting (optional) */
@@ -799,6 +821,7 @@ struct tcp_congestion_ops {
int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
+void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(const char *name);
@@ -807,14 +830,20 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name);
-int tcp_slow_start(struct tcp_sock *tp, u32 acked);
+void tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
-extern struct tcp_congestion_ops tcp_init_congestion_ops;
u32 tcp_reno_ssthresh(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
+static inline bool tcp_ca_needs_ecn(const struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+
+ return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
+}
+
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
struct inet_connection_sock *icsk = inet_csk(sk);
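
A hedged sketch of a congestion-control module wired to the new in_ack_event() hook and the TCP_CONG_NEEDS_ECN flag; the names and callback bodies are placeholders, not an in-tree algorithm, and the file is assumed to include <net/tcp.h>:

static void sketch_in_ack_event(struct sock *sk, u32 flags)
{
	if (flags & CA_ACK_ECE) {
		/* Peer echoed an ECN mark: back off, but less than on loss. */
	}
	if (flags & CA_ACK_WIN_UPDATE) {
		/* The ACK moved the receive window. */
	}
}

static struct tcp_congestion_ops sketch_ca = {
	.name		= "sketch",
	.flags		= TCP_CONG_NEEDS_ECN,	/* require ECT on all packets */
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.in_ack_event	= sketch_in_ack_event,
};
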
diff --git a/include/net/udp.h b/include/net/udp.h
index 70f941368ace..07f9b70962f6 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -158,6 +158,24 @@ static inline __sum16 udp_v4_check(int len, __be32 saddr,
void udp_set_csum(bool nocheck, struct sk_buff *skb,
__be32 saddr, __be32 daddr, int len);
+struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
+ struct udphdr *uh);
+int udp_gro_complete(struct sk_buff *skb, int nhoff);
+
+static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
+{
+ struct udphdr *uh;
+ unsigned int hlen, off;
+
+ off = skb_gro_offset(skb);
+ hlen = off + sizeof(*uh);
+ uh = skb_gro_header_fast(skb, off);
+ if (skb_gro_header_hard(skb, hlen))
+ uh = skb_gro_header_slow(skb, hlen, off);
+
+ return uh;
+}
+
/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline void udp_lib_hash(struct sock *sk)
{
@@ -221,7 +239,8 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_disconnect(struct sock *sk, int flags);
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
- netdev_features_t features);
+ netdev_features_t features,
+ bool is_ipv6);
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index ffd69cbded35..a47790bcaa38 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -1,6 +1,14 @@
#ifndef __NET_UDP_TUNNEL_H
#define __NET_UDP_TUNNEL_H
+#include <net/ip_tunnels.h>
+#include <net/udp.h>
+
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#endif
+
struct udp_port_cfg {
u8 family;
@@ -26,7 +34,80 @@ struct udp_port_cfg {
use_udp6_rx_checksums:1;
};
-int udp_sock_create(struct net *net, struct udp_port_cfg *cfg,
- struct socket **sockp);
+int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
+ struct socket **sockp);
+
+#if IS_ENABLED(CONFIG_IPV6)
+int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
+ struct socket **sockp);
+#else
+static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
+ struct socket **sockp)
+{
+ return 0;
+}
+#endif
+
+static inline int udp_sock_create(struct net *net,
+ struct udp_port_cfg *cfg,
+ struct socket **sockp)
+{
+ if (cfg->family == AF_INET)
+ return udp_sock_create4(net, cfg, sockp);
+
+ if (cfg->family == AF_INET6)
+ return udp_sock_create6(net, cfg, sockp);
+
+ return -EPFNOSUPPORT;
+}
+
+typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
+typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
+
+struct udp_tunnel_sock_cfg {
+ void *sk_user_data; /* user data used by encap_rcv call back */
+ /* Used for setting up udp_sock fields, see udp.h for details */
+ __u8 encap_type;
+ udp_tunnel_encap_rcv_t encap_rcv;
+ udp_tunnel_encap_destroy_t encap_destroy;
+};
+
+/* Setup the given (UDP) sock to receive UDP encapsulated packets */
+void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
+ struct udp_tunnel_sock_cfg *sock_cfg);
+
+/* Transmit the skb using UDP encapsulation. */
+int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt,
+ struct sk_buff *skb, __be32 src, __be32 dst,
+ __u8 tos, __u8 ttl, __be16 df, __be16 src_port,
+ __be16 dst_port, bool xnet);
+
+#if IS_ENABLED(CONFIG_IPV6)
+int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
+ struct sk_buff *skb, struct net_device *dev,
+ struct in6_addr *saddr, struct in6_addr *daddr,
+ __u8 prio, __u8 ttl, __be16 src_port,
+ __be16 dst_port);
+#endif
+
+void udp_tunnel_sock_release(struct socket *sock);
+
+static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
+ bool udp_csum)
+{
+ int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+
+ return iptunnel_handle_offloads(skb, udp_csum, type);
+}
+
+static inline void udp_tunnel_encap_enable(struct socket *sock)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sock->sk->sk_family == PF_INET6)
+ ipv6_stub->udpv6_encap_enable();
+ else
+#endif
+ udp_encap_enable();
+}
#endif
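
A hedged sketch of how a tunnel driver might use the consolidated helpers: fill in a udp_port_cfg, create the kernel socket, then hand it to setup_udp_tunnel_sock(). The function names, the encap_type value and the stub receive callback are illustrative only:

#include <net/udp_tunnel.h>

/* Placeholder decapsulation hook: a real driver would parse its tunnel
 * header here; returning 0 tells UDP the skb was consumed.
 */
static int sketch_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static int sketch_tunnel_open(struct net *net, __be16 port,
			      struct socket **sockp)
{
	struct udp_port_cfg udp_conf = {
		.family		= AF_INET,
		.local_udp_port	= port,
	};
	struct udp_tunnel_sock_cfg tunnel_cfg = {
		.encap_type	= 1,			/* a UDP_ENCAP_* value */
		.encap_rcv	= sketch_encap_rcv,
	};
	int err;

	err = udp_sock_create(net, &udp_conf, sockp);
	if (err < 0)
		return err;

	setup_udp_tunnel_sock(net, *sockp, &tunnel_cfg);
	return 0;
}
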
diff --git a/include/net/wimax.h b/include/net/wimax.h
index e52ef5357e08..c52b68577cb0 100644
--- a/include/net/wimax.h
+++ b/include/net/wimax.h
@@ -290,7 +290,7 @@ struct wimax_dev;
* This operation has to be synchronous, and return only when the
* reset is complete. In case of having had to resort to bus/cold
* reset implying a device disconnection, the call is allowed to
- * return inmediately.
+ * return immediately.
* NOTE: wimax_dev->mutex is NOT locked when this op is being
* called; however, wimax_dev->mutex_reset IS locked to ensure
* serialization of calls to wimax_reset().
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 721e9c3b11bd..dc4865e90fe4 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1591,6 +1591,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
u32 id, int delete, int *err);
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
+void xfrm_policy_hash_rebuild(struct net *net);
u32 xfrm_get_acqseq(void);
int verify_spi_info(u8 proto, u32 min, u32 max);
int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 1ea0b65c4cfb..a2bf41e0bde9 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -47,6 +47,7 @@ struct ib_umem {
int writable;
int hugetlb;
struct work_struct work;
+ struct pid *pid;
struct mm_struct *mm;
unsigned long diff;
struct sg_table sg_head;
diff --git a/include/rxrpc/types.h b/include/rxrpc/types.h
deleted file mode 100644
index 30d48f6da228..000000000000
--- a/include/rxrpc/types.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* types.h: Rx types
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_RXRPC_TYPES_H
-#define _LINUX_RXRPC_TYPES_H
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/socket.h>
-#include <linux/in.h>
-#include <linux/spinlock.h>
-#include <linux/atomic.h>
-
-typedef uint32_t rxrpc_seq_t; /* Rx message sequence number */
-typedef uint32_t rxrpc_serial_t; /* Rx message serial number */
-typedef __be32 rxrpc_seq_net_t; /* on-the-wire Rx message sequence number */
-typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */
-
-struct rxrpc_call;
-struct rxrpc_connection;
-struct rxrpc_header;
-struct rxrpc_message;
-struct rxrpc_operation;
-struct rxrpc_peer;
-struct rxrpc_service;
-typedef struct rxrpc_timer rxrpc_timer_t;
-struct rxrpc_transport;
-
-typedef void (*rxrpc_call_attn_func_t)(struct rxrpc_call *call);
-typedef void (*rxrpc_call_error_func_t)(struct rxrpc_call *call);
-typedef void (*rxrpc_call_aemap_func_t)(struct rxrpc_call *call);
-
-#endif /* _LINUX_RXRPC_TYPES_H */
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index cdcc90b07ecb..e64583560701 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -68,7 +68,7 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth)
return;
if (!shost_use_blk_mq(sdev->host) &&
- blk_queue_tagged(sdev->request_queue))
+ !blk_queue_tagged(sdev->request_queue))
blk_queue_init_tags(sdev->request_queue, depth,
sdev->host->bqt);
diff --git a/include/sound/soc.h b/include/sound/soc.h
index be6ecae247b0..c83a334dd00f 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -277,7 +277,7 @@
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE | \
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK, \
.tlv.c = (snd_soc_bytes_tlv_callback), \
- .info = snd_soc_info_bytes_ext, \
+ .info = snd_soc_bytes_info_ext, \
.private_value = (unsigned long)&(struct soc_bytes_ext) \
{.max = xcount, .get = xhandler_get, .put = xhandler_put, } }
#define SOC_SINGLE_XR_SX(xname, xregbase, xregcount, xnbits, \
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index 1c09820df585..3608bebd3d9c 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -107,7 +107,7 @@ DECLARE_EVENT_CLASS(softirq,
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_exit tracepoint
- * we can determine the softirq handler runtine.
+ * we can determine the softirq handler routine.
*/
DEFINE_EVENT(softirq, softirq_entry,
@@ -121,7 +121,7 @@ DEFINE_EVENT(softirq, softirq_entry,
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_entry tracepoint
- * we can determine the softirq handler runtine.
+ * we can determine the softirq handler routine.
*/
DEFINE_EVENT(softirq, softirq_exit,
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 11d11bc5c78f..22749c134117 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -705,9 +705,11 @@ __SYSCALL(__NR_seccomp, sys_seccomp)
__SYSCALL(__NR_getrandom, sys_getrandom)
#define __NR_memfd_create 279
__SYSCALL(__NR_memfd_create, sys_memfd_create)
+#define __NR_bpf 280
+__SYSCALL(__NR_bpf, sys_bpf)
#undef __NR_syscalls
-#define __NR_syscalls 280
+#define __NR_syscalls 281
/*
* All syscalls below here should go away really,
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 509b2d7a41b7..fea6099608ef 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -944,6 +944,7 @@ struct drm_radeon_cs_chunk {
};
/* drm_radeon_cs_reloc.flags */
+#define RADEON_RELOC_PRIO_MASK (0xf << 0)
struct drm_radeon_cs_reloc {
uint32_t handle;
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 24e9033f8b3f..70e150ebc6c9 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -67,6 +67,7 @@ header-y += bfs_fs.h
header-y += binfmts.h
header-y += blkpg.h
header-y += blktrace_api.h
+header-y += bpf.h
header-y += bpqether.h
header-y += bsg.h
header-y += btrfs.h
@@ -240,6 +241,7 @@ header-y += matroxfb.h
header-y += mdio.h
header-y += media.h
header-y += mei.h
+header-y += memfd.h
header-y += mempolicy.h
header-y += meye.h
header-y += mic_common.h
@@ -395,6 +397,7 @@ header-y += un.h
header-y += unistd.h
header-y += unix_diag.h
header-y += usbdevice_fs.h
+header-y += usbip.h
header-y += utime.h
header-y += utsname.h
header-y += uuid.h
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
new file mode 100644
index 000000000000..31b0ac208a52
--- /dev/null
+++ b/include/uapi/linux/bpf.h
@@ -0,0 +1,155 @@
+/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _UAPI__LINUX_BPF_H__
+#define _UAPI__LINUX_BPF_H__
+
+#include <linux/types.h>
+
+/* Extended instruction set based on top of classic BPF */
+
+/* instruction classes */
+#define BPF_ALU64 0x07 /* alu mode in double word width */
+
+/* ld/ldx fields */
+#define BPF_DW 0x18 /* double word */
+#define BPF_XADD 0xc0 /* exclusive add */
+
+/* alu/jmp fields */
+#define BPF_MOV 0xb0 /* mov reg to reg */
+#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
+
+/* change endianness of a register */
+#define BPF_END 0xd0 /* flags for endianness conversion: */
+#define BPF_TO_LE 0x00 /* convert to little-endian */
+#define BPF_TO_BE 0x08 /* convert to big-endian */
+#define BPF_FROM_LE BPF_TO_LE
+#define BPF_FROM_BE BPF_TO_BE
+
+#define BPF_JNE 0x50 /* jump != */
+#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
+#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
+#define BPF_CALL 0x80 /* function call */
+#define BPF_EXIT 0x90 /* function return */
+
+/* Register numbers */
+enum {
+ BPF_REG_0 = 0,
+ BPF_REG_1,
+ BPF_REG_2,
+ BPF_REG_3,
+ BPF_REG_4,
+ BPF_REG_5,
+ BPF_REG_6,
+ BPF_REG_7,
+ BPF_REG_8,
+ BPF_REG_9,
+ BPF_REG_10,
+ __MAX_BPF_REG,
+};
+
+/* BPF has 10 general-purpose 64-bit registers and a stack frame. */
+#define MAX_BPF_REG __MAX_BPF_REG
+
+struct bpf_insn {
+ __u8 code; /* opcode */
+ __u8 dst_reg:4; /* dest register */
+ __u8 src_reg:4; /* source register */
+ __s16 off; /* signed offset */
+ __s32 imm; /* signed immediate constant */
+};
+
+/* BPF syscall commands */
+enum bpf_cmd {
+ /* create a map with given type and attributes
+ * fd = bpf(BPF_MAP_CREATE, union bpf_attr *, u32 size)
+ * returns fd or negative error
+ * map is deleted when fd is closed
+ */
+ BPF_MAP_CREATE,
+
+ /* lookup key in a given map
+ * err = bpf(BPF_MAP_LOOKUP_ELEM, union bpf_attr *attr, u32 size)
+ * Using attr->map_fd, attr->key, attr->value
+ * returns zero and stores found elem into value
+ * or negative error
+ */
+ BPF_MAP_LOOKUP_ELEM,
+
+ /* create or update key/value pair in a given map
+ * err = bpf(BPF_MAP_UPDATE_ELEM, union bpf_attr *attr, u32 size)
+ * Using attr->map_fd, attr->key, attr->value
+ * returns zero or negative error
+ */
+ BPF_MAP_UPDATE_ELEM,
+
+ /* find and delete elem by key in a given map
+ * err = bpf(BPF_MAP_DELETE_ELEM, union bpf_attr *attr, u32 size)
+ * Using attr->map_fd, attr->key
+ * returns zero or negative error
+ */
+ BPF_MAP_DELETE_ELEM,
+
+ /* lookup key in a given map and return next key
+ * err = bpf(BPF_MAP_GET_NEXT_KEY, union bpf_attr *attr, u32 size)
+ * Using attr->map_fd, attr->key, attr->next_key
+ * returns zero and stores next key or negative error
+ */
+ BPF_MAP_GET_NEXT_KEY,
+
+ /* verify and load eBPF program
+ * prog_fd = bpf(BPF_PROG_LOAD, union bpf_attr *attr, u32 size)
+ * Using attr->prog_type, attr->insns, attr->license
+ * returns fd or negative error
+ */
+ BPF_PROG_LOAD,
+};
+
+enum bpf_map_type {
+ BPF_MAP_TYPE_UNSPEC,
+};
+
+enum bpf_prog_type {
+ BPF_PROG_TYPE_UNSPEC,
+};
+
+union bpf_attr {
+ struct { /* anonymous struct used by BPF_MAP_CREATE command */
+ __u32 map_type; /* one of enum bpf_map_type */
+ __u32 key_size; /* size of key in bytes */
+ __u32 value_size; /* size of value in bytes */
+ __u32 max_entries; /* max number of entries in a map */
+ };
+
+ struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
+ __u32 map_fd;
+ __aligned_u64 key;
+ union {
+ __aligned_u64 value;
+ __aligned_u64 next_key;
+ };
+ };
+
+ struct { /* anonymous struct used by BPF_PROG_LOAD command */
+ __u32 prog_type; /* one of enum bpf_prog_type */
+ __u32 insn_cnt;
+ __aligned_u64 insns;
+ __aligned_u64 license;
+ __u32 log_level; /* verbosity level of verifier */
+ __u32 log_size; /* size of user buffer */
+ __aligned_u64 log_buf; /* user supplied buffer */
+ };
+} __attribute__((aligned(8)));
+
+/* integer value in 'imm' field of BPF_CALL instruction selects which helper
+ * function eBPF program intends to call
+ */
+enum bpf_func_id {
+ BPF_FUNC_unspec,
+ __BPF_FUNC_MAX_ID,
+};
+
+#endif /* _UAPI__LINUX_BPF_H__ */
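The command descriptions in the new header translate directly into a thin userspace wrapper around the syscall wired up above (__NR_bpf). Below is a minimal sketch of that calling convention, not part of the patch: sys_bpf() and create_map_example() are illustrative names, and since only BPF_MAP_TYPE_UNSPEC exists at this point the create call is expected to be rejected until concrete map types land.

/* Hedged sketch of the bpf(2) calling convention; the raw syscall
 * returns -1 and sets errno on failure (the kernel-internal value is
 * a negative error).  Requires headers new enough to define __NR_bpf.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
        return syscall(__NR_bpf, cmd, attr, size);
}

int create_map_example(void)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_UNSPEC; /* placeholder; real types come later */
        attr.key_size    = 4;                   /* e.g. a u32 key */
        attr.value_size  = 8;                   /* e.g. a u64 counter */
        attr.max_entries = 16;

        /* fd = bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); map is deleted when fd is closed */
        return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}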
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index e3c7a719c76b..7a364f2f3d3f 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -209,6 +209,32 @@ struct ethtool_value {
__u32 data;
};
+enum tunable_id {
+ ETHTOOL_ID_UNSPEC,
+ ETHTOOL_RX_COPYBREAK,
+};
+
+enum tunable_type_id {
+ ETHTOOL_TUNABLE_UNSPEC,
+ ETHTOOL_TUNABLE_U8,
+ ETHTOOL_TUNABLE_U16,
+ ETHTOOL_TUNABLE_U32,
+ ETHTOOL_TUNABLE_U64,
+ ETHTOOL_TUNABLE_STRING,
+ ETHTOOL_TUNABLE_S8,
+ ETHTOOL_TUNABLE_S16,
+ ETHTOOL_TUNABLE_S32,
+ ETHTOOL_TUNABLE_S64,
+};
+
+struct ethtool_tunable {
+ __u32 cmd;
+ __u32 id;
+ __u32 type_id;
+ __u32 len;
+ void *data[0];
+};
+
/**
* struct ethtool_regs - hardware register dump
* @cmd: Command number = %ETHTOOL_GREGS
@@ -1152,6 +1178,8 @@ enum ethtool_sfeatures_retval_bits {
#define ETHTOOL_GRSSH 0x00000046 /* Get RX flow hash configuration */
#define ETHTOOL_SRSSH 0x00000047 /* Set RX flow hash configuration */
+#define ETHTOOL_GTUNABLE 0x00000048 /* Get tunable configuration */
+#define ETHTOOL_STUNABLE 0x00000049 /* Set tunable configuration */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
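The new ETHTOOL_GTUNABLE/ETHTOOL_STUNABLE commands reuse the existing SIOCETHTOOL ioctl path: userspace passes a struct ethtool_tunable header immediately followed by len bytes of value. The sketch below reads the RX_COPYBREAK tunable; get_rx_copybreak(), the wrapping struct, and the u32 layout are assumptions drawn only from the definitions above, and sock is any AF_INET datagram socket.

/* Hedged sketch: read ETHTOOL_RX_COPYBREAK for one interface. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int get_rx_copybreak(int sock, const char *ifname, uint32_t *val)
{
        struct {
                struct ethtool_tunable hdr;
                uint32_t value;                 /* kernel writes len bytes here */
        } req;
        struct ifreq ifr;

        memset(&req, 0, sizeof(req));
        req.hdr.cmd     = ETHTOOL_GTUNABLE;
        req.hdr.id      = ETHTOOL_RX_COPYBREAK;
        req.hdr.type_id = ETHTOOL_TUNABLE_U32;
        req.hdr.len     = sizeof(uint32_t);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&req;

        if (ioctl(sock, SIOCETHTOOL, &ifr) < 0)
                return -1;
        *val = req.value;
        return 0;
}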
diff --git a/include/uapi/linux/fou.h b/include/uapi/linux/fou.h
new file mode 100644
index 000000000000..8df06894da23
--- /dev/null
+++ b/include/uapi/linux/fou.h
@@ -0,0 +1,39 @@
+/* fou.h - FOU Interface */
+
+#ifndef _UAPI_LINUX_FOU_H
+#define _UAPI_LINUX_FOU_H
+
+/* NETLINK_GENERIC related info
+ */
+#define FOU_GENL_NAME "fou"
+#define FOU_GENL_VERSION 0x1
+
+enum {
+ FOU_ATTR_UNSPEC,
+ FOU_ATTR_PORT, /* u16 */
+ FOU_ATTR_AF, /* u8 */
+ FOU_ATTR_IPPROTO, /* u8 */
+ FOU_ATTR_TYPE, /* u8 */
+
+ __FOU_ATTR_MAX,
+};
+
+#define FOU_ATTR_MAX (__FOU_ATTR_MAX - 1)
+
+enum {
+ FOU_CMD_UNSPEC,
+ FOU_CMD_ADD,
+ FOU_CMD_DEL,
+
+ __FOU_CMD_MAX,
+};
+
+enum {
+ FOU_ENCAP_UNSPEC,
+ FOU_ENCAP_DIRECT,
+ FOU_ENCAP_GUE,
+};
+
+#define FOU_CMD_MAX (__FOU_CMD_MAX - 1)
+
+#endif /* _UAPI_LINUX_FOU_H */
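fou is exposed as a generic netlink family, so adding a receive port boils down to one FOU_CMD_ADD request carrying the attributes above. The sketch below uses libnl-3's genl helpers; fou_add_port() is an illustrative name, error handling is abbreviated, and the network byte order of FOU_ATTR_PORT is an assumption based on how UDP ports usually travel over netlink.

/* Hedged sketch: register a FOU receive port via the "fou" genl family. */
#include <stdint.h>
#include <arpa/inet.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/fou.h>

int fou_add_port(uint16_t port, uint8_t ipproto)
{
        struct nl_sock *sk;
        struct nl_msg *msg;
        int family, err = -1;

        sk = nl_socket_alloc();
        if (!sk)
                return -1;
        if (genl_connect(sk) < 0)
                goto out;
        family = genl_ctrl_resolve(sk, FOU_GENL_NAME);
        if (family < 0)
                goto out;

        msg = nlmsg_alloc();
        if (!msg)
                goto out;
        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                    FOU_CMD_ADD, FOU_GENL_VERSION);
        nla_put_u16(msg, FOU_ATTR_PORT, htons(port));   /* assumed network order */
        nla_put_u8(msg, FOU_ATTR_IPPROTO, ipproto);     /* e.g. IPPROTO_IPIP */
        nla_put_u8(msg, FOU_ATTR_TYPE, FOU_ENCAP_DIRECT);

        err = nl_send_auto(sk, msg);
        if (err >= 0)
                err = nl_wait_for_ack(sk);
        nlmsg_free(msg);
out:
        nl_socket_free(sk);
        return err < 0 ? -1 : 0;
}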
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 0f8210b8e0bc..aa63ed023c2b 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -128,6 +128,7 @@
#define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */
#define ETH_P_IEEE802154 0x00F6 /* IEEE802.15.4 frame */
#define ETH_P_CAIF 0x00F7 /* ST-Ericsson CAIF protocol */
+#define ETH_P_XDSA 0x00F8 /* Multiplexed DSA protocol */
/*
* This is an Ethernet frame header.
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index ff957604a721..0bdb77e16875 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -215,6 +215,18 @@ enum in6_addr_gen_mode {
IN6_ADDR_GEN_MODE_NONE,
};
+/* Bridge section */
+
+enum {
+ IFLA_BR_UNSPEC,
+ IFLA_BR_FORWARD_DELAY,
+ IFLA_BR_HELLO_TIME,
+ IFLA_BR_MAX_AGE,
+ __IFLA_BR_MAX,
+};
+
+#define IFLA_BR_MAX (__IFLA_BR_MAX - 1)
+
enum {
BRIDGE_MODE_UNSPEC,
BRIDGE_MODE_HAIRPIN,
@@ -291,6 +303,10 @@ enum {
IFLA_MACVLAN_UNSPEC,
IFLA_MACVLAN_MODE,
IFLA_MACVLAN_FLAGS,
+ IFLA_MACVLAN_MACADDR_MODE,
+ IFLA_MACVLAN_MACADDR,
+ IFLA_MACVLAN_MACADDR_DATA,
+ IFLA_MACVLAN_MACADDR_COUNT,
__IFLA_MACVLAN_MAX,
};
@@ -301,6 +317,14 @@ enum macvlan_mode {
MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */
MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */
MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */
+ MACVLAN_MODE_SOURCE = 16,/* use source MAC address list to assign */
+};
+
+enum macvlan_macaddr_mode {
+ MACVLAN_MACADDR_ADD,
+ MACVLAN_MACADDR_DEL,
+ MACVLAN_MACADDR_FLUSH,
+ MACVLAN_MACADDR_SET,
};
#define MACVLAN_FLAG_NOPROMISC 1
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 3bce9e9d9f7c..280d9e092283 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -53,10 +53,23 @@ enum {
IFLA_IPTUN_6RD_RELAY_PREFIX,
IFLA_IPTUN_6RD_PREFIXLEN,
IFLA_IPTUN_6RD_RELAY_PREFIXLEN,
+ IFLA_IPTUN_ENCAP_TYPE,
+ IFLA_IPTUN_ENCAP_FLAGS,
+ IFLA_IPTUN_ENCAP_SPORT,
+ IFLA_IPTUN_ENCAP_DPORT,
__IFLA_IPTUN_MAX,
};
#define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1)
+enum tunnel_encap_types {
+ TUNNEL_ENCAP_NONE,
+ TUNNEL_ENCAP_FOU,
+ TUNNEL_ENCAP_GUE,
+};
+
+#define TUNNEL_ENCAP_FLAG_CSUM (1<<0)
+#define TUNNEL_ENCAP_FLAG_CSUM6 (1<<1)
+
/* SIT-mode i_flags */
#define SIT_ISATAP 0x0001
@@ -94,6 +107,10 @@ enum {
IFLA_GRE_ENCAP_LIMIT,
IFLA_GRE_FLOWINFO,
IFLA_GRE_FLAGS,
+ IFLA_GRE_ENCAP_TYPE,
+ IFLA_GRE_ENCAP_FLAGS,
+ IFLA_GRE_ENCAP_SPORT,
+ IFLA_GRE_ENCAP_DPORT,
__IFLA_GRE_MAX,
};
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index bbde90fa5838..d65c0a09efd3 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -110,10 +110,10 @@ enum {
INET_DIAG_TCLASS,
INET_DIAG_SKMEMINFO,
INET_DIAG_SHUTDOWN,
+ INET_DIAG_DCTCPINFO,
};
-#define INET_DIAG_MAX INET_DIAG_SHUTDOWN
-
+#define INET_DIAG_MAX INET_DIAG_DCTCPINFO
/* INET_DIAG_MEM */
@@ -133,5 +133,14 @@ struct tcpvegas_info {
__u32 tcpv_minrtt;
};
+/* INET_DIAG_DCTCPINFO */
+
+struct tcp_dctcp_info {
+ __u16 dctcp_enabled;
+ __u16 dctcp_ce_state;
+ __u32 dctcp_alpha;
+ __u32 dctcp_ab_ecn;
+ __u32 dctcp_ab_tot;
+};
#endif /* _UAPI_INET_DIAG_H_ */
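Tools like ss receive these per-socket blocks as netlink attributes attached to each inet_diag_msg; the new INET_DIAG_DCTCPINFO attribute simply carries a struct tcp_dctcp_info. A small parsing sketch, assuming the caller has already split the response into an rtattr table indexed by attribute type (read_dctcp_info() is an illustrative name):

/* Hedged sketch: extract the DCTCP block from one inet_diag response. */
#include <string.h>
#include <linux/inet_diag.h>
#include <linux/rtnetlink.h>

int read_dctcp_info(const struct rtattr *tb[], struct tcp_dctcp_info *out)
{
        const struct rtattr *rta = tb[INET_DIAG_DCTCPINFO];

        if (!rta || RTA_PAYLOAD(rta) < sizeof(*out))
                return -1;              /* attribute absent or truncated */
        memcpy(out, RTA_DATA(rta), sizeof(*out));
        return 0;                       /* out->dctcp_alpha etc. now usable */
}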
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 19df18c9b8be..1874ebe9ac1e 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -165,6 +165,7 @@ struct input_keymap_entry {
#define INPUT_PROP_BUTTONPAD 0x02 /* has button(s) under pad */
#define INPUT_PROP_SEMI_MT 0x03 /* touch rectangle only */
#define INPUT_PROP_TOPBUTTONPAD 0x04 /* softbuttons at top of pad */
+#define INPUT_PROP_POINTING_STICK 0x05 /* is a pointing stick */
#define INPUT_PROP_MAX 0x1f
#define INPUT_PROP_CNT (INPUT_PROP_MAX + 1)
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index fbcffe8041f7..cabe95d5b461 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -384,6 +384,9 @@ enum {
IPVS_DEST_ATTR_PERSIST_CONNS, /* persistent connections */
IPVS_DEST_ATTR_STATS, /* nested attribute for dest stats */
+
+ IPVS_DEST_ATTR_ADDR_FAMILY, /* Address family of address */
+
__IPVS_DEST_ATTR_MAX,
};
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 78c2f2e79920..ca03119111a2 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -115,6 +115,9 @@ enum {
IPSET_ATTR_BYTES,
IPSET_ATTR_PACKETS,
IPSET_ATTR_COMMENT,
+ IPSET_ATTR_SKBMARK,
+ IPSET_ATTR_SKBPRIO,
+ IPSET_ATTR_SKBQUEUE,
__IPSET_ATTR_ADT_MAX,
};
#define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1)
@@ -147,6 +150,7 @@ enum ipset_errno {
IPSET_ERR_COUNTER,
IPSET_ERR_COMMENT,
IPSET_ERR_INVALID_MARKMASK,
+ IPSET_ERR_SKBINFO,
/* Type specific error codes */
IPSET_ERR_TYPE_SPECIFIC = 4352,
@@ -170,6 +174,12 @@ enum ipset_cmd_flags {
IPSET_FLAG_MATCH_COUNTERS = (1 << IPSET_FLAG_BIT_MATCH_COUNTERS),
IPSET_FLAG_BIT_RETURN_NOMATCH = 7,
IPSET_FLAG_RETURN_NOMATCH = (1 << IPSET_FLAG_BIT_RETURN_NOMATCH),
+ IPSET_FLAG_BIT_MAP_SKBMARK = 8,
+ IPSET_FLAG_MAP_SKBMARK = (1 << IPSET_FLAG_BIT_MAP_SKBMARK),
+ IPSET_FLAG_BIT_MAP_SKBPRIO = 9,
+ IPSET_FLAG_MAP_SKBPRIO = (1 << IPSET_FLAG_BIT_MAP_SKBPRIO),
+ IPSET_FLAG_BIT_MAP_SKBQUEUE = 10,
+ IPSET_FLAG_MAP_SKBQUEUE = (1 << IPSET_FLAG_BIT_MAP_SKBQUEUE),
IPSET_FLAG_CMD_MAX = 15,
};
@@ -187,6 +197,8 @@ enum ipset_cadt_flags {
IPSET_FLAG_WITH_COMMENT = (1 << IPSET_FLAG_BIT_WITH_COMMENT),
IPSET_FLAG_BIT_WITH_FORCEADD = 5,
IPSET_FLAG_WITH_FORCEADD = (1 << IPSET_FLAG_BIT_WITH_FORCEADD),
+ IPSET_FLAG_BIT_WITH_SKBINFO = 6,
+ IPSET_FLAG_WITH_SKBINFO = (1 << IPSET_FLAG_BIT_WITH_SKBINFO),
IPSET_FLAG_CADT_MAX = 15,
};
diff --git a/include/uapi/linux/netfilter/nf_nat.h b/include/uapi/linux/netfilter/nf_nat.h
index 1ad3659102b6..0880781ad7b6 100644
--- a/include/uapi/linux/netfilter/nf_nat.h
+++ b/include/uapi/linux/netfilter/nf_nat.h
@@ -13,6 +13,11 @@
#define NF_NAT_RANGE_PROTO_RANDOM_ALL \
(NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY)
+#define NF_NAT_RANGE_MASK \
+ (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \
+ NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \
+ NF_NAT_RANGE_PROTO_RANDOM_FULLY)
+
struct nf_nat_ipv4_range {
unsigned int flags;
__be32 min_ip;
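NF_NAT_RANGE_MASK collects every currently defined range flag in one place, which makes rejecting unknown bits from userspace a one-liner. A small sketch of that check, in the spirit of how an NLA_U32 flags attribute can be validated before use (the function name is illustrative):

/* Hedged sketch: refuse NAT range flags outside the defined mask. */
#include <errno.h>
#include <linux/netfilter/nf_nat.h>

static int validate_nat_flags(unsigned int flags)
{
        if (flags & ~NF_NAT_RANGE_MASK)
                return -EINVAL;         /* unknown bits set */
        return 0;
}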
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 801bdd1e56e3..c26df6787fb0 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -51,6 +51,8 @@ enum nft_verdicts {
* @NFT_MSG_NEWSETELEM: create a new set element (enum nft_set_elem_attributes)
* @NFT_MSG_GETSETELEM: get a set element (enum nft_set_elem_attributes)
* @NFT_MSG_DELSETELEM: delete a set element (enum nft_set_elem_attributes)
+ * @NFT_MSG_NEWGEN: announce a new generation, only for events (enum nft_gen_attributes)
+ * @NFT_MSG_GETGEN: get the rule-set generation (enum nft_gen_attributes)
*/
enum nf_tables_msg_types {
NFT_MSG_NEWTABLE,
@@ -68,6 +70,8 @@ enum nf_tables_msg_types {
NFT_MSG_NEWSETELEM,
NFT_MSG_GETSETELEM,
NFT_MSG_DELSETELEM,
+ NFT_MSG_NEWGEN,
+ NFT_MSG_GETGEN,
NFT_MSG_MAX,
};
@@ -571,6 +575,10 @@ enum nft_exthdr_attributes {
* @NFT_META_L4PROTO: layer 4 protocol number
* @NFT_META_BRI_IIFNAME: packet input bridge interface name
* @NFT_META_BRI_OIFNAME: packet output bridge interface name
+ * @NFT_META_PKTTYPE: packet type (skb->pkt_type), special handling for loopback
+ * @NFT_META_CPU: cpu id through smp_processor_id()
+ * @NFT_META_IIFGROUP: packet input interface group
+ * @NFT_META_OIFGROUP: packet output interface group
*/
enum nft_meta_keys {
NFT_META_LEN,
@@ -592,6 +600,10 @@ enum nft_meta_keys {
NFT_META_L4PROTO,
NFT_META_BRI_IIFNAME,
NFT_META_BRI_OIFNAME,
+ NFT_META_PKTTYPE,
+ NFT_META_CPU,
+ NFT_META_IIFGROUP,
+ NFT_META_OIFGROUP,
};
/**
@@ -737,13 +749,34 @@ enum nft_queue_attributes {
*
* @NFT_REJECT_ICMP_UNREACH: reject using ICMP unreachable
* @NFT_REJECT_TCP_RST: reject using TCP RST
+ * @NFT_REJECT_ICMPX_UNREACH: abstracted ICMP unreachable for bridge and inet
*/
enum nft_reject_types {
NFT_REJECT_ICMP_UNREACH,
NFT_REJECT_TCP_RST,
+ NFT_REJECT_ICMPX_UNREACH,
};
/**
+ * enum nft_reject_code - Generic reject codes for IPv4/IPv6
+ *
+ * @NFT_REJECT_ICMPX_NO_ROUTE: no route to host / network unreachable
+ * @NFT_REJECT_ICMPX_PORT_UNREACH: port unreachable
+ * @NFT_REJECT_ICMPX_HOST_UNREACH: host unreachable
+ * @NFT_REJECT_ICMPX_ADMIN_PROHIBITED: administratively prohibited
+ *
+ * These codes are mapped to real ICMP and ICMPv6 codes.
+ */
+enum nft_reject_inet_code {
+ NFT_REJECT_ICMPX_NO_ROUTE = 0,
+ NFT_REJECT_ICMPX_PORT_UNREACH,
+ NFT_REJECT_ICMPX_HOST_UNREACH,
+ NFT_REJECT_ICMPX_ADMIN_PROHIBITED,
+ __NFT_REJECT_ICMPX_MAX
+};
+#define NFT_REJECT_ICMPX_MAX (__NFT_REJECT_ICMPX_MAX - 1)
+
+/**
* enum nft_reject_attributes - nf_tables reject expression netlink attributes
*
* @NFTA_REJECT_TYPE: packet type to use (NLA_U32: nft_reject_types)
@@ -777,6 +810,7 @@ enum nft_nat_types {
* @NFTA_NAT_REG_ADDR_MAX: source register of address range end (NLA_U32: nft_registers)
* @NFTA_NAT_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers)
* @NFTA_NAT_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers)
+ * @NFTA_NAT_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32)
*/
enum nft_nat_attributes {
NFTA_NAT_UNSPEC,
@@ -786,8 +820,33 @@ enum nft_nat_attributes {
NFTA_NAT_REG_ADDR_MAX,
NFTA_NAT_REG_PROTO_MIN,
NFTA_NAT_REG_PROTO_MAX,
+ NFTA_NAT_FLAGS,
__NFTA_NAT_MAX
};
#define NFTA_NAT_MAX (__NFTA_NAT_MAX - 1)
+/**
+ * enum nft_masq_attributes - nf_tables masquerade expression attributes
+ *
+ * @NFTA_MASQ_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32)
+ */
+enum nft_masq_attributes {
+ NFTA_MASQ_UNSPEC,
+ NFTA_MASQ_FLAGS,
+ __NFTA_MASQ_MAX
+};
+#define NFTA_MASQ_MAX (__NFTA_MASQ_MAX - 1)
+
+/**
+ * enum nft_gen_attributes - nf_tables ruleset generation attributes
+ *
+ * @NFTA_GEN_ID: Ruleset generation ID (NLA_U32)
+ */
+enum nft_gen_attributes {
+ NFTA_GEN_UNSPEC,
+ NFTA_GEN_ID,
+ __NFTA_GEN_MAX
+};
+#define NFTA_GEN_MAX (__NFTA_GEN_MAX - 1)
+
#endif /* _LINUX_NF_TABLES_H */
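The NFT_REJECT_ICMPX_* codes are deliberately family-agnostic; the reject expression translates them into concrete ICMP or ICMPv6 codes at packet time. Below is a sketch of the kind of IPv4 mapping table involved, an assumption based only on the documented meanings above (ICMPv6 would have an equivalent table):

/* Illustrative mapping from abstracted ICMPX codes to IPv4 ICMP codes. */
#include <linux/icmp.h>
#include <linux/netfilter/nf_tables.h>

static const unsigned char icmpx_to_icmp_code[__NFT_REJECT_ICMPX_MAX] = {
        [NFT_REJECT_ICMPX_NO_ROUTE]             = ICMP_NET_UNREACH,
        [NFT_REJECT_ICMPX_PORT_UNREACH]         = ICMP_PORT_UNREACH,
        [NFT_REJECT_ICMPX_HOST_UNREACH]         = ICMP_HOST_UNREACH,
        [NFT_REJECT_ICMPX_ADMIN_PROHIBITED]     = ICMP_PKT_FILTERED,
};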
diff --git a/include/uapi/linux/netfilter/nfnetlink_acct.h b/include/uapi/linux/netfilter/nfnetlink_acct.h
index 51404ec19022..f3e34dbbf966 100644
--- a/include/uapi/linux/netfilter/nfnetlink_acct.h
+++ b/include/uapi/linux/netfilter/nfnetlink_acct.h
@@ -28,9 +28,17 @@ enum nfnl_acct_type {
NFACCT_USE,
NFACCT_FLAGS,
NFACCT_QUOTA,
+ NFACCT_FILTER,
__NFACCT_MAX
};
#define NFACCT_MAX (__NFACCT_MAX - 1)
+enum nfnl_attr_filter_type {
+ NFACCT_FILTER_UNSPEC,
+ NFACCT_FILTER_MASK,
+ NFACCT_FILTER_VALUE,
+ __NFACCT_FILTER_MAX
+};
+#define NFACCT_FILTER_MAX (__NFACCT_FILTER_MAX - 1)
#endif /* _UAPI_NFNL_ACCT_H_ */
diff --git a/include/uapi/linux/netfilter/xt_set.h b/include/uapi/linux/netfilter/xt_set.h
index 964d3d42f874..d6a1df1f2947 100644
--- a/include/uapi/linux/netfilter/xt_set.h
+++ b/include/uapi/linux/netfilter/xt_set.h
@@ -71,4 +71,14 @@ struct xt_set_info_match_v3 {
__u32 flags;
};
+/* Revision 3 target */
+
+struct xt_set_info_target_v3 {
+ struct xt_set_info add_set;
+ struct xt_set_info del_set;
+ struct xt_set_info map_set;
+ __u32 flags;
+ __u32 timeout;
+};
+
#endif /*_XT_SET_H*/
diff --git a/include/uapi/linux/netfilter_arp/arpt_mangle.h b/include/uapi/linux/netfilter_arp/arpt_mangle.h
index 250f502902bb..8c2b16a1f5a0 100644
--- a/include/uapi/linux/netfilter_arp/arpt_mangle.h
+++ b/include/uapi/linux/netfilter_arp/arpt_mangle.h
@@ -13,7 +13,7 @@ struct arpt_mangle
union {
struct in_addr tgt_ip;
} u_t;
- u_int8_t flags;
+ __u8 flags;
int target;
};
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index a794d1dd7b40..f7fc507d82ab 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -289,6 +289,9 @@ enum ovs_key_attr {
OVS_KEY_ATTR_TUNNEL, /* Nested set of ovs_tunnel attributes */
OVS_KEY_ATTR_SCTP, /* struct ovs_key_sctp */
OVS_KEY_ATTR_TCP_FLAGS, /* be16 TCP flags. */
+ OVS_KEY_ATTR_DP_HASH, /* u32 hash value. Value 0 indicates the hash
+ is not computed by the datapath. */
+ OVS_KEY_ATTR_RECIRC_ID, /* u32 recirc id */
#ifdef __KERNEL__
OVS_KEY_ATTR_IPV4_TUNNEL, /* struct ovs_key_ipv4_tunnel */
@@ -493,6 +496,27 @@ struct ovs_action_push_vlan {
__be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */
};
+/* Data path hash algorithm for computing Datapath hash.
+ *
+ * The algorithm type only specifies which fields of a flow
+ * are used as part of the hash. Each datapath is free
+ * to use its own hash algorithm. The hash value will be
+ * opaque to the user space daemon.
+ */
+enum ovs_hash_alg {
+ OVS_HASH_ALG_L4,
+};
+
+/*
+ * struct ovs_action_hash - %OVS_ACTION_ATTR_HASH action argument.
+ * @hash_alg: Algorithm used to compute hash prior to recirculation.
+ * @hash_basis: basis used for computing hash.
+ */
+struct ovs_action_hash {
+ uint32_t hash_alg; /* One of ovs_hash_alg. */
+ uint32_t hash_basis;
+};
+
/**
* enum ovs_action_attr - Action types.
*
@@ -521,6 +545,8 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_PUSH_VLAN, /* struct ovs_action_push_vlan. */
OVS_ACTION_ATTR_POP_VLAN, /* No argument. */
OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */
+ OVS_ACTION_ATTR_RECIRC, /* u32 recirc_id. */
+ OVS_ACTION_ATTR_HASH, /* struct ovs_action_hash. */
__OVS_ACTION_ATTR_MAX
};
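In a flow's action list the two new actions typically appear as a pair: OVS_ACTION_ATTR_HASH carries a struct ovs_action_hash, and a following OVS_ACTION_ATTR_RECIRC re-runs lookup with the computed value visible as OVS_KEY_ATTR_DP_HASH. A minimal sketch of the argument that would be serialized for the hash action (the basis value is an arbitrary example):

/* Hedged sketch: argument for OVS_ACTION_ATTR_HASH. */
#include <stdint.h>
#include <linux/openvswitch.h>

static const struct ovs_action_hash example_hash = {
        .hash_alg   = OVS_HASH_ALG_L4,          /* hash L4-relevant flow fields */
        .hash_basis = 0x12345678,               /* example seed */
};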
diff --git a/include/uapi/linux/usbip.h b/include/uapi/linux/usbip.h
new file mode 100644
index 000000000000..fa5db30ede36
--- /dev/null
+++ b/include/uapi/linux/usbip.h
@@ -0,0 +1,26 @@
+/*
+ * usbip.h
+ *
+ * USBIP uapi defines and function prototypes etc.
+*/
+
+#ifndef _UAPI_LINUX_USBIP_H
+#define _UAPI_LINUX_USBIP_H
+
+/* usbip device status - exported in usbip device sysfs status */
+enum usbip_device_status {
+ /* sdev is available. */
+ SDEV_ST_AVAILABLE = 0x01,
+ /* sdev is now used. */
+ SDEV_ST_USED,
+ /* sdev is unusable because of a fatal error. */
+ SDEV_ST_ERROR,
+
+ /* vdev does not connect a remote device. */
+ VDEV_ST_NULL,
+ /* vdev is used, but the USB address is not assigned yet */
+ VDEV_ST_NOTASSIGNED,
+ VDEV_ST_USED,
+ VDEV_ST_ERROR
+};
+#endif /* _UAPI_LINUX_USBIP_H */
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
index c38355c1f3c9..1590c49cae57 100644
--- a/include/uapi/linux/xattr.h
+++ b/include/uapi/linux/xattr.h
@@ -13,7 +13,7 @@
#ifndef _UAPI_LINUX_XATTR_H
#define _UAPI_LINUX_XATTR_H
-#ifdef __UAPI_DEF_XATTR
+#if __UAPI_DEF_XATTR
#define __USE_KERNEL_XATTR_DEFS
#define XATTR_CREATE 0x1 /* set value, fail if attr already exists */
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 25e5dd916ba4..02d5125a5ee8 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -328,6 +328,8 @@ enum xfrm_spdattr_type_t {
XFRMA_SPD_UNSPEC,
XFRMA_SPD_INFO,
XFRMA_SPD_HINFO,
+ XFRMA_SPD_IPV4_HTHRESH,
+ XFRMA_SPD_IPV6_HTHRESH,
__XFRMA_SPD_MAX
#define XFRMA_SPD_MAX (__XFRMA_SPD_MAX - 1)
@@ -347,6 +349,11 @@ struct xfrmu_spdhinfo {
__u32 spdhmcnt;
};
+struct xfrmu_spdhthresh {
+ __u8 lbits;
+ __u8 rbits;
+};
+
struct xfrm_usersa_info {
struct xfrm_selector sel;
struct xfrm_id id;
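The new XFRMA_SPD_IPV4_HTHRESH/XFRMA_SPD_IPV6_HTHRESH attributes each carry a struct xfrmu_spdhthresh; the two fields are prefix-length thresholds for the SPD hash tables (the precise local/remote semantics are an assumption here, inferred from the field names). A sketch of the payload a key manager might attach to an XFRM_MSG_NEWSPDINFO request:

/* Hedged sketch: payload for XFRMA_SPD_IPV4_HTHRESH (illustrative values). */
#include <linux/xfrm.h>

static const struct xfrmu_spdhthresh example_v4_thresh = {
        .lbits = 0,
        .rbits = 24,    /* example prefix-length threshold */
};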
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 131a6ccdba25..14334d0161d5 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -53,6 +53,9 @@
/* operation as Dom0 is supported */
#define XENFEAT_dom0 11
+/* Xen also maps grant references at pfn = mfn */
+#define XENFEAT_grant_map_identity 12
+
#define XENFEAT_NR_SUBMAPS 1
#endif /* __XEN_PUBLIC_FEATURES_H__ */