Diffstat (limited to 'include')
-rw-r--r-- include/acpi/acpi_bus.h | 169
-rw-r--r-- include/acpi/cppc_acpi.h | 2
-rw-r--r-- include/acpi/video.h | 9
-rw-r--r-- include/asm-generic/numa.h | 2
-rw-r--r-- include/asm-generic/unaligned.h | 24
-rw-r--r-- include/asm-generic/vmlinux.lds.h | 11
-rw-r--r-- include/crypto/hash.h | 4
-rw-r--r-- include/crypto/if_alg.h | 5
-rw-r--r-- include/crypto/skcipher.h | 133
-rw-r--r-- include/drm/ttm/ttm_pool.h | 2
-rw-r--r-- include/kunit/device.h | 80
-rw-r--r-- include/kunit/resource.h | 21
-rw-r--r-- include/kunit/static_stub.h | 2
-rw-r--r-- include/kunit/test.h | 33
-rw-r--r-- include/linux/acpi.h | 22
-rw-r--r-- include/linux/acpi_amd_wbrf.h | 91
-rw-r--r-- include/linux/arch_topology.h | 8
-rw-r--r-- include/linux/async.h | 2
-rw-r--r-- include/linux/backing-file.h | 42
-rw-r--r-- include/linux/blk_types.h | 8
-rw-r--r-- include/linux/blkdev.h | 31
-rw-r--r-- include/linux/bpf_types.h | 4
-rw-r--r-- include/linux/buffer_head.h | 9
-rw-r--r-- include/linux/cgroup-defs.h | 21
-rw-r--r-- include/linux/cgroup.h | 4
-rw-r--r-- include/linux/cleanup.h | 52
-rw-r--r-- include/linux/cpufreq.h | 1
-rw-r--r-- include/linux/cpuhotplug.h | 18
-rw-r--r-- include/linux/cpuset.h | 6
-rw-r--r-- include/linux/crash_core.h | 6
-rw-r--r-- include/linux/crc-ccitt.h | 7
-rw-r--r-- include/linux/cred.h | 58
-rw-r--r-- include/linux/damon.h | 24
-rw-r--r-- include/linux/device.h | 2
-rw-r--r-- include/linux/edac.h | 3
-rw-r--r-- include/linux/efi.h | 12
-rw-r--r-- include/linux/energy_model.h | 7
-rw-r--r-- include/linux/entry-common.h | 95
-rw-r--r-- include/linux/eventfd.h | 17
-rw-r--r-- include/linux/evm.h | 6
-rw-r--r-- include/linux/export-internal.h | 6
-rw-r--r-- include/linux/fdtable.h | 19
-rw-r--r-- include/linux/file.h | 12
-rw-r--r-- include/linux/fs.h | 127
-rw-r--r-- include/linux/fsnotify.h | 50
-rw-r--r-- include/linux/fsnotify_backend.h | 14
-rw-r--r-- include/linux/gfp_types.h | 17
-rw-r--r-- include/linux/hid-sensor-ids.h | 4
-rw-r--r-- include/linux/highmem.h | 76
-rw-r--r-- include/linux/hisi_acc_qm.h | 28
-rw-r--r-- include/linux/huge_mm.h | 184
-rw-r--r-- include/linux/hugetlb.h | 2
-rw-r--r-- include/linux/ieee80211.h | 3
-rw-r--r-- include/linux/init_task.h | 7
-rw-r--r-- include/linux/intel_tpmi.h | 18
-rw-r--r-- include/linux/io_uring_types.h | 3
-rw-r--r-- include/linux/ioport.h | 3
-rw-r--r-- include/linux/jbd2.h | 40
-rw-r--r-- include/linux/kasan.h | 162
-rw-r--r-- include/linux/kexec.h | 9
-rw-r--r-- include/linux/key-type.h | 1
-rw-r--r-- include/linux/kfence.h | 2
-rw-r--r-- include/linux/ksm.h | 10
-rw-r--r-- include/linux/list_lru.h | 88
-rw-r--r-- include/linux/lockdep_types.h | 2
-rw-r--r-- include/linux/lsm_hook_defs.h | 6
-rw-r--r-- include/linux/lsm_hooks.h | 17
-rw-r--r-- include/linux/maple_tree.h | 349
-rw-r--r-- include/linux/memblock.h | 1
-rw-r--r-- include/linux/memcontrol.h | 37
-rw-r--r-- include/linux/mempool.h | 1
-rw-r--r-- include/linux/memremap.h | 12
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 9
-rw-r--r-- include/linux/mm.h | 59
-rw-r--r-- include/linux/mm_inline.h | 23
-rw-r--r-- include/linux/mm_types.h | 30
-rw-r--r-- include/linux/mmzone.h | 100
-rw-r--r-- include/linux/mnt_idmapping.h | 3
-rw-r--r-- include/linux/module.h | 2
-rw-r--r-- include/linux/moduleparam.h | 6
-rw-r--r-- include/linux/mount.h | 5
-rw-r--r-- include/linux/mtd/rawnand.h | 15
-rw-r--r-- include/linux/mutex.h | 3
-rw-r--r-- include/linux/net/intel/i40e_client.h | 2
-rw-r--r-- include/linux/nfs4.h | 22
-rw-r--r-- include/linux/nfs_fs.h | 1
-rw-r--r-- include/linux/nubus.h | 2
-rw-r--r-- include/linux/osq_lock.h | 5
-rw-r--r-- include/linux/page-flags.h | 9
-rw-r--r-- include/linux/pageblock-flags.h | 4
-rw-r--r-- include/linux/pci.h | 5
-rw-r--r-- include/linux/pci_ids.h | 2
-rw-r--r-- include/linux/perf/arm_pmu.h | 28
-rw-r--r-- include/linux/perf/arm_pmuv3.h | 34
-rw-r--r-- include/linux/perf_event.h | 22
-rw-r--r-- include/linux/pgtable.h | 9
-rw-r--r-- include/linux/phy.h | 1
-rw-r--r-- include/linux/platform_data/x86/clk-lpss.h | 2
-rw-r--r-- include/linux/pm_opp.h | 28
-rw-r--r-- include/linux/property.h | 7
-rw-r--r-- include/linux/quotaops.h | 15
-rw-r--r-- include/linux/reboot.h | 12
-rw-r--r-- include/linux/regulator/consumer.h | 47
-rw-r--r-- include/linux/regulator/driver.h | 7
-rw-r--r-- include/linux/regulator/machine.h | 18
-rw-r--r-- include/linux/rmap.h | 411
-rw-r--r-- include/linux/rwsem.h | 8
-rw-r--r-- include/linux/sched.h | 77
-rw-r--r-- include/linux/sched/isolation.h | 4
-rw-r--r-- include/linux/sched/signal.h | 4
-rw-r--r-- include/linux/sched/task.h | 2
-rw-r--r-- include/linux/sched/topology.h | 8
-rw-r--r-- include/linux/security.h | 56
-rw-r--r-- include/linux/slab.h | 24
-rw-r--r-- include/linux/slab_def.h | 124
-rw-r--r-- include/linux/slub_def.h | 204
-rw-r--r-- include/linux/spi/spi-mem.h | 2
-rw-r--r-- include/linux/spi/spi.h | 65
-rw-r--r-- include/linux/spinlock.h | 41
-rw-r--r-- include/linux/splice.h | 51
-rw-r--r-- include/linux/stackdepot.h | 61
-rw-r--r-- include/linux/sunrpc/bc_xprt.h | 3
-rw-r--r-- include/linux/sunrpc/clnt.h | 1
-rw-r--r-- include/linux/sunrpc/sched.h | 14
-rw-r--r-- include/linux/sunrpc/svc.h | 37
-rw-r--r-- include/linux/sunrpc/svc_rdma.h | 67
-rw-r--r-- include/linux/sunrpc/svcauth.h | 7
-rw-r--r-- include/linux/sunrpc/xprt.h | 11
-rw-r--r-- include/linux/surface_aggregator/serial_hub.h | 4
-rw-r--r-- include/linux/swap.h | 8
-rw-r--r-- include/linux/syscalls.h | 14
-rw-r--r-- include/linux/thermal.h | 26
-rw-r--r-- include/linux/uidgid.h | 13
-rw-r--r-- include/linux/uio.h | 2
-rw-r--r-- include/linux/userfaultfd_k.h | 11
-rw-r--r-- include/linux/vm_event_item.h | 4
-rw-r--r-- include/linux/vmstat.h | 60
-rw-r--r-- include/linux/wmi.h | 20
-rw-r--r-- include/linux/workqueue.h | 2
-rw-r--r-- include/linux/writeback.h | 1
-rw-r--r-- include/linux/zswap.h | 32
-rw-r--r-- include/net/addrconf.h | 12
-rw-r--r-- include/net/af_rxrpc.h | 15
-rw-r--r-- include/net/bluetooth/hci_core.h | 9
-rw-r--r-- include/net/if_inet6.h | 4
-rw-r--r-- include/net/ip6_fib.h | 64
-rw-r--r-- include/net/netfilter/nf_flow_table.h | 10
-rw-r--r-- include/net/netfilter/nf_tables_ipv4.h | 2
-rw-r--r-- include/net/scm.h | 9
-rw-r--r-- include/net/sock.h | 5
-rw-r--r-- include/net/tcp.h | 2
-rw-r--r-- include/net/tcp_ao.h | 26
-rw-r--r-- include/trace/events/9p.h | 11
-rw-r--r-- include/trace/events/afs.h | 779
-rw-r--r-- include/trace/events/btrfs.h | 78
-rw-r--r-- include/trace/events/ksm.h | 33
-rw-r--r-- include/trace/events/rpcrdma.h | 238
-rw-r--r-- include/trace/events/rxrpc.h | 3
-rw-r--r-- include/trace/events/sched.h | 15
-rw-r--r-- include/trace/events/sunrpc.h | 1
-rw-r--r-- include/trace/events/timer.h | 40
-rw-r--r-- include/uapi/asm-generic/unistd.h | 15
-rw-r--r-- include/uapi/linux/fs.h | 1
-rw-r--r-- include/uapi/linux/fuse.h | 10
-rw-r--r-- include/uapi/linux/kexec.h | 1
-rw-r--r-- include/uapi/linux/lsm.h | 90
-rw-r--r-- include/uapi/linux/mount.h | 70
-rw-r--r-- include/uapi/linux/perf_event.h | 13
-rw-r--r-- include/uapi/linux/stat.h | 1
-rw-r--r-- include/uapi/linux/userfaultfd.h | 29
-rw-r--r-- include/uapi/regulator/regulator.h | 90
171 files changed, 4001 insertions, 2054 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 1216d72c650f..2b3ae51f950d 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -14,7 +14,7 @@
struct acpi_handle_list {
u32 count;
- acpi_handle* handles;
+ acpi_handle *handles;
};
/* acpi_utils.h */
@@ -25,16 +25,15 @@ acpi_status
acpi_evaluate_integer(acpi_handle handle,
acpi_string pathname,
struct acpi_object_list *arguments, unsigned long long *data);
-acpi_status
-acpi_evaluate_reference(acpi_handle handle,
- acpi_string pathname,
- struct acpi_object_list *arguments,
- struct acpi_handle_list *list);
+bool acpi_evaluate_reference(acpi_handle handle, acpi_string pathname,
+ struct acpi_object_list *arguments,
+ struct acpi_handle_list *list);
bool acpi_handle_list_equal(struct acpi_handle_list *list1,
struct acpi_handle_list *list2);
void acpi_handle_list_replace(struct acpi_handle_list *dst,
struct acpi_handle_list *src);
void acpi_handle_list_free(struct acpi_handle_list *list);
+bool acpi_device_dep(acpi_handle target, acpi_handle match);
acpi_status
acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
struct acpi_buffer *status_buf);
@@ -366,6 +365,98 @@ struct acpi_device_data {
struct acpi_gpio_mapping;
+#define ACPI_DEVICE_SWNODE_ROOT 0
+
+/*
+ * The maximum expected number of CSI-2 data lanes.
+ *
+ * This number is not expected to ever have to be equal to or greater than the
+ * number of bits in an unsigned long variable, but if it needs to be increased
+ * above that limit, code will need to be adjusted accordingly.
+ */
+#define ACPI_DEVICE_CSI2_DATA_LANES 8
+
+#define ACPI_DEVICE_SWNODE_PORT_NAME_LENGTH 8
+
+enum acpi_device_swnode_dev_props {
+ ACPI_DEVICE_SWNODE_DEV_ROTATION,
+ ACPI_DEVICE_SWNODE_DEV_CLOCK_FREQUENCY,
+ ACPI_DEVICE_SWNODE_DEV_LED_MAX_MICROAMP,
+ ACPI_DEVICE_SWNODE_DEV_FLASH_MAX_MICROAMP,
+ ACPI_DEVICE_SWNODE_DEV_FLASH_MAX_TIMEOUT_US,
+ ACPI_DEVICE_SWNODE_DEV_NUM_OF,
+ ACPI_DEVICE_SWNODE_DEV_NUM_ENTRIES
+};
+
+enum acpi_device_swnode_port_props {
+ ACPI_DEVICE_SWNODE_PORT_REG,
+ ACPI_DEVICE_SWNODE_PORT_NUM_OF,
+ ACPI_DEVICE_SWNODE_PORT_NUM_ENTRIES
+};
+
+enum acpi_device_swnode_ep_props {
+ ACPI_DEVICE_SWNODE_EP_REMOTE_EP,
+ ACPI_DEVICE_SWNODE_EP_BUS_TYPE,
+ ACPI_DEVICE_SWNODE_EP_REG,
+ ACPI_DEVICE_SWNODE_EP_CLOCK_LANES,
+ ACPI_DEVICE_SWNODE_EP_DATA_LANES,
+ ACPI_DEVICE_SWNODE_EP_LANE_POLARITIES,
+ /* TX only */
+ ACPI_DEVICE_SWNODE_EP_LINK_FREQUENCIES,
+ ACPI_DEVICE_SWNODE_EP_NUM_OF,
+ ACPI_DEVICE_SWNODE_EP_NUM_ENTRIES
+};
+
+/*
+ * Each device has a root software node plus two times as many nodes as the
+ * number of CSI-2 ports.
+ */
+#define ACPI_DEVICE_SWNODE_PORT(port) (2 * (port) + 1)
+#define ACPI_DEVICE_SWNODE_EP(endpoint) \
+ (ACPI_DEVICE_SWNODE_PORT(endpoint) + 1)
+
+/**
+ * struct acpi_device_software_node_port - MIPI DisCo for Imaging CSI-2 port
+ * @port_name: Port name.
+ * @data_lanes: "data-lanes" property values.
+ * @lane_polarities: "lane-polarities" property values.
+ * @link_frequencies: "link-frequencies" property values.
+ * @port_nr: Port number.
+ * @crs_csi2_local: _CRS CSI2 record present (i.e. this is a transmitter one).
+ * @port_props: Port properties.
+ * @ep_props: Endpoint properties.
+ * @remote_ep: Reference to the remote endpoint.
+ */
+struct acpi_device_software_node_port {
+ char port_name[ACPI_DEVICE_SWNODE_PORT_NAME_LENGTH + 1];
+ u32 data_lanes[ACPI_DEVICE_CSI2_DATA_LANES];
+ u32 lane_polarities[ACPI_DEVICE_CSI2_DATA_LANES + 1 /* clock lane */];
+ u64 link_frequencies[ACPI_DEVICE_CSI2_DATA_LANES];
+ unsigned int port_nr;
+ bool crs_csi2_local;
+
+ struct property_entry port_props[ACPI_DEVICE_SWNODE_PORT_NUM_ENTRIES];
+ struct property_entry ep_props[ACPI_DEVICE_SWNODE_EP_NUM_ENTRIES];
+
+ struct software_node_ref_args remote_ep[1];
+};
+
+/**
+ * struct acpi_device_software_nodes - Software nodes for an ACPI device
+ * @dev_props: Device properties.
+ * @nodes: Software nodes for root as well as ports and endpoints.
+ * @nodeptrs: Array of software node pointers, for (un)registering them.
+ * @ports: Information related to each port and endpoint within a port.
+ * @num_ports: The number of ports.
+ */
+struct acpi_device_software_nodes {
+ struct property_entry dev_props[ACPI_DEVICE_SWNODE_DEV_NUM_ENTRIES];
+ struct software_node *nodes;
+ const struct software_node **nodeptrs;
+ struct acpi_device_software_node_port *ports;
+ unsigned int num_ports;
+};
+
/* Device */
struct acpi_device {
u32 pld_crc;
@@ -384,6 +475,7 @@ struct acpi_device {
struct acpi_device_data data;
struct acpi_scan_handler *handler;
struct acpi_hotplug_context *hp;
+ struct acpi_device_software_nodes *swnodes;
const struct acpi_gpio_mapping *driver_gpios;
void *driver_data;
struct device dev;
@@ -764,10 +856,71 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set);
}
-bool acpi_dev_uid_match(struct acpi_device *adev, const char *uid2);
-bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2);
int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer);
+static inline bool acpi_dev_hid_match(struct acpi_device *adev, const char *hid2)
+{
+ const char *hid1 = acpi_device_hid(adev);
+
+ return hid1 && hid2 && !strcmp(hid1, hid2);
+}
+
+static inline bool acpi_str_uid_match(struct acpi_device *adev, const char *uid2)
+{
+ const char *uid1 = acpi_device_uid(adev);
+
+ return uid1 && uid2 && !strcmp(uid1, uid2);
+}
+
+static inline bool acpi_int_uid_match(struct acpi_device *adev, u64 uid2)
+{
+ u64 uid1;
+
+ return !acpi_dev_uid_to_integer(adev, &uid1) && uid1 == uid2;
+}
+
+#define TYPE_ENTRY(type, x) \
+ const type: x, \
+ type: x
+
+#define ACPI_STR_TYPES(match) \
+ TYPE_ENTRY(unsigned char *, match), \
+ TYPE_ENTRY(signed char *, match), \
+ TYPE_ENTRY(char *, match), \
+ TYPE_ENTRY(void *, match)
+
+/**
+ * acpi_dev_uid_match - Match device by supplied UID
+ * @adev: ACPI device to match.
+ * @uid2: Unique ID of the device.
+ *
+ * Matches UID in @adev with given @uid2.
+ *
+ * Returns: %true if matches, %false otherwise.
+ */
+#define acpi_dev_uid_match(adev, uid2) \
+ _Generic(uid2, \
+ /* Treat @uid2 as a string for acpi string types */ \
+ ACPI_STR_TYPES(acpi_str_uid_match), \
+ /* Treat as an integer otherwise */ \
+ default: acpi_int_uid_match)(adev, uid2)
+
+/**
+ * acpi_dev_hid_uid_match - Match device by supplied HID and UID
+ * @adev: ACPI device to match.
+ * @hid2: Hardware ID of the device.
+ * @uid2: Unique ID of the device, pass 0 or NULL to not check _UID.
+ *
+ * Matches HID and UID in @adev with given @hid2 and @uid2. Absence of @uid2
+ * will be treated as a match. If user wants to validate @uid2, it should be
+ * done before calling this function.
+ *
+ * Returns: %true if matches or @uid2 is 0 or NULL, %false otherwise.
+ */
+#define acpi_dev_hid_uid_match(adev, hid2, uid2) \
+ (acpi_dev_hid_match(adev, hid2) && \
+ (!(uid2) || acpi_dev_uid_match(adev, uid2)))
+
void acpi_dev_clear_dependencies(struct acpi_device *supplier);
bool acpi_dev_ready_for_enumeration(const struct acpi_device *device);
struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier,
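As a quick illustration of the _Generic() dispatch above (a sketch only, not part of the patch; the HID/UID values and the helper name are made up), the same macro now accepts either a string or an integer UID:

    static bool example_match(struct acpi_device *adev)
    {
    	/* string UID: routed to acpi_str_uid_match() */
    	if (acpi_dev_hid_uid_match(adev, "ABCD0000", "1"))
    		return true;

    	/* integer UID: routed to acpi_int_uid_match() via the default: arm */
    	return acpi_dev_uid_match(adev, 2);
    }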
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 6126c977ece0..3a0995f8bce8 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -144,6 +144,8 @@ extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_set_enable(int cpu, bool enable);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
extern bool cppc_perf_ctrs_in_pcc(void);
+extern unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf);
+extern unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq);
extern bool acpi_cpc_valid(void);
extern bool cppc_allow_fast_switch(void);
extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
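The two new prototypes convert between abstract CPPC performance levels and kHz. A minimal sketch of the intended round trip (illustrative only, not part of the patch; error handling is trimmed and the CPU number is arbitrary):

    static void example_cppc_khz(int cpu)
    {
    	struct cppc_perf_caps caps;
    	unsigned int khz, perf;

    	if (cppc_get_perf_caps(cpu, &caps))
    		return;

    	khz = cppc_perf_to_khz(&caps, caps.highest_perf);
    	perf = cppc_khz_to_perf(&caps, khz);	/* back to an abstract perf level */
    	pr_debug("highest perf %u corresponds to %u kHz\n", perf, khz);
    }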
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 4230392b5b0b..3d538d4178ab 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -75,6 +75,15 @@ static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
return __acpi_video_get_backlight_type(false, NULL);
}
+/*
+ * This function MUST only be called by GPU drivers to check if the driver
+ * should register a backlight class device. This function not only checks
+ * if a GPU native backlight device should be registered, it *also* tells
+ * the ACPI video-detect code that native GPU backlight control is available.
+ * Therefore, calling this from any place other than the GPU driver is wrong!
+ * To check whether GPU native backlight control is in use, other places should instead use:
+ * if (acpi_video_get_backlight_type() == acpi_backlight_native) { ... }
+ */
static inline bool acpi_video_backlight_use_native(void)
{
return __acpi_video_get_backlight_type(true, NULL) == acpi_backlight_native;
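For reference, the query-only check that the new comment tells non-GPU code to use looks like this (sketch taken directly from the comment):

    if (acpi_video_get_backlight_type() == acpi_backlight_native) {
    	/* native GPU backlight control is in use */
    }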
diff --git a/include/asm-generic/numa.h b/include/asm-generic/numa.h
index 1a3ad6d29833..c32e0cf23c90 100644
--- a/include/asm-generic/numa.h
+++ b/include/asm-generic/numa.h
@@ -35,6 +35,7 @@ int __init numa_add_memblk(int nodeid, u64 start, u64 end);
void __init numa_set_distance(int from, int to, int distance);
void __init numa_free_distance(void);
void __init early_map_cpu_to_node(unsigned int cpu, int nid);
+int __init early_cpu_to_node(int cpu);
void numa_store_cpu_info(unsigned int cpu);
void numa_add_cpu(unsigned int cpu);
void numa_remove_cpu(unsigned int cpu);
@@ -46,6 +47,7 @@ static inline void numa_add_cpu(unsigned int cpu) { }
static inline void numa_remove_cpu(unsigned int cpu) { }
static inline void arch_numa_init(void) { }
static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
+static inline int early_cpu_to_node(int cpu) { return 0; }
#endif /* CONFIG_NUMA */
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 699650f81970..a84c64e5f11e 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -104,9 +104,9 @@ static inline u32 get_unaligned_le24(const void *p)
static inline void __put_unaligned_be24(const u32 val, u8 *p)
{
- *p++ = val >> 16;
- *p++ = val >> 8;
- *p++ = val;
+ *p++ = (val >> 16) & 0xff;
+ *p++ = (val >> 8) & 0xff;
+ *p++ = val & 0xff;
}
static inline void put_unaligned_be24(const u32 val, void *p)
@@ -116,9 +116,9 @@ static inline void put_unaligned_be24(const u32 val, void *p)
static inline void __put_unaligned_le24(const u32 val, u8 *p)
{
- *p++ = val;
- *p++ = val >> 8;
- *p++ = val >> 16;
+ *p++ = val & 0xff;
+ *p++ = (val >> 8) & 0xff;
+ *p++ = (val >> 16) & 0xff;
}
static inline void put_unaligned_le24(const u32 val, void *p)
@@ -128,12 +128,12 @@ static inline void put_unaligned_le24(const u32 val, void *p)
static inline void __put_unaligned_be48(const u64 val, u8 *p)
{
- *p++ = val >> 40;
- *p++ = val >> 32;
- *p++ = val >> 24;
- *p++ = val >> 16;
- *p++ = val >> 8;
- *p++ = val;
+ *p++ = (val >> 40) & 0xff;
+ *p++ = (val >> 32) & 0xff;
+ *p++ = (val >> 24) & 0xff;
+ *p++ = (val >> 16) & 0xff;
+ *p++ = (val >> 8) & 0xff;
+ *p++ = val & 0xff;
}
static inline void put_unaligned_be48(const u64 val, void *p)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index bae0fe4d499b..5dd3a61d673d 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -370,7 +370,8 @@
BRANCH_PROFILE() \
TRACE_PRINTKS() \
BPF_RAW_TP() \
- TRACEPOINT_STR()
+ TRACEPOINT_STR() \
+ KUNIT_TABLE()
/*
* Data section helpers
@@ -700,7 +701,7 @@
EARLYCON_TABLE() \
LSM_TABLE() \
EARLY_LSM_TABLE() \
- KUNIT_TABLE()
+ KUNIT_INIT_TABLE()
#define INIT_TEXT \
*(.init.text .init.text.*) \
@@ -926,6 +927,12 @@
. = ALIGN(8); \
BOUNDED_SECTION_POST_LABEL(.kunit_test_suites, __kunit_suites, _start, _end)
+/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
+#define KUNIT_INIT_TABLE() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION_POST_LABEL(.kunit_init_test_suites, \
+ __kunit_init_suites, _start, _end)
+
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index c7bdbece27cc..5d61f576cfc8 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -212,13 +212,9 @@ struct shash_desc {
* This is a counterpart to @init_tfm, used to remove
* various changes set in @init_tfm.
* @clone_tfm: Copy transform into new object, may allocate memory.
- * @digestsize: see struct ahash_alg
- * @statesize: see struct ahash_alg
* @descsize: Size of the operational state for the message digest. This state
* size is the memory size that needs to be allocated for
* shash_desc.__ctx
- * @stat: Statistics for hash algorithm.
- * @base: internally used
* @halg: see struct hash_alg_common
* @HASH_ALG_COMMON: see struct hash_alg_common
*/
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index ef8ce86b1f78..78ecaf5db04c 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -121,6 +121,7 @@ struct af_alg_async_req {
*
* @tsgl_list: Link to TX SGL
* @iv: IV for cipher operation
+ * @state: Existing state for continuing operation
* @aead_assoclen: Length of AAD for AEAD cipher operations
* @completion: Work queue for synchronous operation
* @used: TX bytes sent to kernel. This variable is used to
@@ -136,11 +137,13 @@ struct af_alg_async_req {
* recvmsg is invoked.
* @init: True if metadata has been sent.
* @len: Length of memory allocated for this data structure.
+ * @inflight: Non-zero when AIO requests are in flight.
*/
struct af_alg_ctx {
struct list_head tsgl_list;
void *iv;
+ void *state;
size_t aead_assoclen;
struct crypto_wait wait;
@@ -154,6 +157,8 @@ struct af_alg_ctx {
bool init;
unsigned int len;
+
+ unsigned int inflight;
};
int af_alg_register_type(const struct af_alg_type *type);
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index ea18af48346b..c8857d7bdb37 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -15,6 +15,17 @@
#include <linux/string.h>
#include <linux/types.h>
+/* Set this bit if the lskcipher operation is a continuation. */
+#define CRYPTO_LSKCIPHER_FLAG_CONT 0x00000001
+/* Set this bit if the lskcipher operation is final. */
+#define CRYPTO_LSKCIPHER_FLAG_FINAL 0x00000002
+/* The bit CRYPTO_TFM_REQ_MAY_SLEEP can also be set if needed. */
+
+/* Set this bit if the skcipher operation is a continuation. */
+#define CRYPTO_SKCIPHER_REQ_CONT 0x00000001
+/* Set this bit if the skcipher operation is not final. */
+#define CRYPTO_SKCIPHER_REQ_NOTFINAL 0x00000002
+
struct scatterlist;
/**
@@ -91,6 +102,7 @@ struct crypto_istat_cipher {
* IV of exactly that size to perform the encrypt or decrypt operation.
* @chunksize: Equal to the block size except for stream ciphers such as
* CTR where it is set to the underlying block size.
+ * @statesize: Size of the internal state for the algorithm.
* @stat: Statistics for cipher algorithm
* @base: Definition of a generic crypto algorithm.
*/
@@ -99,6 +111,7 @@ struct crypto_istat_cipher {
unsigned int max_keysize; \
unsigned int ivsize; \
unsigned int chunksize; \
+ unsigned int statesize; \
\
SKCIPHER_ALG_COMMON_STAT \
\
@@ -108,16 +121,6 @@ struct skcipher_alg_common SKCIPHER_ALG_COMMON;
/**
* struct skcipher_alg - symmetric key cipher definition
- * @min_keysize: Minimum key size supported by the transformation. This is the
- * smallest key length supported by this transformation algorithm.
- * This must be set to one of the pre-defined values as this is
- * not hardware specific. Possible values for this field can be
- * found via git grep "_MIN_KEY_SIZE" include/crypto/
- * @max_keysize: Maximum key size supported by the transformation. This is the
- * largest key length supported by this transformation algorithm.
- * This must be set to one of the pre-defined values as this is
- * not hardware specific. Possible values for this field can be
- * found via git grep "_MAX_KEY_SIZE" include/crypto/
* @setkey: Set key for the transformation. This function is used to either
* program a supplied key into the hardware or store the key in the
* transformation context for programming it later. Note that this
@@ -141,6 +144,17 @@ struct skcipher_alg_common SKCIPHER_ALG_COMMON;
* be called in parallel with the same transformation object.
* @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
* and the conditions are exactly the same.
+ * @export: Export partial state of the transformation. This function dumps the
+ * entire state of the ongoing transformation into a provided block of
+ * data so it can be @import 'ed back later on. This is useful in case
+ * you want to save the partial result of the transformation after
+ * processing a certain amount of data and reload this partial result
+ * multiple times later on for re-use. No data processing
+ * happens at this point.
+ * @import: Import partial state of the transformation. This function loads the
+ * entire state of the ongoing transformation from a provided block of
+ * data so the transformation can continue from this point onward. No
+ * data processing happens at this point.
* @init: Initialize the cryptographic transformation object. This function
* is used to initialize the cryptographic transformation object.
* This function is called only once at the instantiation time, right
@@ -152,15 +166,9 @@ struct skcipher_alg_common SKCIPHER_ALG_COMMON;
* @exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @init, used to remove various changes set in
* @init.
- * @ivsize: IV size applicable for transformation. The consumer must provide an
- * IV of exactly that size to perform the encrypt or decrypt operation.
- * @chunksize: Equal to the block size except for stream ciphers such as
- * CTR where it is set to the underlying block size.
* @walksize: Equal to the chunk size except in cases where the algorithm is
* considerably more efficient if it can operate on multiple chunks
* in parallel. Should be a multiple of chunksize.
- * @stat: Statistics for cipher algorithm
- * @base: Definition of a generic crypto algorithm.
* @co: see struct skcipher_alg_common
*
* All fields except @ivsize are mandatory and must be filled.
@@ -170,6 +178,8 @@ struct skcipher_alg {
unsigned int keylen);
int (*encrypt)(struct skcipher_request *req);
int (*decrypt)(struct skcipher_request *req);
+ int (*export)(struct skcipher_request *req, void *out);
+ int (*import)(struct skcipher_request *req, const void *in);
int (*init)(struct crypto_skcipher *tfm);
void (*exit)(struct crypto_skcipher *tfm);
@@ -200,6 +210,9 @@ struct skcipher_alg {
* may be left over if length is not a multiple of blocks
* and there is more to come (final == false). The number of
* left-over bytes should be returned in case of success.
+ * The siv field shall be as long as ivsize + statesize with
+ * the IV placed at the front. The state will be used by the
+ * algorithm internally.
* @decrypt: Decrypt a number of bytes. This is a reverse counterpart to
* @encrypt and the conditions are exactly the same.
* @init: Initialize the cryptographic transformation object. This function
@@ -215,9 +228,9 @@ struct lskcipher_alg {
int (*setkey)(struct crypto_lskcipher *tfm, const u8 *key,
unsigned int keylen);
int (*encrypt)(struct crypto_lskcipher *tfm, const u8 *src,
- u8 *dst, unsigned len, u8 *iv, bool final);
+ u8 *dst, unsigned len, u8 *siv, u32 flags);
int (*decrypt)(struct crypto_lskcipher *tfm, const u8 *src,
- u8 *dst, unsigned len, u8 *iv, bool final);
+ u8 *dst, unsigned len, u8 *siv, u32 flags);
int (*init)(struct crypto_lskcipher *tfm);
void (*exit)(struct crypto_lskcipher *tfm);
@@ -496,6 +509,40 @@ static inline unsigned int crypto_lskcipher_chunksize(
return crypto_lskcipher_alg(tfm)->co.chunksize;
}
+/**
+ * crypto_skcipher_statesize() - obtain state size
+ * @tfm: cipher handle
+ *
+ * Some algorithms cannot be chained with the IV alone. They carry
+ * internal state which must be replicated if data is to be processed
+ * incrementally. The size of that state can be obtained with this
+ * function.
+ *
+ * Return: state size in bytes
+ */
+static inline unsigned int crypto_skcipher_statesize(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_skcipher_alg_common(tfm)->statesize;
+}
+
+/**
+ * crypto_lskcipher_statesize() - obtain state size
+ * @tfm: cipher handle
+ *
+ * Some algorithms cannot be chained with the IV alone. They carry
+ * internal state which must be replicated if data is to be processed
+ * incrementally. The size of that state can be obtained with this
+ * function.
+ *
+ * Return: state size in bytes
+ */
+static inline unsigned int crypto_lskcipher_statesize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.statesize;
+}
+
static inline unsigned int crypto_sync_skcipher_blocksize(
struct crypto_sync_skcipher *tfm)
{
@@ -684,14 +731,48 @@ int crypto_skcipher_encrypt(struct skcipher_request *req);
int crypto_skcipher_decrypt(struct skcipher_request *req);
/**
+ * crypto_skcipher_export() - export partial state
+ * @req: reference to the skcipher_request handle that holds all information
+ * needed to perform the operation
+ * @out: output buffer of sufficient size that can hold the state
+ *
+ * Export partial state of the transformation. This function dumps the
+ * entire state of the ongoing transformation into a provided block of
+ * data so it can be @import 'ed back later on. This is useful in case
+ * you want to save the partial result of the transformation after
+ * processing a certain amount of data and reload this partial result
+ * multiple times later on for re-use. No data processing
+ * happens at this point.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+int crypto_skcipher_export(struct skcipher_request *req, void *out);
+
+/**
+ * crypto_skcipher_import() - import partial state
+ * @req: reference to the skcipher_request handle that holds all information
+ * needed to perform the operation
+ * @in: buffer holding the state
+ *
+ * Import partial state of the transformation. This function loads the
+ * entire state of the ongoing transformation from a provided block of
+ * data so the transformation can continue from this point onward. No
+ * data processing happens at this point.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+int crypto_skcipher_import(struct skcipher_request *req, const void *in);
+
+/**
* crypto_lskcipher_encrypt() - encrypt plaintext
* @tfm: lskcipher handle
* @src: source buffer
* @dst: destination buffer
* @len: number of bytes to process
- * @iv: IV for the cipher operation which must comply with the IV size defined
- * by crypto_lskcipher_ivsize
- *
+ * @siv: IV + state for the cipher operation. The length of the IV must
+ * comply with the IV size defined by crypto_lskcipher_ivsize. The
+ * IV is then followed by a buffer whose length is specified by
+ * crypto_lskcipher_statesize.
* Encrypt plaintext data using the lskcipher handle.
*
* Return: >=0 if the cipher operation was successful, if positive
@@ -699,7 +780,7 @@ int crypto_skcipher_decrypt(struct skcipher_request *req);
* < 0 if an error occurred
*/
int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
- u8 *dst, unsigned len, u8 *iv);
+ u8 *dst, unsigned len, u8 *siv);
/**
* crypto_lskcipher_decrypt() - decrypt ciphertext
@@ -707,8 +788,10 @@ int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
* @src: source buffer
* @dst: destination buffer
* @len: number of bytes to process
- * @iv: IV for the cipher operation which must comply with the IV size defined
- * by crypto_lskcipher_ivsize
+ * @siv: IV + state for the cipher operation. The length of the IV must
+ * comply with the IV size defined by crypto_lskcipher_ivsize. The
+ * IV is then followed by a buffer whose length is specified by
+ * crypto_lskcipher_statesize.
*
* Decrypt ciphertext data using the lskcipher handle.
*
@@ -717,7 +800,7 @@ int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
* < 0 if an error occurred
*/
int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
- u8 *dst, unsigned len, u8 *iv);
+ u8 *dst, unsigned len, u8 *siv);
/**
* DOC: Symmetric Key Cipher Request Handle
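A minimal sketch of the new combined IV + state ("siv") convention for lskcipher users (illustrative only, not part of the patch; "cbc(aes)" is just an example algorithm name and the key/IV/data buffers are assumed to come from the caller):

    static int example_lskcipher(const u8 *key, unsigned int keylen,
    				 const u8 *iv, const u8 *src, u8 *dst,
    				 unsigned int len)
    {
    	struct crypto_lskcipher *tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
    	unsigned int ivs, sts;
    	u8 *siv;
    	int err;

    	if (IS_ERR(tfm))
    		return PTR_ERR(tfm);

    	ivs = crypto_lskcipher_ivsize(tfm);
    	sts = crypto_lskcipher_statesize(tfm);
    	siv = kzalloc(ivs + sts, GFP_KERNEL);	/* IV first, state right after */
    	if (!siv) {
    		crypto_free_lskcipher(tfm);
    		return -ENOMEM;
    	}

    	err = crypto_lskcipher_setkey(tfm, key, keylen);
    	if (!err) {
    		memcpy(siv, iv, ivs);		/* caller-supplied IV */
    		err = crypto_lskcipher_encrypt(tfm, src, dst, len, siv);
    	}

    	kfree(siv);
    	crypto_free_lskcipher(tfm);
    	return err;
    }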
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
index 30a347e5aa11..4490d43c63e3 100644
--- a/include/drm/ttm/ttm_pool.h
+++ b/include/drm/ttm/ttm_pool.h
@@ -74,7 +74,7 @@ struct ttm_pool {
bool use_dma32;
struct {
- struct ttm_pool_type orders[MAX_ORDER + 1];
+ struct ttm_pool_type orders[NR_PAGE_ORDERS];
} caching[TTM_NUM_CACHING_TYPES];
};
diff --git a/include/kunit/device.h b/include/kunit/device.h
new file mode 100644
index 000000000000..2450110ad64e
--- /dev/null
+++ b/include/kunit/device.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit basic device implementation
+ *
+ * Helpers for creating and managing fake devices for KUnit tests.
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#ifndef _KUNIT_DEVICE_H
+#define _KUNIT_DEVICE_H
+
+#if IS_ENABLED(CONFIG_KUNIT)
+
+#include <kunit/test.h>
+
+struct device;
+struct device_driver;
+
+/**
+ * kunit_driver_create() - Create a struct device_driver attached to the kunit_bus
+ * @test: The test context object.
+ * @name: The name to give the created driver.
+ *
+ * Creates a struct device_driver attached to the kunit_bus, with the name @name.
+ * This driver will automatically be cleaned up on test exit.
+ *
+ * Return: a stub struct device_driver, managed by KUnit, with the name @name.
+ */
+struct device_driver *kunit_driver_create(struct kunit *test, const char *name);
+
+/**
+ * kunit_device_register() - Create a struct device for use in KUnit tests
+ * @test: The test context object.
+ * @name: The name to give the created device.
+ *
+ * Creates a struct kunit_device (which is a struct device) with the given name,
+ * and a corresponding driver. The device and driver will be cleaned up on test
+ * exit, or when kunit_device_unregister is called. See also
+ * kunit_device_register_with_driver, if you wish to provide your own
+ * struct device_driver.
+ *
+ * Return: a pointer to a struct device which will be cleaned up when the test
+ * exits, or an error pointer if the device could not be allocated or registered.
+ */
+struct device *kunit_device_register(struct kunit *test, const char *name);
+
+/**
+ * kunit_device_register_with_driver() - Create a struct device for use in KUnit tests
+ * @test: The test context object.
+ * @name: The name to give the created device.
+ * @drv: The struct device_driver to associate with the device.
+ *
+ * Creates a struct kunit_device (which is a struct device) with the given
+ * name, and driver. The device will be cleaned up on test exit, or when
+ * kunit_device_unregister is called. See also kunit_device_register, if you
+ * wish KUnit to create and manage a driver for you.
+ *
+ * Return: a pointer to a struct device which will be cleaned up when the test
+ * exits, or an error pointer if the device could not be allocated or registered.
+ */
+struct device *kunit_device_register_with_driver(struct kunit *test,
+ const char *name,
+ const struct device_driver *drv);
+
+/**
+ * kunit_device_unregister() - Unregister a KUnit-managed device
+ * @test: The test context object which created the device
+ * @dev: The device.
+ *
+ * Unregisters and destroys a struct device which was created with
+ * kunit_device_register or kunit_device_register_with_driver. If KUnit created
+ * a driver, cleans it up as well.
+ */
+void kunit_device_unregister(struct kunit *test, struct device *dev);
+
+#endif
+
+#endif
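A minimal sketch of how a test would use the new helpers (based on the kernel-doc above; the test and device names are made up):

    static void my_driver_test(struct kunit *test)
    {
    	struct device *dev = kunit_device_register(test, "my-fake-device");

    	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

    	/* ... exercise code that needs a struct device ... */

    	/* no explicit cleanup needed: the device is torn down on test exit */
    }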
diff --git a/include/kunit/resource.h b/include/kunit/resource.h
index c7383e90f5c9..4ad69a2642a5 100644
--- a/include/kunit/resource.h
+++ b/include/kunit/resource.h
@@ -391,6 +391,27 @@ void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
typedef void (kunit_action_t)(void *);
/**
+ * KUNIT_DEFINE_ACTION_WRAPPER() - Wrap a function for use as a deferred action.
+ *
+ * @wrapper: The name of the new wrapper function to define.
+ * @orig: The original function to wrap.
+ * @arg_type: The type of the argument accepted by @orig.
+ *
+ * Defines a wrapper for a function which accepts a single, pointer-sized
+ * argument. This wrapper can then be passed to kunit_add_action() and
+ * similar. This should be used in preference to casting a function
+ * directly to kunit_action_t, as casting function pointers will break
+ * control flow integrity (CFI), leading to crashes.
+ */
+#define KUNIT_DEFINE_ACTION_WRAPPER(wrapper, orig, arg_type) \
+ static void wrapper(void *in) \
+ { \
+ arg_type arg = (arg_type)in; \
+ orig(arg); \
+ }
+
+
+/**
* kunit_add_action() - Call a function when the test ends.
* @test: Test case to associate the action with.
* @action: The function to run on test exit
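A minimal sketch of the wrapper macro in use (illustrative only; kfree_wrapper and my_test are made-up names), paired with kunit_add_action() as the kernel-doc suggests:

    KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);

    static void my_test(struct kunit *test)
    {
    	void *buf = kmalloc(32, GFP_KERNEL);

    	KUNIT_ASSERT_NOT_NULL(test, buf);
    	kunit_add_action(test, kfree_wrapper, buf);	/* buf freed at test exit */
    }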
diff --git a/include/kunit/static_stub.h b/include/kunit/static_stub.h
index 85315c80b303..bf940322dfc0 100644
--- a/include/kunit/static_stub.h
+++ b/include/kunit/static_stub.h
@@ -93,7 +93,7 @@ void __kunit_activate_static_stub(struct kunit *test,
* The redirection can be disabled again with kunit_deactivate_static_stub().
*/
#define kunit_activate_static_stub(test, real_fn_addr, replacement_addr) do { \
- typecheck_fn(typeof(&real_fn_addr), replacement_addr); \
+ typecheck_fn(typeof(&replacement_addr), real_fn_addr); \
__kunit_activate_static_stub(test, real_fn_addr, replacement_addr); \
} while (0)
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 20ed9f9275c9..c2ce379c329b 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -253,6 +253,7 @@ struct kunit_suite {
struct dentry *debugfs;
struct string_stream *log;
int suite_init_err;
+ bool is_init;
};
/* Stores an array of suites, end points one past the end */
@@ -337,6 +338,9 @@ void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites);
void kunit_exec_run_tests(struct kunit_suite_set *suite_set, bool builtin);
void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr);
+struct kunit_suite_set kunit_merge_suite_sets(struct kunit_suite_set init_suite_set,
+ struct kunit_suite_set suite_set);
+
#if IS_BUILTIN(CONFIG_KUNIT)
int kunit_run_all_tests(void);
#else
@@ -371,6 +375,11 @@ static inline int kunit_run_all_tests(void)
#define kunit_test_suite(suite) kunit_test_suites(&suite)
+#define __kunit_init_test_suites(unique_array, ...) \
+ static struct kunit_suite *unique_array[] \
+ __aligned(sizeof(struct kunit_suite *)) \
+ __used __section(".kunit_init_test_suites") = { __VA_ARGS__ }
+
/**
* kunit_test_init_section_suites() - used to register one or more &struct
* kunit_suite containing init functions or
@@ -378,21 +387,21 @@ static inline int kunit_run_all_tests(void)
*
* @__suites: a statically allocated list of &struct kunit_suite.
*
- * This functions identically as kunit_test_suites() except that it suppresses
- * modpost warnings for referencing functions marked __init or data marked
- * __initdata; this is OK because currently KUnit only runs tests upon boot
- * during the init phase or upon loading a module during the init phase.
+ * This functions similarly to kunit_test_suites() except that it compiles the
+ * list of suites during the init phase.
+ *
+ * This macro also suffixes the array and suite declarations it makes with
+ * _probe; so that modpost suppresses warnings about referencing init data
+ * for symbols named in this manner.
*
- * NOTE TO KUNIT DEVS: If we ever allow KUnit tests to be run after boot, these
- * tests must be excluded.
+ * Note: these init tests are not able to be run after boot so there is no
+ * "run" debugfs file generated for these tests.
*
- * The only thing this macro does that's different from kunit_test_suites is
- * that it suffixes the array and suite declarations it makes with _probe;
- * modpost suppresses warnings about referencing init data for symbols named in
- * this manner.
+ * Also, do not mark the suite or test case structs with __initdata because
+ * they will be used after the init phase with debugfs.
*/
#define kunit_test_init_section_suites(__suites...) \
- __kunit_test_suites(CONCATENATE(__UNIQUE_ID(array), _probe), \
+ __kunit_init_test_suites(CONCATENATE(__UNIQUE_ID(array), _probe), \
##__suites)
#define kunit_test_init_section_suite(suite) \
@@ -749,7 +758,7 @@ do { \
.right_text = #right, \
}; \
\
- if (likely(strcmp(__left, __right) op 0)) \
+ if (likely((__left) && (__right) && (strcmp(__left, __right) op 0))) \
break; \
\
\
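A minimal sketch of an init-section suite registered through the reworked macro (names made up; note that the suite and case structs themselves are not marked __initdata, per the comment above):

    static void my_init_test(struct kunit *test)
    {
    	KUNIT_EXPECT_TRUE(test, true);
    }

    static struct kunit_case my_init_cases[] = {
    	KUNIT_CASE(my_init_test),
    	{}
    };

    static struct kunit_suite my_init_suite = {
    	.name = "my-init-suite",
    	.test_cases = my_init_cases,
    };

    kunit_test_init_section_suite(my_init_suite);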
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 4db54e928b36..118a18b7ff84 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -424,6 +424,13 @@ extern int acpi_blacklisted(void);
extern void acpi_osi_setup(char *str);
extern bool acpi_osi_is_win8(void);
+#ifdef CONFIG_ACPI_THERMAL_LIB
+int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp);
+int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp);
+int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp);
+int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
+#endif
+
#ifdef CONFIG_ACPI_NUMA
int acpi_map_pxm_to_node(int pxm);
int acpi_get_node(acpi_handle handle);
@@ -756,6 +763,10 @@ const char *acpi_get_subsystem_id(acpi_handle handle);
#define ACPI_HANDLE(dev) (NULL)
#define ACPI_HANDLE_FWNODE(fwnode) (NULL)
+/* Get rid of the -Wunused-variable for adev */
+#define acpi_dev_uid_match(adev, uid2) (adev && false)
+#define acpi_dev_hid_uid_match(adev, hid2, uid2) (adev && false)
+
#include <acpi/acpi_numa.h>
struct fwnode_handle;
@@ -772,17 +783,6 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
struct acpi_device;
-static inline bool acpi_dev_uid_match(struct acpi_device *adev, const char *uid2)
-{
- return false;
-}
-
-static inline bool
-acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2)
-{
- return false;
-}
-
static inline int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer)
{
return -ENODEV;
diff --git a/include/linux/acpi_amd_wbrf.h b/include/linux/acpi_amd_wbrf.h
new file mode 100644
index 000000000000..898f31d536d4
--- /dev/null
+++ b/include/linux/acpi_amd_wbrf.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Wifi Band Exclusion Interface (AMD ACPI Implementation)
+ * Copyright (C) 2023 Advanced Micro Devices
+ */
+
+#ifndef _ACPI_AMD_WBRF_H
+#define _ACPI_AMD_WBRF_H
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+
+/* The maximum number of frequency band ranges */
+#define MAX_NUM_OF_WBRF_RANGES 11
+
+/* Record actions */
+#define WBRF_RECORD_ADD 0x0
+#define WBRF_RECORD_REMOVE 0x1
+
+/**
+ * struct freq_band_range - Wifi frequency band range definition
+ * @start: start frequency point (in Hz)
+ * @end: end frequency point (in Hz)
+ */
+struct freq_band_range {
+ u64 start;
+ u64 end;
+};
+
+/**
+ * struct wbrf_ranges_in_out - wbrf ranges info
+ * @num_of_ranges: total number of band ranges in this struct
+ * @band_list: array of Wifi band ranges
+ */
+struct wbrf_ranges_in_out {
+ u64 num_of_ranges;
+ struct freq_band_range band_list[MAX_NUM_OF_WBRF_RANGES];
+};
+
+/**
+ * enum wbrf_notifier_actions - wbrf notifier actions index
+ * @WBRF_CHANGED: there were some frequency band updates. The consumers
+ * should retrieve the latest active frequency bands.
+ */
+enum wbrf_notifier_actions {
+ WBRF_CHANGED,
+};
+
+#if IS_ENABLED(CONFIG_AMD_WBRF)
+bool acpi_amd_wbrf_supported_producer(struct device *dev);
+int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in);
+bool acpi_amd_wbrf_supported_consumer(struct device *dev);
+int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out);
+int amd_wbrf_register_notifier(struct notifier_block *nb);
+int amd_wbrf_unregister_notifier(struct notifier_block *nb);
+#else
+static inline
+bool acpi_amd_wbrf_supported_consumer(struct device *dev)
+{
+ return false;
+}
+
+static inline
+int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in)
+{
+ return -ENODEV;
+}
+
+static inline
+bool acpi_amd_wbrf_supported_producer(struct device *dev)
+{
+ return false;
+}
+static inline
+int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out)
+{
+ return -ENODEV;
+}
+static inline
+int amd_wbrf_register_notifier(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+static inline
+int amd_wbrf_unregister_notifier(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_AMD_WBRF */
+
+#endif /* _ACPI_AMD_WBRF_H */
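A rough consumer-side sketch of the new interface (illustrative only; the callback, notifier block and device pointer are all made-up names, and how the device is obtained is an assumption):

    /* assumed to be set at probe time by the (made-up) consumer driver */
    static struct device *my_consumer_dev;

    static int my_wbrf_notify(struct notifier_block *nb, unsigned long action, void *data)
    {
    	struct wbrf_ranges_in_out out = {};

    	if (action == WBRF_CHANGED &&
    	    !amd_wbrf_retrieve_freq_band(my_consumer_dev, &out)) {
    		/* avoid the bands in out.band_list[0 .. out.num_of_ranges) */
    	}
    	return NOTIFY_OK;
    }

    static struct notifier_block my_nb = { .notifier_call = my_wbrf_notify };

Registration would then happen at probe time, guarded by acpi_amd_wbrf_supported_consumer() and followed by amd_wbrf_register_notifier(&my_nb).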
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index a07b510e7dc5..a63d61ca55af 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -27,6 +27,13 @@ static inline unsigned long topology_get_cpu_scale(int cpu)
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
+DECLARE_PER_CPU(unsigned long, capacity_freq_ref);
+
+static inline unsigned long topology_get_freq_ref(int cpu)
+{
+ return per_cpu(capacity_freq_ref, cpu);
+}
+
DECLARE_PER_CPU(unsigned long, arch_freq_scale);
static inline unsigned long topology_get_freq_scale(int cpu)
@@ -92,6 +99,7 @@ void update_siblings_masks(unsigned int cpu);
void remove_cpu_topology(unsigned int cpuid);
void reset_cpu_topology(void);
int parse_acpi_topology(void);
+void freq_inv_set_max_ratio(int cpu, u64 max_rate);
#endif
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
diff --git a/include/linux/async.h b/include/linux/async.h
index cce4ad31e8fc..33c9ff4afb49 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -90,6 +90,8 @@ async_schedule_dev(async_func_t func, struct device *dev)
return async_schedule_node(func, dev, dev_to_node(dev));
}
+bool async_schedule_dev_nocall(async_func_t func, struct device *dev);
+
/**
* async_schedule_dev_domain - A device specific version of async_schedule_domain
* @func: function to execute asynchronously
diff --git a/include/linux/backing-file.h b/include/linux/backing-file.h
new file mode 100644
index 000000000000..3f1fe1774f1b
--- /dev/null
+++ b/include/linux/backing-file.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common helpers for stackable filesystems and backing files.
+ *
+ * Copyright (C) 2023 CTERA Networks.
+ */
+
+#ifndef _LINUX_BACKING_FILE_H
+#define _LINUX_BACKING_FILE_H
+
+#include <linux/file.h>
+#include <linux/uio.h>
+#include <linux/fs.h>
+
+struct backing_file_ctx {
+ const struct cred *cred;
+ struct file *user_file;
+ void (*accessed)(struct file *);
+ void (*end_write)(struct file *);
+};
+
+struct file *backing_file_open(const struct path *user_path, int flags,
+ const struct path *real_path,
+ const struct cred *cred);
+ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *out, loff_t *ppos, size_t len,
+ unsigned int flags,
+ struct backing_file_ctx *ctx);
+int backing_file_mmap(struct file *file, struct vm_area_struct *vma,
+ struct backing_file_ctx *ctx);
+
+#endif /* _LINUX_BACKING_FILE_H */
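A rough sketch of how a stacking filesystem might open its backing file through the new header (illustrative only; the function name is made up and the paths/credentials are assumed to come from the caller):

    static int example_open_backing(const struct path *user_path,
    				    const struct path *real_path,
    				    const struct cred *cred,
    				    struct file **realfile)
    {
    	*realfile = backing_file_open(user_path, O_RDONLY, real_path, cred);
    	return IS_ERR(*realfile) ? PTR_ERR(*realfile) : 0;
    }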
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index b29ebd53417d..7c2316c91cbd 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -57,20 +57,18 @@ struct block_device {
void * bd_holder;
const struct blk_holder_ops *bd_holder_ops;
struct mutex bd_holder_lock;
- /* The counter of freeze processes */
- int bd_fsfreeze_count;
int bd_holders;
struct kobject *bd_holder_dir;
- /* Mutex for freeze */
- struct mutex bd_fsfreeze_mutex;
- struct super_block *bd_fsfreeze_sb;
+ atomic_t bd_fsfreeze_count; /* number of freeze requests */
+ struct mutex bd_fsfreeze_mutex; /* serialize freeze/thaw */
struct partition_meta_info *bd_meta_info;
#ifdef CONFIG_FAIL_MAKE_REQUEST
bool bd_make_it_fail;
#endif
bool bd_ro_warned;
+ int bd_writers;
/*
* keep this out-of-line as it's both big and not needed in the fast
* path
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 51fa7ffdee83..c30a98e08423 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -124,6 +124,8 @@ typedef unsigned int __bitwise blk_mode_t;
#define BLK_OPEN_NDELAY ((__force blk_mode_t)(1 << 3))
/* open for "writes" only for ioctls (specialy hack for floppy.c) */
#define BLK_OPEN_WRITE_IOCTL ((__force blk_mode_t)(1 << 4))
+/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
+#define BLK_OPEN_RESTRICT_WRITES ((__force blk_mode_t)(1 << 5))
struct gendisk {
/*
@@ -538,7 +540,7 @@ struct request_queue {
#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
#define QUEUE_FLAG_SYNCHRONOUS 11 /* always completes in submit context */
#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
-#define QUEUE_FLAG_HW_WC 18 /* Write back caching supported */
+#define QUEUE_FLAG_HW_WC 13 /* Write back caching supported */
#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
@@ -1468,8 +1470,23 @@ struct blk_holder_ops {
* Sync the file system mounted on the block device.
*/
void (*sync)(struct block_device *bdev);
+
+ /*
+ * Freeze the file system mounted on the block device.
+ */
+ int (*freeze)(struct block_device *bdev);
+
+ /*
+ * Thaw the file system mounted on the block device.
+ */
+ int (*thaw)(struct block_device *bdev);
};
+/*
+ * For filesystems using @fs_holder_ops, the @holder argument passed to
+ * helpers used to open and claim block devices via
+ * bd_prepare_to_claim() must point to a superblock.
+ */
extern const struct blk_holder_ops fs_holder_ops;
/*
@@ -1477,7 +1494,8 @@ extern const struct blk_holder_ops fs_holder_ops;
* as stored in sb->s_flags.
*/
#define sb_open_mode(flags) \
- (BLK_OPEN_READ | (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
+ (BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
+ (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
struct bdev_handle {
struct block_device *bdev;
@@ -1485,10 +1503,6 @@ struct bdev_handle {
blk_mode_t mode;
};
-struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
- const struct blk_holder_ops *hops);
-struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
- void *holder, const struct blk_holder_ops *hops);
struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
const struct blk_holder_ops *hops);
struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
@@ -1496,7 +1510,6 @@ struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);
-void blkdev_put(struct block_device *bdev, void *holder);
void bdev_release(struct bdev_handle *handle);
/* just for blk-cgroup, don't use elsewhere */
@@ -1541,8 +1554,8 @@ static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
}
#endif /* CONFIG_BLOCK */
-int freeze_bdev(struct block_device *bdev);
-int thaw_bdev(struct block_device *bdev);
+int bdev_freeze(struct block_device *bdev);
+int bdev_thaw(struct block_device *bdev);
struct io_comp_batch {
struct request *req_list;
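A minimal sketch of the renamed freeze/thaw entry points (illustrative only; the function name is made up and "bdev" is assumed to be a block device the caller already holds):

    static int example_freeze_work(struct block_device *bdev)
    {
    	int err = bdev_freeze(bdev);

    	if (err)
    		return err;

    	/* ... operate on the frozen device ... */

    	return bdev_thaw(bdev);
    }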
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index fc0d6f32c687..94baced5a1ad 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -142,9 +142,13 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
#ifdef CONFIG_NET
BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
+BPF_LINK_TYPE(BPF_LINK_TYPE_NETFILTER, netfilter)
+BPF_LINK_TYPE(BPF_LINK_TYPE_TCX, tcx)
+BPF_LINK_TYPE(BPF_LINK_TYPE_NETKIT, netkit)
#endif
#ifdef CONFIG_PERF_EVENTS
BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
#endif
BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi)
BPF_LINK_TYPE(BPF_LINK_TYPE_STRUCT_OPS, struct_ops)
+BPF_LINK_TYPE(BPF_LINK_TYPE_UPROBE_MULTI, uprobe_multi)
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 5f23ee599889..d78454a4dd1f 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -205,7 +205,6 @@ struct buffer_head *create_empty_buffers(struct folio *folio,
unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
-void end_buffer_async_write(struct buffer_head *bh, int uptodate);
/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
@@ -252,11 +251,10 @@ void __bh_read_batch(int nr, struct buffer_head *bhs[],
* address_spaces.
*/
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
-int block_write_full_page(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc);
+int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
+ void *get_block);
int __block_write_full_folio(struct inode *inode, struct folio *folio,
- get_block_t *get_block, struct writeback_control *wbc,
- bh_end_io_t *handler);
+ get_block_t *get_block, struct writeback_control *wbc);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
@@ -270,7 +268,6 @@ int generic_write_end(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
struct page *, void *);
void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
-void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
unsigned, struct page **, void **,
get_block_t *, loff_t *);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 4a6b6b77ccb6..ea48c861cd36 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -496,6 +496,20 @@ struct cgroup {
struct cgroup_rstat_cpu __percpu *rstat_cpu;
struct list_head rstat_css_list;
+ /*
+ * Add padding to separate the read mostly rstat_cpu and
+ * rstat_css_list into a different cacheline from the following
+ * rstat_flush_next and *bstat fields which can have frequent updates.
+ */
+ CACHELINE_PADDING(_pad_);
+
+ /*
+ * A singly-linked list of cgroup structures to be rstat flushed.
+ * This is a scratch field to be used exclusively by
+ * cgroup_rstat_flush_locked() and protected by cgroup_rstat_lock.
+ */
+ struct cgroup *rstat_flush_next;
+
/* cgroup basic resource statistics */
struct cgroup_base_stat last_bstat;
struct cgroup_base_stat bstat;
@@ -548,6 +562,10 @@ struct cgroup_root {
/* Unique id for this hierarchy. */
int hierarchy_id;
+ /* A list running through the active hierarchies */
+ struct list_head root_list;
+ struct rcu_head rcu; /* Must be near the top */
+
/*
* The root cgroup. The containing cgroup_root will be destroyed on its
* release. cgrp->ancestors[0] will be used overflowing into the
@@ -561,9 +579,6 @@ struct cgroup_root {
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
- /* A list running through the active hierarchies */
- struct list_head root_list;
-
/* Hierarchy-specific flags */
unsigned int flags;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 0ef0af66080e..34aaf0e87def 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -69,6 +69,7 @@ struct css_task_iter {
extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
+extern spinlock_t css_set_lock;
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
@@ -386,7 +387,6 @@ static inline void cgroup_unlock(void)
* as locks used during the cgroup_subsys::attach() methods.
*/
#ifdef CONFIG_PROVE_RCU
-extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
rcu_dereference_check((task)->cgroups, \
rcu_read_lock_sched_held() || \
@@ -853,4 +853,6 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
#endif /* CONFIG_CGROUP_BPF */
+struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);
+
#endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index 9f1a9c455b68..c2d09bc4f976 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -125,25 +125,55 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
* trivial wrapper around DEFINE_CLASS() above specifically
* for locks.
*
+ * DEFINE_GUARD_COND(name, ext, condlock)
+ * wrapper around EXTEND_CLASS above to add conditional lock
+ * variants to a base class, e.g. mutex_trylock() or
+ * mutex_lock_interruptible().
+ *
* guard(name):
- * an anonymous instance of the (guard) class
+ * an anonymous instance of the (guard) class, not recommended for
+ * conditional locks.
*
* scoped_guard (name, args...) { }:
* similar to CLASS(name, scope)(args), except the variable (with the
* explicit name 'scope') is declared in a for-loop such that its scope is
* bound to the next (compound) statement.
*
+ * for conditional locks the loop body is skipped when the lock is not
+ * acquired.
+ *
+ * scoped_cond_guard (name, fail, args...) { }:
+ * similar to scoped_guard(), except it executes the given 'fail' statement
+ * when the lock acquire fails.
+ *
*/
#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
- DEFINE_CLASS(_name, _type, _unlock, ({ _lock; _T; }), _type _T)
+ DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
+ static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
+ { return *_T; }
+
+#define DEFINE_GUARD_COND(_name, _ext, _condlock) \
+ EXTEND_CLASS(_name, _ext, \
+ ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
+ class_##_name##_t _T) \
+ static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ { return class_##_name##_lock_ptr(_T); }
#define guard(_name) \
CLASS(_name, __UNIQUE_ID(guard))
+#define __guard_ptr(_name) class_##_name##_lock_ptr
+
#define scoped_guard(_name, args...) \
for (CLASS(_name, scope)(args), \
- *done = NULL; !done; done = (void *)1)
+ *done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1)
+
+#define scoped_cond_guard(_name, _fail, args...) \
+ for (CLASS(_name, scope)(args), \
+ *done = NULL; !done; done = (void *)1) \
+ if (!__guard_ptr(_name)(&scope)) _fail; \
+ else
/*
* Additional helper macros for generating lock guards with types, either for
@@ -152,6 +182,7 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
*
* DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
* DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
+ * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock)
*
* will result in the following type:
*
@@ -173,6 +204,11 @@ typedef struct { \
static inline void class_##_name##_destructor(class_##_name##_t *_T) \
{ \
if (_T->lock) { _unlock; } \
+} \
+ \
+static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
+{ \
+ return _T->lock; \
}
@@ -201,4 +237,14 @@ __DEFINE_LOCK_GUARD_1(_name, _type, _lock)
__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
__DEFINE_LOCK_GUARD_0(_name, _lock)
+#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \
+ EXTEND_CLASS(_name, _ext, \
+ ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
+ if (_T->lock && !(_condlock)) _T->lock = NULL; \
+ _t; }), \
+ typeof_member(class_##_name##_t, lock) l) \
+ static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ { return class_##_name##_lock_ptr(_T); }
+
+
#endif /* __LINUX_GUARDS_H */
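
A minimal usage sketch for the guard macros extended above, assuming the mutex guard classes that <linux/mutex.h> defines on top of them (mutex, mutex_try, mutex_intr); do_work() is a hypothetical helper, not part of the API.

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);

static void do_work(void) { }	/* hypothetical */

static int my_update(void)
{
	/* Unconditional guard: the mutex is dropped automatically at scope exit. */
	scoped_guard(mutex, &my_lock)
		do_work();

	/* Conditional guard: the body is skipped if mutex_trylock() fails. */
	scoped_guard(mutex_try, &my_lock)
		do_work();

	/* Conditional guard with a failure statement: bail out on signal. */
	scoped_cond_guard(mutex_intr, return -EINTR, &my_lock)
		do_work();

	return 0;
}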
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 1c5ca92a0555..afda5f24d3dd 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1203,6 +1203,7 @@ void arch_set_freq_scale(const struct cpumask *cpus,
{
}
#endif
+
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index efc0c0b07efb..172d0a743e5d 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -66,15 +66,12 @@ enum cpuhp_state {
CPUHP_PERF_POWER,
CPUHP_PERF_SUPERH,
CPUHP_X86_HPET_DEAD,
- CPUHP_X86_APB_DEAD,
CPUHP_X86_MCE_DEAD,
CPUHP_VIRT_NET_DEAD,
CPUHP_IBMVNIC_DEAD,
CPUHP_SLUB_DEAD,
CPUHP_DEBUG_OBJ_DEAD,
CPUHP_MM_WRITEBACK_DEAD,
- /* Must be after CPUHP_MM_VMSTAT_DEAD */
- CPUHP_MM_DEMOTION_DEAD,
CPUHP_MM_VMSTAT_DEAD,
CPUHP_SOFTIRQ_DEAD,
CPUHP_NET_MVNETA_DEAD,
@@ -96,7 +93,6 @@ enum cpuhp_state {
CPUHP_NET_DEV_DEAD,
CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_IOVA_DEAD,
- CPUHP_LUSTRE_CFS_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
CPUHP_PADATA_DEAD,
CPUHP_AP_DTPM_CPU_DEAD,
@@ -108,7 +104,6 @@ enum cpuhp_state {
CPUHP_X2APIC_PREPARE,
CPUHP_SMPCFD_PREPARE,
CPUHP_RELAY_PREPARE,
- CPUHP_SLAB_PREPARE,
CPUHP_MD_RAID5_PREPARE,
CPUHP_RCUTREE_PREP,
CPUHP_CPUIDLE_COUPLED_PREPARE,
@@ -118,13 +113,11 @@ enum cpuhp_state {
CPUHP_XEN_EVTCHN_PREPARE,
CPUHP_ARM_SHMOBILE_SCU_PREPARE,
CPUHP_SH_SH3X_PREPARE,
- CPUHP_NET_FLOW_PREPARE,
CPUHP_TOPOLOGY_PREPARE,
CPUHP_NET_IUCV_PREPARE,
CPUHP_ARM_BL_PREPARE,
CPUHP_TRACE_RB_PREPARE,
CPUHP_MM_ZS_PREPARE,
- CPUHP_MM_ZSWP_MEM_PREPARE,
CPUHP_MM_ZSWP_POOL_PREPARE,
CPUHP_KVM_PPC_BOOK3S_PREPARE,
CPUHP_ZCOMP_PREPARE,
@@ -151,18 +144,14 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
- CPUHP_AP_IRQ_RISCV_STARTING,
CPUHP_AP_IRQ_LOONGARCH_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
- CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
- CPUHP_AP_PERF_X86_CQM_STARTING,
CPUHP_AP_PERF_X86_CSTATE_STARTING,
CPUHP_AP_PERF_XTENSA_STARTING,
- CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
@@ -179,7 +168,6 @@ enum cpuhp_state {
CPUHP_AP_QCOM_TIMER_STARTING,
CPUHP_AP_TEGRA_TIMER_STARTING,
CPUHP_AP_ARMADA_TIMER_STARTING,
- CPUHP_AP_MARCO_TIMER_STARTING,
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
CPUHP_AP_RISCV_TIMER_STARTING,
@@ -217,9 +205,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
CPUHP_AP_PERF_X86_RAPL_ONLINE,
- CPUHP_AP_PERF_X86_CQM_ONLINE,
CPUHP_AP_PERF_X86_CSTATE_ONLINE,
- CPUHP_AP_PERF_X86_IDXD_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
@@ -251,9 +237,7 @@ enum cpuhp_state {
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_BASE_CACHEINFO_ONLINE,
CPUHP_AP_ONLINE_DYN,
- CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
- /* Must be after CPUHP_AP_ONLINE_DYN for node_states[N_CPU] update */
- CPUHP_AP_MM_DEMOTION_ONLINE,
+ CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 40,
CPUHP_AP_X86_HPET_ONLINE,
CPUHP_AP_X86_KVM_CLK_ONLINE,
CPUHP_AP_ACTIVE,
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index d629094fac6e..875d12598bd2 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -77,6 +77,7 @@ extern void cpuset_lock(void);
extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern bool cpuset_cpu_is_isolated(int cpu);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
@@ -207,6 +208,11 @@ static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
return false;
}
+static inline bool cpuset_cpu_is_isolated(int cpu)
+{
+ return false;
+}
+
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
return node_possible_map;
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 5126a4fecb44..9eaeaafe0cad 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -87,12 +87,6 @@ Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
void *data, size_t data_len);
void final_note(Elf_Word *buf);
-#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
-#ifndef DEFAULT_CRASH_KERNEL_LOW_SIZE
-#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
-#endif
-#endif
-
int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
unsigned long long *crash_size, unsigned long long *crash_base,
unsigned long long *low_size, bool *high);
diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h
index 72c92c396bb8..cd4f420231ba 100644
--- a/include/linux/crc-ccitt.h
+++ b/include/linux/crc-ccitt.h
@@ -5,19 +5,12 @@
#include <linux/types.h>
extern u16 const crc_ccitt_table[256];
-extern u16 const crc_ccitt_false_table[256];
extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len);
-extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len);
static inline u16 crc_ccitt_byte(u16 crc, const u8 c)
{
return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
}
-static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c)
-{
- return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c];
-}
-
#endif /* _LINUX_CRC_CCITT_H */
diff --git a/include/linux/cred.h b/include/linux/cred.h
index af8d353a4b86..2976f534a7a3 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -109,14 +109,7 @@ static inline int groups_search(const struct group_info *group_info, kgid_t grp)
* same context as task->real_cred.
*/
struct cred {
- atomic_t usage;
-#ifdef CONFIG_DEBUG_CREDENTIALS
- atomic_t subscribers; /* number of processes subscribed */
- void *put_addr;
- unsigned magic;
-#define CRED_MAGIC 0x43736564
-#define CRED_MAGIC_DEAD 0x44656144
-#endif
+ atomic_long_t usage;
kuid_t uid; /* real UID of the task */
kgid_t gid; /* real GID of the task */
kuid_t suid; /* saved UID of the task */
@@ -172,46 +165,6 @@ extern int cred_fscmp(const struct cred *, const struct cred *);
extern void __init cred_init(void);
extern int set_cred_ucounts(struct cred *);
-/*
- * check for validity of credentials
- */
-#ifdef CONFIG_DEBUG_CREDENTIALS
-extern void __noreturn __invalid_creds(const struct cred *, const char *, unsigned);
-extern void __validate_process_creds(struct task_struct *,
- const char *, unsigned);
-
-extern bool creds_are_invalid(const struct cred *cred);
-
-static inline void __validate_creds(const struct cred *cred,
- const char *file, unsigned line)
-{
- if (unlikely(creds_are_invalid(cred)))
- __invalid_creds(cred, file, line);
-}
-
-#define validate_creds(cred) \
-do { \
- __validate_creds((cred), __FILE__, __LINE__); \
-} while(0)
-
-#define validate_process_creds() \
-do { \
- __validate_process_creds(current, __FILE__, __LINE__); \
-} while(0)
-
-extern void validate_creds_for_do_exit(struct task_struct *);
-#else
-static inline void validate_creds(const struct cred *cred)
-{
-}
-static inline void validate_creds_for_do_exit(struct task_struct *tsk)
-{
-}
-static inline void validate_process_creds(void)
-{
-}
-#endif
-
static inline bool cap_ambient_invariant_ok(const struct cred *cred)
{
return cap_issubset(cred->cap_ambient,
@@ -229,7 +182,7 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
*/
static inline struct cred *get_new_cred_many(struct cred *cred, int nr)
{
- atomic_add(nr, &cred->usage);
+ atomic_long_add(nr, &cred->usage);
return cred;
}
@@ -264,7 +217,6 @@ static inline const struct cred *get_cred_many(const struct cred *cred, int nr)
struct cred *nonconst_cred = (struct cred *) cred;
if (!cred)
return cred;
- validate_creds(cred);
nonconst_cred->non_rcu = 0;
return get_new_cred_many(nonconst_cred, nr);
}
@@ -288,9 +240,8 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
struct cred *nonconst_cred = (struct cred *) cred;
if (!cred)
return NULL;
- if (!atomic_inc_not_zero(&nonconst_cred->usage))
+ if (!atomic_long_inc_not_zero(&nonconst_cred->usage))
return NULL;
- validate_creds(cred);
nonconst_cred->non_rcu = 0;
return cred;
}
@@ -312,8 +263,7 @@ static inline void put_cred_many(const struct cred *_cred, int nr)
struct cred *cred = (struct cred *) _cred;
if (cred) {
- validate_creds(cred);
- if (atomic_sub_and_test(nr, &cred->usage))
+ if (atomic_long_sub_and_test(nr, &cred->usage))
__put_cred(cred);
}
}
diff --git a/include/linux/damon.h b/include/linux/damon.h
index ab2f17d9926b..5881e4ac30be 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -2,7 +2,7 @@
/*
* DAMON api
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#ifndef _DAMON_H_
@@ -136,6 +136,9 @@ enum damos_action {
* @weight_nr_accesses: Weight of the region's nr_accesses for prioritization.
* @weight_age: Weight of the region's age for prioritization.
*
+ * @get_score: Feedback function for self-tuning quota.
+ * @get_score_arg: Parameter for @get_score
+ *
* To avoid consuming too much CPU time or IO resources for applying the
* &struct damos->action to large memory, DAMON allows users to set time and/or
* size quotas. The quotas can be set by writing non-zero values to &ms and
@@ -153,6 +156,17 @@ enum damos_action {
* You could customize the prioritization logic by setting &weight_sz,
* &weight_nr_accesses, and &weight_age, because monitoring operations are
* encouraged to respect those.
+ *
+ * If the @get_score function pointer is set, DAMON calls it back with
+ * @get_score_arg and gets its return value once per @reset_interval.
+ * DAMON then adjusts the effective quota using the returned value as a
+ * feedback score for the current quota, via its internal feedback loop
+ * algorithm.
+ *
+ * The feedback loop algorithm assumes the quota input and the feedback score
+ * output are in a positive proportional relationship, and the goal of the
+ * tuning is getting the feedback score value of 10,000. If @ms and/or @sz are
+ * also set, those work as hard limit quotas. If neither @ms nor @sz is
+ * set, the mechanism starts from the quota of one byte.
*/
struct damos_quota {
unsigned long ms;
@@ -163,6 +177,9 @@ struct damos_quota {
unsigned int weight_nr_accesses;
unsigned int weight_age;
+ unsigned long (*get_score)(void *arg);
+ void *get_score_arg;
+
/* private: */
/* For throughput estimation */
unsigned long total_charged_sz;
@@ -179,6 +196,9 @@ struct damos_quota {
/* For prioritization */
unsigned long histogram[DAMOS_MAX_SCORE + 1];
unsigned int min_score;
+
+ /* For feedback loop */
+ unsigned long esz_bp;
};
/**
@@ -559,6 +579,8 @@ struct damon_ctx {
* update
*/
unsigned long next_ops_update_sis;
+ /* for waiting until the execution of the kdamond_fn is started */
+ struct completion kdamond_started;
/* public: */
struct task_struct *kdamond;
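
A sketch of wiring up the self-tuning quota feedback added above; the 10,000 target and the get_score/get_score_arg/reset_interval fields come from the hunks, while read_my_metric() and MY_TARGET are hypothetical.

#include <linux/damon.h>

#define MY_TARGET	1000UL	/* hypothetical target for the metric */

static unsigned long read_my_metric(void *arg);	/* hypothetical */

static unsigned long my_quota_score(void *arg)
{
	/* 10,000 means the tuning goal is met; DAMON adjusts the quota from this. */
	return read_my_metric(arg) * 10000UL / MY_TARGET;
}

static void my_setup_quota(struct damos_quota *quota, void *arg)
{
	quota->get_score = my_quota_score;
	quota->get_score_arg = arg;
	quota->reset_interval = 1000;	/* score sampled every reset_interval (ms) */
}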
diff --git a/include/linux/device.h b/include/linux/device.h
index d7a72a8749ea..6c83294395ac 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1007,6 +1007,8 @@ static inline void device_unlock(struct device *dev)
mutex_unlock(&dev->mutex);
}
+DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T))
+
static inline void device_lock_assert(struct device *dev)
{
lockdep_assert_held(&dev->mutex);
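
A short sketch of the new device guard defined above; my_configure() is a hypothetical callee.

#include <linux/device.h>

static int my_configure(struct device *dev);	/* hypothetical */

static int my_locked_configure(struct device *dev)
{
	/* device_lock(dev) here; device_unlock(dev) on every return path. */
	guard(device)(dev);

	return my_configure(dev);
}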
diff --git a/include/linux/edac.h b/include/linux/edac.h
index fa4bda2a70f6..1174beb94ab6 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -187,6 +187,7 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* @MEM_NVDIMM: Non-volatile RAM
* @MEM_WIO2: Wide I/O 2.
* @MEM_HBM2: High bandwidth Memory Gen 2.
+ * @MEM_HBM3: High bandwidth Memory Gen 3.
*/
enum mem_type {
MEM_EMPTY = 0,
@@ -218,6 +219,7 @@ enum mem_type {
MEM_NVDIMM,
MEM_WIO2,
MEM_HBM2,
+ MEM_HBM3,
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -248,6 +250,7 @@ enum mem_type {
#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
#define MEM_FLAG_WIO2 BIT(MEM_WIO2)
#define MEM_FLAG_HBM2 BIT(MEM_HBM2)
+#define MEM_FLAG_HBM3 BIT(MEM_HBM3)
/**
* enum edac_type - Error Detection and Correction capabilities and mode
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 9cc5bf32f6f2..c74f47711f0b 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -40,6 +40,7 @@ struct screen_info;
#define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1)))
#define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_ACCESS_DENIED (15 | (1UL << (BITS_PER_LONG-1)))
#define EFI_TIMEOUT (18 | (1UL << (BITS_PER_LONG-1)))
#define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1)))
#define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1)))
@@ -1348,4 +1349,15 @@ bool efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table)
umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n);
+/*
+ * efivar ops event type
+ */
+#define EFIVAR_OPS_RDONLY 0
+#define EFIVAR_OPS_RDWR 1
+
+extern struct blocking_notifier_head efivar_ops_nh;
+
+void efivars_generic_ops_register(void);
+void efivars_generic_ops_unregister(void);
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index b9caa01dfac4..88d91e087471 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -224,7 +224,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
unsigned long max_util, unsigned long sum_util,
unsigned long allowed_cpu_cap)
{
- unsigned long freq, scale_cpu;
+ unsigned long freq, ref_freq, scale_cpu;
struct em_perf_state *ps;
int cpu;
@@ -241,11 +241,10 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
*/
cpu = cpumask_first(to_cpumask(pd->cpus));
scale_cpu = arch_scale_cpu_capacity(cpu);
- ps = &pd->table[pd->nr_perf_states - 1];
+ ref_freq = arch_scale_freq_ref(cpu);
- max_util = map_util_perf(max_util);
max_util = min(max_util, allowed_cpu_cap);
- freq = map_util_freq(max_util, ps->frequency, scale_cpu);
+ freq = map_util_freq(max_util, ref_freq, scale_cpu);
/*
* Find the lowest performance state of the Energy Model above the
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index d95ab85f96ba..b0fb775a600d 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -7,6 +7,11 @@
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>
+#include <linux/context_tracking.h>
+#include <linux/livepatch.h>
+#include <linux/resume_user_mode.h>
+#include <linux/tick.h>
+#include <linux/kmsan.h>
#include <asm/entry-common.h>
@@ -98,7 +103,19 @@ static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
* done between establishing state and enabling interrupts. The caller must
* enable interrupts before invoking syscall_enter_from_user_mode_work().
*/
-void enter_from_user_mode(struct pt_regs *regs);
+static __always_inline void enter_from_user_mode(struct pt_regs *regs)
+{
+ arch_enter_from_user_mode(regs);
+ lockdep_hardirqs_off(CALLER_ADDR0);
+
+ CT_WARN_ON(__ct_state() != CONTEXT_USER);
+ user_exit_irqoff();
+
+ instrumentation_begin();
+ kmsan_unpoison_entry_regs(regs);
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
/**
* syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
@@ -117,6 +134,9 @@ void enter_from_user_mode(struct pt_regs *regs);
*/
void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
+long syscall_trace_enter(struct pt_regs *regs, long syscall,
+ unsigned long work);
+
/**
* syscall_enter_from_user_mode_work - Check and handle work before invoking
* a syscall
@@ -140,7 +160,15 @@ void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
* ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter()
* 2) Invocation of audit_syscall_entry()
*/
-long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
+static __always_inline long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
+{
+ unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
+
+ if (work & SYSCALL_WORK_ENTER)
+ syscall = syscall_trace_enter(regs, syscall, work);
+
+ return syscall;
+}
/**
* syscall_enter_from_user_mode - Establish state and check and handle work
@@ -159,7 +187,19 @@ long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
* Returns: The original or a modified syscall number. See
* syscall_enter_from_user_mode_work() for further explanation.
*/
-long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
+static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
+{
+ long ret;
+
+ enter_from_user_mode(regs);
+
+ instrumentation_begin();
+ local_irq_enable();
+ ret = syscall_enter_from_user_mode_work(regs, syscall);
+ instrumentation_end();
+
+ return ret;
+}
/**
* local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
@@ -259,6 +299,43 @@ static __always_inline void arch_exit_to_user_mode(void) { }
void arch_do_signal_or_restart(struct pt_regs *regs);
/**
+ * exit_to_user_mode_loop - do any pending work before leaving to user space
+ */
+unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
+ unsigned long ti_work);
+
+/**
+ * exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * 1) check that interrupts are disabled
+ * 2) call tick_nohz_user_enter_prepare()
+ * 3) call exit_to_user_mode_loop() if any flags from
+ * EXIT_TO_USER_MODE_WORK are set
+ * 4) check that interrupts are still disabled
+ */
+static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+ unsigned long ti_work;
+
+ lockdep_assert_irqs_disabled();
+
+ /* Flush pending rcuog wakeup before the last need_resched() check */
+ tick_nohz_user_enter_prepare();
+
+ ti_work = read_thread_flags();
+ if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
+ ti_work = exit_to_user_mode_loop(regs, ti_work);
+
+ arch_exit_to_user_mode_prepare(regs, ti_work);
+
+ /* Ensure that kernel state is sane for a return to userspace */
+ kmap_assert_nomap();
+ lockdep_assert_irqs_disabled();
+ lockdep_sys_exit();
+}
+
+/**
* exit_to_user_mode - Fixup state when exiting to user mode
*
* Syscall/interrupt exit enables interrupts, but the kernel state is
@@ -276,7 +353,17 @@ void arch_do_signal_or_restart(struct pt_regs *regs);
* non-instrumentable.
* The caller has to invoke syscall_exit_to_user_mode_work() before this.
*/
-void exit_to_user_mode(void);
+static __always_inline void exit_to_user_mode(void)
+{
+ instrumentation_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+
+ user_enter_irqoff();
+ arch_exit_to_user_mode();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
/**
* syscall_exit_to_user_mode_work - Handle work before returning to user mode
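
A sketch of how an architecture's syscall path might combine the now-inlined helpers above with the existing syscall_exit_to_user_mode(); do_syscall_table() is hypothetical.

#include <linux/entry-common.h>

static void do_syscall_table(struct pt_regs *regs, long nr);	/* hypothetical */

static void my_arch_syscall(struct pt_regs *regs, long nr)
{
	/* Establishes state, enables interrupts, runs entry work (ptrace, seccomp, audit). */
	nr = syscall_enter_from_user_mode(regs, nr);

	do_syscall_table(regs, nr);

	/* Runs exit work and prepares the return to user mode with interrupts disabled. */
	syscall_exit_to_user_mode(regs);
}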
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index b9d83652c097..e32bee4345fb 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -35,8 +35,7 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
-__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask);
+void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
@@ -58,15 +57,8 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
return ERR_PTR(-ENOSYS);
}
-static inline int eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+static inline void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{
- return -ENOSYS;
-}
-
-static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n,
- unsigned mask)
-{
- return -ENOSYS;
}
static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
@@ -92,5 +84,10 @@ static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
#endif
+static inline void eventfd_signal(struct eventfd_ctx *ctx)
+{
+ eventfd_signal_mask(ctx, 0);
+}
+
#endif /* _LINUX_EVENTFD_H */
diff --git a/include/linux/evm.h b/include/linux/evm.h
index 01fc495a83e2..36ec884320d9 100644
--- a/include/linux/evm.h
+++ b/include/linux/evm.h
@@ -31,6 +31,7 @@ extern void evm_inode_post_setxattr(struct dentry *dentry,
const char *xattr_name,
const void *xattr_value,
size_t xattr_value_len);
+extern int evm_inode_copy_up_xattr(const char *name);
extern int evm_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *xattr_name);
extern void evm_inode_post_removexattr(struct dentry *dentry,
@@ -117,6 +118,11 @@ static inline void evm_inode_post_setxattr(struct dentry *dentry,
return;
}
+static inline int evm_inode_copy_up_xattr(const char *name)
+{
+ return 0;
+}
+
static inline int evm_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry,
const char *xattr_name)
diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
index 69501e0ec239..d445705ac13c 100644
--- a/include/linux/export-internal.h
+++ b/include/linux/export-internal.h
@@ -16,10 +16,13 @@
* and eliminates the need for absolute relocations that require runtime
* processing on relocatable kernels.
*/
+#define __KSYM_ALIGN ".balign 4"
#define __KSYM_REF(sym) ".long " #sym "- ."
#elif defined(CONFIG_64BIT)
+#define __KSYM_ALIGN ".balign 8"
#define __KSYM_REF(sym) ".quad " #sym
#else
+#define __KSYM_ALIGN ".balign 4"
#define __KSYM_REF(sym) ".long " #sym
#endif
@@ -42,7 +45,7 @@
" .asciz \"" ns "\"" "\n" \
" .previous" "\n" \
" .section \"___ksymtab" sec "+" #name "\", \"a\"" "\n" \
- " .balign 4" "\n" \
+ __KSYM_ALIGN "\n" \
"__ksymtab_" #name ":" "\n" \
__KSYM_REF(sym) "\n" \
__KSYM_REF(__kstrtab_ ##name) "\n" \
@@ -61,6 +64,7 @@
#define SYMBOL_CRC(sym, crc, sec) \
asm(".section \"___kcrctab" sec "+" #sym "\",\"a\"" "\n" \
+ ".balign 4" "\n" \
"__crc_" #sym ":" "\n" \
".long " #crc "\n" \
".previous" "\n")
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index bc4c3287a65e..78c8326d74ae 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -83,12 +83,17 @@ struct dentry;
static inline struct file *files_lookup_fd_raw(struct files_struct *files, unsigned int fd)
{
struct fdtable *fdt = rcu_dereference_raw(files->fdt);
-
- if (fd < fdt->max_fds) {
- fd = array_index_nospec(fd, fdt->max_fds);
- return rcu_dereference_raw(fdt->fd[fd]);
- }
- return NULL;
+ unsigned long mask = array_index_mask_nospec(fd, fdt->max_fds);
+ struct file *needs_masking;
+
+ /*
+ * 'mask' is zero for an out-of-bounds fd, all ones for ok.
+ * 'fd&mask' is 'fd' for ok, or 0 for out of bounds.
+ *
+ * Accessing fdt->fd[0] is ok, but needs masking of the result.
+ */
+ needs_masking = rcu_dereference_raw(fdt->fd[fd&mask]);
+ return (struct file *)(mask & (unsigned long)needs_masking);
}
static inline struct file *files_lookup_fd_locked(struct files_struct *files, unsigned int fd)
@@ -114,7 +119,7 @@ int iterate_fd(struct files_struct *, unsigned,
extern int close_fd(unsigned int fd);
extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags);
-extern struct file *close_fd_get_file(unsigned int fd);
+extern struct file *file_close_fd(unsigned int fd);
extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
struct files_struct **new_fdp);
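
The same branchless bounds check used by files_lookup_fd_raw() above, shown on a plain table for illustration; array_index_mask_nospec() is from <linux/nospec.h>, while my_table and my_lookup() are hypothetical.

#include <linux/nospec.h>

static void *my_table[16];	/* hypothetical */

static void *my_lookup(unsigned int idx)
{
	/* mask is ~0UL when idx < 16 and 0UL when idx is out of bounds. */
	unsigned long mask = array_index_mask_nospec(idx, 16);

	/*
	 * idx & mask is always a valid index (0 when out of bounds), and
	 * masking the loaded pointer turns an out-of-bounds lookup into
	 * NULL without a speculation-prone branch.
	 */
	return (void *)(mask & (unsigned long)my_table[idx & mask]);
}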
diff --git a/include/linux/file.h b/include/linux/file.h
index 6e9099d29343..6834a29338c4 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -96,18 +96,8 @@ DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
extern void fd_install(unsigned int fd, struct file *file);
-extern int __receive_fd(struct file *file, int __user *ufd,
- unsigned int o_flags);
+int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags);
-extern int receive_fd(struct file *file, unsigned int o_flags);
-
-static inline int receive_fd_user(struct file *file, int __user *ufd,
- unsigned int o_flags)
-{
- if (ufd == NULL)
- return -EFAULT;
- return __receive_fd(file, ufd, o_flags);
-}
int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags);
extern void flush_delayed_fput(void);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 98b7a7a8c42e..e6ba0cc6f2ee 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -434,7 +434,7 @@ struct address_space_operations {
bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count);
void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb);
- int (*error_remove_page)(struct address_space *, struct page *);
+ int (*error_remove_folio)(struct address_space *, struct folio *);
/* swapfile support */
int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
@@ -463,9 +463,9 @@ extern const struct address_space_operations empty_aops;
* @a_ops: Methods.
* @flags: Error bits and flags (AS_*).
* @wb_err: The most recent error which has occurred.
- * @private_lock: For use by the owner of the address_space.
- * @private_list: For use by the owner of the address_space.
- * @private_data: For use by the owner of the address_space.
+ * @i_private_lock: For use by the owner of the address_space.
+ * @i_private_list: For use by the owner of the address_space.
+ * @i_private_data: For use by the owner of the address_space.
*/
struct address_space {
struct inode *host;
@@ -484,9 +484,9 @@ struct address_space {
unsigned long flags;
struct rw_semaphore i_mmap_rwsem;
errseq_t wb_err;
- spinlock_t private_lock;
- struct list_head private_list;
- void *private_data;
+ spinlock_t i_private_lock;
+ struct list_head i_private_list;
+ void * i_private_data;
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
@@ -991,8 +991,10 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
*/
struct file {
union {
+ /* fput() uses task work when closing and freeing file (default). */
+ struct callback_head f_task_work;
+ /* fput() must use workqueue (most kernel threads). */
struct llist_node f_llist;
- struct rcu_head f_rcuhead;
unsigned int f_iocb_flags;
};
@@ -1164,6 +1166,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
#define SB_I_UNTRUSTED_MOUNTER 0x00000040
+#define SB_I_EVM_UNSUPPORTED 0x00000080
#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
@@ -1185,7 +1188,8 @@ enum {
struct sb_writers {
unsigned short frozen; /* Is sb frozen? */
- unsigned short freeze_holders; /* Who froze fs? */
+ int freeze_kcount; /* How many kernel freeze requests? */
+ int freeze_ucount; /* How many userspace freeze requests? */
struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
@@ -1645,9 +1649,70 @@ static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
#define __sb_writers_release(sb, lev) \
percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+/**
+ * __sb_write_started - check if sb freeze level is held
+ * @sb: the super we write to
+ * @level: the freeze level
+ *
+ * * > 0 - sb freeze level is held
+ * * 0 - sb freeze level is not held
+ * * < 0 - !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN
+ */
+static inline int __sb_write_started(const struct super_block *sb, int level)
+{
+ return lockdep_is_held_type(sb->s_writers.rw_sem + level - 1, 1);
+}
+
+/**
+ * sb_write_started - check if SB_FREEZE_WRITE is held
+ * @sb: the super we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ */
static inline bool sb_write_started(const struct super_block *sb)
{
- return lockdep_is_held_type(sb->s_writers.rw_sem + SB_FREEZE_WRITE - 1, 1);
+ return __sb_write_started(sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * sb_write_not_started - check if SB_FREEZE_WRITE is not held
+ * @sb: the super we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ */
+static inline bool sb_write_not_started(const struct super_block *sb)
+{
+ return __sb_write_started(sb, SB_FREEZE_WRITE) <= 0;
+}
+
+/**
+ * file_write_started - check if SB_FREEZE_WRITE is held
+ * @file: the file we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ * May be false positive with !S_ISREG, because file_start_write() has
+ * no effect on !S_ISREG.
+ */
+static inline bool file_write_started(const struct file *file)
+{
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return true;
+ return sb_write_started(file_inode(file)->i_sb);
+}
+
+/**
+ * file_write_not_started - check if SB_FREEZE_WRITE is not held
+ * @file: the file we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ * May be false positive with !S_ISREG, because file_start_write() has
+ * no effect on !S_ISREG.
+ */
+static inline bool file_write_not_started(const struct file *file)
+{
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return true;
+ return sb_write_not_started(file_inode(file)->i_sb);
}
/**
@@ -2029,9 +2094,6 @@ extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
-extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- size_t len, unsigned int flags);
int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t *len, unsigned int remap_flags,
@@ -2051,9 +2113,24 @@ extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
struct file *dst_file, loff_t dst_pos,
loff_t len, unsigned int remap_flags);
+/**
+ * enum freeze_holder - holder of the freeze
+ * @FREEZE_HOLDER_KERNEL: kernel wants to freeze or thaw filesystem
+ * @FREEZE_HOLDER_USERSPACE: userspace wants to freeze or thaw filesystem
+ * @FREEZE_MAY_NEST: whether nesting freeze and thaw requests is allowed
+ *
+ * Indicate who the owner of the freeze or thaw request is and whether
+ * the freeze needs to be exclusive or can nest.
+ * Without @FREEZE_MAY_NEST, multiple freeze and thaw requests from the
+ * same holder aren't allowed. It is however allowed to hold a single
+ * @FREEZE_HOLDER_USERSPACE and a single @FREEZE_HOLDER_KERNEL freeze at
+ * the same time. This is relied upon by some filesystems during online
+ * repair or similar.
+ */
enum freeze_holder {
FREEZE_HOLDER_KERNEL = (1U << 0),
FREEZE_HOLDER_USERSPACE = (1U << 1),
+ FREEZE_MAY_NEST = (1U << 2),
};
struct super_operations {
@@ -2517,26 +2594,31 @@ struct file *dentry_open(const struct path *path, int flags,
const struct cred *creds);
struct file *dentry_create(const struct path *path, int flags, umode_t mode,
const struct cred *cred);
-struct file *backing_file_open(const struct path *user_path, int flags,
- const struct path *real_path,
- const struct cred *cred);
struct path *backing_file_user_path(struct file *f);
/*
- * file_user_path - get the path to display for memory mapped file
- *
* When mmapping a file on a stackable filesystem (e.g., overlayfs), the file
* stored in ->vm_file is a backing file whose f_inode is on the underlying
- * filesystem. When the mapped file path is displayed to user (e.g. via
- * /proc/<pid>/maps), this helper should be used to get the path to display
- * to the user, which is the path of the fd that user has requested to map.
+ * filesystem. When the mapped file path and inode number are displayed to
+ * user (e.g. via /proc/<pid>/maps), these helpers should be used to get the
+ * path and inode number to display to the user, which is the path of the fd
+ * that user has requested to map and the inode number that would be returned
+ * by fstat() on that same fd.
*/
+/* Get the path to display in /proc/<pid>/maps */
static inline const struct path *file_user_path(struct file *f)
{
if (unlikely(f->f_mode & FMODE_BACKING))
return backing_file_user_path(f);
return &f->f_path;
}
+/* Get the inode whose inode number to display in /proc/<pid>/maps */
+static inline const struct inode *file_user_inode(struct file *f)
+{
+ if (unlikely(f->f_mode & FMODE_BACKING))
+ return d_inode(backing_file_user_path(f)->dentry);
+ return file_inode(f);
+}
static inline struct file *file_clone_open(struct file *file)
{
@@ -2991,8 +3073,6 @@ ssize_t copy_splice_read(struct file *in, loff_t *ppos,
size_t len, unsigned int flags);
extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
-extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
- loff_t *opos, size_t len, unsigned int flags);
extern void
@@ -3121,7 +3201,6 @@ extern int vfs_readlink(struct dentry *, char __user *, int);
extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
-extern struct super_block *get_active_super(struct block_device *bdev);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);
extern void iterate_supers(void (*)(struct super_block *, void *), void *);
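
A sketch of a kernel-internal freeze that tolerates a concurrent userspace freeze, relying on the FREEZE_MAY_NEST semantics documented above; it assumes the freeze_super()/thaw_super() variants that take an enum freeze_holder argument, and my_repair_locked() is hypothetical.

#include <linux/fs.h>

static int my_repair_locked(struct super_block *sb);	/* hypothetical */

static int my_repair(struct super_block *sb)
{
	int err;

	err = freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST);
	if (err)
		return err;

	err = my_repair_locked(sb);

	thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST);
	return err;
}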
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index bcb6609b54b3..11e6434b8e71 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -100,29 +100,49 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
}
-/* Simple call site for access decisions */
-static inline int fsnotify_perm(struct file *file, int mask)
+/*
+ * fsnotify_file_area_perm - permission hook before access to file range
+ */
+static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
+ const loff_t *ppos, size_t count)
{
- int ret;
- __u32 fsnotify_mask = 0;
+ __u32 fsnotify_mask = FS_ACCESS_PERM;
+
+ /*
+ * filesystem may be modified in the context of permission events
+ * (e.g. by HSM filling a file on access), so sb freeze protection
+ * must not be held.
+ */
+ lockdep_assert_once(file_write_not_started(file));
- if (!(mask & (MAY_READ | MAY_OPEN)))
+ if (!(perm_mask & MAY_READ))
return 0;
- if (mask & MAY_OPEN) {
- fsnotify_mask = FS_OPEN_PERM;
+ return fsnotify_file(file, fsnotify_mask);
+}
+
+/*
+ * fsnotify_file_perm - permission hook before file access
+ */
+static inline int fsnotify_file_perm(struct file *file, int perm_mask)
+{
+ return fsnotify_file_area_perm(file, perm_mask, NULL, 0);
+}
- if (file->f_flags & __FMODE_EXEC) {
- ret = fsnotify_file(file, FS_OPEN_EXEC_PERM);
+/*
+ * fsnotify_open_perm - permission hook before file open
+ */
+static inline int fsnotify_open_perm(struct file *file)
+{
+ int ret;
- if (ret)
- return ret;
- }
- } else if (mask & MAY_READ) {
- fsnotify_mask = FS_ACCESS_PERM;
+ if (file->f_flags & __FMODE_EXEC) {
+ ret = fsnotify_file(file, FS_OPEN_EXEC_PERM);
+ if (ret)
+ return ret;
}
- return fsnotify_file(file, fsnotify_mask);
+ return fsnotify_file(file, FS_OPEN_PERM);
}
/*
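
A sketch of calling the new range-aware permission hook from a read path, matching the fsnotify_file_area_perm() signature introduced above; my_do_read() is hypothetical.

#include <linux/fsnotify.h>

static ssize_t my_do_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos);	/* hypothetical */

static ssize_t my_read(struct file *file, char __user *buf,
		       size_t count, loff_t *ppos)
{
	/* Raises FS_ACCESS_PERM so listeners can veto or pre-fill the range. */
	int ret = fsnotify_file_area_perm(file, MAY_READ, ppos, count);

	if (ret)
		return ret;

	return my_do_read(file, buf, count, ppos);
}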
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index c0892d75ce33..7f63be5ca0f1 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -472,10 +472,8 @@ typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t;
struct fsnotify_mark_connector {
spinlock_t lock;
unsigned short type; /* Type of object [lock] */
-#define FSNOTIFY_CONN_FLAG_HAS_FSID 0x01
#define FSNOTIFY_CONN_FLAG_HAS_IREF 0x02
unsigned short flags; /* flags [lock] */
- __kernel_fsid_t fsid; /* fsid of filesystem containing object */
union {
/* Object pointer [lock] */
fsnotify_connp_t *obj;
@@ -530,6 +528,8 @@ struct fsnotify_mark {
#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x0100
#define FSNOTIFY_MARK_FLAG_NO_IREF 0x0200
#define FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS 0x0400
+#define FSNOTIFY_MARK_FLAG_HAS_FSID 0x0800
+#define FSNOTIFY_MARK_FLAG_WEAK_FSID 0x1000
unsigned int flags; /* flags [mark->lock] */
};
@@ -763,11 +763,10 @@ extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp,
/* attach the mark to the object */
extern int fsnotify_add_mark(struct fsnotify_mark *mark,
fsnotify_connp_t *connp, unsigned int obj_type,
- int add_flags, __kernel_fsid_t *fsid);
+ int add_flags);
extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
fsnotify_connp_t *connp,
- unsigned int obj_type, int add_flags,
- __kernel_fsid_t *fsid);
+ unsigned int obj_type, int add_flags);
/* attach the mark to the inode */
static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
@@ -775,15 +774,14 @@ static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
int add_flags)
{
return fsnotify_add_mark(mark, &inode->i_fsnotify_marks,
- FSNOTIFY_OBJ_TYPE_INODE, add_flags, NULL);
+ FSNOTIFY_OBJ_TYPE_INODE, add_flags);
}
static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark,
struct inode *inode,
int add_flags)
{
return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks,
- FSNOTIFY_OBJ_TYPE_INODE, add_flags,
- NULL);
+ FSNOTIFY_OBJ_TYPE_INODE, add_flags);
}
/* given a group and a mark, flag mark to be freed when all references are dropped */
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 6583a58670c5..1b6053da8754 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -162,25 +162,25 @@ typedef unsigned int __bitwise gfp_t;
* %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
*
* The default allocator behavior depends on the request size. We have a concept
- * of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
+ * of so-called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
* !costly allocations are too essential to fail so they are implicitly
* non-failing by default (with some exceptions like OOM victims might fail so
* the caller still has to check for failures) while costly requests try to be
* not disruptive and back off even without invoking the OOM killer.
* The following three modifiers might be used to override some of these
- * implicit rules
+ * implicit rules.
*
* %__GFP_NORETRY: The VM implementation will try only very lightweight
* memory direct reclaim to get some memory under memory pressure (thus
* it can sleep). It will avoid disruptive actions like OOM killer. The
* caller must handle the failure which is quite likely to happen under
* heavy memory pressure. The flag is suitable when failure can easily be
- * handled at small cost, such as reduced throughput
+ * handled at small cost, such as reduced throughput.
*
* %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
* procedures that have previously failed if there is some indication
- * that progress has been made else where. It can wait for other
- * tasks to attempt high level approaches to freeing memory such as
+ * that progress has been made elsewhere. It can wait for other
+ * tasks to attempt high-level approaches to freeing memory such as
* compaction (which removes fragmentation) and page-out.
* There is still a definite limit to the number of retries, but it is
* a larger limit than with %__GFP_NORETRY.
@@ -230,7 +230,7 @@ typedef unsigned int __bitwise gfp_t;
* is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
* __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
* memory tags at the same time as zeroing memory has minimal additional
- * performace impact.
+ * performance impact.
*
* %__GFP_SKIP_KASAN makes KASAN skip unpoisoning on page allocation.
* Used for userspace and vmalloc pages; the latter are unpoisoned by
@@ -274,7 +274,8 @@ typedef unsigned int __bitwise gfp_t;
* accounted to kmemcg.
*
* %GFP_NOWAIT is for kernel allocations that should not stall for direct
- * reclaim, start physical IO or use any filesystem callback.
+ * reclaim, start physical IO or use any filesystem callback. It is very
+ * likely to fail to allocate memory, even for very small allocations.
*
* %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
* that do not require the starting of any physical IO.
@@ -325,7 +326,7 @@ typedef unsigned int __bitwise gfp_t;
#define GFP_ATOMIC (__GFP_HIGH|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
-#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
+#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)
#define GFP_NOIO (__GFP_RECLAIM)
#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
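
A small example of the GFP_NOWAIT caveat documented above: the allocation never sleeps and can fail even for tiny sizes (and now suppresses the failure warning via __GFP_NOWARN), so the NULL path must be handled; struct my_event and the fallback are hypothetical.

#include <linux/errno.h>
#include <linux/slab.h>

struct my_event {		/* hypothetical */
	int data;
};

static int my_queue_event(int data)
{
	struct my_event *ev = kmalloc(sizeof(*ev), GFP_NOWAIT);

	if (!ev)
		return -ENOMEM;	/* caller falls back, e.g. to deferred work */

	ev->data = data;
	/* ... hand ev off to a consumer ... */
	return 0;
}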
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 6730ee900ee1..13b1e65fbdcc 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -21,10 +21,6 @@
#define HID_USAGE_SENSOR_ALS 0x200041
#define HID_USAGE_SENSOR_DATA_LIGHT 0x2004d0
#define HID_USAGE_SENSOR_LIGHT_ILLUM 0x2004d1
-#define HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE 0x2004d2
-#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY 0x2004d3
-#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X 0x2004d4
-#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y 0x2004d5
/* PROX (200011) */
#define HID_USAGE_SENSOR_PROX 0x200011
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index be20cff4ba73..451c1dff0e87 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -484,6 +484,82 @@ static inline void memcpy_to_folio(struct folio *folio, size_t offset,
}
/**
+ * folio_zero_tail - Zero the tail of a folio.
+ * @folio: The folio to zero.
+ * @offset: The byte offset in the folio to start zeroing at.
+ * @kaddr: The address the folio is currently mapped to.
+ *
+ * If you have already used kmap_local_folio() to map a folio, written
+ * some data to it and now need to zero the end of the folio (and flush
+ * the dcache), you can use this function. If you do not have the
+ * folio kmapped (eg the folio has been partially populated by DMA),
+ * use folio_zero_range() or folio_zero_segment() instead.
+ *
+ * Return: An address which can be passed to kunmap_local().
+ */
+static inline __must_check void *folio_zero_tail(struct folio *folio,
+ size_t offset, void *kaddr)
+{
+ size_t len = folio_size(folio) - offset;
+
+ if (folio_test_highmem(folio)) {
+ size_t max = PAGE_SIZE - offset_in_page(offset);
+
+ while (len > max) {
+ memset(kaddr, 0, max);
+ kunmap_local(kaddr);
+ len -= max;
+ offset += max;
+ max = PAGE_SIZE;
+ kaddr = kmap_local_folio(folio, offset);
+ }
+ }
+
+ memset(kaddr, 0, len);
+ flush_dcache_folio(folio);
+
+ return kaddr;
+}
+
+/**
+ * folio_fill_tail - Copy some data to a folio and pad with zeroes.
+ * @folio: The destination folio.
+ * @offset: The offset into @folio at which to start copying.
+ * @from: The data to copy.
+ * @len: How many bytes of data to copy.
+ *
+ * This function is most useful for filesystems which support inline data.
+ * When they want to copy data from the inode into the page cache, this
+ * function does everything for them. It supports large folios even on
+ * HIGHMEM configurations.
+ */
+static inline void folio_fill_tail(struct folio *folio, size_t offset,
+ const char *from, size_t len)
+{
+ char *to = kmap_local_folio(folio, offset);
+
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ if (folio_test_highmem(folio)) {
+ size_t max = PAGE_SIZE - offset_in_page(offset);
+
+ while (len > max) {
+ memcpy(to, from, max);
+ kunmap_local(to);
+ len -= max;
+ from += max;
+ offset += max;
+ max = PAGE_SIZE;
+ to = kmap_local_folio(folio, offset);
+ }
+ }
+
+ memcpy(to, from, len);
+ to = folio_zero_tail(folio, offset + len, to + len);
+ kunmap_local(to);
+}
+
+/**
* memcpy_from_file_folio - Copy some bytes from a file folio.
* @to: The destination buffer.
* @folio: The folio to copy from.
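
A sketch of serving inline data with folio_fill_tail() as its kernel-doc above suggests; struct my_inode_info, my_inode_info(), and the inline-data fields are hypothetical.

#include <linux/highmem.h>
#include <linux/pagemap.h>

struct my_inode_info {		/* hypothetical */
	const char *i_inline;
	size_t i_inline_len;
};

static struct my_inode_info *my_inode_info(struct inode *inode);	/* hypothetical */

static int my_read_inline_folio(struct inode *inode, struct folio *folio)
{
	struct my_inode_info *mi = my_inode_info(inode);

	/* Copy the inline bytes and zero the rest of the folio (handles HIGHMEM). */
	folio_fill_tail(folio, 0, mi->i_inline, mi->i_inline_len);

	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}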
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index ddc7ebb70523..5f4c74facf6a 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -108,17 +108,13 @@ enum qm_stop_reason {
};
enum qm_state {
- QM_INIT = 0,
- QM_START,
- QM_CLOSE,
+ QM_WORK = 0,
QM_STOP,
};
enum qp_state {
- QP_INIT = 1,
- QP_START,
+ QP_START = 1,
QP_STOP,
- QP_CLOSE,
};
enum qm_hw_ver {
@@ -160,6 +156,11 @@ enum qm_cap_bits {
QM_SUPPORT_RPM,
};
+struct qm_dev_alg {
+ u64 alg_msk;
+ const char *alg;
+};
+
struct dfx_diff_registers {
u32 *regs;
u32 reg_offset;
@@ -265,6 +266,16 @@ struct hisi_qm_cap_info {
u32 v3_val;
};
+struct hisi_qm_cap_record {
+ u32 type;
+ u32 cap_val;
+};
+
+struct hisi_qm_cap_tables {
+ struct hisi_qm_cap_record *qm_cap_table;
+ struct hisi_qm_cap_record *dev_cap_table;
+};
+
struct hisi_qm_list {
struct mutex lock;
struct list_head list;
@@ -365,7 +376,6 @@ struct hisi_qm {
struct work_struct rst_work;
struct work_struct cmd_process;
- const char *algs;
bool use_sva;
resource_size_t phys_base;
@@ -376,6 +386,8 @@ struct hisi_qm {
u32 mb_qos;
u32 type_rate;
struct qm_err_isolate isolate_data;
+
+ struct hisi_qm_cap_tables cap_tables;
};
struct hisi_qp_status {
@@ -563,6 +575,8 @@ void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
const struct hisi_qm_cap_info *info_table,
u32 index, bool is_read);
+int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
+ u32 dev_algs_size);
/* Used by VFIO ACC live migration driver */
struct pci_driver *hisi_sec_get_pf_driver(void);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index fa0350b0812a..5adb86af35fc 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -67,6 +67,26 @@ extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
+/*
+ * Mask of all large folio orders supported for anonymous THP; all orders up to
+ * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
+ * (which is a limitation of the THP implementation).
+ */
+#define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
+
+/*
+ * Mask of all large folio orders supported for file THP.
+ */
+#define THP_ORDERS_ALL_FILE (BIT(PMD_ORDER) | BIT(PUD_ORDER))
+
+/*
+ * Mask of all large folio orders supported for THP.
+ */
+#define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
+
+#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
@@ -77,45 +97,105 @@ extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
extern unsigned long transparent_hugepage_flags;
+extern unsigned long huge_anon_orders_always;
+extern unsigned long huge_anon_orders_madvise;
+extern unsigned long huge_anon_orders_inherit;
-#define hugepage_flags_enabled() \
- (transparent_hugepage_flags & \
- ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
-#define hugepage_flags_always() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_FLAG))
+static inline bool hugepage_global_enabled(void)
+{
+ return transparent_hugepage_flags &
+ ((1<<TRANSPARENT_HUGEPAGE_FLAG) |
+ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
+}
+
+static inline bool hugepage_global_always(void)
+{
+ return transparent_hugepage_flags &
+ (1<<TRANSPARENT_HUGEPAGE_FLAG);
+}
+
+static inline bool hugepage_flags_enabled(void)
+{
+ /*
+ * We cover both the anon and the file-backed case here; we must return
+ * true if globally enabled, even when all anon sizes are set to never.
+ * So we don't need to look at huge_anon_orders_inherit.
+ */
+ return hugepage_global_enabled() ||
+ huge_anon_orders_always ||
+ huge_anon_orders_madvise;
+}
+
+static inline int highest_order(unsigned long orders)
+{
+ return fls_long(orders) - 1;
+}
+
+static inline int next_order(unsigned long *orders, int prev)
+{
+ *orders &= ~BIT(prev);
+ return highest_order(*orders);
+}
/*
* Do the below checks:
* - For file vma, check if the linear page offset of vma is
- * HPAGE_PMD_NR aligned within the file. The hugepage is
- * guaranteed to be hugepage-aligned within the file, but we must
- * check that the PMD-aligned addresses in the VMA map to
- * PMD-aligned offsets within the file, else the hugepage will
- * not be PMD-mappable.
- * - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE
+ * order-aligned within the file. The hugepage is
+ * guaranteed to be order-aligned within the file, but we must
+ * check that the order-aligned addresses in the VMA map to
+ * order-aligned offsets within the file, else the hugepage will
+ * not be mappable.
+ * - For all vmas, check if the haddr is in an aligned hugepage
* area.
*/
-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long addr)
+static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
+ unsigned long addr, int order)
{
+ unsigned long hpage_size = PAGE_SIZE << order;
unsigned long haddr;
/* Don't have to check pgoff for anonymous vma */
if (!vma_is_anonymous(vma)) {
if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
- HPAGE_PMD_NR))
+ hpage_size >> PAGE_SHIFT))
return false;
}
- haddr = addr & HPAGE_PMD_MASK;
+ haddr = ALIGN_DOWN(addr, hpage_size);
- if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
+ if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
return false;
return true;
}
+/*
+ * Filter the bitfield of input orders to the ones suitable for use in the vma.
+ * See thp_vma_suitable_order().
+ * All orders that pass the checks are returned as a bitfield.
+ */
+static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long orders)
+{
+ int order;
+
+ /*
+ * Iterate over orders, highest to lowest, removing orders that don't
+ * meet alignment requirements from the set. Exit loop at first order
+ * that meets requirements, since all lower orders must also meet
+ * requirements.
+ */
+
+ order = highest_order(orders);
+
+ while (orders) {
+ if (thp_vma_suitable_order(vma, addr, order))
+ break;
+ order = next_order(&orders, order);
+ }
+
+ return orders;
+}
+
static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
struct inode *inode;
@@ -126,12 +206,55 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
inode = vma->vm_file->f_inode;
return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
- (vma->vm_flags & VM_EXEC) &&
!inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}
-bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
- bool smaps, bool in_pf, bool enforce_sysfs);
+unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs,
+ unsigned long orders);
+
+/**
+ * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
+ * @vma: the vm area to check
+ * @vm_flags: use these vm_flags instead of vma->vm_flags
+ * @smaps: whether answer will be used for smaps file
+ * @in_pf: whether answer will be used by page fault handler
+ * @enforce_sysfs: whether sysfs config should be taken into account
+ * @orders: bitfield of all orders to consider
+ *
+ * Calculates the intersection of the requested hugepage orders and the allowed
+ * hugepage orders for the provided vma. Permitted orders are encoded as a set
+ * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
+ * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
+ *
+ * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
+ * orders are allowed.
+ */
+static inline
+unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs,
+ unsigned long orders)
+{
+ /* Optimization to check if required orders are enabled early. */
+ if (enforce_sysfs && vma_is_anonymous(vma)) {
+ unsigned long mask = READ_ONCE(huge_anon_orders_always);
+
+ if (vm_flags & VM_HUGEPAGE)
+ mask |= READ_ONCE(huge_anon_orders_madvise);
+ if (hugepage_global_always() ||
+ ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
+ mask |= READ_ONCE(huge_anon_orders_inherit);
+
+ orders &= mask;
+ if (!orders)
+ return 0;
+ }
+
+ return __thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf,
+ enforce_sysfs, orders);
+}
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
@@ -267,17 +390,24 @@ static inline bool folio_test_pmd_mappable(struct folio *folio)
return false;
}
-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long addr)
+static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
+ unsigned long addr, int order)
{
return false;
}
-static inline bool hugepage_vma_check(struct vm_area_struct *vma,
- unsigned long vm_flags, bool smaps,
- bool in_pf, bool enforce_sysfs)
+static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long orders)
{
- return false;
+ return 0;
+}
+
+static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs,
+ unsigned long orders)
+{
+ return 0;
}
static inline void folio_prep_large_rmappable(struct folio *folio) {}
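
A sketch of walking the orders bitfield returned by thp_vma_allowable_orders() with the highest_order()/next_order() helpers added above, largest order first; my_try_alloc_order() is hypothetical.

#include <linux/huge_mm.h>

static bool my_try_alloc_order(struct vm_area_struct *vma,
			       unsigned long addr, int order);	/* hypothetical */

static void my_fault_path(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long orders;
	int order;

	/* smaps=false, in_pf=true, enforce_sysfs=true for a page-fault caller. */
	orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true,
					  true, THP_ORDERS_ALL_ANON);
	orders = thp_vma_suitable_orders(vma, addr, orders);

	/* Try the largest allowed and suitable order first, then fall back. */
	for (order = highest_order(orders); orders;
	     order = next_order(&orders, order)) {
		if (my_try_alloc_order(vma, addr, order))
			break;
	}
}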
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 236ec7b63c54..c1ee640d87b1 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -829,7 +829,7 @@ static inline unsigned huge_page_shift(struct hstate *h)
static inline bool hstate_is_gigantic(struct hstate *h)
{
- return huge_page_order(h) > MAX_ORDER;
+ return huge_page_order(h) > MAX_PAGE_ORDER;
}
static inline unsigned int pages_per_huge_page(const struct hstate *h)
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index c2ac9e9e7ee9..2b5e500bf093 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -4447,7 +4447,8 @@ ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
action != WLAN_PUB_ACTION_LOC_TRACK_NOTI &&
action != WLAN_PUB_ACTION_FTM_REQUEST &&
action != WLAN_PUB_ACTION_FTM_RESPONSE &&
- action != WLAN_PUB_ACTION_FILS_DISCOVERY;
+ action != WLAN_PUB_ACTION_FILS_DISCOVERY &&
+ action != WLAN_PUB_ACTION_VENDOR_SPECIFIC;
}
/**
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 40fc5813cf93..bccb3f1f6262 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -37,13 +37,6 @@ extern struct cred init_cred;
#define INIT_TASK_COMM "swapper"
-/* Attach to the init_task data structure for proper alignment */
-#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
-#define __init_task_data __section(".data..init_task")
-#else
-#define __init_task_data /**/
-#endif
-
/* Attach to the thread_info data structure for proper alignment */
#define __init_thread_info __section(".data..init_thread_info")
diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h
index ee07393445f9..a3529b962be6 100644
--- a/include/linux/intel_tpmi.h
+++ b/include/linux/intel_tpmi.h
@@ -12,6 +12,19 @@
#define TPMI_MINOR_VERSION(val) FIELD_GET(GENMASK(4, 0), val)
#define TPMI_MAJOR_VERSION(val) FIELD_GET(GENMASK(7, 5), val)
+/*
+ * List of supported TPMI IDs.
+ * Some TPMI IDs are not used by Linux, so the numbers are not consecutive.
+ */
+enum intel_tpmi_id {
+ TPMI_ID_RAPL = 0, /* Running Average Power Limit */
+ TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */
+ TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */
+ TPMI_ID_SST = 5, /* Speed Select Technology */
+ TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */
+ TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */
+};
+
/**
* struct intel_tpmi_plat_info - Platform information for a TPMI device instance
* @package_id: CPU Package id
@@ -32,7 +45,6 @@ struct intel_tpmi_plat_info {
struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev);
struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index);
int tpmi_get_resource_count(struct auxiliary_device *auxdev);
-
-int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, int *locked,
- int *disabled);
+int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, bool *read_blocked,
+ bool *write_blocked);
#endif
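A minimal sketch (not part of this header) of how an auxiliary driver might consume the reworked status API; check_uncore_access() is a made-up helper.

static int check_uncore_access(struct auxiliary_device *auxdev)
{
	bool read_blocked = false, write_blocked = false;
	int ret;

	ret = tpmi_get_feature_status(auxdev, TPMI_ID_UNCORE,
				      &read_blocked, &write_blocked);
	if (ret)
		return ret;
	if (read_blocked)
		return -EPERM;		/* feature cannot be accessed at all */

	/* Non-zero tells the caller to run in read-only mode. */
	return write_blocked ? 1 : 0;
}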
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 805bb635cdf5..239a4f68801b 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -434,6 +434,7 @@ enum {
/* keep async read/write and isreg together and in order */
REQ_F_SUPPORT_NOWAIT_BIT,
REQ_F_ISREG_BIT,
+ REQ_F_POLL_NO_LAZY_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
@@ -501,6 +502,8 @@ enum {
REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT),
/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
REQ_F_HASH_LOCKED = BIT(REQ_F_HASH_LOCKED_BIT),
+ /* don't use lazy poll wake for this request */
+ REQ_F_POLL_NO_LAZY = BIT(REQ_F_POLL_NO_LAZY_BIT),
};
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 14f5cfabbbc8..db7fe25f3370 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -331,6 +331,9 @@ extern int
walk_system_ram_res(u64 start, u64 end, void *arg,
int (*func)(struct resource *, void *));
extern int
+walk_system_ram_res_rev(u64 start, u64 end, void *arg,
+ int (*func)(struct resource *, void *));
+extern int
walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
void *arg, int (*func)(struct resource *, void *));
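A hedged sketch of the new reverse walk: a kexec-style loader searching top-down for the highest System RAM region. The demo_* names are assumptions; the callback contract (return non-zero to stop) matches walk_system_ram_res().

static int demo_top_down(struct resource *res, void *arg)
{
	u64 *where = arg;

	*where = res->end;
	return 1;		/* stop at the first (highest) region */
}

static u64 demo_find_highest_ram(void)
{
	u64 where = 0;

	walk_system_ram_res_rev(0, ULLONG_MAX, &where, demo_top_down);
	return where;
}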
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 6dcbb4eb80fb..971f3e826e15 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -756,11 +756,6 @@ struct journal_s
unsigned long j_flags;
/**
- * @j_atomic_flags: Atomic journaling state flags.
- */
- unsigned long j_atomic_flags;
-
- /**
* @j_errno:
*
* Is there an outstanding uncleared error on the journal (from a prior
@@ -999,6 +994,13 @@ struct journal_s
struct block_device *j_fs_dev;
/**
+ * @j_fs_dev_wb_err:
+ *
+ * Records the errseq of the client fs's backing block device.
+ */
+ errseq_t j_fs_dev_wb_err;
+
+ /**
* @j_total_len: Total maximum capacity of the journal region on disk.
*/
unsigned int j_total_len;
@@ -1374,6 +1376,9 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum2, CSUM_V2)
JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT)
+/* Journal high priority write IO operation flags */
+#define JBD2_JOURNAL_REQ_FLAGS (REQ_META | REQ_SYNC | REQ_IDLE)
+
/*
* Journal flag definitions
*/
@@ -1397,12 +1402,6 @@ JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT)
JBD2_JOURNAL_FLUSH_ZEROOUT)
/*
- * Journal atomic flag definitions
- */
-#define JBD2_CHECKPOINT_IO_ERROR 0x001 /* Detect io error while writing
- * buffer back to disk */
-
-/*
* Function declarations for the journaling transaction and buffer
* management
*/
@@ -1695,6 +1694,25 @@ static inline void jbd2_journal_abort_handle(handle_t *handle)
handle->h_aborted = 1;
}
+static inline void jbd2_init_fs_dev_write_error(journal_t *journal)
+{
+ struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping;
+
+ /*
+ * Save the original wb_err value of client fs's bdev mapping which
+ * could be used to detect the client fs's metadata async write error.
+ */
+ errseq_check_and_advance(&mapping->wb_err, &journal->j_fs_dev_wb_err);
+}
+
+static inline int jbd2_check_fs_dev_write_error(journal_t *journal)
+{
+ struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping;
+
+ return errseq_check(&mapping->wb_err,
+ READ_ONCE(journal->j_fs_dev_wb_err));
+}
+
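Illustrative sketch, not code from the patch: a commit or checkpoint step could test the wb_err sampled at journal load before trusting previously written metadata; demo_commit_step() is hypothetical.

static void demo_commit_step(journal_t *journal)
{
	if (jbd2_check_fs_dev_write_error(journal)) {
		/* Async writeback of client-fs metadata failed earlier. */
		jbd2_journal_abort(journal, -EIO);
		return;
	}
	/* ... submit the commit record with JBD2_JOURNAL_REQ_FLAGS ... */
}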
#endif /* __KERNEL__ */
/* Comparison functions for transaction IDs: perform comparisons using
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 72cb693b075b..dbb06d789e74 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -4,6 +4,7 @@
#include <linux/bug.h>
#include <linux/kasan-enabled.h>
+#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>
@@ -129,20 +130,39 @@ static __always_inline void kasan_poison_slab(struct slab *slab)
__kasan_poison_slab(slab);
}
-void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
+ * @cache: Cache the object belongs to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * temporarily unpoisons an object from a newly allocated slab without doing
+ * anything else. The object must later be repoisoned by
+ * kasan_poison_new_object().
+ */
+static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
void *object)
{
if (kasan_enabled())
- __kasan_unpoison_object_data(cache, object);
+ __kasan_unpoison_new_object(cache, object);
}
-void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_poison_new_object - Repoison a new slab object.
+ * @cache: Cache the object belongs to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * repoisons an object that was previously unpoisoned by
+ * kasan_unpoison_new_object() without doing anything else.
+ */
+static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
void *object)
{
if (kasan_enabled())
- __kasan_poison_object_data(cache, object);
+ __kasan_poison_new_object(cache, object);
}
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
@@ -172,13 +192,6 @@ static __always_inline void kasan_kfree_large(void *ptr)
__kasan_kfree_large(ptr, _RET_IP_);
}
-void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
-static __always_inline void kasan_slab_free_mempool(void *ptr)
-{
- if (kasan_enabled())
- __kasan_slab_free_mempool(ptr, _RET_IP_);
-}
-
void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
@@ -219,6 +232,113 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
return (void *)object;
}
+bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
+ unsigned long ip);
+/**
+ * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function is similar to kasan_mempool_poison_object() but operates on
+ * page allocations.
+ *
+ * Before the poisoned allocation can be reused, it must be unpoisoned via
+ * kasan_mempool_unpoison_pages().
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
+ */
+static __always_inline bool kasan_mempool_poison_pages(struct page *page,
+ unsigned int order)
+{
+ if (kasan_enabled())
+ return __kasan_mempool_poison_pages(page, order, _RET_IP_);
+ return true;
+}
+
+void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
+ unsigned long ip);
+/**
+ * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function unpoisons a page allocation that was previously poisoned by
+ * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
+ * the tag-based modes, this function assigns a new tag to the allocation.
+ */
+static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
+ unsigned int order)
+{
+ if (kasan_enabled())
+ __kasan_mempool_unpoison_pages(page, order, _RET_IP_);
+}
+
+bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
+/**
+ * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
+ * @ptr: Pointer to the slab allocation.
+ *
+ * This function is intended for kernel subsystems that cache slab allocations
+ * to reuse them instead of freeing them back to the slab allocator (e.g.
+ * mempool).
+ *
+ * This function poisons a slab allocation and saves a free stack trace for it
+ * without initializing the allocation's memory and without putting it into the
+ * quarantine (for the Generic mode).
+ *
+ * This function also performs checks to detect double-free and invalid-free
+ * bugs and reports them. The caller can use the return value of this function
+ * to find out if the allocation is buggy.
+ *
+ * Before the poisoned allocation can be reused, it must be unpoisoned via
+ * kasan_mempool_unpoison_object().
+ *
+ * This function operates on all slab allocations including large kmalloc
+ * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
+ * size > KMALLOC_MAX_CACHE_SIZE).
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
+ */
+static __always_inline bool kasan_mempool_poison_object(void *ptr)
+{
+ if (kasan_enabled())
+ return __kasan_mempool_poison_object(ptr, _RET_IP_);
+ return true;
+}
+
+void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
+/**
+ * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
+ * @ptr: Pointer to the slab allocation.
+ * @size: Size to be unpoisoned.
+ *
+ * This function is intended for kernel subsystems that cache slab allocations
+ * to reuse them instead of freeing them back to the slab allocator (e.g.
+ * mempool).
+ *
+ * This function unpoisons a slab allocation that was previously poisoned via
+ * kasan_mempool_poison_object() and saves an alloc stack trace for it without
+ * initializing the allocation's memory. For the tag-based modes, this function
+ * does not assign a new tag to the allocation and instead restores the
+ * original tags based on the pointer value.
+ *
+ * This function operates on all slab allocations including large kmalloc
+ * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
+ * size > KMALLOC_MAX_CACHE_SIZE).
+ */
+static __always_inline void kasan_mempool_unpoison_object(void *ptr,
+ size_t size)
+{
+ if (kasan_enabled())
+ __kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
+}
+
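A minimal sketch, not mempool's actual implementation: a subsystem caching a single slab object could pair the new poison/unpoison hooks like this. cache_slot, cached_size and the cache_put()/cache_get() helpers are assumptions.

static void *cache_slot;
static size_t cached_size;

static bool cache_put(void *ptr, size_t size)
{
	if (!kasan_mempool_poison_object(ptr))
		return false;	/* double-free/invalid-free detected; do not reuse */
	cache_slot = ptr;
	cached_size = size;
	return true;
}

static void *cache_get(void)
{
	void *ptr = cache_slot;

	if (ptr) {
		cache_slot = NULL;
		kasan_mempool_unpoison_object(ptr, cached_size);
	}
	return ptr;
}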
/*
* Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
* the hardware tag-based mode that doesn't rely on compiler instrumentation.
@@ -242,9 +362,9 @@ static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
-static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
void *object) {}
-static inline void kasan_poison_object_data(struct kmem_cache *cache,
+static inline void kasan_poison_new_object(struct kmem_cache *cache,
void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
const void *object)
@@ -256,7 +376,6 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init
return false;
}
static inline void kasan_kfree_large(void *ptr) {}
-static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags, bool init)
{
@@ -276,6 +395,17 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
{
return (void *)object;
}
+static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
+{
+ return true;
+}
+static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
+static inline bool kasan_mempool_poison_object(void *ptr)
+{
+ return true;
+}
+static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}
+
static inline bool kasan_check_byte(const void *address)
{
return true;
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 8227455192b7..400cb6c02176 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -403,7 +403,7 @@ bool kexec_load_permitted(int kexec_image_type);
/* List of defined/legal kexec file flags */
#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
- KEXEC_FILE_NO_INITRAMFS)
+ KEXEC_FILE_NO_INITRAMFS | KEXEC_FILE_DEBUG)
/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;
@@ -500,6 +500,13 @@ static inline int crash_hotplug_memory_support(void) { return 0; }
static inline unsigned int crash_get_elfcorehdr_size(void) { return 0; }
#endif
+extern bool kexec_file_dbg_print;
+
+#define kexec_dprintk(fmt, ...) \
+ printk("%s" fmt, \
+ kexec_file_dbg_print ? KERN_INFO : KERN_DEBUG, \
+ ##__VA_ARGS__)
+
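For illustration only: a kexec_file loader emitting debug output that is promoted to KERN_INFO when KEXEC_FILE_DEBUG was passed (i.e. kexec_file_dbg_print is set); demo_report_segment() is a made-up helper.

static void demo_report_segment(unsigned long mem, size_t memsz)
{
	kexec_dprintk("segment at %#lx, size %#zx\n", mem, memsz);
}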
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 7d985a1dfe4a..5caf3ce82373 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -73,6 +73,7 @@ struct key_type {
unsigned int flags;
#define KEY_TYPE_NET_DOMAIN 0x00000001 /* Keys of this type have a net namespace domain */
+#define KEY_TYPE_INSTANT_REAP 0x00000002 /* Keys of this type don't have a delay after expiring */
/* vet a description */
int (*vet_description)(const char *description);
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 401af4757514..88100cc9caba 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -223,6 +223,8 @@ bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *sla
#else /* CONFIG_KFENCE */
+#define kfence_sample_interval (0)
+
static inline bool is_kfence_address(const void *addr) { return false; }
static inline void kfence_alloc_pool_and_metadata(void) { }
static inline void kfence_init(void) { }
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index c2dd786a30e1..401348e9f92b 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -76,8 +76,8 @@ static inline void ksm_exit(struct mm_struct *mm)
* We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
* but what if the vma was unmerged while the page was swapped out?
*/
-struct page *ksm_might_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address);
+struct folio *ksm_might_need_to_copy(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr);
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
@@ -129,10 +129,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
return 0;
}
-static inline struct page *ksm_might_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
+static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr)
{
- return page;
+ return folio;
}
static inline void rmap_walk_ksm(struct folio *folio,
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index b35968ee9fb5..7675a48a0701 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -73,8 +73,10 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren
/**
* list_lru_add: add an element to the lru list's tail
- * @list_lru: the lru pointer
+ * @lru: the lru pointer
* @item: the item to be added.
+ * @nid: the node id of the sublist to add the item to.
+ * @memcg: the cgroup of the sublist to add the item to.
*
* If the element is already part of a list, this function returns doing
* nothing. Therefore the caller does not need to keep state about whether or
@@ -83,24 +85,54 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren
* the caller organize itself in a way that elements can be in more than
* one type of list, it is up to the caller to fully remove the item from
* the previous list (with list_lru_del() for instance) before moving it
- * to @list_lru
+ * to @lru.
+ *
+ * Return: true if the list was updated, false otherwise
+ */
+bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
+
+/**
+ * list_lru_add_obj: add an element to the lru list's tail
+ * @lru: the lru pointer
+ * @item: the item to be added.
+ *
+ * This function is similar to list_lru_add(), but the NUMA node and the
+ * memcg of the sublist are determined by the @item list_head. This assumption
+ * is valid for slab object LRUs such as dentries, inodes, etc.
*
* Return value: true if the list was updated, false otherwise
*/
-bool list_lru_add(struct list_lru *lru, struct list_head *item);
+bool list_lru_add_obj(struct list_lru *lru, struct list_head *item);
/**
- * list_lru_del: delete an element to the lru list
- * @list_lru: the lru pointer
+ * list_lru_del: delete an element from the lru list
+ * @lru: the lru pointer
* @item: the item to be deleted.
+ * @nid: the node id of the sublist to delete the item from.
+ * @memcg: the cgroup of the sublist to delete the item from.
*
- * This function works analogously as list_lru_add in terms of list
+ * This function works analogously to list_lru_add() in terms of list
* manipulation. The comments about an element already pertaining to
- * a list are also valid for list_lru_del.
+ * a list are also valid for list_lru_del().
*
- * Return value: true if the list was updated, false otherwise
+ * Return: true if the list was updated, false otherwise
*/
-bool list_lru_del(struct list_lru *lru, struct list_head *item);
+bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
+
+/**
+ * list_lru_del_obj: delete an element from the lru list
+ * @lru: the lru pointer
+ * @item: the item to be deleted.
+ *
+ * This function is similar to list_lru_del(), but the NUMA node and the
+ * memcg of the sublist are determined by the @item list_head. This assumption
+ * is valid for slab object LRUs such as dentries, inodes, etc.
+ *
+ * Return value: true if the list was updated, false otherwise.
+ */
+bool list_lru_del_obj(struct list_lru *lru, struct list_head *item);
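A hedged sketch of the *_obj helpers, which derive the NUMA node and memcg from the item itself. struct demo_obj, demo_lru (assumed to have been set up with list_lru_init()) and the demo_* functions are hypothetical.

struct demo_obj {
	struct list_head lru_link;
	/* ... payload ... */
};

static struct list_lru demo_lru;

static void demo_make_reclaimable(struct demo_obj *obj)
{
	/* Returns false if the item was already on the list. */
	if (!list_lru_add_obj(&demo_lru, &obj->lru_link))
		pr_debug("demo: object already on LRU\n");
}

static void demo_pin(struct demo_obj *obj)
{
	list_lru_del_obj(&demo_lru, &obj->lru_link);
}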
/**
* list_lru_count_one: return the number of objects currently held by @lru
@@ -108,9 +140,11 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item);
* @nid: the node id to count from.
* @memcg: the cgroup to count from.
*
- * Always return a non-negative number, 0 for empty lists. There is no
- * guarantee that the list is not updated while the count is being computed.
- * Callers that want such a guarantee need to provide an outer lock.
+ * There is no guarantee that the list is not updated while the count is being
+ * computed. Callers that want such a guarantee need to provide an outer lock.
+ *
+ * Return: 0 for empty lists, otherwise the number of objects
+ * currently held by @lru.
*/
unsigned long list_lru_count_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg);
@@ -136,12 +170,28 @@ static inline unsigned long list_lru_count(struct list_lru *lru)
void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
struct list_head *head);
+/**
+ * list_lru_putback: undo list_lru_isolate
+ * @lru: the lru pointer.
+ * @item: the item to put back.
+ * @nid: the node id of the sublist to put the item back to.
+ * @memcg: the cgroup of the sublist to put the item back to.
+ *
+ * Put back an isolated item into its original LRU. Note that unlike
+ * list_lru_add(), this does not increment the node LRU count (as
+ * list_lru_isolate() does not originally decrement this count).
+ *
+ * Since we might have dropped the LRU lock in between, recompute list_lru_one
+ * from the node's id and memcg.
+ */
+void list_lru_putback(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
/**
- * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
+ * list_lru_walk_one: walk a @lru, isolating and disposing freeable items.
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
@@ -150,24 +200,24 @@ typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
*
- * This function will scan all elements in a particular list_lru, calling the
+ * This function will scan all elements in a particular @lru, calling the
* @isolate callback for each of those items, along with the current list
* spinlock and a caller-provided opaque. The @isolate callback can choose to
* drop the lock internally, but *must* return with the lock held. The callback
- * will return an enum lru_status telling the list_lru infrastructure what to
+ * will return an enum lru_status telling the @lru infrastructure what to
* do with the object being scanned.
*
- * Please note that nr_to_walk does not mean how many objects will be freed,
+ * Please note that @nr_to_walk does not mean how many objects will be freed,
* just how many objects will be scanned.
*
- * Return value: the number of objects effectively removed from the LRU.
+ * Return: the number of objects effectively removed from the LRU.
*/
unsigned long list_lru_walk_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
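A sketch under assumptions: a shrinker-style scan built on list_lru_walk_one() with a callback that moves items to a private dispose list. demo_isolate()/demo_scan() are made up; the lru_status values and list_lru_isolate_move() are the existing API.

static enum lru_status demo_isolate(struct list_head *item,
				    struct list_lru_one *list,
				    spinlock_t *lock, void *cb_arg)
{
	struct list_head *dispose = cb_arg;

	list_lru_isolate_move(list, item, dispose);
	return LRU_REMOVED;
}

static unsigned long demo_scan(struct list_lru *lru, int nid,
			       struct mem_cgroup *memcg, unsigned long nr)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	freed = list_lru_walk_one(lru, nid, memcg, demo_isolate,
				  &dispose, &nr);
	/* ... free everything now sitting on the private dispose list ... */
	return freed;
}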
/**
- * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
+ * list_lru_walk_one_irq: walk a @lru, isolating and disposing freeable items.
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
@@ -176,7 +226,7 @@ unsigned long list_lru_walk_one(struct list_lru *lru,
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
*
- * Same as @list_lru_walk_one except that the spinlock is acquired with
+ * Same as list_lru_walk_one() except that the spinlock is acquired with
* spin_lock_irq().
*/
unsigned long list_lru_walk_one_irq(struct list_lru *lru,
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index 9c533c8d701e..70d30d40ea4a 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -127,12 +127,12 @@ struct lock_class {
unsigned long usage_mask;
const struct lock_trace *usage_traces[LOCK_TRACE_STATES];
+ const char *name;
/*
* Generation counter, when doing certain classes of graph walking,
* to ensure that we check one node only once:
*/
int name_version;
- const char *name;
u8 wait_type_inner;
u8 wait_type_outer;
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index ff217a5ce552..185924c56378 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -171,6 +171,8 @@ LSM_HOOK(int, 0, file_alloc_security, struct file *file)
LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file)
LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd,
unsigned long arg)
+LSM_HOOK(int, 0, file_ioctl_compat, struct file *file, unsigned int cmd,
+ unsigned long arg)
LSM_HOOK(int, 0, mmap_addr, unsigned long addr)
LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags)
@@ -262,6 +264,10 @@ LSM_HOOK(int, 0, sem_semop, struct kern_ipc_perm *perm, struct sembuf *sops,
LSM_HOOK(int, 0, netlink_send, struct sock *sk, struct sk_buff *skb)
LSM_HOOK(void, LSM_RET_VOID, d_instantiate, struct dentry *dentry,
struct inode *inode)
+LSM_HOOK(int, -EOPNOTSUPP, getselfattr, unsigned int attr,
+ struct lsm_ctx __user *ctx, size_t *size, u32 flags)
+LSM_HOOK(int, -EOPNOTSUPP, setselfattr, unsigned int attr,
+ struct lsm_ctx *ctx, size_t size, u32 flags)
LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, const char *name,
char **value)
LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index dcb5e5b5eb13..a2ade0ffe9e7 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -25,6 +25,7 @@
#ifndef __LINUX_LSM_HOOKS_H
#define __LINUX_LSM_HOOKS_H
+#include <uapi/linux/lsm.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/rculist.h>
@@ -42,6 +43,18 @@ struct security_hook_heads {
#undef LSM_HOOK
} __randomize_layout;
+/**
+ * struct lsm_id - Identify a Linux Security Module.
+ * @name: name of the LSM, must be approved by the LSM maintainers
+ * @id: LSM ID number from uapi/linux/lsm.h
+ *
+ * Contains the information that identifies the LSM.
+ */
+struct lsm_id {
+ const char *name;
+ u64 id;
+};
+
/*
* Security module hook list structure.
* For use with generic list macros for common operations.
@@ -50,7 +63,7 @@ struct security_hook_list {
struct hlist_node list;
struct hlist_head *head;
union security_list_options hook;
- const char *lsm;
+ const struct lsm_id *lsmid;
} __randomize_layout;
/*
@@ -104,7 +117,7 @@ extern struct security_hook_heads security_hook_heads;
extern char *lsm_names;
extern void security_add_hooks(struct security_hook_list *hooks, int count,
- const char *lsm);
+ const struct lsm_id *lsmid);
#define LSM_FLAG_LEGACY_MAJOR BIT(0)
#define LSM_FLAG_EXCLUSIVE BIT(1)
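A hedged sketch of registering against the reworked interface; the "demo" names and the use of LSM_ID_UNDEF as a stand-in ID are assumptions, not kernel code.

static int demo_file_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	return 0;
}

static const struct lsm_id demo_lsmid = {
	.name = "demo",
	.id = LSM_ID_UNDEF,	/* a real LSM uses its assigned LSM_ID_* value */
};

static struct security_hook_list demo_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(file_ioctl, demo_file_ioctl),
	LSM_HOOK_INIT(file_ioctl_compat, demo_file_ioctl),	/* new hook */
};

static int __init demo_lsm_init(void)
{
	security_add_hooks(demo_hooks, ARRAY_SIZE(demo_hooks), &demo_lsmid);
	return 0;
}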
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index d01e850b570f..b3d63123b945 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -256,6 +256,8 @@ struct maple_tree {
struct maple_tree name = MTREE_INIT(name, 0)
#define mtree_lock(mt) spin_lock((&(mt)->ma_lock))
+#define mtree_lock_nested(mt, subclass) \
+ spin_lock_nested((&(mt)->ma_lock), subclass)
#define mtree_unlock(mt) spin_unlock((&(mt)->ma_lock))
/*
@@ -327,6 +329,9 @@ int mtree_store(struct maple_tree *mt, unsigned long index,
void *entry, gfp_t gfp);
void *mtree_erase(struct maple_tree *mt, unsigned long index);
+int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
+int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
+
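A sketch of cloning a tree with the new bulk-duplication helper. Whether the destination needs explicit cleanup on failure is an assumption here; calling mtree_destroy() on a partially built tree is safe either way.

static int demo_clone_tree(struct maple_tree *src, struct maple_tree *dst)
{
	int ret;

	mt_init(dst);				/* dst must start out empty */
	ret = mtree_dup(src, dst, GFP_KERNEL);	/* -ENOMEM on allocation failure */
	if (ret)
		mtree_destroy(dst);
	return ret;
}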
void mtree_destroy(struct maple_tree *mt);
void __mt_destroy(struct maple_tree *mt);
@@ -345,6 +350,36 @@ static inline bool mtree_empty(const struct maple_tree *mt)
/* Advanced API */
/*
+ * Maple State Status
+ * ma_active means the maple state is pointing to a node and offset and can
+ * continue operating on the tree.
+ * ma_start means we have not searched the tree.
+ * ma_root means we have searched the tree and the entry we found lives in
+ * the root of the tree (ie it has index 0, length 1 and is the only entry in
+ * the tree).
+ * ma_none means we have searched the tree and there is no node in the
+ * tree for this entry. For example, we searched for index 1 in an empty
+ * tree. Or we have a tree which points to a full leaf node and we
+ * searched for an entry which is larger than can be contained in that
+ * leaf node.
+ * ma_pause means the data within the maple state may be stale; restart the
+ * operation.
+ * ma_overflow means the search has reached the upper limit of the search.
+ * ma_underflow means the search has reached the lower limit of the search.
+ * ma_error means there was an error, check the node for the error number.
+ */
+enum maple_status {
+ ma_active,
+ ma_start,
+ ma_root,
+ ma_none,
+ ma_pause,
+ ma_overflow,
+ ma_underflow,
+ ma_error,
+};
+
+/*
* The maple state is defined in the struct ma_state and is used to keep track
* of information during operations, and even between operations when using the
* advanced API.
@@ -376,6 +411,13 @@ static inline bool mtree_empty(const struct maple_tree *mt)
* When returning a value the maple state index and last respectively contain
* the start and end of the range for the entry. Ranges are inclusive in the
* Maple Tree.
+ *
+ * The status of the state is used to determine how the next action should treat
+ * the state. For instance, if the status is ma_start then the next action
+ * should start at the root of the tree and walk down. If the status is
+ * ma_pause then the node may be stale data and should be discarded. If the
+ * status is ma_overflow, then the last action hit the upper limit.
+ *
*/
struct ma_state {
struct maple_tree *tree; /* The tree we're operating in */
@@ -385,9 +427,11 @@ struct ma_state {
unsigned long min; /* The minimum index of this node - implied pivot min */
unsigned long max; /* The maximum index of this node - implied pivot max */
struct maple_alloc *alloc; /* Allocated nodes for this operation */
+ enum maple_status status; /* The status of the state (active, start, none, etc) */
unsigned char depth; /* depth of tree descent during write */
unsigned char offset;
unsigned char mas_flags;
+ unsigned char end; /* The end of the node */
};
struct ma_wr_state {
@@ -397,7 +441,6 @@ struct ma_wr_state {
unsigned long r_max; /* range max */
enum maple_type type; /* mas->node type */
unsigned char offset_end; /* The offset where the write ends */
- unsigned char node_end; /* mas->node end */
unsigned long *pivots; /* mas->node->pivots pointer */
unsigned long end_piv; /* The pivot at the offset end */
void __rcu **slots; /* mas->node->slots pointer */
@@ -406,30 +449,16 @@ struct ma_wr_state {
};
#define mas_lock(mas) spin_lock(&((mas)->tree->ma_lock))
+#define mas_lock_nested(mas, subclass) \
+ spin_lock_nested(&((mas)->tree->ma_lock), subclass)
#define mas_unlock(mas) spin_unlock(&((mas)->tree->ma_lock))
-
/*
* Special values for ma_state.node.
- * MAS_START means we have not searched the tree.
- * MAS_ROOT means we have searched the tree and the entry we found lives in
- * the root of the tree (ie it has index 0, length 1 and is the only entry in
- * the tree).
- * MAS_NONE means we have searched the tree and there is no node in the
- * tree for this entry. For example, we searched for index 1 in an empty
- * tree. Or we have a tree which points to a full leaf node and we
- * searched for an entry which is larger than can be contained in that
- * leaf node.
* MA_ERROR represents an errno. After dropping the lock and attempting
* to resolve the error, the walk would have to be restarted from the
* top of the tree as the tree may have been modified.
*/
-#define MAS_START ((struct maple_enode *)1UL)
-#define MAS_ROOT ((struct maple_enode *)5UL)
-#define MAS_NONE ((struct maple_enode *)9UL)
-#define MAS_PAUSE ((struct maple_enode *)17UL)
-#define MAS_OVERFLOW ((struct maple_enode *)33UL)
-#define MAS_UNDERFLOW ((struct maple_enode *)65UL)
#define MA_ERROR(err) \
((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
@@ -438,7 +467,8 @@ struct ma_wr_state {
.tree = mt, \
.index = first, \
.last = end, \
- .node = MAS_START, \
+ .node = NULL, \
+ .status = ma_start, \
.min = 0, \
.max = ULONG_MAX, \
.alloc = NULL, \
@@ -469,7 +499,6 @@ void *mas_find_range(struct ma_state *mas, unsigned long max);
void *mas_find_rev(struct ma_state *mas, unsigned long min);
void *mas_find_range_rev(struct ma_state *mas, unsigned long max);
int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
-bool mas_is_err(struct ma_state *mas);
bool mas_nomem(struct ma_state *mas, gfp_t gfp);
void mas_pause(struct ma_state *mas);
@@ -498,28 +527,18 @@ static inline void mas_init(struct ma_state *mas, struct maple_tree *tree,
mas->tree = tree;
mas->index = mas->last = addr;
mas->max = ULONG_MAX;
- mas->node = MAS_START;
-}
-
-/* Checks if a mas has not found anything */
-static inline bool mas_is_none(const struct ma_state *mas)
-{
- return mas->node == MAS_NONE;
+ mas->status = ma_start;
+ mas->node = NULL;
}
-/* Checks if a mas has been paused */
-static inline bool mas_is_paused(const struct ma_state *mas)
+static inline bool mas_is_active(struct ma_state *mas)
{
- return mas->node == MAS_PAUSE;
+ return mas->status == ma_active;
}
-/* Check if the mas is pointing to a node or not */
-static inline bool mas_is_active(struct ma_state *mas)
+static inline bool mas_is_err(struct ma_state *mas)
{
- if ((unsigned long)mas->node >= MAPLE_RESERVED_RANGE)
- return true;
-
- return false;
+ return mas->status == ma_error;
}
/**
@@ -532,9 +551,10 @@ static inline bool mas_is_active(struct ma_state *mas)
*
* Context: Any context.
*/
-static inline void mas_reset(struct ma_state *mas)
+static __always_inline void mas_reset(struct ma_state *mas)
{
- mas->node = MAS_START;
+ mas->status = ma_start;
+ mas->node = NULL;
}
/**
@@ -550,6 +570,131 @@ static inline void mas_reset(struct ma_state *mas)
*/
#define mas_for_each(__mas, __entry, __max) \
while (((__entry) = mas_find((__mas), (__max))) != NULL)
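For context, a sketch (assuming a caller-managed lock) of iterating with the advanced API now that iteration state lives in ma_state->status rather than special node pointers; demo_dump_range() is hypothetical.

static void demo_dump_range(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 0);
	void *entry;

	mas_lock(&mas);
	mas_for_each(&mas, entry, ULONG_MAX) {
		pr_info("entry [%lx, %lx] = %p\n", mas.index, mas.last, entry);
		if (need_resched()) {
			mas_pause(&mas);	/* mark the state stale before dropping the lock */
			mas_unlock(&mas);
			cond_resched();
			mas_lock(&mas);
		}
	}
	mas_unlock(&mas);
}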
+
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+enum mt_dump_format {
+ mt_dump_dec,
+ mt_dump_hex,
+};
+
+extern atomic_t maple_tree_tests_run;
+extern atomic_t maple_tree_tests_passed;
+
+void mt_dump(const struct maple_tree *mt, enum mt_dump_format format);
+void mas_dump(const struct ma_state *mas);
+void mas_wr_dump(const struct ma_wr_state *wr_mas);
+void mt_validate(struct maple_tree *mt);
+void mt_cache_shrink(void);
+#define MT_BUG_ON(__tree, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MAS_BUG_ON(__mas, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_dump(__mas); \
+ mt_dump((__mas)->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MAS_WR_BUG_ON(__wrmas, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_wr_dump(__wrmas); \
+ mas_dump((__wrmas)->mas); \
+ mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MT_WARN_ON(__tree, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+
+#define MAS_WARN_ON(__mas, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_dump(__mas); \
+ mt_dump((__mas)->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+
+#define MAS_WR_WARN_ON(__wrmas, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_wr_dump(__wrmas); \
+ mas_dump((__wrmas)->mas); \
+ mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+#else
+#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
+#define MAS_BUG_ON(__mas, __x) BUG_ON(__x)
+#define MAS_WR_BUG_ON(__mas, __x) BUG_ON(__x)
+#define MT_WARN_ON(__tree, __x) WARN_ON(__x)
+#define MAS_WARN_ON(__mas, __x) WARN_ON(__x)
+#define MAS_WR_WARN_ON(__mas, __x) WARN_ON(__x)
+#endif /* CONFIG_DEBUG_MAPLE_TREE */
+
/**
* __mas_set_range() - Set up Maple Tree operation state to a sub-range of the
* current location.
@@ -563,6 +708,9 @@ static inline void mas_reset(struct ma_state *mas)
static inline void __mas_set_range(struct ma_state *mas, unsigned long start,
unsigned long last)
{
+ /* Ensure the range starts within the current slot */
+ MAS_WARN_ON(mas, mas_is_active(mas) &&
+ (mas->index > start || mas->last < start));
mas->index = start;
mas->last = last;
}
@@ -580,8 +728,8 @@ static inline void __mas_set_range(struct ma_state *mas, unsigned long start,
static inline
void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
{
+ mas_reset(mas);
__mas_set_range(mas, start, last);
- mas->node = MAS_START;
}
/**
@@ -706,129 +854,4 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);
for (__entry = mt_find(__tree, &(__index), __max); \
__entry; __entry = mt_find_after(__tree, &(__index), __max))
-
-#ifdef CONFIG_DEBUG_MAPLE_TREE
-enum mt_dump_format {
- mt_dump_dec,
- mt_dump_hex,
-};
-
-extern atomic_t maple_tree_tests_run;
-extern atomic_t maple_tree_tests_passed;
-
-void mt_dump(const struct maple_tree *mt, enum mt_dump_format format);
-void mas_dump(const struct ma_state *mas);
-void mas_wr_dump(const struct ma_wr_state *wr_mas);
-void mt_validate(struct maple_tree *mt);
-void mt_cache_shrink(void);
-#define MT_BUG_ON(__tree, __x) do { \
- atomic_inc(&maple_tree_tests_run); \
- if (__x) { \
- pr_info("BUG at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mt_dump(__tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
-} while (0)
-
-#define MAS_BUG_ON(__mas, __x) do { \
- atomic_inc(&maple_tree_tests_run); \
- if (__x) { \
- pr_info("BUG at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mas_dump(__mas); \
- mt_dump((__mas)->tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
-} while (0)
-
-#define MAS_WR_BUG_ON(__wrmas, __x) do { \
- atomic_inc(&maple_tree_tests_run); \
- if (__x) { \
- pr_info("BUG at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mas_wr_dump(__wrmas); \
- mas_dump((__wrmas)->mas); \
- mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
-} while (0)
-
-#define MT_WARN_ON(__tree, __x) ({ \
- int ret = !!(__x); \
- atomic_inc(&maple_tree_tests_run); \
- if (ret) { \
- pr_info("WARN at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mt_dump(__tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
- unlikely(ret); \
-})
-
-#define MAS_WARN_ON(__mas, __x) ({ \
- int ret = !!(__x); \
- atomic_inc(&maple_tree_tests_run); \
- if (ret) { \
- pr_info("WARN at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mas_dump(__mas); \
- mt_dump((__mas)->tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
- unlikely(ret); \
-})
-
-#define MAS_WR_WARN_ON(__wrmas, __x) ({ \
- int ret = !!(__x); \
- atomic_inc(&maple_tree_tests_run); \
- if (ret) { \
- pr_info("WARN at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mas_wr_dump(__wrmas); \
- mas_dump((__wrmas)->mas); \
- mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
- unlikely(ret); \
-})
-#else
-#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
-#define MAS_BUG_ON(__mas, __x) BUG_ON(__x)
-#define MAS_WR_BUG_ON(__mas, __x) BUG_ON(__x)
-#define MT_WARN_ON(__tree, __x) WARN_ON(__x)
-#define MAS_WARN_ON(__mas, __x) WARN_ON(__x)
-#define MAS_WR_WARN_ON(__mas, __x) WARN_ON(__x)
-#endif /* CONFIG_DEBUG_MAPLE_TREE */
-
#endif /*_LINUX_MAPLE_TREE_H */
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index ae3bde302f70..b695f9e946da 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -123,6 +123,7 @@ int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
phys_addr_t base, phys_addr_t size);
+bool memblock_validate_numa_coverage(unsigned long threshold_bytes);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 7bdcf3020d7a..20ff87f8e001 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -219,6 +219,12 @@ struct mem_cgroup {
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
unsigned long zswap_max;
+
+ /*
+ * Prevent pages from this memcg from being written back from zswap to
+ * swap, and from being swapped out on zswap store failures.
+ */
+ bool zswap_writeback;
#endif
unsigned long soft_limit;
@@ -324,7 +330,7 @@ struct mem_cgroup {
struct deferred_split deferred_split_queue;
#endif
-#ifdef CONFIG_LRU_GEN
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
/* per-memcg mm_struct list */
struct lru_gen_mm_list mm_list;
#endif
@@ -821,6 +827,11 @@ static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
return !memcg || css_tryget(&memcg->css);
}
+static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
+{
+ return !memcg || css_tryget_online(&memcg->css);
+}
+
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
if (memcg)
@@ -1046,8 +1057,8 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return x;
}
-void mem_cgroup_flush_stats(void);
-void mem_cgroup_flush_stats_ratelimited(void);
+void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
+void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
int val);
@@ -1187,6 +1198,11 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page)
return NULL;
}
+static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
+{
+ return NULL;
+}
+
static inline bool folio_memcg_kmem(struct folio *folio)
{
return false;
@@ -1349,6 +1365,11 @@ static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
return true;
}
+static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
+{
+ return true;
+}
+
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}
@@ -1548,11 +1569,11 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return node_page_state(lruvec_pgdat(lruvec), idx);
}
-static inline void mem_cgroup_flush_stats(void)
+static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
}
-static inline void mem_cgroup_flush_stats_ratelimited(void)
+static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
}
@@ -1926,6 +1947,7 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
+bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
#else
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
@@ -1939,6 +1961,11 @@ static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
size_t size)
{
}
+static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
+{
+ /* if zswap is disabled, do not block pages going to the swapping device */
+ return true;
+}
#endif
#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 4aae6c06c5f2..7be1e32e6d42 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -51,6 +51,7 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
extern int mempool_resize(mempool_t *pool, int new_min_nr);
extern void mempool_destroy(mempool_t *pool);
extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
+extern void *mempool_alloc_preallocated(mempool_t *pool) __malloc;
extern void mempool_free(void *element, mempool_t *pool);
/*
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 1314d9c5f05b..744c830f4b13 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -196,8 +196,6 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
struct dev_pagemap *pgmap);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
-unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
-void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
unsigned long memremap_compat_align(void);
#else
static inline void *devm_memremap_pages(struct device *dev,
@@ -228,16 +226,6 @@ static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
return false;
}
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
-{
- return 0;
-}
-
-static inline void vmem_altmap_free(struct vmem_altmap *altmap,
- unsigned long nr_pfns)
-{
-}
-
/* when memremap_pages() is disabled all archs can remap a single page */
static inline unsigned long memremap_compat_align(void)
{
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6f3631425f38..3f7b664d625b 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -621,7 +621,7 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_140[0x8];
u8 bth_dst_qp[0x18];
- u8 reserved_at_160[0x20];
+ u8 inner_esp_spi[0x20];
u8 outer_esp_spi[0x20];
u8 reserved_at_1a0[0x60];
};
@@ -12001,6 +12001,13 @@ enum {
MLX5_IPSEC_ASO_INC_SN = 0x2,
};
+enum {
+ MLX5_IPSEC_ASO_REPLAY_WIN_32BIT = 0x0,
+ MLX5_IPSEC_ASO_REPLAY_WIN_64BIT = 0x1,
+ MLX5_IPSEC_ASO_REPLAY_WIN_128BIT = 0x2,
+ MLX5_IPSEC_ASO_REPLAY_WIN_256BIT = 0x3,
+};
+
struct mlx5_ifc_ipsec_aso_bits {
u8 valid[0x1];
u8 reserved_at_201[0x1];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 418d26608ece..f5a97dec5169 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -886,8 +886,8 @@ static inline bool vma_is_anonymous(struct vm_area_struct *vma)
*/
static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
{
- return vma->vm_start <= vma->vm_mm->brk &&
- vma->vm_end >= vma->vm_mm->start_brk;
+ return vma->vm_start < vma->vm_mm->brk &&
+ vma->vm_end > vma->vm_mm->start_brk;
}
/*
@@ -901,8 +901,8 @@ static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
* its "stack". It's not even well-defined for programs written
* in languages like Go.
*/
- return vma->vm_start <= vma->vm_mm->start_stack &&
- vma->vm_end >= vma->vm_mm->start_stack;
+ return vma->vm_start <= vma->vm_mm->start_stack &&
+ vma->vm_end >= vma->vm_mm->start_stack;
}
static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
@@ -994,6 +994,17 @@ static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
return mas_expected_entries(&vmi->mas, count);
}
+static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
+ unsigned long start, unsigned long end, gfp_t gfp)
+{
+ __mas_set_range(&vmi->mas, start, end - 1);
+ mas_store_gfp(&vmi->mas, NULL, gfp);
+ if (unlikely(mas_is_err(&vmi->mas)))
+ return -ENOMEM;
+
+ return 0;
+}
+
/* Free any unused preallocations */
static inline void vma_iter_free(struct vma_iterator *vmi)
{
@@ -1804,7 +1815,7 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
static inline u8 page_kasan_tag(const struct page *page)
{
- u8 tag = 0xff;
+ u8 tag = KASAN_TAG_KERNEL;
if (kasan_enabled()) {
tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
@@ -1833,7 +1844,7 @@ static inline void page_kasan_tag_set(struct page *page, u8 tag)
static inline void page_kasan_tag_reset(struct page *page)
{
if (kasan_enabled())
- page_kasan_tag_set(page, 0xff);
+ page_kasan_tag_set(page, KASAN_TAG_KERNEL);
}
#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
@@ -1953,15 +1964,15 @@ static inline bool page_maybe_dma_pinned(struct page *page)
*
* The caller has to hold the PT lock and the vma->vm_mm->->write_protect_seq.
*/
-static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
- struct page *page)
+static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
+ struct folio *folio)
{
VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
return false;
- return page_maybe_dma_pinned(page);
+ return folio_maybe_dma_pinned(folio);
}
/**
@@ -2373,7 +2384,8 @@ extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
-int generic_error_remove_page(struct address_space *mapping, struct page *page);
+int generic_error_remove_folio(struct address_space *mapping,
+ struct folio *folio);
struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
unsigned long address, struct pt_regs *regs);
@@ -3859,6 +3871,32 @@ void vmemmap_free(unsigned long start, unsigned long end,
struct vmem_altmap *altmap);
#endif
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+{
+ /* number of pfns from base where pfn_to_page() is valid */
+ if (altmap)
+ return altmap->reserve + altmap->free;
+ return 0;
+}
+
+static inline void vmem_altmap_free(struct vmem_altmap *altmap,
+ unsigned long nr_pfns)
+{
+ altmap->alloc -= nr_pfns;
+}
+#else
+static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+{
+ return 0;
+}
+
+static inline void vmem_altmap_free(struct vmem_altmap *altmap,
+ unsigned long nr_pfns)
+{
+}
+#endif
+
#define VMEMMAP_RESERVE_NR 2
#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
@@ -3904,6 +3942,7 @@ enum mf_flags {
MF_UNPOISON = 1 << 4,
MF_SW_SIMULATED = 1 << 5,
MF_NO_RETRY = 1 << 6,
+ MF_MEM_PRE_REMOVE = 1 << 7,
};
int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
unsigned long count, int mf_flags);
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 9ae7def16cb2..f4fe593c1400 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -232,22 +232,27 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio,
if (folio_test_unevictable(folio) || !lrugen->enabled)
return false;
/*
- * There are three common cases for this page:
- * 1. If it's hot, e.g., freshly faulted in or previously hot and
- * migrated, add it to the youngest generation.
- * 2. If it's cold but can't be evicted immediately, i.e., an anon page
- * not in swapcache or a dirty page pending writeback, add it to the
- * second oldest generation.
- * 3. Everything else (clean, cold) is added to the oldest generation.
+ * There are four common cases for this page:
+ * 1. If it's hot, i.e., freshly faulted in, add it to the youngest
+ * generation, and it's protected over the rest below.
+ * 2. If it can't be evicted immediately, i.e., a dirty page pending
+ * writeback, add it to the second youngest generation.
+ * 3. If it should be evicted first, e.g., cold and clean from
+ * folio_rotate_reclaimable(), add it to the oldest generation.
+ * 4. Everything else falls between 2 & 3 above and is added to the
+ * second oldest generation if it's considered inactive, or the
+ * oldest generation otherwise. See lru_gen_is_active().
*/
if (folio_test_active(folio))
seq = lrugen->max_seq;
else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
(folio_test_reclaim(folio) &&
(folio_test_dirty(folio) || folio_test_writeback(folio))))
- seq = lrugen->min_seq[type] + 1;
- else
+ seq = lrugen->max_seq - 1;
+ else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq)
seq = lrugen->min_seq[type];
+ else
+ seq = lrugen->min_seq[type] + 1;
gen = lru_gen_from_seq(seq);
flags = (gen + 1UL) << LRU_GEN_PGOFF;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 957ce38768b2..4dd996ad0bd3 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -401,11 +401,11 @@ FOLIO_MATCH(compound_head, _head_2a);
* @pmd_huge_pte: Protected by ptdesc->ptl, used for THPs.
* @__page_mapping: Aliases with page->mapping. Unused for page tables.
* @pt_mm: Used for x86 pgds.
- * @pt_frag_refcount: For fragmented page table tracking. Powerpc and s390 only.
+ * @pt_frag_refcount: For fragmented page table tracking. Powerpc only.
* @_pt_pad_2: Padding to ensure proper alignment.
* @ptl: Lock for the page table.
* @__page_type: Same as page->page_type. Unused for page tables.
- * @_refcount: Same as page refcount. Used for s390 page tables.
+ * @__page_refcount: Same as page refcount.
* @pt_memcg_data: Memcg data. Tracked for page tables here.
*
* This struct overlays struct page for now. Do not modify without a good
@@ -438,7 +438,7 @@ struct ptdesc {
#endif
};
unsigned int __page_type;
- atomic_t _refcount;
+ atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
unsigned long pt_memcg_data;
#endif
@@ -452,7 +452,7 @@ TABLE_MATCH(compound_head, _pt_pad_1);
TABLE_MATCH(mapping, __page_mapping);
TABLE_MATCH(rcu_head, pt_rcu_head);
TABLE_MATCH(page_type, __page_type);
-TABLE_MATCH(_refcount, _refcount);
+TABLE_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
TABLE_MATCH(memcg_data, pt_memcg_data);
#endif
@@ -600,6 +600,9 @@ struct vma_numab_state {
*/
unsigned long pids_active[2];
+ /* MM scan sequence ID when scan first started after VMA creation */
+ int start_scan_seq;
+
/*
* MM scan sequence ID when the VMA was last completely scanned.
* A VMA is not eligible for scanning if prev_scan_seq == numa_scan_seq
@@ -958,7 +961,7 @@ struct mm_struct {
*/
unsigned long ksm_zero_pages;
#endif /* CONFIG_KSM */
-#ifdef CONFIG_LRU_GEN
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
struct {
/* this mm_struct is on lru_gen_mm_list */
struct list_head list;
@@ -973,7 +976,7 @@ struct mm_struct {
struct mem_cgroup *memcg;
#endif
} lru_gen;
-#endif /* CONFIG_LRU_GEN */
+#endif /* CONFIG_LRU_GEN_WALKS_MMU */
} __randomize_layout;
/*
@@ -1011,11 +1014,13 @@ struct lru_gen_mm_list {
spinlock_t lock;
};
+#endif /* CONFIG_LRU_GEN */
+
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
+
void lru_gen_add_mm(struct mm_struct *mm);
void lru_gen_del_mm(struct mm_struct *mm);
-#ifdef CONFIG_MEMCG
void lru_gen_migrate_mm(struct mm_struct *mm);
-#endif
static inline void lru_gen_init_mm(struct mm_struct *mm)
{
@@ -1036,7 +1041,7 @@ static inline void lru_gen_use_mm(struct mm_struct *mm)
WRITE_ONCE(mm->lru_gen.bitmap, -1);
}
-#else /* !CONFIG_LRU_GEN */
+#else /* !CONFIG_LRU_GEN_WALKS_MMU */
static inline void lru_gen_add_mm(struct mm_struct *mm)
{
@@ -1046,11 +1051,9 @@ static inline void lru_gen_del_mm(struct mm_struct *mm)
{
}
-#ifdef CONFIG_MEMCG
static inline void lru_gen_migrate_mm(struct mm_struct *mm)
{
}
-#endif
static inline void lru_gen_init_mm(struct mm_struct *mm)
{
@@ -1060,7 +1063,7 @@ static inline void lru_gen_use_mm(struct mm_struct *mm)
{
}
-#endif /* CONFIG_LRU_GEN */
+#endif /* CONFIG_LRU_GEN_WALKS_MMU */
struct vma_iterator {
struct ma_state mas;
@@ -1071,7 +1074,8 @@ struct vma_iterator {
.mas = { \
.tree = &(__mm)->mm_mt, \
.index = __addr, \
- .node = MAS_START, \
+ .node = NULL, \
+ .status = ma_start, \
}, \
}
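The TABLE_MATCH() line above relies on the usual struct-overlay trick: a compile-time assertion that the renamed ptdesc field still sits at the same offset as the struct page field it aliases. A standalone sketch of that technique, using illustrative stand-in types rather than the real kernel structs:

#include <assert.h>
#include <stddef.h>

struct page_like {			/* stand-in for struct page */
	unsigned long flags;
	int _refcount;
};

struct ptdesc_like {			/* stand-in for struct ptdesc */
	unsigned long _pt_pad_1;
	int __page_refcount;		/* aliases page_like._refcount */
};

/* the TABLE_MATCH() idea: the two fields must overlay exactly */
static_assert(offsetof(struct page_like, _refcount) ==
	      offsetof(struct ptdesc_like, __page_refcount),
	      "ptdesc overlay drifted from struct page");

int main(void) { return 0; }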
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3c25226beeed..4ed33b127821 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -22,18 +22,21 @@
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/local_lock.h>
+#include <linux/zswap.h>
#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_ARCH_FORCE_MAX_ORDER
-#define MAX_ORDER 10
+#define MAX_PAGE_ORDER 10
#else
-#define MAX_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
+#define MAX_PAGE_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
#endif
-#define MAX_ORDER_NR_PAGES (1 << MAX_ORDER)
+#define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER)
#define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)
+#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1)
+
/*
* PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
* costly to service. That is between allocation orders which should
@@ -95,7 +98,7 @@ static inline bool migratetype_is_mergeable(int mt)
}
#define for_each_migratetype_order(order, type) \
- for (order = 0; order <= MAX_ORDER; order++) \
+ for (order = 0; order < NR_PAGE_ORDERS; order++) \
for (type = 0; type < MIGRATE_TYPES; type++)
extern int page_group_by_mobility_disabled;
@@ -207,6 +210,10 @@ enum node_stat_item {
PGPROMOTE_SUCCESS, /* promote successfully */
PGPROMOTE_CANDIDATE, /* candidate pages to promote */
#endif
+ /* PGDEMOTE_*: pages demoted */
+ PGDEMOTE_KSWAPD,
+ PGDEMOTE_DIRECT,
+ PGDEMOTE_KHUGEPAGED,
NR_VM_NODE_STAT_ITEMS
};
@@ -435,14 +442,12 @@ struct lru_gen_folio {
atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
/* whether the multi-gen LRU is enabled */
bool enabled;
-#ifdef CONFIG_MEMCG
/* the memcg generation this lru_gen_folio belongs to */
u8 gen;
/* the list segment this lru_gen_folio belongs to */
u8 seg;
/* per-node lru_gen_folio list for global reclaim */
struct hlist_nulls_node list;
-#endif
};
enum {
@@ -488,11 +493,6 @@ struct lru_gen_mm_walk {
bool force_scan;
};
-void lru_gen_init_lruvec(struct lruvec *lruvec);
-void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
-
-#ifdef CONFIG_MEMCG
-
/*
* For each node, memcgs are divided into two generations: the old and the
* young. For each generation, memcgs are randomly sharded into multiple bins
@@ -505,33 +505,37 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
* the old generation, is incremented when all its bins become empty.
*
* There are four operations:
- * 1. MEMCG_LRU_HEAD, which moves an memcg to the head of a random bin in its
+ * 1. MEMCG_LRU_HEAD, which moves a memcg to the head of a random bin in its
* current generation (old or young) and updates its "seg" to "head";
- * 2. MEMCG_LRU_TAIL, which moves an memcg to the tail of a random bin in its
+ * 2. MEMCG_LRU_TAIL, which moves a memcg to the tail of a random bin in its
* current generation (old or young) and updates its "seg" to "tail";
- * 3. MEMCG_LRU_OLD, which moves an memcg to the head of a random bin in the old
+ * 3. MEMCG_LRU_OLD, which moves a memcg to the head of a random bin in the old
* generation, updates its "gen" to "old" and resets its "seg" to "default";
- * 4. MEMCG_LRU_YOUNG, which moves an memcg to the tail of a random bin in the
+ * 4. MEMCG_LRU_YOUNG, which moves a memcg to the tail of a random bin in the
* young generation, updates its "gen" to "young" and resets its "seg" to
* "default".
*
* The events that trigger the above operations are:
* 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD;
- * 2. The first attempt to reclaim an memcg below low, which triggers
+ * 2. The first attempt to reclaim a memcg below low, which triggers
* MEMCG_LRU_TAIL;
- * 3. The first attempt to reclaim an memcg below reclaimable size threshold,
- * which triggers MEMCG_LRU_TAIL;
- * 4. The second attempt to reclaim an memcg below reclaimable size threshold,
- * which triggers MEMCG_LRU_YOUNG;
- * 5. Attempting to reclaim an memcg below min, which triggers MEMCG_LRU_YOUNG;
+ * 3. The first attempt to reclaim a memcg offlined or below reclaimable size
+ * threshold, which triggers MEMCG_LRU_TAIL;
+ * 4. The second attempt to reclaim a memcg offlined or below reclaimable size
+ * threshold, which triggers MEMCG_LRU_YOUNG;
+ * 5. Attempting to reclaim a memcg below min, which triggers MEMCG_LRU_YOUNG;
* 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG;
- * 7. Offlining an memcg, which triggers MEMCG_LRU_OLD.
+ * 7. Offlining a memcg, which triggers MEMCG_LRU_OLD.
*
- * Note that memcg LRU only applies to global reclaim, and the round-robin
- * incrementing of their max_seq counters ensures the eventual fairness to all
- * eligible memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
+ * Notes:
+ * 1. Memcg LRU only applies to global reclaim, and the round-robin incrementing
+ * of their max_seq counters ensures the eventual fairness to all eligible
+ * memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
+ * 2. There are only two valid generations: old (seq) and young (seq+1).
+ * MEMCG_NR_GENS is set to three so that when reading the generation counter
+ * locklessly, a stale value (seq-1) does not wraparound to young.
*/
-#define MEMCG_NR_GENS 2
+#define MEMCG_NR_GENS 3
#define MEMCG_NR_BINS 8
struct lru_gen_memcg {
@@ -546,6 +550,8 @@ struct lru_gen_memcg {
};
void lru_gen_init_pgdat(struct pglist_data *pgdat);
+void lru_gen_init_lruvec(struct lruvec *lruvec);
+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
void lru_gen_init_memcg(struct mem_cgroup *memcg);
void lru_gen_exit_memcg(struct mem_cgroup *memcg);
@@ -554,19 +560,6 @@ void lru_gen_offline_memcg(struct mem_cgroup *memcg);
void lru_gen_release_memcg(struct mem_cgroup *memcg);
void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
-#else /* !CONFIG_MEMCG */
-
-#define MEMCG_NR_GENS 1
-
-struct lru_gen_memcg {
-};
-
-static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
-{
-}
-
-#endif /* CONFIG_MEMCG */
-
#else /* !CONFIG_LRU_GEN */
static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
@@ -581,8 +574,6 @@ static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
{
}
-#ifdef CONFIG_MEMCG
-
static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
}
@@ -607,8 +598,6 @@ static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
{
}
-#endif /* CONFIG_MEMCG */
-
#endif /* CONFIG_LRU_GEN */
struct lruvec {
@@ -631,12 +620,15 @@ struct lruvec {
#ifdef CONFIG_LRU_GEN
/* evictable pages divided into generations */
struct lru_gen_folio lrugen;
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
/* to concurrently iterate lru_gen_mm_list */
struct lru_gen_mm_state mm_state;
#endif
+#endif /* CONFIG_LRU_GEN */
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
+ struct zswap_lruvec_state zswap_lruvec_state;
};
/* Isolate for asynchronous migration */
@@ -943,10 +935,10 @@ struct zone {
CACHELINE_PADDING(_pad1_);
/* free areas of different sizes */
- struct free_area free_area[MAX_ORDER + 1];
+ struct free_area free_area[NR_PAGE_ORDERS];
#ifdef CONFIG_UNACCEPTED_MEMORY
- /* Pages to be accepted. All pages on the list are MAX_ORDER */
+ /* Pages to be accepted. All pages on the list are MAX_PAGE_ORDER */
struct list_head unaccepted_pages;
#endif
@@ -1756,8 +1748,8 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
#define SECTION_BLOCKFLAGS_BITS \
((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
-#if (MAX_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS
-#error Allocator MAX_ORDER exceeds SECTION_SIZE
+#if (MAX_PAGE_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS
+#error Allocator MAX_PAGE_ORDER exceeds SECTION_SIZE
#endif
static inline unsigned long pfn_to_section_nr(unsigned long pfn)
@@ -1789,6 +1781,7 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
struct mem_section_usage {
+ struct rcu_head rcu;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
#endif
@@ -1982,7 +1975,7 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
int idx = subsection_map_index(pfn);
- return test_bit(idx, ms->usage->subsection_map);
+ return test_bit(idx, READ_ONCE(ms->usage)->subsection_map);
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
@@ -2006,6 +1999,7 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
static inline int pfn_valid(unsigned long pfn)
{
struct mem_section *ms;
+ int ret;
/*
* Ensure the upper PAGE_SHIFT bits are clear in the
@@ -2019,13 +2013,19 @@ static inline int pfn_valid(unsigned long pfn)
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
ms = __pfn_to_section(pfn);
- if (!valid_section(ms))
+ rcu_read_lock();
+ if (!valid_section(ms)) {
+ rcu_read_unlock();
return 0;
+ }
/*
* Traditionally early sections always returned pfn_valid() for
* the entire section-sized span.
*/
- return early_section(ms) || pfn_section_valid(ms, pfn);
+ ret = early_section(ms) || pfn_section_valid(ms, pfn);
+ rcu_read_unlock();
+
+ return ret;
}
#endif
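The MEMCG_NR_GENS bump from 2 to 3 is easiest to see with the modulo arithmetic the note above describes: with only two generations a stale (seq - 1) read aliases the young generation (seq + 1), while with three it cannot. A small standalone check, purely illustrative:

#include <stdio.h>

int main(void)
{
	unsigned long seq = 7;		/* current "old" generation */
	unsigned long stale = seq - 1;	/* value seen by a lockless reader */
	unsigned long young = seq + 1;

	/* with 2 gens the stale value is mistaken for young ... */
	printf("2 gens: stale aliases young? %d\n", (stale % 2) == (young % 2));
	/* ... with 3 gens it is not */
	printf("3 gens: stale aliases young? %d\n", (stale % 3) == (young % 3));
	return 0;
}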
diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h
index b8da2db4ecd2..cd4d5c8781f5 100644
--- a/include/linux/mnt_idmapping.h
+++ b/include/linux/mnt_idmapping.h
@@ -244,7 +244,4 @@ static inline kgid_t mapped_fsgid(struct mnt_idmap *idmap,
return from_vfsgid(idmap, fs_userns, VFSGIDT_INIT(current_fsgid()));
}
-bool check_fsmapping(const struct mnt_idmap *idmap,
- const struct super_block *sb);
-
#endif /* _LINUX_MNT_IDMAPPING_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index a98e188cf37b..9cd0009bd050 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -540,6 +540,8 @@ struct module {
struct static_call_site *static_call_sites;
#endif
#if IS_ENABLED(CONFIG_KUNIT)
+ int num_kunit_init_suites;
+ struct kunit_suite **kunit_init_suites;
int num_kunit_suites;
struct kunit_suite **kunit_suites;
#endif
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 4fa9726bc328..bfb85fd13e1f 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -385,6 +385,8 @@ extern bool parameq(const char *name1, const char *name2);
*/
extern bool parameqn(const char *name1, const char *name2, size_t n);
+typedef int (*parse_unknown_fn)(char *param, char *val, const char *doing, void *arg);
+
/* Called on module insert or kernel boot */
extern char *parse_args(const char *name,
char *args,
@@ -392,9 +394,7 @@ extern char *parse_args(const char *name,
unsigned num,
s16 level_min,
s16 level_max,
- void *arg,
- int (*unknown)(char *param, char *val,
- const char *doing, void *arg));
+ void *arg, parse_unknown_fn unknown);
/* Called by module remove. */
#ifdef CONFIG_SYSFS
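The new parse_unknown_fn typedef only names the callback shape parse_args() already took. A caller-side sketch, with an illustrative handler body:

/* sketch: an "unknown parameter" handler matching parse_unknown_fn */
static int mydrv_unknown_param(char *param, char *val, const char *doing, void *arg)
{
	pr_info("%s: ignoring unknown parameter '%s'\n", doing, param);
	return 0;	/* 0 means "handled"; non-zero reports it as invalid */
}

/* ...later, when parsing a module or boot command line: */
/* parse_args("mydrv", cmdline, params, num_params, 0, 0, NULL, mydrv_unknown_param); */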
diff --git a/include/linux/mount.h b/include/linux/mount.h
index ac3dd2876197..c34c18b4e8f3 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -50,8 +50,7 @@ struct path;
#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
- MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | \
- MNT_CURSOR)
+ MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | MNT_ONRB)
#define MNT_INTERNAL 0x4000
@@ -65,7 +64,7 @@ struct path;
#define MNT_SYNC_UMOUNT 0x2000000
#define MNT_MARKED 0x4000000
#define MNT_UMOUNT 0x8000000
-#define MNT_CURSOR 0x10000000
+#define MNT_ONRB 0x10000000
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index c29ace15a053..e84522e31301 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -1003,6 +1003,8 @@ struct nand_op_parser {
/**
* struct nand_operation - NAND operation descriptor
* @cs: the CS line to select for this NAND operation
+ * @deassert_wp: set to true when the operation requires the WP pin to be
+ * de-asserted (ERASE, PROG, ...)
* @instrs: array of instructions to execute
* @ninstrs: length of the @instrs array
*
@@ -1010,6 +1012,7 @@ struct nand_op_parser {
*/
struct nand_operation {
unsigned int cs;
+ bool deassert_wp;
const struct nand_op_instr *instrs;
unsigned int ninstrs;
};
@@ -1021,6 +1024,14 @@ struct nand_operation {
.ninstrs = ARRAY_SIZE(_instrs), \
}
+#define NAND_DESTRUCTIVE_OPERATION(_cs, _instrs) \
+ { \
+ .cs = _cs, \
+ .deassert_wp = true, \
+ .instrs = _instrs, \
+ .ninstrs = ARRAY_SIZE(_instrs), \
+ }
+
int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_op_parser *parser,
const struct nand_operation *op, bool check_only);
@@ -1104,6 +1115,7 @@ struct nand_controller_ops {
* the bus without restarting an entire read operation nor
* changing the column.
* @supported_op.cont_read: The controller supports sequential cache reads.
+ * @controller_wp: the controller is in charge of handling the WP pin.
*/
struct nand_controller {
struct mutex lock;
@@ -1112,6 +1124,7 @@ struct nand_controller {
unsigned int data_only_read: 1;
unsigned int cont_read: 1;
} supported_op;
+ bool controller_wp;
};
static inline void nand_controller_init(struct nand_controller *nfc)
@@ -1265,6 +1278,7 @@ struct nand_secure_region {
* @cont_read: Sequential page read internals
* @cont_read.ongoing: Whether a continuous read is ongoing or not
* @cont_read.first_page: Start of the continuous read operation
+ * @cont_read.pause_page: End of the current sequential cache read operation
* @cont_read.last_page: End of the continuous read operation
* @controller: The hardware controller structure which is shared among multiple
* independent devices
@@ -1321,6 +1335,7 @@ struct nand_chip {
struct {
bool ongoing;
unsigned int first_page;
+ unsigned int pause_page;
unsigned int last_page;
} cont_read;
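For controllers that drive the WP pin themselves (controller_wp set), the new deassert_wp hint tells exec_op() when a destructive operation needs WP released. A controller-side sketch; the my_nfc_* helpers are hypothetical:

static int my_nfc_exec_op(struct nand_chip *chip,
			  const struct nand_operation *op, bool check_only)
{
	if (check_only)
		return 0;

	/* ERASE/PROG ops built with NAND_DESTRUCTIVE_OPERATION() set this */
	if (op->deassert_wp)
		my_nfc_set_wp(chip, false);	/* hypothetical pin helper */

	return my_nfc_run_instrs(chip, op);	/* hypothetical */
}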
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 0dfba5df6524..7e208d46ba5b 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -171,6 +171,7 @@ extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
-DEFINE_FREE(mutex, struct mutex *, if (_T) mutex_unlock(_T))
+DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
+DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)
#endif /* __LINUX_MUTEX_H */
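The conditional guard classes are meant to pair with the scoped_cond_guard() helper from <linux/cleanup.h>; a usage sketch under that assumption, with an illustrative function and structure:

/* sketch: interruptible, scope-managed locking with the new mutex guards */
static int mydev_set_mode(struct mydev *dev, int mode)
{
	scoped_cond_guard(mutex_intr, return -EINTR, &dev->lock) {
		dev->mode = mode;	/* lock held only inside this block */
	}
	return 0;
}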
diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h
index ed42bd5f639f..0aa4411528fc 100644
--- a/include/linux/net/intel/i40e_client.h
+++ b/include/linux/net/intel/i40e_client.h
@@ -45,7 +45,7 @@ struct i40e_qv_info {
struct i40e_qvlist_info {
u32 num_vectors;
- struct i40e_qv_info qv_info[];
+ struct i40e_qv_info qv_info[] __counted_by(num_vectors);
};
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index c11c4db34639..ef8d2d618d5b 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -869,4 +869,26 @@ enum {
RCA4_TYPE_MASK_OTHER_LAYOUT_MAX = 15,
};
+enum nfs_cb_opnum4 {
+ OP_CB_GETATTR = 3,
+ OP_CB_RECALL = 4,
+
+ /* Callback operations new to NFSv4.1 */
+ OP_CB_LAYOUTRECALL = 5,
+ OP_CB_NOTIFY = 6,
+ OP_CB_PUSH_DELEG = 7,
+ OP_CB_RECALL_ANY = 8,
+ OP_CB_RECALLABLE_OBJ_AVAIL = 9,
+ OP_CB_RECALL_SLOT = 10,
+ OP_CB_SEQUENCE = 11,
+ OP_CB_WANTS_CANCELLED = 12,
+ OP_CB_NOTIFY_LOCK = 13,
+ OP_CB_NOTIFY_DEVICEID = 14,
+
+ /* Callback operations new to NFSv4.2 */
+ OP_CB_OFFLOAD = 15,
+
+ OP_CB_ILLEGAL = 10044,
+};
+
#endif
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 279262057a92..f5ce7b101146 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -595,7 +595,6 @@ extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
* linux/fs/nfs/write.c
*/
extern int nfs_congestion_kb;
-extern int nfs_writepage(struct page *page, struct writeback_control *wbc);
extern int nfs_writepages(struct address_space *, struct writeback_control *);
extern int nfs_flush_incompatible(struct file *file, struct folio *folio);
extern int nfs_update_folio(struct file *file, struct folio *folio,
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index bdcd85e622d8..4d103ac8f5c7 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -89,8 +89,6 @@ struct nubus_driver {
void (*remove)(struct nubus_board *board);
};
-extern struct bus_type nubus_bus_type;
-
/* Generic NuBus interface functions, modelled after the PCI interface */
#ifdef CONFIG_PROC_FS
extern bool nubus_populate_procfs;
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
index 5581dbd3bd34..ea8fb31379e3 100644
--- a/include/linux/osq_lock.h
+++ b/include/linux/osq_lock.h
@@ -6,11 +6,6 @@
* An MCS like lock especially tailored for optimistic spinning for sleeping
* lock implementations (mutex, rwsem, etc).
*/
-struct optimistic_spin_node {
- struct optimistic_spin_node *next, *prev;
- int locked; /* 1 if lock acquired */
- int cpu; /* encoded CPU # + 1 value */
-};
struct optimistic_spin_queue {
/*
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index a88e64acebfe..735cddc13d20 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -772,19 +772,14 @@ static __always_inline void SetPageUptodate(struct page *page)
CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
-bool __folio_start_writeback(struct folio *folio, bool keep_write);
-bool set_page_writeback(struct page *page);
+void __folio_start_writeback(struct folio *folio, bool keep_write);
+void set_page_writeback(struct page *page);
#define folio_start_writeback(folio) \
__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio) \
__folio_start_writeback(folio, true)
-static inline bool test_set_page_writeback(struct page *page)
-{
- return set_page_writeback(page);
-}
-
static __always_inline bool folio_test_head(struct folio *folio)
{
return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY));
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index e83c4c095041..3f2409b968ec 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -41,14 +41,14 @@ extern unsigned int pageblock_order;
* Huge pages are a constant size, but don't exceed the maximum allocation
* granularity.
*/
-#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_ORDER)
+#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_PAGE_ORDER)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
#else /* CONFIG_HUGETLB_PAGE */
/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
-#define pageblock_order MAX_ORDER
+#define pageblock_order MAX_PAGE_ORDER
#endif /* CONFIG_HUGETLB_PAGE */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 60ca768bc867..58a4c976c39b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1239,6 +1239,8 @@ int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
+void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
+ u32 clear, u32 set);
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
@@ -1829,6 +1831,7 @@ extern bool pcie_ports_native;
int pci_disable_link_state(struct pci_dev *pdev, int state);
int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
int pci_enable_link_state(struct pci_dev *pdev, int state);
+int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
void pcie_no_aspm(void);
bool pcie_aspm_support_enabled(void);
bool pcie_aspm_enabled(struct pci_dev *pdev);
@@ -1839,6 +1842,8 @@ static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{ return 0; }
static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
{ return 0; }
+static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
+{ return 0; }
static inline void pcie_no_aspm(void) { }
static inline bool pcie_aspm_support_enabled(void) { return false; }
static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
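pci_clear_and_set_config_dword() is a read-modify-write helper for 32-bit config registers. A caller sketch with placeholder offsets and masks; the MY_* names are hypothetical:

#include <linux/bitfield.h>

#define MY_CAP_REG	0x44		/* hypothetical dword register offset */
#define MY_CAP_FIELD	GENMASK(7, 4)	/* hypothetical field to replace */

static void mydrv_set_field(struct pci_dev *pdev, u32 val)
{
	/* clear the field, then set the new value, in a single RMW */
	pci_clear_and_set_config_dword(pdev, MY_CAP_REG, MY_CAP_FIELD,
				       FIELD_PREP(MY_CAP_FIELD, val));
}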
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 275799b5f535..844ffdac8d7d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2605,6 +2605,8 @@
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
+#define PCI_VENDOR_ID_ALIBABA 0x1ded
+
#define PCI_VENDOR_ID_TEHUTI 0x1fc9
#define PCI_DEVICE_ID_TEHUTI_3009 0x3009
#define PCI_DEVICE_ID_TEHUTI_3010 0x3010
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 143fbc10ecfe..b3b34f6670cf 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -60,12 +60,6 @@ struct pmu_hw_events {
DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
/*
- * Hardware lock to serialize accesses to PMU registers. Needed for the
- * read/modify/write sequences.
- */
- raw_spinlock_t pmu_lock;
-
- /*
* When using percpu IRQs, we need a percpu dev_id. Place it here as we
* already have to allocate this struct per cpu.
*/
@@ -189,4 +183,26 @@ void armpmu_free_irq(int irq, int cpu);
#define ARMV8_SPE_PDEV_NAME "arm,spe-v1"
#define ARMV8_TRBE_PDEV_NAME "arm,trbe"
+/* Why does everything I do descend into this? */
+#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
+ (lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
+
+#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
+ __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
+
+#define GEN_PMU_FORMAT_ATTR(name) \
+ PMU_FORMAT_ATTR(name, \
+ _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \
+ ATTR_CFG_FLD_##name##_LO, \
+ ATTR_CFG_FLD_##name##_HI))
+
+#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \
+ ((((attr)->cfg) >> lo) & GENMASK_ULL(hi - lo, 0))
+
+#define ATTR_CFG_GET_FLD(attr, name) \
+ _ATTR_CFG_GET_FLD(attr, \
+ ATTR_CFG_FLD_##name##_CFG, \
+ ATTR_CFG_FLD_##name##_LO, \
+ ATTR_CFG_FLD_##name##_HI)
+
#endif /* __ARM_PMU_H__ */
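The ATTR_CFG_GET_FLD() helpers are a stringify-plus-GENMASK pattern; the extraction half can be exercised standalone. The snippet below is purely illustrative and re-creates a minimal GENMASK_ULL() so it builds outside the kernel tree:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1))

/* same shape as _ATTR_CFG_GET_FLD(): shift down, then mask to the field width */
#define GET_FLD(cfg, lo, hi)	(((cfg) >> (lo)) & GENMASK_ULL((hi) - (lo), 0))

int main(void)
{
	uint64_t cfg = 0xabcd0123;

	/* pull bits [15:8] out of the packed config word -> prints 0x1 */
	printf("0x%llx\n", (unsigned long long)GET_FLD(cfg, 8, 15));
	return 0;
}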
diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
index 9c226adf938a..46377e134d67 100644
--- a/include/linux/perf/arm_pmuv3.h
+++ b/include/linux/perf/arm_pmuv3.h
@@ -215,21 +215,27 @@
#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
#define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
#define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */
-#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */
-#define ARMV8_PMU_PMCR_N_MASK 0x1f
-#define ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */
+#define ARMV8_PMU_PMCR_N GENMASK(15, 11) /* Number of counters supported */
+/* Mask for writable bits */
+#define ARMV8_PMU_PMCR_MASK (ARMV8_PMU_PMCR_E | ARMV8_PMU_PMCR_P | \
+ ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_D | \
+ ARMV8_PMU_PMCR_X | ARMV8_PMU_PMCR_DP | \
+ ARMV8_PMU_PMCR_LC | ARMV8_PMU_PMCR_LP)
/*
* PMOVSR: counters overflow flag status reg
*/
-#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */
-#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK
+#define ARMV8_PMU_OVSR_P GENMASK(30, 0)
+#define ARMV8_PMU_OVSR_C BIT(31)
+/* Mask for writable bits is both P and C fields */
+#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C)
/*
* PMXEVTYPER: Event selection reg
*/
-#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
-#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
+#define ARMV8_PMU_EVTYPE_EVENT GENMASK(15, 0) /* Mask for EVENT bits */
+#define ARMV8_PMU_EVTYPE_TH GENMASK_ULL(43, 32) /* arm64 only */
+#define ARMV8_PMU_EVTYPE_TC GENMASK_ULL(63, 61) /* arm64 only */
/*
* Event filters for PMUv3
@@ -244,19 +250,19 @@
/*
* PMUSERENR: user enable reg
*/
-#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */
#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */
#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
+/* Mask for writable bits */
+#define ARMV8_PMU_USERENR_MASK (ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_SW | \
+ ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_ER)
/* PMMIR_EL1.SLOTS mask */
-#define ARMV8_PMU_SLOTS_MASK 0xff
-
-#define ARMV8_PMU_BUS_SLOTS_SHIFT 8
-#define ARMV8_PMU_BUS_SLOTS_MASK 0xff
-#define ARMV8_PMU_BUS_WIDTH_SHIFT 16
-#define ARMV8_PMU_BUS_WIDTH_MASK 0xf
+#define ARMV8_PMU_SLOTS GENMASK(7, 0)
+#define ARMV8_PMU_BUS_SLOTS GENMASK(15, 8)
+#define ARMV8_PMU_BUS_WIDTH GENMASK(19, 16)
+#define ARMV8_PMU_THWIDTH GENMASK(23, 20)
/*
* This code is really good
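With the *_SHIFT/*_MASK pairs replaced by single GENMASK() definitions, callers are expected to switch to the bitfield helpers. A hedged before/after sketch:

/* old style: explicit shift plus a narrow mask */
/* nr = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; */

/* new style: one field definition, extracted with FIELD_GET() */
#include <linux/bitfield.h>
#include <linux/types.h>

static unsigned int pmcr_nr_counters(u64 pmcr)
{
	return FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
}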
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5547ba68e6e4..d2a15c0c6f8a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1143,6 +1143,15 @@ static inline bool branch_sample_priv(const struct perf_event *event)
return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE;
}
+static inline bool branch_sample_counters(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS;
+}
+
+static inline bool branch_sample_call_stack(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
+}
struct perf_sample_data {
/*
@@ -1177,6 +1186,7 @@ struct perf_sample_data {
struct perf_callchain_entry *callchain;
struct perf_raw_record *raw;
struct perf_branch_stack *br_stack;
+ u64 *br_stack_cntr;
union perf_sample_weight weight;
union perf_mem_data_src data_src;
u64 txn;
@@ -1254,7 +1264,8 @@ static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
static inline void perf_sample_save_brstack(struct perf_sample_data *data,
struct perf_event *event,
- struct perf_branch_stack *brs)
+ struct perf_branch_stack *brs,
+ u64 *brs_cntr)
{
int size = sizeof(u64); /* nr */
@@ -1262,7 +1273,16 @@ static inline void perf_sample_save_brstack(struct perf_sample_data *data,
size += sizeof(u64);
size += brs->nr * sizeof(struct perf_branch_entry);
+ /*
+ * The extension space for counters is appended after the
+ * struct perf_branch_stack. It is used to store the occurrences
+ * of events of each branch.
+ */
+ if (brs_cntr)
+ size += brs->nr * sizeof(u64);
+
data->br_stack = brs;
+ data->br_stack_cntr = brs_cntr;
data->dyn_size += size;
data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
}
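With the extra brs_cntr argument, PMU drivers that do not implement branch counters simply pass NULL. A caller sketch; the cpuc field names are hypothetical and only illustrate the shape of the call:

/* sketch: saving a branch stack with (or without) per-branch counters */
if (has_branch_stack(event)) {
	u64 *cntrs = branch_sample_counters(event) ? cpuc->lbr_counters : NULL;

	perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, cntrs);
}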
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index af7639c3b0a3..466cf477551a 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -184,6 +184,13 @@ static inline int pmd_young(pmd_t pmd)
}
#endif
+#ifndef pmd_dirty
+static inline int pmd_dirty(pmd_t pmd)
+{
+ return 0;
+}
+#endif
+
/*
* A facility to provide lazy MMU batching. This allows PTE updates and
* page invalidations to be delayed until a call to leave lazy MMU mode
@@ -375,7 +382,7 @@ static inline bool arch_has_hw_nonleaf_pmd_young(void)
*/
static inline bool arch_has_hw_pte_young(void)
{
- return false;
+ return IS_ENABLED(CONFIG_ARCH_HAS_HW_PTE_YOUNG);
}
#endif
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 3cc52826f18e..bd285950972c 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -568,7 +568,6 @@ struct macsec_ops;
* - Bits [31:24] are reserved for defining generic
* PHY driver behavior.
* @irq: IRQ number of the PHY's interrupt (-1 if none)
- * @phy_timer: The timer for handling the state machine
* @phylink: Pointer to phylink instance for this PHY
* @sfp_bus_attached: Flag indicating whether the SFP bus has been attached
* @sfp_bus: SFP bus attached to this PHY's fiber port
diff --git a/include/linux/platform_data/x86/clk-lpss.h b/include/linux/platform_data/x86/clk-lpss.h
index 41df326583f9..7f132029316a 100644
--- a/include/linux/platform_data/x86/clk-lpss.h
+++ b/include/linux/platform_data/x86/clk-lpss.h
@@ -15,6 +15,6 @@ struct lpss_clk_data {
struct clk *clk;
};
-extern int lpss_atom_clk_init(void);
+int lpss_atom_clk_init(void);
#endif /* __CLK_LPSS_H */
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index ccd97bcef269..76dcb7f37bcd 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -45,18 +45,6 @@ struct dev_pm_opp_supply {
unsigned long u_watt;
};
-/**
- * struct dev_pm_opp_icc_bw - Interconnect bandwidth values
- * @avg: Average bandwidth corresponding to this OPP (in icc units)
- * @peak: Peak bandwidth corresponding to this OPP (in icc units)
- *
- * This structure stores the bandwidth values for a single interconnect path.
- */
-struct dev_pm_opp_icc_bw {
- u32 avg;
- u32 peak;
-};
-
typedef int (*config_regulators_t)(struct device *dev,
struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
struct regulator **regulators, unsigned int count);
@@ -74,8 +62,10 @@ typedef int (*config_clks_t)(struct device *dev, struct opp_table *opp_table,
* @supported_hw_count: Number of elements in the array.
* @regulator_names: Array of pointers to the names of the regulator, NULL terminated.
* @genpd_names: Null terminated array of pointers containing names of genpd to
- * attach.
- * @virt_devs: Pointer to return the array of virtual devices.
+ * attach. Mutually exclusive with required_devs.
+ * @virt_devs: Pointer to return the array of genpd virtual devices. Mutually
+ * exclusive with required_devs.
+ * @required_devs: Required OPP devices. Mutually exclusive with genpd_names/virt_devs.
*
* This structure contains platform specific OPP configurations for the device.
*/
@@ -90,11 +80,15 @@ struct dev_pm_opp_config {
const char * const *regulator_names;
const char * const *genpd_names;
struct device ***virt_devs;
+ struct device **required_devs;
};
+#define OPP_LEVEL_UNSET U32_MAX
+
/**
* struct dev_pm_opp_data - The data to use to initialize an OPP.
- * @level: The performance level for the OPP.
+ * @level: The performance level for the OPP. Set level to OPP_LEVEL_UNSET if
+ * level field isn't used.
* @freq: The clock rate in Hz for the OPP.
* @u_volt: The voltage in uV for the OPP.
*/
@@ -157,7 +151,7 @@ struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
unsigned int *level);
struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
- unsigned long *level);
+ unsigned int *level);
struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
unsigned int *bw, int index);
@@ -324,7 +318,7 @@ static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
}
static inline struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
- unsigned long *level)
+ unsigned int *level)
{
return ERR_PTR(-EOPNOTSUPP);
}
diff --git a/include/linux/property.h b/include/linux/property.h
index 9f2585d705a8..97f901c0914e 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -489,6 +489,13 @@ struct software_node {
const struct property_entry *properties;
};
+#define SOFTWARE_NODE(_name_, _properties_, _parent_) \
+ (struct software_node) { \
+ .name = _name_, \
+ .properties = _properties_, \
+ .parent = _parent_, \
+ }
+
bool is_software_node(const struct fwnode_handle *fwnode);
const struct software_node *
to_software_node(const struct fwnode_handle *fwnode);
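SOFTWARE_NODE() expands to a compound-literal initializer, so node tables can be built in place. A usage sketch with illustrative property and node names:

static const struct property_entry mydev_props[] = {
	PROPERTY_ENTRY_U32("clock-frequency", 400000),
	{ }	/* terminator */
};

static const struct software_node mydev_node =
	SOFTWARE_NODE("mydev", mydev_props, NULL);	/* NULL: no parent node */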
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 4fa4ef0a173a..06cc8888199e 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -74,7 +74,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags);
int dquot_alloc_inode(struct inode *inode);
-int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
+void dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
void dquot_free_inode(struct inode *inode);
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number);
@@ -257,10 +257,9 @@ static inline void __dquot_free_space(struct inode *inode, qsize_t number,
inode_sub_bytes(inode, number);
}
-static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
+static inline void dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
inode_add_bytes(inode, number);
- return 0;
}
static inline int dquot_reclaim_space_nodirty(struct inode *inode,
@@ -358,14 +357,10 @@ static inline int dquot_reserve_block(struct inode *inode, qsize_t nr)
DQUOT_SPACE_WARN|DQUOT_SPACE_RESERVE);
}
-static inline int dquot_claim_block(struct inode *inode, qsize_t nr)
+static inline void dquot_claim_block(struct inode *inode, qsize_t nr)
{
- int ret;
-
- ret = dquot_claim_space_nodirty(inode, nr << inode->i_blkbits);
- if (!ret)
- mark_inode_dirty_sync(inode);
- return ret;
+ dquot_claim_space_nodirty(inode, nr << inode->i_blkbits);
+ mark_inode_dirty_sync(inode);
}
static inline void dquot_reclaim_block(struct inode *inode, qsize_t nr)
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index c4cc3b89ced1..abcdde4df697 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -177,7 +177,17 @@ void ctrl_alt_del(void);
extern void orderly_poweroff(bool force);
extern void orderly_reboot(void);
-void hw_protection_shutdown(const char *reason, int ms_until_forced);
+void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shutdown);
+
+static inline void hw_protection_reboot(const char *reason, int ms_until_forced)
+{
+ __hw_protection_shutdown(reason, ms_until_forced, false);
+}
+
+static inline void hw_protection_shutdown(const char *reason, int ms_until_forced)
+{
+ __hw_protection_shutdown(reason, ms_until_forced, true);
+}
/*
* Emergency restart, callable from an interrupt handler.
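hw_protection_shutdown() keeps its old behaviour; callers that would rather have the machine come back once the fault clears can switch to the new reboot wrapper. A sketch from a hypothetical thermal driver:

/* sketch: react to a critical sensor reading */
if (temp_mC > crit_mC)
	hw_protection_reboot("over-temperature", 100 /* ms until forced */);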
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 39b666b40ea6..4660582a3302 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -33,6 +33,7 @@
#include <linux/err.h>
#include <linux/suspend.h>
+#include <regulator/regulator.h>
struct device;
struct notifier_block;
@@ -85,52 +86,6 @@ struct regulator_dev;
#define REGULATOR_MODE_STANDBY 0x8
/*
- * Regulator notifier events.
- *
- * UNDER_VOLTAGE Regulator output is under voltage.
- * OVER_CURRENT Regulator output current is too high.
- * REGULATION_OUT Regulator output is out of regulation.
- * FAIL Regulator output has failed.
- * OVER_TEMP Regulator over temp.
- * FORCE_DISABLE Regulator forcibly shut down by software.
- * VOLTAGE_CHANGE Regulator voltage changed.
- * Data passed is old voltage cast to (void *).
- * DISABLE Regulator was disabled.
- * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed.
- * Data passed is "struct pre_voltage_change_data"
- * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason.
- * Data passed is old voltage cast to (void *).
- * PRE_DISABLE Regulator is about to be disabled
- * ABORT_DISABLE Regulator disable failed for some reason
- *
- * NOTE: These events can be OR'ed together when passed into handler.
- */
-
-#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01
-#define REGULATOR_EVENT_OVER_CURRENT 0x02
-#define REGULATOR_EVENT_REGULATION_OUT 0x04
-#define REGULATOR_EVENT_FAIL 0x08
-#define REGULATOR_EVENT_OVER_TEMP 0x10
-#define REGULATOR_EVENT_FORCE_DISABLE 0x20
-#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
-#define REGULATOR_EVENT_DISABLE 0x80
-#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100
-#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
-#define REGULATOR_EVENT_PRE_DISABLE 0x400
-#define REGULATOR_EVENT_ABORT_DISABLE 0x800
-#define REGULATOR_EVENT_ENABLE 0x1000
-/*
- * Following notifications should be emitted only if detected condition
- * is such that the HW is likely to still be working but consumers should
- * take a recovery action to prevent problems esacalating into errors.
- */
-#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000
-#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000
-#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000
-#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000
-#define REGULATOR_EVENT_WARN_MASK 0x1E000
-
-/*
* Regulator errors that can be queried using regulator_get_error_flags
*
* UNDER_VOLTAGE Regulator output is under voltage.
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4b7eceb3828b..22a07c0900a4 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -51,12 +51,7 @@ enum regulator_detection_severity {
/* Initialize struct linear_range for regulators */
#define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \
-{ \
- .min = _min_uV, \
- .min_sel = _min_sel, \
- .max_sel = _max_sel, \
- .step = _step_uV, \
-}
+ LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV)
/**
* struct regulator_ops - regulator operations.
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 621b7f4a3639..0cd76d264727 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -49,6 +49,13 @@ struct regulator;
#define DISABLE_IN_SUSPEND 1
#define ENABLE_IN_SUSPEND 2
+/*
+ * Default time window (in milliseconds) following a critical under-voltage
+ * event during which less critical actions can be safely carried out by the
+ * system.
+ */
+#define REGULATOR_DEF_UV_LESS_CRITICAL_WINDOW_MS 10
+
/* Regulator active discharge flags */
enum regulator_active_discharge {
REGULATOR_ACTIVE_DISCHARGE_DEFAULT,
@@ -127,6 +134,8 @@ struct notification_limit {
* @ramp_disable: Disable ramp delay when initialising or when setting voltage.
* @soft_start: Enable soft start so that voltage ramps slowly.
* @pull_down: Enable pull down when regulator is disabled.
+ * @system_critical: Set if the regulator is critical to system stability or
+ * functionality.
* @over_current_protection: Auto disable on over current event.
*
* @over_current_detection: Configure over current limits.
@@ -153,6 +162,13 @@ struct notification_limit {
* regulator_active_discharge values are used for
* initialisation.
* @enable_time: Turn-on time of the rails (unit: microseconds)
+ * @uv_less_critical_window_ms: Specifies the time window (in milliseconds)
+ * following a critical under-voltage (UV) event
+ * during which less critical actions can be
+ * safely carried out by the system (for example
+ * logging). After this time window more critical
+ * actions should be done (for example prevent
+ * HW damage).
*/
struct regulation_constraints {
@@ -204,6 +220,7 @@ struct regulation_constraints {
unsigned int settling_time_up;
unsigned int settling_time_down;
unsigned int enable_time;
+ unsigned int uv_less_critical_window_ms;
unsigned int active_discharge;
@@ -214,6 +231,7 @@ struct regulation_constraints {
unsigned ramp_disable:1; /* disable ramp delay */
unsigned soft_start:1; /* ramp voltage slowly */
unsigned pull_down:1; /* pull down resistor when regulator off */
+ unsigned system_critical:1; /* critical to system stability */
unsigned over_current_protection:1; /* auto disable on over current */
unsigned over_current_detection:1; /* notify on over current */
unsigned over_voltage_detection:1; /* notify on over voltage */
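A platform-constraints sketch marking a rail as system critical and using the default under-voltage grace window; the values and names are illustrative, and in practice the constraints sit inside a regulator_init_data description:

static struct regulation_constraints vcore_constraints = {
	.name				= "vcore",
	.always_on			= 1,
	.system_critical		= 1,
	.uv_less_critical_window_ms	= REGULATOR_DEF_UV_LESS_CRITICAL_WINDOW_MS,
};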
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b26fe858fd44..b7944a833668 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -121,6 +121,11 @@ static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
down_write(&anon_vma->root->rwsem);
}
+static inline int anon_vma_trylock_write(struct anon_vma *anon_vma)
+{
+ return down_write_trylock(&anon_vma->root->rwsem);
+}
+
static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
up_write(&anon_vma->root->rwsem);
@@ -172,133 +177,323 @@ struct anon_vma *folio_get_anon_vma(struct folio *folio);
typedef int __bitwise rmap_t;
/*
- * No special request: if the page is a subpage of a compound page, it is
- * mapped via a PTE. The mapped (sub)page is possibly shared between processes.
+ * No special request: A mapped anonymous (sub)page is possibly shared between
+ * processes.
*/
#define RMAP_NONE ((__force rmap_t)0)
-/* The (sub)page is exclusive to a single process. */
+/* The anonymous (sub)page is exclusive to a single process. */
#define RMAP_EXCLUSIVE ((__force rmap_t)BIT(0))
/*
- * The compound page is not mapped via PTEs, but instead via a single PMD and
- * should be accounted accordingly.
+ * Internally, we're using an enum to specify the granularity. We make the
+ * compiler emit specialized code for each granularity.
*/
-#define RMAP_COMPOUND ((__force rmap_t)BIT(1))
+enum rmap_level {
+ RMAP_LEVEL_PTE = 0,
+ RMAP_LEVEL_PMD,
+};
+
+static inline void __folio_rmap_sanity_checks(struct folio *folio,
+ struct page *page, int nr_pages, enum rmap_level level)
+{
+ /* hugetlb folios are handled separately. */
+ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+
+ /*
+ * TODO: we get driver-allocated folios that have nothing to do with
+ * the rmap using vm_insert_page(); therefore, we cannot assume that
+ * folio_test_large_rmappable() holds for large folios. We should
+ * handle any desired mapcount+stats accounting for these folios in
+ * VM_MIXEDMAP VMAs separately, and then sanity-check here that
+ * we really only get rmappable folios.
+ */
+
+ VM_WARN_ON_ONCE(nr_pages <= 0);
+ VM_WARN_ON_FOLIO(page_folio(page) != folio, folio);
+ VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);
+
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ break;
+ case RMAP_LEVEL_PMD:
+ /*
+ * We don't support folios larger than a single PMD yet. So
+ * when RMAP_LEVEL_PMD is set, we assume that we are creating
+ * a single "entire" mapping of the folio.
+ */
+ VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio);
+ VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio);
+ break;
+ default:
+ VM_WARN_ON_ONCE(true);
+ }
+}
/*
* rmap interfaces called when adding or removing pte of page
*/
void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
-void page_add_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long address, rmap_t flags);
-void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long address);
+void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *, unsigned long address, rmap_t flags);
+#define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \
+ folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
+void folio_add_anon_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *, unsigned long address, rmap_t flags);
void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address);
-void page_add_file_rmap(struct page *, struct vm_area_struct *,
- bool compound);
-void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr,
- struct vm_area_struct *, bool compound);
-void page_remove_rmap(struct page *, struct vm_area_struct *,
- bool compound);
-
-void hugepage_add_anon_rmap(struct folio *, struct vm_area_struct *,
+void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *);
+#define folio_add_file_rmap_pte(folio, page, vma) \
+ folio_add_file_rmap_ptes(folio, page, 1, vma)
+void folio_add_file_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *);
+void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *);
+#define folio_remove_rmap_pte(folio, page, vma) \
+ folio_remove_rmap_ptes(folio, page, 1, vma)
+void folio_remove_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *);
+
+void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address, rmap_t flags);
-void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
+void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address);
-static inline void __page_dup_rmap(struct page *page, bool compound)
+/* See folio_try_dup_anon_rmap_*() */
+static inline int hugetlb_try_dup_anon_rmap(struct folio *folio,
+ struct vm_area_struct *vma)
{
- if (compound) {
- struct folio *folio = (struct folio *)page;
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
- VM_BUG_ON_PAGE(compound && !PageHead(page), page);
- atomic_inc(&folio->_entire_mapcount);
- } else {
- atomic_inc(&page->_mapcount);
+ if (PageAnonExclusive(&folio->page)) {
+ if (unlikely(folio_needs_cow_for_dma(vma, folio)))
+ return -EBUSY;
+ ClearPageAnonExclusive(&folio->page);
}
+ atomic_inc(&folio->_entire_mapcount);
+ return 0;
+}
+
+/* See folio_try_share_anon_rmap_*() */
+static inline int hugetlb_try_share_anon_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio);
+
+ /* Paired with the memory barrier in try_grab_folio(). */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb();
+
+ if (unlikely(folio_maybe_dma_pinned(folio)))
+ return -EBUSY;
+ ClearPageAnonExclusive(&folio->page);
+
+ /*
+ * This is conceptually a smp_wmb() paired with the smp_rmb() in
+ * gup_must_unshare().
+ */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb__after_atomic();
+ return 0;
+}
+
+static inline void hugetlb_add_file_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
+
+ atomic_inc(&folio->_entire_mapcount);
+}
+
+static inline void hugetlb_remove_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+
+ atomic_dec(&folio->_entire_mapcount);
}
-static inline void page_dup_file_rmap(struct page *page, bool compound)
+static __always_inline void __folio_dup_file_rmap(struct folio *folio,
+ struct page *page, int nr_pages, enum rmap_level level)
{
- __page_dup_rmap(page, compound);
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
+
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ do {
+ atomic_inc(&page->_mapcount);
+ } while (page++, --nr_pages > 0);
+ break;
+ case RMAP_LEVEL_PMD:
+ atomic_inc(&folio->_entire_mapcount);
+ break;
+ }
}
/**
- * page_try_dup_anon_rmap - try duplicating a mapping of an already mapped
- * anonymous page
- * @page: the page to duplicate the mapping for
- * @compound: the page is mapped as compound or as a small page
- * @vma: the source vma
+ * folio_dup_file_rmap_ptes - duplicate PTE mappings of a page range of a folio
+ * @folio: The folio to duplicate the mappings of
+ * @page: The first page to duplicate the mappings of
+ * @nr_pages: The number of pages of which the mapping will be duplicated
*
- * The caller needs to hold the PT lock and the vma->vma_mm->write_protect_seq.
+ * The page range of the folio is defined by [page, page + nr_pages)
*
- * Duplicating the mapping can only fail if the page may be pinned; device
- * private pages cannot get pinned and consequently this function cannot fail.
+ * The caller needs to hold the page table lock.
+ */
+static inline void folio_dup_file_rmap_ptes(struct folio *folio,
+ struct page *page, int nr_pages)
+{
+ __folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE);
+}
+#define folio_dup_file_rmap_pte(folio, page) \
+ folio_dup_file_rmap_ptes(folio, page, 1)
+
+/**
+ * folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio
+ * @folio: The folio to duplicate the mapping of
+ * @page: The first page to duplicate the mapping of
*
- * If duplicating the mapping succeeds, the page has to be mapped R/O into
- * the parent and the child. It must *not* get mapped writable after this call.
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
*
- * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
+ * The caller needs to hold the page table lock.
*/
-static inline int page_try_dup_anon_rmap(struct page *page, bool compound,
- struct vm_area_struct *vma)
+static inline void folio_dup_file_rmap_pmd(struct folio *folio,
+ struct page *page)
{
- VM_BUG_ON_PAGE(!PageAnon(page), page);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+#endif
+}
- /*
- * No need to check+clear for already shared pages, including KSM
- * pages.
- */
- if (!PageAnonExclusive(page))
- goto dup;
+static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *src_vma,
+ enum rmap_level level)
+{
+ bool maybe_pinned;
+ int i;
+
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
/*
- * If this page may have been pinned by the parent process,
- * don't allow to duplicate the mapping but instead require to e.g.,
- * copy the page immediately for the child so that we'll always
- * guarantee the pinned page won't be randomly replaced in the
+ * If this folio may have been pinned by the parent process,
+ * don't allow to duplicate the mappings but instead require to e.g.,
+ * copy the subpage immediately for the child so that we'll always
+ * guarantee the pinned folio won't be randomly replaced in the
* future on write faults.
*/
- if (likely(!is_device_private_page(page) &&
- unlikely(page_needs_cow_for_dma(vma, page))))
- return -EBUSY;
+ maybe_pinned = likely(!folio_is_device_private(folio)) &&
+ unlikely(folio_needs_cow_for_dma(src_vma, folio));
- ClearPageAnonExclusive(page);
/*
- * It's okay to share the anon page between both processes, mapping
- * the page R/O into both processes.
+ * No need to check+clear for already shared PTEs/PMDs of the
+ * folio. But if any page is PageAnonExclusive, we must fallback to
+ * copying if the folio maybe pinned.
*/
-dup:
- __page_dup_rmap(page, compound);
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ if (unlikely(maybe_pinned)) {
+ for (i = 0; i < nr_pages; i++)
+ if (PageAnonExclusive(page + i))
+ return -EBUSY;
+ }
+ do {
+ if (PageAnonExclusive(page))
+ ClearPageAnonExclusive(page);
+ atomic_inc(&page->_mapcount);
+ } while (page++, --nr_pages > 0);
+ break;
+ case RMAP_LEVEL_PMD:
+ if (PageAnonExclusive(page)) {
+ if (unlikely(maybe_pinned))
+ return -EBUSY;
+ ClearPageAnonExclusive(page);
+ }
+ atomic_inc(&folio->_entire_mapcount);
+ break;
+ }
return 0;
}
/**
- * page_try_share_anon_rmap - try marking an exclusive anonymous page possibly
- * shared to prepare for KSM or temporary unmapping
- * @page: the exclusive anonymous page to try marking possibly shared
+ * folio_try_dup_anon_rmap_ptes - try duplicating PTE mappings of a page range
+ * of a folio
+ * @folio: The folio to duplicate the mappings of
+ * @page: The first page to duplicate the mappings of
+ * @nr_pages: The number of pages of which the mapping will be duplicated
+ * @src_vma: The vm area from which the mappings are duplicated
+ *
+ * The page range of the folio is defined by [page, page + nr_pages)
*
- * The caller needs to hold the PT lock and has to have the page table entry
- * cleared/invalidated.
+ * The caller needs to hold the page table lock and the
+ * vma->vma_mm->write_protect_seq.
*
- * This is similar to page_try_dup_anon_rmap(), however, not used during fork()
- * to duplicate a mapping, but instead to prepare for KSM or temporarily
- * unmapping a page (swap, migration) via page_remove_rmap().
+ * Duplicating the mappings can only fail if the folio may be pinned; device
+ * private folios cannot get pinned and consequently this function cannot fail
+ * for them.
*
- * Marking the page shared can only fail if the page may be pinned; device
- * private pages cannot get pinned and consequently this function cannot fail.
+ * If duplicating the mappings succeeded, the duplicated PTEs have to be R/O in
+ * the parent and the child. They must *not* be writable after this call
+ * succeeded.
*
- * Returns 0 if marking the page possibly shared succeeded. Returns -EBUSY
- * otherwise.
+ * Returns 0 if duplicating the mappings succeeded. Returns -EBUSY otherwise.
*/
-static inline int page_try_share_anon_rmap(struct page *page)
+static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *src_vma)
{
- VM_BUG_ON_PAGE(!PageAnon(page) || !PageAnonExclusive(page), page);
+ return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma,
+ RMAP_LEVEL_PTE);
+}
+#define folio_try_dup_anon_rmap_pte(folio, page, vma) \
+ folio_try_dup_anon_rmap_ptes(folio, page, 1, vma)
- /* device private pages cannot get pinned via GUP. */
- if (unlikely(is_device_private_page(page))) {
+/**
+ * folio_try_dup_anon_rmap_pmd - try duplicating a PMD mapping of a page range
+ * of a folio
+ * @folio: The folio to duplicate the mapping of
+ * @page: The first page to duplicate the mapping of
+ * @src_vma: The vm area from which the mapping is duplicated
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock and the
+ * vma->vma_mm->write_protect_seq.
+ *
+ * Duplicating the mapping can only fail if the folio may be pinned; device
+ * private folios cannot get pinned and consequently this function cannot fail
+ * for them.
+ *
+ * If duplicating the mapping succeeds, the duplicated PMD has to be R/O in
+ * the parent and the child. They must *not* be writable after this call
+ * succeeded.
+ *
+ * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
+ */
+static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
+ struct page *page, struct vm_area_struct *src_vma)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma,
+ RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+ return -EBUSY;
+#endif
+}
+
+static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
+ struct page *page, int nr_pages, enum rmap_level level)
+{
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio);
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
+
+ /* device private folios cannot get pinned via GUP. */
+ if (unlikely(folio_is_device_private(folio))) {
ClearPageAnonExclusive(page);
return 0;
}
@@ -349,7 +544,7 @@ static inline int page_try_share_anon_rmap(struct page *page)
if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
smp_mb();
- if (unlikely(page_maybe_dma_pinned(page)))
+ if (unlikely(folio_maybe_dma_pinned(folio)))
return -EBUSY;
ClearPageAnonExclusive(page);
@@ -362,6 +557,68 @@ static inline int page_try_share_anon_rmap(struct page *page)
return 0;
}
+/**
+ * folio_try_share_anon_rmap_pte - try marking an exclusive anonymous page
+ * mapped by a PTE possibly shared to prepare
+ * for KSM or temporary unmapping
+ * @folio: The folio to share a mapping of
+ * @page: The mapped exclusive page
+ *
+ * The caller needs to hold the page table lock and has to have the page table
+ * entries cleared/invalidated.
+ *
+ * This is similar to folio_try_dup_anon_rmap_pte(); however, it is not used
+ * during fork() to duplicate mappings, but instead to prepare for KSM or for
+ * temporarily unmapping parts of a folio (swap, migration) via
+ * folio_remove_rmap_pte().
+ *
+ * Marking the mapped page shared can only fail if the folio may be pinned;
+ * device private folios cannot get pinned and consequently this function
+ * cannot fail.
+ *
+ * Returns 0 if marking the mapped page possibly shared succeeded. Returns
+ * -EBUSY otherwise.
+ */
+static inline int folio_try_share_anon_rmap_pte(struct folio *folio,
+ struct page *page)
+{
+ return __folio_try_share_anon_rmap(folio, page, 1, RMAP_LEVEL_PTE);
+}
+
+/**
+ * folio_try_share_anon_rmap_pmd - try marking an exclusive anonymous page
+ * range mapped by a PMD possibly shared to
+ * prepare for temporary unmapping
+ * @folio: The folio to share the mapping of
+ * @page: The first page to share the mapping of
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock and has to have the page table
+ * entries cleared/invalidated.
+ *
+ * This is similar to folio_try_dup_anon_rmap_pmd(); however, it is not used
+ * during fork() to duplicate a mapping, but instead to prepare for temporarily
+ * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pmd().
+ *
+ * Marking the mapped pages shared can only fail if the folio may be pinned;
+ * device private folios cannot get pinned and consequently this function
+ * cannot fail.
+ *
+ * Returns 0 if marking the mapped pages possibly shared succeeded. Returns
+ * -EBUSY otherwise.
+ */
+static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
+ struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR,
+ RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+ return -EBUSY;
+#endif
+}
+
/*
* Called from mm/vmscan.c to handle paging out
*/
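
As a rough illustration of the contract documented above, here is a hypothetical fork-time caller of folio_try_dup_anon_rmap_ptes(); the wrapper function and its error handling are assumptions, only the helper's signature and its -EBUSY meaning come from this header.

#include <linux/rmap.h>

static int copy_anon_ptes_sketch(struct vm_area_struct *src_vma,
				 struct folio *folio, struct page *page,
				 int nr_pages)
{
	/* May fail only if the folio might be pinned (e.g. via GUP). */
	if (folio_try_dup_anon_rmap_ptes(folio, page, nr_pages, src_vma))
		return -EBUSY;	/* caller falls back to copying the pages */

	/* Success: parent and child share the range, which must stay R/O. */
	return 0;
}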
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 1dd530ce8b45..9c29689ff505 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -203,11 +203,11 @@ extern void up_read(struct rw_semaphore *sem);
extern void up_write(struct rw_semaphore *sem);
DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
-DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
-
-DEFINE_FREE(up_read, struct rw_semaphore *, if (_T) up_read(_T))
-DEFINE_FREE(up_write, struct rw_semaphore *, if (_T) up_write(_T))
+DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
+DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)
+DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
+DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
/*
* downgrade write lock to read lock
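
For illustration, a hedged sketch of how the new conditional guard classes combine with cleanup.h's scoped_cond_guard(); the protected data and function names are hypothetical, only the rwsem_read_try and rwsem_read_intr guard names come from this hunk.

#include <linux/cleanup.h>
#include <linux/rwsem.h>

static int read_state_interruptible(struct rw_semaphore *sem,
				    const int *shared, int *out)
{
	/* Sleeps for the lock, but gives up if a signal arrives. */
	scoped_cond_guard(rwsem_read_intr, return -EINTR, sem) {
		*out = *shared;
	}
	return 0;
}

static bool read_state_try(struct rw_semaphore *sem,
			   const int *shared, int *out)
{
	/* Never sleeps; reports failure when the lock is contended. */
	scoped_cond_guard(rwsem_read_try, return false, sem) {
		*out = *shared;
	}
	return true;
}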
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 814bfdafbc1c..9a66147915b2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -71,11 +71,13 @@ struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
+struct sched_dl_entity;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
+struct task_struct;
struct user_event_mm;
/*
@@ -421,42 +423,6 @@ struct load_weight {
u32 inv_weight;
};
-/**
- * struct util_est - Estimation utilization of FAIR tasks
- * @enqueued: instantaneous estimated utilization of a task/cpu
- * @ewma: the Exponential Weighted Moving Average (EWMA)
- * utilization of a task
- *
- * Support data structure to track an Exponential Weighted Moving Average
- * (EWMA) of a FAIR task's utilization. New samples are added to the moving
- * average each time a task completes an activation. Sample's weight is chosen
- * so that the EWMA will be relatively insensitive to transient changes to the
- * task's workload.
- *
- * The enqueued attribute has a slightly different meaning for tasks and cpus:
- * - task: the task's util_avg at last task dequeue time
- * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
- * Thus, the util_est.enqueued of a task represents the contribution on the
- * estimated utilization of the CPU where that task is currently enqueued.
- *
- * Only for tasks we track a moving average of the past instantaneous
- * estimated utilization. This allows to absorb sporadic drops in utilization
- * of an otherwise almost periodic task.
- *
- * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
- * updates. When a task is dequeued, its util_est should not be updated if its
- * util_avg has not been updated in the meantime.
- * This information is mapped into the MSB bit of util_est.enqueued at dequeue
- * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg
- * for a task) it is safe to use MSB.
- */
-struct util_est {
- unsigned int enqueued;
- unsigned int ewma;
-#define UTIL_EST_WEIGHT_SHIFT 2
-#define UTIL_AVG_UNCHANGED 0x80000000
-} __attribute__((__aligned__(sizeof(u64))));
-
/*
* The load/runnable/util_avg accumulates an infinite geometric series
* (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
@@ -511,9 +477,20 @@ struct sched_avg {
unsigned long load_avg;
unsigned long runnable_avg;
unsigned long util_avg;
- struct util_est util_est;
+ unsigned int util_est;
} ____cacheline_aligned;
+/*
+ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
+ * updates. When a task is dequeued, its util_est should not be updated if its
+ * util_avg has not been updated in the meantime.
+ * This information is mapped into the MSB bit of util_est at dequeue time.
+ * Since max value of util_est for a task is 1024 (PELT util_avg for a task)
+ * it is safe to use MSB.
+ */
+#define UTIL_EST_WEIGHT_SHIFT 2
+#define UTIL_AVG_UNCHANGED 0x80000000
+
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
@@ -531,7 +508,7 @@ struct sched_statistics {
u64 block_max;
s64 sum_block_runtime;
- u64 exec_max;
+ s64 exec_max;
u64 slice_max;
u64 nr_migrations_cold;
@@ -561,7 +538,7 @@ struct sched_entity {
struct load_weight load;
struct rb_node run_node;
u64 deadline;
- u64 min_deadline;
+ u64 min_vruntime;
struct list_head group_node;
unsigned int on_rq;
@@ -615,6 +592,9 @@ struct sched_rt_entity {
#endif
} __randomize_layout;
+typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *);
+typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *);
+
struct sched_dl_entity {
struct rb_node rb_node;
@@ -662,6 +642,7 @@ struct sched_dl_entity {
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
+ unsigned int dl_server : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -676,7 +657,20 @@ struct sched_dl_entity {
* timer is needed to decrease the active utilization at the correct
* time.
*/
- struct hrtimer inactive_timer;
+ struct hrtimer inactive_timer;
+
+ /*
+ * Bits for DL-server functionality. Also see the comment near
+ * dl_server_update().
+ *
+ * @rq the runqueue this server is for
+ *
+ * @server_has_tasks() returns true if @server_pick() would return a
+ * runnable task.
+ */
+ struct rq *rq;
+ dl_server_has_tasks_f server_has_tasks;
+ dl_server_pick_f server_pick;
#ifdef CONFIG_RT_MUTEXES
/*
@@ -803,6 +797,7 @@ struct task_struct {
struct sched_entity se;
struct sched_rt_entity rt;
struct sched_dl_entity dl;
+ struct sched_dl_entity *dl_server;
const struct sched_class *sched_class;
#ifdef CONFIG_SCHED_CORE
@@ -1841,9 +1836,7 @@ extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
-#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
struct task_struct task;
-#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
#endif
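
To make the MSB encoding above concrete, here is a small hypothetical pair of helpers; only UTIL_AVG_UNCHANGED and the fact that util_est is now a plain unsigned int come from this hunk, the helper names are assumptions.

#include <linux/sched.h>

/* Strip the synchronization flag to get the estimated utilization value. */
static inline unsigned int util_est_value(const struct sched_avg *sa)
{
	return READ_ONCE(sa->util_est) & ~UTIL_AVG_UNCHANGED;
}

/* Tag a freshly sampled estimate as "in sync with util_avg" at dequeue. */
static inline unsigned int util_est_tag_unchanged(unsigned int est)
{
	return est | UTIL_AVG_UNCHANGED;
}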
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index fe1a46f30d24..2b461129d1fa 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -2,6 +2,7 @@
#define _LINUX_SCHED_ISOLATION_H
#include <linux/cpumask.h>
+#include <linux/cpuset.h>
#include <linux/init.h>
#include <linux/tick.h>
@@ -67,7 +68,8 @@ static inline bool housekeeping_cpu(int cpu, enum hk_type type)
static inline bool cpu_is_isolated(int cpu)
{
return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN) ||
- !housekeeping_test_cpu(cpu, HK_TYPE_TICK);
+ !housekeeping_test_cpu(cpu, HK_TYPE_TICK) ||
+ cpuset_cpu_is_isolated(cpu);
}
#endif /* _LINUX_SCHED_ISOLATION_H */
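
An illustrative, hypothetical caller of the extended cpu_is_isolated() check above: per-CPU maintenance work that leaves isolated CPUs (nohz_full, isolcpus, or isolated cpuset partitions) untouched. The work item and loop are assumptions.

#include <linux/cpumask.h>
#include <linux/sched/isolation.h>
#include <linux/workqueue.h>

static void queue_maintenance_work(struct work_struct __percpu *works)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu_is_isolated(cpu))
			continue;	/* do not disturb isolated CPUs */
		queue_work_on(cpu, system_wq, per_cpu_ptr(works, cpu));
	}
}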
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index b847d8fa75a9..4b7664c56208 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -433,7 +433,6 @@ static inline bool fault_signal_pending(vm_fault_t fault_flags,
* This is required every time the blocked sigset_t changes.
* callers must hold sighand->siglock.
*/
-extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);
@@ -647,6 +646,9 @@ extern bool current_is_single_threaded(void);
#define while_each_thread(g, t) \
while ((t = next_thread(t)) != g)
+#define for_other_threads(p, t) \
+ for (t = p; (t = next_thread(t)) != p; )
+
#define __for_each_thread(signal, t) \
list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \
lockdep_is_held(&tasklist_lock))
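
A minimal, hypothetical use of the new for_other_threads() iterator above; the pending-signal count is only an example and assumes the caller already holds the locking required to walk the thread list.

#include <linux/sched/signal.h>

static int count_other_threads_pending(struct task_struct *p)
{
	struct task_struct *t;
	int pending = 0;

	for_other_threads(p, t)
		if (signal_pending(t))
			pending++;

	return pending;
}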
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 538cdfbe895f..d362aacf9f89 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -228,4 +228,6 @@ static inline void task_unlock(struct task_struct *p)
spin_unlock(&p->alloc_lock);
}
+DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
+
#endif /* _LINUX_SCHED_TASK_H */
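
A short sketch of the new task_lock guard in use; the copied field is only an example and the function name is hypothetical.

#include <linux/cleanup.h>
#include <linux/sched/task.h>
#include <linux/string.h>

static void read_task_comm_sketch(char *buf, size_t size,
				  struct task_struct *p)
{
	guard(task_lock)(p);		/* task_unlock(p) runs at scope exit */
	strscpy(buf, p->comm, size);
}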
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index de545ba85218..a6e04b4a21d7 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -279,6 +279,14 @@ void arch_update_thermal_pressure(const struct cpumask *cpus,
{ }
#endif
+#ifndef arch_scale_freq_ref
+static __always_inline
+unsigned int arch_scale_freq_ref(int cpu)
+{
+ return 0;
+}
+#endif
+
static inline int task_node(const struct task_struct *p)
{
return cpu_to_node(task_cpu(p));
diff --git a/include/linux/security.h b/include/linux/security.h
index 1d1df326c881..d0eb20f90b26 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -32,6 +32,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/sockptr.h>
+#include <uapi/linux/lsm.h>
struct linux_binprm;
struct cred;
@@ -60,6 +61,7 @@ struct fs_parameter;
enum fs_value_type;
struct watch;
struct watch_notification;
+struct lsm_ctx;
/* Default (no) options for the capable function */
#define CAP_OPT_NONE 0x0
@@ -138,6 +140,8 @@ enum lockdown_reason {
};
extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1];
+extern u32 lsm_active_cnt;
+extern const struct lsm_id *lsm_idlist[];
/* These functions are in security/commoncap.c */
extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
@@ -261,6 +265,7 @@ int unregister_blocking_lsm_notifier(struct notifier_block *nb);
/* prototypes */
extern int security_init(void);
extern int early_security_init(void);
+extern u64 lsm_name_to_attr(const char *name);
/* Security operations */
int security_binder_set_context_mgr(const struct cred *mgr);
@@ -389,6 +394,8 @@ int security_file_permission(struct file *file, int mask);
int security_file_alloc(struct file *file);
void security_file_free(struct file *file);
int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int security_file_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg);
int security_mmap_file(struct file *file, unsigned long prot,
unsigned long flags);
int security_mmap_addr(unsigned long addr);
@@ -470,10 +477,13 @@ int security_sem_semctl(struct kern_ipc_perm *sma, int cmd);
int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
unsigned nsops, int alter);
void security_d_instantiate(struct dentry *dentry, struct inode *inode);
-int security_getprocattr(struct task_struct *p, const char *lsm, const char *name,
+int security_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
+ size_t __user *size, u32 flags);
+int security_setselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
+ size_t size, u32 flags);
+int security_getprocattr(struct task_struct *p, int lsmid, const char *name,
char **value);
-int security_setprocattr(const char *lsm, const char *name, void *value,
- size_t size);
+int security_setprocattr(int lsmid, const char *name, void *value, size_t size);
int security_netlink_send(struct sock *sk, struct sk_buff *skb);
int security_ismaclabel(const char *name);
int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
@@ -484,6 +494,8 @@ int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
int security_locked_down(enum lockdown_reason what);
+int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, size_t *uctx_len,
+ void *val, size_t val_len, u64 id, u64 flags);
#else /* CONFIG_SECURITY */
static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data)
@@ -501,6 +513,11 @@ static inline int unregister_blocking_lsm_notifier(struct notifier_block *nb)
return 0;
}
+static inline u64 lsm_name_to_attr(const char *name)
+{
+ return LSM_ATTR_UNDEF;
+}
+
static inline void security_free_mnt_opts(void **mnt_opts)
{
}
@@ -987,6 +1004,13 @@ static inline int security_file_ioctl(struct file *file, unsigned int cmd,
return 0;
}
+static inline int security_file_ioctl_compat(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ return 0;
+}
+
static inline int security_mmap_file(struct file *file, unsigned long prot,
unsigned long flags)
{
@@ -1337,14 +1361,28 @@ static inline void security_d_instantiate(struct dentry *dentry,
struct inode *inode)
{ }
-static inline int security_getprocattr(struct task_struct *p, const char *lsm,
+static inline int security_getselfattr(unsigned int attr,
+ struct lsm_ctx __user *ctx,
+ size_t __user *size, u32 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int security_setselfattr(unsigned int attr,
+ struct lsm_ctx __user *ctx,
+ size_t size, u32 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int security_getprocattr(struct task_struct *p, int lsmid,
const char *name, char **value)
{
return -EINVAL;
}
-static inline int security_setprocattr(const char *lsm, char *name,
- void *value, size_t size)
+static inline int security_setprocattr(int lsmid, char *name, void *value,
+ size_t size)
{
return -EINVAL;
}
@@ -1395,6 +1433,12 @@ static inline int security_locked_down(enum lockdown_reason what)
{
return 0;
}
+static inline int lsm_fill_user_ctx(struct lsm_ctx __user *uctx,
+ size_t *uctx_len, void *val, size_t val_len,
+ u64 id, u64 flags)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_SECURITY */
#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
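
As a loose sketch of how an LSM might use the new lsm_fill_user_ctx() helper from its getselfattr hook; the LSM id, the attribute handling and the return convention are treated as assumptions rather than the documented API.

#include <linux/security.h>
#include <linux/string.h>

static int example_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
			       size_t *size, u32 flags)
{
	const char *label = "example_label";
	int rc;

	if (attr != LSM_ATTR_CURRENT)
		return -EOPNOTSUPP;

	/* Copies the value to userspace and updates *size accordingly. */
	rc = lsm_fill_user_ctx(ctx, size, (void *)label, strlen(label) + 1,
			       LSM_ID_UNDEF /* placeholder id */, 0);
	return rc < 0 ? rc : 1;	/* assumed: hooks report the attribute count */
}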
diff --git a/include/linux/slab.h b/include/linux/slab.h
index d6d6ffeeb9a2..b5f5ee8308d0 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -24,7 +24,7 @@
/*
* Flags to pass to kmem_cache_create().
- * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
+ * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled; otherwise they are no-ops.
*/
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
@@ -302,25 +302,15 @@ static inline unsigned int arch_slab_minalign(void)
* Kmalloc array related definitions
*/
-#ifdef CONFIG_SLAB
/*
- * SLAB and SLUB directly allocates requests fitting in to an order-1 page
+ * SLUB directly allocates requests fitting into an order-1 page
* (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
-#ifndef KMALLOC_SHIFT_LOW
-#define KMALLOC_SHIFT_LOW 5
-#endif
-#endif
-
-#ifdef CONFIG_SLUB
-#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX (MAX_PAGE_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
-#endif
/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
@@ -788,12 +778,4 @@ size_t kmalloc_size_roundup(size_t size);
void __init kmem_cache_init_late(void);
-#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
-int slab_prepare_cpu(unsigned int cpu);
-int slab_dead_cpu(unsigned int cpu);
-#else
-#define slab_prepare_cpu NULL
-#define slab_dead_cpu NULL
-#endif
-
#endif /* _LINUX_SLAB_H */
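
A small, hypothetical example of kmalloc_size_roundup() (declared above) being used to record the real usable capacity of an allocation; the structure and helper are assumptions.

#include <linux/slab.h>

struct grow_buf {
	void *data;
	size_t cap;
};

static int grow_buf_alloc(struct grow_buf *b, size_t want, gfp_t gfp)
{
	size_t cap = kmalloc_size_roundup(want);

	b->data = kmalloc(cap, gfp);
	if (!b->data)
		return -ENOMEM;
	b->cap = cap;	/* the whole kmalloc bucket is usable, not just @want */
	return 0;
}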
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
deleted file mode 100644
index a61e7d55d0d3..000000000000
--- a/include/linux/slab_def.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SLAB_DEF_H
-#define _LINUX_SLAB_DEF_H
-
-#include <linux/kfence.h>
-#include <linux/reciprocal_div.h>
-
-/*
- * Definitions unique to the original Linux SLAB allocator.
- */
-
-struct kmem_cache {
- struct array_cache __percpu *cpu_cache;
-
-/* 1) Cache tunables. Protected by slab_mutex */
- unsigned int batchcount;
- unsigned int limit;
- unsigned int shared;
-
- unsigned int size;
- struct reciprocal_value reciprocal_buffer_size;
-/* 2) touched by every alloc & free from the backend */
-
- slab_flags_t flags; /* constant flags */
- unsigned int num; /* # of objs per slab */
-
-/* 3) cache_grow/shrink */
- /* order of pgs per slab (2^n) */
- unsigned int gfporder;
-
- /* force GFP flags, e.g. GFP_DMA */
- gfp_t allocflags;
-
- size_t colour; /* cache colouring range */
- unsigned int colour_off; /* colour offset */
- unsigned int freelist_size;
-
- /* constructor func */
- void (*ctor)(void *obj);
-
-/* 4) cache creation/removal */
- const char *name;
- struct list_head list;
- int refcount;
- int object_size;
- int align;
-
-/* 5) statistics */
-#ifdef CONFIG_DEBUG_SLAB
- unsigned long num_active;
- unsigned long num_allocations;
- unsigned long high_mark;
- unsigned long grown;
- unsigned long reaped;
- unsigned long errors;
- unsigned long max_freeable;
- unsigned long node_allocs;
- unsigned long node_frees;
- unsigned long node_overflow;
- atomic_t allochit;
- atomic_t allocmiss;
- atomic_t freehit;
- atomic_t freemiss;
-
- /*
- * If debugging is enabled, then the allocator can add additional
- * fields and/or padding to every object. 'size' contains the total
- * object size including these internal fields, while 'obj_offset'
- * and 'object_size' contain the offset to the user object and its
- * size.
- */
- int obj_offset;
-#endif /* CONFIG_DEBUG_SLAB */
-
-#ifdef CONFIG_KASAN_GENERIC
- struct kasan_cache kasan_info;
-#endif
-
-#ifdef CONFIG_SLAB_FREELIST_RANDOM
- unsigned int *random_seq;
-#endif
-
-#ifdef CONFIG_HARDENED_USERCOPY
- unsigned int useroffset; /* Usercopy region offset */
- unsigned int usersize; /* Usercopy region size */
-#endif
-
- struct kmem_cache_node *node[MAX_NUMNODES];
-};
-
-static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
- void *x)
-{
- void *object = x - (x - slab->s_mem) % cache->size;
- void *last_object = slab->s_mem + (cache->num - 1) * cache->size;
-
- if (unlikely(object > last_object))
- return last_object;
- else
- return object;
-}
-
-/*
- * We want to avoid an expensive divide : (offset / cache->size)
- * Using the fact that size is a constant for a particular cache,
- * we can replace (offset / cache->size) by
- * reciprocal_divide(offset, cache->reciprocal_buffer_size)
- */
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct slab *slab, void *obj)
-{
- u32 offset = (obj - slab->s_mem);
- return reciprocal_divide(offset, cache->reciprocal_buffer_size);
-}
-
-static inline int objs_per_slab(const struct kmem_cache *cache,
- const struct slab *slab)
-{
- if (is_kfence_address(slab_address(slab)))
- return 1;
- return cache->num;
-}
-
-#endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
deleted file mode 100644
index deb90cf4bffb..000000000000
--- a/include/linux/slub_def.h
+++ /dev/null
@@ -1,204 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SLUB_DEF_H
-#define _LINUX_SLUB_DEF_H
-
-/*
- * SLUB : A Slab allocator without object queues.
- *
- * (C) 2007 SGI, Christoph Lameter
- */
-#include <linux/kfence.h>
-#include <linux/kobject.h>
-#include <linux/reciprocal_div.h>
-#include <linux/local_lock.h>
-
-enum stat_item {
- ALLOC_FASTPATH, /* Allocation from cpu slab */
- ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
- FREE_FASTPATH, /* Free to cpu slab */
- FREE_SLOWPATH, /* Freeing not to cpu slab */
- FREE_FROZEN, /* Freeing to frozen slab */
- FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
- FREE_REMOVE_PARTIAL, /* Freeing removes last object */
- ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */
- ALLOC_SLAB, /* Cpu slab acquired from page allocator */
- ALLOC_REFILL, /* Refill cpu slab from slab freelist */
- ALLOC_NODE_MISMATCH, /* Switching cpu slab */
- FREE_SLAB, /* Slab freed to the page allocator */
- CPUSLAB_FLUSH, /* Abandoning of the cpu slab */
- DEACTIVATE_FULL, /* Cpu slab was full when deactivated */
- DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */
- DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */
- DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */
- DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
- DEACTIVATE_BYPASS, /* Implicit deactivation */
- ORDER_FALLBACK, /* Number of times fallback was necessary */
- CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
- CMPXCHG_DOUBLE_FAIL, /* Number of times that cmpxchg double did not match */
- CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */
- CPU_PARTIAL_FREE, /* Refill cpu partial on free */
- CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */
- CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */
- NR_SLUB_STAT_ITEMS
-};
-
-#ifndef CONFIG_SLUB_TINY
-/*
- * When changing the layout, make sure freelist and tid are still compatible
- * with this_cpu_cmpxchg_double() alignment requirements.
- */
-struct kmem_cache_cpu {
- union {
- struct {
- void **freelist; /* Pointer to next available object */
- unsigned long tid; /* Globally unique transaction id */
- };
- freelist_aba_t freelist_tid;
- };
- struct slab *slab; /* The slab from which we are allocating */
-#ifdef CONFIG_SLUB_CPU_PARTIAL
- struct slab *partial; /* Partially allocated frozen slabs */
-#endif
- local_lock_t lock; /* Protects the fields above */
-#ifdef CONFIG_SLUB_STATS
- unsigned stat[NR_SLUB_STAT_ITEMS];
-#endif
-};
-#endif /* CONFIG_SLUB_TINY */
-
-#ifdef CONFIG_SLUB_CPU_PARTIAL
-#define slub_percpu_partial(c) ((c)->partial)
-
-#define slub_set_percpu_partial(c, p) \
-({ \
- slub_percpu_partial(c) = (p)->next; \
-})
-
-#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c))
-#else
-#define slub_percpu_partial(c) NULL
-
-#define slub_set_percpu_partial(c, p)
-
-#define slub_percpu_partial_read_once(c) NULL
-#endif // CONFIG_SLUB_CPU_PARTIAL
-
-/*
- * Word size structure that can be atomically updated or read and that
- * contains both the order and the number of objects that a slab of the
- * given order would contain.
- */
-struct kmem_cache_order_objects {
- unsigned int x;
-};
-
-/*
- * Slab cache management.
- */
-struct kmem_cache {
-#ifndef CONFIG_SLUB_TINY
- struct kmem_cache_cpu __percpu *cpu_slab;
-#endif
- /* Used for retrieving partial slabs, etc. */
- slab_flags_t flags;
- unsigned long min_partial;
- unsigned int size; /* The size of an object including metadata */
- unsigned int object_size;/* The size of an object without metadata */
- struct reciprocal_value reciprocal_size;
- unsigned int offset; /* Free pointer offset */
-#ifdef CONFIG_SLUB_CPU_PARTIAL
- /* Number of per cpu partial objects to keep around */
- unsigned int cpu_partial;
- /* Number of per cpu partial slabs to keep around */
- unsigned int cpu_partial_slabs;
-#endif
- struct kmem_cache_order_objects oo;
-
- /* Allocation and freeing of slabs */
- struct kmem_cache_order_objects min;
- gfp_t allocflags; /* gfp flags to use on each alloc */
- int refcount; /* Refcount for slab cache destroy */
- void (*ctor)(void *);
- unsigned int inuse; /* Offset to metadata */
- unsigned int align; /* Alignment */
- unsigned int red_left_pad; /* Left redzone padding size */
- const char *name; /* Name (only for display!) */
- struct list_head list; /* List of slab caches */
-#ifdef CONFIG_SYSFS
- struct kobject kobj; /* For sysfs */
-#endif
-#ifdef CONFIG_SLAB_FREELIST_HARDENED
- unsigned long random;
-#endif
-
-#ifdef CONFIG_NUMA
- /*
- * Defragmentation by allocating from a remote node.
- */
- unsigned int remote_node_defrag_ratio;
-#endif
-
-#ifdef CONFIG_SLAB_FREELIST_RANDOM
- unsigned int *random_seq;
-#endif
-
-#ifdef CONFIG_KASAN_GENERIC
- struct kasan_cache kasan_info;
-#endif
-
-#ifdef CONFIG_HARDENED_USERCOPY
- unsigned int useroffset; /* Usercopy region offset */
- unsigned int usersize; /* Usercopy region size */
-#endif
-
- struct kmem_cache_node *node[MAX_NUMNODES];
-};
-
-#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
-#define SLAB_SUPPORTS_SYSFS
-void sysfs_slab_unlink(struct kmem_cache *);
-void sysfs_slab_release(struct kmem_cache *);
-#else
-static inline void sysfs_slab_unlink(struct kmem_cache *s)
-{
-}
-static inline void sysfs_slab_release(struct kmem_cache *s)
-{
-}
-#endif
-
-void *fixup_red_left(struct kmem_cache *s, void *p);
-
-static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
- void *x) {
- void *object = x - (x - slab_address(slab)) % cache->size;
- void *last_object = slab_address(slab) +
- (slab->objects - 1) * cache->size;
- void *result = (unlikely(object > last_object)) ? last_object : object;
-
- result = fixup_red_left(cache, result);
- return result;
-}
-
-/* Determine object index from a given position */
-static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
- void *addr, void *obj)
-{
- return reciprocal_divide(kasan_reset_tag(obj) - addr,
- cache->reciprocal_size);
-}
-
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct slab *slab, void *obj)
-{
- if (is_kfence_address(obj))
- return 0;
- return __obj_to_index(cache, slab_address(slab), obj);
-}
-
-static inline int objs_per_slab(const struct kmem_cache *cache,
- const struct slab *slab)
-{
- return slab->objects;
-}
-#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 6b0a7dc48a4b..f866d5c8ed32 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -233,6 +233,8 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
* limitations)
* @supports_op: check if an operation is supported by the controller
* @exec_op: execute a SPI memory operation
+ * Not all drivers provide supports_op(), so exec_op() can return
+ * -EOPNOTSUPP if the op is not supported by the driver/controller.
* @get_name: get a custom name for the SPI mem device from the controller.
* This might be needed if the controller driver has been ported
* to use the SPI mem layer and a custom name is used to keep
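
To illustrate the @supports_op note above, a hypothetical caller that probes an op before executing it; the JEDEC-ID read is only an example built with the standard spi-mem op macros.

#include <linux/spi/spi-mem.h>

static int read_jedec_id_sketch(struct spi_mem *mem, u8 *id)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(3, id, 1));

	if (!spi_mem_supports_op(mem, &op))
		return -EOPNOTSUPP;

	/* May still fail with -EOPNOTSUPP: not every driver has ->supports_op(). */
	return spi_mem_exec_op(mem, &op);
}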
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 255a0562aea5..471fe2ff9066 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -20,6 +20,9 @@
#include <uapi/linux/spi/spi.h>
+/* Max no. of CS supported per spi device */
+#define SPI_CS_CNT_MAX 4
+
struct dma_chan;
struct software_node;
struct ptp_system_timestamp;
@@ -132,7 +135,8 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* @max_speed_hz: Maximum clock rate to be used with this chip
* (on this board); may be changed by the device's driver.
* The spi_transfer.speed_hz can override this for each transfer.
- * @chip_select: Chipselect, distinguishing chips handled by @controller.
+ * @chip_select: Array of physical chipselects; spi->chip_select[i] gives
+ * the corresponding physical CS for logical CS i.
* @mode: The spi mode defines how data is clocked out and in.
* This may be changed by the device's driver.
* The "active low" default for chipselect mode can be overridden
@@ -157,8 +161,8 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* the device will bind to the named driver and only the named driver.
* Do not set directly, because core frees it; use driver_set_override() to
* set or clear it.
- * @cs_gpiod: GPIO descriptor of the chipselect line (optional, NULL when
- * not using a GPIO line)
+ * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines
+ * (optional, NULL when not using a GPIO line)
* @word_delay: delay to be inserted between consecutive
* words of a transfer
* @cs_setup: delay to be introduced by the controller after CS is asserted
@@ -167,6 +171,7 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* deasserted. If @cs_change_delay is used from @spi_transfer, then the
* two delays will be added up.
* @pcpu_statistics: statistics for the spi_device
+ * @cs_index_mask: Bit mask of the active chipselect(s) in the chipselect array
*
* A @spi_device is used to interchange data between an SPI slave
* (usually a discrete chip) and CPU memory.
@@ -182,7 +187,7 @@ struct spi_device {
struct spi_controller *controller;
struct spi_controller *master; /* Compatibility layer */
u32 max_speed_hz;
- u8 chip_select;
+ u8 chip_select[SPI_CS_CNT_MAX];
u8 bits_per_word;
bool rt;
#define SPI_NO_TX BIT(31) /* No transmit wire */
@@ -213,7 +218,7 @@ struct spi_device {
void *controller_data;
char modalias[SPI_NAME_SIZE];
const char *driver_override;
- struct gpio_desc *cs_gpiod; /* Chip select GPIO descriptor */
+ struct gpio_desc *cs_gpiod[SPI_CS_CNT_MAX]; /* Chip select gpio desc */
struct spi_delay word_delay; /* Inter-word delay */
/* CS delays */
struct spi_delay cs_setup;
@@ -223,6 +228,13 @@ struct spi_device {
/* The statistics */
struct spi_statistics __percpu *pcpu_statistics;
+ /* Bit mask of the chipselect(s) that the driver needs to use from
+ * the chip select array. When the controller is capable of handling
+ * multiple chip selects and memories are connected in parallel,
+ * more than one bit needs to be set in cs_index_mask.
+ */
+ u32 cs_index_mask : SPI_CS_CNT_MAX;
+
/*
* Likely need more hooks for more protocol options affecting how
* the controller talks to each chip, like:
@@ -279,22 +291,33 @@ static inline void *spi_get_drvdata(const struct spi_device *spi)
static inline u8 spi_get_chipselect(const struct spi_device *spi, u8 idx)
{
- return spi->chip_select;
+ return spi->chip_select[idx];
}
static inline void spi_set_chipselect(struct spi_device *spi, u8 idx, u8 chipselect)
{
- spi->chip_select = chipselect;
+ spi->chip_select[idx] = chipselect;
}
static inline struct gpio_desc *spi_get_csgpiod(const struct spi_device *spi, u8 idx)
{
- return spi->cs_gpiod;
+ return spi->cs_gpiod[idx];
}
static inline void spi_set_csgpiod(struct spi_device *spi, u8 idx, struct gpio_desc *csgpiod)
{
- spi->cs_gpiod = csgpiod;
+ spi->cs_gpiod[idx] = csgpiod;
+}
+
+static inline bool spi_is_csgpiod(struct spi_device *spi)
+{
+ u8 idx;
+
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ if (spi_get_csgpiod(spi, idx))
+ return true;
+ }
+ return false;
}
/**
@@ -399,6 +422,8 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* @bus_lock_spinlock: spinlock for SPI bus locking
* @bus_lock_mutex: mutex for exclusion of multiple callers
* @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
+ * @multi_cs_cap: indicates that the SPI Controller can assert/de-assert
+ * more than one chip select at once.
* @setup: updates the device mode and clocking records used by a
* device's SPI controller; protocol code may call this. This
* must fail if an unrecognized or unsupported mode is requested.
@@ -461,10 +486,13 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* - return 1 if the transfer is still in progress. When
* the driver is finished with this transfer it must
* call spi_finalize_current_transfer() so the subsystem
- * can issue the next transfer. Note: transfer_one and
- * transfer_one_message are mutually exclusive; when both
- * are set, the generic subsystem does not call your
- * transfer_one callback.
+ * can issue the next transfer. If the transfer fails, the
+ * driver must set the SPI_TRANS_FAIL_IO flag in
+ * spi_transfer->error before calling
+ * spi_finalize_current_transfer().
+ * Note: transfer_one and transfer_one_message are mutually
+ * exclusive; when both are set, the generic subsystem does
+ * not call your transfer_one callback.
* @handle_err: the subsystem calls the driver to handle an error that occurs
* in the generic implementation of transfer_one_message().
* @mem_ops: optimized/dedicated operations for interactions with SPI memory.
@@ -567,6 +595,11 @@ struct spi_controller {
#define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */
#define SPI_CONTROLLER_GPIO_SS BIT(5) /* GPIO CS must select slave */
#define SPI_CONTROLLER_SUSPENDED BIT(6) /* Currently suspended */
+ /*
+ * The SPI controller has multi-chip-select capability and can
+ * assert/de-assert more than one chip select at once.
+ */
+#define SPI_CONTROLLER_MULTI_CS BIT(7)
/* Flag indicating if the allocation of this struct is devres-managed */
bool devm_allocated;
@@ -677,7 +710,8 @@ struct spi_controller {
bool rt;
bool auto_runtime_pm;
bool cur_msg_mapped;
- char last_cs;
+ char last_cs[SPI_CS_CNT_MAX];
+ char last_cs_index_mask;
bool last_cs_mode_high;
bool fallback;
struct completion xfer_completion;
@@ -1040,6 +1074,7 @@ struct spi_transfer {
unsigned len;
#define SPI_TRANS_FAIL_NO_START BIT(0)
+#define SPI_TRANS_FAIL_IO BIT(1)
u16 error;
dma_addr_t tx_dma;
@@ -1638,8 +1673,6 @@ spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
/* Compatibility layer */
#define spi_master spi_controller
-#define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX
-
#define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr)
#define spi_master_set_devdata(_ctlr, _data) \
spi_controller_set_devdata(_ctlr, _data)
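
A hedged sketch of walking the new chip-select array from a controller driver; only spi_is_csgpiod(), spi_get_csgpiod(), SPI_CS_CNT_MAX and cs_index_mask come from this hunk, the assert helper itself is hypothetical.

#include <linux/bits.h>
#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>

static void assert_selected_cs_gpios(struct spi_device *spi, bool enable)
{
	u8 idx;

	if (!spi_is_csgpiod(spi))
		return;

	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
		struct gpio_desc *cs = spi_get_csgpiod(spi, idx);

		if (!cs || !(spi->cs_index_mask & BIT(idx)))
			continue;
		/* Active-high/low handling is left to the real core code. */
		gpiod_set_value_cansleep(cs, enable);
	}
}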
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 0c71f06454d9..eaac8b0da25b 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -538,6 +538,8 @@ DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
raw_spin_lock(_T->lock),
raw_spin_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))
+
DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
raw_spin_unlock(_T->lock))
@@ -546,23 +548,62 @@ DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
raw_spin_lock_irq(_T->lock),
raw_spin_unlock_irq(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
+
DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
raw_spin_lock_irqsave(_T->lock, _T->flags),
raw_spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
+ raw_spin_trylock_irqsave(_T->lock, _T->flags))
+
DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
spin_lock(_T->lock),
spin_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
+
DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
spin_lock_irq(_T->lock),
spin_unlock_irq(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
+ spin_trylock_irq(_T->lock))
+
DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
spin_lock_irqsave(_T->lock, _T->flags),
spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
+ spin_trylock_irqsave(_T->lock, _T->flags))
+
+DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
+ read_lock(_T->lock),
+ read_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
+ read_lock_irq(_T->lock),
+ read_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
+ read_lock_irqsave(_T->lock, _T->flags),
+ read_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
+ write_lock(_T->lock),
+ write_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
+ write_lock_irq(_T->lock),
+ write_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
+ write_lock_irqsave(_T->lock, _T->flags),
+ write_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */
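
A brief sketch combining the new rwlock guards and the spinlock trylock conditionals with the cleanup.h scoped helpers; the counters being protected are hypothetical.

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(stats_lock);
static unsigned long stats_hits;

static unsigned long read_hits(void)
{
	/* read_unlock() runs automatically when the scope is left. */
	scoped_guard(read_lock, &stats_lock)
		return stats_hits;
	return 0;	/* keeps the compiler happy */
}

static void bump_hits(void)
{
	guard(write_lock)(&stats_lock);
	stats_hits++;
}

static DEFINE_SPINLOCK(fifo_lock);
static unsigned int fifo_fill;

static bool try_push(void)
{
	/* spin_trylock() based: bails out instead of spinning. */
	scoped_cond_guard(spinlock_try, return false, &fifo_lock) {
		fifo_fill++;
	}
	return true;
}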
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 6c461573434d..9dec4861d09f 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -68,28 +68,37 @@ typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
typedef int (splice_direct_actor)(struct pipe_inode_info *,
struct splice_desc *);
-extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
- loff_t *, size_t, unsigned int,
- splice_actor *);
-extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
- struct splice_desc *, splice_actor *);
-extern ssize_t splice_to_pipe(struct pipe_inode_info *,
- struct splice_pipe_desc *);
-extern ssize_t add_to_pipe(struct pipe_inode_info *,
- struct pipe_buffer *);
-long vfs_splice_read(struct file *in, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
- unsigned int flags);
-extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
- splice_direct_actor *);
-extern long do_splice(struct file *in, loff_t *off_in,
- struct file *out, loff_t *off_out,
- size_t len, unsigned int flags);
+ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags,
+ splice_actor *actor);
+ssize_t __splice_from_pipe(struct pipe_inode_info *pipe,
+ struct splice_desc *sd, splice_actor *actor);
+ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+ struct splice_pipe_desc *spd);
+ssize_t add_to_pipe(struct pipe_inode_info *pipe, struct pipe_buffer *buf);
+ssize_t vfs_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
+ssize_t splice_direct_to_actor(struct file *file, struct splice_desc *sd,
+ splice_direct_actor *actor);
+ssize_t do_splice(struct file *in, loff_t *off_in, struct file *out,
+ loff_t *off_out, size_t len, unsigned int flags);
+ssize_t do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
+ loff_t *opos, size_t len, unsigned int flags);
+ssize_t splice_file_range(struct file *in, loff_t *ppos, struct file *out,
+ loff_t *opos, size_t len);
-extern long do_tee(struct file *in, struct file *out, size_t len,
- unsigned int flags);
-extern ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out,
- loff_t *ppos, size_t len, unsigned int flags);
+static inline long splice_copy_file_range(struct file *in, loff_t pos_in,
+ struct file *out, loff_t pos_out,
+ size_t len)
+{
+ return splice_file_range(in, &pos_in, out, &pos_out, len);
+}
+
+ssize_t do_tee(struct file *in, struct file *out, size_t len,
+ unsigned int flags);
+ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags);
/*
* for dynamic pipe sizing
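
An illustrative ->copy_file_range() fallback built on the splice_copy_file_range() wrapper above; the filesystem hook and its flag handling are assumptions.

#include <linux/fs.h>
#include <linux/splice.h>

static ssize_t example_copy_file_range(struct file *in, loff_t pos_in,
				       struct file *out, loff_t pos_out,
				       size_t len, unsigned int flags)
{
	if (flags)
		return -EINVAL;

	/* No reflink/server-side copy available: bounce through a pipe. */
	return splice_copy_file_range(in, pos_in, out, pos_out, len);
}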
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index e58306783d8e..adcbb8f23600 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -11,8 +11,6 @@
* SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free
* stack traces often repeat, using stack depot allows saving about 100x space.
*
- * Stack traces are never removed from the stack depot.
- *
* Author: Alexander Potapenko <glider@google.com>
* Copyright (C) 2016 Google, Inc.
*
@@ -32,6 +30,18 @@ typedef u32 depot_stack_handle_t;
*/
#define STACK_DEPOT_EXTRA_BITS 5
+typedef u32 depot_flags_t;
+
+/*
+ * Flags that can be passed to stack_depot_save_flags(); see the comment next
+ * to its declaration for more details.
+ */
+#define STACK_DEPOT_FLAG_CAN_ALLOC ((depot_flags_t)0x0001)
+#define STACK_DEPOT_FLAG_GET ((depot_flags_t)0x0002)
+
+#define STACK_DEPOT_FLAGS_NUM 2
+#define STACK_DEPOT_FLAGS_MASK ((depot_flags_t)((1 << STACK_DEPOT_FLAGS_NUM) - 1))
+
/*
* Using stack depot requires its initialization, which can be done in 3 ways:
*
@@ -69,31 +79,39 @@ static inline int stack_depot_early_init(void) { return 0; }
#endif
/**
- * __stack_depot_save - Save a stack trace to stack depot
+ * stack_depot_save_flags - Save a stack trace to stack depot
*
* @entries: Pointer to the stack trace
* @nr_entries: Number of frames in the stack
* @alloc_flags: Allocation GFP flags
- * @can_alloc: Allocate stack pools (increased chance of failure if false)
+ * @depot_flags: Stack depot flags
+ *
+ * Saves a stack trace from @entries array of size @nr_entries.
+ *
+ * If STACK_DEPOT_FLAG_CAN_ALLOC is set in @depot_flags, stack depot can
+ * replenish the stack pools in case no space is left (allocates using GFP
+ * flags of @alloc_flags). Otherwise, stack depot avoids any allocations and
+ * fails if no space is left to store the stack trace.
*
- * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
- * %true, stack depot can replenish the stack pools in case no space is left
- * (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
- * any allocations and fails if no space is left to store the stack trace.
+ * If STACK_DEPOT_FLAG_GET is set in @depot_flags, stack depot will increment
+ * the refcount on the saved stack trace if it already exists in stack depot.
+ * Users of this flag must also call stack_depot_put() when keeping the stack
+ * trace is no longer required to avoid overflowing the refcount.
*
* If the provided stack trace comes from the interrupt context, only the part
* up to the interrupt entry is saved.
*
- * Context: Any context, but setting @can_alloc to %false is required if
+ * Context: Any context, but avoiding STACK_DEPOT_FLAG_CAN_ALLOC is required if
* alloc_pages() cannot be used from the current context. Currently
* this is the case for contexts where neither %GFP_ATOMIC nor
* %GFP_NOWAIT can be used (NMI, raw_spin_lock).
*
* Return: Handle of the stack struct stored in depot, 0 on failure
*/
-depot_stack_handle_t __stack_depot_save(unsigned long *entries,
- unsigned int nr_entries,
- gfp_t gfp_flags, bool can_alloc);
+depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t gfp_flags,
+ depot_flags_t depot_flags);
/**
* stack_depot_save - Save a stack trace to stack depot
@@ -102,8 +120,11 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
* @nr_entries: Number of frames in the stack
* @alloc_flags: Allocation GFP flags
*
- * Context: Contexts where allocations via alloc_pages() are allowed.
- * See __stack_depot_save() for more details.
+ * Does not increment the refcount on the saved stack trace; see
+ * stack_depot_save_flags() for more details.
+ *
+ * Context: Contexts where allocations via alloc_pages() are allowed;
+ * see stack_depot_save_flags() for more details.
*
* Return: Handle of the stack trace stored in depot, 0 on failure
*/
@@ -142,6 +163,18 @@ int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
int spaces);
/**
+ * stack_depot_put - Drop a reference to a stack trace from stack depot
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ *
+ * The stack trace is evicted from stack depot once all references to it have
+ * been dropped (once the number of stack_depot_put() calls matches the
+ * number of stack_depot_save_flags() calls with STACK_DEPOT_FLAG_GET set for
+ * this stack trace).
+ */
+void stack_depot_put(depot_stack_handle_t handle);
+
+/**
* stack_depot_set_extra_bits - Set extra bits in a stack depot handle
*
* @handle: Stack depot handle returned from stack_depot_save()
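
A sketch of the save/put pairing described above; the record/release helpers are hypothetical, while stack_trace_save(), stack_depot_save_flags() and stack_depot_put() are the interfaces named in this header.

#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t record_alloc_stack(gfp_t gfp)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	/* FLAG_GET takes a reference so the record can later be evicted. */
	return stack_depot_save_flags(entries, nr, gfp,
				      STACK_DEPOT_FLAG_CAN_ALLOC |
				      STACK_DEPOT_FLAG_GET);
}

static void release_alloc_stack(depot_stack_handle_t handle)
{
	if (handle)
		stack_depot_put(handle);	/* may evict the trace */
}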
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index db30a159f9d5..f22bf915dcf6 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -20,7 +20,8 @@
#ifdef CONFIG_SUNRPC_BACKCHANNEL
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid);
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
-void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task);
+void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task,
+ const struct rpc_timeout *to);
void xprt_free_bc_request(struct rpc_rqst *req);
int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index e9d4377d03c6..5e9d1469c6fa 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -252,7 +252,6 @@ void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *,
const char *rpc_proc_name(const struct rpc_task *task);
-void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *, struct rpc_xprt *);
bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 8ada7dc802d3..2d61987b3545 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -38,6 +38,17 @@ struct rpc_wait {
};
/*
+ * This describes a timeout strategy
+ */
+struct rpc_timeout {
+ unsigned long to_initval, /* initial timeout */
+ to_maxval, /* max timeout */
+ to_increment; /* if !exponential */
+ unsigned int to_retries; /* max # of retries */
+ unsigned char to_exponential;
+};
+
+/*
* This is the RPC task struct
*/
struct rpc_task {
@@ -205,7 +216,8 @@ struct rpc_wait_queue {
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
-struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
+ struct rpc_timeout *timeout);
void rpc_put_task(struct rpc_task *);
void rpc_put_task_async(struct rpc_task *);
bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status);
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index b10f987509cc..67cf1c9efd80 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -69,7 +69,6 @@ struct svc_serv {
struct svc_program * sv_program; /* RPC program */
struct svc_stat * sv_stats; /* RPC statistics */
spinlock_t sv_lock;
- struct kref sv_refcnt;
unsigned int sv_nrthreads; /* # of server threads */
unsigned int sv_maxconn; /* max connections allowed or
* '0' causing max to be based
@@ -97,31 +96,13 @@ struct svc_serv {
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};
-/**
- * svc_get() - increment reference count on a SUNRPC serv
- * @serv: the svc_serv to have count incremented
- *
- * Returns: the svc_serv that was passed in.
- */
-static inline struct svc_serv *svc_get(struct svc_serv *serv)
-{
- kref_get(&serv->sv_refcnt);
- return serv;
-}
-
-void svc_destroy(struct kref *);
+/* This is used by pool_stats to find and lock an svc */
+struct svc_info {
+ struct svc_serv *serv;
+ struct mutex *mutex;
+};
-/**
- * svc_put - decrement reference count on a SUNRPC serv
- * @serv: the svc_serv to have count decremented
- *
- * When the reference count reaches zero, svc_destroy()
- * is called to clean up and free the serv.
- */
-static inline void svc_put(struct svc_serv *serv)
-{
- kref_put(&serv->sv_refcnt, svc_destroy);
-}
+void svc_destroy(struct svc_serv **svcp);
/*
* Maximum payload size supported by a kernel RPC server.
@@ -250,6 +231,8 @@ struct svc_rqst {
struct net *rq_bc_net; /* pointer to backchannel's
* net namespace
*/
+ unsigned long bc_to_initval;
+ unsigned int bc_to_retries;
void ** rq_lease_breaker; /* The v4 client breaking a lease */
unsigned int rq_status_counter; /* RPC processing counter */
};
@@ -260,8 +243,6 @@ enum {
RQ_LOCAL, /* local request */
RQ_USEDEFERRAL, /* use deferral */
RQ_DROPME, /* drop current reply */
- RQ_SPLICE_OK, /* turned off in gss privacy to prevent
- * encrypting page cache pages */
RQ_VICTIM, /* Have agreed to shut down */
RQ_DATA, /* request has data */
};
@@ -433,7 +414,7 @@ void svc_exit_thread(struct svc_rqst *);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
int (*threadfn)(void *data));
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
-int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
+int svc_pool_stats_open(struct svc_info *si, struct file *file);
void svc_process(struct svc_rqst *rqstp);
void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp);
int svc_register(const struct svc_serv *, struct net *, const int,
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index a5ee0af2a310..e7595ae62fe2 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -65,6 +65,7 @@ extern unsigned int svcrdma_ord;
extern unsigned int svcrdma_max_requests;
extern unsigned int svcrdma_max_bc_requests;
extern unsigned int svcrdma_max_req_size;
+extern struct workqueue_struct *svcrdma_wq;
extern struct percpu_counter svcrdma_stat_read;
extern struct percpu_counter svcrdma_stat_recv;
@@ -97,6 +98,7 @@ struct svcxprt_rdma {
u32 sc_pending_recvs;
u32 sc_recv_batch;
struct list_head sc_rq_dto_q;
+ struct list_head sc_read_complete_q;
spinlock_t sc_rq_dto_lock;
struct ib_qp *sc_qp;
struct ib_cq *sc_rq_cq;
@@ -115,6 +117,13 @@ struct svcxprt_rdma {
/* sc_flags */
#define RDMAXPRT_CONN_PENDING 3
+static inline struct svcxprt_rdma *svc_rdma_rqst_rdma(struct svc_rqst *rqstp)
+{
+ struct svc_xprt *xprt = rqstp->rq_xprt;
+
+ return container_of(xprt, struct svcxprt_rdma, sc_xprt);
+}
+
/*
* Default connection parameters
*/
@@ -126,6 +135,43 @@ enum {
#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
+/**
+ * svc_rdma_recv_cid_init - Initialize a Receive Queue completion ID
+ * @rdma: controlling transport
+ * @cid: completion ID to initialize
+ */
+static inline void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
+ struct rpc_rdma_cid *cid)
+{
+ cid->ci_queue_id = rdma->sc_rq_cq->res.id;
+ cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
+}
+
+/**
+ * svc_rdma_send_cid_init - Initialize a Send Queue completion ID
+ * @rdma: controlling transport
+ * @cid: completion ID to initialize
+ */
+static inline void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
+ struct rpc_rdma_cid *cid)
+{
+ cid->ci_queue_id = rdma->sc_sq_cq->res.id;
+ cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
+}
+
+/*
+ * A chunk context tracks all I/O for moving one Read or Write
+ * chunk. This is a set of rdma_rw's that handle data movement
+ * for all segments of one chunk.
+ */
+struct svc_rdma_chunk_ctxt {
+ struct rpc_rdma_cid cc_cid;
+ struct ib_cqe cc_cqe;
+ struct list_head cc_rwctxts;
+ ktime_t cc_posttime;
+ int cc_sqecount;
+};
+
struct svc_rdma_recv_ctxt {
struct llist_node rc_node;
struct list_head rc_list;
@@ -136,22 +182,33 @@ struct svc_rdma_recv_ctxt {
void *rc_recv_buf;
struct xdr_stream rc_stream;
u32 rc_byte_len;
- unsigned int rc_page_count;
u32 rc_inv_rkey;
__be32 rc_msgtype;
+ /* State for pulling a Read chunk */
+ unsigned int rc_pageoff;
+ unsigned int rc_curpage;
+ unsigned int rc_readbytes;
+ struct xdr_buf rc_saved_arg;
+ struct svc_rdma_chunk_ctxt rc_cc;
+
struct svc_rdma_pcl rc_call_pcl;
struct svc_rdma_pcl rc_read_pcl;
struct svc_rdma_chunk *rc_cur_result_payload;
struct svc_rdma_pcl rc_write_pcl;
struct svc_rdma_pcl rc_reply_pcl;
+
+ unsigned int rc_page_count;
+ struct page *rc_pages[RPCSVC_MAXPAGES];
};
struct svc_rdma_send_ctxt {
struct llist_node sc_node;
struct rpc_rdma_cid sc_cid;
+ struct work_struct sc_work;
+ struct svcxprt_rdma *sc_rdma;
struct ib_send_wr sc_send_wr;
struct ib_cqe sc_cqe;
struct xdr_buf sc_hdrbuf;
@@ -180,6 +237,11 @@ extern int svc_rdma_recvfrom(struct svc_rqst *);
/* svc_rdma_rw.c */
extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
+extern void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
+ struct svc_rdma_chunk_ctxt *cc);
+extern void svc_rdma_cc_release(struct svcxprt_rdma *rdma,
+ struct svc_rdma_chunk_ctxt *cc,
+ enum dma_data_direction dir);
extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
const struct svc_rdma_chunk *chunk,
const struct xdr_buf *xdr);
@@ -200,7 +262,8 @@ extern int svc_rdma_send(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *ctxt);
extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *sctxt,
- const struct svc_rdma_recv_ctxt *rctxt,
+ const struct svc_rdma_pcl *write_pcl,
+ const struct svc_rdma_pcl *reply_pcl,
const struct xdr_buf *xdr);
extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *sctxt,
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 6f90203edbf8..61c455f1e1f5 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -131,8 +131,11 @@ enum svc_auth_status {
* This call releases a domain.
*
* set_client()
- * Givens a pending request (struct svc_rqst), finds and assigns
+ * Given a pending request (struct svc_rqst), finds and assigns
* an appropriate 'auth_domain' as the client.
+ *
+ * pseudoflavor()
+ * Returns the RPC_AUTH pseudoflavor in use by @rqstp.
*/
struct auth_ops {
char * name;
@@ -143,11 +146,13 @@ struct auth_ops {
int (*release)(struct svc_rqst *rqstp);
void (*domain_release)(struct auth_domain *dom);
enum svc_auth_status (*set_client)(struct svc_rqst *rqstp);
+ rpc_authflavor_t (*pseudoflavor)(struct svc_rqst *rqstp);
};
struct svc_xprt;
extern enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp);
+extern rpc_authflavor_t svc_auth_flavor(struct svc_rqst *rqstp);
extern int svc_authorise(struct svc_rqst *rqstp);
extern enum svc_auth_status svc_set_client(struct svc_rqst *rqstp);
extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index f85d3a0daca2..464f6a9492ab 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -30,17 +30,6 @@
#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT)
#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
-/*
- * This describes a timeout strategy
- */
-struct rpc_timeout {
- unsigned long to_initval, /* initial timeout */
- to_maxval, /* max timeout */
- to_increment; /* if !exponential */
- unsigned int to_retries; /* max # of retries */
- unsigned char to_exponential;
-};
-
enum rpc_display_format_t {
RPC_DISPLAY_ADDR = 0,
RPC_DISPLAY_PORT,
diff --git a/include/linux/surface_aggregator/serial_hub.h b/include/linux/surface_aggregator/serial_hub.h
index 5c4ae1a26183..d8dbef6b7fc2 100644
--- a/include/linux/surface_aggregator/serial_hub.h
+++ b/include/linux/surface_aggregator/serial_hub.h
@@ -12,7 +12,7 @@
#ifndef _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
#define _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
-#include <linux/crc-ccitt.h>
+#include <linux/crc-itu-t.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/list.h>
@@ -188,7 +188,7 @@ static_assert(sizeof(struct ssh_command) == 8);
*/
static inline u16 ssh_crc(const u8 *buf, size_t len)
{
- return crc_ccitt_false(0xffff, buf, len);
+ return crc_itu_t(0xffff, buf, len);
}
/*
diff --git a/include/linux/swap.h b/include/linux/swap.h
index f6dd6575b905..4db00ddad261 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -397,9 +397,6 @@ void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);
-extern void lru_cache_add_inactive_or_unevictable(struct page *page,
- struct vm_area_struct *vma);
-
/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
@@ -490,13 +487,12 @@ extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
-extern struct swap_info_struct *page_swap_info(struct page *);
-extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
+struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
-sector_t swap_page_sector(struct page *page);
+sector_t swap_folio_sector(struct folio *folio);
static inline void put_swap_device(struct swap_info_struct *si)
{
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index fd9d12de7e92..5c0dbef55792 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -71,9 +71,12 @@ struct clone_args;
struct open_how;
struct mount_attr;
struct landlock_ruleset_attr;
+struct lsm_ctx;
enum landlock_rule_type;
struct cachestat_range;
struct cachestat;
+struct statmount;
+struct mnt_id_req;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -407,6 +410,12 @@ asmlinkage long sys_statfs64(const char __user *path, size_t sz,
asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf);
asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz,
struct statfs64 __user *buf);
+asmlinkage long sys_statmount(const struct mnt_id_req __user *req,
+ struct statmount __user *buf, size_t bufsize,
+ unsigned int flags);
+asmlinkage long sys_listmount(const struct mnt_id_req __user *req,
+ u64 __user *buf, size_t bufsize,
+ unsigned int flags);
asmlinkage long sys_truncate(const char __user *path, long length);
asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
#if BITS_PER_LONG == 32
@@ -949,6 +958,11 @@ asmlinkage long sys_cachestat(unsigned int fd,
struct cachestat_range __user *cstat_range,
struct cachestat __user *cstat, unsigned int flags);
asmlinkage long sys_map_shadow_stack(unsigned long addr, unsigned long size, unsigned int flags);
+asmlinkage long sys_lsm_get_self_attr(unsigned int attr, struct lsm_ctx *ctx,
+ size_t *size, __u32 flags);
+asmlinkage long sys_lsm_set_self_attr(unsigned int attr, struct lsm_ctx *ctx,
+ size_t size, __u32 flags);
+asmlinkage long sys_lsm_list_modules(u64 *ids, size_t *size, u32 flags);
/*
* Architecture-specific system calls
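
A hedged userspace sketch of the new listmount() syscall declared above. The mnt_id_req layout, the LSMT_ROOT constant, and the syscall number are assumptions taken from the uapi headers that accompany these declarations; verify them against <linux/mount.h> and your architecture's syscall table before relying on this.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_listmount
#define __NR_listmount 458			/* assumption: generic/x86-64 number */
#endif

/* Assumed mirror of the uapi struct mnt_id_req; check <linux/mount.h>. */
struct mnt_id_req {
	uint32_t size;
	uint32_t spare;
	uint64_t mnt_id;
	uint64_t param;
};

#define LSMT_ROOT 0xffffffffffffffffULL		/* assumed: children of the root mount */

int main(void)
{
	/* Sized so the buffer is safe whether the kernel interprets the third
	 * argument as a byte count or an entry count; check the final uapi. */
	uint64_t ids[512];
	struct mnt_id_req req;
	long n;

	memset(&req, 0, sizeof(req));
	req.size = sizeof(req);
	req.mnt_id = LSMT_ROOT;

	n = syscall(__NR_listmount, &req, ids, 512, 0);
	if (n < 0) {
		perror("listmount");
		return 1;
	}
	for (long i = 0; i < n; i++)
		printf("mount id: %llu\n", (unsigned long long)ids[i]);
	return 0;
}
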
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index cee814d5d1ac..bf84595a4e86 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -51,18 +51,23 @@ enum thermal_notify_event {
THERMAL_DEVICE_POWER_CAPABILITY_CHANGED, /* power capability changed */
THERMAL_TABLE_CHANGED, /* Thermal table(s) changed */
THERMAL_EVENT_KEEP_ALIVE, /* Request for user space handler to respond */
+ THERMAL_TZ_BIND_CDEV, /* Cooling dev is bound to the thermal zone */
+ THERMAL_TZ_UNBIND_CDEV, /* Cooling dev is unbound from the thermal zone */
+ THERMAL_INSTANCE_WEIGHT_CHANGED, /* Thermal instance weight changed */
};
/**
* struct thermal_trip - representation of a point in temperature domain
* @temperature: temperature value in milliCelsius
* @hysteresis: relative hysteresis in milliCelsius
+ * @threshold: trip crossing notification threshold in milliCelsius
* @type: trip point type
* @priv: pointer to driver data associated with this trip
*/
struct thermal_trip {
int temperature;
int hysteresis;
+ int threshold;
enum thermal_trip_type type;
void *priv;
};
@@ -115,6 +120,7 @@ struct thermal_cooling_device {
* @id: unique id number for each thermal zone
* @type: the thermal zone device type
* @device: &struct device for this thermal zone
+ * @removal: removal completion
* @trip_temp_attrs: attributes for trip points for sysfs: trip temperature
* @trip_type_attrs: attributes for trip points for sysfs: trip type
* @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
@@ -149,11 +155,13 @@ struct thermal_cooling_device {
* @node: node in thermal_tz_list (in thermal_core.c)
* @poll_queue: delayed work for polling
* @notify_event: Last notification event
+ * @suspended: thermal zone suspend indicator
*/
struct thermal_zone_device {
int id;
char type[THERMAL_NAME_LENGTH];
struct device device;
+ struct completion removal;
struct attribute_group trips_attribute_group;
struct thermal_attr *trip_temp_attrs;
struct thermal_attr *trip_type_attrs;
@@ -181,6 +189,7 @@ struct thermal_zone_device {
struct list_head node;
struct delayed_work poll_queue;
enum thermal_notify_event notify_event;
+ bool suspended;
};
/**
@@ -193,6 +202,8 @@ struct thermal_zone_device {
* thermal zone.
* @throttle: callback called for every trip point even if temperature is
* below the trip point temperature
+ * @update_tz: callback called when thermal zone internals have changed, e.g.
+ * thermal cooling instance was added/removed
* @governor_list: node in thermal_governor_list (in thermal_core.c)
*/
struct thermal_governor {
@@ -201,6 +212,8 @@ struct thermal_governor {
void (*unbind_from_tz)(struct thermal_zone_device *tz);
int (*throttle)(struct thermal_zone_device *tz,
const struct thermal_trip *trip);
+ void (*update_tz)(struct thermal_zone_device *tz,
+ enum thermal_notify_event reason);
struct list_head governor_list;
};
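
A minimal sketch of a governor using the new ->update_tz() callback documented above to react to instance changes; the governor name and the throttle stub are illustrative, not an in-tree governor.

static int example_gov_throttle(struct thermal_zone_device *tz,
				const struct thermal_trip *trip)
{
	return 0;	/* no-op for this sketch */
}

static void example_gov_update_tz(struct thermal_zone_device *tz,
				  enum thermal_notify_event reason)
{
	switch (reason) {
	case THERMAL_TZ_BIND_CDEV:
	case THERMAL_TZ_UNBIND_CDEV:
	case THERMAL_INSTANCE_WEIGHT_CHANGED:
		/* e.g. drop cached per-instance weights for this zone */
		break;
	default:
		break;
	}
}

static struct thermal_governor example_governor = {
	.name		= "example",
	.throttle	= example_gov_throttle,
	.update_tz	= example_gov_update_tz,
};
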
@@ -280,10 +293,6 @@ int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
struct thermal_trip *trip);
int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
struct thermal_trip *trip);
-
-int thermal_zone_set_trip(struct thermal_zone_device *tz, int trip_id,
- const struct thermal_trip *trip);
-
int for_each_thermal_trip(struct thermal_zone_device *tz,
int (*cb)(struct thermal_trip *, void *),
void *data);
@@ -291,16 +300,11 @@ int thermal_zone_for_each_trip(struct thermal_zone_device *tz,
int (*cb)(struct thermal_trip *, void *),
void *data);
int thermal_zone_get_num_trips(struct thermal_zone_device *tz);
+void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
+ struct thermal_trip *trip, int temp);
int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp);
-#ifdef CONFIG_THERMAL_ACPI
-int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp);
-int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp);
-int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp);
-int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
-#endif
-
#ifdef CONFIG_THERMAL
struct thermal_zone_device *thermal_zone_device_register_with_trips(
const char *type,
diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
index ba20b62f13e1..f85ec5613721 100644
--- a/include/linux/uidgid.h
+++ b/include/linux/uidgid.h
@@ -17,6 +17,7 @@
struct user_namespace;
extern struct user_namespace init_user_ns;
+struct uid_gid_map;
#define KUIDT_INIT(value) (kuid_t){ value }
#define KGIDT_INIT(value) (kgid_t){ value }
@@ -129,6 +130,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
return from_kgid(ns, gid) != (gid_t) -1;
}
+u32 map_id_down(struct uid_gid_map *map, u32 id);
+u32 map_id_up(struct uid_gid_map *map, u32 id);
+
#else
static inline kuid_t make_kuid(struct user_namespace *from, uid_t uid)
@@ -177,6 +181,15 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
return gid_valid(gid);
}
+static inline u32 map_id_down(struct uid_gid_map *map, u32 id)
+{
+ return id;
+}
+
+static inline u32 map_id_up(struct uid_gid_map *map, u32 id)
+{
+ return id;
+}
#endif /* CONFIG_USER_NS */
#endif /* _LINUX_UIDGID_H */
diff --git a/include/linux/uio.h b/include/linux/uio.h
index b6214cbf2a43..bea9c89922d9 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -347,8 +347,6 @@ ssize_t import_iovec(int type, const struct iovec __user *uvec,
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
struct iov_iter *i, bool compat);
-int import_single_range(int type, void __user *buf, size_t len,
- struct iovec *iov, struct iov_iter *i);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);
static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index f2dc19f40d05..e4056547fbe6 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -93,6 +93,17 @@ extern int mwriteprotect_range(struct mm_struct *dst_mm,
extern long uffd_wp_range(struct vm_area_struct *vma,
unsigned long start, unsigned long len, bool enable_wp);
+/* move_pages */
+void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2);
+void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2);
+ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
+ unsigned long dst_start, unsigned long src_start,
+ unsigned long len, __u64 flags);
+int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
+ struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma,
+ unsigned long dst_addr, unsigned long src_addr);
+
/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
struct vm_userfaultfd_ctx vm_ctx)
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 8abfa1240040..747943bc8cc2 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -41,9 +41,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGSTEAL_KSWAPD,
PGSTEAL_DIRECT,
PGSTEAL_KHUGEPAGED,
- PGDEMOTE_KSWAPD,
- PGDEMOTE_DIRECT,
- PGDEMOTE_KHUGEPAGED,
PGSCAN_KSWAPD,
PGSCAN_DIRECT,
PGSCAN_KHUGEPAGED,
@@ -145,6 +142,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_ZSWAP
ZSWPIN,
ZSWPOUT,
+ ZSWPWB,
#endif
#ifdef CONFIG_X86
DIRECT_MAP_LEVEL2_SPLIT,
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index fed855bae6d8..343906a98d6e 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -556,19 +556,25 @@ static inline void mod_lruvec_state(struct lruvec *lruvec,
local_irq_restore(flags);
}
-void __mod_lruvec_page_state(struct page *page,
+void __lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val);
-static inline void mod_lruvec_page_state(struct page *page,
+static inline void lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val)
{
unsigned long flags;
local_irq_save(flags);
- __mod_lruvec_page_state(page, idx, val);
+ __lruvec_stat_mod_folio(folio, idx, val);
local_irq_restore(flags);
}
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ lruvec_stat_mod_folio(page_folio(page), idx, val);
+}
+
#else
static inline void __mod_lruvec_state(struct lruvec *lruvec,
@@ -583,37 +589,25 @@ static inline void mod_lruvec_state(struct lruvec *lruvec,
mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}
-static inline void __mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
-{
- __mod_node_page_state(page_pgdat(page), idx, val);
-}
-
-static inline void mod_lruvec_page_state(struct page *page,
+static inline void __lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val)
{
- mod_node_page_state(page_pgdat(page), idx, val);
+ __mod_node_page_state(folio_pgdat(folio), idx, val);
}
-#endif /* CONFIG_MEMCG */
-
-static inline void __inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+static inline void lruvec_stat_mod_folio(struct folio *folio,
+ enum node_stat_item idx, int val)
{
- __mod_lruvec_page_state(page, idx, 1);
+ mod_node_page_state(folio_pgdat(folio), idx, val);
}
-static inline void __dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
{
- __mod_lruvec_page_state(page, idx, -1);
+ mod_node_page_state(page_pgdat(page), idx, val);
}
-static inline void __lruvec_stat_mod_folio(struct folio *folio,
- enum node_stat_item idx, int val)
-{
- __mod_lruvec_page_state(&folio->page, idx, val);
-}
+#endif /* CONFIG_MEMCG */
static inline void __lruvec_stat_add_folio(struct folio *folio,
enum node_stat_item idx)
@@ -627,24 +621,6 @@ static inline void __lruvec_stat_sub_folio(struct folio *folio,
__lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}
-static inline void inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- mod_lruvec_page_state(page, idx, 1);
-}
-
-static inline void dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- mod_lruvec_page_state(page, idx, -1);
-}
-
-static inline void lruvec_stat_mod_folio(struct folio *folio,
- enum node_stat_item idx, int val)
-{
- mod_lruvec_page_state(&folio->page, idx, val);
-}
-
static inline void lruvec_stat_add_folio(struct folio *folio,
enum node_stat_item idx)
{
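
A small usage sketch of the reshuffled helpers above, assuming a caller that already has a folio; the folio variants are now the primary interface, and the remaining page wrapper just converts with page_folio() and forwards. The stat item and helper name are illustrative.

/* Illustrative only: account an entire folio against a node stat item. */
static inline void example_account_file_folio(struct folio *folio)
{
	lruvec_stat_mod_folio(folio, NR_FILE_PAGES, folio_nr_pages(folio));
}
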
diff --git a/include/linux/wmi.h b/include/linux/wmi.h
index 763bd382cf2d..686291b87852 100644
--- a/include/linux/wmi.h
+++ b/include/linux/wmi.h
@@ -11,7 +11,6 @@
#include <linux/device.h>
#include <linux/acpi.h>
#include <linux/mod_devicetable.h>
-#include <uapi/linux/wmi.h>
/**
* struct wmi_device - WMI device structure
@@ -22,11 +21,17 @@
*/
struct wmi_device {
struct device dev;
-
- /* private: used by the WMI driver core */
bool setable;
};
+/**
+ * to_wmi_device() - Helper macro to cast a device to a wmi_device
+ * @device: device struct
+ *
+ * Cast a struct device to a struct wmi_device.
+ */
+#define to_wmi_device(device) container_of(device, struct wmi_device, dev)
+
extern acpi_status wmidev_evaluate_method(struct wmi_device *wdev,
u8 instance, u32 method_id,
const struct acpi_buffer *in,
@@ -35,9 +40,9 @@ extern acpi_status wmidev_evaluate_method(struct wmi_device *wdev,
extern union acpi_object *wmidev_block_query(struct wmi_device *wdev,
u8 instance);
-u8 wmidev_instance_count(struct wmi_device *wdev);
+acpi_status wmidev_block_set(struct wmi_device *wdev, u8 instance, const struct acpi_buffer *in);
-extern int set_required_buffer_size(struct wmi_device *wdev, u64 length);
+u8 wmidev_instance_count(struct wmi_device *wdev);
/**
* struct wmi_driver - WMI driver structure
@@ -47,11 +52,8 @@ extern int set_required_buffer_size(struct wmi_device *wdev, u64 length);
* @probe: Callback for device binding
* @remove: Callback for device unbinding
* @notify: Callback for receiving WMI events
- * @filter_callback: Callback for filtering device IOCTLs
*
* This represents WMI drivers which handle WMI devices.
- * @filter_callback is only necessary for drivers which
- * want to set up a WMI IOCTL interface.
*/
struct wmi_driver {
struct device_driver driver;
@@ -61,8 +63,6 @@ struct wmi_driver {
int (*probe)(struct wmi_device *wdev, const void *context);
void (*remove)(struct wmi_device *wdev);
void (*notify)(struct wmi_device *device, union acpi_object *data);
- long (*filter_callback)(struct wmi_device *wdev, unsigned int cmd,
- struct wmi_ioctl_buffer *arg);
};
extern int __must_check __wmi_driver_register(struct wmi_driver *driver,
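
A short sketch of the new to_wmi_device() helper above in the place it is typically wanted: a driver-side sysfs callback that only receives a struct device. The attribute name is illustrative.

static ssize_t instance_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct wmi_device *wdev = to_wmi_device(dev);

	return sysfs_emit(buf, "%u\n", wmidev_instance_count(wdev));
}
static DEVICE_ATTR_RO(instance_count);
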
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index f1bb2e35301f..2cc0a9606175 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -477,7 +477,7 @@ struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs);
-int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
+extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask);
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 083387c00f0c..6d0a14f7019d 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -193,7 +193,6 @@ void inode_io_list_del(struct inode *inode);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
- might_sleep();
wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
index 2a60ce39cfde..0b709f5bc65f 100644
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -5,19 +5,41 @@
#include <linux/types.h>
#include <linux/mm_types.h>
+struct lruvec;
+
extern u64 zswap_pool_total_size;
extern atomic_t zswap_stored_pages;
#ifdef CONFIG_ZSWAP
+struct zswap_lruvec_state {
+ /*
+ * Number of pages in zswap that should be protected from the shrinker.
+ * This number is an estimate of the following counts:
+ *
+ * a) Recent page faults.
+ * b) Recent insertions to the zswap LRU. This includes new zswap stores,
+ * as well as recent zswap LRU rotations.
+ *
+ * These pages are likely to be warm, and might incur IO if they are written
+ * to swap.
+ */
+ atomic_long_t nr_zswap_protected;
+};
+
bool zswap_store(struct folio *folio);
bool zswap_load(struct folio *folio);
void zswap_invalidate(int type, pgoff_t offset);
void zswap_swapon(int type);
void zswap_swapoff(int type);
-
+void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
+void zswap_lruvec_state_init(struct lruvec *lruvec);
+void zswap_folio_swapin(struct folio *folio);
+bool is_zswap_enabled(void);
#else
+struct zswap_lruvec_state {};
+
static inline bool zswap_store(struct folio *folio)
{
return false;
@@ -31,6 +53,14 @@ static inline bool zswap_load(struct folio *folio)
static inline void zswap_invalidate(int type, pgoff_t offset) {}
static inline void zswap_swapon(int type) {}
static inline void zswap_swapoff(int type) {}
+static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}
+static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {}
+static inline void zswap_folio_swapin(struct folio *folio) {}
+
+static inline bool is_zswap_enabled(void)
+{
+ return false;
+}
#endif
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 82da55101b5a..61ebe723ee4d 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -31,17 +31,22 @@ struct prefix_info {
__u8 length;
__u8 prefix_len;
+ union __packed {
+ __u8 flags;
+ struct __packed {
#if defined(__BIG_ENDIAN_BITFIELD)
- __u8 onlink : 1,
+ __u8 onlink : 1,
autoconf : 1,
reserved : 6;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 reserved : 6,
+ __u8 reserved : 6,
autoconf : 1,
onlink : 1;
#else
#error "Please fix <asm/byteorder.h>"
#endif
+ };
+ };
__be32 valid;
__be32 prefered;
__be32 reserved2;
@@ -49,6 +54,9 @@ struct prefix_info {
struct in6_addr prefix;
};
+/* rfc4861 4.6.2: IPv6 PIO is 32 bytes in size */
+static_assert(sizeof(struct prefix_info) == 32);
+
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/if_inet6.h>
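
A tiny sketch of what the new union in struct prefix_info buys: the whole flags octet can be read in one go while the existing bitfield names keep working. The helper name is illustrative.

static inline u8 example_prefix_flags(const struct prefix_info *pinfo)
{
	/* same storage as the onlink/autoconf/reserved bitfields */
	return pinfo->flags;
}
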
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 5531dd08061e..0754c463224a 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -15,6 +15,7 @@ struct key;
struct sock;
struct socket;
struct rxrpc_call;
+struct rxrpc_peer;
enum rxrpc_abort_reason;
enum rxrpc_interruptibility {
@@ -41,13 +42,14 @@ void rxrpc_kernel_new_call_notification(struct socket *,
rxrpc_notify_new_call_t,
rxrpc_discard_new_call_t);
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
- struct sockaddr_rxrpc *srx,
+ struct rxrpc_peer *peer,
struct key *key,
unsigned long user_call_ID,
s64 tx_total_len,
u32 hard_timeout,
gfp_t gfp,
rxrpc_notify_rx_t notify_rx,
+ u16 service_id,
bool upgrade,
enum rxrpc_interruptibility interruptibility,
unsigned int debug_id);
@@ -60,9 +62,14 @@ bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
u32, int, enum rxrpc_abort_reason);
void rxrpc_kernel_shutdown_call(struct socket *sock, struct rxrpc_call *call);
void rxrpc_kernel_put_call(struct socket *sock, struct rxrpc_call *call);
-void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
- struct sockaddr_rxrpc *);
-bool rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *, u32 *);
+struct rxrpc_peer *rxrpc_kernel_lookup_peer(struct socket *sock,
+ struct sockaddr_rxrpc *srx, gfp_t gfp);
+void rxrpc_kernel_put_peer(struct rxrpc_peer *peer);
+struct rxrpc_peer *rxrpc_kernel_get_peer(struct rxrpc_peer *peer);
+struct rxrpc_peer *rxrpc_kernel_get_call_peer(struct socket *sock, struct rxrpc_call *call);
+const struct sockaddr_rxrpc *rxrpc_kernel_remote_srx(const struct rxrpc_peer *peer);
+const struct sockaddr *rxrpc_kernel_remote_addr(const struct rxrpc_peer *peer);
+unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer *);
int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
rxrpc_user_attach_call_t, unsigned long, gfp_t,
unsigned int);
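
A hedged sketch of the reworked kernel rxrpc call setup above: callers now resolve a struct rxrpc_peer once and pass it, plus the service ID, to rxrpc_kernel_begin_call() instead of a sockaddr. Variable names, the chosen parameters, and the refcount comment are assumptions; check net/rxrpc before copying the pattern.

static struct rxrpc_call *example_begin_call(struct socket *sock,
					     struct sockaddr_rxrpc *srx,
					     struct key *key,
					     rxrpc_notify_rx_t notify_rx)
{
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;

	peer = rxrpc_kernel_lookup_peer(sock, srx, GFP_KERNEL);
	if (!peer)
		return ERR_PTR(-ENOMEM);

	call = rxrpc_kernel_begin_call(sock, peer, key,
				       0,		/* user_call_ID */
				       -1,		/* tx_total_len unknown */
				       0,		/* no hard timeout */
				       GFP_KERNEL, notify_rx,
				       srx->srx_service,
				       false,		/* no service upgrade */
				       RXRPC_PREINTERRUPTIBLE,
				       0);		/* debug_id */

	/* Assumption: the call keeps its own reference to the peer. */
	rxrpc_kernel_put_peer(peer);
	return call;
}
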
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 20988623c5cc..a3a1ea2696a8 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -189,6 +189,7 @@ struct blocked_key {
struct smp_csrk {
bdaddr_t bdaddr;
u8 bdaddr_type;
+ u8 link_type;
u8 type;
u8 val[16];
};
@@ -198,6 +199,7 @@ struct smp_ltk {
struct rcu_head rcu;
bdaddr_t bdaddr;
u8 bdaddr_type;
+ u8 link_type;
u8 authenticated;
u8 type;
u8 enc_size;
@@ -212,6 +214,7 @@ struct smp_irk {
bdaddr_t rpa;
bdaddr_t bdaddr;
u8 addr_type;
+ u8 link_type;
u8 val[16];
};
@@ -219,6 +222,8 @@ struct link_key {
struct list_head list;
struct rcu_head rcu;
bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 link_type;
u8 type;
u8 val[HCI_LINK_KEY_SIZE];
u8 pin_len;
@@ -1227,11 +1232,11 @@ static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev,
continue;
/* Match CIG ID if set */
- if (cig != BT_ISO_QOS_CIG_UNSET && cig != c->iso_qos.ucast.cig)
+ if (cig != c->iso_qos.ucast.cig)
continue;
/* Match CIS ID if set */
- if (id != BT_ISO_QOS_CIS_UNSET && id != c->iso_qos.ucast.cis)
+ if (id != c->iso_qos.ucast.cis)
continue;
/* Match destination address if set */
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 3e454c4d7ba6..f07642264c1e 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -22,10 +22,6 @@
#define IF_RS_SENT 0x10
#define IF_READY 0x80000000
-/* prefix flags */
-#define IF_PREFIX_ONLINK 0x01
-#define IF_PREFIX_AUTOCONF 0x02
-
enum {
INET6_IFADDR_STATE_PREDAD,
INET6_IFADDR_STATE_DAD,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 1ba9f4ddf2f6..9ba6413fd2e3 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -179,9 +179,6 @@ struct fib6_info {
refcount_t fib6_ref;
unsigned long expires;
-
- struct hlist_node gc_link;
-
struct dst_metrics *fib6_metrics;
#define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1]
@@ -250,6 +247,19 @@ static inline bool fib6_requires_src(const struct fib6_info *rt)
return rt->fib6_src.plen > 0;
}
+static inline void fib6_clean_expires(struct fib6_info *f6i)
+{
+ f6i->fib6_flags &= ~RTF_EXPIRES;
+ f6i->expires = 0;
+}
+
+static inline void fib6_set_expires(struct fib6_info *f6i,
+ unsigned long expires)
+{
+ f6i->expires = expires;
+ f6i->fib6_flags |= RTF_EXPIRES;
+}
+
static inline bool fib6_check_expired(const struct fib6_info *f6i)
{
if (f6i->fib6_flags & RTF_EXPIRES)
@@ -257,11 +267,6 @@ static inline bool fib6_check_expired(const struct fib6_info *f6i)
return false;
}
-static inline bool fib6_has_expires(const struct fib6_info *f6i)
-{
- return f6i->fib6_flags & RTF_EXPIRES;
-}
-
/* Function to safely get fn->fn_sernum for passed in rt
* and store result in passed in cookie.
* Return true if we can get cookie safely
@@ -383,7 +388,6 @@ struct fib6_table {
struct inet_peer_base tb6_peers;
unsigned int flags;
unsigned int fib_seq;
- struct hlist_head tb6_gc_hlist; /* GC candidates */
#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
};
@@ -500,48 +504,6 @@ void fib6_gc_cleanup(void);
int fib6_init(void);
-/* fib6_info must be locked by the caller, and fib6_info->fib6_table can be
- * NULL.
- */
-static inline void fib6_set_expires_locked(struct fib6_info *f6i,
- unsigned long expires)
-{
- struct fib6_table *tb6;
-
- tb6 = f6i->fib6_table;
- f6i->expires = expires;
- if (tb6 && !fib6_has_expires(f6i))
- hlist_add_head(&f6i->gc_link, &tb6->tb6_gc_hlist);
- f6i->fib6_flags |= RTF_EXPIRES;
-}
-
-/* fib6_info must be locked by the caller, and fib6_info->fib6_table can be
- * NULL. If fib6_table is NULL, the fib6_info will no be inserted into the
- * list of GC candidates until it is inserted into a table.
- */
-static inline void fib6_set_expires(struct fib6_info *f6i,
- unsigned long expires)
-{
- spin_lock_bh(&f6i->fib6_table->tb6_lock);
- fib6_set_expires_locked(f6i, expires);
- spin_unlock_bh(&f6i->fib6_table->tb6_lock);
-}
-
-static inline void fib6_clean_expires_locked(struct fib6_info *f6i)
-{
- if (fib6_has_expires(f6i))
- hlist_del_init(&f6i->gc_link);
- f6i->fib6_flags &= ~RTF_EXPIRES;
- f6i->expires = 0;
-}
-
-static inline void fib6_clean_expires(struct fib6_info *f6i)
-{
- spin_lock_bh(&f6i->fib6_table->tb6_lock);
- fib6_clean_expires_locked(f6i);
- spin_unlock_bh(&f6i->fib6_table->tb6_lock);
-}
-
struct ipv6_route_iter {
struct seq_net_private p;
struct fib6_walker w;
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index fe1507c1db82..692d5955911c 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -62,6 +62,8 @@ struct nf_flowtable_type {
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule);
void (*free)(struct nf_flowtable *ft);
+ void (*get)(struct nf_flowtable *ft);
+ void (*put)(struct nf_flowtable *ft);
nf_hookfn *hook;
struct module *owner;
};
@@ -240,6 +242,11 @@ nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
}
list_add_tail(&block_cb->list, &block->cb_list);
+ up_write(&flow_table->flow_block_lock);
+
+ if (flow_table->type->get)
+ flow_table->type->get(flow_table);
+ return 0;
unlock:
up_write(&flow_table->flow_block_lock);
@@ -262,6 +269,9 @@ nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
WARN_ON(true);
}
up_write(&flow_table->flow_block_lock);
+
+ if (flow_table->type->put)
+ flow_table->type->put(flow_table);
}
void flow_offload_route_init(struct flow_offload *flow,
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index 947973623dc7..60a7d0ce3080 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -30,7 +30,7 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
return -1;
len = iph_totlen(pkt->skb, iph);
- thoff = iph->ihl * 4;
+ thoff = skb_network_offset(pkt->skb) + (iph->ihl * 4);
if (pkt->skb->len < len)
return -1;
else if (len < thoff)
diff --git a/include/net/scm.h b/include/net/scm.h
index e8c76b4be2fe..cf68acec4d70 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -5,6 +5,7 @@
#include <linux/limits.h>
#include <linux/net.h>
#include <linux/cred.h>
+#include <linux/file.h>
#include <linux/security.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
@@ -208,5 +209,13 @@ static inline void scm_recv_unix(struct socket *sock, struct msghdr *msg,
scm_destroy_cred(scm);
}
+static inline int scm_recv_one_fd(struct file *f, int __user *ufd,
+ unsigned int flags)
+{
+ if (!ufd)
+ return -EFAULT;
+ return receive_fd(f, ufd, flags);
+}
+
#endif /* __LINUX_NET_SCM_H */
diff --git a/include/net/sock.h b/include/net/sock.h
index 1d6931caf0c3..0201136b0b9c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2799,6 +2799,11 @@ static inline bool sk_is_tcp(const struct sock *sk)
return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
}
+static inline bool sk_is_stream_unix(const struct sock *sk)
+{
+ return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM;
+}
+
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 144ba48bb07b..87f0e6c2e1f2 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1788,8 +1788,6 @@ struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk);
#ifdef CONFIG_TCP_MD5SIG
-#include <linux/jump_label.h>
-extern struct static_key_false_deferred tcp_md5_needed;
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
const union tcp_md5_addr *addr,
int family, bool any_l3index);
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
index 647781080613..b04afced4cc9 100644
--- a/include/net/tcp_ao.h
+++ b/include/net/tcp_ao.h
@@ -127,12 +127,35 @@ struct tcp_ao_info {
struct rcu_head rcu;
};
+#ifdef CONFIG_TCP_MD5SIG
+#include <linux/jump_label.h>
+extern struct static_key_false_deferred tcp_md5_needed;
+#define static_branch_tcp_md5() static_branch_unlikely(&tcp_md5_needed.key)
+#else
+#define static_branch_tcp_md5() false
+#endif
+#ifdef CONFIG_TCP_AO
+/* TCP-AO structures and functions */
+#include <linux/jump_label.h>
+extern struct static_key_false_deferred tcp_ao_needed;
+#define static_branch_tcp_ao() static_branch_unlikely(&tcp_ao_needed.key)
+#else
+#define static_branch_tcp_ao() false
+#endif
+
+static inline bool tcp_hash_should_produce_warnings(void)
+{
+ return static_branch_tcp_md5() || static_branch_tcp_ao();
+}
+
#define tcp_hash_fail(msg, family, skb, fmt, ...) \
do { \
const struct tcphdr *th = tcp_hdr(skb); \
char hdr_flags[6]; \
char *f = hdr_flags; \
\
+ if (!tcp_hash_should_produce_warnings()) \
+ break; \
if (th->fin) \
*f++ = 'F'; \
if (th->syn) \
@@ -159,9 +182,6 @@ do { \
#ifdef CONFIG_TCP_AO
/* TCP-AO structures and functions */
-#include <linux/jump_label.h>
-extern struct static_key_false_deferred tcp_ao_needed;
-
struct tcp4_ao_context {
__be32 saddr;
__be32 daddr;
diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h
index 4dfa6d7f83ba..cd104a1343e2 100644
--- a/include/trace/events/9p.h
+++ b/include/trace/events/9p.h
@@ -178,18 +178,21 @@ TRACE_EVENT(9p_protocol_dump,
__field( void *, clnt )
__field( __u8, type )
__field( __u16, tag )
- __array( unsigned char, line, P9_PROTO_DUMP_SZ )
+ __dynamic_array(unsigned char, line,
+ min_t(size_t, pdu->capacity, P9_PROTO_DUMP_SZ))
),
TP_fast_assign(
__entry->clnt = clnt;
__entry->type = pdu->id;
__entry->tag = pdu->tag;
- memcpy(__entry->line, pdu->sdata, P9_PROTO_DUMP_SZ);
+ memcpy(__get_dynamic_array(line), pdu->sdata,
+ __get_dynamic_array_len(line));
),
- TP_printk("clnt %lu %s(tag = %d)\n%.3x: %16ph\n%.3x: %16ph\n",
+ TP_printk("clnt %lu %s(tag = %d)\n%*ph\n",
(unsigned long)__entry->clnt, show_9p_op(__entry->type),
- __entry->tag, 0, __entry->line, 16, __entry->line + 16)
+ __entry->tag, __get_dynamic_array_len(line),
+ __get_dynamic_array(line))
);
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index e9d412d19dbb..5194b7e6dc8d 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -18,97 +18,6 @@
#ifndef __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
-enum afs_call_trace {
- afs_call_trace_alloc,
- afs_call_trace_free,
- afs_call_trace_get,
- afs_call_trace_put,
- afs_call_trace_wake,
- afs_call_trace_work,
-};
-
-enum afs_server_trace {
- afs_server_trace_alloc,
- afs_server_trace_callback,
- afs_server_trace_destroy,
- afs_server_trace_free,
- afs_server_trace_gc,
- afs_server_trace_get_by_addr,
- afs_server_trace_get_by_uuid,
- afs_server_trace_get_caps,
- afs_server_trace_get_install,
- afs_server_trace_get_new_cbi,
- afs_server_trace_get_probe,
- afs_server_trace_give_up_cb,
- afs_server_trace_purging,
- afs_server_trace_put_call,
- afs_server_trace_put_cbi,
- afs_server_trace_put_find_rsq,
- afs_server_trace_put_probe,
- afs_server_trace_put_slist,
- afs_server_trace_put_slist_isort,
- afs_server_trace_put_uuid_rsq,
- afs_server_trace_update,
-};
-
-
-enum afs_volume_trace {
- afs_volume_trace_alloc,
- afs_volume_trace_free,
- afs_volume_trace_get_alloc_sbi,
- afs_volume_trace_get_cell_insert,
- afs_volume_trace_get_new_op,
- afs_volume_trace_get_query_alias,
- afs_volume_trace_put_cell_dup,
- afs_volume_trace_put_cell_root,
- afs_volume_trace_put_destroy_sbi,
- afs_volume_trace_put_free_fc,
- afs_volume_trace_put_put_op,
- afs_volume_trace_put_query_alias,
- afs_volume_trace_put_validate_fc,
- afs_volume_trace_remove,
-};
-
-enum afs_cell_trace {
- afs_cell_trace_alloc,
- afs_cell_trace_free,
- afs_cell_trace_get_queue_dns,
- afs_cell_trace_get_queue_manage,
- afs_cell_trace_get_queue_new,
- afs_cell_trace_get_vol,
- afs_cell_trace_insert,
- afs_cell_trace_manage,
- afs_cell_trace_put_candidate,
- afs_cell_trace_put_destroy,
- afs_cell_trace_put_queue_fail,
- afs_cell_trace_put_queue_work,
- afs_cell_trace_put_vol,
- afs_cell_trace_see_source,
- afs_cell_trace_see_ws,
- afs_cell_trace_unuse_alias,
- afs_cell_trace_unuse_check_alias,
- afs_cell_trace_unuse_delete,
- afs_cell_trace_unuse_fc,
- afs_cell_trace_unuse_lookup,
- afs_cell_trace_unuse_mntpt,
- afs_cell_trace_unuse_no_pin,
- afs_cell_trace_unuse_parse,
- afs_cell_trace_unuse_pin,
- afs_cell_trace_unuse_probe,
- afs_cell_trace_unuse_sbi,
- afs_cell_trace_unuse_ws,
- afs_cell_trace_use_alias,
- afs_cell_trace_use_check_alias,
- afs_cell_trace_use_fc,
- afs_cell_trace_use_fc_alias,
- afs_cell_trace_use_lookup,
- afs_cell_trace_use_mntpt,
- afs_cell_trace_use_pin,
- afs_cell_trace_use_probe,
- afs_cell_trace_use_sbi,
- afs_cell_trace_wait,
-};
-
enum afs_fs_operation {
afs_FS_FetchData = 130, /* AFS Fetch file data */
afs_FS_FetchACL = 131, /* AFS Fetch file ACL */
@@ -202,121 +111,6 @@ enum yfs_cm_operation {
yfs_CB_CallBack = 64204,
};
-enum afs_edit_dir_op {
- afs_edit_dir_create,
- afs_edit_dir_create_error,
- afs_edit_dir_create_inval,
- afs_edit_dir_create_nospc,
- afs_edit_dir_delete,
- afs_edit_dir_delete_error,
- afs_edit_dir_delete_inval,
- afs_edit_dir_delete_noent,
-};
-
-enum afs_edit_dir_reason {
- afs_edit_dir_for_create,
- afs_edit_dir_for_link,
- afs_edit_dir_for_mkdir,
- afs_edit_dir_for_rename_0,
- afs_edit_dir_for_rename_1,
- afs_edit_dir_for_rename_2,
- afs_edit_dir_for_rmdir,
- afs_edit_dir_for_silly_0,
- afs_edit_dir_for_silly_1,
- afs_edit_dir_for_symlink,
- afs_edit_dir_for_unlink,
-};
-
-enum afs_eproto_cause {
- afs_eproto_bad_status,
- afs_eproto_cb_count,
- afs_eproto_cb_fid_count,
- afs_eproto_cellname_len,
- afs_eproto_file_type,
- afs_eproto_ibulkst_cb_count,
- afs_eproto_ibulkst_count,
- afs_eproto_motd_len,
- afs_eproto_offline_msg_len,
- afs_eproto_volname_len,
- afs_eproto_yvl_fsendpt4_len,
- afs_eproto_yvl_fsendpt6_len,
- afs_eproto_yvl_fsendpt_num,
- afs_eproto_yvl_fsendpt_type,
- afs_eproto_yvl_vlendpt4_len,
- afs_eproto_yvl_vlendpt6_len,
- afs_eproto_yvl_vlendpt_type,
-};
-
-enum afs_io_error {
- afs_io_error_cm_reply,
- afs_io_error_extract,
- afs_io_error_fs_probe_fail,
- afs_io_error_vl_lookup_fail,
- afs_io_error_vl_probe_fail,
-};
-
-enum afs_file_error {
- afs_file_error_dir_bad_magic,
- afs_file_error_dir_big,
- afs_file_error_dir_missing_page,
- afs_file_error_dir_name_too_long,
- afs_file_error_dir_over_end,
- afs_file_error_dir_small,
- afs_file_error_dir_unmarked_ext,
- afs_file_error_mntpt,
- afs_file_error_writeback_fail,
-};
-
-enum afs_flock_event {
- afs_flock_acquired,
- afs_flock_callback_break,
- afs_flock_defer_unlock,
- afs_flock_extend_fail,
- afs_flock_fail_other,
- afs_flock_fail_perm,
- afs_flock_no_lockers,
- afs_flock_release_fail,
- afs_flock_silly_delete,
- afs_flock_timestamp,
- afs_flock_try_to_lock,
- afs_flock_vfs_lock,
- afs_flock_vfs_locking,
- afs_flock_waited,
- afs_flock_waiting,
- afs_flock_work_extending,
- afs_flock_work_retry,
- afs_flock_work_unlocking,
- afs_flock_would_block,
-};
-
-enum afs_flock_operation {
- afs_flock_op_copy_lock,
- afs_flock_op_flock,
- afs_flock_op_grant,
- afs_flock_op_lock,
- afs_flock_op_release_lock,
- afs_flock_op_return_ok,
- afs_flock_op_return_eagain,
- afs_flock_op_return_edeadlk,
- afs_flock_op_return_error,
- afs_flock_op_set_lock,
- afs_flock_op_unlock,
- afs_flock_op_wake,
-};
-
-enum afs_cb_break_reason {
- afs_cb_break_no_break,
- afs_cb_break_no_promise,
- afs_cb_break_for_callback,
- afs_cb_break_for_deleted,
- afs_cb_break_for_lapsed,
- afs_cb_break_for_s_reinit,
- afs_cb_break_for_unlink,
- afs_cb_break_for_v_break,
- afs_cb_break_for_volume_callback,
- afs_cb_break_for_zap,
-};
-
#endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */
/*
@@ -357,9 +151,11 @@ enum afs_cb_break_reason {
EM(afs_volume_trace_alloc, "ALLOC ") \
EM(afs_volume_trace_free, "FREE ") \
EM(afs_volume_trace_get_alloc_sbi, "GET sbi-alloc ") \
+ EM(afs_volume_trace_get_callback, "GET callback ") \
EM(afs_volume_trace_get_cell_insert, "GET cell-insrt") \
EM(afs_volume_trace_get_new_op, "GET op-new ") \
EM(afs_volume_trace_get_query_alias, "GET cell-alias") \
+ EM(afs_volume_trace_put_callback, "PUT callback ") \
EM(afs_volume_trace_put_cell_dup, "PUT cell-dup ") \
EM(afs_volume_trace_put_cell_root, "PUT cell-root ") \
EM(afs_volume_trace_put_destroy_sbi, "PUT sbi-destry") \
@@ -391,6 +187,7 @@ enum afs_cb_break_reason {
EM(afs_cell_trace_unuse_fc, "UNU fc ") \
EM(afs_cell_trace_unuse_lookup, "UNU lookup") \
EM(afs_cell_trace_unuse_mntpt, "UNU mntpt ") \
+ EM(afs_cell_trace_unuse_no_pin, "UNU no-pin") \
EM(afs_cell_trace_unuse_parse, "UNU parse ") \
EM(afs_cell_trace_unuse_pin, "UNU pin ") \
EM(afs_cell_trace_unuse_probe, "UNU probe ") \
@@ -407,6 +204,40 @@ enum afs_cb_break_reason {
EM(afs_cell_trace_use_sbi, "USE sbi ") \
E_(afs_cell_trace_wait, "WAIT ")
+#define afs_alist_traces \
+ EM(afs_alist_trace_alloc, "ALLOC ") \
+ EM(afs_alist_trace_get_estate, "GET estate") \
+ EM(afs_alist_trace_get_vlgetcaps, "GET vgtcap") \
+ EM(afs_alist_trace_get_vlprobe, "GET vprobe") \
+ EM(afs_alist_trace_get_vlrotate_set, "GET vl-rot") \
+ EM(afs_alist_trace_put_estate, "PUT estate") \
+ EM(afs_alist_trace_put_getaddru, "PUT GtAdrU") \
+ EM(afs_alist_trace_put_parse_empty, "PUT p-empt") \
+ EM(afs_alist_trace_put_parse_error, "PUT p-err ") \
+ EM(afs_alist_trace_put_server_dup, "PUT sv-dup") \
+ EM(afs_alist_trace_put_server_oom, "PUT sv-oom") \
+ EM(afs_alist_trace_put_server_update, "PUT sv-upd") \
+ EM(afs_alist_trace_put_vlgetcaps, "PUT vgtcap") \
+ EM(afs_alist_trace_put_vlprobe, "PUT vprobe") \
+ EM(afs_alist_trace_put_vlrotate_end, "PUT vr-end") \
+ EM(afs_alist_trace_put_vlrotate_fail, "PUT vr-fai") \
+ EM(afs_alist_trace_put_vlrotate_next, "PUT vr-nxt") \
+ EM(afs_alist_trace_put_vlrotate_restart,"PUT vr-rst") \
+ EM(afs_alist_trace_put_vlserver, "PUT vlsrvr") \
+ EM(afs_alist_trace_put_vlserver_old, "PUT vs-old") \
+ E_(afs_alist_trace_free, "FREE ")
+
+#define afs_estate_traces \
+ EM(afs_estate_trace_alloc_probe, "ALLOC prob") \
+ EM(afs_estate_trace_alloc_server, "ALLOC srvr") \
+ EM(afs_estate_trace_get_server_state, "GET srv-st") \
+ EM(afs_estate_trace_get_getcaps, "GET getcap") \
+ EM(afs_estate_trace_put_getcaps, "PUT getcap") \
+ EM(afs_estate_trace_put_probe, "PUT probe ") \
+ EM(afs_estate_trace_put_server, "PUT server") \
+ EM(afs_estate_trace_put_server_state, "PUT srv-st") \
+ E_(afs_estate_trace_free, "FREE ")
+
#define afs_fs_operations \
EM(afs_FS_FetchData, "FS.FetchData") \
EM(afs_FS_FetchStatus, "FS.FetchStatus") \
@@ -604,15 +435,67 @@ enum afs_cb_break_reason {
#define afs_cb_break_reasons \
EM(afs_cb_break_no_break, "no-break") \
- EM(afs_cb_break_no_promise, "no-promise") \
EM(afs_cb_break_for_callback, "break-cb") \
+ EM(afs_cb_break_for_creation_regress, "creation-regress") \
EM(afs_cb_break_for_deleted, "break-del") \
- EM(afs_cb_break_for_lapsed, "break-lapsed") \
EM(afs_cb_break_for_s_reinit, "s-reinit") \
EM(afs_cb_break_for_unlink, "break-unlink") \
- EM(afs_cb_break_for_v_break, "break-v") \
+ EM(afs_cb_break_for_update_regress, "update-regress") \
EM(afs_cb_break_for_volume_callback, "break-v-cb") \
- E_(afs_cb_break_for_zap, "break-zap")
+ EM(afs_cb_break_for_vos_release, "break-vos-release") \
+ E_(afs_cb_break_volume_excluded, "vol-excluded")
+
+#define afs_rotate_traces \
+ EM(afs_rotate_trace_aborted, "Abortd") \
+ EM(afs_rotate_trace_busy_sleep, "BsySlp") \
+ EM(afs_rotate_trace_check_vol_status, "VolStt") \
+ EM(afs_rotate_trace_failed, "Failed") \
+ EM(afs_rotate_trace_iter, "Iter ") \
+ EM(afs_rotate_trace_iterate_addr, "ItAddr") \
+ EM(afs_rotate_trace_next_server, "NextSv") \
+ EM(afs_rotate_trace_no_more_servers, "NoMore") \
+ EM(afs_rotate_trace_nomem, "Nomem ") \
+ EM(afs_rotate_trace_probe_error, "PrbErr") \
+ EM(afs_rotate_trace_probe_fileserver, "PrbFsv") \
+ EM(afs_rotate_trace_probe_none, "PrbNon") \
+ EM(afs_rotate_trace_probe_response, "PrbRsp") \
+ EM(afs_rotate_trace_probe_superseded, "PrbSup") \
+ EM(afs_rotate_trace_restart, "Rstart") \
+ EM(afs_rotate_trace_retry_server, "RtrySv") \
+ EM(afs_rotate_trace_selected_server, "SlctSv") \
+ EM(afs_rotate_trace_stale_lock, "StlLck") \
+ EM(afs_rotate_trace_start, "Start ") \
+ EM(afs_rotate_trace_stop, "Stop ") \
+ E_(afs_rotate_trace_stopped, "Stoppd")
+
+/*
+ * Generate enums for tracing information.
+ */
+#ifndef __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY
+#define __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum afs_alist_trace { afs_alist_traces } __mode(byte);
+enum afs_call_trace { afs_call_traces } __mode(byte);
+enum afs_cb_break_reason { afs_cb_break_reasons } __mode(byte);
+enum afs_cell_trace { afs_cell_traces } __mode(byte);
+enum afs_edit_dir_op { afs_edit_dir_ops } __mode(byte);
+enum afs_edit_dir_reason { afs_edit_dir_reasons } __mode(byte);
+enum afs_eproto_cause { afs_eproto_causes } __mode(byte);
+enum afs_estate_trace { afs_estate_traces } __mode(byte);
+enum afs_file_error { afs_file_errors } __mode(byte);
+enum afs_flock_event { afs_flock_events } __mode(byte);
+enum afs_flock_operation { afs_flock_operations } __mode(byte);
+enum afs_io_error { afs_io_errors } __mode(byte);
+enum afs_rotate_trace { afs_rotate_traces } __mode(byte);
+enum afs_server_trace { afs_server_traces } __mode(byte);
+enum afs_volume_trace { afs_volume_traces } __mode(byte);
+
+#endif /* end __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY */
/*
* Export enum symbols via userspace.
@@ -622,21 +505,24 @@ enum afs_cb_break_reason {
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define E_(a, b) TRACE_DEFINE_ENUM(a);
+afs_alist_traces;
afs_call_traces;
-afs_server_traces;
+afs_cb_break_reasons;
afs_cell_traces;
-afs_fs_operations;
-afs_vl_operations;
afs_cm_operations;
-yfs_cm_operations;
afs_edit_dir_ops;
afs_edit_dir_reasons;
afs_eproto_causes;
-afs_io_errors;
+afs_estate_traces;
afs_file_errors;
-afs_flock_types;
afs_flock_operations;
-afs_cb_break_reasons;
+afs_flock_types;
+afs_fs_operations;
+afs_io_errors;
+afs_rotate_traces;
+afs_server_traces;
+afs_vl_operations;
+yfs_cm_operations;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -654,12 +540,12 @@ TRACE_EVENT(afs_receive_data,
TP_ARGS(call, iter, want_more, ret),
TP_STRUCT__entry(
- __field(loff_t, remain )
- __field(unsigned int, call )
- __field(enum afs_call_state, state )
- __field(unsigned short, unmarshall )
- __field(bool, want_more )
- __field(int, ret )
+ __field(loff_t, remain)
+ __field(unsigned int, call)
+ __field(enum afs_call_state, state)
+ __field(unsigned short, unmarshall)
+ __field(bool, want_more)
+ __field(int, ret)
),
TP_fast_assign(
@@ -686,9 +572,9 @@ TRACE_EVENT(afs_notify_call,
TP_ARGS(rxcall, call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_call_state, state )
- __field(unsigned short, unmarshall )
+ __field(unsigned int, call)
+ __field(enum afs_call_state, state)
+ __field(unsigned short, unmarshall)
),
TP_fast_assign(
@@ -708,9 +594,9 @@ TRACE_EVENT(afs_cb_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(u32, op )
- __field(u16, service_id )
+ __field(unsigned int, call)
+ __field(u32, op)
+ __field(u16, service_id)
),
TP_fast_assign(
@@ -733,11 +619,11 @@ TRACE_EVENT(afs_call,
TP_ARGS(call_debug_id, op, ref, outstanding, where),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, op )
- __field(int, ref )
- __field(int, outstanding )
- __field(const void *, where )
+ __field(unsigned int, call)
+ __field(int, op)
+ __field(int, ref)
+ __field(int, outstanding)
+ __field(const void *, where)
),
TP_fast_assign(
@@ -762,9 +648,9 @@ TRACE_EVENT(afs_make_fs_call,
TP_ARGS(call, fid),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -794,10 +680,10 @@ TRACE_EVENT(afs_make_fs_calli,
TP_ARGS(call, fid, i),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, i )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
+ __field(unsigned int, call)
+ __field(unsigned int, i)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -829,10 +715,10 @@ TRACE_EVENT(afs_make_fs_call1,
TP_ARGS(call, fid, name),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -866,11 +752,11 @@ TRACE_EVENT(afs_make_fs_call2,
TP_ARGS(call, fid, name, name2),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
- __array(char, name2, 24 )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
+ __array(char, name2, 24)
),
TP_fast_assign(
@@ -907,8 +793,8 @@ TRACE_EVENT(afs_make_vl_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_vl_operation, op )
+ __field(unsigned int, call)
+ __field(enum afs_vl_operation, op)
),
TP_fast_assign(
@@ -927,10 +813,10 @@ TRACE_EVENT(afs_call_done,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(struct rxrpc_call *, rx_call )
- __field(int, ret )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(struct rxrpc_call *, rx_call)
+ __field(int, ret)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -953,10 +839,10 @@ TRACE_EVENT(afs_send_data,
TP_ARGS(call, msg),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, flags )
- __field(loff_t, offset )
- __field(loff_t, count )
+ __field(unsigned int, call)
+ __field(unsigned int, flags)
+ __field(loff_t, offset)
+ __field(loff_t, count)
),
TP_fast_assign(
@@ -977,10 +863,10 @@ TRACE_EVENT(afs_sent_data,
TP_ARGS(call, msg, ret),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, ret )
- __field(loff_t, offset )
- __field(loff_t, count )
+ __field(unsigned int, call)
+ __field(int, ret)
+ __field(loff_t, offset)
+ __field(loff_t, count)
),
TP_fast_assign(
@@ -1001,9 +887,9 @@ TRACE_EVENT(afs_dir_check_failed,
TP_ARGS(vnode, off, i_size),
TP_STRUCT__entry(
- __field(struct afs_vnode *, vnode )
- __field(loff_t, off )
- __field(loff_t, i_size )
+ __field(struct afs_vnode *, vnode)
+ __field(loff_t, off)
+ __field(loff_t, i_size)
),
TP_fast_assign(
@@ -1022,11 +908,11 @@ TRACE_EVENT(afs_folio_dirty,
TP_ARGS(vnode, where, folio),
TP_STRUCT__entry(
- __field(struct afs_vnode *, vnode )
- __field(const char *, where )
- __field(pgoff_t, index )
- __field(unsigned long, from )
- __field(unsigned long, to )
+ __field(struct afs_vnode *, vnode)
+ __field(const char *, where)
+ __field(pgoff_t, index)
+ __field(unsigned long, from)
+ __field(unsigned long, to)
),
TP_fast_assign(
@@ -1056,11 +942,11 @@ TRACE_EVENT(afs_call_state,
TP_ARGS(call, from, to, ret, remote_abort),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_call_state, from )
- __field(enum afs_call_state, to )
- __field(int, ret )
- __field(u32, abort )
+ __field(unsigned int, call)
+ __field(enum afs_call_state, from)
+ __field(enum afs_call_state, to)
+ __field(int, ret)
+ __field(u32, abort)
),
TP_fast_assign(
@@ -1084,9 +970,9 @@ TRACE_EVENT(afs_lookup,
TP_ARGS(dvnode, name, fid),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, dfid )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
+ __field_struct(struct afs_fid, dfid)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -1116,15 +1002,15 @@ TRACE_EVENT(afs_edit_dir,
TP_ARGS(dvnode, why, op, block, slot, f_vnode, f_unique, name),
TP_STRUCT__entry(
- __field(unsigned int, vnode )
- __field(unsigned int, unique )
- __field(enum afs_edit_dir_reason, why )
- __field(enum afs_edit_dir_op, op )
- __field(unsigned int, block )
- __field(unsigned short, slot )
- __field(unsigned int, f_vnode )
- __field(unsigned int, f_unique )
- __array(char, name, 24 )
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(enum afs_edit_dir_reason, why)
+ __field(enum afs_edit_dir_op, op)
+ __field(unsigned int, block)
+ __field(unsigned short, slot)
+ __field(unsigned int, f_vnode)
+ __field(unsigned int, f_unique)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -1157,8 +1043,8 @@ TRACE_EVENT(afs_protocol_error,
TP_ARGS(call, cause),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_eproto_cause, cause )
+ __field(unsigned int, call)
+ __field(enum afs_eproto_cause, cause)
),
TP_fast_assign(
@@ -1177,9 +1063,9 @@ TRACE_EVENT(afs_io_error,
TP_ARGS(call, error, where),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, error )
- __field(enum afs_io_error, where )
+ __field(unsigned int, call)
+ __field(int, error)
+ __field(enum afs_io_error, where)
),
TP_fast_assign(
@@ -1199,9 +1085,9 @@ TRACE_EVENT(afs_file_error,
TP_ARGS(vnode, error, where),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(int, error )
- __field(enum afs_file_error, where )
+ __field_struct(struct afs_fid, fid)
+ __field(int, error)
+ __field(enum afs_file_error, where)
),
TP_fast_assign(
@@ -1222,9 +1108,9 @@ TRACE_EVENT(afs_cm_no_server,
TP_ARGS(call, srx),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, op_id )
- __field_struct(struct sockaddr_rxrpc, srx )
+ __field(unsigned int, call)
+ __field(unsigned int, op_id)
+ __field_struct(struct sockaddr_rxrpc, srx)
),
TP_fast_assign(
@@ -1243,9 +1129,9 @@ TRACE_EVENT(afs_cm_no_server_u,
TP_ARGS(call, uuid),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, op_id )
- __field_struct(uuid_t, uuid )
+ __field(unsigned int, call)
+ __field(unsigned int, op_id)
+ __field_struct(uuid_t, uuid)
),
TP_fast_assign(
@@ -1265,11 +1151,11 @@ TRACE_EVENT(afs_flock_ev,
TP_ARGS(vnode, fl, event, error),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(enum afs_flock_event, event )
- __field(enum afs_lock_state, state )
- __field(int, error )
- __field(unsigned int, debug_id )
+ __field_struct(struct afs_fid, fid)
+ __field(enum afs_flock_event, event)
+ __field(enum afs_lock_state, state)
+ __field(int, error)
+ __field(unsigned int, debug_id)
),
TP_fast_assign(
@@ -1295,13 +1181,13 @@ TRACE_EVENT(afs_flock_op,
TP_ARGS(vnode, fl, op),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(loff_t, from )
- __field(loff_t, len )
- __field(enum afs_flock_operation, op )
- __field(unsigned char, type )
- __field(unsigned int, flags )
- __field(unsigned int, debug_id )
+ __field_struct(struct afs_fid, fid)
+ __field(loff_t, from)
+ __field(loff_t, len)
+ __field(enum afs_flock_operation, op)
+ __field(unsigned char, type)
+ __field(unsigned int, flags)
+ __field(unsigned int, debug_id)
),
TP_fast_assign(
@@ -1328,7 +1214,7 @@ TRACE_EVENT(afs_reload_dir,
TP_ARGS(vnode),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -1345,8 +1231,8 @@ TRACE_EVENT(afs_silly_rename,
TP_ARGS(vnode, done),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(bool, done )
+ __field_struct(struct afs_fid, fid)
+ __field(bool, done)
),
TP_fast_assign(
@@ -1365,9 +1251,9 @@ TRACE_EVENT(afs_get_tree,
TP_ARGS(cell, volume),
TP_STRUCT__entry(
- __field(u64, vid )
- __array(char, cell, 24 )
- __array(char, volume, 24 )
+ __field(u64, vid)
+ __array(char, cell, 24)
+ __array(char, volume, 24)
),
TP_fast_assign(
@@ -1385,6 +1271,30 @@ TRACE_EVENT(afs_get_tree,
__entry->cell, __entry->volume, __entry->vid)
);
+TRACE_EVENT(afs_cb_v_break,
+ TP_PROTO(afs_volid_t vid, unsigned int cb_v_break,
+ enum afs_cb_break_reason reason),
+
+ TP_ARGS(vid, cb_v_break, reason),
+
+ TP_STRUCT__entry(
+ __field(afs_volid_t, vid)
+ __field(unsigned int, cb_v_break)
+ __field(enum afs_cb_break_reason, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->vid = vid;
+ __entry->cb_v_break = cb_v_break;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("%llx vb=%x %s",
+ __entry->vid,
+ __entry->cb_v_break,
+ __print_symbolic(__entry->reason, afs_cb_break_reasons))
+ );
+
TRACE_EVENT(afs_cb_break,
TP_PROTO(struct afs_fid *fid, unsigned int cb_break,
enum afs_cb_break_reason reason, bool skipped),
@@ -1392,10 +1302,10 @@ TRACE_EVENT(afs_cb_break,
TP_ARGS(fid, cb_break, reason, skipped),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(unsigned int, cb_break )
- __field(enum afs_cb_break_reason, reason )
- __field(bool, skipped )
+ __field_struct(struct afs_fid, fid)
+ __field(unsigned int, cb_break)
+ __field(enum afs_cb_break_reason, reason)
+ __field(bool, skipped)
),
TP_fast_assign(
@@ -1418,8 +1328,8 @@ TRACE_EVENT(afs_cb_miss,
TP_ARGS(fid, reason),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(enum afs_cb_break_reason, reason )
+ __field_struct(struct afs_fid, fid)
+ __field(enum afs_cb_break_reason, reason)
),
TP_fast_assign(
@@ -1439,10 +1349,10 @@ TRACE_EVENT(afs_server,
TP_ARGS(server_debug_id, ref, active, reason),
TP_STRUCT__entry(
- __field(unsigned int, server )
- __field(int, ref )
- __field(int, active )
- __field(int, reason )
+ __field(unsigned int, server)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
),
TP_fast_assign(
@@ -1465,9 +1375,9 @@ TRACE_EVENT(afs_volume,
TP_ARGS(vid, ref, reason),
TP_STRUCT__entry(
- __field(afs_volid_t, vid )
- __field(int, ref )
- __field(enum afs_volume_trace, reason )
+ __field(afs_volid_t, vid)
+ __field(int, ref)
+ __field(enum afs_volume_trace, reason)
),
TP_fast_assign(
@@ -1489,10 +1399,10 @@ TRACE_EVENT(afs_cell,
TP_ARGS(cell_debug_id, ref, active, reason),
TP_STRUCT__entry(
- __field(unsigned int, cell )
- __field(int, ref )
- __field(int, active )
- __field(int, reason )
+ __field(unsigned int, cell)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
),
TP_fast_assign(
@@ -1509,6 +1419,199 @@ TRACE_EVENT(afs_cell,
__entry->active)
);
+TRACE_EVENT(afs_alist,
+ TP_PROTO(unsigned int alist_debug_id, int ref, enum afs_alist_trace reason),
+
+ TP_ARGS(alist_debug_id, ref, reason),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, alist)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->alist = alist_debug_id;
+ __entry->ref = ref;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("AL=%08x %s r=%d",
+ __entry->alist,
+ __print_symbolic(__entry->reason, afs_alist_traces),
+ __entry->ref)
+ );
+
+TRACE_EVENT(afs_estate,
+ TP_PROTO(unsigned int server_debug_id, unsigned int estate_debug_id,
+ int ref, enum afs_estate_trace reason),
+
+ TP_ARGS(server_debug_id, estate_debug_id, ref, reason),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, server)
+ __field(unsigned int, estate)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->server = server_debug_id;
+ __entry->estate = estate_debug_id;
+ __entry->ref = ref;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("ES=%08x[%x] %s r=%d",
+ __entry->server,
+ __entry->estate,
+ __print_symbolic(__entry->reason, afs_estate_traces),
+ __entry->ref)
+ );
+
+TRACE_EVENT(afs_fs_probe,
+ TP_PROTO(struct afs_server *server, bool tx, struct afs_endpoint_state *estate,
+ unsigned int addr_index, int error, s32 abort_code, unsigned int rtt_us),
+
+ TP_ARGS(server, tx, estate, addr_index, error, abort_code, rtt_us),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, server)
+ __field(unsigned int, estate)
+ __field(bool, tx)
+ __field(u16, addr_index)
+ __field(short, error)
+ __field(s32, abort_code)
+ __field(unsigned int, rtt_us)
+ __field_struct(struct sockaddr_rxrpc, srx)
+ ),
+
+ TP_fast_assign(
+ struct afs_addr_list *alist = estate->addresses;
+ __entry->server = server->debug_id;
+ __entry->estate = estate->probe_seq;
+ __entry->tx = tx;
+ __entry->addr_index = addr_index;
+ __entry->error = error;
+ __entry->abort_code = abort_code;
+ __entry->rtt_us = rtt_us;
+ memcpy(&__entry->srx, rxrpc_kernel_remote_srx(alist->addrs[addr_index].peer),
+ sizeof(__entry->srx));
+ ),
+
+ TP_printk("s=%08x %s pq=%x ax=%u e=%d ac=%d rtt=%d %pISpc",
+ __entry->server, __entry->tx ? "tx" : "rx", __entry->estate,
+ __entry->addr_index, __entry->error, __entry->abort_code, __entry->rtt_us,
+ &__entry->srx.transport)
+ );
+
+TRACE_EVENT(afs_vl_probe,
+ TP_PROTO(struct afs_vlserver *server, bool tx, struct afs_addr_list *alist,
+ unsigned int addr_index, int error, s32 abort_code, unsigned int rtt_us),
+
+ TP_ARGS(server, tx, alist, addr_index, error, abort_code, rtt_us),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, server)
+ __field(bool, tx)
+ __field(unsigned short, flags)
+ __field(u16, addr_index)
+ __field(short, error)
+ __field(s32, abort_code)
+ __field(unsigned int, rtt_us)
+ __field_struct(struct sockaddr_rxrpc, srx)
+ ),
+
+ TP_fast_assign(
+ __entry->server = server->debug_id;
+ __entry->tx = tx;
+ __entry->addr_index = addr_index;
+ __entry->error = error;
+ __entry->abort_code = abort_code;
+ __entry->rtt_us = rtt_us;
+ memcpy(&__entry->srx, rxrpc_kernel_remote_srx(alist->addrs[addr_index].peer),
+ sizeof(__entry->srx));
+ ),
+
+ TP_printk("vl=%08x %s ax=%u e=%d ac=%d rtt=%d %pISpc",
+ __entry->server, __entry->tx ? "tx" : "rx", __entry->addr_index,
+ __entry->error, __entry->abort_code, __entry->rtt_us,
+ &__entry->srx.transport)
+ );
+
+TRACE_EVENT(afs_rotate,
+ TP_PROTO(struct afs_operation *op, enum afs_rotate_trace reason, unsigned int extra),
+
+ TP_ARGS(op, reason, extra),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, op)
+ __field(unsigned int, flags)
+ __field(unsigned int, extra)
+ __field(unsigned short, iteration)
+ __field(short, server_index)
+ __field(short, addr_index)
+ __field(enum afs_rotate_trace, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->op = op->debug_id;
+ __entry->flags = op->flags;
+ __entry->iteration = op->nr_iterations;
+ __entry->server_index = op->server_index;
+ __entry->addr_index = op->addr_index;
+ __entry->reason = reason;
+ __entry->extra = extra;
+ ),
+
+ TP_printk("OP=%08x it=%02x %s fl=%x sx=%d ax=%d ext=%d",
+ __entry->op,
+ __entry->iteration,
+ __print_symbolic(__entry->reason, afs_rotate_traces),
+ __entry->flags,
+ __entry->server_index,
+ __entry->addr_index,
+ __entry->extra)
+ );
+
+TRACE_EVENT(afs_make_call,
+ TP_PROTO(struct afs_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(bool, is_vl)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
+ __field_struct(struct sockaddr_rxrpc, srx)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->op = call->operation_ID;
+ __entry->fid = call->fid;
+ memcpy(&__entry->srx, rxrpc_kernel_remote_srx(call->peer),
+ sizeof(__entry->srx));
+ __entry->srx.srx_service = call->service_id;
+ __entry->is_vl = (__entry->srx.srx_service == VL_SERVICE ||
+ __entry->srx.srx_service == YFS_VL_SERVICE);
+ ),
+
+ TP_printk("c=%08x %pISpc+%u %s %llx:%llx:%x",
+ __entry->call,
+ &__entry->srx.transport,
+ __entry->srx.srx_service,
+ __entry->is_vl ?
+ __print_symbolic(__entry->op, afs_vl_operations) :
+ __print_symbolic(__entry->op, afs_fs_operations),
+ __entry->fid.vid,
+ __entry->fid.vnode,
+ __entry->fid.unique)
+ );
+
#endif /* _TRACE_AFS_H */
/* This part must be outside protection */
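The afs events added above (afs_alist, afs_estate, afs_fs_probe, afs_vl_probe, afs_rotate, afs_make_call) are ordinary tracepoints in the "afs" group and can be consumed from tracefs. A minimal userspace sketch, assuming tracefs is mounted at /sys/kernel/tracing and a kernel carrying this patch:

/* Sketch: enable a few of the new afs events and stream the text trace. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void enable(const char *path)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, "1", 1) != 1)
		perror(path);
	if (fd >= 0)
		close(fd);
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	enable("/sys/kernel/tracing/events/afs/afs_alist/enable");
	enable("/sys/kernel/tracing/events/afs/afs_estate/enable");
	enable("/sys/kernel/tracing/events/afs/afs_rotate/enable");

	fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	while (fd >= 0 && (n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	return 0;
}

The printed lines follow the TP_printk() formats above, e.g. "AL=%08x %s r=%d" for afs_alist.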
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 279a7a0c90c0..90b0222390e5 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -21,7 +21,7 @@ struct btrfs_delayed_data_ref;
struct btrfs_delayed_ref_head;
struct btrfs_block_group;
struct btrfs_free_cluster;
-struct map_lookup;
+struct btrfs_chunk_map;
struct extent_buffer;
struct btrfs_work;
struct btrfs_workqueue;
@@ -265,20 +265,20 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
__print_symbolic_u64(type, \
{ EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \
{ EXTENT_MAP_HOLE, "HOLE" }, \
- { EXTENT_MAP_INLINE, "INLINE" }, \
- { EXTENT_MAP_DELALLOC, "DELALLOC" })
+ { EXTENT_MAP_INLINE, "INLINE" })
#define show_map_type(type) \
type, (type >= EXTENT_MAP_LAST_BYTE) ? "-" : __show_map_type(type)
#define show_map_flags(flag) \
__print_flags(flag, "|", \
- { (1 << EXTENT_FLAG_PINNED), "PINNED" },\
- { (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\
- { (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\
- { (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\
- { (1 << EXTENT_FLAG_FILLING), "FILLING" },\
- { (1 << EXTENT_FLAG_FS_MAPPING), "FS_MAPPING" })
+ { EXTENT_FLAG_PINNED, "PINNED" },\
+ { EXTENT_FLAG_COMPRESS_ZLIB, "COMPRESS_ZLIB" },\
+ { EXTENT_FLAG_COMPRESS_LZO, "COMPRESS_LZO" },\
+ { EXTENT_FLAG_COMPRESS_ZSTD, "COMPRESS_ZSTD" },\
+ { EXTENT_FLAG_PREALLOC, "PREALLOC" },\
+ { EXTENT_FLAG_LOGGING, "LOGGING" },\
+ { EXTENT_FLAG_FILLING, "FILLING" })
TRACE_EVENT_CONDITION(btrfs_get_extent,
@@ -297,9 +297,8 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__field( u64, orig_start )
__field( u64, block_start )
__field( u64, block_len )
- __field( unsigned long, flags )
+ __field( u32, flags )
__field( int, refs )
- __field( unsigned int, compress_type )
),
TP_fast_assign_btrfs(root->fs_info,
@@ -312,13 +311,11 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__entry->block_len = map->block_len;
__entry->flags = map->flags;
__entry->refs = refcount_read(&map->refs);
- __entry->compress_type = map->compress_type;
),
TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu "
"orig_start=%llu block_start=%llu(%s) "
- "block_len=%llu flags=%s refs=%u "
- "compress_type=%u",
+ "block_len=%llu flags=%s refs=%u",
show_root_type(__entry->root_objectid),
__entry->ino,
__entry->start,
@@ -327,7 +324,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
show_map_type(__entry->block_start),
__entry->block_len,
show_map_flags(__entry->flags),
- __entry->refs, __entry->compress_type)
+ __entry->refs)
);
TRACE_EVENT(btrfs_handle_em_exist,
@@ -1061,7 +1058,7 @@ DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head,
DECLARE_EVENT_CLASS(btrfs__chunk,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct map_lookup *map, u64 offset, u64 size),
+ const struct btrfs_chunk_map *map, u64 offset, u64 size),
TP_ARGS(fs_info, map, offset, size),
@@ -1095,7 +1092,7 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct map_lookup *map, u64 offset, u64 size),
+ const struct btrfs_chunk_map *map, u64 offset, u64 size),
TP_ARGS(fs_info, map, offset, size)
);
@@ -1103,7 +1100,7 @@ DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
DEFINE_EVENT(btrfs__chunk, btrfs_chunk_free,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct map_lookup *map, u64 offset, u64 size),
+ const struct btrfs_chunk_map *map, u64 offset, u64 size),
TP_ARGS(fs_info, map, offset, size)
);
@@ -2099,17 +2096,12 @@ TRACE_EVENT(btrfs_set_extent_bit,
__field( unsigned, set_bits)
),
- TP_fast_assign_btrfs(tree->fs_info,
- __entry->owner = tree->owner;
- if (tree->inode) {
- const struct btrfs_inode *inode = tree->inode;
+ TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
- __entry->ino = btrfs_ino(inode);
- __entry->rootid = inode->root->root_key.objectid;
- } else {
- __entry->ino = 0;
- __entry->rootid = 0;
- }
+ __entry->owner = tree->owner;
+ __entry->ino = inode ? btrfs_ino(inode) : 0;
+ __entry->rootid = inode ? inode->root->root_key.objectid : 0;
__entry->start = start;
__entry->len = len;
__entry->set_bits = set_bits;
@@ -2137,17 +2129,12 @@ TRACE_EVENT(btrfs_clear_extent_bit,
__field( unsigned, clear_bits)
),
- TP_fast_assign_btrfs(tree->fs_info,
- __entry->owner = tree->owner;
- if (tree->inode) {
- const struct btrfs_inode *inode = tree->inode;
+ TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
- __entry->ino = btrfs_ino(inode);
- __entry->rootid = inode->root->root_key.objectid;
- } else {
- __entry->ino = 0;
- __entry->rootid = 0;
- }
+ __entry->owner = tree->owner;
+ __entry->ino = inode ? btrfs_ino(inode) : 0;
+ __entry->rootid = inode ? inode->root->root_key.objectid : 0;
__entry->start = start;
__entry->len = len;
__entry->clear_bits = clear_bits;
@@ -2176,17 +2163,12 @@ TRACE_EVENT(btrfs_convert_extent_bit,
__field( unsigned, clear_bits)
),
- TP_fast_assign_btrfs(tree->fs_info,
- __entry->owner = tree->owner;
- if (tree->inode) {
- const struct btrfs_inode *inode = tree->inode;
+ TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
- __entry->ino = btrfs_ino(inode);
- __entry->rootid = inode->root->root_key.objectid;
- } else {
- __entry->ino = 0;
- __entry->rootid = 0;
- }
+ __entry->owner = tree->owner;
+ __entry->ino = inode ? btrfs_ino(inode) : 0;
+ __entry->rootid = inode ? inode->root->root_key.objectid : 0;
__entry->start = start;
__entry->len = len;
__entry->set_bits = set_bits;
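The show_map_flags() rewrite above mirrors a wider change in this series: extent_map::flags appears to go from an unsigned long holding bit numbers (hence the old 1 << EXTENT_FLAG_* shifts) to a u32 holding ready-made masks, which is also why the traced field narrows to u32 and the separate compress_type field disappears in favour of per-algorithm COMPRESS_* flags. A standalone sketch of the two conventions, using hypothetical MY_FLAG_* names rather than the btrfs ones:

/* Sketch only: bit-number flags vs. mask flags. */
#include <stdio.h>

/* Old convention: the constant names a bit position, callers shift it. */
#define MY_FLAG_PINNED_BIT	0
#define MY_FLAG_PREALLOC_BIT	1

/* New convention: the constant is already a mask, callers OR it in. */
#define MY_FLAG_PINNED		(1U << 0)
#define MY_FLAG_PREALLOC	(1U << 1)

int main(void)
{
	unsigned long old_flags = 0;
	unsigned int new_flags = 0;

	old_flags |= 1UL << MY_FLAG_PINNED_BIT;	/* shift at the call site */
	new_flags |= MY_FLAG_PREALLOC;		/* use the mask directly */

	printf("old=%#lx new=%#x\n", old_flags, new_flags);
	return 0;
}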
diff --git a/include/trace/events/ksm.h b/include/trace/events/ksm.h
index b5ac35c1d0e8..e728647b5d26 100644
--- a/include/trace/events/ksm.h
+++ b/include/trace/events/ksm.h
@@ -245,6 +245,39 @@ TRACE_EVENT(ksm_remove_rmap_item,
__entry->pfn, __entry->rmap_item, __entry->mm)
);
+/**
+ * ksm_advisor - called after the advisor has run
+ *
+ * @scan_time: scan time in seconds
+ * @pages_to_scan: new pages_to_scan value
+ * @cpu_percent: cpu usage in percent
+ *
+ * Allows tracing of the ksm advisor.
+ */
+TRACE_EVENT(ksm_advisor,
+
+ TP_PROTO(s64 scan_time, unsigned long pages_to_scan,
+ unsigned int cpu_percent),
+
+ TP_ARGS(scan_time, pages_to_scan, cpu_percent),
+
+ TP_STRUCT__entry(
+ __field(s64, scan_time)
+ __field(unsigned long, pages_to_scan)
+ __field(unsigned int, cpu_percent)
+ ),
+
+ TP_fast_assign(
+ __entry->scan_time = scan_time;
+ __entry->pages_to_scan = pages_to_scan;
+ __entry->cpu_percent = cpu_percent;
+ ),
+
+ TP_printk("ksm scan time %lld pages_to_scan %lu cpu percent %u",
+ __entry->scan_time, __entry->pages_to_scan,
+ __entry->cpu_percent)
+);
+
#endif /* _TRACE_KSM_H */
/* This part must be outside protection */
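Given the TP_printk() above, a ksm_advisor record in the text trace ends with "ksm scan time <s64> pages_to_scan <ulong> cpu percent <uint>". A small sketch that parses that payload (the leading task/timestamp columns of a real trace_pipe line are omitted here):

/* Sketch: recover the advisor numbers from one trace line payload. */
#include <stdio.h>

int main(void)
{
	const char *payload =
		"ksm scan time 12 pages_to_scan 4000 cpu percent 25";
	long long scan_time;
	unsigned long pages_to_scan;
	unsigned int cpu_percent;

	if (sscanf(payload, "ksm scan time %lld pages_to_scan %lu cpu percent %u",
		   &scan_time, &pages_to_scan, &cpu_percent) == 3)
		printf("scan_time=%llds pages_to_scan=%lu cpu=%u%%\n",
		       scan_time, pages_to_scan, cpu_percent);
	return 0;
}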
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 718df1d9b834..110c1475c527 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -22,47 +22,37 @@
** Event classes
**/
-DECLARE_EVENT_CLASS(rpcrdma_completion_class,
+DECLARE_EVENT_CLASS(rpcrdma_simple_cid_class,
TP_PROTO(
- const struct ib_wc *wc,
const struct rpc_rdma_cid *cid
),
- TP_ARGS(wc, cid),
+ TP_ARGS(cid),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
- __field(unsigned long, status)
- __field(unsigned int, vendor_err)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
- __entry->status = wc->status;
- if (wc->status)
- __entry->vendor_err = wc->vendor_err;
- else
- __entry->vendor_err = 0;
),
- TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
- __entry->cq_id, __entry->completion_id,
- rdma_show_wc_status(__entry->status),
- __entry->status, __entry->vendor_err
+ TP_printk("cq.id=%d cid=%d",
+ __entry->cq_id, __entry->completion_id
)
);
-#define DEFINE_COMPLETION_EVENT(name) \
- DEFINE_EVENT(rpcrdma_completion_class, name, \
+#define DEFINE_SIMPLE_CID_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_simple_cid_class, name, \
TP_PROTO( \
- const struct ib_wc *wc, \
const struct rpc_rdma_cid *cid \
), \
- TP_ARGS(wc, cid))
+ TP_ARGS(cid) \
+ )
-DECLARE_EVENT_CLASS(rpcrdma_send_completion_class,
+DECLARE_EVENT_CLASS(rpcrdma_completion_class,
TP_PROTO(
const struct ib_wc *wc,
const struct rpc_rdma_cid *cid
@@ -73,20 +63,29 @@ DECLARE_EVENT_CLASS(rpcrdma_send_completion_class,
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
+ __field(unsigned long, status)
+ __field(unsigned int, vendor_err)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
+ __entry->status = wc->status;
+ if (wc->status)
+ __entry->vendor_err = wc->vendor_err;
+ else
+ __entry->vendor_err = 0;
),
- TP_printk("cq.id=%u cid=%d",
- __entry->cq_id, __entry->completion_id
+ TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
+ __entry->cq_id, __entry->completion_id,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
)
);
-#define DEFINE_SEND_COMPLETION_EVENT(name) \
- DEFINE_EVENT(rpcrdma_send_completion_class, name, \
+#define DEFINE_COMPLETION_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_completion_class, name, \
TP_PROTO( \
const struct ib_wc *wc, \
const struct rpc_rdma_cid *cid \
@@ -978,27 +977,7 @@ TRACE_EVENT(xprtrdma_post_send_err,
)
);
-TRACE_EVENT(xprtrdma_post_recv,
- TP_PROTO(
- const struct rpcrdma_rep *rep
- ),
-
- TP_ARGS(rep),
-
- TP_STRUCT__entry(
- __field(u32, cq_id)
- __field(int, completion_id)
- ),
-
- TP_fast_assign(
- __entry->cq_id = rep->rr_cid.ci_queue_id;
- __entry->completion_id = rep->rr_cid.ci_completion_id;
- ),
-
- TP_printk("cq.id=%d cid=%d",
- __entry->cq_id, __entry->completion_id
- )
-);
+DEFINE_SIMPLE_CID_EVENT(xprtrdma_post_recv);
TRACE_EVENT(xprtrdma_post_recvs,
TP_PROTO(
@@ -1783,29 +1762,29 @@ DEFINE_ERROR_EVENT(chunk);
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
TP_PROTO(
- const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid,
u64 dma_addr,
u32 length
),
- TP_ARGS(rdma, dma_addr, length),
+ TP_ARGS(cid, dma_addr, length),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(u64, dma_addr)
__field(u32, length)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->dma_addr = dma_addr;
__entry->length = length;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
- __get_str(addr), __get_str(device),
+ TP_printk("cq.id=%u cid=%d dma_addr=%llu length=%u",
+ __entry->cq_id, __entry->completion_id,
__entry->dma_addr, __entry->length
)
);
@@ -1813,11 +1792,12 @@ DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
#define DEFINE_SVC_DMA_EVENT(name) \
DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \
TP_PROTO( \
- const struct svcxprt_rdma *rdma,\
+ const struct rpc_rdma_cid *cid, \
u64 dma_addr, \
u32 length \
), \
- TP_ARGS(rdma, dma_addr, length))
+ TP_ARGS(cid, dma_addr, length) \
+ )
DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_map_err);
@@ -1826,33 +1806,37 @@ DEFINE_SVC_DMA_EVENT(dma_unmap_page);
TRACE_EVENT(svcrdma_dma_map_rw_err,
TP_PROTO(
const struct svcxprt_rdma *rdma,
+ u64 offset,
+ u32 handle,
unsigned int nents,
int status
),
- TP_ARGS(rdma, nents, status),
+ TP_ARGS(rdma, offset, handle, nents, status),
TP_STRUCT__entry(
- __field(int, status)
+ __field(u32, cq_id)
+ __field(u32, handle)
+ __field(u64, offset)
__field(unsigned int, nents)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ __field(int, status)
),
TP_fast_assign(
- __entry->status = status;
+ __entry->cq_id = rdma->sc_sq_cq->res.id;
+ __entry->handle = handle;
+ __entry->offset = offset;
__entry->nents = nents;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ __entry->status = status;
),
- TP_printk("addr=%s device=%s nents=%u status=%d",
- __get_str(addr), __get_str(device), __entry->nents,
- __entry->status
+ TP_printk("cq.id=%u 0x%016llx:0x%08x nents=%u status=%d",
+ __entry->cq_id, (unsigned long long)__entry->offset,
+ __entry->handle, __entry->nents, __entry->status
)
);
-TRACE_EVENT(svcrdma_no_rwctx_err,
+TRACE_EVENT(svcrdma_rwctx_empty,
TP_PROTO(
const struct svcxprt_rdma *rdma,
unsigned int num_sges
@@ -1861,79 +1845,75 @@ TRACE_EVENT(svcrdma_no_rwctx_err,
TP_ARGS(rdma, num_sges),
TP_STRUCT__entry(
+ __field(u32, cq_id)
__field(unsigned int, num_sges)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = rdma->sc_sq_cq->res.id;
__entry->num_sges = num_sges;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s num_sges=%d",
- __get_str(addr), __get_str(device), __entry->num_sges
+ TP_printk("cq.id=%u num_sges=%d",
+ __entry->cq_id, __entry->num_sges
)
);
TRACE_EVENT(svcrdma_page_overrun_err,
TP_PROTO(
- const struct svcxprt_rdma *rdma,
- const struct svc_rqst *rqst,
+ const struct rpc_rdma_cid *cid,
unsigned int pageno
),
- TP_ARGS(rdma, rqst, pageno),
+ TP_ARGS(cid, pageno),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(unsigned int, pageno)
- __field(u32, xid)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->pageno = pageno;
- __entry->xid = __be32_to_cpu(rqst->rq_xid);
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
- __get_str(device), __entry->xid, __entry->pageno
+ TP_printk("cq.id=%u cid=%d pageno=%u",
+ __entry->cq_id, __entry->completion_id,
+ __entry->pageno
)
);
TRACE_EVENT(svcrdma_small_wrch_err,
TP_PROTO(
- const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid,
unsigned int remaining,
unsigned int seg_no,
unsigned int num_segs
),
- TP_ARGS(rdma, remaining, seg_no, num_segs),
+ TP_ARGS(cid, remaining, seg_no, num_segs),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(unsigned int, remaining)
__field(unsigned int, seg_no)
__field(unsigned int, num_segs)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->remaining = remaining;
__entry->seg_no = seg_no;
__entry->num_segs = num_segs;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
- __get_str(addr), __get_str(device), __entry->remaining,
- __entry->seg_no, __entry->num_segs
+ TP_printk("cq.id=%u cid=%d remaining=%u seg_no=%u num_segs=%u",
+ __entry->cq_id, __entry->completion_id,
+ __entry->remaining, __entry->seg_no, __entry->num_segs
)
);
@@ -2020,31 +2000,11 @@ TRACE_EVENT(svcrdma_post_send,
)
);
-DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_send);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_send);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_err);
-TRACE_EVENT(svcrdma_post_recv,
- TP_PROTO(
- const struct svc_rdma_recv_ctxt *ctxt
- ),
-
- TP_ARGS(ctxt),
-
- TP_STRUCT__entry(
- __field(u32, cq_id)
- __field(int, completion_id)
- ),
-
- TP_fast_assign(
- __entry->cq_id = ctxt->rc_cid.ci_queue_id;
- __entry->completion_id = ctxt->rc_cid.ci_completion_id;
- ),
-
- TP_printk("cq.id=%d cid=%d",
- __entry->cq_id, __entry->completion_id
- )
-);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_post_recv);
DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush);
@@ -2152,8 +2112,9 @@ TRACE_EVENT(svcrdma_wc_read,
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_err);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_read_finished);
-DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_write);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_write);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_err);
@@ -2184,65 +2145,74 @@ TRACE_EVENT(svcrdma_qp_error,
)
);
-DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
+DECLARE_EVENT_CLASS(svcrdma_sendqueue_class,
TP_PROTO(
- const struct svcxprt_rdma *rdma
+ const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid
),
- TP_ARGS(rdma),
+ TP_ARGS(rdma, cid),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(int, avail)
__field(int, depth)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->avail = atomic_read(&rdma->sc_sq_avail);
__entry->depth = rdma->sc_sq_depth;
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s sc_sq_avail=%d/%d",
- __get_str(addr), __entry->avail, __entry->depth
+ TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d",
+ __entry->cq_id, __entry->completion_id,
+ __entry->avail, __entry->depth
)
);
#define DEFINE_SQ_EVENT(name) \
- DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
- TP_PROTO( \
- const struct svcxprt_rdma *rdma \
- ), \
- TP_ARGS(rdma))
+ DEFINE_EVENT(svcrdma_sendqueue_class, name, \
+ TP_PROTO( \
+ const struct svcxprt_rdma *rdma, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(rdma, cid) \
+ )
-DEFINE_SQ_EVENT(full);
-DEFINE_SQ_EVENT(retry);
+DEFINE_SQ_EVENT(svcrdma_sq_full);
+DEFINE_SQ_EVENT(svcrdma_sq_retry);
TRACE_EVENT(svcrdma_sq_post_err,
TP_PROTO(
const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid,
int status
),
- TP_ARGS(rdma, status),
+ TP_ARGS(rdma, cid, status),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(int, avail)
__field(int, depth)
__field(int, status)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->avail = atomic_read(&rdma->sc_sq_avail);
__entry->depth = rdma->sc_sq_depth;
__entry->status = status;
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
- __get_str(addr), __entry->avail, __entry->depth,
- __entry->status
+ TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d status=%d",
+ __entry->cq_id, __entry->completion_id,
+ __entry->avail, __entry->depth, __entry->status
)
);
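After this rework, the wc-less events (xprtrdma_post_recv, svcrdma_wc_send, svcrdma_wc_write, svcrdma_post_recv, svcrdma_read_finished) all share rpcrdma_simple_cid_class and carry only cq_id/completion_id. A quick way to confirm the trimmed field list on a running kernel is to dump an event's format file; a sketch, assuming tracefs at /sys/kernel/tracing:

/* Sketch: print the format of one of the converted events. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/tracing/events/rpcrdma/svcrdma_wc_send/format";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}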
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index f7e537f64db4..4c1ef7b3705c 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -178,7 +178,9 @@
#define rxrpc_peer_traces \
EM(rxrpc_peer_free, "FREE ") \
EM(rxrpc_peer_get_accept, "GET accept ") \
+ EM(rxrpc_peer_get_application, "GET app ") \
EM(rxrpc_peer_get_bundle, "GET bundle ") \
+ EM(rxrpc_peer_get_call, "GET call ") \
EM(rxrpc_peer_get_client_conn, "GET cln-conn") \
EM(rxrpc_peer_get_input, "GET input ") \
EM(rxrpc_peer_get_input_error, "GET inpt-err") \
@@ -187,6 +189,7 @@
EM(rxrpc_peer_get_service_conn, "GET srv-conn") \
EM(rxrpc_peer_new_client, "NEW client ") \
EM(rxrpc_peer_new_prealloc, "NEW prealloc") \
+ EM(rxrpc_peer_put_application, "PUT app ") \
EM(rxrpc_peer_put_bundle, "PUT bundle ") \
EM(rxrpc_peer_put_call, "PUT call ") \
EM(rxrpc_peer_put_conn, "PUT conn ") \
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 6188ad0d9e0d..dbb01b4b7451 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -493,33 +493,30 @@ DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
*/
DECLARE_EVENT_CLASS(sched_stat_runtime,
- TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
+ TP_PROTO(struct task_struct *tsk, u64 runtime),
- TP_ARGS(tsk, __perf_count(runtime), vruntime),
+ TP_ARGS(tsk, __perf_count(runtime)),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( u64, runtime )
- __field( u64, vruntime )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->runtime = runtime;
- __entry->vruntime = vruntime;
),
- TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
+ TP_printk("comm=%s pid=%d runtime=%Lu [ns]",
__entry->comm, __entry->pid,
- (unsigned long long)__entry->runtime,
- (unsigned long long)__entry->vruntime)
+ (unsigned long long)__entry->runtime)
);
DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
- TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
- TP_ARGS(tsk, runtime, vruntime));
+ TP_PROTO(struct task_struct *tsk, u64 runtime),
+ TP_ARGS(tsk, runtime));
/*
* Tracepoint for showing priority inheritance modifying a tasks
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 337c90787fb1..cdd3a45e6003 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -1675,7 +1675,6 @@ DEFINE_SVCXDRBUF_EVENT(sendto);
svc_rqst_flag(LOCAL) \
svc_rqst_flag(USEDEFERRAL) \
svc_rqst_flag(DROPME) \
- svc_rqst_flag(SPLICE_OK) \
svc_rqst_flag(VICTIM) \
svc_rqst_flag_end(DATA)
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index b4bc2828fa09..1ef58a04fc57 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -46,22 +46,21 @@ DEFINE_EVENT(timer_class, timer_init,
/**
* timer_start - called when the timer is started
- * @timer: pointer to struct timer_list
- * @expires: the timers expiry time
- * @flags: the timers flags
+ * @timer: pointer to struct timer_list
+ * @bucket_expiry: the bucket expiry time
*/
TRACE_EVENT(timer_start,
TP_PROTO(struct timer_list *timer,
- unsigned long expires,
- unsigned int flags),
+ unsigned long bucket_expiry),
- TP_ARGS(timer, expires, flags),
+ TP_ARGS(timer, bucket_expiry),
TP_STRUCT__entry(
__field( void *, timer )
__field( void *, function )
__field( unsigned long, expires )
+ __field( unsigned long, bucket_expiry )
__field( unsigned long, now )
__field( unsigned int, flags )
),
@@ -69,15 +68,16 @@ TRACE_EVENT(timer_start,
TP_fast_assign(
__entry->timer = timer;
__entry->function = timer->function;
- __entry->expires = expires;
+ __entry->expires = timer->expires;
+ __entry->bucket_expiry = bucket_expiry;
__entry->now = jiffies;
- __entry->flags = flags;
+ __entry->flags = timer->flags;
),
- TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s",
+ TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] bucket_expiry=%lu cpu=%u idx=%u flags=%s",
__entry->timer, __entry->function, __entry->expires,
(long)__entry->expires - __entry->now,
- __entry->flags & TIMER_CPUMASK,
+ __entry->bucket_expiry, __entry->flags & TIMER_CPUMASK,
__entry->flags >> TIMER_ARRAYSHIFT,
decode_timer_flags(__entry->flags & TIMER_TRACE_FLAGMASK))
);
@@ -142,6 +142,26 @@ DEFINE_EVENT(timer_class, timer_cancel,
TP_ARGS(timer)
);
+TRACE_EVENT(timer_base_idle,
+
+ TP_PROTO(bool is_idle, unsigned int cpu),
+
+ TP_ARGS(is_idle, cpu),
+
+ TP_STRUCT__entry(
+ __field( bool, is_idle )
+ __field( unsigned int, cpu )
+ ),
+
+ TP_fast_assign(
+ __entry->is_idle = is_idle;
+ __entry->cpu = cpu;
+ ),
+
+ TP_printk("is_idle=%d cpu=%d",
+ __entry->is_idle, __entry->cpu)
+);
+
#define decode_clockid(type) \
__print_symbolic(type, \
{ CLOCK_REALTIME, "CLOCK_REALTIME" }, \
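timer_start now takes only (timer, bucket_expiry) and reads expires/flags from the timer itself, and timer_base_idle is a new event with just is_idle and cpu. A sketch that enables timer_base_idle and filters on its declared fields, again assuming tracefs at /sys/kernel/tracing:

/* Sketch: enable timer_base_idle and keep only transitions to idle. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0)
		perror(path);
	if (fd >= 0)
		close(fd);
}

int main(void)
{
	write_str("/sys/kernel/tracing/events/timer/timer_base_idle/filter",
		  "is_idle == 1");
	write_str("/sys/kernel/tracing/events/timer/timer_base_idle/enable",
		  "1");
	return 0;
}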
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 756b013fb832..75f00965ab15 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -829,8 +829,21 @@ __SYSCALL(__NR_futex_wait, sys_futex_wait)
#define __NR_futex_requeue 456
__SYSCALL(__NR_futex_requeue, sys_futex_requeue)
+#define __NR_statmount 457
+__SYSCALL(__NR_statmount, sys_statmount)
+
+#define __NR_listmount 458
+__SYSCALL(__NR_listmount, sys_listmount)
+
+#define __NR_lsm_get_self_attr 459
+__SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr)
+#define __NR_lsm_set_self_attr 460
+__SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr)
+#define __NR_lsm_list_modules 461
+__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
+
#undef __NR_syscalls
-#define __NR_syscalls 457
+#define __NR_syscalls 462
/*
* 32 bit systems traditionally used different
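Until libc headers pick up the new numbers, they can be carried as fallback defines and reached through syscall(2). A sketch using the asm-generic values from the hunk above (a quick ENOSYS probe; the real argument types are shown with the mount.h and lsm.h additions further down):

/* Sketch: fallback numbers taken from the asm-generic table above. */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_statmount
#define __NR_statmount		457
#endif
#ifndef __NR_listmount
#define __NR_listmount		458
#endif
#ifndef __NR_lsm_list_modules
#define __NR_lsm_list_modules	461
#endif

int main(void)
{
	/* Deliberately bogus arguments: a supporting kernel fails with
	 * EFAULT/EINVAL, an older one with ENOSYS. */
	long ret = syscall(__NR_listmount, NULL, NULL, 0, 0);

	if (ret == -1 && errno == ENOSYS)
		printf("listmount() not implemented by this kernel\n");
	else
		printf("listmount() present (probe: %s)\n", strerror(errno));
	return 0;
}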
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index da43810b7485..48ad69f7722e 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -316,6 +316,7 @@ typedef int __bitwise __kernel_rwf_t;
#define PAGE_IS_SWAPPED (1 << 4)
#define PAGE_IS_PFNZERO (1 << 5)
#define PAGE_IS_HUGE (1 << 6)
+#define PAGE_IS_SOFT_DIRTY (1 << 7)
/*
* struct page_region - Page region with flags
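PAGE_IS_SOFT_DIRTY extends the category bits reported by the PAGEMAP_SCAN ioctl on /proc/<pid>/pagemap. A rough sketch of asking for soft-dirty regions in a private buffer; it assumes 6.8-era <linux/fs.h> (pm_scan_arg, page_region, PAGEMAP_SCAN) plus CONFIG_MEM_SOFT_DIRTY, and trims error handling:

/* Sketch: report soft-dirty regions of a freshly written buffer. */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	static char probe[1 << 20];	/* something to scan and dirty */
	struct page_region regions[64];
	struct pm_scan_arg arg;
	int fd, i, n;

	memset(probe, 1, sizeof(probe));

	memset(&arg, 0, sizeof(arg));
	arg.size = sizeof(arg);
	arg.start = (uintptr_t)probe;
	arg.end = (uintptr_t)probe + sizeof(probe);
	arg.vec = (uintptr_t)regions;
	arg.vec_len = 64;
	arg.category_mask = PAGE_IS_SOFT_DIRTY;
	arg.return_mask = PAGE_IS_SOFT_DIRTY;

	fd = open("/proc/self/pagemap", O_RDONLY);
	n = ioctl(fd, PAGEMAP_SCAN, &arg);	/* returns regions filled */
	for (i = 0; i < n; i++)
		printf("%#llx-%#llx categories=%#llx\n",
		       (unsigned long long)regions[i].start,
		       (unsigned long long)regions[i].end,
		       (unsigned long long)regions[i].categories);
	if (fd >= 0)
		close(fd);
	return 0;
}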
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index db92a7202b34..e7418d15fe39 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -209,7 +209,7 @@
* - add FUSE_HAS_EXPIRE_ONLY
*
* 7.39
- * - add FUSE_DIRECT_IO_RELAX
+ * - add FUSE_DIRECT_IO_ALLOW_MMAP
* - add FUSE_STATX and related structures
*/
@@ -409,8 +409,7 @@ struct fuse_file_lock {
* FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir,
* symlink and mknod (single group that matches parent)
* FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation
- * FUSE_DIRECT_IO_RELAX: relax restrictions in FOPEN_DIRECT_IO mode, for now
- * allow shared mmap
+ * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode.
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -449,7 +448,10 @@ struct fuse_file_lock {
#define FUSE_HAS_INODE_DAX (1ULL << 33)
#define FUSE_CREATE_SUPP_GROUP (1ULL << 34)
#define FUSE_HAS_EXPIRE_ONLY (1ULL << 35)
-#define FUSE_DIRECT_IO_RELAX (1ULL << 36)
+#define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36)
+
+/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
+#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP
/**
* CUSE INIT request/reply flags
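Because the old name is kept as an alias, filesystems that already set FUSE_DIRECT_IO_RELAX during INIT keep compiling and now simply request the narrower mmap-only relaxation. A trivial compile-time check of the alias, as a sketch:

/* Sketch: the obsolete name is an alias for the renamed flag. */
#include <assert.h>
#include <linux/fuse.h>

static_assert(FUSE_DIRECT_IO_RELAX == FUSE_DIRECT_IO_ALLOW_MMAP,
	      "compat alias must track the renamed flag");

int main(void)
{
	return 0;
}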
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index 01766dd839b0..c17bb096ea68 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -25,6 +25,7 @@
#define KEXEC_FILE_UNLOAD 0x00000001
#define KEXEC_FILE_ON_CRASH 0x00000002
#define KEXEC_FILE_NO_INITRAMFS 0x00000004
+#define KEXEC_FILE_DEBUG 0x00000008
/* These values match the ELF architecture values.
* Unless there is a good reason that should continue to be the case.
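KEXEC_FILE_DEBUG is a new kexec_file_load(2) flag; per the naming it appears intended to turn on additional debugging output in the kernel's image-loading path. A hedged sketch of passing it (paths are placeholders, CAP_SYS_BOOT is required, and SYS_kexec_file_load may need a fallback define on older libcs):

/* Sketch: request verbose loading via KEXEC_FILE_DEBUG. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/kexec.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	static const char cmdline[] = "console=ttyS0";
	int kernel_fd = open("/boot/vmlinuz", O_RDONLY);	/* placeholder */
	int initrd_fd = open("/boot/initrd.img", O_RDONLY);	/* placeholder */
	long ret;

	ret = syscall(SYS_kexec_file_load, kernel_fd, initrd_fd,
		      sizeof(cmdline), cmdline,
		      (unsigned long)KEXEC_FILE_DEBUG);
	if (ret)
		perror("kexec_file_load");
	return ret ? 1 : 0;
}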
diff --git a/include/uapi/linux/lsm.h b/include/uapi/linux/lsm.h
new file mode 100644
index 000000000000..f8aef9ade549
--- /dev/null
+++ b/include/uapi/linux/lsm.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Linux Security Modules (LSM) - User space API
+ *
+ * Copyright (C) 2022 Casey Schaufler <casey@schaufler-ca.com>
+ * Copyright (C) 2022 Intel Corporation
+ */
+
+#ifndef _UAPI_LINUX_LSM_H
+#define _UAPI_LINUX_LSM_H
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+
+/**
+ * struct lsm_ctx - LSM context information
+ * @id: the LSM id number, see LSM_ID_XXX
+ * @flags: LSM specific flags
+ * @len: length of the lsm_ctx struct, @ctx and any other data or padding
+ * @ctx_len: the size of @ctx
+ * @ctx: the LSM context value
+ *
+ * The @len field MUST be equal to the size of the lsm_ctx struct
+ * plus any additional padding and/or data placed after @ctx.
+ *
+ * In all cases @ctx_len MUST be equal to the length of @ctx.
+ * If @ctx is a string value it should be nul terminated with
+ * @ctx_len equal to `strlen(@ctx) + 1`. Binary values are
+ * supported.
+ *
+ * The @flags and @ctx fields SHOULD only be interpreted by the
+ * LSM specified by @id; they MUST be set to zero/0 when not used.
+ */
+struct lsm_ctx {
+ __u64 id;
+ __u64 flags;
+ __u64 len;
+ __u64 ctx_len;
+ __u8 ctx[] __counted_by(ctx_len);
+};
+
+/*
+ * ID tokens to identify Linux Security Modules (LSMs)
+ *
+ * These token values are used to uniquely identify specific LSMs
+ * in the kernel as well as in the kernel's LSM userspace API.
+ *
+ * A value of zero/0 is considered undefined and should not be used
+ * outside the kernel. Values 1-99 are reserved for potential
+ * future use.
+ */
+#define LSM_ID_UNDEF 0
+#define LSM_ID_CAPABILITY 100
+#define LSM_ID_SELINUX 101
+#define LSM_ID_SMACK 102
+#define LSM_ID_TOMOYO 103
+#define LSM_ID_APPARMOR 104
+#define LSM_ID_YAMA 105
+#define LSM_ID_LOADPIN 106
+#define LSM_ID_SAFESETID 107
+#define LSM_ID_LOCKDOWN 108
+#define LSM_ID_BPF 109
+#define LSM_ID_LANDLOCK 110
+
+/*
+ * LSM_ATTR_XXX definitions identify different LSM attributes
+ * which are used in the kernel's LSM userspace API. Support
+ * for these attributes varies across the different LSMs. None
+ * are required.
+ *
+ * A value of zero/0 is considered undefined and should not be used
+ * outside the kernel. Values 1-99 are reserved for potential
+ * future use.
+ */
+#define LSM_ATTR_UNDEF 0
+#define LSM_ATTR_CURRENT 100
+#define LSM_ATTR_EXEC 101
+#define LSM_ATTR_FSCREATE 102
+#define LSM_ATTR_KEYCREATE 103
+#define LSM_ATTR_PREV 104
+#define LSM_ATTR_SOCKCREATE 105
+
+/*
+ * LSM_FLAG_XXX definitions identify special handling instructions
+ * for the API.
+ */
+#define LSM_FLAG_SINGLE 0x0001
+
+#endif /* _UAPI_LINUX_LSM_H */
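A sketch of exercising the new LSM syscalls: list the IDs of the active LSMs, then fetch this task's LSM_ATTR_CURRENT context. The syscall numbers are the asm-generic ones added earlier in this patch, and the argument types follow my reading of the syscall definitions in this series (32-bit size arguments), so treat them as assumptions:

/* Sketch: enumerate active LSMs and read LSM_ATTR_CURRENT. */
#define _GNU_SOURCE
#include <linux/lsm.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_lsm_get_self_attr
#define __NR_lsm_get_self_attr	459
#endif
#ifndef __NR_lsm_list_modules
#define __NR_lsm_list_modules	461
#endif

int main(void)
{
	uint64_t ids[16];
	uint32_t ids_size = sizeof(ids);
	uint64_t raw[128];	/* keeps struct lsm_ctx aligned */
	struct lsm_ctx *ctx = (struct lsm_ctx *)raw;
	uint32_t ctx_size = sizeof(raw);
	long n, i;

	n = syscall(__NR_lsm_list_modules, ids, &ids_size, 0);
	for (i = 0; i < n; i++)
		printf("active LSM id %llu\n", (unsigned long long)ids[i]);

	n = syscall(__NR_lsm_get_self_attr, LSM_ATTR_CURRENT, ctx,
		    &ctx_size, 0);
	if (n > 0)
		printf("current: lsm=%llu ctx=%.*s\n",
		       (unsigned long long)ctx->id,
		       (int)ctx->ctx_len, (const char *)ctx->ctx);
	return 0;
}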
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index bb242fdcfe6b..ad5478dbad00 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -138,4 +138,74 @@ struct mount_attr {
/* List of all mount_attr versions. */
#define MOUNT_ATTR_SIZE_VER0 32 /* sizeof first published struct */
+
+/*
+ * Structure for getting mount/superblock/filesystem info with statmount(2).
+ *
+ * The interface is similar to statx(2): individual fields or groups can be
+ * selected with the @mask argument of statmount(). The kernel will set the
+ * @mask field according to the supported fields.
+ *
+ * If string fields are selected, then the caller needs to pass a buffer that
+ * has space after the fixed part of the structure. Nul terminated strings are
+ * copied there and offsets relative to @str are stored in the relevant fields.
+ * If the buffer is too small, then EOVERFLOW is returned. The size actually
+ * used is returned in @size.
+ */
+struct statmount {
+ __u32 size; /* Total size, including strings */
+ __u32 __spare1;
+ __u64 mask; /* What results were written */
+ __u32 sb_dev_major; /* Device ID */
+ __u32 sb_dev_minor;
+ __u64 sb_magic; /* ..._SUPER_MAGIC */
+ __u32 sb_flags; /* SB_{RDONLY,SYNCHRONOUS,DIRSYNC,LAZYTIME} */
+ __u32 fs_type; /* [str] Filesystem type */
+ __u64 mnt_id; /* Unique ID of mount */
+ __u64 mnt_parent_id; /* Unique ID of parent (for root == mnt_id) */
+ __u32 mnt_id_old; /* Reused IDs used in proc/.../mountinfo */
+ __u32 mnt_parent_id_old;
+ __u64 mnt_attr; /* MOUNT_ATTR_... */
+ __u64 mnt_propagation; /* MS_{SHARED,SLAVE,PRIVATE,UNBINDABLE} */
+ __u64 mnt_peer_group; /* ID of shared peer group */
+ __u64 mnt_master; /* Mount receives propagation from this ID */
+ __u64 propagate_from; /* Propagation from in current namespace */
+ __u32 mnt_root; /* [str] Root of mount relative to root of fs */
+ __u32 mnt_point; /* [str] Mountpoint relative to current root */
+ __u64 __spare2[50];
+ char str[]; /* Variable size part containing strings */
+};
+
+/*
+ * Structure for passing mount ID and miscellaneous parameters to statmount(2)
+ * and listmount(2).
+ *
+ * For statmount(2) @param represents the request mask.
+ * For listmount(2) @param represents the last listed mount id (or zero).
+ */
+struct mnt_id_req {
+ __u32 size;
+ __u32 spare;
+ __u64 mnt_id;
+ __u64 param;
+};
+
+/* List of all mnt_id_req versions. */
+#define MNT_ID_REQ_SIZE_VER0 24 /* sizeof first published struct */
+
+/*
+ * @mask bits for statmount(2)
+ */
+#define STATMOUNT_SB_BASIC 0x00000001U /* Want/got sb_... */
+#define STATMOUNT_MNT_BASIC 0x00000002U /* Want/got mnt_... */
+#define STATMOUNT_PROPAGATE_FROM 0x00000004U /* Want/got propagate_from */
+#define STATMOUNT_MNT_ROOT 0x00000008U /* Want/got mnt_root */
+#define STATMOUNT_MNT_POINT 0x00000010U /* Want/got mnt_point */
+#define STATMOUNT_FS_TYPE 0x00000020U /* Want/got fs_type */
+
+/*
+ * Special @mnt_id values that can be passed to listmount
+ */
+#define LSMT_ROOT 0xffffffffffffffff /* root mount */
+
#endif /* _UAPI_LINUX_MOUNT_H */
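Putting statmount() and listmount() together: enumerate the children of the root mount in the current namespace, then ask for each one's mountpoint string. A sketch; the syscall numbers come from the asm-generic table earlier in the patch, and the kernel-set @mask is checked before trusting the string offsets:

/* Sketch: listmount() + statmount() over the current mount namespace. */
#define _GNU_SOURCE
#include <linux/mount.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_statmount
#define __NR_statmount	457
#endif
#ifndef __NR_listmount
#define __NR_listmount	458
#endif

int main(void)
{
	uint64_t list[256];
	uint64_t raw[512];	/* room for struct statmount + strings */
	struct statmount *sm = (struct statmount *)raw;
	struct mnt_id_req req;
	long n, i;

	memset(&req, 0, sizeof(req));
	req.size = MNT_ID_REQ_SIZE_VER0;
	req.mnt_id = LSMT_ROOT;		/* children of the root mount */

	n = syscall(__NR_listmount, &req, list, 256, 0);
	for (i = 0; i < n; i++) {
		memset(&req, 0, sizeof(req));
		req.size = MNT_ID_REQ_SIZE_VER0;
		req.mnt_id = list[i];
		req.param = STATMOUNT_MNT_ROOT | STATMOUNT_MNT_POINT;

		if (syscall(__NR_statmount, &req, sm, sizeof(raw), 0) ||
		    !(sm->mask & STATMOUNT_MNT_POINT))
			continue;
		printf("mnt_id=%llu mountpoint=%s\n",
		       (unsigned long long)list[i],
		       sm->str + sm->mnt_point);
	}
	return 0;
}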
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 39c6a250dd1b..3a64499b0f5d 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -204,6 +204,8 @@ enum perf_branch_sample_type_shift {
PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */
+ PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, /* save occurrences of events on a branch */
+
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};
@@ -235,6 +237,8 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,
+ PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT,
+
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
@@ -982,6 +986,12 @@ enum perf_event_type {
* { u64 nr;
* { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
* { u64 from, to, flags } lbr[nr];
+ * #
+ * # The format of the counters is decided by the
+ * # "branch_counter_nr" and "branch_counter_width",
+ * # which are defined in the ABI.
+ * #
+ * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS
* } && PERF_SAMPLE_BRANCH_STACK
*
* { u64 abi; # enum perf_sample_regs_abi
@@ -1427,6 +1437,9 @@ struct perf_branch_entry {
reserved:31;
};
+/* Size of used info bits in struct perf_branch_entry */
+#define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33
+
union perf_sample_weight {
__u64 full;
#if defined(__LITTLE_ENDIAN_BITFIELD)
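PERF_SAMPLE_BRANCH_COUNTERS asks the PMU to append a per-branch counter word to each LBR record; its packing is advertised by the ABI via branch_counter_nr/branch_counter_width, as noted in the sample layout comment above. A sketch of requesting the bit; whether it is accepted depends on hardware and driver support, and reading the appended counters back from the ring buffer is not shown:

/* Sketch: request branch counters alongside the branch stack. */
#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY |
				  PERF_SAMPLE_BRANCH_COUNTERS;
	attr.exclude_kernel = 1;	/* friendlier to perf_event_paranoid */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);
	return 0;
}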
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 7cab2c65d3d7..2f2ee82d5517 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -154,6 +154,7 @@ struct statx {
#define STATX_BTIME 0x00000800U /* Want/got stx_btime */
#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */
#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
+#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
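STATX_MNT_ID_UNIQUE requests the 64-bit unique mount ID in stx_mnt_id instead of the reusable one, i.e. the same ID space the new statmount()/listmount() calls operate on. A sketch, with the define guarded in case libc headers predate it (glibc's statx() wrapper and a struct statx that exposes stx_mnt_id are assumed):

/* Sketch: fetch the unique mount ID for the current directory. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

#ifndef STATX_MNT_ID_UNIQUE
#define STATX_MNT_ID_UNIQUE	0x00004000U
#endif

int main(void)
{
	struct statx sx;

	if (statx(AT_FDCWD, ".", 0, STATX_MNT_ID_UNIQUE, &sx))
		return 1;
	if (sx.stx_mask & STATX_MNT_ID_UNIQUE)
		printf("unique mount id: %llu\n",
		       (unsigned long long)sx.stx_mnt_id);
	else
		printf("kernel returned only the legacy mount id\n");
	return 0;
}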
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index 0dbc81015018..2841e4ea8f2c 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -41,7 +41,8 @@
UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \
UFFD_FEATURE_WP_UNPOPULATED | \
UFFD_FEATURE_POISON | \
- UFFD_FEATURE_WP_ASYNC)
+ UFFD_FEATURE_WP_ASYNC | \
+ UFFD_FEATURE_MOVE)
#define UFFD_API_IOCTLS \
((__u64)1 << _UFFDIO_REGISTER | \
(__u64)1 << _UFFDIO_UNREGISTER | \
@@ -50,6 +51,7 @@
((__u64)1 << _UFFDIO_WAKE | \
(__u64)1 << _UFFDIO_COPY | \
(__u64)1 << _UFFDIO_ZEROPAGE | \
+ (__u64)1 << _UFFDIO_MOVE | \
(__u64)1 << _UFFDIO_WRITEPROTECT | \
(__u64)1 << _UFFDIO_CONTINUE | \
(__u64)1 << _UFFDIO_POISON)
@@ -73,6 +75,7 @@
#define _UFFDIO_WAKE (0x02)
#define _UFFDIO_COPY (0x03)
#define _UFFDIO_ZEROPAGE (0x04)
+#define _UFFDIO_MOVE (0x05)
#define _UFFDIO_WRITEPROTECT (0x06)
#define _UFFDIO_CONTINUE (0x07)
#define _UFFDIO_POISON (0x08)
@@ -92,6 +95,8 @@
struct uffdio_copy)
#define UFFDIO_ZEROPAGE _IOWR(UFFDIO, _UFFDIO_ZEROPAGE, \
struct uffdio_zeropage)
+#define UFFDIO_MOVE _IOWR(UFFDIO, _UFFDIO_MOVE, \
+ struct uffdio_move)
#define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \
struct uffdio_writeprotect)
#define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \
@@ -222,6 +227,9 @@ struct uffdio_api {
* asynchronous mode is supported in which the write fault is
* automatically resolved and write-protection is un-set.
* It implies UFFD_FEATURE_WP_UNPOPULATED.
+ *
+ * UFFD_FEATURE_MOVE indicates that the kernel supports moving
+ * existing page contents from userspace.
*/
#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
#define UFFD_FEATURE_EVENT_FORK (1<<1)
@@ -239,6 +247,7 @@ struct uffdio_api {
#define UFFD_FEATURE_WP_UNPOPULATED (1<<13)
#define UFFD_FEATURE_POISON (1<<14)
#define UFFD_FEATURE_WP_ASYNC (1<<15)
+#define UFFD_FEATURE_MOVE (1<<16)
__u64 features;
__u64 ioctls;
@@ -347,6 +356,24 @@ struct uffdio_poison {
__s64 updated;
};
+struct uffdio_move {
+ __u64 dst;
+ __u64 src;
+ __u64 len;
+ /*
+ * Especially if used to atomically remove memory from the
+	 * address space, the wake on the dst range is not needed.
+ */
+#define UFFDIO_MOVE_MODE_DONTWAKE ((__u64)1<<0)
+#define UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES ((__u64)1<<1)
+ __u64 mode;
+ /*
+ * "move" is written by the ioctl and must be at the end: the
+ * copy_from_user will not read the last 8 bytes.
+ */
+ __s64 move;
+};
+
/*
* Flags for the userfaultfd(2) system call itself.
*/
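UFFDIO_MOVE moves existing pages into the destination range instead of copying them, typically from a staging area the process already populated. A fragment (not a full program) showing the ioctl itself; creating the userfaultfd, negotiating UFFD_FEATURE_MOVE and registering the dst range are elided, and uffd/dst/src/len are caller-supplied, page-aligned placeholders:

/* Sketch: resolve a fault by moving pages with UFFDIO_MOVE. */
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int move_pages(int uffd, unsigned long dst, unsigned long src,
		      unsigned long len)
{
	struct uffdio_move mv = {
		.dst = dst,
		.src = src,
		.len = len,
		.mode = 0,	/* or UFFDIO_MOVE_MODE_DONTWAKE */
	};

	if (ioctl(uffd, UFFDIO_MOVE, &mv) == -1) {
		perror("UFFDIO_MOVE");
		return -1;
	}
	/* mv.move reports how many bytes were actually moved. */
	return mv.move == (long long)len ? 0 : -1;
}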
diff --git a/include/uapi/regulator/regulator.h b/include/uapi/regulator/regulator.h
new file mode 100644
index 000000000000..71bf71a22e7f
--- /dev/null
+++ b/include/uapi/regulator/regulator.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Regulator uapi header
+ *
+ * Author: Naresh Solanki <Naresh.Solanki@9elements.com>
+ */
+
+#ifndef _UAPI_REGULATOR_H
+#define _UAPI_REGULATOR_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+/*
+ * Regulator notifier events.
+ *
+ * UNDER_VOLTAGE Regulator output is under voltage.
+ * OVER_CURRENT Regulator output current is too high.
+ * REGULATION_OUT Regulator output is out of regulation.
+ * FAIL Regulator output has failed.
+ * OVER_TEMP Regulator over temp.
+ * FORCE_DISABLE Regulator forcibly shut down by software.
+ * VOLTAGE_CHANGE Regulator voltage changed.
+ * Data passed is old voltage cast to (void *).
+ * DISABLE Regulator was disabled.
+ * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed.
+ * Data passed is "struct pre_voltage_change_data"
+ * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason.
+ * Data passed is old voltage cast to (void *).
+ * PRE_DISABLE Regulator is about to be disabled
+ * ABORT_DISABLE Regulator disable failed for some reason
+ *
+ * NOTE: These events can be OR'ed together when passed into handler.
+ */
+
+#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01
+#define REGULATOR_EVENT_OVER_CURRENT 0x02
+#define REGULATOR_EVENT_REGULATION_OUT 0x04
+#define REGULATOR_EVENT_FAIL 0x08
+#define REGULATOR_EVENT_OVER_TEMP 0x10
+#define REGULATOR_EVENT_FORCE_DISABLE 0x20
+#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
+#define REGULATOR_EVENT_DISABLE 0x80
+#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100
+#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
+#define REGULATOR_EVENT_PRE_DISABLE 0x400
+#define REGULATOR_EVENT_ABORT_DISABLE 0x800
+#define REGULATOR_EVENT_ENABLE 0x1000
+/*
+ * The following notifications should be emitted only if the detected
+ * condition is such that the HW is likely to still be working, but consumers
+ * should take a recovery action to prevent problems from escalating into
+ * errors.
+ */
+#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000
+#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000
+#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000
+#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000
+#define REGULATOR_EVENT_WARN_MASK 0x1E000
+
+struct reg_genl_event {
+ char reg_name[32];
+ uint64_t event;
+};
+
+/* attributes of reg_genl_family */
+enum {
+ REG_GENL_ATTR_UNSPEC,
+ REG_GENL_ATTR_EVENT, /* reg event info needed by user space */
+ __REG_GENL_ATTR_MAX,
+};
+
+#define REG_GENL_ATTR_MAX (__REG_GENL_ATTR_MAX - 1)
+
+/* commands supported by the reg_genl_family */
+enum {
+ REG_GENL_CMD_UNSPEC,
+ REG_GENL_CMD_EVENT, /* kernel->user notifications for reg events */
+ __REG_GENL_CMD_MAX,
+};
+
+#define REG_GENL_CMD_MAX (__REG_GENL_CMD_MAX - 1)
+
+#define REG_GENL_FAMILY_NAME "reg_event"
+#define REG_GENL_VERSION 0x01
+#define REG_GENL_MCAST_GROUP_NAME "reg_mc_group"
+
+#endif /* _UAPI_REGULATOR_H */
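The new reg_genl family delivers struct reg_genl_event over a generic-netlink multicast group. A listener sketch using libnl-3 (link with -lnl-3 -lnl-genl-3); the family, group and attribute names come from the header above, the uapi include path assumes the header is installed as <regulator/regulator.h>, and the rest is ordinary libnl boilerplate:

/* Sketch: listen for regulator events on the "reg_event" genl family. */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <regulator/regulator.h>	/* installed from include/uapi/regulator/ */
#include <stdio.h>

static int on_event(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[REG_GENL_ATTR_MAX + 1];
	struct reg_genl_event *ev;

	(void)arg;
	if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs, REG_GENL_ATTR_MAX, NULL))
		return NL_SKIP;
	if (!attrs[REG_GENL_ATTR_EVENT])
		return NL_SKIP;

	ev = nla_data(attrs[REG_GENL_ATTR_EVENT]);
	printf("regulator %s event mask %#llx\n", ev->reg_name,
	       (unsigned long long)ev->event);
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	if (!sk || genl_connect(sk))
		return 1;
	grp = genl_ctrl_resolve_grp(sk, REG_GENL_FAMILY_NAME,
				    REG_GENL_MCAST_GROUP_NAME);
	if (grp < 0) {
		fprintf(stderr, "reg_event family/group not registered\n");
		return 1;
	}
	nl_socket_add_membership(sk, grp);
	nl_socket_disable_seq_check(sk);
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, on_event, NULL);

	for (;;)
		nl_recvmsgs_default(sk);
	return 0;
}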