Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/acpi.h21
-rw-r--r--include/linux/acpi_iort.h4
-rw-r--r--include/linux/amba/bus.h11
-rw-r--r--include/linux/amd-pstate.h20
-rw-r--r--include/linux/anon_inodes.h5
-rw-r--r--include/linux/arch_topology.h8
-rw-r--r--include/linux/arm_ffa.h27
-rw-r--r--include/linux/bio.h11
-rw-r--r--include/linux/bitmap.h95
-rw-r--r--include/linux/bitops.h23
-rw-r--r--include/linux/blk-mq.h85
-rw-r--r--include/linux/blk_types.h36
-rw-r--r--include/linux/blkdev.h145
-rw-r--r--include/linux/bootconfig.h7
-rw-r--r--include/linux/bpf.h31
-rw-r--r--include/linux/bpf_crypto.h24
-rw-r--r--include/linux/bpf_verifier.h11
-rw-r--r--include/linux/bsg-lib.h3
-rw-r--r--include/linux/btf_ids.h2
-rw-r--r--include/linux/bus/stm32_firewall_device.h142
-rw-r--r--include/linux/ceph/ceph_debug.h18
-rw-r--r--include/linux/cgroup.h2
-rw-r--r--include/linux/clk.h5
-rw-r--r--include/linux/cmpxchg-emu.h15
-rw-r--r--include/linux/compiler_types.h29
-rw-r--r--include/linux/coredump.h2
-rw-r--r--include/linux/cpu.h11
-rw-r--r--include/linux/cpufreq.h10
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/cpumask.h42
-rw-r--r--include/linux/cpuset.h3
-rw-r--r--include/linux/crash_core.h15
-rw-r--r--include/linux/cxl-event.h39
-rw-r--r--include/linux/dev_printk.h25
-rw-r--r--include/linux/devcoredump.h5
-rw-r--r--include/linux/devm-helpers.h4
-rw-r--r--include/linux/dma-buf.h2
-rw-r--r--include/linux/dma-direct.h18
-rw-r--r--include/linux/dma-fence.h7
-rw-r--r--include/linux/dma-map-ops.h6
-rw-r--r--include/linux/dmar.h2
-rw-r--r--include/linux/dynamic_debug.h4
-rw-r--r--include/linux/dynamic_queue_limits.h50
-rw-r--r--include/linux/efi.h9
-rw-r--r--include/linux/elf.h2
-rw-r--r--include/linux/energy_model.h5
-rw-r--r--include/linux/etherdevice.h37
-rw-r--r--include/linux/ethtool.h27
-rw-r--r--include/linux/evm.h8
-rw-r--r--include/linux/execmem.h132
-rw-r--r--include/linux/fb.h4
-rw-r--r--include/linux/file.h1
-rw-r--r--include/linux/filter.h52
-rw-r--r--include/linux/find.h27
-rw-r--r--include/linux/firewire.h3
-rw-r--r--include/linux/firmware/cirrus/cs_dsp.h4
-rw-r--r--include/linux/firmware/qcom/qcom_qseecom.h55
-rw-r--r--include/linux/firmware/qcom/qcom_scm.h10
-rw-r--r--include/linux/fortify-string.h9
-rw-r--r--include/linux/fprobe.h18
-rw-r--r--include/linux/fs.h96
-rw-r--r--include/linux/fs_parser.h4
-rw-r--r--include/linux/fscache.h22
-rw-r--r--include/linux/ftrace.h3
-rw-r--r--include/linux/genetlink.h19
-rw-r--r--include/linux/genl_magic_struct.h2
-rw-r--r--include/linux/gpio.h21
-rw-r--r--include/linux/gpio/driver.h4
-rw-r--r--include/linux/gpio/property.h5
-rw-r--r--include/linux/hid.h6
-rw-r--r--include/linux/hid_bpf.h3
-rw-r--r--include/linux/hugetlb.h8
-rw-r--r--include/linux/ieee80211.h30
-rw-r--r--include/linux/integrity.h34
-rw-r--r--include/linux/intel_rapl.h32
-rw-r--r--include/linux/intel_tpmi.h12
-rw-r--r--include/linux/io.h8
-rw-r--r--include/linux/io_uring.h6
-rw-r--r--include/linux/io_uring/cmd.h24
-rw-r--r--include/linux/io_uring/net.h18
-rw-r--r--include/linux/io_uring_types.h19
-rw-r--r--include/linux/iommu.h16
-rw-r--r--include/linux/irq.h2
-rw-r--r--include/linux/irqchip/riscv-aplic.h145
-rw-r--r--include/linux/irqchip/riscv-imsic.h87
-rw-r--r--include/linux/irqdesc.h16
-rw-r--r--include/linux/jbd2.h4
-rw-r--r--include/linux/jump_label.h3
-rw-r--r--include/linux/kernel_stat.h8
-rw-r--r--include/linux/kexec.h11
-rw-r--r--include/linux/kprobes.h7
-rw-r--r--include/linux/kvm_host.h4
-rw-r--r--include/linux/kvm_types.h1
-rw-r--r--include/linux/libata.h16
-rw-r--r--include/linux/linkmode.h27
-rw-r--r--include/linux/livepatch.h6
-rw-r--r--include/linux/lsm_hook_defs.h3
-rw-r--r--include/linux/marvell_phy.h3
-rw-r--r--include/linux/math64.h8
-rw-r--r--include/linux/mfd/axp20x.h98
-rw-r--r--include/linux/mhi.h18
-rw-r--r--include/linux/mlx5/cq.h7
-rw-r--r--include/linux/mlx5/device.h8
-rw-r--r--include/linux/mlx5/driver.h10
-rw-r--r--include/linux/mlx5/mlx5_ifc.h63
-rw-r--r--include/linux/mmc/host.h4
-rw-r--r--include/linux/mmc/sdio_func.h5
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/mmc/slot-gpio.h6
-rw-r--r--include/linux/mmu_notifier.h44
-rw-r--r--include/linux/mmzone.h5
-rw-r--r--include/linux/module.h9
-rw-r--r--include/linux/moduleloader.h15
-rw-r--r--include/linux/namei.h1
-rw-r--r--include/linux/net.h4
-rw-r--r--include/linux/net/intel/libie/rx.h50
-rw-r--r--include/linux/netdevice.h55
-rw-r--r--include/linux/netfs.h198
-rw-r--r--include/linux/nfs4.h6
-rw-r--r--include/linux/numa.h7
-rw-r--r--include/linux/objpool.h105
-rw-r--r--include/linux/of_reserved_mem.h1
-rw-r--r--include/linux/oid_registry.h1
-rw-r--r--include/linux/pagemap.h2
-rw-r--r--include/linux/papr_scm.h49
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/peci.h1
-rw-r--r--include/linux/perf/riscv_pmu.h8
-rw-r--r--include/linux/perf_event.h37
-rw-r--r--include/linux/phy.h1
-rw-r--r--include/linux/phy/phy-dp.h3
-rw-r--r--include/linux/phylink.h42
-rw-r--r--include/linux/platform_data/davinci_asp.h15
-rw-r--r--include/linux/platform_data/spi-omap2-mcspi.h3
-rw-r--r--include/linux/platform_data/ti-sysc.h1
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h6
-rw-r--r--include/linux/platform_profile.h1
-rw-r--r--include/linux/pm_opp.h8
-rw-r--r--include/linux/pm_wakeup.h12
-rw-r--r--include/linux/power/bq27xxx_battery.h8
-rw-r--r--include/linux/printk.h4
-rw-r--r--include/linux/profile.h5
-rw-r--r--include/linux/pse-pd/pse.h83
-rw-r--r--include/linux/pwm.h36
-rw-r--r--include/linux/pxa2xx_ssp.h2
-rw-r--r--include/linux/qat/qat_mig_dev.h31
-rw-r--r--include/linux/rcupdate.h22
-rw-r--r--include/linux/rcupdate_wait.h18
-rw-r--r--include/linux/regmap.h70
-rw-r--r--include/linux/regulator/consumer.h9
-rw-r--r--include/linux/regulator/pca9450.h1
-rw-r--r--include/linux/rhashtable.h10
-rw-r--r--include/linux/ring_buffer.h6
-rw-r--r--include/linux/rtnetlink.h3
-rw-r--r--include/linux/sched.h3
-rw-r--r--include/linux/sched/idle.h2
-rw-r--r--include/linux/sched/topology.h10
-rw-r--r--include/linux/scmi_protocol.h86
-rw-r--r--include/linux/security.h4
-rw-r--r--include/linux/seq_file.h13
-rw-r--r--include/linux/sfp.h4
-rw-r--r--include/linux/shm.h5
-rw-r--r--include/linux/skbuff.h151
-rw-r--r--include/linux/skbuff_ref.h75
-rw-r--r--include/linux/skmsg.h6
-rw-r--r--include/linux/slab.h21
-rw-r--r--include/linux/soc/mediatek/mtk-cmdq.h139
-rw-r--r--include/linux/socket.h3
-rw-r--r--include/linux/spi/pxa2xx_spi.h56
-rw-r--r--include/linux/spi/rspi.h18
-rw-r--r--include/linux/spi/spi.h12
-rw-r--r--include/linux/spi/xilinx_spi.h14
-rw-r--r--include/linux/srcutiny.h2
-rw-r--r--include/linux/ssb/ssb.h8
-rw-r--r--include/linux/stat.h1
-rw-r--r--include/linux/stmmac.h18
-rw-r--r--include/linux/string.h62
-rw-r--r--include/linux/sunrpc/svc_rdma.h13
-rw-r--r--include/linux/sunrpc/svc_xprt.h5
-rw-r--r--include/linux/sysctl.h27
-rw-r--r--include/linux/tcp.h6
-rw-r--r--include/linux/tee_core.h306
-rw-r--r--include/linux/tee_drv.h285
-rw-r--r--include/linux/thermal.h109
-rw-r--r--include/linux/threads.h4
-rw-r--r--include/linux/timerqueue.h5
-rw-r--r--include/linux/tpm.h316
-rw-r--r--include/linux/trace_events.h36
-rw-r--r--include/linux/trace_recursion.h2
-rw-r--r--include/linux/tracefs.h3
-rw-r--r--include/linux/tty.h14
-rw-r--r--include/linux/udp.h4
-rw-r--r--include/linux/uio.h10
-rw-r--r--include/linux/virtio.h35
-rw-r--r--include/linux/vtime.h5
-rw-r--r--include/linux/workqueue.h54
-rw-r--r--include/linux/xarray.h2
197 files changed, 3615 insertions, 1733 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 34829f2c517a..28c3fb2bef0d 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -242,9 +242,6 @@ static inline bool acpi_gicc_is_usable(struct acpi_madt_generic_interrupt *gicc)
return gicc->flags & ACPI_MADT_ENABLED;
}
-/* the following numa functions are architecture-dependent */
-void acpi_numa_slit_init (struct acpi_table_slit *slit);
-
#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
#else
@@ -267,8 +264,6 @@ static inline void
acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { }
#endif
-int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
-
#ifndef PHYS_CPUID_INVALID
typedef u32 phys_cpuid_t;
#define PHYS_CPUID_INVALID (phys_cpuid_t)(-1)
@@ -421,7 +416,7 @@ extern char *wmi_get_acpi_device_uid(const char *guid);
extern char acpi_video_backlight_string[];
extern long acpi_is_video_device(acpi_handle handle);
-extern int acpi_blacklisted(void);
+
extern void acpi_osi_setup(char *str);
extern bool acpi_osi_is_win8(void);
@@ -573,9 +568,13 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_CPCV2_SUPPORT 0x00000040
#define OSC_SB_PCLPI_SUPPORT 0x00000080
#define OSC_SB_OSLPI_SUPPORT 0x00000100
+#define OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT 0x00000200
+#define OSC_SB_OVER_16_PSTATES_SUPPORT 0x00000400
+#define OSC_SB_GED_SUPPORT 0x00000800
#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000
-#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000
+#define OSC_SB_IRQ_RESOURCE_SOURCE_SUPPORT 0x00002000
#define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000
+#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00020000
#define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000
#define OSC_SB_PRM_SUPPORT 0x00200000
#define OSC_SB_FFH_OPR_SUPPORT 0x00400000
@@ -1233,7 +1232,7 @@ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
struct acpi_resource_gpio **agpio);
bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
struct acpi_resource_gpio **agpio);
-int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, int index,
+int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index,
bool *wake_capable);
#else
static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
@@ -1246,7 +1245,7 @@ static inline bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
{
return false;
}
-static inline int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name,
+static inline int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id,
int index, bool *wake_capable)
{
return -ENXIO;
@@ -1259,10 +1258,10 @@ static inline int acpi_dev_gpio_irq_wake_get(struct acpi_device *adev, int index
return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, wake_capable);
}
-static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name,
+static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *con_id,
int index)
{
- return acpi_dev_gpio_irq_wake_get_by(adev, name, index, NULL);
+ return acpi_dev_gpio_irq_wake_get_by(adev, con_id, index, NULL);
}
static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
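A hedged usage sketch for the renamed con_id parameter above; the "wakeup" connection ID and the helper name are illustrative, not taken from the patch:

static int example_get_wake_irq(struct acpi_device *adev)
{
	bool wake_capable = false;
	int irq;

	/* con_id names the GPIO mapping to look up; NULL matches any */
	irq = acpi_dev_gpio_irq_wake_get_by(adev, "wakeup", 0, &wake_capable);
	if (irq < 0)
		return irq;

	if (wake_capable)
		dev_info(&adev->dev, "IRQ %d can wake the system\n", irq);

	return irq;
}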
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 1cb65592c95d..d4ed5622cf2b 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -39,7 +39,7 @@ void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
struct list_head *head);
/* IOMMU interface */
-int iort_dma_get_ranges(struct device *dev, u64 *size);
+int iort_dma_get_ranges(struct device *dev, u64 *limit);
int iort_iommu_configure_id(struct device *dev, const u32 *id_in);
void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head);
phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
@@ -55,7 +55,7 @@ void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *hea
static inline
void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
/* IOMMU interface */
-static inline int iort_dma_get_ranges(struct device *dev, u64 *size)
+static inline int iort_dma_get_ranges(struct device *dev, u64 *limit)
{ return -ENODEV; }
static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
{ return -ENODEV; }
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index c60a6a14638c..958a55bcc708 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -112,11 +112,18 @@ extern struct bus_type amba_bustype;
#define amba_get_drvdata(d) dev_get_drvdata(&d->dev)
#define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p)
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE
+ */
+#define amba_driver_register(drv) \
+ __amba_driver_register(drv, THIS_MODULE)
+
#ifdef CONFIG_ARM_AMBA
-int amba_driver_register(struct amba_driver *);
+int __amba_driver_register(struct amba_driver *, struct module *);
void amba_driver_unregister(struct amba_driver *);
#else
-static inline int amba_driver_register(struct amba_driver *drv)
+static inline int __amba_driver_register(struct amba_driver *drv,
+ struct module *owner)
{
return -EINVAL;
}
diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
index d21838835abd..d58fc022ec46 100644
--- a/include/linux/amd-pstate.h
+++ b/include/linux/amd-pstate.h
@@ -49,13 +49,17 @@ struct amd_aperf_mperf {
* @lowest_perf: the absolute lowest performance level of the processor
 * @prefcore_ranking: the preferred core ranking; a higher value indicates a
 * higher priority.
- * @max_freq: the frequency that mapped to highest_perf
- * @min_freq: the frequency that mapped to lowest_perf
- * @nominal_freq: the frequency that mapped to nominal_perf
- * @lowest_nonlinear_freq: the frequency that mapped to lowest_nonlinear_perf
+ * @min_limit_perf: Cached value of the performance corresponding to policy->min
+ * @max_limit_perf: Cached value of the performance corresponding to policy->max
+ * @min_limit_freq: Cached value of policy->min (in khz)
+ * @max_limit_freq: Cached value of policy->max (in khz)
+ * @max_freq: the frequency (in khz) that mapped to highest_perf
+ * @min_freq: the frequency (in khz) that mapped to lowest_perf
+ * @nominal_freq: the frequency (in khz) that mapped to nominal_perf
+ * @lowest_nonlinear_freq: the frequency (in khz) that mapped to lowest_nonlinear_perf
* @cur: Difference of Aperf/Mperf/tsc count between last and current sample
* @prev: Last Aperf/Mperf/tsc count value read from register
- * @freq: current cpu frequency value
+ * @freq: current cpu frequency value (in khz)
* @boost_supported: check whether the Processor or SBIOS supports boost mode
 * @hw_prefcore: check whether HW supports preferred core feature.
* Only when hw_prefcore and early prefcore param are true,
@@ -124,4 +128,10 @@ static const char * const amd_pstate_mode_string[] = {
[AMD_PSTATE_GUIDED] = "guided",
NULL,
};
+
+struct quirk_entry {
+ u32 nominal_freq;
+ u32 lowest_freq;
+};
+
#endif /* _LINUX_AMD_PSTATE_H */
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h
index 93a5f16d03f3..edef565c2a1a 100644
--- a/include/linux/anon_inodes.h
+++ b/include/linux/anon_inodes.h
@@ -9,12 +9,17 @@
#ifndef _LINUX_ANON_INODES_H
#define _LINUX_ANON_INODES_H
+#include <linux/types.h>
+
struct file_operations;
struct inode;
struct file *anon_inode_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags);
+struct file *anon_inode_getfile_fmode(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags, fmode_t f_mode);
struct file *anon_inode_create_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags,
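A minimal sketch of the new anon_inode_getfile_fmode() variant, which lets the caller OR extra f_mode bits into the returned file; the name string and the FMODE_NOWAIT choice are illustrative:

static struct file *example_getfile(const struct file_operations *fops,
				    void *priv)
{
	/* FMODE_NOWAIT is ORed into the new file's f_mode by the helper */
	return anon_inode_getfile_fmode("[example]", fops, priv,
					O_RDWR | O_CLOEXEC, FMODE_NOWAIT);
}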
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index a63d61ca55af..b721f360d759 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -60,14 +60,14 @@ void topology_scale_freq_tick(void);
void topology_set_scale_freq_source(struct scale_freq_data *data, const struct cpumask *cpus);
void topology_clear_scale_freq_source(enum scale_freq_source source, const struct cpumask *cpus);
-DECLARE_PER_CPU(unsigned long, thermal_pressure);
+DECLARE_PER_CPU(unsigned long, hw_pressure);
-static inline unsigned long topology_get_thermal_pressure(int cpu)
+static inline unsigned long topology_get_hw_pressure(int cpu)
{
- return per_cpu(thermal_pressure, cpu);
+ return per_cpu(hw_pressure, cpu);
}
-void topology_update_thermal_pressure(const struct cpumask *cpus,
+void topology_update_hw_pressure(const struct cpumask *cpus,
unsigned long capped_freq);
struct cpu_topology {
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index c906f666ff5d..c82d56768101 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -126,6 +126,7 @@
/* FFA Bus/Device/Driver related */
struct ffa_device {
u32 id;
+ u32 properties;
int vm_id;
bool mode_32bit;
uuid_t uuid;
@@ -221,12 +222,29 @@ struct ffa_partition_info {
#define FFA_PARTITION_DIRECT_SEND BIT(1)
/* partition can send and receive indirect messages. */
#define FFA_PARTITION_INDIRECT_MSG BIT(2)
+/* partition can receive notifications */
+#define FFA_PARTITION_NOTIFICATION_RECV BIT(3)
/* partition runs in the AArch64 execution state. */
#define FFA_PARTITION_AARCH64_EXEC BIT(8)
u32 properties;
u32 uuid[4];
};
+static inline
+bool ffa_partition_check_property(struct ffa_device *dev, u32 property)
+{
+ return dev->properties & property;
+}
+
+#define ffa_partition_supports_notify_recv(dev) \
+ ffa_partition_check_property(dev, FFA_PARTITION_NOTIFICATION_RECV)
+
+#define ffa_partition_supports_indirect_msg(dev) \
+ ffa_partition_check_property(dev, FFA_PARTITION_INDIRECT_MSG)
+
+#define ffa_partition_supports_direct_recv(dev) \
+ ffa_partition_check_property(dev, FFA_PARTITION_DIRECT_RECV)
+
/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */
struct ffa_send_direct_data {
unsigned long data0; /* w3/x3 */
@@ -236,6 +254,14 @@ struct ffa_send_direct_data {
unsigned long data4; /* w7/x7 */
};
+struct ffa_indirect_msg_hdr {
+ u32 flags;
+ u32 res0;
+ u32 offset;
+ u32 send_recv_id;
+ u32 size;
+};
+
struct ffa_mem_region_addr_range {
/* The base IPA of the constituent memory region, aligned to 4 kiB */
u64 address;
@@ -396,6 +422,7 @@ struct ffa_msg_ops {
void (*mode_32bit_set)(struct ffa_device *dev);
int (*sync_send_receive)(struct ffa_device *dev,
struct ffa_send_direct_data *data);
+ int (*indirect_send)(struct ffa_device *dev, void *buf, size_t sz);
};
struct ffa_mem_ops {
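A hedged sketch of how an FF-A driver might use the new partition-property helpers; the probe function is illustrative and assumes @ffa_dev->properties was populated from the FFA_PARTITION_INFO_GET flags:

static int example_ffa_probe(struct ffa_device *ffa_dev)
{
	if (!ffa_partition_supports_indirect_msg(ffa_dev))
		return -EOPNOTSUPP;

	if (ffa_partition_supports_notify_recv(ffa_dev))
		dev_info(&ffa_dev->dev, "partition can receive notifications\n");

	return 0;
}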
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 875d792bffff..d5379548d684 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -615,6 +615,13 @@ static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
bl->tail = bl2->tail;
}
+static inline void bio_list_merge_init(struct bio_list *bl,
+ struct bio_list *bl2)
+{
+ bio_list_merge(bl, bl2);
+ bio_list_init(bl2);
+}
+
static inline void bio_list_merge_head(struct bio_list *bl,
struct bio_list *bl2)
{
@@ -824,5 +831,9 @@ static inline void bio_clear_polled(struct bio *bio)
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
+struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new);
+
+struct bio *blk_alloc_discard_bio(struct block_device *bdev,
+ sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask);
#endif /* __LINUX_BIO_H */
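A minimal sketch (not from the patch) of the new bio_list_merge_init() helper, which splices one list onto another and reinitializes the source so it can be reused immediately:

static void example_drain(struct bio_list *staged)
{
	struct bio_list local;
	struct bio *bio;

	bio_list_init(&local);

	/* move all staged bios to local; staged is left empty */
	bio_list_merge_init(&local, staged);

	while ((bio = bio_list_pop(&local)))
		submit_bio_noacct(bio);
}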
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index aa4096126553..8c4768c44a01 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -83,6 +83,10 @@ struct device;
* bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst
* bitmap_get_value8(map, start) Get 8bit value from map at start
* bitmap_set_value8(map, value, start) Set 8bit value to map at start
+ * bitmap_read(map, start, nbits) Read an nbits-sized value from
+ * map at start
+ * bitmap_write(map, value, start, nbits) Write an nbits-sized value to
+ * map at start
*
* Note, bitmap_zero() and bitmap_fill() operate over the region of
* unsigned longs, that is, bits behind bitmap till the unsigned long
@@ -222,9 +226,11 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig,
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
+#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
+
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ unsigned int len = bitmap_size(nbits);
if (small_const_nbits(nbits))
*dst = 0;
@@ -234,7 +240,7 @@ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ unsigned int len = bitmap_size(nbits);
if (small_const_nbits(nbits))
*dst = ~0UL;
@@ -245,7 +251,7 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
unsigned int nbits)
{
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ unsigned int len = bitmap_size(nbits);
if (small_const_nbits(nbits))
*dst = *src;
@@ -722,38 +728,83 @@ static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
}
/**
- * bitmap_get_value8 - get an 8-bit value within a memory region
+ * bitmap_read - read a value of n-bits from the memory region
* @map: address to the bitmap memory region
- * @start: bit offset of the 8-bit value; must be a multiple of 8
+ * @start: bit offset of the n-bit value
+ * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG
*
- * Returns the 8-bit value located at the @start bit offset within the @src
- * memory region.
+ * Returns: value of @nbits bits located at the @start bit offset within the
+ * @map memory region. For @nbits = 0 and @nbits > BITS_PER_LONG the return
+ * value is undefined.
*/
-static inline unsigned long bitmap_get_value8(const unsigned long *map,
- unsigned long start)
+static inline unsigned long bitmap_read(const unsigned long *map,
+ unsigned long start,
+ unsigned long nbits)
{
- const size_t index = BIT_WORD(start);
- const unsigned long offset = start % BITS_PER_LONG;
+ size_t index = BIT_WORD(start);
+ unsigned long offset = start % BITS_PER_LONG;
+ unsigned long space = BITS_PER_LONG - offset;
+ unsigned long value_low, value_high;
+
+ if (unlikely(!nbits || nbits > BITS_PER_LONG))
+ return 0;
+
+ if (space >= nbits)
+ return (map[index] >> offset) & BITMAP_LAST_WORD_MASK(nbits);
- return (map[index] >> offset) & 0xFF;
+ value_low = map[index] & BITMAP_FIRST_WORD_MASK(start);
+ value_high = map[index + 1] & BITMAP_LAST_WORD_MASK(start + nbits);
+ return (value_low >> offset) | (value_high << space);
}
/**
- * bitmap_set_value8 - set an 8-bit value within a memory region
+ * bitmap_write - write n-bit value within a memory region
* @map: address to the bitmap memory region
- * @value: the 8-bit value; values wider than 8 bits may clobber bitmap
- * @start: bit offset of the 8-bit value; must be a multiple of 8
+ * @value: value to write, clamped to nbits
+ * @start: bit offset of the n-bit value
+ * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG.
+ *
+ * bitmap_write() behaves as-if implemented as @nbits calls of __assign_bit(),
+ * i.e. bits beyond @nbits are ignored:
+ *
+ * for (bit = 0; bit < nbits; bit++)
+ * __assign_bit(start + bit, bitmap, val & BIT(bit));
+ *
+ * For @nbits == 0 and @nbits > BITS_PER_LONG no writes are performed.
*/
-static inline void bitmap_set_value8(unsigned long *map, unsigned long value,
- unsigned long start)
-{
- const size_t index = BIT_WORD(start);
- const unsigned long offset = start % BITS_PER_LONG;
-
- map[index] &= ~(0xFFUL << offset);
+static inline void bitmap_write(unsigned long *map, unsigned long value,
+ unsigned long start, unsigned long nbits)
+{
+ size_t index;
+ unsigned long offset;
+ unsigned long space;
+ unsigned long mask;
+ bool fit;
+
+ if (unlikely(!nbits || nbits > BITS_PER_LONG))
+ return;
+
+ mask = BITMAP_LAST_WORD_MASK(nbits);
+ value &= mask;
+ offset = start % BITS_PER_LONG;
+ space = BITS_PER_LONG - offset;
+ fit = space >= nbits;
+ index = BIT_WORD(start);
+
+ map[index] &= (fit ? (~(mask << offset)) : ~BITMAP_FIRST_WORD_MASK(start));
map[index] |= value << offset;
+ if (fit)
+ return;
+
+ map[index + 1] &= BITMAP_FIRST_WORD_MASK(start + nbits);
+ map[index + 1] |= (value >> space);
}
+#define bitmap_get_value8(map, start) \
+ bitmap_read(map, start, BITS_PER_BYTE)
+#define bitmap_set_value8(map, value, start) \
+ bitmap_write(map, value, start, BITS_PER_BYTE)
+
#endif /* __ASSEMBLY__ */
#endif /* __LINUX_BITMAP_H */
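A small self-check sketch for the new bitmap_read()/bitmap_write() helpers documented above; the values and offsets are arbitrary:

static void example_bitmap_rw(void)
{
	DECLARE_BITMAP(map, 64);

	bitmap_zero(map, 64);

	/* write the 5-bit value 0x16 at bit offset 3 ... */
	bitmap_write(map, 0x16, 3, 5);

	/* ... and read it back; bits beyond nbits are masked off */
	WARN_ON(bitmap_read(map, 3, 5) != 0x16);

	/* degenerate sizes: reads return 0, writes are no-ops */
	WARN_ON(bitmap_read(map, 3, 0) != 0);
	bitmap_write(map, ~0UL, 3, BITS_PER_LONG + 1);
}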
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 2ba557e067fe..b25dc8742124 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -21,6 +21,8 @@
#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
+#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_BYTE)
+
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
@@ -80,6 +82,7 @@ __check_bitop_pr(__test_and_set_bit);
__check_bitop_pr(__test_and_clear_bit);
__check_bitop_pr(__test_and_change_bit);
__check_bitop_pr(test_bit);
+__check_bitop_pr(test_bit_acquire);
#undef __check_bitop_pr
@@ -272,23 +275,11 @@ static inline unsigned long fns(unsigned long word, unsigned int n)
* @addr: the address to start counting from
* @value: the value to assign
*/
-static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
- bool value)
-{
- if (value)
- set_bit(nr, addr);
- else
- clear_bit(nr, addr);
-}
+#define assign_bit(nr, addr, value) \
+ ((value) ? set_bit((nr), (addr)) : clear_bit((nr), (addr)))
-static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
- bool value)
-{
- if (value)
- __set_bit(nr, addr);
- else
- __clear_bit(nr, addr);
-}
+#define __assign_bit(nr, addr, value) \
+ ((value) ? __set_bit((nr), (addr)) : __clear_bit((nr), (addr)))
/**
* __ptr_set_bit - Set bit in a pointer's value
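A two-line sketch of the macro-converted helpers above; behavior is unchanged from the old inline functions:

static void example_assign(unsigned long *flags, bool enable)
{
	assign_bit(0, flags, enable);	/* atomic: set_bit() or clear_bit() */
	__assign_bit(1, flags, enable);	/* non-atomic variant */
}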
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index d3d8fd8e229b..89ba6b16fe8b 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -54,8 +54,8 @@ typedef __u32 __bitwise req_flags_t;
/* Look at ->special_vec for the actual data payload instead of the
bio chain. */
#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
-/* The per-zone write lock is held for this request */
-#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
+/* The request completion needs to be signaled to zone write plugging. */
+#define RQF_ZONE_WRITE_PLUGGING ((__force req_flags_t)(1 << 20))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
#define RQF_RESV ((__force req_flags_t)(1 << 23))
@@ -1150,85 +1150,4 @@ static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
}
void blk_dump_rq_flags(struct request *, char *);
-#ifdef CONFIG_BLK_DEV_ZONED
-static inline unsigned int blk_rq_zone_no(struct request *rq)
-{
- return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
-}
-
-static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
-{
- return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
-}
-
-/**
- * blk_rq_is_seq_zoned_write() - Check if @rq requires write serialization.
- * @rq: Request to examine.
- *
- * Note: REQ_OP_ZONE_APPEND requests do not require serialization.
- */
-static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
-{
- return op_needs_zoned_write_locking(req_op(rq)) &&
- blk_rq_zone_is_seq(rq);
-}
-
-bool blk_req_needs_zone_write_lock(struct request *rq);
-bool blk_req_zone_write_trylock(struct request *rq);
-void __blk_req_zone_write_lock(struct request *rq);
-void __blk_req_zone_write_unlock(struct request *rq);
-
-static inline void blk_req_zone_write_lock(struct request *rq)
-{
- if (blk_req_needs_zone_write_lock(rq))
- __blk_req_zone_write_lock(rq);
-}
-
-static inline void blk_req_zone_write_unlock(struct request *rq)
-{
- if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
- __blk_req_zone_write_unlock(rq);
-}
-
-static inline bool blk_req_zone_is_write_locked(struct request *rq)
-{
- return rq->q->disk->seq_zones_wlock &&
- test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
-}
-
-static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
-{
- if (!blk_req_needs_zone_write_lock(rq))
- return true;
- return !blk_req_zone_is_write_locked(rq);
-}
-#else /* CONFIG_BLK_DEV_ZONED */
-static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
-{
- return false;
-}
-
-static inline bool blk_req_needs_zone_write_lock(struct request *rq)
-{
- return false;
-}
-
-static inline void blk_req_zone_write_lock(struct request *rq)
-{
-}
-
-static inline void blk_req_zone_write_unlock(struct request *rq)
-{
-}
-static inline bool blk_req_zone_is_write_locked(struct request *rq)
-{
- return false;
-}
-
-static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
-{
- return true;
-}
-#endif /* CONFIG_BLK_DEV_ZONED */
-
#endif /* BLK_MQ_H */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index cb1526ec44b5..25dbf1097085 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -88,15 +88,9 @@ struct block_device {
/*
* Block error status values. See block/blk-core:blk_errors for the details.
- * Alpha cannot write a byte atomically, so we need to use 32-bit value.
*/
-#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
-typedef u32 __bitwise blk_status_t;
-typedef u32 blk_short_t;
-#else
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
-#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP ((__force blk_status_t)1)
#define BLK_STS_TIMEOUT ((__force blk_status_t)2)
@@ -137,25 +131,13 @@ typedef u16 blk_short_t;
#define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13)
/*
- * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
- * related resources are unavailable, but the driver can guarantee the queue
- * will be rerun in the future once the resources become available again.
- *
- * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
- * a zone specific resource and IO to a different zone on the same device could
- * still be served. Examples of that are zones that are write-locked, but a read
- * to the same zone could be served.
- */
-#define BLK_STS_ZONE_RESOURCE ((__force blk_status_t)14)
-
-/*
* BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
* path if the device returns a status indicating that too many zone resources
* are currently open. The same command should be successful if resubmitted
* after the number of open zones decreases below the device's limits, which is
* reported in the request_queue's max_open_zones.
*/
-#define BLK_STS_ZONE_OPEN_RESOURCE ((__force blk_status_t)15)
+#define BLK_STS_ZONE_OPEN_RESOURCE ((__force blk_status_t)14)
/*
* BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
@@ -164,20 +146,20 @@ typedef u16 blk_short_t;
* after the number of active zones decreases below the device's limits, which
* is reported in the request_queue's max_active_zones.
*/
-#define BLK_STS_ZONE_ACTIVE_RESOURCE ((__force blk_status_t)16)
+#define BLK_STS_ZONE_ACTIVE_RESOURCE ((__force blk_status_t)15)
/*
* BLK_STS_OFFLINE is returned from the driver when the target device is offline
* or is being taken offline. This could help differentiate the case where a
* device is intentionally being shut down from a real I/O error.
*/
-#define BLK_STS_OFFLINE ((__force blk_status_t)17)
+#define BLK_STS_OFFLINE ((__force blk_status_t)16)
/*
* BLK_STS_DURATION_LIMIT is returned from the driver when the target device
* aborted the command because it exceeded one of its Command Duration Limits.
*/
-#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)18)
+#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)17)
/**
* blk_path_error - returns true if error may be path related
@@ -234,7 +216,12 @@ struct bio {
struct bvec_iter bi_iter;
- blk_qc_t bi_cookie;
+ union {
+ /* for polled bios: */
+ blk_qc_t bi_cookie;
+ /* for plugged zoned writes only: */
+ unsigned int __bi_nr_segments;
+ };
bio_end_io_t *bi_end_io;
void *bi_private;
#ifdef CONFIG_BLK_CGROUP
@@ -304,7 +291,8 @@ enum {
BIO_QOS_THROTTLED, /* bio went through rq_qos throttle path */
BIO_QOS_MERGED, /* but went through rq_qos merge path */
BIO_REMAPPED,
- BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */
+ BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
+ BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */
BIO_FLAG_LAST
};
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 172c91879999..fd5951dd6b7d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -128,6 +128,8 @@ typedef unsigned int __bitwise blk_mode_t;
#define BLK_OPEN_WRITE_IOCTL ((__force blk_mode_t)(1 << 4))
/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
#define BLK_OPEN_RESTRICT_WRITES ((__force blk_mode_t)(1 << 5))
+/* return partition scanning errors */
+#define BLK_OPEN_STRICT_SCAN ((__force blk_mode_t)(1 << 6))
struct gendisk {
/*
@@ -177,22 +179,21 @@ struct gendisk {
#ifdef CONFIG_BLK_DEV_ZONED
/*
- * Zoned block device information for request dispatch control.
- * nr_zones is the total number of zones of the device. This is always
- * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
- * bits which indicates if a zone is conventional (bit set) or
- * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
- * bits which indicates if a zone is write locked, that is, if a write
- * request targeting the zone was dispatched.
- *
- * Reads of this information must be protected with blk_queue_enter() /
- * blk_queue_exit(). Modifying this information is only allowed while
- * no requests are being processed. See also blk_mq_freeze_queue() and
- * blk_mq_unfreeze_queue().
+ * Zoned block device information. Reads of this information must be
+ * protected with blk_queue_enter() / blk_queue_exit(). Modifying this
+ * information is only allowed while no requests are being processed.
+ * See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue().
*/
unsigned int nr_zones;
+ unsigned int zone_capacity;
unsigned long *conv_zones_bitmap;
- unsigned long *seq_zones_wlock;
+ unsigned int zone_wplugs_hash_bits;
+ spinlock_t zone_wplugs_lock;
+ struct mempool_s *zone_wplugs_pool;
+ struct hlist_head *zone_wplugs_hash;
+ struct list_head zone_wplugs_err_list;
+ struct work_struct zone_wplugs_work;
+ struct workqueue_struct *zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */
#if IS_ENABLED(CONFIG_CDROM)
@@ -231,6 +232,19 @@ static inline unsigned int disk_openers(struct gendisk *disk)
return atomic_read(&disk->part0->bd_openers);
}
+/**
+ * disk_has_partscan - return %true if partition scanning is enabled on a disk
+ * @disk: disk to check
+ *
+ * Returns %true if partition scanning is enabled for @disk, or %false if
+ * partition scanning is disabled either permanently or temporarily.
+ */
+static inline bool disk_has_partscan(struct gendisk *disk)
+{
+ return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
+ !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
+}
+
/*
* The gendisk is refcounted by the part0 block_device, and the bd_device
* therein is also used for device model presentation in sysfs.
@@ -329,8 +343,7 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
sector_t sectors, sector_t nr_sectors);
-int blk_revalidate_disk_zones(struct gendisk *disk,
- void (*update_driver_data)(struct gendisk *disk));
+int blk_revalidate_disk_zones(struct gendisk *disk);
/*
* Independent access ranges: struct blk_independent_access_range describes
@@ -447,8 +460,6 @@ struct request_queue {
atomic_t nr_active_requests_shared_tags;
- unsigned int required_elevator_features;
-
struct blk_mq_tags *sched_shared_tags;
struct list_head icq_list;
@@ -631,15 +642,6 @@ static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
return sector >> ilog2(disk->queue->limits.chunk_sectors);
}
-static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
-{
- if (!blk_queue_is_zoned(disk->queue))
- return false;
- if (!disk->conv_zones_bitmap)
- return true;
- return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
-}
-
static inline void disk_set_max_open_zones(struct gendisk *disk,
unsigned int max_open_zones)
{
@@ -662,6 +664,7 @@ static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
return bdev->bd_disk->queue->limits.max_active_zones;
}
+bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
@@ -672,10 +675,6 @@ static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
return 0;
}
-static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
-{
- return false;
-}
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
return 0;
@@ -689,6 +688,10 @@ static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
return 0;
}
+static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+{
+ return false;
+}
#endif /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blk_queue_depth(struct request_queue *q)
@@ -853,9 +856,11 @@ static inline unsigned int bio_zone_no(struct bio *bio)
return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}
-static inline unsigned int bio_zone_is_seq(struct bio *bio)
+static inline bool bio_straddles_zones(struct bio *bio)
{
- return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
+ return bio_sectors(bio) &&
+ bio_zone_no(bio) !=
+ disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);
}
/*
@@ -892,18 +897,25 @@ int queue_limits_commit_update(struct request_queue *q,
struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
+/**
+ * queue_limits_cancel_update - cancel an atomic update of queue limits
+ * @q: queue to update
+ *
+ * This function cancels an atomic update of the queue limits started by
+ * queue_limits_start_update() and should be used when an error occurs after
+ * starting the update.
+ */
+static inline void queue_limits_cancel_update(struct request_queue *q)
+{
+ mutex_unlock(&q->limits_lock);
+}
+
/*
* Access functions for manipulating queue properties
*/
-void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
-extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
-extern void blk_queue_max_segments(struct request_queue *, unsigned short);
-extern void blk_queue_max_discard_segments(struct request_queue *,
- unsigned short);
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
unsigned int max_sectors);
-extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
@@ -920,7 +932,6 @@ void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
-extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
@@ -928,10 +939,6 @@ extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
sector_t offset, const char *pfx);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
-extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
-extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
-extern void blk_queue_dma_alignment(struct request_queue *, int);
-extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
@@ -940,17 +947,6 @@ disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
struct blk_independent_access_ranges *iars);
-/*
- * Elevator features for blk_queue_required_elevator_features:
- */
-/* Supports zoned block devices sequential write constraint */
-#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0)
-
-extern void blk_queue_required_elevator_features(struct request_queue *q,
- unsigned int features);
-extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
- struct device *dev);
-
bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);
@@ -1154,12 +1150,29 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
return q->limits.max_segment_size;
}
-static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
+static inline unsigned int queue_limits_max_zone_append_sectors(struct queue_limits *l)
+{
+ unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors);
+
+ return min_not_zero(l->max_zone_append_sectors, max_sectors);
+}
+
+static inline unsigned int queue_max_zone_append_sectors(struct request_queue *q)
{
+ if (!blk_queue_is_zoned(q))
+ return 0;
+
+ return queue_limits_max_zone_append_sectors(&q->limits);
+}
- const struct queue_limits *l = &q->limits;
+static inline bool queue_emulates_zone_append(struct request_queue *q)
+{
+ return blk_queue_is_zoned(q) && !q->limits.max_zone_append_sectors;
+}
- return min(l->max_zone_append_sectors, l->max_sectors);
+static inline bool bdev_emulates_zone_append(struct block_device *bdev)
+{
+ return queue_emulates_zone_append(bdev_get_queue(bdev));
}
static inline unsigned int
@@ -1301,18 +1314,6 @@ static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
return disk_zone_no(bdev->bd_disk, sec);
}
-/* Whether write serialization is required for @op on zoned devices. */
-static inline bool op_needs_zoned_write_locking(enum req_op op)
-{
- return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES;
-}
-
-static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
- enum req_op op)
-{
- return bdev_is_zoned(bdev) && op_needs_zoned_write_locking(op);
-}
-
static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
@@ -1328,6 +1329,12 @@ static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
return sector & (bdev_zone_sectors(bdev) - 1);
}
+static inline sector_t bio_offset_from_zone_start(struct bio *bio)
+{
+ return bdev_offset_from_zone_start(bio->bi_bdev,
+ bio->bi_iter.bi_sector);
+}
+
static inline bool bdev_is_zone_start(struct block_device *bdev,
sector_t sector)
{
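A hedged sketch of the atomic queue-limits update pattern that the new queue_limits_cancel_update() completes; the limit value and the error condition are illustrative:

static int example_update_limits(struct request_queue *q, unsigned int max_hw)
{
	struct queue_limits lim = queue_limits_start_update(q);

	if (!max_hw) {
		/* abort path: unlocks q->limits_lock, applies nothing */
		queue_limits_cancel_update(q);
		return -EINVAL;
	}

	lim.max_hw_sectors = max_hw;

	/* validates and applies the limits, then unlocks q->limits_lock */
	return queue_limits_commit_update(q, &lim);
}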
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index e5ee2c694401..3f4b4ac527ca 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -288,7 +288,12 @@ int __init xbc_init(const char *buf, size_t size, const char **emsg, int *epos);
int __init xbc_get_info(int *node_size, size_t *data_size);
/* XBC cleanup data structures */
-void __init xbc_exit(void);
+void __init _xbc_exit(bool early);
+
+static inline void xbc_exit(void)
+{
+ _xbc_exit(false);
+}
/* XBC embedded bootconfig data in kernel */
#ifdef CONFIG_BOOT_CONFIG_EMBED
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index a86da1f38b3c..5e694a308081 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -184,8 +184,8 @@ struct bpf_map_ops {
};
enum {
- /* Support at most 10 fields in a BTF type */
- BTF_FIELDS_MAX = 10,
+ /* Support at most 11 fields in a BTF type */
+ BTF_FIELDS_MAX = 11,
};
enum btf_field_type {
@@ -202,6 +202,7 @@ enum btf_field_type {
BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE,
BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
BPF_REFCOUNT = (1 << 9),
+ BPF_WORKQUEUE = (1 << 10),
};
typedef void (*btf_dtor_kfunc_t)(void *);
@@ -238,6 +239,7 @@ struct btf_record {
u32 field_mask;
int spin_lock_off;
int timer_off;
+ int wq_off;
int refcount_off;
struct btf_field fields[];
};
@@ -312,6 +314,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
return "bpf_spin_lock";
case BPF_TIMER:
return "bpf_timer";
+ case BPF_WORKQUEUE:
+ return "bpf_wq";
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
return "kptr";
@@ -340,6 +344,8 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
return sizeof(struct bpf_spin_lock);
case BPF_TIMER:
return sizeof(struct bpf_timer);
+ case BPF_WORKQUEUE:
+ return sizeof(struct bpf_wq);
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
@@ -367,6 +373,8 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
return __alignof__(struct bpf_spin_lock);
case BPF_TIMER:
return __alignof__(struct bpf_timer);
+ case BPF_WORKQUEUE:
+ return __alignof__(struct bpf_wq);
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
@@ -406,6 +414,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
/* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
case BPF_SPIN_LOCK:
case BPF_TIMER:
+ case BPF_WORKQUEUE:
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
@@ -525,6 +534,7 @@ static inline void zero_map_value(struct bpf_map *map, void *dst)
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
+void bpf_wq_cancel_and_free(void *timer);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
struct bpf_spin_lock *spin_lock);
void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
@@ -1116,8 +1126,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
void *func_addr);
void *arch_alloc_bpf_trampoline(unsigned int size);
void arch_free_bpf_trampoline(void *image, unsigned int size);
-void arch_protect_bpf_trampoline(void *image, unsigned int size);
-void arch_unprotect_bpf_trampoline(void *image, unsigned int size);
+int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size);
int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
struct bpf_tramp_links *tlinks, void *func_addr);
@@ -1266,6 +1275,7 @@ int bpf_dynptr_check_size(u32 size);
u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
+bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
@@ -1622,6 +1632,12 @@ struct bpf_tracing_link {
struct bpf_prog *tgt_prog;
};
+struct bpf_raw_tp_link {
+ struct bpf_link link;
+ struct bpf_raw_event_map *btp;
+ u64 cookie;
+};
+
struct bpf_link_primer {
struct bpf_link *link;
struct file *file;
@@ -2204,6 +2220,7 @@ void bpf_map_free_record(struct bpf_map *map);
struct btf_record *btf_record_dup(const struct btf_record *rec);
bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
+void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj);
void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
@@ -2988,6 +3005,7 @@ int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
int sock_map_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr);
+int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog);
void sock_map_unhash(struct sock *sk);
void sock_map_destroy(struct sock *sk);
@@ -3086,6 +3104,11 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
{
return -EINVAL;
}
+
+static inline int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
diff --git a/include/linux/bpf_crypto.h b/include/linux/bpf_crypto.h
new file mode 100644
index 000000000000..a41e71d4e2d9
--- /dev/null
+++ b/include/linux/bpf_crypto.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#ifndef _BPF_CRYPTO_H
+#define _BPF_CRYPTO_H
+
+struct bpf_crypto_type {
+ void *(*alloc_tfm)(const char *algo);
+ void (*free_tfm)(void *tfm);
+ int (*has_algo)(const char *algo);
+ int (*setkey)(void *tfm, const u8 *key, unsigned int keylen);
+ int (*setauthsize)(void *tfm, unsigned int authsize);
+ int (*encrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv);
+ int (*decrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv);
+ unsigned int (*ivsize)(void *tfm);
+ unsigned int (*statesize)(void *tfm);
+ u32 (*get_flags)(void *tfm);
+ struct module *owner;
+ char name[14];
+};
+
+int bpf_crypto_register_type(const struct bpf_crypto_type *type);
+int bpf_crypto_unregister_type(const struct bpf_crypto_type *type);
+
+#endif /* _BPF_CRYPTO_H */
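A hedged registration sketch for the new interface; the stub callbacks and type name are illustrative, and a real backend would also implement setkey/encrypt/decrypt and the remaining ops:

static void *example_alloc_tfm(const char *algo)
{
	return ERR_PTR(-EOPNOTSUPP);	/* stub */
}

static void example_free_tfm(void *tfm)
{
}

static const struct bpf_crypto_type example_crypto_type = {
	.alloc_tfm	= example_alloc_tfm,
	.free_tfm	= example_free_tfm,
	.owner		= THIS_MODULE,
	.name		= "example",	/* must fit in name[14] */
};

static int __init example_init(void)
{
	return bpf_crypto_register_type(&example_crypto_type);
}

static void __exit example_exit(void)
{
	bpf_crypto_unregister_type(&example_crypto_type);
}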
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 7cb1b75eee38..50aa87f8d77f 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -421,11 +421,13 @@ struct bpf_verifier_state {
struct bpf_active_lock active_lock;
bool speculative;
bool active_rcu_lock;
+ u32 active_preempt_lock;
/* If this state was ever pointed-to by other state's loop_entry field
* this flag would be set to true. Used to avoid freeing such states
* while they are still in use.
*/
bool used_as_loop_entry;
+ bool in_sleepable;
/* first and last insn idx of this verifier state */
u32 first_insn_idx;
@@ -502,6 +504,13 @@ struct bpf_loop_inline_state {
u32 callback_subprogno; /* valid when fit_for_inline is true */
};
+/* pointer and state for maps */
+struct bpf_map_ptr_state {
+ struct bpf_map *map_ptr;
+ bool poison;
+ bool unpriv;
+};
+
/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC (1U << 0)
#define BPF_ALU_SANITIZE_DST (1U << 1)
@@ -514,7 +523,7 @@ struct bpf_loop_inline_state {
struct bpf_insn_aux_data {
union {
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
- unsigned long map_ptr_state; /* pointer/poison value for maps */
+ struct bpf_map_ptr_state map_ptr_state;
s32 call_imm; /* saved imm field of call insn */
u32 alu_limit; /* limit for add/sub register with pointer */
struct {
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 9e97ced2896d..14fa93268630 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -65,7 +65,8 @@ struct bsg_job {
void bsg_job_done(struct bsg_job *job, int result,
unsigned int reply_payload_rcv_len);
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
- bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size);
+ struct queue_limits *lim, bsg_job_fn *job_fn,
+ bsg_timeout_fn *timeout, int dd_job_size);
void bsg_remove_queue(struct request_queue *q);
void bsg_job_put(struct bsg_job *job);
int __must_check bsg_job_get(struct bsg_job *job);
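A hedged sketch of the updated bsg_setup_queue() signature, which now accepts queue limits at allocation time; the handler and limit value are illustrative:

static int example_bsg_dispatch(struct bsg_job *job)
{
	bsg_job_done(job, -EOPNOTSUPP, 0);	/* stub handler */
	return 0;
}

static struct request_queue *example_bsg_setup(struct device *dev)
{
	struct queue_limits lim = {
		.max_hw_sectors = 1024,		/* illustrative */
	};

	return bsg_setup_queue(dev, dev_name(dev), &lim,
			       example_bsg_dispatch, NULL, 0);
}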
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index e24aabfe8ecc..c0e3e1426a82 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -3,6 +3,8 @@
#ifndef _LINUX_BTF_IDS_H
#define _LINUX_BTF_IDS_H
+#include <linux/types.h> /* for u32 */
+
struct btf_id_set {
u32 cnt;
u32 ids[];
diff --git a/include/linux/bus/stm32_firewall_device.h b/include/linux/bus/stm32_firewall_device.h
new file mode 100644
index 000000000000..18e0a2fc3816
--- /dev/null
+++ b/include/linux/bus/stm32_firewall_device.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef STM32_FIREWALL_DEVICE_H
+#define STM32_FIREWALL_DEVICE_H
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define STM32_FIREWALL_MAX_EXTRA_ARGS 5
+
+/* Opaque reference to stm32_firewall_controller */
+struct stm32_firewall_controller;
+
+/**
+ * struct stm32_firewall - Information on a device's firewall. Each device can have more than one
+ * firewall.
+ *
+ * @firewall_ctrl: Pointer referencing a firewall controller of the device. It is
+ * opaque so a device cannot manipulate the controller's ops or access
+ * the controller's data
+ * @extra_args: Extra arguments that are implementation dependent
+ * @entry: Name of the firewall entry
+ * @extra_args_size: Number of extra arguments
+ * @firewall_id:		Firewall ID associated with the device for this firewall controller
+ */
+struct stm32_firewall {
+ struct stm32_firewall_controller *firewall_ctrl;
+ u32 extra_args[STM32_FIREWALL_MAX_EXTRA_ARGS];
+ const char *entry;
+ size_t extra_args_size;
+ u32 firewall_id;
+};
+
+#if IS_ENABLED(CONFIG_STM32_FIREWALL)
+/**
+ * stm32_firewall_get_firewall - Get the firewall(s) associated to given device.
+ * The firewall controller reference is always the first argument
+ * of each of the access-controller property entries.
+ * The firewall ID is always the second argument of each of the
+ * access-controller property entries.
+ * If there's no argument linked to the phandle, then the firewall ID
+ * field is set to U32_MAX, which is an invalid ID.
+ *
+ * @np: Device node to parse
+ * @firewall: Array of firewall references
+ * @nb_firewall: Number of firewall references to get. Must be at least 1.
+ *
+ * Returns 0 on success, -ENODEV if there's no match with a firewall controller, or an
+ * appropriate errno code if an error occurred.
+ */
+int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall,
+ unsigned int nb_firewall);
+
+/**
+ * stm32_firewall_grant_access - Request firewall access rights and grant access.
+ *
+ * @firewall: Firewall reference containing the ID to check against its firewall
+ * controller
+ *
+ * Returns 0 if access is granted, -EACCES if access is denied, -ENODEV if @firewall is NULL,
+ * or an appropriate errno code if an error occurred
+ */
+int stm32_firewall_grant_access(struct stm32_firewall *firewall);
+
+/**
+ * stm32_firewall_release_access - Release access granted from a call to
+ * stm32_firewall_grant_access().
+ *
+ * @firewall: Firewall reference containing the ID to check against its firewall
+ * controller
+ */
+void stm32_firewall_release_access(struct stm32_firewall *firewall);
+
+/**
+ * stm32_firewall_grant_access_by_id - Request firewall access rights of a given device
+ * based on a specific firewall ID
+ *
+ * Warnings:
+ * There is no way to ensure that the given ID will correspond to the firewall referenced in the
+ * device node if the ID did not come from stm32_firewall_get_firewall(). In that case, this
+ * function must be used with caution.
+ * This function should be used for subsystem resources that do not have the same firewall ID
+ * as their parent.
+ * U32_MAX is an invalid ID.
+ *
+ * @firewall: Firewall reference containing the firewall controller
+ * @subsystem_id: Firewall ID of the subsystem resource
+ *
+ * Returns 0 if access is granted, -EACCES if access is denied, -ENODEV if @firewall is NULL,
+ * or an appropriate errno code if an error occurred
+ */
+int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id);
+
+/**
+ * stm32_firewall_release_access_by_id - Release access granted from a call to
+ * stm32_firewall_grant_access_by_id().
+ *
+ * Warnings:
+ * There is no way to ensure that the given ID will correspond to the firewall referenced in the
+ * device node if the ID did not come from stm32_firewall_get_firewall(). In that case, this
+ * function must be used with caution.
+ * This function should be used for subsystem resources that do not have the same firewall ID
+ * as their parent.
+ * U32_MAX is an invalid ID.
+ *
+ * @firewall: Firewall reference containing the firewall controller
+ * @subsystem_id: Firewall ID of the subsystem resource
+ */
+void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id);
+
+#else /* CONFIG_STM32_FIREWALL */
+
+static inline int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall,
+ unsigned int nb_firewall)
+{
+ return -ENODEV;
+}
+
+static inline int stm32_firewall_grant_access(struct stm32_firewall *firewall)
+{
+ return -ENODEV;
+}
+
+static inline void stm32_firewall_release_access(struct stm32_firewall *firewall)
+{
+}
+
+static inline int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id)
+{
+ return -ENODEV;
+}
+
+static inline void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id)
+{
+}
+
+#endif /* CONFIG_STM32_FIREWALL */
+#endif /* STM32_FIREWALL_DEVICE_H */
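As a usage sketch (device names and error handling invented; only the API above is real), a consumer driver would typically resolve its firewall reference at probe time and bracket peripheral setup with grant/release:

    struct stm32_firewall fw;
    int err;

    /* Resolve the first firewall reference from the node's access-controller property. */
    err = stm32_firewall_get_firewall(dev->of_node, &fw, 1);
    if (err)
            return err;

    /* Ask the firewall controller whether this resource may be used. */
    err = stm32_firewall_grant_access(&fw);
    if (err)
            return err;

    /* ... program the peripheral ... */

    stm32_firewall_release_access(&fw);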
diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h
index 11a92a946016..5f904591fa5f 100644
--- a/include/linux/ceph/ceph_debug.h
+++ b/include/linux/ceph/ceph_debug.h
@@ -27,17 +27,13 @@
##__VA_ARGS__)
# else
/* faux printk call just to see any compiler warnings. */
-# define dout(fmt, ...) do { \
- if (0) \
- printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
- } while (0)
-# define doutc(client, fmt, ...) do { \
- if (0) \
- printk(KERN_DEBUG "[%pU %llu] " fmt, \
- &client->fsid, \
- client->monc.auth->global_id, \
- ##__VA_ARGS__); \
- } while (0)
+# define dout(fmt, ...) \
+ no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+# define doutc(client, fmt, ...) \
+ no_printk(KERN_DEBUG "[%pU %llu] " fmt, \
+ &client->fsid, \
+ client->monc.auth->global_id, \
+ ##__VA_ARGS__)
# endif
#else
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 34aaf0e87def..2150ca60394b 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -690,7 +690,7 @@ static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
-void cgroup_rstat_flush_release(void);
+void cgroup_rstat_flush_release(struct cgroup *cgrp);
/*
* Basic resource stats.
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 00623f4de5e1..0fa56d672532 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -286,6 +286,11 @@ static inline int clk_rate_exclusive_get(struct clk *clk)
return 0;
}
+static inline int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk)
+{
+ return 0;
+}
+
static inline void clk_rate_exclusive_put(struct clk *clk) {}
#endif
diff --git a/include/linux/cmpxchg-emu.h b/include/linux/cmpxchg-emu.h
new file mode 100644
index 000000000000..998deec67740
--- /dev/null
+++ b/include/linux/cmpxchg-emu.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Emulated 1-byte and 2-byte cmpxchg operations for architectures
+ * lacking direct support for these sizes. These are implemented in terms
+ * of 4-byte cmpxchg operations.
+ *
+ * Copyright (C) 2024 Paul E. McKenney.
+ */
+
+#ifndef __LINUX_CMPXCHG_EMU_H
+#define __LINUX_CMPXCHG_EMU_H
+
+uintptr_t cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new);
+
+#endif /* __LINUX_CMPXCHG_EMU_H */
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 2abaa3a825a9..d1a9dbb8e1a7 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -273,15 +273,44 @@ struct ftrace_likely_data {
* disable all instrumentation. See Kconfig.kcsan where this is mandatory.
*/
# define __no_kcsan __no_sanitize_thread __disable_sanitizer_instrumentation
+/*
+ * Type qualifier to mark variables where all data-racy accesses should be
+ * ignored by KCSAN. Note, the implementation simply marks these variables as
+ * volatile, since KCSAN will treat such accesses as "marked".
+ */
+# define __data_racy volatile
# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
#else
# define __no_kcsan
+# define __data_racy
+#endif
+
+#ifdef __SANITIZE_MEMORY__
+/*
+ * Similarly to KASAN and KCSAN, KMSAN loses function attributes of inlined
+ * functions, therefore disabling KMSAN checks also requires disabling inlining.
+ *
+ * __no_sanitize_or_inline effectively prevents KMSAN from reporting errors
+ * within the function and marks all its outputs as initialized.
+ */
+# define __no_sanitize_or_inline __no_kmsan_checks notrace __maybe_unused
#endif
#ifndef __no_sanitize_or_inline
#define __no_sanitize_or_inline __always_inline
#endif
+/*
+ * Apply __counted_by() when the Endianness matches to increase test coverage.
+ */
+#ifdef __LITTLE_ENDIAN
+#define __counted_by_le(member) __counted_by(member)
+#define __counted_by_be(member)
+#else
+#define __counted_by_le(member)
+#define __counted_by_be(member) __counted_by(member)
+#endif
+
/* Do not trap wrapping arithmetic within an annotated function. */
#ifdef CONFIG_UBSAN_SIGNED_WRAP
# define __signed_wrap __attribute__((no_sanitize("signed-integer-overflow")))
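Two brief illustrations of the new annotations (struct and variable names made up): __data_racy marks a variable whose racy accesses are deliberate so KCSAN stops reporting them, and __counted_by_le() attaches a bounds annotation to a flexible array whose element count is stored little-endian, so the check only applies on builds where a raw load of that field is meaningful:

    static int __data_racy watchdog_touches;  /* racy by design; KCSAN ignores accesses */

    struct hw_ring_desc {
            __le16 nelem;                      /* element count, little-endian on the wire */
            u8 data[] __counted_by_le(nelem);  /* bounds-checked on little-endian builds only */
    };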
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index d3eba4360150..0904ba010341 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -30,6 +30,8 @@ struct coredump_params {
struct core_vma_metadata *vma_meta;
};
+extern unsigned int core_file_note_size_limit;
+
/*
* These are the only things you should do on a core-file: use only these
* functions to write out all the necessary info.
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 272e4e79e15c..861c3bfc5f17 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -221,7 +221,18 @@ void cpuhp_report_idle_dead(void);
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+#ifdef CONFIG_CPU_MITIGATIONS
extern bool cpu_mitigations_off(void);
extern bool cpu_mitigations_auto_nosmt(void);
+#else
+static inline bool cpu_mitigations_off(void)
+{
+ return true;
+}
+static inline bool cpu_mitigations_auto_nosmt(void)
+{
+ return false;
+}
+#endif
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 9956afb9acc2..20f7e98ee8af 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -241,6 +241,12 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
bool has_target_index(void);
+
+DECLARE_PER_CPU(unsigned long, cpufreq_pressure);
+static inline unsigned long cpufreq_get_pressure(int cpu)
+{
+ return READ_ONCE(per_cpu(cpufreq_pressure, cpu));
+}
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
{
@@ -264,6 +270,10 @@ static inline bool cpufreq_supports_freq_invariance(void)
}
static inline void disable_cpufreq(void) { }
static inline void cpufreq_update_limits(unsigned int cpu) { }
+static inline unsigned long cpufreq_get_pressure(int cpu)
+{
+ return 0;
+}
#endif
#ifdef CONFIG_CPU_FREQ_STAT
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 35e78ddb2b37..7a5785f405b6 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -146,6 +146,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
CPUHP_AP_IRQ_LOONGARCH_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ CPUHP_AP_IRQ_RISCV_IMSIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 1c29947db848..e8c412ee6400 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -188,6 +188,23 @@ unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask
}
/**
+ * cpumask_first_and_and - return the first cpu from *srcp1 & *srcp2 & *srcp3
+ * @srcp1: the first input
+ * @srcp2: the second input
+ * @srcp3: the third input
+ *
+ * Return: >= nr_cpu_ids if no CPU is set in all three masks.
+ */
+static inline
+unsigned int cpumask_first_and_and(const struct cpumask *srcp1,
+ const struct cpumask *srcp2,
+ const struct cpumask *srcp3)
+{
+ return find_first_and_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
+ cpumask_bits(srcp3), small_cpumask_bits);
+}
+
+/**
* cpumask_last - get the last CPU in a cpumask
* @srcp: - the cpumask pointer
*
@@ -389,6 +406,29 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
}
/**
+ * cpumask_any_and_but - pick a "random" cpu from *mask1 & *mask2, but not this one.
+ * @mask1: the first input cpumask
+ * @mask2: the second input cpumask
+ * @cpu: the cpu to ignore
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+static inline
+unsigned int cpumask_any_and_but(const struct cpumask *mask1,
+ const struct cpumask *mask2,
+ unsigned int cpu)
+{
+ unsigned int i;
+
+ cpumask_check(cpu);
+ i = cpumask_first_and(mask1, mask2);
+ if (i != cpu)
+ return i;
+
+ return cpumask_next_and(cpu, mask1, mask2);
+}
+
+/**
* cpumask_nth - get the Nth cpu in a cpumask
* @srcp: the cpumask pointer
* @cpu: the Nth cpu to find, starting from 0
@@ -853,7 +893,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
*/
static inline unsigned int cpumask_size(void)
{
- return BITS_TO_LONGS(large_cpumask_bits) * sizeof(long);
+ return bitmap_size(large_cpumask_bits);
}
/*
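As an illustration (the affinity mask is hypothetical), cpumask_any_and_but() replaces the open-coded "pick any CPU from the intersection except this one" pattern, with a fallback when the intersection holds nothing else:

    unsigned int target;

    target = cpumask_any_and_but(cpu_online_mask, &irq_affinity_mask, cpu);
    if (target >= nr_cpu_ids)
            target = cpu;   /* no other candidate; stay on the current CPU */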
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 0ce6ff0d9c9a..de4cf0ee96f7 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -70,7 +70,6 @@ extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
-extern void cpuset_wait_for_hotplug(void);
extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
@@ -185,8 +184,6 @@ static inline void cpuset_update_active_cpus(void)
partition_sched_domains(1, NULL, NULL);
}
-static inline void cpuset_wait_for_hotplug(void) { }
-
static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index d33352c2e386..44305336314e 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -37,17 +37,16 @@ static inline void arch_kexec_unprotect_crashkres(void) { }
#ifndef arch_crash_handle_hotplug_event
-static inline void arch_crash_handle_hotplug_event(struct kimage *image) { }
+static inline void arch_crash_handle_hotplug_event(struct kimage *image, void *arg) { }
#endif
-int crash_check_update_elfcorehdr(void);
+int crash_check_hotplug_support(void);
-#ifndef crash_hotplug_cpu_support
-static inline int crash_hotplug_cpu_support(void) { return 0; }
-#endif
-
-#ifndef crash_hotplug_memory_support
-static inline int crash_hotplug_memory_support(void) { return 0; }
+#ifndef arch_crash_hotplug_support
+static inline int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags)
+{
+ return 0;
+}
#endif
#ifndef crash_get_elfcorehdr_size
diff --git a/include/linux/cxl-event.h b/include/linux/cxl-event.h
index 03fa6d50d46f..60b25020281f 100644
--- a/include/linux/cxl-event.h
+++ b/include/linux/cxl-event.h
@@ -3,6 +3,10 @@
#ifndef _LINUX_CXL_EVENT_H
#define _LINUX_CXL_EVENT_H
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/workqueue_types.h>
+
/*
* Common Event Record Format
* CXL rev 3.0 section 8.2.9.2.1; Table 8-42
@@ -91,11 +95,21 @@ struct cxl_event_mem_module {
u8 reserved[0x3d];
} __packed;
+/*
+ * General Media or DRAM Event Common Fields
+ * - provides common access to phys_addr
+ */
+struct cxl_event_common {
+ struct cxl_event_record_hdr hdr;
+ __le64 phys_addr;
+} __packed;
+
union cxl_event {
struct cxl_event_generic generic;
struct cxl_event_gen_media gen_media;
struct cxl_event_dram dram;
struct cxl_event_mem_module mem_module;
+ struct cxl_event_common common;
} __packed;
/*
@@ -140,4 +154,29 @@ struct cxl_cper_event_rec {
union cxl_event event;
} __packed;
+struct cxl_cper_work_data {
+ enum cxl_event_type event_type;
+ struct cxl_cper_event_rec rec;
+};
+
+#ifdef CONFIG_ACPI_APEI_GHES
+int cxl_cper_register_work(struct work_struct *work);
+int cxl_cper_unregister_work(struct work_struct *work);
+int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd);
+#else
+static inline int cxl_cper_register_work(struct work_struct *work)
+{
+ return 0;
+}
+
+static inline int cxl_cper_unregister_work(struct work_struct *work)
+{
+ return 0;
+}
+static inline int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
+{
+ return 0;
+}
+#endif
+
#endif /* _LINUX_CXL_EVENT_H */
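The intended consumer registers a work struct and drains the kfifo from its handler, so GHES only queues records and schedules the work. Schematically (handle_event() and the work item name are placeholders):

    static void cxl_cper_work_fn(struct work_struct *work)
    {
            struct cxl_cper_work_data wd;

            while (cxl_cper_kfifo_get(&wd))
                    handle_event(wd.event_type, &wd.rec);   /* consumer-defined */
    }
    static DECLARE_WORK(cxl_cper_work, cxl_cper_work_fn);

    /* at probe time: cxl_cper_register_work(&cxl_cper_work); */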
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
index 6bfe70decc9f..ae80a303c216 100644
--- a/include/linux/dev_printk.h
+++ b/include/linux/dev_printk.h
@@ -130,6 +130,16 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
})
/*
+ * Dummy dev_printk for disabled debugging statements to use whilst maintaining
+ * gcc's format checking.
+ */
+#define dev_no_printk(level, dev, fmt, ...) \
+ ({ \
+ if (0) \
+ _dev_printk(level, dev, fmt, ##__VA_ARGS__); \
+ })
+
+/*
* #defines for all the dev_<level> macros to prefix with whatever
* possible use of #define dev_fmt(fmt) ...
*/
@@ -158,10 +168,7 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
#else
#define dev_dbg(dev, fmt, ...) \
-({ \
- if (0) \
- dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
-})
+ dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
#endif
#ifdef CONFIG_PRINTK
@@ -247,20 +254,14 @@ do { \
} while (0)
#else
#define dev_dbg_ratelimited(dev, fmt, ...) \
-do { \
- if (0) \
- dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
-} while (0)
+ dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
#endif
#ifdef VERBOSE_DEBUG
#define dev_vdbg dev_dbg
#else
#define dev_vdbg(dev, fmt, ...) \
-({ \
- if (0) \
- dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
-})
+ dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
#endif
/*
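The gain over an empty macro is that the format string and arguments are still type-checked even though the statement compiles to nothing; for instance (hypothetical call), this mismatch keeps producing a -Wformat warning in non-debug builds:

    dev_dbg(dev, "queue depth %u\n", "deep");   /* warns at compile time, emits no code */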
diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h
index c008169ed2c6..c8f7eb6cc191 100644
--- a/include/linux/devcoredump.h
+++ b/include/linux/devcoredump.h
@@ -63,6 +63,8 @@ void dev_coredumpm(struct device *dev, struct module *owner,
void dev_coredumpsg(struct device *dev, struct scatterlist *table,
size_t datalen, gfp_t gfp);
+
+void dev_coredump_put(struct device *dev);
#else
static inline void dev_coredumpv(struct device *dev, void *data,
size_t datalen, gfp_t gfp)
@@ -85,6 +87,9 @@ static inline void dev_coredumpsg(struct device *dev, struct scatterlist *table,
{
_devcd_free_sgtable(table);
}
+static inline void dev_coredump_put(struct device *dev)
+{
+}
#endif /* CONFIG_DEV_COREDUMP */
#endif /* __DEVCOREDUMP_H */
diff --git a/include/linux/devm-helpers.h b/include/linux/devm-helpers.h
index 74891802200d..708ca9131402 100644
--- a/include/linux/devm-helpers.h
+++ b/include/linux/devm-helpers.h
@@ -41,7 +41,7 @@ static inline void devm_delayed_work_drop(void *res)
* detached. A few drivers need delayed work which must be cancelled before
* driver is detached to avoid accessing removed resources.
* devm_delayed_work_autocancel() can be used to omit the explicit
- * cancelleation when driver is detached.
+ * cancellation when driver is detached.
*/
static inline int devm_delayed_work_autocancel(struct device *dev,
struct delayed_work *w,
@@ -66,7 +66,7 @@ static inline void devm_work_drop(void *res)
* A few drivers need to queue work which must be cancelled before driver
* is detached to avoid accessing removed resources.
* devm_work_autocancel() can be used to omit the explicit
- * cancelleation when driver is detached.
+ * cancellation when driver is detached.
*/
static inline int devm_work_autocancel(struct device *dev,
struct work_struct *w,
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 8ff4add71f88..36216d28d8bd 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -370,8 +370,10 @@ struct dma_buf {
*/
struct module *owner;
+#if IS_ENABLED(CONFIG_DEBUG_FS)
/** @list_node: node for dma_buf accounting and debugging. */
struct list_head list_node;
+#endif
/** @priv: exporter specific private data for this buffer object. */
void *priv;
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 3eb3589ff43e..edbe13d00776 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -54,6 +54,24 @@ static inline phys_addr_t translate_dma_to_phys(struct device *dev,
return (phys_addr_t)-1;
}
+static inline dma_addr_t dma_range_map_min(const struct bus_dma_region *map)
+{
+ dma_addr_t ret = (dma_addr_t)U64_MAX;
+
+ for (; map->size; map++)
+ ret = min(ret, map->dma_start);
+ return ret;
+}
+
+static inline dma_addr_t dma_range_map_max(const struct bus_dma_region *map)
+{
+ dma_addr_t ret = 0;
+
+ for (; map->size; map++)
+ ret = max(ret, map->dma_start + map->size - 1);
+ return ret;
+}
+
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#ifndef phys_to_dma_unencrypted
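For a zero-terminated bus_dma_region table these helpers reduce to a linear scan; with the made-up map below, dma_range_map_min() yields 0x00000000 and dma_range_map_max() yields 0x4fffffff (the last byte of the second range):

    static const struct bus_dma_region map[] = {
            { .dma_start = 0x00000000, .size = 0x20000000 },
            { .dma_start = 0x40000000, .size = 0x10000000 },
            { }     /* sentinel: .size == 0 terminates the walk */
    };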
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index c3f9bb6602ba..e06bad467f55 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -682,11 +682,4 @@ static inline bool dma_fence_is_container(struct dma_fence *fence)
return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
}
-#define DMA_FENCE_WARN(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
- ##args); \
- } while (0)
-
#endif /* __LINUX_DMA_FENCE_H */
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 9ee319851b5f..bdb3abb77a87 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -426,11 +426,9 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
#endif
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- bool coherent);
+void arch_setup_dma_ops(struct device *dev, bool coherent);
#else
-static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 size, bool coherent)
+static inline void arch_setup_dma_ops(struct device *dev, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index e34b601b71fd..499bb2c63483 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -117,7 +117,7 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
int count);
/* Intel IOMMU detection */
void detect_intel_iommu(void);
-extern int enable_drhd_fault_handling(void);
+extern int enable_drhd_fault_handling(unsigned int cpu);
extern int dmar_device_add(acpi_handle handle);
extern int dmar_device_remove(acpi_handle handle);
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 4fcbf4d4fd0a..ff44ec346162 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -305,9 +305,9 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
#define DYNAMIC_DEBUG_BRANCH(descriptor) false
#define dynamic_pr_debug(fmt, ...) \
- do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#define dynamic_dev_dbg(dev, fmt, ...) \
- do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
+ dev_no_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__)
#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
do { if (0) \
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index 5693a4be0d9a..281298e77a15 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -50,6 +50,9 @@ struct dql {
unsigned int adj_limit; /* limit + num_completed */
unsigned int last_obj_cnt; /* Count at last queuing */
+ /* Stall threshold (in jiffies), defined by user */
+ unsigned short stall_thrs;
+
unsigned long history_head; /* top 58 bits of jiffies */
/* stall entries, a bit per entry */
unsigned long history[DQL_HIST_LEN];
@@ -71,8 +74,6 @@ struct dql {
unsigned int min_limit; /* Minimum limit */
unsigned int slack_hold_time; /* Time to measure slack */
- /* Stall threshold (in jiffies), defined by user */
- unsigned short stall_thrs;
/* Longest stall detected, reported to user */
unsigned short stall_max;
unsigned long last_reap; /* Last reap (in jiffies) */
@@ -83,27 +84,11 @@ struct dql {
#define DQL_MAX_OBJECT (UINT_MAX / 16)
#define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT)
-/*
- * Record number of objects queued. Assumes that caller has already checked
- * availability in the queue with dql_avail.
- */
-static inline void dql_queued(struct dql *dql, unsigned int count)
+/* Populate the bitmap to be processed later in dql_check_stall() */
+static inline void dql_queue_stall(struct dql *dql)
{
unsigned long map, now, now_hi, i;
- BUG_ON(count > DQL_MAX_OBJECT);
-
- dql->last_obj_cnt = count;
-
- /* We want to force a write first, so that cpu do not attempt
- * to get cache line containing last_obj_cnt, num_queued, adj_limit
- * in Shared state, but directly does a Request For Ownership
- * It is only a hint, we use barrier() only.
- */
- barrier();
-
- dql->num_queued += count;
-
now = jiffies;
now_hi = now / BITS_PER_LONG;
@@ -133,6 +118,31 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
WRITE_ONCE(DQL_HIST_ENT(dql, now_hi), map | BIT_MASK(now));
}
+/*
+ * Record number of objects queued. Assumes that caller has already checked
+ * availability in the queue with dql_avail.
+ */
+static inline void dql_queued(struct dql *dql, unsigned int count)
+{
+ if (WARN_ON_ONCE(count > DQL_MAX_OBJECT))
+ return;
+
+ dql->last_obj_cnt = count;
+
+ /* We want to force a write first, so that cpu do not attempt
+ * to get cache line containing last_obj_cnt, num_queued, adj_limit
+ * in Shared state, but directly does a Request For Ownership
+ * It is only a hint, we use barrier() only.
+ */
+ barrier();
+
+ dql->num_queued += count;
+
+ /* Only populate stall information if the threshold is set */
+ if (READ_ONCE(dql->stall_thrs))
+ dql_queue_stall(dql);
+}
+
/* Returns how many objects can be queued, < 0 indicates over limit. */
static inline int dql_avail(const struct dql *dql)
{
diff --git a/include/linux/efi.h b/include/linux/efi.h
index d59b0947fba0..418e555459da 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1072,12 +1072,11 @@ static inline u64 efivar_reserved_space(void) { return 0; }
#endif
/*
- * The maximum size of VariableName + Data = 1024
- * Therefore, it's reasonable to save that much
- * space in each part of the structure,
- * and we use a page for reading/writing.
+ * There is no actual upper limit specified for the variable name size.
+ *
+ * This limit exists only for practical purposes, since name conversions
+ * are bounds-checked and name data is occasionally stored in-line.
*/
-
#define EFI_VAR_NAME_LEN 1024
int efivars_register(struct efivars *efivars,
diff --git a/include/linux/elf.h b/include/linux/elf.h
index c9a46c4e183b..5c402788da19 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -65,7 +65,7 @@ extern Elf64_Dyn _DYNAMIC [];
struct file;
struct coredump_params;
-#ifndef ARCH_HAVE_EXTRA_ELF_NOTES
+#ifndef CONFIG_ARCH_HAVE_EXTRA_ELF_NOTES
static inline int elf_coredump_extra_notes_size(void) { return 0; }
static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm) { return 0; }
#else
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index 70cd7258cd29..1ff52020cf75 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -172,6 +172,7 @@ struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd);
void em_table_free(struct em_perf_table __rcu *table);
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
int nr_states);
+int em_dev_update_chip_binning(struct device *dev);
/**
* em_pd_get_efficient_state() - Get an efficient performance state from the EM
@@ -386,6 +387,10 @@ int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
{
return -EINVAL;
}
+static inline int em_dev_update_chip_binning(struct device *dev)
+{
+ return -EINVAL;
+}
#endif
#endif
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 224645f17c33..2ad1ffa4ccb9 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -71,6 +71,12 @@ static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
#define eth_stp_addr eth_reserved_addr_base
+static const u8 eth_ipv4_mcast_addr_base[ETH_ALEN] __aligned(2) =
+{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
+
+static const u8 eth_ipv6_mcast_addr_base[ETH_ALEN] __aligned(2) =
+{ 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
+
/**
* is_link_local_ether_addr - Determine if given Ethernet address is link-local
* @addr: Pointer to a six-byte array containing the Ethernet address
@@ -430,18 +436,16 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
static inline bool ether_addr_is_ipv4_mcast(const u8 *addr)
{
- u8 base[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
- return ether_addr_equal_masked(addr, base, mask);
+ return ether_addr_equal_masked(addr, eth_ipv4_mcast_addr_base, mask);
}
static inline bool ether_addr_is_ipv6_mcast(const u8 *addr)
{
- u8 base[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
u8 mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
- return ether_addr_equal_masked(addr, base, mask);
+ return ether_addr_equal_masked(addr, eth_ipv6_mcast_addr_base, mask);
}
static inline bool ether_addr_is_ip_mcast(const u8 *addr)
@@ -608,6 +612,31 @@ static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
}
/**
+ * eth_skb_pkt_type - Assign packet type if destination address does not match
+ * @skb: Assigned a packet type if address does not match @dev address
+ * @dev: Network device used to compare packet address against
+ *
+ * If the destination MAC address of the packet does not match the network
+ * device address, assign an appropriate packet type.
+ */
+static inline void eth_skb_pkt_type(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ const struct ethhdr *eth = eth_hdr(skb);
+
+ if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) {
+ if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
+ if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+ } else {
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+ }
+}
+
+/**
* eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
* @skb: Buffer to pad
*
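A decapsulating driver (sketch only; it ignores 802.3 length frames and assumes the MAC header is in the linear area) can use the new helper to classify an inner Ethernet frame the same way eth_type_trans() does for the outer one:

    skb_reset_mac_header(skb);              /* eth_hdr() below relies on this */
    eth_skb_pkt_type(skb, dev);             /* sets BROADCAST/MULTICAST/OTHERHOST */
    skb->protocol = eth_hdr(skb)->h_proto;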
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 9901e563f706..6fd9107d3cc0 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -480,6 +480,26 @@ struct ethtool_rmon_stats {
);
};
+/**
+ * struct ethtool_ts_stats - HW timestamping statistics
+ * @pkts: Number of packets successfully timestamped by the hardware.
+ * @lost: Number of hardware timestamping requests where the timestamping
+ * information from the hardware never arrived for submission with
+ * the skb.
+ * @err: Number of arbitrary timestamp generation error events that the
+ * hardware encountered, exclusive of @lost statistics. Cases such
+ * as resource exhaustion, unavailability, firmware errors, and
+ * detected illogical timestamp values not submitted with the skb
+ * are included in this counter.
+ */
+struct ethtool_ts_stats {
+ struct_group(tx_stats,
+ u64 pkts;
+ u64 lost;
+ u64 err;
+ );
+};
+
#define ETH_MODULE_EEPROM_PAGE_LEN 128
#define ETH_MODULE_MAX_I2C_ADDRESS 0x7f
@@ -755,7 +775,10 @@ struct ethtool_rxfh_param {
* @get_ts_info: Get the time stamping and PTP hardware clock capabilities.
* It may be called with RCU, or rtnl or reference on the device.
* Drivers supporting transmit time stamps in software should set this to
- * ethtool_op_get_ts_info().
+ * ethtool_op_get_ts_info(). Drivers must not zero statistics which they
+ * don't report. The stats structure is initialized to ETHTOOL_STAT_NOT_SET
+ * indicating the driver does not report statistics.
+ * @get_ts_stats: Query the device hardware timestamping statistics.
* @get_module_info: Get the size and type of the eeprom contained within
* a plug-in module.
* @get_module_eeprom: Get the eeprom information from the plug-in module
@@ -898,6 +921,8 @@ struct ethtool_ops {
struct ethtool_dump *, void *);
int (*set_dump)(struct net_device *, struct ethtool_dump *);
int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *);
+ void (*get_ts_stats)(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats);
int (*get_module_info)(struct net_device *,
struct ethtool_modinfo *);
int (*get_module_eeprom)(struct net_device *,
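A driver whose hardware only counts successfully timestamped packets would implement the new op like this (all foo_* names hypothetical); it deliberately leaves @lost and @err untouched so they remain ETHTOOL_STAT_NOT_SET:

    static void foo_get_ts_stats(struct net_device *dev,
                                 struct ethtool_ts_stats *ts_stats)
    {
            struct foo_priv *priv = netdev_priv(dev);

            ts_stats->pkts = priv->hw_tstamp_pkts;  /* only what we actually count */
    }

    static const struct ethtool_ops foo_ethtool_ops = {
            .get_ts_info  = ethtool_op_get_ts_info,
            .get_ts_stats = foo_get_ts_stats,
    };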
diff --git a/include/linux/evm.h b/include/linux/evm.h
index d48d6da32315..ddece4a6b25d 100644
--- a/include/linux/evm.h
+++ b/include/linux/evm.h
@@ -26,6 +26,8 @@ extern int evm_protected_xattr_if_enabled(const char *req_xattr_name);
extern int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
int buffer_size, char type,
bool canonical_fmt);
+extern bool evm_metadata_changed(struct inode *inode,
+ struct inode *metadata_inode);
#ifdef CONFIG_FS_POSIX_ACL
extern int posix_xattr_acl(const char *xattrname);
#else
@@ -76,5 +78,11 @@ static inline int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
return -EOPNOTSUPP;
}
+static inline bool evm_metadata_changed(struct inode *inode,
+ struct inode *metadata_inode)
+{
+ return false;
+}
+
#endif /* CONFIG_EVM */
#endif /* LINUX_EVM_H */
diff --git a/include/linux/execmem.h b/include/linux/execmem.h
new file mode 100644
index 000000000000..32cef1144117
--- /dev/null
+++ b/include/linux/execmem.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_EXECMEM_ALLOC_H
+#define _LINUX_EXECMEM_ALLOC_H
+
+#include <linux/types.h>
+#include <linux/moduleloader.h>
+
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+ !defined(CONFIG_KASAN_VMALLOC)
+#include <linux/kasan.h>
+#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
+#define MODULE_ALIGN PAGE_SIZE
+#endif
+
+/**
+ * enum execmem_type - types of executable memory ranges
+ *
+ * There are several subsystems that allocate executable memory.
+ * Architectures define different restrictions on placement,
+ * permissions, alignment and other parameters for memory that can be used
+ * by these subsystems.
+ * Types in this enum identify subsystems that allocate executable memory
+ * and let architectures define parameters for ranges suitable for
+ * allocations by each subsystem.
+ *
+ * @EXECMEM_DEFAULT: default parameters that would be used for types that
+ * are not explicitly defined.
+ * @EXECMEM_MODULE_TEXT: parameters for module text sections
+ * @EXECMEM_KPROBES: parameters for kprobes
+ * @EXECMEM_FTRACE: parameters for ftrace
+ * @EXECMEM_BPF: parameters for BPF
+ * @EXECMEM_MODULE_DATA: parameters for module data sections
+ * @EXECMEM_TYPE_MAX:
+ */
+enum execmem_type {
+ EXECMEM_DEFAULT,
+ EXECMEM_MODULE_TEXT = EXECMEM_DEFAULT,
+ EXECMEM_KPROBES,
+ EXECMEM_FTRACE,
+ EXECMEM_BPF,
+ EXECMEM_MODULE_DATA,
+ EXECMEM_TYPE_MAX,
+};
+
+/**
+ * enum execmem_range_flags - options for executable memory allocations
+ * @EXECMEM_KASAN_SHADOW: allocate kasan shadow
+ */
+enum execmem_range_flags {
+ EXECMEM_KASAN_SHADOW = (1 << 0),
+};
+
+/**
+ * struct execmem_range - definition of an address space suitable for code and
+ * related data allocations
+ * @start: address space start
+ * @end: address space end (inclusive)
+ * @fallback_start: start of the secondary address space range for fallback
+ * allocations on architectures that require it
+ * @fallback_end: end of the secondary address space range (inclusive)
+ * @pgprot: permissions for memory in this address space
+ * @alignment: alignment required for text allocations
+ * @flags: options for memory allocations for this range
+ */
+struct execmem_range {
+ unsigned long start;
+ unsigned long end;
+ unsigned long fallback_start;
+ unsigned long fallback_end;
+ pgprot_t pgprot;
+ unsigned int alignment;
+ enum execmem_range_flags flags;
+};
+
+/**
+ * struct execmem_info - architecture parameters for code allocations
+ * @ranges: array of parameter sets defining architecture specific
+ * parameters for executable memory allocations. The ranges that are not
+ * explicitly initialized by an architecture use parameters defined for
+ * @EXECMEM_DEFAULT.
+ */
+struct execmem_info {
+ struct execmem_range ranges[EXECMEM_TYPE_MAX];
+};
+
+/**
+ * execmem_arch_setup - define parameters for allocations of executable memory
+ *
+ * A hook for architectures to define parameters for allocations of
+ * executable memory. These parameters should be filled into the
+ * @execmem_info structure.
+ *
+ * For architectures that do not implement this method, a default set of
+ * parameters will be used.
+ *
+ * Return: a structure defining architecture parameters and restrictions
+ * for allocations of executable memory
+ */
+struct execmem_info *execmem_arch_setup(void);
+
+/**
+ * execmem_alloc - allocate executable memory
+ * @type: type of the allocation
+ * @size: how many bytes of memory are required
+ *
+ * Allocates memory that will contain either executable code (generated or
+ * loaded from kernel modules) or, depending on @type, data coupled with
+ * executable code, like data sections in kernel modules.
+ *
+ * The memory will have the protections defined by the architecture for the
+ * executable region of the given @type.
+ *
+ * Return: a pointer to the allocated memory or %NULL
+ */
+void *execmem_alloc(enum execmem_type type, size_t size);
+
+/**
+ * execmem_free - free executable memory
+ * @ptr: pointer to the memory that should be freed
+ */
+void execmem_free(void *ptr);
+
+#if defined(CONFIG_EXECMEM) && !defined(CONFIG_ARCH_WANTS_EXECMEM_LATE)
+void execmem_init(void);
+#else
+static inline void execmem_init(void) {}
+#endif
+
+#endif /* _LINUX_EXECMEM_ALLOC_H */
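An architecture opting in can describe just one range and let everything else inherit EXECMEM_DEFAULT; a minimal sketch, assuming the usual MODULES_VADDR/MODULES_END layout and PAGE_KERNEL_EXEC protections provided by the architecture:

    static struct execmem_info execmem_info __ro_after_init;

    struct execmem_info __init *execmem_arch_setup(void)
    {
            execmem_info = (struct execmem_info){
                    .ranges = {
                            [EXECMEM_DEFAULT] = {
                                    .start     = MODULES_VADDR,
                                    .end       = MODULES_END,
                                    .pgprot    = PAGE_KERNEL_EXEC,
                                    .alignment = 1,
                            },
                    },
            };

            return &execmem_info;
    }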
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 0dd27364d56f..811e47f9d1c3 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -694,6 +694,10 @@ extern int fb_deferred_io_fsync(struct file *file, loff_t start,
__FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
__FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
+#define FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(__prefix, __damage_range, __damage_area) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
+
/*
* Initializes struct fb_ops for deferred I/O.
*/
diff --git a/include/linux/file.h b/include/linux/file.h
index 169692cb1906..45d0f4800abd 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -84,6 +84,7 @@ static inline void fdput_pos(struct fd f)
}
DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd)
+DEFINE_CLASS(fd_raw, struct fd, fdput(_T), fdget_raw(fd), int fd)
extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
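With the class defined, callers can take a raw fd reference (fdget_raw() also accepts O_PATH file descriptors) that is dropped automatically at scope exit; a hedged sketch of the cleanup-based pattern:

    CLASS(fd_raw, f)(fd);           /* fdget_raw(fd); fdput() runs at scope exit */
    if (!f.file)
            return -EBADF;
    /* ... use f.file ... */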
diff --git a/include/linux/filter.h b/include/linux/filter.h
index c99bc3df2d28..0f12cf01070e 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -75,6 +75,9 @@ struct ctl_table_header;
/* unused opcode to mark special load instruction. Same as BPF_MSH */
#define BPF_PROBE_MEM32 0xa0
+/* unused opcode to mark special atomic instruction */
+#define BPF_PROBE_ATOMIC 0xe0
+
/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS 0xe0
@@ -178,6 +181,25 @@ struct ctl_table_header;
.off = 0, \
.imm = 0 })
+/* Special (internal-only) form of mov, used to resolve per-CPU addrs:
+ * dst_reg = src_reg + <percpu_base_off>
+ * BPF_ADDR_PERCPU is used as a special insn->off value.
+ */
+#define BPF_ADDR_PERCPU (-1)
+
+#define BPF_MOV64_PERCPU_REG(DST, SRC) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = BPF_ADDR_PERCPU, \
+ .imm = 0 })
+
+static inline bool insn_is_mov_percpu_addr(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->off == BPF_ADDR_PERCPU;
+}
+
/* Short form of mov, dst_reg = imm32 */
#define BPF_MOV64_IMM(DST, IMM) \
@@ -228,6 +250,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
}
+/* addr_space_cast from as(0) to as(1) is for converting bpf arena pointers
+ * to pointers in user vma.
+ */
+static inline bool insn_is_cast_user(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
+ insn->off == BPF_ADDR_SPACE_CAST &&
+ insn->imm == 1U << 16;
+}
+
/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
BPF_LD_IMM64_RAW(DST, 0, IMM)
@@ -644,14 +676,16 @@ static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
cant_migrate();
if (static_branch_unlikely(&bpf_stats_enabled_key)) {
struct bpf_prog_stats *stats;
- u64 start = sched_clock();
+ u64 duration, start = sched_clock();
unsigned long flags;
ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+
+ duration = sched_clock() - start;
stats = this_cpu_ptr(prog->stats);
flags = u64_stats_update_begin_irqsave(&stats->syncp);
u64_stats_inc(&stats->cnt);
- u64_stats_add(&stats->nsecs, sched_clock() - start);
+ u64_stats_add(&stats->nsecs, duration);
u64_stats_update_end_irqrestore(&stats->syncp, flags);
} else {
ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
@@ -887,20 +921,22 @@ bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
+static inline int __must_check bpf_prog_lock_ro(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
if (!fp->jited) {
set_vm_flush_reset_perms(fp);
- set_memory_ro((unsigned long)fp, fp->pages);
+ return set_memory_ro((unsigned long)fp, fp->pages);
}
#endif
+ return 0;
}
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
+static inline int __must_check
+bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
set_vm_flush_reset_perms(hdr);
- set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
+ return set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
}
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
@@ -957,12 +993,16 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_jit_needs_zext(void);
+bool bpf_jit_inlines_helper_call(s32 imm);
bool bpf_jit_supports_subprog_tailcalls(void);
+bool bpf_jit_supports_percpu_insn(void);
bool bpf_jit_supports_kfunc_call(void);
bool bpf_jit_supports_far_kfunc_call(void);
bool bpf_jit_supports_exceptions(void);
bool bpf_jit_supports_ptr_xchg(void);
bool bpf_jit_supports_arena(void);
+bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
+u64 bpf_arch_uaddress_limit(void);
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
bool bpf_helper_changes_pkt_data(void *func);
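With the __must_check conversion, a JIT's finalization path is expected to unwind instead of silently running the image from writable memory; roughly (hypothetical error path, following the usual bpf_int_jit_compile() structure):

    if (bpf_jit_binary_lock_ro(header)) {
            /* set_memory_rox() failed: discard the image and fall back */
            bpf_jit_binary_free(header);
            prog = orig_prog;
            goto out;
    }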
diff --git a/include/linux/find.h b/include/linux/find.h
index c69598e383c1..28ec5a03393a 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -29,6 +29,8 @@ unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1, const unsign
unsigned long n);
extern unsigned long _find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size);
+unsigned long _find_first_and_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ const unsigned long *addr3, unsigned long size);
extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
@@ -345,6 +347,31 @@ unsigned long find_first_and_bit(const unsigned long *addr1,
}
#endif
+/**
+ * find_first_and_and_bit - find the first set bit in 3 memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @addr3: The third address to base the search on
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the first set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_first_and_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ const unsigned long *addr3,
+ unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & *addr3 & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_and_and_bit(addr1, addr2, addr3, size);
+}
+
#ifndef find_first_zero_bit
/**
* find_first_zero_bit - find the first cleared bit in a memory region
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index dd9f2d765e68..00abe0e5d602 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -463,7 +463,8 @@ struct fw_iso_packet {
u32 tag:2; /* tx: Tag in packet header */
u32 sy:4; /* tx: Sy in packet header */
u32 header_length:8; /* Length of immediate header */
- u32 header[]; /* tx: Top of 1394 isoch. data_block */
+ /* tx: Top of 1394 isoch. data_block */
+ u32 header[] __counted_by(header_length);
};
#define FW_ISO_CONTEXT_TRANSMIT 0
diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h
index 23384a54d575..82687e07a7c2 100644
--- a/include/linux/firmware/cirrus/cs_dsp.h
+++ b/include/linux/firmware/cirrus/cs_dsp.h
@@ -238,8 +238,12 @@ void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp);
int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int event_id);
int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
const void *buf, size_t len);
+int cs_dsp_coeff_lock_and_write_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ const void *buf, size_t len);
int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
void *buf, size_t len);
+int cs_dsp_coeff_lock_and_read_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ void *buf, size_t len);
struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, int type,
unsigned int alg);
diff --git a/include/linux/firmware/qcom/qcom_qseecom.h b/include/linux/firmware/qcom/qcom_qseecom.h
index 5c28298a98be..366243ee9609 100644
--- a/include/linux/firmware/qcom/qcom_qseecom.h
+++ b/include/linux/firmware/qcom/qcom_qseecom.h
@@ -10,6 +10,7 @@
#define __QCOM_QSEECOM_H
#include <linux/auxiliary_bus.h>
+#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/firmware/qcom/qcom_scm.h>
@@ -25,11 +26,56 @@ struct qseecom_client {
};
/**
+ * qseecom_scm_dev() - Get the SCM device associated with the QSEECOM client.
+ * @client: The QSEECOM client device.
+ *
+ * Returns the SCM device under which the provided QSEECOM client device
+ * operates. This function is intended to be used for DMA allocations.
+ */
+static inline struct device *qseecom_scm_dev(struct qseecom_client *client)
+{
+ return client->aux_dev.dev.parent->parent;
+}
+
+/**
+ * qseecom_dma_alloc() - Allocate DMA memory for a QSEECOM client.
+ * @client: The QSEECOM client to allocate the memory for.
+ * @size: The number of bytes to allocate.
+ * @dma_handle: Pointer to where the DMA address should be stored.
+ * @gfp: Allocation flags.
+ *
+ * Wrapper function for dma_alloc_coherent(), allocating DMA memory usable for
+ * TZ/QSEECOM communication. Refer to dma_alloc_coherent() for details.
+ */
+static inline void *qseecom_dma_alloc(struct qseecom_client *client, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ return dma_alloc_coherent(qseecom_scm_dev(client), size, dma_handle, gfp);
+}
+
+/**
+ * qseecom_dma_free() - Free QSEECOM DMA memory.
+ * @client: The QSEECOM client for which the memory has been allocated.
+ * @size: The number of bytes allocated.
+ * @cpu_addr: Virtual memory address to free.
+ * @dma_handle: DMA memory address to free.
+ *
+ * Wrapper function for dma_free_coherent(), freeing memory previously
+ * allocated with qseecom_dma_alloc(). Refer to dma_free_coherent() for
+ * details.
+ */
+static inline void qseecom_dma_free(struct qseecom_client *client, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ return dma_free_coherent(qseecom_scm_dev(client), size, cpu_addr, dma_handle);
+}
+
+/**
* qcom_qseecom_app_send() - Send to and receive data from a given QSEE app.
* @client: The QSEECOM client associated with the target app.
- * @req: Request buffer sent to the app (must be DMA-mappable).
+ * @req: DMA address of the request buffer sent to the app.
* @req_size: Size of the request buffer.
- * @rsp: Response buffer, written to by the app (must be DMA-mappable).
+ * @rsp: DMA address of the response buffer, written to by the app.
* @rsp_size: Size of the response buffer.
*
* Sends a request to the QSEE app associated with the given client and read
@@ -43,8 +89,9 @@ struct qseecom_client {
*
* Return: Zero on success, nonzero on failure.
*/
-static inline int qcom_qseecom_app_send(struct qseecom_client *client, void *req, size_t req_size,
- void *rsp, size_t rsp_size)
+static inline int qcom_qseecom_app_send(struct qseecom_client *client,
+ dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size)
{
return qcom_scm_qseecom_app_send(client->app_id, req, req_size, rsp, rsp_size);
}
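Callers now allocate both message buffers through the wrappers above and hand the DMA addresses to the app; a hypothetical exchange:

    dma_addr_t req_dma, rsp_dma;
    void *req, *rsp;
    int ret;

    req = qseecom_dma_alloc(client, req_size, &req_dma, GFP_KERNEL);
    if (!req)
            return -ENOMEM;
    rsp = qseecom_dma_alloc(client, rsp_size, &rsp_dma, GFP_KERNEL);
    if (!rsp) {
            qseecom_dma_free(client, req_size, req, req_dma);
            return -ENOMEM;
    }

    /* ... fill *req ... */
    ret = qcom_qseecom_app_send(client, req_dma, req_size, rsp_dma, rsp_size);
    /* ... parse *rsp on success ... */

    qseecom_dma_free(client, rsp_size, rsp, rsp_dma);
    qseecom_dma_free(client, req_size, req, req_dma);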
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index ccaf28846054..aaa19f93ac43 100644
--- a/include/linux/firmware/qcom/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -118,8 +118,8 @@ bool qcom_scm_lmh_dcvsh_available(void);
#ifdef CONFIG_QCOM_QSEECOM
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id);
-int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
- size_t rsp_size);
+int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size);
#else /* CONFIG_QCOM_QSEECOM */
@@ -128,9 +128,9 @@ static inline int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
return -EINVAL;
}
-static inline int qcom_scm_qseecom_app_send(u32 app_id, void *req,
- size_t req_size, void *rsp,
- size_t rsp_size)
+static inline int qcom_scm_qseecom_app_send(u32 app_id,
+ dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size)
{
return -EINVAL;
}
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index b24d62bad0b3..d658ae729a02 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -15,10 +15,14 @@
#define FORTIFY_REASON(func, write) (FIELD_PREP(BIT(0), write) | \
FIELD_PREP(GENMASK(7, 1), func))
+/* Overridden by KUnit tests. */
#ifndef fortify_panic
# define fortify_panic(func, write, avail, size, retfail) \
__fortify_panic(FORTIFY_REASON(func, write), avail, size)
#endif
+#ifndef fortify_warn_once
+# define fortify_warn_once(x...) WARN_ONCE(x)
+#endif
#define FORTIFY_READ 0
#define FORTIFY_WRITE 1
@@ -609,7 +613,7 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
const size_t __q_size = (q_size); \
const size_t __p_size_field = (p_size_field); \
const size_t __q_size_field = (q_size_field); \
- WARN_ONCE(fortify_memcpy_chk(__fortify_size, __p_size, \
+ fortify_warn_once(fortify_memcpy_chk(__fortify_size, __p_size, \
__q_size, __p_size_field, \
__q_size_field, FORTIFY_FUNC_ ##op), \
#op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \
@@ -734,7 +738,8 @@ __FORTIFY_INLINE void *kmemdup_noprof(const void * const POS0 p, size_t size, gf
if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
- fortify_panic(FORTIFY_FUNC_kmemdup, FORTIFY_READ, p_size, size, NULL);
+ fortify_panic(FORTIFY_FUNC_kmemdup, FORTIFY_READ, p_size, size,
+ __real_kmemdup(p, 0, gfp));
return __real_kmemdup(p, size, gfp);
}
#define kmemdup(...) alloc_hooks(kmemdup_noprof(__VA_ARGS__))
diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h
index 3e03758151f4..f39869588117 100644
--- a/include/linux/fprobe.h
+++ b/include/linux/fprobe.h
@@ -7,6 +7,16 @@
#include <linux/ftrace.h>
#include <linux/rethook.h>
+struct fprobe;
+
+typedef int (*fprobe_entry_cb)(struct fprobe *fp, unsigned long entry_ip,
+ unsigned long ret_ip, struct pt_regs *regs,
+ void *entry_data);
+
+typedef void (*fprobe_exit_cb)(struct fprobe *fp, unsigned long entry_ip,
+ unsigned long ret_ip, struct pt_regs *regs,
+ void *entry_data);
+
/**
* struct fprobe - ftrace based probe.
* @ops: The ftrace_ops.
@@ -34,12 +44,8 @@ struct fprobe {
size_t entry_data_size;
int nr_maxactive;
- int (*entry_handler)(struct fprobe *fp, unsigned long entry_ip,
- unsigned long ret_ip, struct pt_regs *regs,
- void *entry_data);
- void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip,
- unsigned long ret_ip, struct pt_regs *regs,
- void *entry_data);
+ fprobe_entry_cb entry_handler;
+ fprobe_exit_cb exit_handler;
};
/* This fprobe is soft-disabled. */
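The typedefs don't change the callback signatures, only how they're spelled; a hypothetical probe still fills in the same fields, and register_fprobe() itself is unchanged:

    static int my_entry(struct fprobe *fp, unsigned long entry_ip,
                        unsigned long ret_ip, struct pt_regs *regs,
                        void *entry_data)
    {
            pr_info("hit %pS\n", (void *)entry_ip);
            return 0;       /* non-zero skips the exit handler for this hit */
    }

    static struct fprobe my_fprobe = {
            .entry_handler = my_entry,      /* type: fprobe_entry_cb */
    };

    /* register_fprobe(&my_fprobe, "vfs_read", NULL); */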
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4f2e2e5b6ab1..a7e7e37ba417 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -110,23 +110,26 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
*/
/* file is open for reading */
-#define FMODE_READ ((__force fmode_t)0x1)
+#define FMODE_READ ((__force fmode_t)(1 << 0))
/* file is open for writing */
-#define FMODE_WRITE ((__force fmode_t)0x2)
+#define FMODE_WRITE ((__force fmode_t)(1 << 1))
/* file is seekable */
-#define FMODE_LSEEK ((__force fmode_t)0x4)
+#define FMODE_LSEEK ((__force fmode_t)(1 << 2))
/* file can be accessed using pread */
-#define FMODE_PREAD ((__force fmode_t)0x8)
+#define FMODE_PREAD ((__force fmode_t)(1 << 3))
/* file can be accessed using pwrite */
-#define FMODE_PWRITE ((__force fmode_t)0x10)
+#define FMODE_PWRITE ((__force fmode_t)(1 << 4))
/* File is opened for execution with sys_execve / sys_uselib */
-#define FMODE_EXEC ((__force fmode_t)0x20)
+#define FMODE_EXEC ((__force fmode_t)(1 << 5))
/* File writes are restricted (block device specific) */
-#define FMODE_WRITE_RESTRICTED ((__force fmode_t)0x40)
+#define FMODE_WRITE_RESTRICTED ((__force fmode_t)(1 << 6))
+
+/* FMODE_* bits 7 to 8 */
+
/* 32bit hashes as llseek() offset (for directories) */
-#define FMODE_32BITHASH ((__force fmode_t)0x200)
+#define FMODE_32BITHASH ((__force fmode_t)(1 << 9))
/* 64bit hashes as llseek() offset (for directories) */
-#define FMODE_64BITHASH ((__force fmode_t)0x400)
+#define FMODE_64BITHASH ((__force fmode_t)(1 << 10))
/*
* Don't update ctime and mtime.
@@ -134,60 +137,53 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
* Currently a special hack for the XFS open_by_handle ioctl, but we'll
* hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon.
*/
-#define FMODE_NOCMTIME ((__force fmode_t)0x800)
+#define FMODE_NOCMTIME ((__force fmode_t)(1 << 11))
/* Expect random access pattern */
-#define FMODE_RANDOM ((__force fmode_t)0x1000)
+#define FMODE_RANDOM ((__force fmode_t)(1 << 12))
/* File is huge (eg. /dev/mem): treat loff_t as unsigned */
-#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
+#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)(1 << 13))
/* File is opened with O_PATH; almost nothing can be done with it */
-#define FMODE_PATH ((__force fmode_t)0x4000)
+#define FMODE_PATH ((__force fmode_t)(1 << 14))
/* File needs atomic accesses to f_pos */
-#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000)
+#define FMODE_ATOMIC_POS ((__force fmode_t)(1 << 15))
/* Write access to underlying fs */
-#define FMODE_WRITER ((__force fmode_t)0x10000)
+#define FMODE_WRITER ((__force fmode_t)(1 << 16))
/* Has read method(s) */
-#define FMODE_CAN_READ ((__force fmode_t)0x20000)
+#define FMODE_CAN_READ ((__force fmode_t)(1 << 17))
/* Has write method(s) */
-#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
+#define FMODE_CAN_WRITE ((__force fmode_t)(1 << 18))
-#define FMODE_OPENED ((__force fmode_t)0x80000)
-#define FMODE_CREATED ((__force fmode_t)0x100000)
+#define FMODE_OPENED ((__force fmode_t)(1 << 19))
+#define FMODE_CREATED ((__force fmode_t)(1 << 20))
/* File is stream-like */
-#define FMODE_STREAM ((__force fmode_t)0x200000)
+#define FMODE_STREAM ((__force fmode_t)(1 << 21))
/* File supports DIRECT IO */
-#define FMODE_CAN_ODIRECT ((__force fmode_t)0x400000)
+#define FMODE_CAN_ODIRECT ((__force fmode_t)(1 << 22))
-#define FMODE_NOREUSE ((__force fmode_t)0x800000)
+#define FMODE_NOREUSE ((__force fmode_t)(1 << 23))
-/* File supports non-exclusive O_DIRECT writes from multiple threads */
-#define FMODE_DIO_PARALLEL_WRITE ((__force fmode_t)0x1000000)
+/* FMODE_* bit 24 */
/* File is embedded in backing_file object */
-#define FMODE_BACKING ((__force fmode_t)0x2000000)
+#define FMODE_BACKING ((__force fmode_t)(1 << 25))
/* File was opened by fanotify and shouldn't generate fanotify events */
-#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
+#define FMODE_NONOTIFY ((__force fmode_t)(1 << 26))
/* File is capable of returning -EAGAIN if I/O will block */
-#define FMODE_NOWAIT ((__force fmode_t)0x8000000)
+#define FMODE_NOWAIT ((__force fmode_t)(1 << 27))
/* File represents mount that needs unmounting */
-#define FMODE_NEED_UNMOUNT ((__force fmode_t)0x10000000)
+#define FMODE_NEED_UNMOUNT ((__force fmode_t)(1 << 28))
/* File does not contribute to nr_files count */
-#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000)
-
-/* File supports async buffered reads */
-#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000)
-
-/* File supports async nowait buffered writes */
-#define FMODE_BUF_WASYNC ((__force fmode_t)0x80000000)
+#define FMODE_NOACCOUNT ((__force fmode_t)(1 << 29))
/*
* Attribute flags. These should be or-ed together to figure out what
@@ -1035,12 +1031,13 @@ struct file_handle {
__u32 handle_bytes;
int handle_type;
/* file identifier */
- unsigned char f_handle[];
+ unsigned char f_handle[] __counted_by(handle_bytes);
};
static inline struct file *get_file(struct file *f)
{
- atomic_long_inc(&f->f_count);
+ long prior = atomic_long_fetch_inc_relaxed(&f->f_count);
+ WARN_ONCE(!prior, "struct file::f_count incremented from zero; use-after-free condition present!\n");
return f;
}
@@ -1177,7 +1174,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
#define SB_I_UNTRUSTED_MOUNTER 0x00000040
-#define SB_I_EVM_UNSUPPORTED 0x00000080
+#define SB_I_EVM_HMAC_UNSUPPORTED 0x00000080
#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
@@ -2003,8 +2000,11 @@ struct iov_iter;
struct io_uring_cmd;
struct offset_ctx;
+typedef unsigned int __bitwise fop_flags_t;
+
struct file_operations {
struct module *owner;
+ fop_flags_t fop_flags;
loff_t (*llseek) (struct file *, loff_t, int);
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
@@ -2017,7 +2017,6 @@ struct file_operations {
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
- unsigned long mmap_supported_flags;
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
@@ -2048,6 +2047,17 @@ struct file_operations {
unsigned int poll_flags);
} __randomize_layout;
+/* Supports async buffered reads */
+#define FOP_BUFFER_RASYNC ((__force fop_flags_t)(1 << 0))
+/* Supports async buffered writes */
+#define FOP_BUFFER_WASYNC ((__force fop_flags_t)(1 << 1))
+/* Supports synchronous page faults for mappings */
+#define FOP_MMAP_SYNC ((__force fop_flags_t)(1 << 2))
+/* Supports non-exclusive O_DIRECT writes from multiple threads */
+#define FOP_DIO_PARALLEL_WRITE ((__force fop_flags_t)(1 << 3))
+/* Contains huge pages */
+#define FOP_HUGE_PAGES ((__force fop_flags_t)(1 << 4))
+
/* Wrap a directory iterator that needs exclusive inode access */
int wrap_directory_iterator(struct file *, struct dir_context *,
int (*) (struct file *, struct dir_context *));
@@ -2253,7 +2263,13 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
+
+#ifdef CONFIG_SWAP
#define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE)
+#else
+#define IS_SWAPFILE(inode) ((void)(inode), 0U)
+#endif
+
#define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE)
#define IS_IMA(inode) ((inode)->i_flags & S_IMA)
#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
@@ -3336,6 +3352,8 @@ void simple_offset_init(struct offset_ctx *octx);
int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry);
void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry);
int simple_offset_empty(struct dentry *dentry);
+int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
int simple_offset_rename_exchange(struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
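The fs.h hunks above retire the per-open-file FMODE_BUF_RASYNC/FMODE_BUF_WASYNC bits and the mmap_supported_flags field in favour of a single per-file_operations fop_flags word. A minimal sketch of a driver advertising the new capabilities (my_fops and the handlers are hypothetical names):

	static const struct file_operations my_fops = {
		.owner		= THIS_MODULE,
		.fop_flags	= FOP_BUFFER_RASYNC | FOP_BUFFER_WASYNC,
		.read_iter	= my_read_iter,		/* hypothetical */
		.write_iter	= my_write_iter,	/* hypothetical */
	};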
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
index 01542c4b87a2..d3350979115f 100644
--- a/include/linux/fs_parser.h
+++ b/include/linux/fs_parser.h
@@ -132,4 +132,8 @@ static inline bool fs_validate_description(const char *name,
#define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0, NULL)
#define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0, NULL)
+/* String parameter that allows empty argument */
+#define fsparam_string_empty(NAME, OPT) \
+ __fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL)
+
#endif /* _LINUX_FS_PARSER_H */
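fsparam_string_empty() accepts a string option whose argument is empty (e.g. "-o source="), which fs_param_is_string alone would reject. A hedged sketch of a parameter table using it (the option names are illustrative):

	static const struct fs_parameter_spec my_fs_parameters[] = {
		fsparam_string_empty("source", Opt_source),	/* "" allowed */
		fsparam_flag("ro", Opt_ro),
		{}
	};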
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 6e8562cbcc43..9de27643607f 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -172,9 +172,12 @@ extern void __fscache_invalidate(struct fscache_cookie *, const void *, loff_t,
extern int __fscache_begin_read_operation(struct netfs_cache_resources *, struct fscache_cookie *);
extern int __fscache_begin_write_operation(struct netfs_cache_resources *, struct fscache_cookie *);
-extern void __fscache_write_to_cache(struct fscache_cookie *, struct address_space *,
- loff_t, size_t, loff_t, netfs_io_terminated_t, void *,
- bool);
+void __fscache_write_to_cache(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ loff_t start, size_t len, loff_t i_size,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv,
+ bool using_pgpriv2, bool cond);
extern void __fscache_clear_page_bits(struct address_space *, loff_t, size_t);
/**
@@ -597,7 +600,8 @@ static inline void fscache_clear_page_bits(struct address_space *mapping,
* @i_size: The new size of the inode
* @term_func: The function to call upon completion
* @term_func_priv: The private data for @term_func
- * @caching: If PG_fscache has been set
+ * @using_pgpriv2: If we're using PG_private_2 to mark in-progress write
+ * @caching: If we actually want to do the caching
*
* Helper function for a netfs to write dirty data from an inode into the cache
* object that's backing it.
@@ -608,19 +612,21 @@ static inline void fscache_clear_page_bits(struct address_space *mapping,
* marked with PG_fscache.
*
* If given, @term_func will be called upon completion and supplied with
- * @term_func_priv. Note that the PG_fscache flags will have been cleared by
- * this point, so the netfs must retain its own pin on the mapping.
+ * @term_func_priv. Note that if @using_pgpriv2 is set, the PG_private_2 flags
+ * will have been cleared by this point, so the netfs must retain its own pin
+ * on the mapping.
*/
static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
struct address_space *mapping,
loff_t start, size_t len, loff_t i_size,
netfs_io_terminated_t term_func,
void *term_func_priv,
- bool caching)
+ bool using_pgpriv2, bool caching)
{
if (caching)
__fscache_write_to_cache(cookie, mapping, start, len, i_size,
- term_func, term_func_priv, caching);
+ term_func, term_func_priv,
+ using_pgpriv2, caching);
else if (term_func)
term_func(term_func_priv, -ENOBUFS, false);
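The new @using_pgpriv2 argument makes the caller state explicitly whether PG_private_2 marks the write in progress, instead of inferring it from @caching. A minimal sketch of a netfs caller, assuming a hypothetical my_write_done() termination handler:

	fscache_write_to_cache(cookie, inode->i_mapping, start, len,
			       i_size_read(inode), my_write_done, ctx,
			       false,	/* not using PG_private_2 */
			       caching);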
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 54d53f345d14..e3a83ebd1b33 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -83,7 +83,6 @@ static inline void early_trace_init(void) { }
struct module;
struct ftrace_hash;
-struct ftrace_direct_func;
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
defined(CONFIG_DYNAMIC_FTRACE)
@@ -414,7 +413,6 @@ struct ftrace_func_entry {
};
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
-extern int ftrace_direct_func_count;
unsigned long ftrace_find_rec_direct(unsigned long ip);
int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
@@ -426,7 +424,6 @@ void ftrace_stub_direct_tramp(void);
#else
struct ftrace_ops;
-# define ftrace_direct_func_count 0
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
return 0;
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
deleted file mode 100644
index c285968e437a..000000000000
--- a/include/linux/genetlink.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_GENERIC_NETLINK_H
-#define __LINUX_GENERIC_NETLINK_H
-
-#include <uapi/linux/genetlink.h>
-
-
-/* All generic netlink requests are serialized by a global lock. */
-extern void genl_lock(void);
-extern void genl_unlock(void);
-
-/* for synchronisation between af_netlink and genetlink */
-extern atomic_t genl_sk_destructing_cnt;
-extern wait_queue_head_t genl_sk_destructing_waitq;
-
-#define MODULE_ALIAS_GENL_FAMILY(family)\
- MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)
-
-#endif /* __LINUX_GENERIC_NETLINK_H */
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
index a419d93789ff..621b87a87d74 100644
--- a/include/linux/genl_magic_struct.h
+++ b/include/linux/genl_magic_struct.h
@@ -15,8 +15,8 @@
#endif
#include <linux/args.h>
-#include <linux/genetlink.h>
#include <linux/types.h>
+#include <net/genetlink.h>
extern int CONCATENATE(GENL_MAGIC_FAMILY, _genl_register)(void);
extern void CONCATENATE(GENL_MAGIC_FAMILY, _genl_unregister)(void);
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 7ecc25c543ce..56ac7e7a2889 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -74,6 +74,12 @@ static inline bool gpio_is_valid(int number)
* Until they are all fixed, leave 0-512 space for them.
*/
#define GPIO_DYNAMIC_BASE 512
+/*
+ * Define the maximum number of possible GPIOs in the global numberspace.
+ * While GPIO bases and numbers are non-negative, the limit is the signed
+ * maximum, as a lot of code uses negative values for special cases.
+ */
+#define GPIO_DYNAMIC_MAX INT_MAX
/* Always use the library code for GPIO management calls,
* or when sleeping may be involved.
@@ -114,8 +120,6 @@ static inline int gpio_to_irq(unsigned gpio)
}
int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
-int gpio_request_array(const struct gpio *array, size_t num);
-void gpio_free_array(const struct gpio *array, size_t num);
/* CONFIG_GPIOLIB: bindings for managed devices that want to request gpios */
@@ -146,11 +150,6 @@ static inline int gpio_request_one(unsigned gpio,
return -ENOSYS;
}
-static inline int gpio_request_array(const struct gpio *array, size_t num)
-{
- return -ENOSYS;
-}
-
static inline void gpio_free(unsigned gpio)
{
might_sleep();
@@ -159,14 +158,6 @@ static inline void gpio_free(unsigned gpio)
WARN_ON(1);
}
-static inline void gpio_free_array(const struct gpio *array, size_t num)
-{
- might_sleep();
-
- /* GPIO can never have been requested */
- WARN_ON(1);
-}
-
static inline int gpio_direction_input(unsigned gpio)
{
return -ENOSYS;
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index f8617eaf08ba..0032bb6e7d8f 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -376,9 +376,7 @@ struct gpio_irq_chip {
* @names: if set, must be an array of strings to use as alternative
* names for the GPIOs in this chip. Any entry in the array
* may be NULL if there is no alias for the GPIO, however the
- * array must be @ngpio entries long. A name can include a single printk
- * format specifier for an unsigned int. It is substituted by the actual
- * number of the gpio.
+ * array must be @ngpio entries long.
* @can_sleep: flag must be set iff get()/set() methods sleep, as they
* must while accessing GPIO expander chips over I2C or SPI. This
* implies that if the chip supports IRQs, these IRQs need to be threaded
diff --git a/include/linux/gpio/property.h b/include/linux/gpio/property.h
index 6c75c8bd44a0..0d2209308002 100644
--- a/include/linux/gpio/property.h
+++ b/include/linux/gpio/property.h
@@ -2,10 +2,13 @@
#ifndef __LINUX_GPIO_PROPERTY_H
#define __LINUX_GPIO_PROPERTY_H
-#include <dt-bindings/gpio/gpio.h> /* for GPIO_* flags */
#include <linux/property.h>
+struct software_node;
+
#define PROPERTY_ENTRY_GPIO(_name_, _chip_node_, _idx_, _flags_) \
PROPERTY_ENTRY_REF(_name_, _chip_node_, _idx_, _flags_)
+extern const struct software_node swnode_gpio_undefined;
+
#endif /* __LINUX_GPIO_PROPERTY_H */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index b12cb1c8e682..8e06d89698e6 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -474,9 +474,9 @@ struct hid_usage {
__s8 wheel_factor; /* 120/resolution_multiplier */
__u16 code; /* input driver code */
__u8 type; /* input driver type */
- __s8 hat_min; /* hat switch fun */
- __s8 hat_max; /* ditto */
- __s8 hat_dir; /* ditto */
+ __s16 hat_min; /* hat switch fun */
+ __s16 hat_max; /* ditto */
+ __s16 hat_dir; /* ditto */
__s16 wheel_accumulated; /* hi-res wheel */
};
diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h
index ca70eeb6393e..eec2592dec12 100644
--- a/include/linux/hid_bpf.h
+++ b/include/linux/hid_bpf.h
@@ -103,6 +103,9 @@ struct hid_bpf_ops {
unsigned char reportnum, __u8 *buf,
size_t len, enum hid_report_type rtype,
enum hid_class_request reqtype);
+ int (*hid_hw_output_report)(struct hid_device *hdev, __u8 *buf, size_t len);
+ int (*hid_input_report)(struct hid_device *hid, enum hid_report_type type,
+ u8 *data, u32 size, int interrupt);
struct module *owner;
const struct bus_type *bus_type;
};
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 68244bb3637a..2b3c3a404769 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -526,17 +526,13 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}
-extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
int creat_flags, int page_size_log);
-static inline bool is_file_hugepages(struct file *file)
+static inline bool is_file_hugepages(const struct file *file)
{
- if (file->f_op == &hugetlbfs_file_operations)
- return true;
-
- return is_file_shm_hugepages(file);
+ return file->f_op->fop_flags & FOP_HUGE_PAGES;
}
static inline struct hstate *hstate_inode(struct inode *i)
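is_file_hugepages() no longer compares f_op against hugetlbfs_file_operations (or falls back to the shm check); any file whose file_operations carry FOP_HUGE_PAGES qualifies. The expectation, sketched here rather than quoted from the patch, is that hugetlbfs and SysV shm set the flag themselves:

	const struct file_operations hugetlbfs_file_operations = {
		.fop_flags	= FOP_HUGE_PAGES,
		/* ... remaining methods unchanged ... */
	};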
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 3385a2cc5b09..de2dce743ee2 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1287,6 +1287,24 @@ struct ieee80211_ttlm_elem {
u8 optional[];
} __packed;
+/**
+ * struct ieee80211_bss_load_elem - BSS Load element
+ *
+ * Defined in section 9.4.2.26 in IEEE 802.11-REVme D4.1
+ *
+ * @sta_count: total number of STAs currently associated with the AP.
+ * @channel_util: fraction of time that the access point sensed the channel
+ *	was busy, scaled to the range [0, 255]; 255 means 100% busy.
+ * @avail_admission_capa: remaining amount of medium time used for admission
+ * control.
+ */
+struct ieee80211_bss_load_elem {
+ __le16 sta_count;
+ u8 channel_util;
+ __le16 avail_admission_capa;
+} __packed;
+
struct ieee80211_mgmt {
__le16 frame_control;
__le16 duration;
@@ -2742,9 +2760,11 @@ static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len)
#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
-#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
-#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1
-#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2
+#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
+#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1
+#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2
+#define IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP 3
+#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP 4
/**
* struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
@@ -5166,7 +5186,7 @@ static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len)
bool check_common_len = false;
u16 control;
- if (len < fixed)
+ if (!data || len < fixed)
return false;
control = le16_to_cpu(mle->control);
@@ -5302,7 +5322,7 @@ static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data,
info_len += 1;
return prof->sta_info_len >= info_len &&
- fixed + prof->sta_info_len <= len;
+ fixed + prof->sta_info_len - 1 <= len;
}
/**
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index 459b79683783..f5842372359b 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -8,6 +8,7 @@
#define _LINUX_INTEGRITY_H
#include <linux/fs.h>
+#include <linux/iversion.h>
enum integrity_status {
INTEGRITY_PASS = 0,
@@ -28,4 +29,37 @@ static inline void integrity_load_keys(void)
}
#endif /* CONFIG_INTEGRITY */
+/* An inode's attributes for detection of changes */
+struct integrity_inode_attributes {
+ u64 version; /* track inode changes */
+ unsigned long ino;
+ dev_t dev;
+};
+
+/*
+ * On stacked filesystems, i_version alone is not enough to detect changes to
+ * file data or metadata. Additional metadata is required.
+ */
+static inline void
+integrity_inode_attrs_store(struct integrity_inode_attributes *attrs,
+ u64 i_version, const struct inode *inode)
+{
+ attrs->version = i_version;
+ attrs->dev = inode->i_sb->s_dev;
+ attrs->ino = inode->i_ino;
+}
+
+/*
+ * On stacked filesystems detect whether the inode or its content has changed.
+ */
+static inline bool
+integrity_inode_attrs_changed(const struct integrity_inode_attributes *attrs,
+ const struct inode *inode)
+{
+ return (inode->i_sb->s_dev != attrs->dev ||
+ inode->i_ino != attrs->ino ||
+ !inode_eq_iversion(inode, attrs->version));
+}
+
+
#endif /* _LINUX_INTEGRITY_H */
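A short sketch of the intended pattern around the two helpers: record the attributes when a file is measured, then compare them before trusting a cached result (the surrounding variables are hypothetical):

	struct integrity_inode_attributes attrs;

	integrity_inode_attrs_store(&attrs, inode_query_iversion(inode), inode);
	/* ... later, before reusing a cached verification status ... */
	if (integrity_inode_attrs_changed(&attrs, inode))
		status = INTEGRITY_UNKNOWN;	/* force re-measurement */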
diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h
index f3196f82fd8a..c0397423d3a8 100644
--- a/include/linux/intel_rapl.h
+++ b/include/linux/intel_rapl.h
@@ -158,6 +158,26 @@ struct rapl_if_priv {
void *rpi;
};
+#ifdef CONFIG_PERF_EVENTS
+/**
+ * struct rapl_package_pmu_data: Per package data for PMU support
+ * @scale: Scale of 2^-32 Joules for each energy counter increase.
+ * @lock: Lock to protect n_active and active_list.
+ * @n_active: Number of active events.
+ * @active_list: List of active events.
+ * @timer_interval: Maximum timer expiration time before counter overflow.
+ * @hrtimer: Periodically update the counter to prevent overflow.
+ */
+struct rapl_package_pmu_data {
+ u64 scale[RAPL_DOMAIN_MAX];
+ raw_spinlock_t lock;
+ int n_active;
+ struct list_head active_list;
+ ktime_t timer_interval;
+ struct hrtimer hrtimer;
+};
+#endif
+
/* maximum rapl package domain name: package-%d-die-%d */
#define PACKAGE_DOMAIN_NAME_LENGTH 30
@@ -176,6 +196,10 @@ struct rapl_package {
struct cpumask cpumask;
char name[PACKAGE_DOMAIN_NAME_LENGTH];
struct rapl_if_priv *priv;
+#ifdef CONFIG_PERF_EVENTS
+ bool has_pmu;
+ struct rapl_package_pmu_data pmu_data;
+#endif
};
struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv,
@@ -188,4 +212,12 @@ struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv,
struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu);
void rapl_remove_package(struct rapl_package *rp);
+#ifdef CONFIG_PERF_EVENTS
+int rapl_package_add_pmu(struct rapl_package *rp);
+void rapl_package_remove_pmu(struct rapl_package *rp);
+#else
+static inline int rapl_package_add_pmu(struct rapl_package *rp) { return 0; }
+static inline void rapl_package_remove_pmu(struct rapl_package *rp) { }
+#endif
+
#endif /* __INTEL_RAPL_H__ */
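A hedged sketch of how an interface driver might wire the new PMU hooks into package setup and teardown (error handling trimmed; dev is a hypothetical device pointer):

	rp = rapl_add_package(id, priv, true);
	if (!IS_ERR(rp) && rapl_package_add_pmu(rp))
		dev_warn(dev, "RAPL PMU registration failed\n");
	/* ... and on removal ... */
	rapl_package_remove_pmu(rp);
	rapl_remove_package(rp);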
diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h
index a3529b962be6..1e880cb0f454 100644
--- a/include/linux/intel_tpmi.h
+++ b/include/linux/intel_tpmi.h
@@ -27,16 +27,22 @@ enum intel_tpmi_id {
/**
* struct intel_tpmi_plat_info - Platform information for a TPMI device instance
- * @package_id: CPU Package id
- * @bus_number: PCI bus number
- * @device_number: PCI device number
+ * @cdie_mask: Mask of all compute dies in the partition
+ * @package_id: CPU Package id
+ * @partition:	Package partition id when there are multiple VSEC PCI devices per package
+ * @segment: PCI segment ID
+ * @bus_number: PCI bus number
+ * @device_number: PCI device number
* @function_number: PCI function number
*
* Structure to store platform data for a TPMI device instance. This
* struct is used to return data via tpmi_get_platform_data().
*/
struct intel_tpmi_plat_info {
+ u16 cdie_mask;
u8 package_id;
+ u8 partition;
+ u8 segment;
u8 bus_number;
u8 device_number;
u8 function_number;
diff --git a/include/linux/io.h b/include/linux/io.h
index e8ee40ee1843..59ec5eea696c 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -17,9 +17,15 @@
struct device;
struct resource;
-__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
+#ifndef __iowrite32_copy
+void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
+#endif
+
void __ioread32_copy(void *to, const void __iomem *from, size_t count);
+
+#ifndef __iowrite64_copy
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
+#endif
#ifdef CONFIG_MMU
int ioremap_page_range(unsigned long addr, unsigned long end,
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 68ed6697fece..e123d5e17b52 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -11,7 +11,6 @@ void __io_uring_cancel(bool cancel_all);
void __io_uring_free(struct task_struct *tsk);
void io_uring_unreg_ringfd(void);
const char *io_uring_get_opcode(u8 opcode);
-int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
bool io_is_uring_fops(struct file *file);
static inline void io_uring_files_cancel(void)
@@ -45,11 +44,6 @@ static inline const char *io_uring_get_opcode(u8 opcode)
{
return "";
}
-static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
-{
- return -EOPNOTSUPP;
-}
static inline bool io_is_uring_fops(struct file *file)
{
return false;
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index e453a997c060..447fbfd32215 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -26,12 +26,25 @@ static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
struct iov_iter *iter, void *ioucmd);
+
+/*
+ * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
+ * and the corresponding io_uring request.
+ *
+ * Note: the caller should never hard code @issue_flags and is only allowed
+ * to pass the mask provided by the core io_uring code.
+ */
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
unsigned issue_flags);
+
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
void (*task_work_cb)(struct io_uring_cmd *, unsigned),
unsigned flags);
+/*
+ * Note: the caller should never hard code @issue_flags and only use the
+ * mask provided by the core io_uring code.
+ */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
unsigned int issue_flags);
@@ -56,6 +69,17 @@ static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
}
#endif
+/*
+ * Polled completions must ensure they are coming from a poll queue, and
+ * hence are completed inside the usual poll handling loops.
+ */
+static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
+ ssize_t ret, ssize_t res2)
+{
+ lockdep_assert(in_task());
+ io_uring_cmd_done(ioucmd, ret, res2, 0);
+}
+
/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
void (*task_work_cb)(struct io_uring_cmd *, unsigned))
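The issue_flags notes above translate to ->uring_cmd() implementations passing through whatever mask the core handed them. A minimal sketch of a synchronous completion (my_handle_cmd is hypothetical):

	static int my_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
	{
		int ret = my_handle_cmd(ioucmd);	/* hypothetical */

		io_uring_cmd_done(ioucmd, ret, 0, issue_flags);	/* never hard-coded */
		return -EIOCBQUEUED;
	}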
diff --git a/include/linux/io_uring/net.h b/include/linux/io_uring/net.h
new file mode 100644
index 000000000000..b58f39fed4d5
--- /dev/null
+++ b/include/linux/io_uring/net.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_IO_URING_NET_H
+#define _LINUX_IO_URING_NET_H
+
+struct io_uring_cmd;
+
+#if defined(CONFIG_IO_URING)
+int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
+
+#else
+static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index ac333ea81d31..7a6b190c7da7 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -205,6 +205,7 @@ struct io_submit_state {
bool plug_started;
bool need_plug;
+ bool cq_flush;
unsigned short submit_nr;
unsigned int cqes_count;
struct blk_plug plug;
@@ -219,7 +220,7 @@ struct io_ev_fd {
};
struct io_alloc_cache {
- struct io_wq_work_node list;
+ void **entries;
unsigned int nr_cached;
unsigned int max_cached;
size_t elem_size;
@@ -299,6 +300,8 @@ struct io_ring_ctx {
struct io_hash_table cancel_table_locked;
struct io_alloc_cache apoll_cache;
struct io_alloc_cache netmsg_cache;
+ struct io_alloc_cache rw_cache;
+ struct io_alloc_cache uring_cache;
/*
* Any cancelable uring_cmd is added to this list in
@@ -341,14 +344,8 @@ struct io_ring_ctx {
unsigned cq_last_tm_flush;
} ____cacheline_aligned_in_smp;
- struct io_uring_cqe completion_cqes[16];
-
spinlock_t completion_lock;
- /* IRQ completion list, under ->completion_lock */
- unsigned int locked_free_nr;
- struct io_wq_work_list locked_free_list;
-
struct list_head io_buffers_comp;
struct list_head cq_overflow_list;
struct io_hash_table cancel_table;
@@ -371,9 +368,6 @@ struct io_ring_ctx {
struct list_head io_buffers_cache;
- /* deferred free list, protected by ->uring_lock */
- struct hlist_head io_buf_list;
-
/* Keep this last, we don't need it for the fast path */
struct wait_queue_head poll_wq;
struct io_restriction restrictions;
@@ -438,8 +432,6 @@ struct io_ring_ctx {
};
struct io_tw_state {
- /* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */
- bool locked;
};
enum {
@@ -480,6 +472,7 @@ enum {
REQ_F_CAN_POLL_BIT,
REQ_F_BL_EMPTY_BIT,
REQ_F_BL_NO_RECYCLE_BIT,
+ REQ_F_BUFFERS_COMMIT_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
@@ -558,6 +551,8 @@ enum {
REQ_F_BL_EMPTY = IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT),
/* don't recycle provided buffers for this request */
REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
+ /* buffer ring head needs incrementing on put */
+ REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
};
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 2e925b5eba53..7bc8dff7cf6d 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -69,8 +69,7 @@ enum iommu_fault_type {
struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1)
-#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA (1 << 2)
-#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 3)
+#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 2)
u32 flags;
u32 pasid;
u32 grpid;
@@ -518,6 +517,7 @@ static inline int __iommu_copy_struct_from_user_array(
* Upon failure, ERR_PTR must be returned.
* @domain_alloc_paging: Allocate an iommu_domain that can be used for
* UNMANAGED, DMA, and DMA_FQ domain types.
+ * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
* @probe_device: Add device to iommu driver handling
* @release_device: Remove device from iommu driver handling
* @probe_finalize: Do final setup work after the device is added to an IOMMU
@@ -558,6 +558,8 @@ struct iommu_ops {
struct device *dev, u32 flags, struct iommu_domain *parent,
const struct iommu_user_data *user_data);
struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
+ struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
+ struct mm_struct *mm);
struct iommu_device *(*probe_device)(struct device *dev);
void (*release_device)(struct device *dev);
@@ -578,7 +580,8 @@ struct iommu_ops {
struct iommu_page_response *msg);
int (*def_domain_type)(struct device *dev);
- void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid);
+ void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid,
+ struct iommu_domain *domain);
const struct iommu_domain_ops *default_domain_ops;
unsigned long pgsize_bitmap;
@@ -1445,9 +1448,6 @@ static inline void iommu_debugfs_setup(void) {}
#ifdef CONFIG_IOMMU_DMA
#include <linux/msi.h>
-/* Setup call for arch DMA mapping code */
-void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
-
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
@@ -1458,10 +1458,6 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);
struct msi_desc;
struct msi_msg;
-static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
-{
-}
-
static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
return -ENODEV;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 97baa937ab5b..a217e1029c1d 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -115,7 +115,7 @@ enum {
* Return value for chip->irq_set_affinity()
*
* IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity
- * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity
+ * IRQ_SET_MASK_NOCOPY - OK, chip did update irq_common_data.affinity
* IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
* support stacked irqchips, which indicates skipping
* all descendant irqchips.
diff --git a/include/linux/irqchip/riscv-aplic.h b/include/linux/irqchip/riscv-aplic.h
new file mode 100644
index 000000000000..ec8f7df50583
--- /dev/null
+++ b/include/linux/irqchip/riscv-aplic.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+#ifndef __LINUX_IRQCHIP_RISCV_APLIC_H
+#define __LINUX_IRQCHIP_RISCV_APLIC_H
+
+#include <linux/bitops.h>
+
+#define APLIC_MAX_IDC BIT(14)
+#define APLIC_MAX_SOURCE 1024
+
+#define APLIC_DOMAINCFG 0x0000
+#define APLIC_DOMAINCFG_RDONLY 0x80000000
+#define APLIC_DOMAINCFG_IE BIT(8)
+#define APLIC_DOMAINCFG_DM BIT(2)
+#define APLIC_DOMAINCFG_BE BIT(0)
+
+#define APLIC_SOURCECFG_BASE 0x0004
+#define APLIC_SOURCECFG_D BIT(10)
+#define APLIC_SOURCECFG_CHILDIDX_MASK 0x000003ff
+#define APLIC_SOURCECFG_SM_MASK 0x00000007
+#define APLIC_SOURCECFG_SM_INACTIVE 0x0
+#define APLIC_SOURCECFG_SM_DETACH 0x1
+#define APLIC_SOURCECFG_SM_EDGE_RISE 0x4
+#define APLIC_SOURCECFG_SM_EDGE_FALL 0x5
+#define APLIC_SOURCECFG_SM_LEVEL_HIGH 0x6
+#define APLIC_SOURCECFG_SM_LEVEL_LOW 0x7
+
+#define APLIC_MMSICFGADDR 0x1bc0
+#define APLIC_MMSICFGADDRH 0x1bc4
+#define APLIC_SMSICFGADDR 0x1bc8
+#define APLIC_SMSICFGADDRH 0x1bcc
+
+#ifdef CONFIG_RISCV_M_MODE
+#define APLIC_xMSICFGADDR APLIC_MMSICFGADDR
+#define APLIC_xMSICFGADDRH APLIC_MMSICFGADDRH
+#else
+#define APLIC_xMSICFGADDR APLIC_SMSICFGADDR
+#define APLIC_xMSICFGADDRH APLIC_SMSICFGADDRH
+#endif
+
+#define APLIC_xMSICFGADDRH_L BIT(31)
+#define APLIC_xMSICFGADDRH_HHXS_MASK 0x1f
+#define APLIC_xMSICFGADDRH_HHXS_SHIFT 24
+#define APLIC_xMSICFGADDRH_HHXS (APLIC_xMSICFGADDRH_HHXS_MASK << \
+ APLIC_xMSICFGADDRH_HHXS_SHIFT)
+#define APLIC_xMSICFGADDRH_LHXS_MASK 0x7
+#define APLIC_xMSICFGADDRH_LHXS_SHIFT 20
+#define APLIC_xMSICFGADDRH_LHXS (APLIC_xMSICFGADDRH_LHXS_MASK << \
+ APLIC_xMSICFGADDRH_LHXS_SHIFT)
+#define APLIC_xMSICFGADDRH_HHXW_MASK 0x7
+#define APLIC_xMSICFGADDRH_HHXW_SHIFT 16
+#define APLIC_xMSICFGADDRH_HHXW (APLIC_xMSICFGADDRH_HHXW_MASK << \
+ APLIC_xMSICFGADDRH_HHXW_SHIFT)
+#define APLIC_xMSICFGADDRH_LHXW_MASK 0xf
+#define APLIC_xMSICFGADDRH_LHXW_SHIFT 12
+#define APLIC_xMSICFGADDRH_LHXW (APLIC_xMSICFGADDRH_LHXW_MASK << \
+ APLIC_xMSICFGADDRH_LHXW_SHIFT)
+#define APLIC_xMSICFGADDRH_BAPPN_MASK 0xfff
+#define APLIC_xMSICFGADDRH_BAPPN_SHIFT 0
+#define APLIC_xMSICFGADDRH_BAPPN (APLIC_xMSICFGADDRH_BAPPN_MASK << \
+ APLIC_xMSICFGADDRH_BAPPN_SHIFT)
+
+#define APLIC_xMSICFGADDR_PPN_SHIFT 12
+
+#define APLIC_xMSICFGADDR_PPN_HART(__lhxs) \
+ (BIT(__lhxs) - 1)
+
+#define APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) \
+ (BIT(__lhxw) - 1)
+#define APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs) \
+ ((__lhxs))
+#define APLIC_xMSICFGADDR_PPN_LHX(__lhxw, __lhxs) \
+ (APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) << \
+ APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs))
+
+#define APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) \
+ (BIT(__hhxw) - 1)
+#define APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs) \
+ ((__hhxs) + APLIC_xMSICFGADDR_PPN_SHIFT)
+#define APLIC_xMSICFGADDR_PPN_HHX(__hhxw, __hhxs) \
+ (APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) << \
+ APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs))
+
+#define APLIC_IRQBITS_PER_REG 32
+
+#define APLIC_SETIP_BASE 0x1c00
+#define APLIC_SETIPNUM 0x1cdc
+
+#define APLIC_CLRIP_BASE 0x1d00
+#define APLIC_CLRIPNUM 0x1ddc
+
+#define APLIC_SETIE_BASE 0x1e00
+#define APLIC_SETIENUM 0x1edc
+
+#define APLIC_CLRIE_BASE 0x1f00
+#define APLIC_CLRIENUM 0x1fdc
+
+#define APLIC_SETIPNUM_LE 0x2000
+#define APLIC_SETIPNUM_BE 0x2004
+
+#define APLIC_GENMSI 0x3000
+
+#define APLIC_TARGET_BASE 0x3004
+#define APLIC_TARGET_HART_IDX_SHIFT 18
+#define APLIC_TARGET_HART_IDX_MASK 0x3fff
+#define APLIC_TARGET_HART_IDX (APLIC_TARGET_HART_IDX_MASK << \
+ APLIC_TARGET_HART_IDX_SHIFT)
+#define APLIC_TARGET_GUEST_IDX_SHIFT 12
+#define APLIC_TARGET_GUEST_IDX_MASK 0x3f
+#define APLIC_TARGET_GUEST_IDX (APLIC_TARGET_GUEST_IDX_MASK << \
+ APLIC_TARGET_GUEST_IDX_SHIFT)
+#define APLIC_TARGET_IPRIO_SHIFT 0
+#define APLIC_TARGET_IPRIO_MASK 0xff
+#define APLIC_TARGET_IPRIO (APLIC_TARGET_IPRIO_MASK << \
+ APLIC_TARGET_IPRIO_SHIFT)
+#define APLIC_TARGET_EIID_SHIFT 0
+#define APLIC_TARGET_EIID_MASK 0x7ff
+#define APLIC_TARGET_EIID (APLIC_TARGET_EIID_MASK << \
+ APLIC_TARGET_EIID_SHIFT)
+
+#define APLIC_IDC_BASE 0x4000
+#define APLIC_IDC_SIZE 32
+
+#define APLIC_IDC_IDELIVERY 0x00
+
+#define APLIC_IDC_IFORCE 0x04
+
+#define APLIC_IDC_ITHRESHOLD 0x08
+
+#define APLIC_IDC_TOPI 0x18
+#define APLIC_IDC_TOPI_ID_SHIFT 16
+#define APLIC_IDC_TOPI_ID_MASK 0x3ff
+#define APLIC_IDC_TOPI_ID (APLIC_IDC_TOPI_ID_MASK << \
+ APLIC_IDC_TOPI_ID_SHIFT)
+#define APLIC_IDC_TOPI_PRIO_SHIFT 0
+#define APLIC_IDC_TOPI_PRIO_MASK 0xff
+#define APLIC_IDC_TOPI_PRIO (APLIC_IDC_TOPI_PRIO_MASK << \
+ APLIC_IDC_TOPI_PRIO_SHIFT)
+
+#define APLIC_IDC_CLAIMI 0x1c
+
+#endif
diff --git a/include/linux/irqchip/riscv-imsic.h b/include/linux/irqchip/riscv-imsic.h
new file mode 100644
index 000000000000..faf0b800b1b0
--- /dev/null
+++ b/include/linux/irqchip/riscv-imsic.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+#ifndef __LINUX_IRQCHIP_RISCV_IMSIC_H
+#define __LINUX_IRQCHIP_RISCV_IMSIC_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <asm/csr.h>
+
+#define IMSIC_MMIO_PAGE_SHIFT 12
+#define IMSIC_MMIO_PAGE_SZ BIT(IMSIC_MMIO_PAGE_SHIFT)
+#define IMSIC_MMIO_PAGE_LE 0x00
+#define IMSIC_MMIO_PAGE_BE 0x04
+
+#define IMSIC_MIN_ID 63
+#define IMSIC_MAX_ID 2048
+
+#define IMSIC_EIDELIVERY 0x70
+
+#define IMSIC_EITHRESHOLD 0x72
+
+#define IMSIC_EIP0 0x80
+#define IMSIC_EIP63 0xbf
+#define IMSIC_EIPx_BITS 32
+
+#define IMSIC_EIE0 0xc0
+#define IMSIC_EIE63 0xff
+#define IMSIC_EIEx_BITS 32
+
+#define IMSIC_FIRST IMSIC_EIDELIVERY
+#define IMSIC_LAST IMSIC_EIE63
+
+#define IMSIC_MMIO_SETIPNUM_LE 0x00
+#define IMSIC_MMIO_SETIPNUM_BE 0x04
+
+struct imsic_local_config {
+ phys_addr_t msi_pa;
+ void __iomem *msi_va;
+};
+
+struct imsic_global_config {
+ /*
+ * MSI Target Address Scheme
+ *
+ * XLEN-1 12 0
+ * | | |
+ * -------------------------------------------------------------
+ * |xxxxxx|Group Index|xxxxxxxxxxx|HART Index|Guest Index| 0 |
+ * -------------------------------------------------------------
+ */
+
+ /* Bits representing Guest index, HART index, and Group index */
+ u32 guest_index_bits;
+ u32 hart_index_bits;
+ u32 group_index_bits;
+ u32 group_index_shift;
+
+ /* Global base address matching all target MSI addresses */
+ phys_addr_t base_addr;
+
+ /* Number of interrupt identities */
+ u32 nr_ids;
+
+ /* Number of guest interrupt identities */
+ u32 nr_guest_ids;
+
+ /* Per-CPU IMSIC addresses */
+ struct imsic_local_config __percpu *local;
+};
+
+#ifdef CONFIG_RISCV_IMSIC
+
+const struct imsic_global_config *imsic_get_global_config(void);
+
+#else
+
+static inline const struct imsic_global_config *imsic_get_global_config(void)
+{
+ return NULL;
+}
+
+#endif
+
+#endif
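Reading the diagram back as arithmetic: a target MSI page address stacks the guest, HART and group indexes above the 4 KiB page offset. A sketch of the computation, assuming gcfg = imsic_get_global_config() and in-range indexes (this helper is not part of the header):

	phys_addr_t msi_pa = gcfg->base_addr |
		((phys_addr_t)group << gcfg->group_index_shift) |
		((phys_addr_t)hart << (gcfg->guest_index_bits + IMSIC_MMIO_PAGE_SHIFT)) |
		((phys_addr_t)guest << IMSIC_MMIO_PAGE_SHIFT);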
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index d9451d456a73..fd091c35d572 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -18,6 +18,18 @@ struct irq_domain;
struct pt_regs;
/**
+ * struct irqstat - interrupt statistics
+ * @cnt: real-time interrupt count
+ * @ref: snapshot of interrupt count
+ */
+struct irqstat {
+ unsigned int cnt;
+#ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT
+ unsigned int ref;
+#endif
+};
+
+/**
* struct irq_desc - interrupt descriptor
* @irq_common_data: per irq and chip data passed down to chip functions
* @kstat_irqs: irq stats per cpu
@@ -55,7 +67,7 @@ struct pt_regs;
struct irq_desc {
struct irq_common_data irq_common_data;
struct irq_data irq_data;
- unsigned int __percpu *kstat_irqs;
+ struct irqstat __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
struct irqaction *action; /* IRQ action list */
unsigned int status_use_accessors;
@@ -119,7 +131,7 @@ extern struct irq_desc irq_desc[NR_IRQS];
static inline unsigned int irq_desc_kstat_cpu(struct irq_desc *desc,
unsigned int cpu)
{
- return desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
+ return desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0;
}
static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 9512fe332668..d8787a3c6a9e 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1434,7 +1434,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
extern void jbd2_journal_commit_transaction(journal_t *);
/* Checkpoint list management */
-void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
+enum jbd2_shrink_type {JBD2_SHRINK_DESTROY, JBD2_SHRINK_BUSY_STOP, JBD2_SHRINK_BUSY_SKIP};
+
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal, enum jbd2_shrink_type type);
unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan);
int __jbd2_journal_remove_checkpoint(struct journal_head *);
int jbd2_journal_try_remove_checkpoint(struct journal_head *jh);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index f0a949b7c973..f5a2727ca4a9 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -216,6 +216,7 @@ extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];
extern void jump_label_init(void);
+extern void jump_label_init_ro(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
@@ -265,6 +266,8 @@ static __always_inline void jump_label_init(void)
static_key_initialized = true;
}
+static __always_inline void jump_label_init_ro(void) { }
+
static __always_inline bool static_key_false(struct static_key *key)
{
if (unlikely_notrace(static_key_count(key) > 0))
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 9935f7ecbfb9..9c042c6384bb 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -79,6 +79,14 @@ static inline unsigned int kstat_cpu_softirqs_sum(int cpu)
return sum;
}
+#ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT
+extern void kstat_snapshot_irqs(void);
+extern unsigned int kstat_get_irq_since_snapshot(unsigned int irq);
+#else
+static inline void kstat_snapshot_irqs(void) { }
+static inline unsigned int kstat_get_irq_since_snapshot(unsigned int irq) { return 0; }
+#endif
+
/*
* Number of interrupts per specific IRQ source, since bootup
*/
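The snapshot pair is aimed at consumers such as interrupt-storm detection: take a snapshot, wait out a sampling window, then read per-IRQ deltas. A minimal sketch (the window and threshold are hypothetical):

	kstat_snapshot_irqs();
	msleep(sample_ms);	/* hypothetical sampling window */
	if (kstat_get_irq_since_snapshot(irq) > threshold)
		pr_warn("irq %u: possible interrupt storm\n", irq);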
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 060835bb82d5..5b93a5767413 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -319,8 +319,10 @@ struct kimage {
/* If set, we are using file mode kexec syscall */
unsigned int file_mode:1;
#ifdef CONFIG_CRASH_HOTPLUG
- /* If set, allow changes to elfcorehdr of kexec_load'd image */
- unsigned int update_elfcorehdr:1;
+ /* If set, it is safe to update kexec segments that are
+ * excluded from SHA calculation.
+ */
+ unsigned int hotplug_support:1;
#endif
#ifdef ARCH_HAS_KIMAGE_ARCH
@@ -391,9 +393,10 @@ bool kexec_load_permitted(int kexec_image_type);
/* List of defined/legal kexec flags */
#ifndef CONFIG_KEXEC_JUMP
-#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_UPDATE_ELFCOREHDR)
+#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_UPDATE_ELFCOREHDR | KEXEC_CRASH_HOTPLUG_SUPPORT)
#else
-#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT | KEXEC_UPDATE_ELFCOREHDR)
+#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT | KEXEC_UPDATE_ELFCOREHDR | \
+ KEXEC_CRASH_HOTPLUG_SUPPORT)
#endif
/* List of defined/legal kexec file flags */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 0ff44d6633e3..5fcbc254d186 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -378,11 +378,15 @@ static inline void wait_for_kprobe_optimizer(void) { }
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct ftrace_regs *fregs);
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
+/* Set when ftrace has been killed: kprobes on ftrace must be disabled for safety */
+extern bool kprobe_ftrace_disabled __read_mostly;
+extern void kprobe_ftrace_kill(void);
#else
static inline int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
return -EINVAL;
}
+static inline void kprobe_ftrace_kill(void) {}
#endif /* CONFIG_KPROBES_ON_FTRACE */
/* Get the kprobe at this addr (if any) - called with preemption disabled */
@@ -495,6 +499,9 @@ static inline void kprobe_flush_task(struct task_struct *tk)
static inline void kprobe_free_init_mem(void)
{
}
+static inline void kprobe_ftrace_kill(void)
+{
+}
static inline int disable_kprobe(struct kprobe *kp)
{
return -EOPNOTSUPP;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 48f31dcd318a..692c01e41a18 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -193,8 +193,6 @@ static inline bool is_error_page(struct page *page)
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
unsigned long *vcpu_bitmap);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
-bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
- struct kvm_vcpu *except);
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -259,7 +257,6 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
union kvm_mmu_notifier_arg {
- pte_t pte;
unsigned long attributes;
};
@@ -273,7 +270,6 @@ struct kvm_gfn_range {
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
-bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
#endif
enum {
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index d93f6522b2c3..827ecc0b7e10 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -86,6 +86,7 @@ struct gfn_to_pfn_cache {
struct kvm_mmu_memory_cache {
gfp_t gfp_zero;
gfp_t gfp_custom;
+ u64 init_value;
struct kmem_cache *kmem_cache;
int capacity;
int nobjs;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 324d792e7c78..13fb41d25da6 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1152,12 +1152,19 @@ extern int ata_std_bios_param(struct scsi_device *sdev,
sector_t capacity, int geom[]);
extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
extern int ata_scsi_slave_alloc(struct scsi_device *sdev);
-extern int ata_scsi_slave_config(struct scsi_device *sdev);
+int ata_scsi_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth);
extern int ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
int queue_depth);
+extern int ata_ncq_prio_supported(struct ata_port *ap, struct scsi_device *sdev,
+ bool *supported);
+extern int ata_ncq_prio_enabled(struct ata_port *ap, struct scsi_device *sdev,
+ bool *enabled);
+extern int ata_ncq_prio_enable(struct ata_port *ap, struct scsi_device *sdev,
+ bool enable);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
@@ -1244,7 +1251,8 @@ extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
extern void ata_port_probe(struct ata_port *ap);
extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
extern void ata_sas_tport_delete(struct ata_port *ap);
-extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
+int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_port *ap);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
extern void ata_tf_to_fis(const struct ata_taskfile *tf,
u8 pmp, int is_cmd, u8 *fis);
@@ -1410,13 +1418,13 @@ extern const struct attribute_group *ata_common_sdev_groups[];
__ATA_BASE_SHT(drv_name), \
.can_queue = ATA_DEF_QUEUE, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
- .slave_configure = ata_scsi_slave_config
+ .device_configure = ata_scsi_device_configure
#define ATA_SUBBASE_SHT_QD(drv_name, drv_qd) \
__ATA_BASE_SHT(drv_name), \
.can_queue = drv_qd, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
- .slave_configure = ata_scsi_slave_config
+ .device_configure = ata_scsi_device_configure
#define ATA_BASE_SHT(drv_name) \
ATA_SUBBASE_SHT(drv_name), \
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
index 287f590ed56b..d94bfd9ac8cc 100644
--- a/include/linux/linkmode.h
+++ b/include/linux/linkmode.h
@@ -43,29 +43,10 @@ static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1,
return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static inline void linkmode_set_bit(int nr, volatile unsigned long *addr)
-{
- __set_bit(nr, addr);
-}
-
-static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr)
-{
- __clear_bit(nr, addr);
-}
-
-static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr,
- int set)
-{
- if (set)
- linkmode_set_bit(nr, addr);
- else
- linkmode_clear_bit(nr, addr);
-}
-
-static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr)
-{
- return test_bit(nr, addr);
-}
+#define linkmode_test_bit test_bit
+#define linkmode_set_bit __set_bit
+#define linkmode_clear_bit __clear_bit
+#define linkmode_mod_bit __assign_bit
static inline void linkmode_set_bit_array(const int *array, int array_size,
unsigned long *addr)
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 9b9b38e89563..51a258c24ff5 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -18,9 +18,9 @@
#if IS_ENABLED(CONFIG_LIVEPATCH)
/* task patch states */
-#define KLP_UNDEFINED -1
-#define KLP_UNPATCHED 0
-#define KLP_PATCHED 1
+#define KLP_TRANSITION_IDLE -1
+#define KLP_TRANSITION_UNPATCHED 0
+#define KLP_TRANSITION_PATCHED 1
/**
* struct klp_func - function structure for live patching
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 334e00efbde4..f804b76cde44 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -176,7 +176,8 @@ LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer,
size_t buffer_size)
LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid)
LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new)
-LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, const char *name)
+LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, struct dentry *src,
+ const char *name)
LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir,
struct kernfs_node *kn)
LSM_HOOK(int, 0, file_permission, struct file *file, int mask)
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 693eba9869e4..b1fbe4118414 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -7,6 +7,7 @@
/* Known PHY IDs */
#define MARVELL_PHY_ID_88E1101 0x01410c60
+#define MARVELL_PHY_ID_88E3082 0x01410c80
#define MARVELL_PHY_ID_88E1112 0x01410c90
#define MARVELL_PHY_ID_88E1111 0x01410cc0
#define MARVELL_PHY_ID_88E1118 0x01410e10
@@ -31,6 +32,8 @@
/* Marvel 88E1111 in Finisar SFP module with modified PHY ID */
#define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
+/* ID from 88E6020, assumed to be the same for the whole 6250 family */
+#define MARVELL_PHY_ID_88E6250_FAMILY 0x01410db0
/* These Ethernet switch families contain embedded PHYs, but they do
* not have a model ID. So the switch driver traps reads to the ID2
* register and returns the switch family ID
diff --git a/include/linux/math64.h b/include/linux/math64.h
index bf74478926d4..d34def7f9a8c 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -4,8 +4,8 @@
#include <linux/types.h>
#include <linux/math.h>
-#include <vdso/math64.h>
#include <asm/div64.h>
+#include <vdso/math64.h>
#if BITS_PER_LONG == 64
@@ -179,16 +179,12 @@ static __always_inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
#ifndef mul_u64_u32_shr
static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
- u32 ah, al;
+ u32 ah = a >> 32, al = a;
u64 ret;
- al = a;
- ah = a >> 32;
-
ret = mul_u32_u32(al, mul) >> shift;
if (ah)
ret += mul_u32_u32(ah, mul) << (32 - shift);
-
return ret;
}
#endif /* mul_u64_u32_shr */
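The refactor is purely cosmetic; the identity behind mul_u64_u32_shr() is unchanged. Writing a = ah * 2^32 + al:

	(a * mul) >> shift == ((al * mul) >> shift) + ((ah * mul) << (32 - shift))

for 0 <= shift < 32, with all arithmetic modulo 2^64, since shifting ah's contribution (already scaled by 2^32) right by shift nets a left shift of 32 - shift.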
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index f1755163dd9f..8c0a33a2e9ce 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -19,6 +19,7 @@ enum axp20x_variants {
AXP223_ID,
AXP288_ID,
AXP313A_ID,
+ AXP717_ID,
AXP803_ID,
AXP806_ID,
AXP809_ID,
@@ -104,15 +105,47 @@ enum axp20x_variants {
#define AXP313A_ON_INDICATE 0x00
#define AXP313A_OUTPUT_CONTROL 0x10
-#define AXP313A_DCDC1_CONRTOL 0x13
-#define AXP313A_DCDC2_CONRTOL 0x14
-#define AXP313A_DCDC3_CONRTOL 0x15
-#define AXP313A_ALDO1_CONRTOL 0x16
-#define AXP313A_DLDO1_CONRTOL 0x17
+#define AXP313A_DCDC1_CONTROL 0x13
+#define AXP313A_DCDC2_CONTROL 0x14
+#define AXP313A_DCDC3_CONTROL 0x15
+#define AXP313A_ALDO1_CONTROL 0x16
+#define AXP313A_DLDO1_CONTROL 0x17
#define AXP313A_SHUTDOWN_CTRL 0x1a
#define AXP313A_IRQ_EN 0x20
#define AXP313A_IRQ_STATE 0x21
+#define AXP717_ON_INDICATE 0x00
+#define AXP717_IRQ0_EN 0x40
+#define AXP717_IRQ1_EN 0x41
+#define AXP717_IRQ2_EN 0x42
+#define AXP717_IRQ3_EN 0x43
+#define AXP717_IRQ4_EN 0x44
+#define AXP717_IRQ0_STATE 0x48
+#define AXP717_IRQ1_STATE 0x49
+#define AXP717_IRQ2_STATE 0x4a
+#define AXP717_IRQ3_STATE 0x4b
+#define AXP717_IRQ4_STATE 0x4c
+#define AXP717_DCDC_OUTPUT_CONTROL 0x80
+#define AXP717_DCDC1_CONTROL 0x83
+#define AXP717_DCDC2_CONTROL 0x84
+#define AXP717_DCDC3_CONTROL 0x85
+#define AXP717_DCDC4_CONTROL 0x86
+#define AXP717_LDO0_OUTPUT_CONTROL 0x90
+#define AXP717_LDO1_OUTPUT_CONTROL 0x91
+#define AXP717_ALDO1_CONTROL 0x93
+#define AXP717_ALDO2_CONTROL 0x94
+#define AXP717_ALDO3_CONTROL 0x95
+#define AXP717_ALDO4_CONTROL 0x96
+#define AXP717_BLDO1_CONTROL 0x97
+#define AXP717_BLDO2_CONTROL 0x98
+#define AXP717_BLDO3_CONTROL 0x99
+#define AXP717_BLDO4_CONTROL 0x9a
+#define AXP717_CLDO1_CONTROL 0x9b
+#define AXP717_CLDO2_CONTROL 0x9c
+#define AXP717_CLDO3_CONTROL 0x9d
+#define AXP717_CLDO4_CONTROL 0x9e
+#define AXP717_CPUSLDO_CONTROL 0x9f
+
#define AXP806_STARTUP_SRC 0x00
#define AXP806_CHIP_ID 0x03
#define AXP806_PWR_OUT_CTRL1 0x10
@@ -434,6 +467,27 @@ enum {
};
enum {
+ AXP717_DCDC1 = 0,
+ AXP717_DCDC2,
+ AXP717_DCDC3,
+ AXP717_DCDC4,
+ AXP717_ALDO1,
+ AXP717_ALDO2,
+ AXP717_ALDO3,
+ AXP717_ALDO4,
+ AXP717_BLDO1,
+ AXP717_BLDO2,
+ AXP717_BLDO3,
+ AXP717_BLDO4,
+ AXP717_CLDO1,
+ AXP717_CLDO2,
+ AXP717_CLDO3,
+ AXP717_CLDO4,
+ AXP717_CPUSLDO,
+ AXP717_REG_ID_MAX,
+};
+
+enum {
AXP806_DCDCA = 0,
AXP806_DCDCB,
AXP806_DCDCC,
@@ -732,6 +786,40 @@ enum axp313a_irqs {
AXP313A_IRQ_PEK_RIS_EDGE,
};
+enum axp717_irqs {
+ AXP717_IRQ_VBUS_FAULT,
+ AXP717_IRQ_VBUS_OVER_V,
+ AXP717_IRQ_BOOST_OVER_V,
+ AXP717_IRQ_GAUGE_NEW_SOC = 4,
+ AXP717_IRQ_SOC_DROP_LVL1 = 6,
+ AXP717_IRQ_SOC_DROP_LVL2,
+ AXP717_IRQ_PEK_RIS_EDGE,
+ AXP717_IRQ_PEK_FAL_EDGE,
+ AXP717_IRQ_PEK_LONG,
+ AXP717_IRQ_PEK_SHORT,
+ AXP717_IRQ_BATT_REMOVAL,
+ AXP717_IRQ_BATT_PLUGIN,
+ AXP717_IRQ_VBUS_REMOVAL,
+ AXP717_IRQ_VBUS_PLUGIN,
+ AXP717_IRQ_BATT_OVER_V,
+ AXP717_IRQ_CHARG_TIMER,
+ AXP717_IRQ_DIE_TEMP_HIGH,
+ AXP717_IRQ_CHARG,
+ AXP717_IRQ_CHARG_DONE,
+ AXP717_IRQ_BATT_OVER_CURR,
+ AXP717_IRQ_LDO_OVER_CURR,
+ AXP717_IRQ_WDOG_EXPIRE,
+ AXP717_IRQ_BATT_ACT_TEMP_LOW,
+ AXP717_IRQ_BATT_ACT_TEMP_HIGH,
+ AXP717_IRQ_BATT_CHG_TEMP_LOW,
+ AXP717_IRQ_BATT_CHG_TEMP_HIGH,
+ AXP717_IRQ_BATT_QUIT_TEMP_HIGH,
+ AXP717_IRQ_BC_USB_CHNG = 30,
+ AXP717_IRQ_BC_USB_DONE,
+ AXP717_IRQ_TYPEC_PLUGIN = 37,
+ AXP717_IRQ_TYPEC_REMOVE,
+};
+
enum axp803_irqs {
AXP803_IRQ_ACIN_OVER_V = 1,
AXP803_IRQ_ACIN_PLUGIN,
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 77b8c0a26674..cde01e133a1b 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -630,13 +630,29 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
/**
- * mhi_power_down - Start MHI power down sequence
+ * mhi_power_down - Power down the MHI device and also destroy the
+ * 'struct device' for the channels associated with it.
+ * See also mhi_power_down_keep_dev() which is a variant
+ * of this API that keeps the 'struct device' for channels
+ * (useful during suspend/hibernation).
* @mhi_cntrl: MHI controller
* @graceful: Link is still accessible, so do a graceful shutdown process
*/
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
/**
+ * mhi_power_down_keep_dev - Power down the MHI device but keep the 'struct
+ * device' for the channels associated with it.
+ * This is a variant of 'mhi_power_down()' and
+ * useful in scenarios such as suspend/hibernation
+ * where destroying of the 'struct device' is not
+ * needed.
+ * @mhi_cntrl: MHI controller
+ * @graceful: Link is still accessible, so do a graceful shutdown process
+ */
+void mhi_power_down_keep_dev(struct mhi_controller *mhi_cntrl, bool graceful);
+
+/**
* mhi_unprepare_after_power_down - Free any allocated memory after power down
* @mhi_cntrl: MHI controller
*/
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index cb15308b5cb0..991526039ccb 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -95,9 +95,10 @@ enum {
};
enum {
- MLX5_CQ_MODIFY_PERIOD = 1 << 0,
- MLX5_CQ_MODIFY_COUNT = 1 << 1,
- MLX5_CQ_MODIFY_OVERRUN = 1 << 2,
+ MLX5_CQ_MODIFY_PERIOD = BIT(0),
+ MLX5_CQ_MODIFY_COUNT = BIT(1),
+ MLX5_CQ_MODIFY_OVERRUN = BIT(2),
+ MLX5_CQ_MODIFY_PERIOD_MODE = BIT(4),
};
enum {
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 01275c6e8468..d7bb31d9a446 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -68,7 +68,7 @@
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
-#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))
+#define MLX5_ADDR_OF(typ, p, fld) ((void *)((u8 *)(p) + MLX5_BYTE_OFF(typ, fld)))
/* insert a value to a struct */
#define MLX5_SET(typ, p, fld, v) do { \
@@ -1336,6 +1336,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap)
+#define MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, ft_field_support_2_nic_receive.cap)
+
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)
@@ -1359,6 +1362,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
+#define MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION(mdev, ft_field_support_2_port_selection.cap)
+
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index bf9324a31ae9..779cfdf2e9d6 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -85,7 +85,7 @@ enum mlx5_sqp_t {
};
enum {
- MLX5_MAX_PORTS = 4,
+ MLX5_MAX_PORTS = 8,
};
enum {
@@ -862,6 +862,7 @@ struct mlx5_cmd_work_ent {
void *context;
int idx;
struct completion handling;
+ struct completion slotted;
struct completion done;
struct mlx5_cmd *cmd;
struct work_struct work;
@@ -1374,11 +1375,4 @@ static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev)
enum {
MLX5_OCTWORD = 16,
};
-
-struct msi_map mlx5_msix_alloc(struct mlx5_core_dev *dev,
- irqreturn_t (*handler)(int, void *),
- const struct irq_affinity_desc *affdesc,
- const char *name);
-void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map);
-
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index c940b329a475..f468763478ae 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -416,7 +416,10 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
/* Table 2170 - Flow Table Fields Supported 2 Format */
struct mlx5_ifc_flow_table_fields_supported_2_bits {
- u8 reserved_at_0[0xe];
+ u8 reserved_at_0[0x2];
+ u8 inner_l4_type[0x1];
+ u8 outer_l4_type[0x1];
+ u8 reserved_at_4[0xa];
u8 bth_opcode[0x1];
u8 reserved_at_f[0x1];
u8 tunnel_header_0_1[0x1];
@@ -525,6 +528,12 @@ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
u8 reserved_at_0[0x80];
};
+enum {
+ MLX5_PACKET_L4_TYPE_NONE,
+ MLX5_PACKET_L4_TYPE_TCP,
+ MLX5_PACKET_L4_TYPE_UDP,
+};
+
struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 smac_47_16[0x20];
@@ -550,7 +559,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 tcp_sport[0x10];
u8 tcp_dport[0x10];
- u8 reserved_at_c0[0x10];
+ u8 l4_type[0x2];
+ u8 reserved_at_c2[0xe];
u8 ipv4_ihl[0x4];
u8 reserved_at_c4[0x4];
@@ -846,7 +856,11 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
- u8 reserved_at_e00[0x700];
+ u8 reserved_at_e00[0x600];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive;
+
+ u8 reserved_at_1480[0x80];
struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive_rdma;
@@ -876,7 +890,9 @@ struct mlx5_ifc_port_selection_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection;
- u8 reserved_at_400[0x7c00];
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_port_selection;
+
+ u8 reserved_at_480[0x7b80];
};
enum {
@@ -1469,7 +1485,9 @@ enum {
};
struct mlx5_ifc_cmd_hca_cap_bits {
- u8 reserved_at_0[0x10];
+ u8 reserved_at_0[0x6];
+ u8 page_request_disable[0x1];
+ u8 reserved_at_7[0x9];
u8 shared_object_to_user_object_allowed[0x1];
u8 reserved_at_13[0xe];
u8 vhca_resource_manager[0x1];
@@ -1668,7 +1686,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cq_oi[0x1];
u8 cq_resize[0x1];
u8 cq_moderation[0x1];
- u8 reserved_at_223[0x3];
+ u8 cq_period_mode_modify[0x1];
+ u8 reserved_at_224[0x2];
u8 cq_eq_remap[0x1];
u8 pg[0x1];
u8 block_lb_mc[0x1];
@@ -2004,7 +2023,13 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_3a0[0x10];
u8 max_rqt_vhca_id[0x10];
- u8 reserved_at_3c0[0x440];
+ u8 reserved_at_3c0[0x20];
+
+ u8 reserved_at_3e0[0x10];
+ u8 pcc_ifa2[0x1];
+ u8 reserved_at_3f1[0xf];
+
+ u8 reserved_at_400[0x400];
};
enum mlx5_ifc_flow_destination_type {
@@ -4361,10 +4386,10 @@ enum {
MLX5_CQC_ST_FIRED = 0xa,
};
-enum {
+enum mlx5_cq_period_mode {
MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
- MLX5_CQ_PERIOD_NUM_MODES
+ MLX5_CQ_PERIOD_NUM_MODES,
};
struct mlx5_ifc_cqc_bits {
@@ -9793,7 +9818,21 @@ struct mlx5_ifc_pplm_reg_bits {
u8 fec_override_admin_100g_2x[0x10];
u8 fec_override_admin_50g_1x[0x10];
- u8 reserved_at_140[0x140];
+ u8 fec_override_cap_800g_8x[0x10];
+ u8 fec_override_cap_400g_4x[0x10];
+
+ u8 fec_override_cap_200g_2x[0x10];
+ u8 fec_override_cap_100g_1x[0x10];
+
+ u8 reserved_at_180[0xa0];
+
+ u8 fec_override_admin_800g_8x[0x10];
+ u8 fec_override_admin_400g_4x[0x10];
+
+ u8 fec_override_admin_200g_2x[0x10];
+ u8 fec_override_admin_100g_1x[0x10];
+
+ u8 reserved_at_260[0x20];
};
struct mlx5_ifc_ppcnt_reg_bits {
@@ -10165,7 +10204,9 @@ struct mlx5_ifc_mtutc_reg_bits {
};
struct mlx5_ifc_pcam_enhanced_features_bits {
- u8 reserved_at_0[0x68];
+ u8 reserved_at_0[0x48];
+ u8 fec_100G_per_lane_in_pplm[0x1];
+ u8 reserved_at_49[0x1f];
u8 fec_50G_per_lane_in_pplm[0x1];
u8 reserved_at_69[0x4];
u8 rx_icrc_encapsulated_counter[0x1];
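Together with the MLX5_PACKET_L4_TYPE_* enum and the new l4_type match bits, a steering rule can select TCP regardless of IP version; a hedged sketch using the standard MLX5_SET() accessor (headers_c/headers_v stand for the usual match-criteria and match-value buffers):

	/* Mask the full 2-bit l4_type field, then match TCP packets. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, l4_type, 0x3);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_type,
		 MLX5_PACKET_L4_TYPE_TCP);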
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 5894bf912f7b..88c6a76042ee 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -433,8 +433,8 @@ struct mmc_host {
mmc_pm_flag_t pm_caps; /* supported pm features */
/* host specific block data */
- unsigned int max_seg_size; /* see blk_queue_max_segment_size */
- unsigned short max_segs; /* see blk_queue_max_segments */
+ unsigned int max_seg_size; /* lim->max_segment_size */
+ unsigned short max_segs; /* lim->max_segments */
unsigned short unused;
unsigned int max_req_size; /* maximum number of bytes in one req */
unsigned int max_blk_size; /* maximum size of one mmc block */
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index 478855b8e406..fed1f5f4a8d3 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -106,7 +106,10 @@ struct sdio_driver {
.class = (dev_class), \
.vendor = SDIO_ANY_ID, .device = SDIO_ANY_ID
-extern int sdio_register_driver(struct sdio_driver *);
+/* use a macro to avoid include chaining to get THIS_MODULE */
+#define sdio_register_driver(drv) \
+ __sdio_register_driver(drv, THIS_MODULE)
+extern int __sdio_register_driver(struct sdio_driver *, struct module *);
extern void sdio_unregister_driver(struct sdio_driver *);
/**
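The macro wrapper keeps the old spelling at call sites while letting the core learn the owning module; a sketch of an unchanged caller (driver names are hypothetical):

	static struct sdio_driver my_driver = {
		.name		= "my_sdio",
		.id_table	= my_sdio_ids,
		.probe		= my_probe,
		.remove		= my_remove,
	};

	/* Expands to __sdio_register_driver(&my_driver, THIS_MODULE),
	 * without sdio_func.h having to include module.h.
	 */
	err = sdio_register_driver(&my_driver);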
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 7fada7a714fe..7cddfdac2f57 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -124,6 +124,7 @@
#define SDIO_DEVICE_ID_REALTEK_RTW8723DS_2ANT 0xd723
#define SDIO_DEVICE_ID_REALTEK_RTW8723DS_1ANT 0xd724
#define SDIO_DEVICE_ID_REALTEK_RTW8821DS 0xd821
+#define SDIO_DEVICE_ID_REALTEK_RTW8723CS 0xb703
#define SDIO_VENDOR_ID_SIANO 0x039a
#define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 5d3d15e97868..274a2767ea49 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -8,8 +8,8 @@
#ifndef MMC_SLOT_GPIO_H
#define MMC_SLOT_GPIO_H
+#include <linux/interrupt.h>
#include <linux/types.h>
-#include <linux/irqreturn.h>
struct mmc_host;
@@ -21,8 +21,8 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int debounce);
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
unsigned int idx, unsigned int debounce);
-void mmc_gpio_set_cd_isr(struct mmc_host *host,
- irqreturn_t (*isr)(int irq, void *dev_id));
+int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config);
+void mmc_gpio_set_cd_isr(struct mmc_host *host, irq_handler_t isr);
int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on);
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
bool mmc_can_gpio_cd(struct mmc_host *host);
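A hedged sketch of the new mmc_gpiod_set_cd_config() helper, assuming config takes a generic pinconf-packed value as gpiod_set_config() does (the 20 ms debounce and the host pointer are placeholders):

	#include <linux/pinctrl/pinconf-generic.h>

	/* Ask the card-detect GPIO controller for a 20,000 us debounce. */
	err = mmc_gpiod_set_cd_config(host,
			PIN_CONF_PACKED(PIN_CONFIG_INPUT_DEBOUNCE, 20000));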
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index f349e08a9dfe..d39ebb10caeb 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -123,15 +123,6 @@ struct mmu_notifier_ops {
unsigned long address);
/*
- * change_pte is called in cases that pte mapping to page is changed:
- * for example, when ksm remaps pte to point to a new shared page.
- */
- void (*change_pte)(struct mmu_notifier *subscription,
- struct mm_struct *mm,
- unsigned long address,
- pte_t pte);
-
- /*
* invalidate_range_start() and invalidate_range_end() must be
* paired and are called only when the mmap_lock and/or the
* locks protecting the reverse maps are held. If the subsystem
@@ -392,8 +383,6 @@ extern int __mmu_notifier_clear_young(struct mm_struct *mm,
unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address);
-extern void __mmu_notifier_change_pte(struct mm_struct *mm,
- unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r);
extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
@@ -439,13 +428,6 @@ static inline int mmu_notifier_test_young(struct mm_struct *mm,
return 0;
}
-static inline void mmu_notifier_change_pte(struct mm_struct *mm,
- unsigned long address, pte_t pte)
-{
- if (mm_has_notifiers(mm))
- __mmu_notifier_change_pte(mm, address, pte);
-}
-
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
@@ -581,26 +563,6 @@ static inline void mmu_notifier_range_init_owner(
__young; \
})
-/*
- * set_pte_at_notify() sets the pte _after_ running the notifier.
- * This is safe to start by updating the secondary MMUs, because the primary MMU
- * pte invalidate must have already happened with a ptep_clear_flush() before
- * set_pte_at_notify() has been invoked. Updating the secondary MMUs first is
- * required when we change both the protection of the mapping from read-only to
- * read-write and the pfn (like during copy on write page faults). Otherwise the
- * old page would remain mapped readonly in the secondary MMUs after the new
- * page is already writable by some CPU through the primary MMU.
- */
-#define set_pte_at_notify(__mm, __address, __ptep, __pte) \
-({ \
- struct mm_struct *___mm = __mm; \
- unsigned long ___address = __address; \
- pte_t ___pte = __pte; \
- \
- mmu_notifier_change_pte(___mm, ___address, ___pte); \
- set_pte_at(___mm, ___address, __ptep, ___pte); \
-})
-
#else /* CONFIG_MMU_NOTIFIER */
struct mmu_notifier_range {
@@ -650,11 +612,6 @@ static inline int mmu_notifier_test_young(struct mm_struct *mm,
return 0;
}
-static inline void mmu_notifier_change_pte(struct mm_struct *mm,
- unsigned long address, pte_t pte)
-{
-}
-
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
@@ -693,7 +650,6 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
-#define set_pte_at_notify set_pte_at
static inline void mmu_notifier_synchronize(void)
{
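With change_pte() and set_pte_at_notify() removed, a caller that used to notify secondary MMUs before updating the primary PTE now updates it directly; a sketch of the migration, assuming the call already sits inside an invalidate_range_start()/end() pair (which forces secondary MMUs to refault):

	/* Before this patch: */
	set_pte_at_notify(mm, addr, ptep, pte);

	/* After: the notifier hook is gone; a plain update suffices. */
	set_pte_at(mm, addr, ptep, pte);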
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index c11b7cde81ef..8f9c9590a42c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -205,7 +205,10 @@ enum node_stat_item {
NR_KERNEL_SCS_KB, /* measured in KiB */
#endif
NR_PAGETABLE, /* used for pagetables */
- NR_SECONDARY_PAGETABLE, /* secondary pagetables, e.g. KVM pagetables */
+ NR_SECONDARY_PAGETABLE, /* secondary pagetables, KVM & IOMMU */
+#ifdef CONFIG_IOMMU_SUPPORT
+ NR_IOMMU_PAGES, /* # of pages allocated by IOMMU */
+#endif
#ifdef CONFIG_SWAP
NR_SWAPCACHE,
#endif
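A sketch of how the new node counter might be maintained by an IOMMU page-table allocator (hypothetical call site; mod_node_page_state() is the standard helper for node_stat_item counters):

	/* Account one page allocated for IOMMU page tables. */
	mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, 1);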
diff --git a/include/linux/module.h b/include/linux/module.h
index 1153b0d99a80..ffa1c603163c 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -605,6 +605,11 @@ static inline bool module_is_live(struct module *mod)
return mod->state != MODULE_STATE_GOING;
}
+static inline bool module_is_coming(struct module *mod)
+{
+ return mod->state == MODULE_STATE_COMING;
+}
+
struct module *__module_text_address(unsigned long addr);
struct module *__module_address(unsigned long addr);
bool is_module_address(unsigned long addr);
@@ -857,6 +862,10 @@ void *dereference_module_function_descriptor(struct module *mod, void *ptr)
return ptr;
}
+static inline bool module_is_coming(struct module *mod)
+{
+ return false;
+}
#endif /* CONFIG_MODULES */
#ifdef CONFIG_SYSFS
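module_is_coming() mirrors module_is_live(), with the !CONFIG_MODULES stub returning false; a sketch of the intended shape of a caller (the policy is hypothetical, and __module_text_address() requires RCU or preemption protection):

	struct module *mod = __module_text_address(addr);

	if (mod && module_is_coming(mod))
		return -EBUSY;	/* still running its init code */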
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index 89b1e0ed9811..e395461d59e5 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -25,13 +25,6 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
/* Additional bytes needed by arch in front of individual sections */
unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
-/* Allocator used for allocating struct module, core sections and init
- sections. Returns NULL on failure. */
-void *module_alloc(unsigned long size);
-
-/* Free memory returned from module_alloc. */
-void module_memfree(void *module_region);
-
/* Determines if the section name is an init section (that is only used during
* module loading).
*/
@@ -129,12 +122,4 @@ void module_arch_cleanup(struct module *mod);
/* Any cleanup before freeing mod->module_init */
void module_arch_freeing_init(struct module *mod);
-#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
- !defined(CONFIG_KASAN_VMALLOC)
-#include <linux/kasan.h>
-#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
-#else
-#define MODULE_ALIGN PAGE_SIZE
-#endif
-
#endif
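module_alloc()/module_memfree() and MODULE_ALIGN leave this header; assuming they are superseded by the generic execmem API elsewhere in this series, an allocation site would now look roughly like this (execmem_free() pairs with it on teardown):

	#include <linux/execmem.h>

	static void *hypothetical_alloc_text(size_t size)
	{
		/* Assumed replacement for module_alloc(). */
		return execmem_alloc(EXECMEM_MODULE_TEXT, size);
	}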
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 74e0cc14ebf8..967aa9ea9f96 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -44,6 +44,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT};
#define LOOKUP_BENEATH 0x080000 /* No escaping from starting point. */
#define LOOKUP_IN_ROOT 0x100000 /* Treat dirfd as fs root. */
#define LOOKUP_CACHED 0x200000 /* Only do cached lookup */
+#define LOOKUP_LINKAT_EMPTY 0x400000 /* Linkat request with empty path. */
/* LOOKUP_* flags which do scope-related checks based on the dirfd. */
#define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT)
diff --git a/include/linux/net.h b/include/linux/net.h
index 15df6d5f27a7..688320b79fcc 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -153,6 +153,7 @@ struct sockaddr;
struct msghdr;
struct module;
struct sk_buff;
+struct proto_accept_arg;
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
unsigned int, size_t);
typedef int (*skb_read_actor_t)(struct sock *, struct sk_buff *);
@@ -171,7 +172,8 @@ struct proto_ops {
int (*socketpair)(struct socket *sock1,
struct socket *sock2);
int (*accept) (struct socket *sock,
- struct socket *newsock, int flags, bool kern);
+ struct socket *newsock,
+ struct proto_accept_arg *arg);
int (*getname) (struct socket *sock,
struct sockaddr *addr,
int peer);
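Folding the old flags/kern parameters into struct proto_accept_arg lets the signature grow without touching every implementation again; a hedged sketch of an adapted ->accept() (the flags and kern field names are assumptions, since the struct body is not part of this hunk, and my_do_accept is hypothetical):

	static int my_accept(struct socket *sock, struct socket *newsock,
			     struct proto_accept_arg *arg)
	{
		bool nonblock = arg->flags & O_NONBLOCK;

		return my_do_accept(sock, newsock, nonblock, arg->kern);
	}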
diff --git a/include/linux/net/intel/libie/rx.h b/include/linux/net/intel/libie/rx.h
new file mode 100644
index 000000000000..8e97775f1d66
--- /dev/null
+++ b/include/linux/net/intel/libie/rx.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef __LIBIE_RX_H
+#define __LIBIE_RX_H
+
+#include <net/libeth/rx.h>
+
+/* Rx buffer management */
+
+/* The largest size for a single descriptor as per HW */
+#define LIBIE_MAX_RX_BUF_LEN 9728U
+/* "True" HW-writeable space: minimum from SW and HW values */
+#define LIBIE_RX_BUF_LEN(hr) min_t(u32, LIBETH_RX_PAGE_LEN(hr), \
+ LIBIE_MAX_RX_BUF_LEN)
+
+/* The maximum frame size as per HW (S/G) */
+#define __LIBIE_MAX_RX_FRM_LEN 16382U
+/* ATST, HW can chain up to 5 Rx descriptors */
+#define LIBIE_MAX_RX_FRM_LEN(hr) \
+ min_t(u32, __LIBIE_MAX_RX_FRM_LEN, LIBIE_RX_BUF_LEN(hr) * 5)
+/* Maximum frame size minus LL overhead */
+#define LIBIE_MAX_MTU \
+ (LIBIE_MAX_RX_FRM_LEN(LIBETH_MAX_HEADROOM) - LIBETH_RX_LL_LEN)
+
+/* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed
+ * bitfield struct.
+ */
+
+#define LIBIE_RX_PT_NUM 154
+
+extern const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM];
+
+/**
+ * libie_rx_pt_parse - convert HW packet type to software bitfield structure
+ * @pt: 10-bit hardware packet type value from the descriptor
+ *
+ * ``libie_rx_pt_lut`` must be accessed only using this wrapper.
+ *
+ * Return: parsed bitfield struct corresponding to the provided ptype.
+ */
+static inline struct libeth_rx_pt libie_rx_pt_parse(u32 pt)
+{
+ if (unlikely(pt >= LIBIE_RX_PT_NUM))
+ pt = 0;
+
+ return libie_rx_pt_lut[pt];
+}
+
+#endif /* __LIBIE_RX_H */
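Usage is intended to be a single call in the Rx hot path; a short sketch (rx_ptype is a placeholder for the descriptor field):

	/* Out-of-range ptypes fall back to LUT entry 0 inside the wrapper. */
	struct libeth_rx_pt parsed = libie_rx_pt_parse(rx_ptype);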
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cb37817d6382..d20c6c99eb88 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -59,7 +59,7 @@ struct ethtool_ops;
struct kernel_hwtstamp_config;
struct phy_device;
struct dsa_port;
-struct ip_tunnel_parm;
+struct ip_tunnel_parm_kern;
struct macsec_context;
struct macsec_ops;
struct netdev_name_node;
@@ -1327,7 +1327,7 @@ struct netdev_net_notifier {
* queue id bound to an AF_XDP socket. The flags field specifies if
* only RX, only Tx, or both should be woken up using the flags
* XDP_WAKEUP_RX and XDP_WAKEUP_TX.
- * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p,
+ * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm_kern *p,
* int cmd);
* Add, change, delete or get information on an IPv4 tunnel.
* struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
@@ -1583,7 +1583,8 @@ struct net_device_ops {
int (*ndo_xsk_wakeup)(struct net_device *dev,
u32 queue_id, u32 flags);
int (*ndo_tunnel_ctl)(struct net_device *dev,
- struct ip_tunnel_parm *p, int cmd);
+ struct ip_tunnel_parm_kern *p,
+ int cmd);
struct net_device * (*ndo_get_peer_dev)(struct net_device *dev);
int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
struct net_device_path *path);
@@ -1956,6 +1957,7 @@ enum netdev_reg_state {
* @sysfs_rx_queue_group: Space for optional per-rx queue attributes
* @rtnl_link_ops: Rtnl_link_ops
* @stat_ops: Optional ops for queue-aware statistics
+ * @queue_mgmt_ops: Optional ops for queue management
*
* @gso_max_size: Maximum size of generic segmentation offload
* @tso_max_size: Device (as in HW) limit on the max TSO request size
@@ -2338,6 +2340,8 @@ struct net_device {
const struct netdev_stat_ops *stat_ops;
+ const struct netdev_queue_mgmt_ops *queue_mgmt_ops;
+
/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SEGS 65535u
#define GSO_LEGACY_MAX_SIZE 65536u
@@ -2367,8 +2371,8 @@ struct net_device {
struct sfp_bus *sfp_bus;
struct lock_class_key *qdisc_tx_busylock;
bool proto_down;
+ bool threaded;
unsigned wol_enabled:1;
- unsigned threaded:1;
struct list_head net_notifier_list;
@@ -3133,6 +3137,7 @@ struct net_device *netdev_get_by_name(struct net *net, const char *name,
netdevice_tracker *tracker, gfp_t gfp);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);
+void netdev_copy_name(struct net_device *dev, char *name);
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
@@ -3203,6 +3208,7 @@ struct softnet_data {
struct softnet_data *rps_ipi_list;
#endif
+ unsigned int received_rps;
bool in_net_rx_action;
bool in_napi_threaded_poll;
@@ -3235,11 +3241,11 @@ struct softnet_data {
unsigned int cpu;
unsigned int input_queue_tail;
#endif
- unsigned int received_rps;
- unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
+ atomic_t dropped ____cacheline_aligned_in_smp;
+
/* Another possibly contended cache line */
spinlock_t defer_lock ____cacheline_aligned_in_smp;
int defer_count;
@@ -3248,21 +3254,6 @@ struct softnet_data {
call_single_data_t defer_csd;
};
-static inline void input_queue_head_incr(struct softnet_data *sd)
-{
-#ifdef CONFIG_RPS
- sd->input_queue_head++;
-#endif
-}
-
-static inline void input_queue_tail_incr_save(struct softnet_data *sd,
- unsigned int *qtail)
-{
-#ifdef CONFIG_RPS
- *qtail = ++sd->input_queue_tail;
-#endif
-}
-
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
static inline int dev_recursion_level(void)
@@ -3270,23 +3261,6 @@ static inline int dev_recursion_level(void)
return this_cpu_read(softnet_data.xmit.recursion);
}
-#define XMIT_RECURSION_LIMIT 8
-static inline bool dev_xmit_recursion(void)
-{
- return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
- XMIT_RECURSION_LIMIT);
-}
-
-static inline void dev_xmit_recursion_inc(void)
-{
- __this_cpu_inc(softnet_data.xmit.recursion);
-}
-
-static inline void dev_xmit_recursion_dec(void)
-{
- __this_cpu_dec(softnet_data.xmit.recursion);
-}
-
void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);
@@ -4127,6 +4101,8 @@ static inline void dev_put(struct net_device *dev)
netdev_put(dev, NULL);
}
+DEFINE_FREE(dev_put, struct net_device *, if (_T) dev_put(_T))
+
static inline void netdev_ref_replace(struct net_device *odev,
struct net_device *ndev,
netdevice_tracker *tracker,
@@ -4545,6 +4521,9 @@ static inline void netif_addr_unlock_bh(struct net_device *dev)
void ether_setup(struct net_device *dev);
+/* Allocate dummy net_device */
+struct net_device *alloc_netdev_dummy(int sizeof_priv);
+
/* Support for loadable net-drivers */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
unsigned char name_assign_type,
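The new DEFINE_FREE(dev_put, ...) class enables scope-based release of device references via <linux/cleanup.h>; a minimal sketch:

	struct net_device *dev __free(dev_put) =
			dev_get_by_index(net, ifindex);

	if (!dev)
		return -ENODEV;

	/* use dev; dev_put() runs automatically on every return path */
	return 0;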
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 100cbb261269..ca56a4428043 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -20,95 +20,24 @@
#include <linux/uio.h>
enum netfs_sreq_ref_trace;
-
-/*
- * Overload PG_private_2 to give us PG_fscache - this is used to indicate that
- * a page is currently backed by a local disk cache
- */
-#define folio_test_fscache(folio) folio_test_private_2(folio)
-#define PageFsCache(page) PagePrivate2((page))
-#define SetPageFsCache(page) SetPagePrivate2((page))
-#define ClearPageFsCache(page) ClearPagePrivate2((page))
-#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
-#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
+typedef struct mempool_s mempool_t;
/**
- * folio_start_fscache - Start an fscache write on a folio.
+ * folio_start_private_2 - Start an fscache write on a folio. [DEPRECATED]
* @folio: The folio.
*
* Call this function before writing a folio to a local cache. Starting a
* second write before the first one finishes is not allowed.
+ *
+ * Note that this should no longer be used.
*/
-static inline void folio_start_fscache(struct folio *folio)
+static inline void folio_start_private_2(struct folio *folio)
{
VM_BUG_ON_FOLIO(folio_test_private_2(folio), folio);
folio_get(folio);
folio_set_private_2(folio);
}
-/**
- * folio_end_fscache - End an fscache write on a folio.
- * @folio: The folio.
- *
- * Call this function after the folio has been written to the local cache.
- * This will wake any sleepers waiting on this folio.
- */
-static inline void folio_end_fscache(struct folio *folio)
-{
- folio_end_private_2(folio);
-}
-
-/**
- * folio_wait_fscache - Wait for an fscache write on this folio to end.
- * @folio: The folio.
- *
- * If this folio is currently being written to a local cache, wait for
- * the write to finish. Another write may start after this one finishes,
- * unless the caller holds the folio lock.
- */
-static inline void folio_wait_fscache(struct folio *folio)
-{
- folio_wait_private_2(folio);
-}
-
-/**
- * folio_wait_fscache_killable - Wait for an fscache write on this folio to end.
- * @folio: The folio.
- *
- * If this folio is currently being written to a local cache, wait
- * for the write to finish or for a fatal signal to be received.
- * Another write may start after this one finishes, unless the caller
- * holds the folio lock.
- *
- * Return:
- * - 0 if successful.
- * - -EINTR if a fatal signal was encountered.
- */
-static inline int folio_wait_fscache_killable(struct folio *folio)
-{
- return folio_wait_private_2_killable(folio);
-}
-
-static inline void set_page_fscache(struct page *page)
-{
- folio_start_fscache(page_folio(page));
-}
-
-static inline void end_page_fscache(struct page *page)
-{
- folio_end_private_2(page_folio(page));
-}
-
-static inline void wait_on_page_fscache(struct page *page)
-{
- folio_wait_private_2(page_folio(page));
-}
-
-static inline int wait_on_page_fscache_killable(struct page *page)
-{
- return folio_wait_private_2_killable(page_folio(page));
-}
-
/* Marks used on xarray-based buffers */
#define NETFS_BUF_PUT_MARK XA_MARK_0 /* - Page needs putting */
#define NETFS_BUF_PAGECACHE_MARK XA_MARK_1 /* - Page needs wb/dirty flag wrangling */
@@ -135,6 +64,7 @@ struct netfs_inode {
#if IS_ENABLED(CONFIG_FSCACHE)
struct fscache_cookie *cache;
#endif
+ struct mutex wb_lock; /* Writeback serialisation */
loff_t remote_i_size; /* Size of the remote file */
loff_t zero_point; /* Size after which we assume there's no data
* on the server */
@@ -142,7 +72,8 @@ struct netfs_inode {
#define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */
#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
#define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */
-#define NETFS_ICTX_NO_WRITE_STREAMING 3 /* Don't engage in write-streaming */
+#define NETFS_ICTX_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark
+ * write to cache on read */
};
/*
@@ -165,16 +96,25 @@ struct netfs_folio {
unsigned int dirty_len; /* Write-streaming dirty data length */
};
#define NETFS_FOLIO_INFO 0x1UL /* OR'd with folio->private. */
+#define NETFS_FOLIO_COPY_TO_CACHE ((struct netfs_group *)0x356UL) /* Write to the cache only */
-static inline struct netfs_folio *netfs_folio_info(struct folio *folio)
+static inline bool netfs_is_folio_info(const void *priv)
{
- void *priv = folio_get_private(folio);
+ return (unsigned long)priv & NETFS_FOLIO_INFO;
+}
- if ((unsigned long)priv & NETFS_FOLIO_INFO)
+static inline struct netfs_folio *__netfs_folio_info(const void *priv)
+{
+ if (netfs_is_folio_info(priv))
return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO);
return NULL;
}
+static inline struct netfs_folio *netfs_folio_info(struct folio *folio)
+{
+ return __netfs_folio_info(folio_get_private(folio));
+}
+
static inline struct netfs_group *netfs_folio_group(struct folio *folio)
{
struct netfs_folio *finfo;
@@ -187,6 +127,33 @@ static inline struct netfs_group *netfs_folio_group(struct folio *folio)
}
/*
+ * Stream of I/O subrequests going to a particular destination, such as the
+ * server or the local cache. This is mainly intended for writing where we may
+ * have to write to multiple destinations concurrently.
+ */
+struct netfs_io_stream {
+ /* Submission tracking */
+ struct netfs_io_subrequest *construct; /* Op being constructed */
+ unsigned int submit_off; /* Folio offset we're submitting from */
+ unsigned int submit_len; /* Amount of data left to submit */
+ unsigned int submit_max_len; /* Amount I/O can be rounded up to */
+ void (*prepare_write)(struct netfs_io_subrequest *subreq);
+ void (*issue_write)(struct netfs_io_subrequest *subreq);
+ /* Collection tracking */
+ struct list_head subrequests; /* Contributory I/O operations */
+ struct netfs_io_subrequest *front; /* Op being collected */
+ unsigned long long collected_to; /* Position we've collected results to */
+ size_t transferred; /* The amount transferred from this stream */
+ enum netfs_io_source source; /* Where to read from/write to */
+ unsigned short error; /* Aggregate error for the stream */
+ unsigned char stream_nr; /* Index of stream in parent table */
+ bool avail; /* T if stream is available */
+ bool active; /* T if stream is active */
+ bool need_retry; /* T if this stream needs retrying */
+ bool failed; /* T if this stream failed */
+};
+
+/*
* Resources required to do operations on a cache.
*/
struct netfs_cache_resources {
@@ -209,14 +176,17 @@ struct netfs_io_subrequest {
struct work_struct work;
struct list_head rreq_link; /* Link in rreq->subrequests */
struct iov_iter io_iter; /* Iterator for this subrequest */
- loff_t start; /* Where to start the I/O */
+ unsigned long long start; /* Where to start the I/O */
+ size_t max_len; /* Maximum size of the I/O */
size_t len; /* Size of the I/O */
size_t transferred; /* Amount of data transferred */
refcount_t ref;
short error; /* 0 or error that occurred */
unsigned short debug_index; /* Index in list (for debugging output) */
+ unsigned int nr_segs; /* Number of segs in io_iter */
unsigned int max_nr_segs; /* 0 or max number of segments in an iterator */
enum netfs_io_source source; /* Where to read from/write to */
+ unsigned char stream_nr; /* I/O stream this belongs to */
unsigned long flags;
#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */
#define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */
@@ -224,15 +194,20 @@ struct netfs_io_subrequest {
#define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */
#define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */
#define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */
+#define NETFS_SREQ_BOUNDARY 6 /* Set if ends on hard boundary (eg. ceph object) */
+#define NETFS_SREQ_IN_PROGRESS 8 /* Unlocked when the subrequest completes */
+#define NETFS_SREQ_NEED_RETRY 9 /* Set if the filesystem requests a retry */
+#define NETFS_SREQ_RETRYING 10 /* Set if we're retrying */
+#define NETFS_SREQ_FAILED 11 /* Set if the subreq failed unretryably */
};
enum netfs_io_origin {
NETFS_READAHEAD, /* This read was triggered by readahead */
NETFS_READPAGE, /* This read is a synchronous read */
NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
+ NETFS_COPY_TO_CACHE, /* This write is to copy a read to the cache */
NETFS_WRITEBACK, /* This write was triggered by writepages */
NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */
- NETFS_LAUNDER_WRITE, /* This is triggered by ->launder_folio() */
NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */
NETFS_DIO_READ, /* This is a direct I/O read */
NETFS_DIO_WRITE, /* This is a direct I/O write */
@@ -254,26 +229,36 @@ struct netfs_io_request {
struct netfs_cache_resources cache_resources;
struct list_head proc_link; /* Link in netfs_iorequests */
struct list_head subrequests; /* Contributory I/O operations */
+ struct netfs_io_stream io_streams[2]; /* Streams of parallel I/O operations */
+#define NR_IO_STREAMS 2 //wreq->nr_io_streams
+ struct netfs_group *group; /* Writeback group being written back */
struct iov_iter iter; /* Unencrypted-side iterator */
struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */
void *netfs_priv; /* Private data for the netfs */
+ void *netfs_priv2; /* Private data for the netfs */
struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */
unsigned int direct_bv_count; /* Number of elements in direct_bv[] */
unsigned int debug_id;
unsigned int rsize; /* Maximum read size (0 for none) */
unsigned int wsize; /* Maximum write size (0 for none) */
- unsigned int subreq_counter; /* Next subreq->debug_index */
+ atomic_t subreq_counter; /* Next subreq->debug_index */
+ unsigned int nr_group_rel; /* Number of refs to release on ->group */
+ spinlock_t lock; /* Lock for queuing subreqs */
atomic_t nr_outstanding; /* Number of ops in progress */
atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */
- size_t submitted; /* Amount submitted for I/O so far */
- size_t len; /* Length of the request */
size_t upper_len; /* Length can be extended to here */
+ unsigned long long submitted; /* Amount submitted for I/O so far */
+ unsigned long long len; /* Length of the request */
size_t transferred; /* Amount to be indicated as transferred */
short error; /* 0 or error that occurred */
enum netfs_io_origin origin; /* Origin of the request */
bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */
- loff_t i_size; /* Size of the file */
- loff_t start; /* Start position */
+ unsigned long long i_size; /* Size of the file */
+ unsigned long long start; /* Start position */
+ atomic64_t issued_to; /* Write issuer folio cursor */
+ unsigned long long contiguity; /* Tracking for gaps in the writeback sequence */
+ unsigned long long collected_to; /* Point we've collected to */
+ unsigned long long cleaned_to; /* Position we've cleaned folios to */
pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
refcount_t ref;
unsigned long flags;
@@ -287,6 +272,11 @@ struct netfs_io_request {
#define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */
#define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */
#define NETFS_RREQ_BLOCKED 10 /* We blocked */
+#define NETFS_RREQ_PAUSE 11 /* Pause subrequest generation */
+#define NETFS_RREQ_USE_IO_ITER 12 /* Use ->io_iter rather than ->i_pages */
+#define NETFS_RREQ_ALL_QUEUED 13 /* All subreqs are now queued */
+#define NETFS_RREQ_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark
+ * write to cache on read */
const struct netfs_request_ops *netfs_ops;
void (*cleanup)(struct netfs_io_request *req);
};
@@ -295,8 +285,8 @@ struct netfs_io_request {
* Operations the network filesystem can/must provide to the helpers.
*/
struct netfs_request_ops {
- unsigned int io_request_size; /* Alloc size for netfs_io_request struct */
- unsigned int io_subrequest_size; /* Alloc size for netfs_io_subrequest struct */
+ mempool_t *request_pool;
+ mempool_t *subrequest_pool;
int (*init_request)(struct netfs_io_request *rreq, struct file *file);
void (*free_request)(struct netfs_io_request *rreq);
void (*free_subrequest)(struct netfs_io_subrequest *rreq);
@@ -312,10 +302,13 @@ struct netfs_request_ops {
/* Modification handling */
void (*update_i_size)(struct inode *inode, loff_t i_size);
+ void (*post_modify)(struct inode *inode);
/* Write request handling */
- void (*create_write_requests)(struct netfs_io_request *wreq,
- loff_t start, size_t len);
+ void (*begin_writeback)(struct netfs_io_request *wreq);
+ void (*prepare_write)(struct netfs_io_subrequest *subreq);
+ void (*issue_write)(struct netfs_io_subrequest *subreq);
+ void (*retry_request)(struct netfs_io_request *wreq, struct netfs_io_stream *stream);
void (*invalidate_cache)(struct netfs_io_request *wreq);
};
@@ -350,15 +343,27 @@ struct netfs_cache_ops {
netfs_io_terminated_t term_func,
void *term_func_priv);
+ /* Write data to the cache from a netfs subrequest. */
+ void (*issue_write)(struct netfs_io_subrequest *subreq);
+
/* Expand readahead request */
void (*expand_readahead)(struct netfs_cache_resources *cres,
- loff_t *_start, size_t *_len, loff_t i_size);
+ unsigned long long *_start,
+ unsigned long long *_len,
+ unsigned long long i_size);
/* Prepare a read operation, shortening it to a cached/uncached
* boundary as appropriate.
*/
enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
- loff_t i_size);
+ unsigned long long i_size);
+
+ /* Prepare a write subrequest, working out if we're allowed to do it
+ * and finding out the maximum amount of data to gather before
+ * attempting to submit. If we're not permitted to do it, the
+ * subrequest should be marked failed.
+ */
+ void (*prepare_write_subreq)(struct netfs_io_subrequest *subreq);
/* Prepare a write operation, working out what part of the write we can
* actually do.
@@ -384,6 +389,7 @@ struct netfs_cache_ops {
};
/* High-level read API. */
+ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter);
ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
@@ -410,7 +416,6 @@ int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
bool netfs_release_folio(struct folio *folio, gfp_t gfp);
-int netfs_launder_folio(struct folio *folio);
/* VMA operations API. */
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
@@ -426,9 +431,7 @@ ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
iov_iter_extraction_t extraction_flags);
size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
size_t max_size, size_t max_segs);
-struct netfs_io_subrequest *netfs_create_write_request(
- struct netfs_io_request *wreq, enum netfs_io_source dest,
- loff_t start, size_t len, work_func_t worker);
+void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq);
void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
bool was_async);
void netfs_queue_write_request(struct netfs_io_subrequest *subreq);
@@ -472,6 +475,7 @@ static inline void netfs_inode_init(struct netfs_inode *ctx,
#if IS_ENABLED(CONFIG_FSCACHE)
ctx->cache = NULL;
#endif
+ mutex_init(&ctx->wb_lock);
/* ->releasepage() drives zero_point */
if (use_zero_point) {
ctx->zero_point = ctx->remote_i_size;
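The write side moves from a single create_write_requests() hook to per-subrequest driving; a hedged sketch of how a filesystem's ops table changes shape (handler names are hypothetical):

	static const struct netfs_request_ops my_netfs_ops = {
		.init_request	 = my_init_request,
		/* replaces .create_write_requests: */
		.begin_writeback = my_begin_writeback,
		.prepare_write	 = my_prepare_write,
		.issue_write	 = my_issue_write,
	};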
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index ef8d2d618d5b..0d896ce296ce 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -701,6 +701,12 @@ enum state_protect_how4 {
SP4_SSV = 2
};
+/* GET_DIR_DELEGATION non-fatal status codes */
+enum gddrnf4_status {
+ GDD4_OK = 0,
+ GDD4_UNAVAIL = 1
+};
+
enum pnfs_layouttype {
LAYOUT_NFSV4_1_FILES = 1,
LAYOUT_OSD2_OBJECTS = 2,
diff --git a/include/linux/numa.h b/include/linux/numa.h
index 915033a75731..1d43371fafd2 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -36,12 +36,7 @@ int memory_add_physaddr_to_nid(u64 start);
int phys_to_target_node(u64 start);
#endif
-#ifndef numa_fill_memblks
-static inline int __init numa_fill_memblks(u64 start, u64 end)
-{
- return NUMA_NO_MEMBLK;
-}
-#endif
+int numa_fill_memblks(u64 start, u64 end);
#else /* !CONFIG_NUMA */
static inline int numa_nearest_node(int node, unsigned int state)
diff --git a/include/linux/objpool.h b/include/linux/objpool.h
index 15aff4a17f0c..cb1758eaa2d3 100644
--- a/include/linux/objpool.h
+++ b/include/linux/objpool.h
@@ -5,6 +5,10 @@
#include <linux/types.h>
#include <linux/refcount.h>
+#include <linux/atomic.h>
+#include <linux/cpumask.h>
+#include <linux/irqflags.h>
+#include <linux/smp.h>
/*
* objpool: ring-array based lockless MPMC queue
@@ -69,7 +73,7 @@ typedef int (*objpool_fini_cb)(struct objpool_head *head, void *context);
* struct objpool_head - object pooling metadata
* @obj_size: object size, aligned to sizeof(void *)
* @nr_objs: total objs (to be pre-allocated with objpool)
- * @nr_cpus: local copy of nr_cpu_ids
+ * @nr_possible_cpus: cached value of num_possible_cpus()
* @capacity: max objs can be managed by one objpool_slot
* @gfp: gfp flags for kmalloc & vmalloc
* @ref: refcount of objpool
@@ -81,7 +85,7 @@ typedef int (*objpool_fini_cb)(struct objpool_head *head, void *context);
struct objpool_head {
int obj_size;
int nr_objs;
- int nr_cpus;
+ int nr_possible_cpus;
int capacity;
gfp_t gfp;
refcount_t ref;
@@ -118,13 +122,94 @@ int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
gfp_t gfp, void *context, objpool_init_obj_cb objinit,
objpool_fini_cb release);
+/* try to retrieve object from slot */
+static inline void *__objpool_try_get_slot(struct objpool_head *pool, int cpu)
+{
+ struct objpool_slot *slot = pool->cpu_slots[cpu];
+ /* load head snapshot, other cpus may change it */
+ uint32_t head = smp_load_acquire(&slot->head);
+
+ while (head != READ_ONCE(slot->last)) {
+ void *obj;
+
+ /*
+ * data visibility of 'last' and 'head' could be out of
+ * order since memory updating of 'last' and 'head' are
+ * performed in push() and pop() independently
+ *
+ * before any retrieving attempts, pop() must guarantee
+ * 'last' is behind 'head', that is to say, there must
+ * be available objects in slot, which could be ensured
+ * by condition 'last != head && last - head <= nr_objs'
+ * that is equivalent to 'last - head - 1 < nr_objs' as
+ * 'last' and 'head' are both unsigned int32
+ */
+ if (READ_ONCE(slot->last) - head - 1 >= pool->nr_objs) {
+ head = READ_ONCE(slot->head);
+ continue;
+ }
+
+ /* obj must be retrieved before moving forward head */
+ obj = READ_ONCE(slot->entries[head & slot->mask]);
+
+ /* move head forward to mark it's consumption */
+ if (try_cmpxchg_release(&slot->head, &head, head + 1))
+ return obj;
+ }
+
+ return NULL;
+}
+
/**
* objpool_pop() - allocate an object from objpool
* @pool: object pool
*
* return value: object ptr or NULL if failed
*/
-void *objpool_pop(struct objpool_head *pool);
+static inline void *objpool_pop(struct objpool_head *pool)
+{
+ void *obj = NULL;
+ unsigned long flags;
+ int i, cpu;
+
+ /* disable local irq to avoid preemption & interruption */
+ raw_local_irq_save(flags);
+
+ cpu = raw_smp_processor_id();
+ for (i = 0; i < pool->nr_possible_cpus; i++) {
+ obj = __objpool_try_get_slot(pool, cpu);
+ if (obj)
+ break;
+ cpu = cpumask_next_wrap(cpu, cpu_possible_mask, -1, 1);
+ }
+ raw_local_irq_restore(flags);
+
+ return obj;
+}
+
+/* adding object to slot, abort if the slot was already full */
+static inline int
+__objpool_try_add_slot(void *obj, struct objpool_head *pool, int cpu)
+{
+ struct objpool_slot *slot = pool->cpu_slots[cpu];
+ uint32_t head, tail;
+
+ /* loading tail and head as a local snapshot, tail first */
+ tail = READ_ONCE(slot->tail);
+
+ do {
+ head = READ_ONCE(slot->head);
+ /* fault caught: something must be wrong */
+ WARN_ON_ONCE(tail - head > pool->nr_objs);
+ } while (!try_cmpxchg_acquire(&slot->tail, &tail, tail + 1));
+
+ /* now the tail position is reserved for the given obj */
+ WRITE_ONCE(slot->entries[tail & slot->mask], obj);
+ /* update sequence to make this obj available for pop() */
+ smp_store_release(&slot->last, tail + 1);
+
+ return 0;
+}
/**
* objpool_push() - reclaim the object and return back to objpool
@@ -134,7 +219,19 @@ void *objpool_pop(struct objpool_head *pool);
* return: 0 or error code (it fails only when user tries to push
* the same object multiple times or wrong "objects" into objpool)
*/
-int objpool_push(void *obj, struct objpool_head *pool);
+static inline int objpool_push(void *obj, struct objpool_head *pool)
+{
+ unsigned long flags;
+ int rc;
+
+ /* disable local irq to avoid preemption & interruption */
+ raw_local_irq_save(flags);
+ rc = __objpool_try_add_slot(obj, pool, raw_smp_processor_id());
+ raw_local_irq_restore(flags);
+
+ return rc;
+}
+
/**
* objpool_drop() - discard the object and deref objpool
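With pop/push now inline, a full round trip stays in the caller's translation unit; a short usage sketch (pool is assumed to have been set up with objpool_init()):

	struct my_obj *obj = objpool_pop(&pool);

	if (obj) {
		/* ... use the object ... */
		objpool_push(obj, &pool);	/* return it to its ring */
	}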
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index 4de2a24cadc9..e338282da652 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -11,7 +11,6 @@ struct reserved_mem_ops;
struct reserved_mem {
const char *name;
unsigned long fdt_node;
- unsigned long phandle;
const struct reserved_mem_ops *ops;
phys_addr_t base;
phys_addr_t size;
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 51421fdbb0ba..6f9242259edc 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -69,6 +69,7 @@ enum OID {
OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
OID_sha1, /* 1.3.14.3.2.26 */
OID_id_ansip384r1, /* 1.3.132.0.34 */
+ OID_id_ansip521r1, /* 1.3.132.0.35 */
OID_sha256, /* 2.16.840.1.101.3.4.2.1 */
OID_sha384, /* 2.16.840.1.101.3.4.2.2 */
OID_sha512, /* 2.16.840.1.101.3.4.2.3 */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 850d32057939..3d69589c00a4 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -40,6 +40,8 @@ int filemap_fdatawait_keep_errors(struct address_space *mapping);
int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
loff_t start_byte, loff_t end_byte);
+int filemap_invalidate_inode(struct inode *inode, bool flush,
+ loff_t start, loff_t end);
static inline int filemap_fdatawait(struct address_space *mapping)
{
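A hedged sketch of the new helper, assuming flush selects writeback before the pagecache is invalidated:

	/* Write back, then drop, the inode's entire pagecache. */
	err = filemap_invalidate_inode(inode, true, 0, LLONG_MAX);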
diff --git a/include/linux/papr_scm.h b/include/linux/papr_scm.h
new file mode 100644
index 000000000000..eb36453813db
--- /dev/null
+++ b/include/linux/papr_scm.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __LINUX_PAPR_SCM_H
+#define __LINUX_PAPR_SCM_H
+
+/* DIMM health bitmap indicators */
+/* SCM device is unable to persist memory contents */
+#define PAPR_PMEM_UNARMED (1ULL << (63 - 0))
+/* SCM device failed to persist memory contents */
+#define PAPR_PMEM_SHUTDOWN_DIRTY (1ULL << (63 - 1))
+/* SCM device contents are persisted from previous IPL */
+#define PAPR_PMEM_SHUTDOWN_CLEAN (1ULL << (63 - 2))
+/* SCM device contents are not persisted from previous IPL */
+#define PAPR_PMEM_EMPTY (1ULL << (63 - 3))
+/* SCM device memory life remaining is critically low */
+#define PAPR_PMEM_HEALTH_CRITICAL (1ULL << (63 - 4))
+/* SCM device will be garded off next IPL due to failure */
+#define PAPR_PMEM_HEALTH_FATAL (1ULL << (63 - 5))
+/* SCM contents cannot persist due to current platform health status */
+#define PAPR_PMEM_HEALTH_UNHEALTHY (1ULL << (63 - 6))
+/* SCM device is unable to persist memory contents in certain conditions */
+#define PAPR_PMEM_HEALTH_NON_CRITICAL (1ULL << (63 - 7))
+/* SCM device is encrypted */
+#define PAPR_PMEM_ENCRYPTED (1ULL << (63 - 8))
+/* SCM device has been scrubbed and locked */
+#define PAPR_PMEM_SCRUBBED_AND_LOCKED (1ULL << (63 - 9))
+
+#define PAPR_PMEM_SAVE_FAILED (1ULL << (63 - 10))
+
+/* Bits status indicators for health bitmap indicating unarmed dimm */
+#define PAPR_PMEM_UNARMED_MASK (PAPR_PMEM_UNARMED | \
+ PAPR_PMEM_HEALTH_UNHEALTHY)
+
+/* Bits status indicators for health bitmap indicating unflushed dimm */
+#define PAPR_PMEM_BAD_SHUTDOWN_MASK (PAPR_PMEM_SHUTDOWN_DIRTY)
+
+/* Bits status indicators for health bitmap indicating unrestored dimm */
+#define PAPR_PMEM_BAD_RESTORE_MASK (PAPR_PMEM_EMPTY)
+
+/* Bit status indicators for smart event notification */
+#define PAPR_PMEM_SMART_EVENT_MASK (PAPR_PMEM_HEALTH_CRITICAL | \
+ PAPR_PMEM_HEALTH_FATAL | \
+ PAPR_PMEM_HEALTH_UNHEALTHY)
+
+#define PAPR_PMEM_SAVE_MASK (PAPR_PMEM_SAVE_FAILED)
+
+#define PAPR_SCM_PERF_STATS_EYECATCHER __stringify(SCMSTATS)
+#define PAPR_SCM_PERF_STATS_VERSION 0x1
+
+#endif /* __LINUX_PAPR_SCM_H */
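PAPR numbers bits from the MSB, hence the (1ULL << (63 - n)) shifts; a sketch of classifying a health bitmap with the composite masks (health stands for the hypervisor-returned value):

	bool unarmed = health & PAPR_PMEM_UNARMED_MASK;
	bool dirty_shutdown = health & PAPR_PMEM_BAD_SHUTDOWN_MASK;
	bool smart_event = health & PAPR_PMEM_SMART_EVENT_MASK;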
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a0c75e467df3..7a099e1282d2 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -580,6 +580,7 @@
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb
#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3
#define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb
+#define PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3 0x12bb
#define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3
#define PCI_DEVICE_ID_AMD_MI300_DF_F3 0x152b
#define PCI_DEVICE_ID_AMD_VANGOGH_USB 0x163a
@@ -2686,8 +2687,10 @@
#define PCI_DEVICE_ID_INTEL_I960 0x0960
#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
#define PCI_DEVICE_ID_INTEL_HDA_HSW_0 0x0a0c
+#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
#define PCI_DEVICE_ID_INTEL_HDA_HSW_2 0x0c0c
#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
+#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe
#define PCI_DEVICE_ID_INTEL_HDA_HSW_3 0x0d0c
#define PCI_DEVICE_ID_INTEL_HDA_BYT 0x0f04
#define PCI_DEVICE_ID_INTEL_SST_BYT 0x0f28
@@ -3106,6 +3109,7 @@
#define PCI_DEVICE_ID_INTEL_HDA_CML_S 0xa3f0
#define PCI_DEVICE_ID_INTEL_HDA_LNL_P 0xa828
#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
+#define PCI_DEVICE_ID_INTEL_HDA_BMG 0xe2f7
#define PCI_DEVICE_ID_INTEL_HDA_CML_R 0xf0c8
#define PCI_DEVICE_ID_INTEL_HDA_RKL_S 0xf1c8
diff --git a/include/linux/peci.h b/include/linux/peci.h
index 9b3d36aff431..90e241458ef6 100644
--- a/include/linux/peci.h
+++ b/include/linux/peci.h
@@ -58,7 +58,6 @@ static inline struct peci_controller *to_peci_controller(void *d)
/**
* struct peci_device - PECI device
* @dev: device object to register PECI device to the device model
- * @controller: manages the bus segment hosting this PECI device
* @info: PECI device characteristics
* @info.family: device family
* @info.model: device model
diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h
index 43282e22ebe1..701974639ff2 100644
--- a/include/linux/perf/riscv_pmu.h
+++ b/include/linux/perf/riscv_pmu.h
@@ -39,6 +39,14 @@ struct cpu_hw_events {
DECLARE_BITMAP(used_hw_ctrs, RISCV_MAX_COUNTERS);
/* currently enabled firmware counters */
DECLARE_BITMAP(used_fw_ctrs, RISCV_MAX_COUNTERS);
+ /* The virtual address of the shared memory where counter snapshot will be taken */
+ void *snapshot_addr;
+ /* The physical address of the shared memory where counter snapshot will be taken */
+ phys_addr_t snapshot_addr_phys;
+ /* Boolean flag to indicate setup is already done */
+ bool snapshot_set_done;
+ /* A shadow copy of the counter values to avoid clobbering during multiple SBI calls */
+ u64 snapshot_cval_shcopy[RISCV_MAX_COUNTERS];
};
struct riscv_pmu {
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index d2a15c0c6f8a..a5304ae8c654 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -809,11 +809,8 @@ struct perf_event {
u64 (*clock)(void);
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
-#ifdef CONFIG_BPF_SYSCALL
- perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
u64 bpf_cookie;
-#endif
#ifdef CONFIG_EVENT_TRACING
struct trace_event_call *tp_event;
@@ -883,6 +880,7 @@ struct perf_event_pmu_context {
unsigned int nr_events;
unsigned int nr_cgroups;
+ unsigned int nr_freq;
atomic_t refcount; /* event <-> epc */
struct rcu_head rcu_head;
@@ -897,6 +895,11 @@ struct perf_event_pmu_context {
int rotate_necessary;
};
+static inline bool perf_pmu_ctx_is_active(struct perf_event_pmu_context *epc)
+{
+ return !list_empty(&epc->flexible_active) || !list_empty(&epc->pinned_active);
+}
+
struct perf_event_groups {
struct rb_root tree;
u64 index;
@@ -1342,8 +1345,10 @@ extern int perf_event_output(struct perf_event *event,
struct pt_regs *regs);
static inline bool
-__is_default_overflow_handler(perf_overflow_handler_t overflow_handler)
+is_default_overflow_handler(struct perf_event *event)
{
+ perf_overflow_handler_t overflow_handler = event->overflow_handler;
+
if (likely(overflow_handler == perf_event_output_forward))
return true;
if (unlikely(overflow_handler == perf_event_output_backward))
@@ -1351,22 +1356,6 @@ __is_default_overflow_handler(perf_overflow_handler_t overflow_handler)
return false;
}
-#define is_default_overflow_handler(event) \
- __is_default_overflow_handler((event)->overflow_handler)
-
-#ifdef CONFIG_BPF_SYSCALL
-static inline bool uses_default_overflow_handler(struct perf_event *event)
-{
- if (likely(is_default_overflow_handler(event)))
- return true;
-
- return __is_default_overflow_handler(event->orig_overflow_handler);
-}
-#else
-#define uses_default_overflow_handler(event) \
- is_default_overflow_handler(event)
-#endif
-
extern void
perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
@@ -1697,6 +1686,14 @@ perf_event_addr_filters(struct perf_event *event)
return ifh;
}
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+ /* Only the parent has fasync state */
+ if (event->parent)
+ event = event->parent;
+ return &event->fasync;
+}
+
extern void perf_event_addr_filters_sync(struct perf_event *event);
extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id);
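perf_event_fasync() centralises the rule that only the parent event carries fasync state; a sketch of the expected call pattern:

	/* Deliver SIGIO against the parent's fasync list. */
	kill_fasync(perf_event_fasync(event), SIGIO, POLL_IN);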
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 3f68b8239bb1..e6e83304558e 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -778,6 +778,7 @@ struct phy_device {
/* Generic phy_device::dev_flags */
#define PHY_F_NO_IRQ 0x80000000
+#define PHY_F_RXC_ALWAYS_ON 0x40000000
static inline struct phy_device *to_phy_device(const struct device *dev)
{
diff --git a/include/linux/phy/phy-dp.h b/include/linux/phy/phy-dp.h
index 18cad23642cd..9cce5766bc0b 100644
--- a/include/linux/phy/phy-dp.h
+++ b/include/linux/phy/phy-dp.h
@@ -8,6 +8,9 @@
#include <linux/types.h>
+#define PHY_SUBMODE_DP 0
+#define PHY_SUBMODE_EDP 1
+
/**
* struct phy_configure_opts_dp - DisplayPort PHY configuration set
*
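The new submodes are meant to be passed through phy_set_mode_ext(); a hypothetical consumer selecting eDP signalling:

	err = phy_set_mode_ext(dp_phy, PHY_MODE_DP, PHY_SUBMODE_EDP);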
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 9a57deefcb07..5ea6b2ad2396 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -138,6 +138,9 @@ enum phylink_op_type {
* @poll_fixed_state: if true, starts link_poll,
* if MAC link is at %MLO_AN_FIXED mode.
* @mac_managed_pm: if true, indicate the MAC driver is responsible for PHY PM.
+ * @mac_requires_rxc: if true, the MAC always requires a receive clock from PHY.
+ * The PHY driver should start the clock signal as soon as
+ * possible and avoid stopping it during suspend events.
* @ovr_an_inband: if true, override PCS to MLO_AN_INBAND
* @get_fixed_state: callback to execute to determine the fixed link state,
* if MAC link is at %MLO_AN_FIXED mode.
@@ -150,6 +153,7 @@ struct phylink_config {
enum phylink_op_type type;
bool poll_fixed_state;
bool mac_managed_pm;
+ bool mac_requires_rxc;
bool ovr_an_inband;
void (*get_fixed_state)(struct phylink_config *config,
struct phylink_link_state *state);
@@ -392,6 +396,10 @@ struct phylink_pcs_ops;
* @phylink: pointer to &struct phylink_config
* @neg_mode: provide PCS neg mode via "mode" argument
* @poll: poll the PCS for link changes
+ * @rxc_always_on: The MAC driver requires the reference clock
+ * to always be on. Standalone PCS drivers which
+ * do not have access to a PHY device can check
+ * this instead of PHY_F_RXC_ALWAYS_ON.
*
* This structure is designed to be embedded within the PCS private data,
* and will be passed between phylink and the PCS.
@@ -404,6 +412,7 @@ struct phylink_pcs {
struct phylink *phylink;
bool neg_mode;
bool poll;
+ bool rxc_always_on;
};
/**
@@ -418,6 +427,8 @@ struct phylink_pcs {
* @pcs_an_restart: restart 802.3z BaseX autonegotiation.
* @pcs_link_up: program the PCS for the resolved link configuration
* (where necessary).
+ * @pcs_pre_init: configure PCS components necessary for MAC hardware
+ * initialization e.g. RX clock for stmmac.
*/
struct phylink_pcs_ops {
int (*pcs_validate)(struct phylink_pcs *pcs, unsigned long *supported,
@@ -437,6 +448,7 @@ struct phylink_pcs_ops {
void (*pcs_an_restart)(struct phylink_pcs *pcs);
void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex);
+ int (*pcs_pre_init)(struct phylink_pcs *pcs);
};
#if 0 /* For kernel-doc purposes only. */
@@ -542,6 +554,34 @@ void pcs_an_restart(struct phylink_pcs *pcs);
*/
void pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex);
+
+/**
+ * pcs_pre_init() - Configure PCS components necessary for MAC initialization
+ * @pcs: a pointer to a &struct phylink_pcs.
+ *
+ * This function can be called by MAC drivers through the
+ * phylink_pcs_pre_init() wrapper, before their hardware is initialized. It
+ * should not be called after the link is brought up, as reconfiguring the PCS
+ * at this point could break the link.
+ *
+ * Some MAC devices require specific hardware initialization to be performed by
+ * their associated PCS device before they can properly initialize their own
+ * hardware. An example of this is the initialization of stmmac controllers,
+ * which requires an active REF_CLK signal to be provided by the PHY/PCS.
+ *
+ * By calling phylink_pcs_pre_init(), MAC drivers can ensure that the PCS is
+ * setup in a way that allows for successful hardware initialization.
+ *
+ * The specific configuration performed by pcs_pre_init() is dependent on the
+ * model of PCS and the requirements of the MAC device attached to it. PCS
+ * driver authors should consider whether their target device is to be used in
+ * conjunction with a MAC device whose driver calls phylink_pcs_pre_init(). MAC
+ * driver authors should document their requirements for the PCS
+ * pre-initialization.
+ *
+ */
+int pcs_pre_init(struct phylink_pcs *pcs);
+
#endif
struct phylink *phylink_create(struct phylink_config *,
@@ -561,6 +601,8 @@ void phylink_disconnect_phy(struct phylink *);
void phylink_mac_change(struct phylink *, bool up);
void phylink_pcs_change(struct phylink_pcs *, bool up);
+int phylink_pcs_pre_init(struct phylink *pl, struct phylink_pcs *pcs);
+
void phylink_start(struct phylink *);
void phylink_stop(struct phylink *);
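A sketch of the intended phylink_pcs_pre_init() call site, per the kernel-doc above: an stmmac-like MAC asks its PCS for the receive clock before initialising hardware (the priv layout is hypothetical):

	err = phylink_pcs_pre_init(priv->phylink, priv->pcs);
	if (err)
		dev_err(priv->dev, "PCS pre-init failed: %d\n", err);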
diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h
index c8645b2ed3c0..b9c8520b4bd3 100644
--- a/include/linux/platform_data/davinci_asp.h
+++ b/include/linux/platform_data/davinci_asp.h
@@ -26,16 +26,6 @@ struct davinci_mcasp_pdata {
struct gen_pool *sram_pool;
/*
- * If McBSP peripheral gets the clock from an external pin,
- * there are three chooses, that are MCBSP_CLKX, MCBSP_CLKR
- * and MCBSP_CLKS.
- * Depending on different hardware connections it is possible
- * to use this setting to change the behaviour of McBSP
- * driver.
- */
- int clk_input_pin;
-
- /*
* This flag works when both clock and FS are outputs for the cpu
* and makes clock more accurate (FS is not symmetrical and the
* clock is very fast.
@@ -91,11 +81,6 @@ enum {
MCASP_VERSION_OMAP, /* OMAP4/5 */
};
-enum mcbsp_clk_input_pin {
- MCBSP_CLKR = 0, /* as in DM365 */
- MCBSP_CLKS,
-};
-
#define INACTIVE_MODE 0
#define TX_MODE 1
#define RX_MODE 2
diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h
index 3b400b1919a9..9e3c15b4ac91 100644
--- a/include/linux/platform_data/spi-omap2-mcspi.h
+++ b/include/linux/platform_data/spi-omap2-mcspi.h
@@ -16,9 +16,6 @@ struct omap2_mcspi_platform_config {
struct omap2_mcspi_device_config {
unsigned turbo_mode:1;
-
- /* toggle chip select after every word */
- unsigned cs_per_word:1;
};
#endif
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index eb556f988d57..d8f15770a522 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -71,7 +71,6 @@ struct sysc_regbits {
#define SYSC_QUIRK_SWSUP_SIDLE_ACT BIT(12)
#define SYSC_QUIRK_SWSUP_SIDLE BIT(11)
#define SYSC_QUIRK_EXT_OPT_CLOCK BIT(10)
-#define SYSC_QUIRK_LEGACY_IDLE BIT(9)
#define SYSC_QUIRK_RESET_STATUS BIT(8)
#define SYSC_QUIRK_NO_IDLE BIT(7)
#define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6)
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index ab1c7deff118..3eb5cd6773ad 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -71,6 +71,7 @@
#define ASUS_WMI_DEVID_LID_FLIP 0x00060062
#define ASUS_WMI_DEVID_LID_FLIP_ROG 0x00060077
#define ASUS_WMI_DEVID_MINI_LED_MODE 0x0005001E
+#define ASUS_WMI_DEVID_MINI_LED_MODE2 0x0005002E
/* Storage */
#define ASUS_WMI_DEVID_CARDREADER 0x00080013
@@ -127,13 +128,18 @@
/* gpu mux switch, 0 = dGPU, 1 = Optimus */
#define ASUS_WMI_DEVID_GPU_MUX 0x00090016
+#define ASUS_WMI_DEVID_GPU_MUX_VIVO 0x00090026
/* TUF laptop RGB modes/colours */
#define ASUS_WMI_DEVID_TUF_RGB_MODE 0x00100056
+#define ASUS_WMI_DEVID_TUF_RGB_MODE2 0x0010005A
/* TUF laptop RGB power/state */
#define ASUS_WMI_DEVID_TUF_RGB_STATE 0x00100057
+/* Bootup sound control */
+#define ASUS_WMI_DEVID_BOOT_SOUND 0x00130022
+
/* DSTS masks */
#define ASUS_WMI_DSTS_STATUS_BIT 0x00000001
#define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002
diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h
index e5cbb6841f3a..f5492ed413f3 100644
--- a/include/linux/platform_profile.h
+++ b/include/linux/platform_profile.h
@@ -36,6 +36,7 @@ struct platform_profile_handler {
int platform_profile_register(struct platform_profile_handler *pprof);
int platform_profile_remove(void);
+int platform_profile_cycle(void);
void platform_profile_notify(void);
#endif /*_PLATFORM_PROFILE_H_*/
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 065a47382302..dd7c8441af42 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -476,6 +476,8 @@ struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
int of_get_required_opp_performance_state(struct device_node *np, int index);
int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table);
int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus);
+int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW,
+ unsigned long *kHz);
static inline void dev_pm_opp_of_unregister_em(struct device *dev)
{
em_dev_unregister_perf_domain(dev);
@@ -539,6 +541,12 @@ static inline void dev_pm_opp_of_unregister_em(struct device *dev)
{
}
+static inline int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW,
+ unsigned long *kHz)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int of_get_required_opp_performance_state(struct device_node *np, int index)
{
return -EOPNOTSUPP;
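
A sketch of the calling convention for the new dev_pm_opp_calc_power() helper, assuming a CPU device with an OPP table; the frequency value is illustrative, and on the stub above the call returns -EOPNOTSUPP:

#include <linux/pm_opp.h>

static int example_opp_power(struct device *cpu_dev)
{
	unsigned long freq_khz = 1000000;	/* illustrative operating point */
	unsigned long power_uw;
	int err;

	err = dev_pm_opp_calc_power(cpu_dev, &power_uw, &freq_khz);
	if (err)
		return err;

	dev_info(cpu_dev, "estimated %lu uW at %lu kHz\n", power_uw, freq_khz);
	return 0;
}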
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 6eb9adaef52b..76cd1f9f1365 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -107,7 +107,7 @@ extern void wakeup_sources_read_unlock(int idx);
extern struct wakeup_source *wakeup_sources_walk_start(void);
extern struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws);
extern int device_wakeup_enable(struct device *dev);
-extern int device_wakeup_disable(struct device *dev);
+extern void device_wakeup_disable(struct device *dev);
extern void device_set_wakeup_capable(struct device *dev, bool capable);
extern int device_set_wakeup_enable(struct device *dev, bool enable);
extern void __pm_stay_awake(struct wakeup_source *ws);
@@ -154,10 +154,9 @@ static inline int device_wakeup_enable(struct device *dev)
return 0;
}
-static inline int device_wakeup_disable(struct device *dev)
+static inline void device_wakeup_disable(struct device *dev)
{
dev->power.should_wakeup = false;
- return 0;
}
static inline int device_set_wakeup_enable(struct device *dev, bool enable)
@@ -235,11 +234,10 @@ static inline int device_init_wakeup(struct device *dev, bool enable)
if (enable) {
device_set_wakeup_capable(dev, true);
return device_wakeup_enable(dev);
- } else {
- device_wakeup_disable(dev);
- device_set_wakeup_capable(dev, false);
- return 0;
}
+ device_wakeup_disable(dev);
+ device_set_wakeup_capable(dev, false);
+ return 0;
}
#endif /* _LINUX_PM_WAKEUP_H */
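
Since device_wakeup_disable() now returns void, callers that checked its return value need a trivial update; a sketch of the resulting pattern (the driver context is hypothetical):

/* After the signature change the disable path is fire-and-forget: */
static void example_wakeup_teardown(struct device *dev)
{
	device_wakeup_disable(dev);		/* no return value to check */
	device_set_wakeup_capable(dev, false);
}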
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index b9e5bd2b42d3..5180dc9f1706 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -47,16 +47,8 @@ struct bq27xxx_access_methods {
};
struct bq27xxx_reg_cache {
- int temperature;
- int time_to_empty;
- int time_to_empty_avg;
- int time_to_full;
- int charge_full;
- int cycle_count;
int capacity;
- int energy;
int flags;
- int health;
};
struct bq27xxx_device_info {
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 955e31860095..dbbd202b1cb3 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -71,7 +71,7 @@ extern void console_verbose(void);
/* strlen("ratelimit") + 1 */
#define DEVKMSG_STR_MAX_SIZE 10
-extern char devkmsg_log_str[];
+extern char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE];
struct ctl_table;
extern int suppress_printk;
@@ -126,7 +126,7 @@ struct va_format {
#define no_printk(fmt, ...) \
({ \
if (0) \
- printk(fmt, ##__VA_ARGS__); \
+ _printk(fmt, ##__VA_ARGS__); \
0; \
})
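
Routing no_printk() through _printk() keeps the never-taken branch out of the printk index while preserving compile-time format checking. A sketch of the usual pattern this macro supports (the drv_dbg name is invented):

/* A debug macro that compiles away in non-debug builds but still
 * type-checks its arguments against the format string.
 */
#ifdef DEBUG
#define drv_dbg(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define drv_dbg(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif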
diff --git a/include/linux/profile.h b/include/linux/profile.h
index 11db1ec516e2..04ae5ebcb637 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -18,13 +18,8 @@ struct proc_dir_entry;
struct notifier_block;
#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS)
-void create_prof_cpu_mask(void);
int create_proc_profile(void);
#else
-static inline void create_prof_cpu_mask(void)
-{
-}
-
static inline int create_proc_profile(void)
{
return 0;
diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
index fb724c65c77b..6d07c95dabb9 100644
--- a/include/linux/pse-pd/pse.h
+++ b/include/linux/pse-pd/pse.h
@@ -15,11 +15,14 @@ struct pse_controller_dev;
/**
* struct pse_control_config - PSE control/channel configuration.
*
- * @admin_cotrol: set PoDL PSE admin control as described in
+ * @podl_admin_control: set PoDL PSE admin control as described in
* IEEE 802.3-2018 30.15.1.2.1 acPoDLPSEAdminControl
+ * @c33_admin_control: set PSE admin control as described in
+ * IEEE 802.3-2022 30.9.1.2.1 acPSEAdminControl
*/
struct pse_control_config {
- enum ethtool_podl_pse_admin_state admin_cotrol;
+ enum ethtool_podl_pse_admin_state podl_admin_control;
+ enum ethtool_c33_pse_admin_state c33_admin_control;
};
/**
@@ -29,25 +32,36 @@ struct pse_control_config {
* functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
* @podl_pw_status: power detection status of the PoDL PSE.
* IEEE 802.3-2018 30.15.1.1.3 aPoDLPSEPowerDetectionStatus:
+ * @c33_admin_state: operational state of the PSE
+ * functions. IEEE 802.3-2022 30.9.1.1.2 aPSEAdminState
+ * @c33_pw_status: power detection status of the PSE.
+ * IEEE 802.3-2022 30.9.1.1.5 aPSEPowerDetectionStatus:
*/
struct pse_control_status {
enum ethtool_podl_pse_admin_state podl_admin_state;
enum ethtool_podl_pse_pw_d_status podl_pw_status;
+ enum ethtool_c33_pse_admin_state c33_admin_state;
+ enum ethtool_c33_pse_pw_d_status c33_pw_status;
};
/**
* struct pse_controller_ops - PSE controller driver callbacks
*
* @ethtool_get_status: get PSE control status for ethtool interface
- * @ethtool_set_config: set PSE control configuration over ethtool interface
+ * @setup_pi_matrix: setup PI matrix of the PSE controller
+ * @pi_is_enabled: Return 1 if the PSE PI is enabled, 0 if not.
+ * May also return negative errno.
+ * @pi_enable: Configure the PSE PI as enabled.
+ * @pi_disable: Configure the PSE PI as disabled.
*/
struct pse_controller_ops {
int (*ethtool_get_status)(struct pse_controller_dev *pcdev,
unsigned long id, struct netlink_ext_ack *extack,
struct pse_control_status *status);
- int (*ethtool_set_config)(struct pse_controller_dev *pcdev,
- unsigned long id, struct netlink_ext_ack *extack,
- const struct pse_control_config *config);
+ int (*setup_pi_matrix)(struct pse_controller_dev *pcdev);
+ int (*pi_is_enabled)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_enable)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_disable)(struct pse_controller_dev *pcdev, int id);
};
struct module;
@@ -55,6 +69,40 @@ struct device_node;
struct of_phandle_args;
struct pse_control;
+/* PSE PI pairset pinout can either be Alternative A or Alternative B */
+enum pse_pi_pairset_pinout {
+ ALTERNATIVE_A,
+ ALTERNATIVE_B,
+};
+
+/**
+ * struct pse_pi_pairset - PSE PI pairset entity describing the pinout
+ * alternative and its phandle
+ *
+ * @pinout: description of the pinout alternative
+ * @np: device node pointer describing the pairset phandle
+ */
+struct pse_pi_pairset {
+ enum pse_pi_pairset_pinout pinout;
+ struct device_node *np;
+};
+
+/**
+ * struct pse_pi - PSE PI (Power Interface) entity as described in
+ * IEEE 802.3-2022 145.2.4
+ *
+ * @pairset: table of the PSE PI pinout alternatives for the two pairsets
+ * @np: device node pointer of the PSE PI node
+ * @rdev: regulator represented by the PSE PI
+ * @admin_state_enabled: PI enabled state
+ */
+struct pse_pi {
+ struct pse_pi_pairset pairset[2];
+ struct device_node *np;
+ struct regulator_dev *rdev;
+ bool admin_state_enabled;
+};
+
/**
* struct pse_controller_dev - PSE controller entity that might
* provide multiple PSE controls
@@ -64,10 +112,11 @@ struct pse_control;
* @pse_control_head: head of internal list of requested PSE controls
* @dev: corresponding driver model device struct
* @of_pse_n_cells: number of cells in PSE line specifiers
- * @of_xlate: translation function to translate from specifier as found in the
- * device tree to id as given to the PSE control ops
* @nr_lines: number of PSE controls in this controller device
* @lock: Mutex for serialization access to the PSE controller
+ * @types: types of the PSE controller
+ * @pi: table of PSE PIs described in this controller device
+ * @no_of_pse_pi: flag set if the pse_pis devicetree node is not used
*/
struct pse_controller_dev {
const struct pse_controller_ops *ops;
@@ -76,10 +125,11 @@ struct pse_controller_dev {
struct list_head pse_control_head;
struct device *dev;
int of_pse_n_cells;
- int (*of_xlate)(struct pse_controller_dev *pcdev,
- const struct of_phandle_args *pse_spec);
unsigned int nr_lines;
struct mutex lock;
+ enum ethtool_pse_types types;
+ struct pse_pi *pi;
+ bool no_of_pse_pi;
};
#if IS_ENABLED(CONFIG_PSE_CONTROLLER)
@@ -99,6 +149,9 @@ int pse_ethtool_set_config(struct pse_control *psec,
struct netlink_ext_ack *extack,
const struct pse_control_config *config);
+bool pse_has_podl(struct pse_control *psec);
+bool pse_has_c33(struct pse_control *psec);
+
#else
static inline struct pse_control *of_pse_control_get(struct device_node *node)
@@ -124,6 +177,16 @@ static inline int pse_ethtool_set_config(struct pse_control *psec,
return -ENOTSUPP;
}
+static inline bool pse_has_podl(struct pse_control *psec)
+{
+ return false;
+}
+
+static inline bool pse_has_c33(struct pse_control *psec)
+{
+ return false;
+}
+
#endif
#endif
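
A sketch of how a PSE driver might fill the reworked pse_controller_ops with the new per-PI callbacks; the foo_* names and register handling are hypothetical:

#include <linux/pse-pd/pse.h>

static int foo_pi_enable(struct pse_controller_dev *pcdev, int id)
{
	/* hardware-specific: set the enable bit for power interface @id */
	return 0;
}

static int foo_pi_disable(struct pse_controller_dev *pcdev, int id)
{
	/* hardware-specific: clear the enable bit for power interface @id */
	return 0;
}

static int foo_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
{
	return 1;	/* 1 = enabled, 0 = disabled, or negative errno */
}

static const struct pse_controller_ops foo_pse_ops = {
	.pi_enable	= foo_pi_enable,
	.pi_disable	= foo_pi_disable,
	.pi_is_enabled	= foo_pi_is_enabled,
};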
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 4a6568dfdf3f..60b92c2c75ef 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -2,6 +2,7 @@
#ifndef __LINUX_PWM_H
#define __LINUX_PWM_H
+#include <linux/device.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/of.h>
@@ -272,11 +273,11 @@ struct pwm_ops {
* @npwm: number of PWMs controlled by this chip
* @of_xlate: request a PWM device given a device tree PWM specifier
* @atomic: can the driver's ->apply() be called in atomic context
- * @driver_data: Private pointer for driver specific info
+ * @uses_pwmchip_alloc: signals if pwmchip_alloc was used to allocate this chip
* @pwms: array of PWM devices allocated by the framework
*/
struct pwm_chip {
- struct device *dev;
+ struct device dev;
const struct pwm_ops *ops;
struct module *owner;
unsigned int id;
@@ -287,31 +288,23 @@ struct pwm_chip {
bool atomic;
/* only used internally by the PWM framework */
- void *driver_data;
- struct pwm_device *pwms;
+ bool uses_pwmchip_alloc;
+ struct pwm_device pwms[] __counted_by(npwm);
};
static inline struct device *pwmchip_parent(const struct pwm_chip *chip)
{
- return chip->dev;
+ return chip->dev.parent;
}
static inline void *pwmchip_get_drvdata(struct pwm_chip *chip)
{
- /*
- * After pwm_chip got a dedicated struct device, this can be replaced by
- * dev_get_drvdata(&chip->dev);
- */
- return chip->driver_data;
+ return dev_get_drvdata(&chip->dev);
}
static inline void pwmchip_set_drvdata(struct pwm_chip *chip, void *data)
{
- /*
- * After pwm_chip got a dedicated struct device, this can be replaced by
- * dev_set_drvdata(&chip->dev, data);
- */
- chip->driver_data = data;
+ dev_set_drvdata(&chip->dev, data);
}
#if IS_ENABLED(CONFIG_PWM)
@@ -628,17 +621,4 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
}
#endif
-#ifdef CONFIG_PWM_SYSFS
-void pwmchip_sysfs_export(struct pwm_chip *chip);
-void pwmchip_sysfs_unexport(struct pwm_chip *chip);
-#else
-static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
-{
-}
-
-static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
-{
-}
-#endif /* CONFIG_PWM_SYSFS */
-
#endif /* __LINUX_PWM_H */
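
With struct pwm_chip now embedding a struct device, driver data flows through dev_{get,set}_drvdata() behind the unchanged pwmchip_*_drvdata() helpers. A sketch, with the foo_pwm type and callback purely illustrative:

struct foo_pwm {
	void __iomem *base;	/* hypothetical register window */
};

static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			 const struct pwm_state *state)
{
	struct foo_pwm *priv = pwmchip_get_drvdata(chip);

	/* program priv->base from *state ... */
	return 0;
}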
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index cd1973e6ac4b..844a2743ca94 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -217,9 +217,9 @@ enum pxa_ssp_type {
PXA27x_SSP,
PXA3xx_SSP,
PXA168_SSP,
- MMP2_SSP,
PXA910_SSP,
CE4100_SSP,
+ MMP2_SSP,
MRFLD_SSP,
QUARK_X1000_SSP,
/* Keep LPSS types sorted with lpss_platforms[] */
diff --git a/include/linux/qat/qat_mig_dev.h b/include/linux/qat/qat_mig_dev.h
new file mode 100644
index 000000000000..dbbb6a063dd2
--- /dev/null
+++ b/include/linux/qat/qat_mig_dev.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation */
+#ifndef QAT_MIG_DEV_H_
+#define QAT_MIG_DEV_H_
+
+struct pci_dev;
+
+struct qat_mig_dev {
+ void *parent_accel_dev;
+ u8 *state;
+ u32 setup_size;
+ u32 remote_setup_size;
+ u32 state_size;
+ s32 vf_id;
+};
+
+struct qat_mig_dev *qat_vfmig_create(struct pci_dev *pdev, int vf_id);
+int qat_vfmig_init(struct qat_mig_dev *mdev);
+void qat_vfmig_cleanup(struct qat_mig_dev *mdev);
+void qat_vfmig_reset(struct qat_mig_dev *mdev);
+int qat_vfmig_open(struct qat_mig_dev *mdev);
+void qat_vfmig_close(struct qat_mig_dev *mdev);
+int qat_vfmig_suspend(struct qat_mig_dev *mdev);
+int qat_vfmig_resume(struct qat_mig_dev *mdev);
+int qat_vfmig_save_state(struct qat_mig_dev *mdev);
+int qat_vfmig_save_setup(struct qat_mig_dev *mdev);
+int qat_vfmig_load_state(struct qat_mig_dev *mdev);
+int qat_vfmig_load_setup(struct qat_mig_dev *mdev, int size);
+void qat_vfmig_destroy(struct qat_mig_dev *mdev);
+
+#endif /*QAT_MIG_DEV_H_*/
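
A hedged sketch of the save-side sequence a migration driver might run with these exports; the ordering is inferred from the function names, not stated by the header:

#include <linux/qat/qat_mig_dev.h>

static int example_vf_save(struct qat_mig_dev *mdev)
{
	int ret;

	ret = qat_vfmig_suspend(mdev);		/* quiesce the VF */
	if (ret)
		return ret;

	ret = qat_vfmig_save_setup(mdev);	/* capture the setup blob */
	if (ret)
		return ret;

	return qat_vfmig_save_state(mdev);	/* capture device state */
}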
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 17d7ed5f3ae6..dfd2399f2cde 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -401,15 +401,15 @@ static inline int debug_lockdep_rcu_enabled(void)
} \
} while (0)
-#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
+#ifndef CONFIG_PREEMPT_RCU
static inline void rcu_preempt_sleep_check(void)
{
RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
"Illegal context switch in RCU read-side critical section");
}
-#else /* #ifdef CONFIG_PROVE_RCU */
+#else // #ifndef CONFIG_PREEMPT_RCU
static inline void rcu_preempt_sleep_check(void) { }
-#endif /* #else #ifdef CONFIG_PROVE_RCU */
+#endif // #else // #ifndef CONFIG_PREEMPT_RCU
#define rcu_sleep_check() \
do { \
@@ -809,9 +809,9 @@ static inline void rcu_read_unlock(void)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
+ rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
__release(RCU);
__rcu_read_unlock();
- rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}
/**
@@ -1090,6 +1090,18 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
extern int rcu_expedited;
extern int rcu_normal;
-DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
+DEFINE_LOCK_GUARD_0(rcu,
+ do {
+ rcu_read_lock();
+ /*
+ * sparse doesn't call the cleanup function,
+ * so just release immediately and don't track
+ * the context. We don't need to anyway, since
+ * the whole point of the guard is to not need
+ * the explicit unlock.
+ */
+ __release(RCU);
+ } while (0),
+ rcu_read_unlock())
#endif /* __LINUX_RCUPDATE_H */
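
Call sites of the sparse-aware guard are unchanged; a minimal usage sketch, with struct foo and its field invented for illustration:

#include <linux/rcupdate.h>

struct foo {
	int value;
};

static int example_lookup(struct foo __rcu **slot)
{
	guard(rcu)();	/* rcu_read_lock(); auto-unlocked at scope exit */

	struct foo *p = rcu_dereference(*slot);

	return p ? p->value : -ENOENT;
}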
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index d07f0848802e..303ab9bee155 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -19,18 +19,18 @@ struct rcu_synchronize {
};
void wakeme_after_rcu(struct rcu_head *head);
-void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
+void __wait_rcu_gp(bool checktiny, unsigned int state, int n, call_rcu_func_t *crcu_array,
struct rcu_synchronize *rs_array);
-#define _wait_rcu_gp(checktiny, ...) \
-do { \
- call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
- struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \
- __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \
- __crcu_array, __rs_array); \
+#define _wait_rcu_gp(checktiny, state, ...) \
+do { \
+ call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
+ struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \
+ __wait_rcu_gp(checktiny, state, ARRAY_SIZE(__crcu_array), __crcu_array, __rs_array); \
} while (0)
-#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
+#define wait_rcu_gp(...) _wait_rcu_gp(false, TASK_UNINTERRUPTIBLE, __VA_ARGS__)
+#define wait_rcu_gp_state(state, ...) _wait_rcu_gp(false, state, __VA_ARGS__)
/**
* synchronize_rcu_mult - Wait concurrently for multiple grace periods
@@ -54,7 +54,7 @@ do { \
* grace period.
*/
#define synchronize_rcu_mult(...) \
- _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
+ _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), TASK_UNINTERRUPTIBLE, __VA_ARGS__)
static inline void cond_resched_rcu(void)
{
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index b743241cfb7c..a6bc2980a98b 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -297,15 +297,6 @@ typedef void (*regmap_unlock)(void *);
* performed on such table (a register is no increment
* readable if it belongs to one of the ranges specified
* by rd_noinc_table).
- * @disable_locking: This regmap is either protected by external means or
- * is guaranteed not to be accessed from multiple threads.
- * Don't use any locking mechanisms.
- * @lock: Optional lock callback (overrides regmap's default lock
- * function, based on spinlock or mutex).
- * @unlock: As above for unlocking.
- * @lock_arg: this field is passed as the only argument of lock/unlock
- * functions (ignored in case regular lock/unlock functions
- * are not overridden).
* @reg_read: Optional callback that if filled will be used to perform
* all the reads from the registers. Should only be provided for
* devices whose read operation cannot be represented as a simple
@@ -323,6 +314,7 @@ typedef void (*regmap_unlock)(void *);
* @write: Same as above for writing.
* @max_raw_read: Max raw read size that can be used on the device.
* @max_raw_write: Max raw write size that can be used on the device.
+ * @can_sleep: Optional, specifies whether regmap operations can sleep.
* @fast_io: Register IO is fast. Use a spinlock instead of a mutex
* to perform locking. This field is ignored if custom lock/unlock
* functions are used (see fields lock/unlock of struct regmap_config).
@@ -331,6 +323,15 @@ typedef void (*regmap_unlock)(void *);
* Use it only for "no-bus" cases.
* @io_port: Support IO port accessors. Makes sense only when MMIO vs. IO port
* access can be distinguished.
+ * @disable_locking: This regmap is either protected by external means or
+ * is guaranteed not to be accessed from multiple threads.
+ * Don't use any locking mechanisms.
+ * @lock: Optional lock callback (overrides regmap's default lock
+ * function, based on spinlock or mutex).
+ * @unlock: As above for unlocking.
+ * @lock_arg: This field is passed as the only argument of lock/unlock
+ * functions (ignored in case regular lock/unlock functions
+ * are not overridden).
* @max_register: Optional, specifies the maximum valid register address.
* @max_register_is_0: Optional, specifies that zero value in @max_register
* should be taken into account. This is a workaround to
@@ -373,21 +374,20 @@ typedef void (*regmap_unlock)(void *);
* @reg_defaults_raw: Power on reset values for registers (for use with
* register cache support).
* @num_reg_defaults_raw: Number of elements in reg_defaults_raw.
- * @reg_format_endian: Endianness for formatted register addresses. If this is
- * DEFAULT, the @reg_format_endian_default value from the
- * regmap bus is used.
- * @val_format_endian: Endianness for formatted register values. If this is
- * DEFAULT, the @reg_format_endian_default value from the
- * regmap bus is used.
- *
- * @ranges: Array of configuration entries for virtual address ranges.
- * @num_ranges: Number of range configuration entries.
* @use_hwlock: Indicate if a hardware spinlock should be used.
* @use_raw_spinlock: Indicate if a raw spinlock should be used.
* @hwlock_id: Specify the hardware spinlock id.
* @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
* HWLOCK_IRQ or 0.
- * @can_sleep: Optional, specifies whether regmap operations can sleep.
+ * @reg_format_endian: Endianness for formatted register addresses. If this is
+ * DEFAULT, the @reg_format_endian_default value from the
+ * regmap bus is used.
+ * @val_format_endian: Endianness for formatted register values. If this is
+ * DEFAULT, the @reg_format_endian_default value from the
+ * regmap bus is used.
+ *
+ * @ranges: Array of configuration entries for virtual address ranges.
+ * @num_ranges: Number of range configuration entries.
*/
struct regmap_config {
const char *name;
@@ -406,11 +406,6 @@ struct regmap_config {
bool (*writeable_noinc_reg)(struct device *dev, unsigned int reg);
bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
- bool disable_locking;
- regmap_lock lock;
- regmap_unlock unlock;
- void *lock_arg;
-
int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
int (*reg_write)(void *context, unsigned int reg, unsigned int val);
int (*reg_update_bits)(void *context, unsigned int reg,
@@ -422,9 +417,16 @@ struct regmap_config {
size_t max_raw_read;
size_t max_raw_write;
+ bool can_sleep;
+
bool fast_io;
bool io_port;
+ bool disable_locking;
+ regmap_lock lock;
+ regmap_unlock unlock;
+ void *lock_arg;
+
unsigned int max_register;
bool max_register_is_0;
const struct regmap_access_table *wr_table;
@@ -448,18 +450,16 @@ struct regmap_config {
bool use_relaxed_mmio;
bool can_multi_write;
- enum regmap_endian reg_format_endian;
- enum regmap_endian val_format_endian;
-
- const struct regmap_range_cfg *ranges;
- unsigned int num_ranges;
-
bool use_hwlock;
bool use_raw_spinlock;
unsigned int hwlock_id;
unsigned int hwlock_mode;
- bool can_sleep;
+ enum regmap_endian reg_format_endian;
+ enum regmap_endian val_format_endian;
+
+ const struct regmap_range_cfg *ranges;
+ unsigned int num_ranges;
};
/**
@@ -1230,6 +1230,7 @@ int regmap_multi_reg_write_bypassed(struct regmap *map,
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
+int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val);
int regmap_raw_read(struct regmap *map, unsigned int reg,
void *val, size_t val_len);
int regmap_noinc_read(struct regmap *map, unsigned int reg,
@@ -1739,6 +1740,13 @@ static inline int regmap_read(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_read_bypassed(struct regmap *map, unsigned int reg,
+ unsigned int *val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_raw_read(struct regmap *map, unsigned int reg,
void *val, size_t val_len)
{
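
A sketch of where regmap_read_bypassed() fits: reading a volatile hardware register while the map may be cache-only (for example during early resume). The register address and ready bit are hypothetical:

#include <linux/regmap.h>

#define FOO_STATUS_REG	0x04	/* hypothetical status register */

static int example_poll_ready(struct regmap *map)
{
	unsigned int status;
	int err;

	err = regmap_read_bypassed(map, FOO_STATUS_REG, &status);
	if (err)
		return err;

	return (status & BIT(0)) ? 0 : -EBUSY;	/* bit 0 = ready, assumed */
}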
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 4660582a3302..59d0b9a79e6e 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -164,6 +164,7 @@ struct regulator *__must_check devm_regulator_get_optional(struct device *dev,
const char *id);
int devm_regulator_get_enable(struct device *dev, const char *id);
int devm_regulator_get_enable_optional(struct device *dev, const char *id);
+int devm_regulator_get_enable_read_voltage(struct device *dev, const char *id);
void regulator_put(struct regulator *regulator);
void devm_regulator_put(struct regulator *regulator);
@@ -320,12 +321,18 @@ devm_regulator_get_exclusive(struct device *dev, const char *id)
static inline int devm_regulator_get_enable(struct device *dev, const char *id)
{
- return -ENODEV;
+ return 0;
}
static inline int devm_regulator_get_enable_optional(struct device *dev,
const char *id)
{
+ return 0;
+}
+
+static inline int devm_regulator_get_enable_read_voltage(struct device *dev,
+ const char *id)
+{
return -ENODEV;
}
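
A sketch of the intended probe-time use of devm_regulator_get_enable_read_voltage(), assuming (from the name) that it enables the supply for the device's lifetime and returns its voltage in microvolts, or a negative errno; the "vref" supply name is illustrative:

#include <linux/regulator/consumer.h>

static int example_probe_vref(struct device *dev, int *uv_out)
{
	int uv = devm_regulator_get_enable_read_voltage(dev, "vref");

	if (uv < 0)
		return uv;	/* the !CONFIG_REGULATOR stub gives -ENODEV */

	*uv_out = uv;
	return 0;
}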
diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h
index 505c908dbb81..243633c8dceb 100644
--- a/include/linux/regulator/pca9450.h
+++ b/include/linux/regulator/pca9450.h
@@ -9,6 +9,7 @@
enum pca9450_chip_type {
PCA9450_TYPE_PCA9450A = 0,
PCA9450_TYPE_PCA9450BC,
+ PCA9450_TYPE_PCA9451A,
PCA9450_TYPE_AMOUNT,
};
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 5b5357c0bd8c..8463a128e2f4 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -633,7 +633,7 @@ restart:
* @params: hash table parameters
*
* Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
+ * for an entry with an identical key. The first matching entry is returned.
*
* This must only be called under the RCU read lock.
*
@@ -655,7 +655,7 @@ static inline void *rhashtable_lookup(
* @params: hash table parameters
*
* Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
+ * for an entry with an identical key. The first matching entry is returned.
*
* Only use this function when you have other mechanisms guaranteeing
* that the object won't go away after the RCU read lock is released.
@@ -682,7 +682,7 @@ static inline void *rhashtable_lookup_fast(
* @params: hash table parameters
*
* Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. All matching entries are returned
+ * for an entry with an identical key. All matching entries are returned
* in a list.
*
* This must only be called under the RCU read lock.
@@ -699,7 +699,7 @@ static inline struct rhlist_head *rhltable_lookup(
}
/* Internal function, please use rhashtable_insert_fast() instead. This
- * function returns the existing element already in hashes in there is a clash,
+ * function returns the existing element already in hashes if there is a clash,
* otherwise it returns an error via ERR_PTR().
*/
static inline void *__rhashtable_insert_fast(
@@ -1130,7 +1130,7 @@ static inline int rhashtable_remove_fast(
*
* Since the hash chain is single linked, the removal operation needs to
* walk the bucket chain upon removal. The removal operation is thus
- * considerable slow if the hash table is not correctly sized.
+ * considerably slower if the hash table is not correctly sized.
*
* Will automatically shrink the table if permitted when residency drops
* below 30%
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index dc5ae4e96aee..96d2140b471e 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -6,6 +6,8 @@
#include <linux/seq_file.h>
#include <linux/poll.h>
+#include <uapi/linux/trace_mmap.h>
+
struct trace_buffer;
struct ring_buffer_iter;
@@ -223,4 +225,8 @@ int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
#define trace_rb_cpu_prepare NULL
#endif
+int ring_buffer_map(struct trace_buffer *buffer, int cpu,
+ struct vm_area_struct *vma);
+int ring_buffer_unmap(struct trace_buffer *buffer, int cpu);
+int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu);
#endif /* _LINUX_RING_BUFFER_H */
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index cdfc897f1e3c..a7da7dfc06a2 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -7,6 +7,7 @@
#include <linux/netdevice.h>
#include <linux/wait.h>
#include <linux/refcount.h>
+#include <linux/cleanup.h>
#include <uapi/linux/rtnetlink.h>
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
@@ -46,6 +47,8 @@ extern int rtnl_is_locked(void);
extern int rtnl_lock_killable(void);
extern bool refcount_dec_and_rtnl_lock(refcount_t *r);
+DEFINE_LOCK_GUARD_0(rtnl, rtnl_lock(), rtnl_unlock())
+
extern wait_queue_head_t netdev_unregistering_wq;
extern atomic_t dev_unreg_count;
extern struct rw_semaphore pernet_ops_rwsem;
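
The new rtnl guard enables scope-based locking in the same style as the rcu guard above; a minimal sketch:

#include <linux/rtnetlink.h>

static void example_under_rtnl(struct net_device *dev)
{
	guard(rtnl)();	/* rtnl_lock(); rtnl_unlock() at scope exit */

	/* ... mutate dev state that requires RTNL ... */
}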
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 427de5e4754b..61591ac6eab6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -301,7 +301,7 @@ enum {
TASK_COMM_LEN = 16,
};
-extern void scheduler_tick(void);
+extern void sched_tick(void);
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
@@ -840,6 +840,7 @@ struct task_struct {
#endif
unsigned int policy;
+ unsigned long max_allowed_capacity;
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
cpumask_t *user_cpus_ptr;
diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h
index 478084f9105e..e670ac282333 100644
--- a/include/linux/sched/idle.h
+++ b/include/linux/sched/idle.h
@@ -5,8 +5,8 @@
#include <linux/sched.h>
enum cpu_idle_type {
+ __CPU_NOT_IDLE = 0,
CPU_IDLE,
- CPU_NOT_IDLE,
CPU_NEWLY_IDLE,
CPU_MAX_IDLE_TYPES
};
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 18572c9ea724..4237daa5ac7a 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -110,7 +110,7 @@ struct sched_domain {
unsigned long last_decay_max_lb_cost;
#ifdef CONFIG_SCHEDSTATS
- /* load_balance() stats */
+ /* sched_balance_rq() stats */
unsigned int lb_count[CPU_MAX_IDLE_TYPES];
unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
@@ -270,17 +270,17 @@ unsigned long arch_scale_cpu_capacity(int cpu)
}
#endif
-#ifndef arch_scale_thermal_pressure
+#ifndef arch_scale_hw_pressure
static __always_inline
-unsigned long arch_scale_thermal_pressure(int cpu)
+unsigned long arch_scale_hw_pressure(int cpu)
{
return 0;
}
#endif
-#ifndef arch_update_thermal_pressure
+#ifndef arch_update_hw_pressure
static __always_inline
-void arch_update_thermal_pressure(const struct cpumask *cpus,
+void arch_update_hw_pressure(const struct cpumask *cpus,
unsigned long capped_frequency)
{ }
#endif
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index b807141acc14..3a9bb5b9a9e8 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -737,6 +737,89 @@ struct scmi_powercap_proto_ops {
u32 *power_thresh_high);
};
+enum scmi_pinctrl_selector_type {
+ PIN_TYPE = 0,
+ GROUP_TYPE,
+ FUNCTION_TYPE,
+};
+
+enum scmi_pinctrl_conf_type {
+ SCMI_PIN_DEFAULT = 0,
+ SCMI_PIN_BIAS_BUS_HOLD = 1,
+ SCMI_PIN_BIAS_DISABLE = 2,
+ SCMI_PIN_BIAS_HIGH_IMPEDANCE = 3,
+ SCMI_PIN_BIAS_PULL_UP = 4,
+ SCMI_PIN_BIAS_PULL_DEFAULT = 5,
+ SCMI_PIN_BIAS_PULL_DOWN = 6,
+ SCMI_PIN_DRIVE_OPEN_DRAIN = 7,
+ SCMI_PIN_DRIVE_OPEN_SOURCE = 8,
+ SCMI_PIN_DRIVE_PUSH_PULL = 9,
+ SCMI_PIN_DRIVE_STRENGTH = 10,
+ SCMI_PIN_INPUT_DEBOUNCE = 11,
+ SCMI_PIN_INPUT_MODE = 12,
+ SCMI_PIN_PULL_MODE = 13,
+ SCMI_PIN_INPUT_VALUE = 14,
+ SCMI_PIN_INPUT_SCHMITT = 15,
+ SCMI_PIN_LOW_POWER_MODE = 16,
+ SCMI_PIN_OUTPUT_MODE = 17,
+ SCMI_PIN_OUTPUT_VALUE = 18,
+ SCMI_PIN_POWER_SOURCE = 19,
+ SCMI_PIN_SLEW_RATE = 20,
+ SCMI_PIN_OEM_START = 192,
+ SCMI_PIN_OEM_END = 255,
+};
+
+/**
+ * struct scmi_pinctrl_proto_ops - represents the various operations provided
+ * by SCMI Pinctrl Protocol
+ *
+ * @count_get: returns count of the registered elements in given type
+ * @name_get: returns name by index of given type
+ * @group_pins_get: returns the set of pins, assigned to the specified group
+ * @function_groups_get: returns the set of groups, assigned to the specified
+ * function
+ * @mux_set: set muxing function for groups of pins
+ * @settings_get_one: returns one configuration parameter for pin or group
+ * specified by config_type
+ * @settings_get_all: returns all configuration parameters for pin or group
+ * @settings_conf: sets the configuration parameter for pin or group
+ * @pin_request: acquire a pin before selecting a mux setting
+ * @pin_free: free a pin acquired by the pin_request call
+ */
+struct scmi_pinctrl_proto_ops {
+ int (*count_get)(const struct scmi_protocol_handle *ph,
+ enum scmi_pinctrl_selector_type type);
+ int (*name_get)(const struct scmi_protocol_handle *ph, u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ const char **name);
+ int (*group_pins_get)(const struct scmi_protocol_handle *ph,
+ u32 selector, const unsigned int **pins,
+ unsigned int *nr_pins);
+ int (*function_groups_get)(const struct scmi_protocol_handle *ph,
+ u32 selector, unsigned int *nr_groups,
+ const unsigned int **groups);
+ int (*mux_set)(const struct scmi_protocol_handle *ph, u32 selector,
+ u32 group);
+ int (*settings_get_one)(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ enum scmi_pinctrl_conf_type config_type,
+ u32 *config_value);
+ int (*settings_get_all)(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ unsigned int *nr_configs,
+ enum scmi_pinctrl_conf_type *config_types,
+ u32 *config_values);
+ int (*settings_conf)(const struct scmi_protocol_handle *ph,
+ u32 selector, enum scmi_pinctrl_selector_type type,
+ unsigned int nr_configs,
+ enum scmi_pinctrl_conf_type *config_type,
+ u32 *config_value);
+ int (*pin_request)(const struct scmi_protocol_handle *ph, u32 pin);
+ int (*pin_free)(const struct scmi_protocol_handle *ph, u32 pin);
+};
+
/**
* struct scmi_notify_ops - represents notifications' operations provided by
* SCMI core
@@ -783,8 +866,6 @@ struct scmi_notify_ops {
const u32 *src_id,
struct notifier_block *nb);
int (*devm_event_notifier_unregister)(struct scmi_device *sdev,
- u8 proto_id, u8 evt_id,
- const u32 *src_id,
struct notifier_block *nb);
int (*event_notifier_register)(const struct scmi_handle *handle,
u8 proto_id, u8 evt_id,
@@ -844,6 +925,7 @@ enum scmi_std_protocol {
SCMI_PROTOCOL_RESET = 0x16,
SCMI_PROTOCOL_VOLTAGE = 0x17,
SCMI_PROTOCOL_POWERCAP = 0x18,
+ SCMI_PROTOCOL_PINCTRL = 0x19,
};
enum scmi_system_events {
diff --git a/include/linux/security.h b/include/linux/security.h
index 41a8f667bdfa..21cf70346b33 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -398,7 +398,7 @@ int security_inode_setsecurity(struct inode *inode, const char *name, const void
int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size);
void security_inode_getsecid(struct inode *inode, u32 *secid);
int security_inode_copy_up(struct dentry *src, struct cred **new);
-int security_inode_copy_up_xattr(const char *name);
+int security_inode_copy_up_xattr(struct dentry *src, const char *name);
int security_kernfs_init_security(struct kernfs_node *kn_dir,
struct kernfs_node *kn);
int security_file_permission(struct file *file, int mask);
@@ -1016,7 +1016,7 @@ static inline int security_kernfs_init_security(struct kernfs_node *kn_dir,
return 0;
}
-static inline int security_inode_copy_up_xattr(const char *name)
+static inline int security_inode_copy_up_xattr(struct dentry *src, const char *name)
{
return -EOPNOTSUPP;
}
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 234bcdb1fba4..8bd4fda6e027 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -118,7 +118,18 @@ void seq_vprintf(struct seq_file *m, const char *fmt, va_list args);
__printf(2, 3)
void seq_printf(struct seq_file *m, const char *fmt, ...);
void seq_putc(struct seq_file *m, char c);
-void seq_puts(struct seq_file *m, const char *s);
+void __seq_puts(struct seq_file *m, const char *s);
+
+static __always_inline void seq_puts(struct seq_file *m, const char *s)
+{
+ if (!__builtin_constant_p(*s))
+ __seq_puts(m, s);
+ else if (s[0] && !s[1])
+ seq_putc(m, s[0]);
+ else
+ seq_write(m, s, __builtin_strlen(s));
+}
+
void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter,
unsigned long long num, unsigned int width);
void seq_put_decimal_ull(struct seq_file *m, const char *delimiter,
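
The effect of the new seq_puts() inline, per the definition above: for compile-time-constant strings a single character degrades to seq_putc() and anything longer becomes a seq_write() with a compile-time length, while runtime strings still reach __seq_puts():

seq_puts(m, "\n");		/* constant, one char: seq_putc(m, '\n') */
seq_puts(m, "header:\n");	/* constant: seq_write(m, "header:\n", 8) */
seq_puts(m, buf);		/* runtime string: __seq_puts(m, buf) */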
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index 9346cd44814d..a45da7eef9a2 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -554,7 +554,7 @@ bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support, unsigned long *interfaces);
phy_interface_t sfp_select_interface(struct sfp_bus *bus,
- unsigned long *link_modes);
+ const unsigned long *link_modes);
int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo);
int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
@@ -592,7 +592,7 @@ static inline void sfp_parse_support(struct sfp_bus *bus,
}
static inline phy_interface_t sfp_select_interface(struct sfp_bus *bus,
- unsigned long *link_modes)
+ const unsigned long *link_modes)
{
return PHY_INTERFACE_MODE_NA;
}
diff --git a/include/linux/shm.h b/include/linux/shm.h
index c55bef0538e5..1d3d3ae958fb 100644
--- a/include/linux/shm.h
+++ b/include/linux/shm.h
@@ -16,7 +16,6 @@ struct sysv_shm {
long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr,
unsigned long shmlba);
-bool is_file_shm_hugepages(struct file *file);
void exit_shm(struct task_struct *task);
#define shm_init_task(task) INIT_LIST_HEAD(&(task)->sysvshm.shm_clist)
#else
@@ -30,10 +29,6 @@ static inline long do_shmat(int shmid, char __user *shmaddr,
{
return -ENOSYS;
}
-static inline bool is_file_shm_hugepages(struct file *file)
-{
- return false;
-}
static inline void exit_shm(struct task_struct *task)
{
}
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b72920c9887b..1c2902eaebd3 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -353,8 +353,6 @@ struct sk_buff;
#define MAX_SKB_FRAGS CONFIG_MAX_SKB_FRAGS
-extern int sysctl_max_skb_frags;
-
/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
* segment using its current segmentation instead.
*/
@@ -527,6 +525,13 @@ enum {
#define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY | \
SKBFL_DONT_ORPHAN | SKBFL_MANAGED_FRAG_REFS)
+struct ubuf_info_ops {
+ void (*complete)(struct sk_buff *, struct ubuf_info *,
+ bool zerocopy_success);
+ /* has to be compatible with skb_zcopy_set() */
+ int (*link_skb)(struct sk_buff *skb, struct ubuf_info *uarg);
+};
+
/*
* The callback notifies userspace to release buffers when skb DMA is done in
* lower device, the skb last reference should be 0 when calling this.
@@ -536,8 +541,7 @@ enum {
* The desc field is used to track userspace buffer index.
*/
struct ubuf_info {
- void (*callback)(struct sk_buff *, struct ubuf_info *,
- bool zerocopy_success);
+ const struct ubuf_info_ops *ops;
refcount_t refcnt;
u8 flags;
};
@@ -992,7 +996,7 @@ struct sk_buff {
#ifdef CONFIG_NETFILTER_SKIP_EGRESS
__u8 nf_skip_egress:1;
#endif
-#ifdef CONFIG_TLS_DEVICE
+#ifdef CONFIG_SKB_DECRYPTED
__u8 decrypted:1;
#endif
__u8 slow_gro:1;
@@ -1174,15 +1178,6 @@ static inline bool skb_dst_is_noref(const struct sk_buff *skb)
return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}
-/**
- * skb_rtable - Returns the skb &rtable
- * @skb: buffer
- */
-static inline struct rtable *skb_rtable(const struct sk_buff *skb)
-{
- return (struct rtable *)skb_dst(skb);
-}
-
/* For mangling skb->pkt_type from user space side from applications
* such as nft, tc, etc, we only allow a conservative subset of
* possible pkt_types to be set.
@@ -1615,17 +1610,26 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
static inline int skb_cmp_decrypted(const struct sk_buff *skb1,
const struct sk_buff *skb2)
{
-#ifdef CONFIG_TLS_DEVICE
+#ifdef CONFIG_SKB_DECRYPTED
return skb2->decrypted - skb1->decrypted;
#else
return 0;
#endif
}
+static inline bool skb_is_decrypted(const struct sk_buff *skb)
+{
+#ifdef CONFIG_SKB_DECRYPTED
+ return skb->decrypted;
+#else
+ return false;
+#endif
+}
+
static inline void skb_copy_decrypted(struct sk_buff *to,
const struct sk_buff *from)
{
-#ifdef CONFIG_TLS_DEVICE
+#ifdef CONFIG_SKB_DECRYPTED
to->decrypted = from->decrypted;
#endif
}
@@ -1662,14 +1666,13 @@ static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
}
#endif
+extern const struct ubuf_info_ops msg_zerocopy_ubuf_ops;
+
struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
struct ubuf_info *uarg);
void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
-void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
- bool success);
-
int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb, struct iov_iter *from,
size_t length);
@@ -1757,13 +1760,13 @@ static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
static inline void net_zcopy_put(struct ubuf_info *uarg)
{
if (uarg)
- uarg->callback(NULL, uarg, true);
+ uarg->ops->complete(NULL, uarg, true);
}
static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
if (uarg) {
- if (uarg->callback == msg_zerocopy_callback)
+ if (uarg->ops == &msg_zerocopy_ubuf_ops)
msg_zerocopy_put_abort(uarg, have_uref);
else if (have_uref)
net_zcopy_put(uarg);
@@ -1777,7 +1780,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
if (uarg) {
if (!skb_zcopy_is_nouarg(skb))
- uarg->callback(skb, uarg, zerocopy_success);
+ uarg->ops->complete(skb, uarg, zerocopy_success);
skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY;
}
@@ -3031,6 +3034,21 @@ static inline void skb_mac_header_rebuild(struct sk_buff *skb)
}
}
+/* Move the full mac header up to current network_header.
+ * Leaves skb->data pointing at offset skb->mac_len into the mac_header.
+ * Must be provided the complete mac header length.
+ */
+static inline void skb_mac_header_rebuild_full(struct sk_buff *skb, u32 full_mac_len)
+{
+ if (skb_mac_header_was_set(skb)) {
+ const unsigned char *old_mac = skb_mac_header(skb);
+
+ skb_set_mac_header(skb, -full_mac_len);
+ memmove(skb_mac_header(skb), old_mac, full_mac_len);
+ __skb_push(skb, full_mac_len - skb->mac_len);
+ }
+}
+
static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
return skb->csum_start - skb_headroom(skb);
@@ -3350,13 +3368,7 @@ static inline void *napi_alloc_frag_align(unsigned int fragsz,
return __napi_alloc_frag_align(fragsz, -align);
}
-struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
- unsigned int length, gfp_t gfp_mask);
-static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
- unsigned int length)
-{
- return __napi_alloc_skb(napi, length, GFP_ATOMIC);
-}
+struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int length);
void napi_consume_skb(struct sk_buff *skb, int budget);
void napi_skb_free_stolen_head(struct sk_buff *skb);
@@ -3485,85 +3497,10 @@ static inline struct page *skb_frag_page(const skb_frag_t *frag)
return netmem_to_page(frag->netmem);
}
-/**
- * __skb_frag_ref - take an addition reference on a paged fragment.
- * @frag: the paged fragment
- *
- * Takes an additional reference on the paged fragment @frag.
- */
-static inline void __skb_frag_ref(skb_frag_t *frag)
-{
- get_page(skb_frag_page(frag));
-}
-
-/**
- * skb_frag_ref - take an addition reference on a paged fragment of an skb.
- * @skb: the buffer
- * @f: the fragment offset.
- *
- * Takes an additional reference on the @f'th paged fragment of @skb.
- */
-static inline void skb_frag_ref(struct sk_buff *skb, int f)
-{
- __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
-}
-
int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
unsigned int headroom);
int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
struct bpf_prog *prog);
-bool napi_pp_put_page(struct page *page, bool napi_safe);
-
-static inline void
-skb_page_unref(const struct sk_buff *skb, struct page *page, bool napi_safe)
-{
-#ifdef CONFIG_PAGE_POOL
- if (skb->pp_recycle && napi_pp_put_page(page, napi_safe))
- return;
-#endif
- put_page(page);
-}
-
-static inline void
-napi_frag_unref(skb_frag_t *frag, bool recycle, bool napi_safe)
-{
- struct page *page = skb_frag_page(frag);
-
-#ifdef CONFIG_PAGE_POOL
- if (recycle && napi_pp_put_page(page, napi_safe))
- return;
-#endif
- put_page(page);
-}
-
-/**
- * __skb_frag_unref - release a reference on a paged fragment.
- * @frag: the paged fragment
- * @recycle: recycle the page if allocated via page_pool
- *
- * Releases a reference on the paged fragment @frag
- * or recycles the page via the page_pool API.
- */
-static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
-{
- napi_frag_unref(frag, recycle, false);
-}
-
-/**
- * skb_frag_unref - release a reference on a paged fragment of an skb.
- * @skb: the buffer
- * @f: the fragment offset
- *
- * Releases a reference on the @f'th paged fragment of @skb.
- */
-static inline void skb_frag_unref(struct sk_buff *skb, int f)
-{
- struct skb_shared_info *shinfo = skb_shinfo(skb);
-
- if (!skb_zcopy_managed(skb))
- __skb_frag_unref(&shinfo->frags[f], skb->pp_recycle);
-}
-
/**
* skb_frag_address - gets the address of the data contained in a paged fragment
* @frag: the paged fragment buffer
@@ -4054,12 +3991,6 @@ int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
-void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
-static inline void skb_free_datagram_locked(struct sock *sk,
- struct sk_buff *skb)
-{
- __skb_free_datagram_locked(sk, skb, 0);
-}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
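
A conversion sketch for zerocopy providers that previously set ubuf_info->callback directly; the foo_* names are hypothetical, and the ops table must be const and outlive the uarg:

static void foo_zc_complete(struct sk_buff *skb, struct ubuf_info *uarg,
			    bool zerocopy_success)
{
	/* release pinned user pages, post a completion, etc. */
}

static const struct ubuf_info_ops foo_ubuf_ops = {
	.complete = foo_zc_complete,
	/* .link_skb is optional; must stay compatible with skb_zcopy_set() */
};

static void foo_zc_init(struct ubuf_info *uarg)
{
	uarg->ops = &foo_ubuf_ops;	/* was: uarg->callback = foo_zc_complete */
	refcount_set(&uarg->refcnt, 1);
}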
diff --git a/include/linux/skbuff_ref.h b/include/linux/skbuff_ref.h
new file mode 100644
index 000000000000..11f0a4063403
--- /dev/null
+++ b/include/linux/skbuff_ref.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Skb ref helpers.
+ */
+
+#ifndef _LINUX_SKBUFF_REF_H
+#define _LINUX_SKBUFF_REF_H
+
+#include <linux/skbuff.h>
+
+/**
+ * __skb_frag_ref - take an additional reference on a paged fragment.
+ * @frag: the paged fragment
+ *
+ * Takes an additional reference on the paged fragment @frag.
+ */
+static inline void __skb_frag_ref(skb_frag_t *frag)
+{
+ get_page(skb_frag_page(frag));
+}
+
+/**
+ * skb_frag_ref - take an additional reference on a paged fragment of an skb.
+ * @skb: the buffer
+ * @f: the fragment offset.
+ *
+ * Takes an additional reference on the @f'th paged fragment of @skb.
+ */
+static inline void skb_frag_ref(struct sk_buff *skb, int f)
+{
+ __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
+}
+
+bool napi_pp_put_page(struct page *page);
+
+static inline void
+skb_page_unref(struct page *page, bool recycle)
+{
+#ifdef CONFIG_PAGE_POOL
+ if (recycle && napi_pp_put_page(page))
+ return;
+#endif
+ put_page(page);
+}
+
+/**
+ * __skb_frag_unref - release a reference on a paged fragment.
+ * @frag: the paged fragment
+ * @recycle: recycle the page if allocated via page_pool
+ *
+ * Releases a reference on the paged fragment @frag
+ * or recycles the page via the page_pool API.
+ */
+static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
+{
+ skb_page_unref(skb_frag_page(frag), recycle);
+}
+
+/**
+ * skb_frag_unref - release a reference on a paged fragment of an skb.
+ * @skb: the buffer
+ * @f: the fragment offset
+ *
+ * Releases a reference on the @f'th paged fragment of @skb.
+ */
+static inline void skb_frag_unref(struct sk_buff *skb, int f)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ if (!skb_zcopy_managed(skb))
+ __skb_frag_unref(&shinfo->frags[f], skb->pp_recycle);
+}
+
+#endif /* _LINUX_SKBUFF_REF_H */
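
Callers that take or drop fragment page references now pull in the split-out header; a minimal usage sketch:

#include <linux/skbuff_ref.h>

static void example_frag_pin(struct sk_buff *skb)
{
	skb_frag_ref(skb, 0);		/* pin the first fragment */
	/* ... use the fragment's page ... */
	skb_frag_unref(skb, 0);		/* drop it, possibly via page_pool */
}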
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 78efc5b20284..c9efda9df285 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -58,6 +58,10 @@ struct sk_psock_progs {
struct bpf_prog *stream_parser;
struct bpf_prog *stream_verdict;
struct bpf_prog *skb_verdict;
+ struct bpf_link *msg_parser_link;
+ struct bpf_link *stream_parser_link;
+ struct bpf_link *stream_verdict_link;
+ struct bpf_link *skb_verdict_link;
};
enum sk_psock_state_bits {
@@ -459,10 +463,12 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
+ read_lock_bh(&sk->sk_callback_lock);
if (psock->saved_data_ready)
psock->saved_data_ready(sk);
else
sk->sk_data_ready(sk);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static inline void psock_set_prog(struct bpf_prog **pprog,
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 4cc37ef22aae..7247e217e21b 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -279,7 +279,7 @@ void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
-DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
+DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
/**
* ksize - Report actual allocation size of associated object
@@ -785,30 +785,35 @@ extern void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node) __alloc_si
#define kvmalloc(_size, _flags) kvmalloc_node(_size, _flags, NUMA_NO_NODE)
#define kvmalloc_noprof(_size, _flags) kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE)
-#define kvzalloc(_size, _flags) kvmalloc(_size, _flags|__GFP_ZERO)
+#define kvzalloc(_size, _flags) kvmalloc(_size, (_flags)|__GFP_ZERO)
-#define kvzalloc_node(_size, _flags, _node) kvmalloc_node(_size, _flags|__GFP_ZERO, _node)
+#define kvzalloc_node(_size, _flags, _node) kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
-static inline __alloc_size(1, 2) void *kvmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *
+kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
{
size_t bytes;
if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
- return kvmalloc_node_noprof(bytes, flags, NUMA_NO_NODE);
+ return kvmalloc_node_noprof(bytes, flags, node);
}
+#define kvmalloc_array_noprof(...) kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
+#define kvcalloc_node_noprof(_n,_s,_f,_node) kvmalloc_array_node_noprof(_n,_s,(_f)|__GFP_ZERO,_node)
+#define kvcalloc_noprof(...) kvcalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
+
#define kvmalloc_array(...) alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__))
-#define kvcalloc(_n, _size, _flags) kvmalloc_array(_n, _size, _flags|__GFP_ZERO)
-#define kvcalloc_noprof(_n, _size, _flags) kvmalloc_array_noprof(_n, _size, _flags|__GFP_ZERO)
+#define kvcalloc_node(...) alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
+#define kvcalloc(...) alloc_hooks(kvcalloc_noprof(__VA_ARGS__))
extern void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
__realloc_size(3);
#define kvrealloc(...) alloc_hooks(kvrealloc_noprof(__VA_ARGS__))
extern void kvfree(const void *addr);
-DEFINE_FREE(kvfree, void *, if (_T) kvfree(_T))
+DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))
extern void kvfree_sensitive(const void *addr, size_t len);
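
With DEFINE_FREE(kvfree, ...) now tolerating ERR_PTR values and kvcalloc_node() added, scope-managed allocations compose as sketched below; struct foo is invented for illustration:

#include <linux/slab.h>
#include <linux/cleanup.h>

struct foo {
	u64 key;
};

static int example_alloc(int node)
{
	struct foo *tbl __free(kvfree) =
		kvcalloc_node(16, sizeof(*tbl), GFP_KERNEL, node);

	if (!tbl)
		return -ENOMEM;

	/* ... populate tbl; kvfree() runs automatically on return ... */
	return 0;
}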
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index 649955d2cf5c..d4a8e34505e6 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -14,6 +14,15 @@
#define CMDQ_ADDR_HIGH(addr) ((u32)(((addr) >> 16) & GENMASK(31, 0)))
#define CMDQ_ADDR_LOW(addr) ((u16)(addr) | BIT(1))
+/*
+ * Every cmdq thread has its own SPRs (Specific Purpose Registers),
+ * so there are 4 * N (threads) SPRs in GCE that share the same indexes below.
+ */
+#define CMDQ_THR_SPR_IDX0 (0)
+#define CMDQ_THR_SPR_IDX1 (1)
+#define CMDQ_THR_SPR_IDX2 (2)
+#define CMDQ_THR_SPR_IDX3 (3)
+
struct cmdq_pkt;
struct cmdq_client_reg {
@@ -62,17 +71,19 @@ void cmdq_mbox_destroy(struct cmdq_client *client);
/**
* cmdq_pkt_create() - create a CMDQ packet
* @client: the CMDQ mailbox client
+ * @pkt: the CMDQ packet
* @size: required CMDQ buffer size
*
- * Return: CMDQ packet pointer
+ * Return: 0 for success; else the error code is returned
*/
-struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size);
+int cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, size_t size);
/**
* cmdq_pkt_destroy() - destroy the CMDQ packet
+ * @client: the CMDQ mailbox client
* @pkt: the CMDQ packet
*/
-void cmdq_pkt_destroy(struct cmdq_pkt *pkt);
+void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt);
/**
* cmdq_pkt_write() - append write command to the CMDQ packet
@@ -174,6 +185,18 @@ int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
u16 addr_low, u32 value, u32 mask);
/**
+ * cmdq_pkt_mem_move() - append memory move command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @src_addr: source address
+ * @dst_addr: destination address
+ *
+ * Appends a CMDQ command to copy the value found in `src_addr` to `dst_addr`.
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_mem_move(struct cmdq_pkt *pkt, dma_addr_t src_addr, dma_addr_t dst_addr);
+
+/**
* cmdq_pkt_wfe() - append wait for event command to the CMDQ packet
* @pkt: the CMDQ packet
* @event: the desired event type to wait
@@ -184,6 +207,21 @@ int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear);
/**
+ * cmdq_pkt_acquire_event() - append acquire event command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @event: the desired event to be acquired
+ *
+ * Users can treat cmdq_pkt_acquire_event() as `mutex_lock` and
+ * cmdq_pkt_clear_event() as `mutex_unlock` to protect the `critical section`
+ * instructions between them.
+ * cmdq_pkt_acquire_event() waits for the event to be cleared.
+ * Once the event is cleared by cmdq_pkt_clear_event() in another GCE thread,
+ * cmdq_pkt_acquire_event() sets the event and continues with the next
+ * instruction.
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_acquire_event(struct cmdq_pkt *pkt, u16 event);
+
+/**
* cmdq_pkt_clear_event() - append clear event command to the CMDQ packet
* @pkt: the CMDQ packet
* @event: the desired event to be cleared
@@ -248,36 +286,76 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value);
/**
- * cmdq_pkt_jump() - Append jump command to the CMDQ packet, ask GCE
- * to execute an instruction that change current thread PC to
- * a physical address which should contains more instruction.
+ * cmdq_pkt_poll_addr() - Append blocking POLL command to CMDQ packet
+ * @pkt: the CMDQ packet
+ * @addr: the hardware register address
+ * @value: the specified target register value
+ * @mask: the specified target register mask
+ *
+ * Appends a polling (POLL) command to the CMDQ packet and asks the GCE
+ * to execute an instruction that checks for the specified `value` (with
+ * or without `mask`) to appear in the specified hardware register `addr`.
+ * All GCE threads will be blocked by this instruction.
+ *
+ * Return: 0 for success or negative error code
+ */
+int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mask);
+
+/**
+ * cmdq_pkt_jump_abs() - Append jump command to the CMDQ packet, ask GCE
+ * to execute an instruction that change current thread
+ * PC to a absolute physical address which should
+ * contains more instruction.
* @pkt: the CMDQ packet
- * @addr: physical address of target instruction buffer
+ * @addr: absolute physical address of target instruction buffer
+ * @shift_pa: shift bits of physical address in CMDQ instruction. This value
+ * is got by cmdq_get_shift_pa().
*
* Return: 0 for success; else the error code is returned
*/
-int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr);
+int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa);
+
+/* This wrapper has to be removed once all users have migrated to jump_abs */
+static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa)
+{
+ return cmdq_pkt_jump_abs(pkt, addr, shift_pa);
+}
/**
- * cmdq_pkt_finalize() - Append EOC and jump command to pkt.
+ * cmdq_pkt_jump_rel() - Append jump command to the CMDQ packet, ask GCE
+ * to execute an instruction that changes the current thread
+ * PC to a physical address with a relative offset. The
+ * target address should contain more instructions.
+ * @pkt: the CMDQ packet
+ * @offset: relative offset of target instruction buffer from current PC.
+ * @shift_pa: shift bits of physical address in CMDQ instruction. This value
+ * is obtained from cmdq_get_shift_pa().
*
* Return: 0 for success; else the error code is returned
*/
-int cmdq_pkt_finalize(struct cmdq_pkt *pkt);
+int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa);
+
+/**
+ * cmdq_pkt_eoc() - Append EOC and ask GCE to generate an IRQ at end of execution
+ * @pkt: The CMDQ packet
+ *
+ * Appends an End Of Code (EOC) command to the CMDQ packet and asks the GCE
+ * to generate an interrupt at the end of the execution of all commands in
+ * the pipeline.
+ * The EOC command is usually appended to the end of the pipeline to notify
+ * that all commands are done.
+ *
+ * Return: 0 for success or negative error number
+ */
+int cmdq_pkt_eoc(struct cmdq_pkt *pkt);
/**
- * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ
- * packet and call back at the end of done packet
+ * cmdq_pkt_finalize() - Append EOC and jump command to pkt.
* @pkt: the CMDQ packet
*
* Return: 0 for success; else the error code is returned
- *
- * Trigger CMDQ to asynchronously execute the CMDQ packet and call back
- * at the end of done packet. Note that this is an ASYNC function. When the
- * function returned, it may or may not be finished.
*/
-int cmdq_pkt_flush_async(struct cmdq_pkt *pkt);
+int cmdq_pkt_finalize(struct cmdq_pkt *pkt);
#else /* IS_ENABLED(CONFIG_MTK_CMDQ) */
@@ -294,12 +372,12 @@ static inline struct cmdq_client *cmdq_mbox_create(struct device *dev, int index
static inline void cmdq_mbox_destroy(struct cmdq_client *client) { }
-static inline struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
+static inline int cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, size_t size)
{
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
-static inline void cmdq_pkt_destroy(struct cmdq_pkt *pkt) { }
+static inline void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt) { }
static inline int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
@@ -374,17 +452,32 @@ static inline int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
return -EINVAL;
}
-static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
+static inline int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mask)
{
return -EINVAL;
}
-static inline int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
+static inline int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa)
{
return -EINVAL;
}
-static inline int cmdq_pkt_flush_async(struct cmdq_pkt *pkt)
+static inline int cmdq_pkt_eoc(struct cmdq_pkt *pkt)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
return -EINVAL;
}
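
For orientation, a minimal usage sketch of the reworked packet API (the
client, register address, and jump target are hypothetical; the client's
chan member and cmdq_get_shift_pa() are assumed from the existing mtk-cmdq
API, and error handling is abbreviated):

static int example_build_pkt(struct cmdq_client *cl, dma_addr_t hw_reg,
			     dma_addr_t next_buf)
{
	struct cmdq_pkt pkt;
	int ret;

	/* cmdq_pkt_create() now fills in a caller-provided packet */
	ret = cmdq_pkt_create(cl, &pkt, PAGE_SIZE);
	if (ret)
		return ret;

	/* block until the register holds the expected masked value */
	ret = cmdq_pkt_poll_addr(&pkt, hw_reg, 0x1, 0x1);
	if (!ret)
		ret = cmdq_pkt_eoc(&pkt);	/* IRQ at end of execution */
	if (!ret)
		ret = cmdq_pkt_jump_abs(&pkt, next_buf,
					cmdq_get_shift_pa(cl->chan));
	if (ret)
		cmdq_pkt_destroy(cl, &pkt);
	return ret;
}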
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 139c330ccf2c..89d16b90370b 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -16,6 +16,7 @@ struct cred;
struct socket;
struct sock;
struct sk_buff;
+struct proto_accept_arg;
#define __sockaddr_check_size(size) \
BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage)))
@@ -433,7 +434,7 @@ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
extern int __sys_sendto(int fd, void __user *buff, size_t len,
unsigned int flags, struct sockaddr __user *addr,
int addr_len);
-extern struct file *do_accept(struct file *file, unsigned file_flags,
+extern struct file *do_accept(struct file *file, struct proto_accept_arg *arg,
struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags);
extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
deleted file mode 100644
index ca2cd4e30ead..000000000000
--- a/include/linux/spi/pxa2xx_spi.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
- */
-#ifndef __LINUX_SPI_PXA2XX_SPI_H
-#define __LINUX_SPI_PXA2XX_SPI_H
-
-#include <linux/dmaengine.h>
-#include <linux/types.h>
-
-#include <linux/pxa2xx_ssp.h>
-
-struct dma_chan;
-
-/*
- * The platform data for SSP controller devices
- * (resides in device.platform_data).
- */
-struct pxa2xx_spi_controller {
- u16 num_chipselect;
- u8 enable_dma;
- u8 dma_burst_size;
- bool is_target;
-
- /* DMA engine specific config */
- dma_filter_fn dma_filter;
- void *tx_param;
- void *rx_param;
-
- /* For non-PXA arches */
- struct ssp_device ssp;
-};
-
-/*
- * The controller specific data for SPI target devices
- * (resides in spi_board_info.controller_data),
- * copied to spi_device.platform_data ... mostly for
- * DMA tuning.
- */
-struct pxa2xx_spi_chip {
- u8 tx_threshold;
- u8 tx_hi_threshold;
- u8 rx_threshold;
- u8 dma_burst_size;
- u32 timeout;
-};
-
-#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
-
-#include <linux/clk.h>
-
-extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info);
-
-#endif
-
-#endif /* __LINUX_SPI_PXA2XX_SPI_H */
diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h
deleted file mode 100644
index dbdfcc7a3db2..000000000000
--- a/include/linux/spi/rspi.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Renesas SPI driver
- *
- * Copyright (C) 2012 Renesas Solutions Corp.
- */
-
-#ifndef __LINUX_SPI_RENESAS_SPI_H__
-#define __LINUX_SPI_RENESAS_SPI_H__
-
-struct rspi_plat_data {
- unsigned int dma_tx_id;
- unsigned int dma_rx_id;
-
- u16 num_chipselect;
-};
-
-#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index c459809efee4..e8e1e798924f 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -453,6 +453,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs.
* @last_cs: the last chip_select that is recorded by set_cs, -1 on non chip
* selected
+ * @last_cs_index_mask: bit mask of the last chip selects that were used
* @xfer_completion: used by core transfer_one_message()
* @busy: message pump is busy
* @running: message pump is running
@@ -955,8 +956,8 @@ struct spi_res {
* struct spi_transfer - a read/write buffer pair
* @tx_buf: data to be written (DMA-safe memory), or NULL
* @rx_buf: data to be read (DMA-safe memory), or NULL
- * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
- * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
+ * @tx_dma: DMA address of tx_buf, currently not for client use
+ * @rx_dma: DMA address of rx_buf, currently not for client use
* @tx_nbits: number of bits used for writing. If 0 the default
* (SPI_NBITS_SINGLE) is used.
* @rx_nbits: number of bits used for reading. If 0 the default
@@ -1066,8 +1067,7 @@ struct spi_transfer {
/*
* It's okay if tx_buf == rx_buf (right?).
* For MicroWire, one buffer must be NULL.
- * Buffers must work with dma_*map_single() calls, unless
- * spi_message.is_dma_mapped reports a pre-existing mapping.
+ * Buffers must work with dma_*map_single() calls.
*/
const void *tx_buf;
void *rx_buf;
@@ -1111,8 +1111,6 @@ struct spi_transfer {
* struct spi_message - one multi-segment SPI transaction
* @transfers: list of transfer segments in this transaction
* @spi: SPI device to which the transaction is queued
- * @is_dma_mapped: if true, the caller provided both DMA and CPU virtual
- * addresses for each transfer buffer
* @pre_optimized: peripheral driver pre-optimized the message
* @optimized: the message is in the optimized state
* @prepared: spi_prepare_message was called for the this message
@@ -1146,8 +1144,6 @@ struct spi_message {
struct spi_device *spi;
- unsigned is_dma_mapped:1;
-
/* spi_optimize_message() was called for this message */
bool pre_optimized;
/* __spi_optimize_message() was called for this message */
diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h
index 3934ce789d87..1b8d984668b6 100644
--- a/include/linux/spi/xilinx_spi.h
+++ b/include/linux/spi/xilinx_spi.h
@@ -2,19 +2,23 @@
#ifndef __LINUX_SPI_XILINX_SPI_H
#define __LINUX_SPI_XILINX_SPI_H
+#include <linux/types.h>
+
+struct spi_board_info;
+
/**
* struct xspi_platform_data - Platform data of the Xilinx SPI driver
- * @num_chipselect: Number of chip select by the IP.
- * @little_endian: If registers should be accessed little endian or not.
- * @bits_per_word: Number of bits per word.
* @devices: Devices to add when the driver is probed.
* @num_devices: Number of devices in the devices array.
+ * @num_chipselect: Number of chip selects supported by the IP.
+ * @bits_per_word: Number of bits per word.
+ * @force_irq: If set, forces QSPI transaction requirements.
*/
struct xspi_platform_data {
- u16 num_chipselect;
- u8 bits_per_word;
struct spi_board_info *devices;
u8 num_devices;
+ u8 num_chipselect;
+ u8 bits_per_word;
bool force_irq;
};
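
A hypothetical board-file sketch filling the reordered platform data
(all values illustrative):

static struct xspi_platform_data example_xspi_pdata = {
	.devices	= NULL,	/* no pre-declared SPI devices */
	.num_devices	= 0,
	.num_chipselect	= 1,
	.bits_per_word	= 8,
	.force_irq	= false,
};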
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index 447133171d95..4d96bbdb45f0 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -64,8 +64,10 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
int idx;
+ preempt_disable(); // Needed for PREEMPT_AUTO
idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
+ preempt_enable();
return idx;
}
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 1f326da289d3..a2257380c3f1 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -621,14 +621,6 @@ extern u32 ssb_dma_translation(struct ssb_device *dev);
#define SSB_DMA_TRANSLATION_MASK 0xC0000000
#define SSB_DMA_TRANSLATION_SHIFT 30
-static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev)
-{
-#ifdef CONFIG_SSB_DEBUG
- printk(KERN_ERR "SSB: BUG! Calling DMA API for "
- "unsupported bustype %d\n", dev->bus->bustype);
-#endif /* DEBUG */
-}
-
#ifdef CONFIG_SSB_PCIHOST
/* PCI-host wrapper driver */
extern int ssb_pcihost_register(struct pci_driver *driver);
diff --git a/include/linux/stat.h b/include/linux/stat.h
index 52150570d37a..bf92441dbad2 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -53,6 +53,7 @@ struct kstat {
u32 dio_mem_align;
u32 dio_offset_align;
u64 change_cookie;
+ u64 subvol;
};
/* These definitions are internal to the kernel for now. Mainly used by nfsd. */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index dfa1828cd756..f92c195c76ed 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -115,21 +115,6 @@ struct stmmac_axi {
bool axi_rb;
};
-#define EST_GCL 1024
-struct stmmac_est {
- struct mutex lock;
- int enable;
- u32 btr_reserve[2];
- u32 btr_offset[2];
- u32 btr[2];
- u32 ctr[2];
- u32 ter;
- u32 gcl_unaligned[EST_GCL];
- u32 gcl[EST_GCL];
- u32 gcl_size;
- u32 max_sdu[MTL_MAX_TX_QUEUES];
-};
-
struct stmmac_rxq_cfg {
u8 mode_to_use;
u32 chan;
@@ -246,7 +231,6 @@ struct plat_stmmacenet_data {
struct fwnode_handle *port_node;
struct device_node *mdio_node;
struct stmmac_dma_cfg *dma_cfg;
- struct stmmac_est *est;
struct stmmac_fpe_cfg *fpe_cfg;
struct stmmac_safety_feature_cfg *safety_feat_cfg;
int clk_csr;
@@ -285,6 +269,8 @@ struct plat_stmmacenet_data {
int (*crosststamp)(ktime_t *device, struct system_counterval_t *system,
void *ctx);
void (*dump_debug_regs)(void *priv);
+ int (*pcs_init)(struct stmmac_priv *priv);
+ void (*pcs_exit)(struct stmmac_priv *priv);
void *bsp_priv;
struct clk *stmmac_clk;
struct clk *pclk;
diff --git a/include/linux/string.h b/include/linux/string.h
index 793c27ad7c0d..60168aa2af07 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -14,8 +14,8 @@
#include <uapi/linux/string.h>
extern char *strndup_user(const char __user *, long);
-extern void *memdup_user(const void __user *, size_t);
-extern void *vmemdup_user(const void __user *, size_t);
+extern void *memdup_user(const void __user *, size_t) __realloc_size(2);
+extern void *vmemdup_user(const void __user *, size_t) __realloc_size(2);
extern void *memdup_user_nul(const void __user *, size_t);
/**
@@ -27,7 +27,8 @@ extern void *memdup_user_nul(const void __user *, size_t);
* Return: an ERR_PTR() on failure. Result is physically
* contiguous, to be freed by kfree().
*/
-static inline void *memdup_array_user(const void __user *src, size_t n, size_t size)
+static inline __realloc_size(2, 3)
+void *memdup_array_user(const void __user *src, size_t n, size_t size)
{
size_t nbytes;
@@ -46,7 +47,8 @@ static inline void *memdup_array_user(const void __user *src, size_t n, size_t s
* Return: an ERR_PTR() on failure. Result may be not
* physically contiguous. Use kvfree() to free.
*/
-static inline void *vmemdup_array_user(const void __user *src, size_t n, size_t size)
+static inline __realloc_size(2, 3)
+void *vmemdup_array_user(const void __user *src, size_t n, size_t size)
{
size_t nbytes;
@@ -287,7 +289,8 @@ extern void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp) __realloc_si
extern void *kvmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
-extern void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp);
+extern void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp)
+ __realloc_size(2, 3);
/* lib/argv_split.c */
extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
@@ -425,6 +428,55 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
} while (0)
/**
+ * memtostr - Copy a possibly non-NUL-term string to a NUL-term string
+ * @dest: Pointer to destination NUL-terminated string
+ * @src: Pointer to character array (likely marked as __nonstring)
+ *
+ * This is a replacement for strncpy() uses where the source is not
+ * a NUL-terminated string.
+ *
+ * Note that sizes of @dest and @src must be known at compile-time.
+ */
+#define memtostr(dest, src) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
+ const size_t _src_len = __builtin_object_size(src, 1); \
+ const size_t _src_chars = strnlen(src, _src_len); \
+ const size_t _copy_len = min(_dest_len - 1, _src_chars); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ !__builtin_constant_p(_src_len) || \
+ _dest_len == 0 || _dest_len == (size_t)-1 || \
+ _src_len == 0 || _src_len == (size_t)-1); \
+ memcpy(dest, src, _copy_len); \
+ dest[_copy_len] = '\0'; \
+} while (0)
+
+/**
+ * memtostr_pad - Copy a possibly non-NUL-term string to a NUL-term string
+ * with NUL padding in the destination
+ * @dest: Pointer to destination NUL-terminated string
+ * @src: Pointer to character array (likely marked as __nonstring)
+ *
+ * This is a replacement for strncpy() uses where the source is not
+ * a NUL-terminated string.
+ *
+ * Note that sizes of @dest and @src must be known at compile-time.
+ */
+#define memtostr_pad(dest, src) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
+ const size_t _src_len = __builtin_object_size(src, 1); \
+ const size_t _src_chars = strnlen(src, _src_len); \
+ const size_t _copy_len = min(_dest_len - 1, _src_chars); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ !__builtin_constant_p(_src_len) || \
+ _dest_len == 0 || _dest_len == (size_t)-1 || \
+ _src_len == 0 || _src_len == (size_t)-1); \
+ memcpy(dest, src, _copy_len); \
+ memset(&dest[_copy_len], 0, _dest_len - _copy_len); \
+} while (0)
+
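A usage sketch (not from the patch): copying a fixed-size, possibly
unterminated identifier into a C string. The struct and field names are
hypothetical; note that both buffer sizes are visible to the compiler.

struct fw_info {
	char id[8] __nonstring;		/* not guaranteed NUL-terminated */
};

static void example_read_id(const struct fw_info *fw)
{
	char id_str[16];

	memtostr(id_str, fw->id);	/* id_str is always NUL-terminated */
	pr_info("fw id: %s\n", id_str);
}

memtostr_pad() additionally zero-fills the rest of the destination, which
matters when the buffer is later copied out wholesale.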
+/**
* memset_after - Set a value after a struct member to the end of a struct
*
* @obj: Address of target struct instance
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 24cd199dd6f3..d33bab33099a 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -210,7 +210,6 @@ struct svc_rdma_recv_ctxt {
*/
struct svc_rdma_write_info {
struct svcxprt_rdma *wi_rdma;
- struct list_head wi_list;
const struct svc_rdma_chunk *wi_chunk;
@@ -239,10 +238,7 @@ struct svc_rdma_send_ctxt {
struct ib_cqe sc_cqe;
struct xdr_buf sc_hdrbuf;
struct xdr_stream sc_stream;
-
- struct list_head sc_write_info_list;
struct svc_rdma_write_info sc_reply_info;
-
void *sc_xprt_buf;
int sc_page_count;
int sc_cur_sge_no;
@@ -274,14 +270,11 @@ extern void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
extern void svc_rdma_cc_release(struct svcxprt_rdma *rdma,
struct svc_rdma_chunk_ctxt *cc,
enum dma_data_direction dir);
-extern void svc_rdma_write_chunk_release(struct svcxprt_rdma *rdma,
- struct svc_rdma_send_ctxt *ctxt);
extern void svc_rdma_reply_chunk_release(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *ctxt);
-extern int svc_rdma_prepare_write_list(struct svcxprt_rdma *rdma,
- const struct svc_rdma_pcl *write_pcl,
- struct svc_rdma_send_ctxt *sctxt,
- const struct xdr_buf *xdr);
+extern int svc_rdma_send_write_list(struct svcxprt_rdma *rdma,
+ const struct svc_rdma_recv_ctxt *rctxt,
+ const struct xdr_buf *xdr);
extern int svc_rdma_prepare_reply_chunk(struct svcxprt_rdma *rdma,
const struct svc_rdma_pcl *write_pcl,
const struct svc_rdma_pcl *reply_pcl,
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 8e20cd60e2e7..0981e35a9fed 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -135,6 +135,9 @@ int svc_reg_xprt_class(struct svc_xprt_class *);
void svc_unreg_xprt_class(struct svc_xprt_class *);
void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *,
struct svc_serv *);
+int svc_xprt_create_from_sa(struct svc_serv *serv, const char *xprt_name,
+ struct net *net, struct sockaddr *sap,
+ int flags, const struct cred *cred);
int svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
struct net *net, const int family,
const unsigned short port, int flags,
@@ -147,6 +150,8 @@ void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt);
void svc_xprt_close(struct svc_xprt *xprt);
int svc_port_is_privileged(struct sockaddr *sin);
int svc_print_xprts(char *buf, int maxlen);
+struct svc_xprt *svc_find_listener(struct svc_serv *serv, const char *xcl_name,
+ struct net *net, const struct sockaddr *sa);
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
struct net *net, const sa_family_t af,
const unsigned short port);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index ee7d33b89e9e..09db2f2e6488 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -137,17 +137,6 @@ struct ctl_table {
void *data;
int maxlen;
umode_t mode;
- /**
- * enum type - Enumeration to differentiate between ctl target types
- * @SYSCTL_TABLE_TYPE_DEFAULT: ctl target with no special considerations
- * @SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY: Used to identify a permanently
- * empty directory target to serve
- * as mount point.
- */
- enum {
- SYSCTL_TABLE_TYPE_DEFAULT,
- SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY
- } type;
proc_handler *proc_handler; /* Callback for text formatting */
struct ctl_table_poll *poll;
void *extra1;
@@ -182,12 +171,23 @@ struct ctl_table_header {
struct rcu_head rcu;
};
struct completion *unregistering;
- struct ctl_table *ctl_table_arg;
+ const struct ctl_table *ctl_table_arg;
struct ctl_table_root *root;
struct ctl_table_set *set;
struct ctl_dir *parent;
struct ctl_node *node;
struct hlist_head inodes; /* head for proc_inode->sysctl_inodes */
+ /**
+ * enum type - Enumeration to differentiate between ctl target types
+ * @SYSCTL_TABLE_TYPE_DEFAULT: ctl target with no special considerations
+ * @SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY: Used to identify a permanently
+ * empty directory target to serve
+ * as mount point.
+ */
+ enum {
+ SYSCTL_TABLE_TYPE_DEFAULT,
+ SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY,
+ } type;
};
struct ctl_dir {
@@ -205,9 +205,8 @@ struct ctl_table_root {
struct ctl_table_set default_set;
struct ctl_table_set *(*lookup)(struct ctl_table_root *root);
void (*set_ownership)(struct ctl_table_header *head,
- struct ctl_table *table,
kuid_t *uid, kgid_t *gid);
- int (*permissions)(struct ctl_table_header *head, struct ctl_table *table);
+ int (*permissions)(struct ctl_table_header *head, const struct ctl_table *table);
};
#define register_sysctl(path, table) \
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 55399ee2a57e..6a5e08b937b3 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -244,6 +244,7 @@ struct tcp_sock {
/* OOO segments go in this rbtree. Socket lock must be held. */
struct rb_root out_of_order_queue;
u32 snd_ssthresh; /* Slow start size threshold */
+ u8 recvmsg_inq : 1;/* Indicate # of bytes in queue upon recvmsg */
__cacheline_group_end(tcp_sock_read_rx);
/* TX read-write hotpath cache lines */
@@ -266,8 +267,6 @@ struct tcp_sock {
u32 mdev_us; /* medium deviation */
u32 rtt_seq; /* sequence number to update rttvar */
u64 tcp_wstamp_ns; /* departure time for next sent data packet */
- u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
- u64 tcp_mstamp; /* most recent packet received/sent */
struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */
struct sk_buff *highest_sack; /* skb just after the highest
* skb with SACKed bit set
@@ -284,6 +283,8 @@ struct tcp_sock {
* 0x5?10 << 16 + snd_wnd in net byte order
*/
__be32 pred_flags;
+ u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
+ u64 tcp_mstamp; /* most recent packet received/sent */
u32 rcv_nxt; /* What we want to receive next */
u32 snd_nxt; /* Next sequence we send */
u32 snd_una; /* First byte we want an ack for */
@@ -370,7 +371,6 @@ struct tcp_sock {
tlp_retrans:1, /* TLP is a retransmission */
unused:5;
u8 thin_lto : 1,/* Use linear timeouts for thin streams */
- recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */
fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
fastopen_client_fail:2, /* reason why fastopen failed */
diff --git a/include/linux/tee_core.h b/include/linux/tee_core.h
new file mode 100644
index 000000000000..efd16ed52315
--- /dev/null
+++ b/include/linux/tee_core.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 Linaro Limited
+ */
+
+#ifndef __TEE_CORE_H
+#define __TEE_CORE_H
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/tee.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+
+/*
+ * The file describes the API provided by the generic TEE driver to the
+ * specific TEE driver.
+ */
+
+#define TEE_SHM_DYNAMIC BIT(0) /* Dynamic shared memory registered */
+ /* in secure world */
+#define TEE_SHM_USER_MAPPED BIT(1) /* Memory mapped in user space */
+#define TEE_SHM_POOL BIT(2) /* Memory allocated from pool */
+#define TEE_SHM_PRIV BIT(3) /* Memory private to TEE driver */
+
+#define TEE_DEVICE_FLAG_REGISTERED 0x1
+#define TEE_MAX_DEV_NAME_LEN 32
+
+/**
+ * struct tee_device - TEE Device representation
+ * @name: name of device
+ * @desc: description of device
+ * @id: unique id of device
+ * @flags: represented by TEE_DEVICE_FLAG_REGISTERED above
+ * @dev: embedded basic device structure
+ * @cdev: embedded cdev
+ * @num_users: number of active users of this device
+ * @c_no_users: completion used when unregistering the device
+ * @mutex: mutex protecting @num_users and @idr
+ * @idr: register of user space shared memory objects allocated or
+ * registered on this device
+ * @pool: shared memory pool
+ */
+struct tee_device {
+ char name[TEE_MAX_DEV_NAME_LEN];
+ const struct tee_desc *desc;
+ int id;
+ unsigned int flags;
+
+ struct device dev;
+ struct cdev cdev;
+
+ size_t num_users;
+ struct completion c_no_users;
+ struct mutex mutex; /* protects num_users and idr */
+
+ struct idr idr;
+ struct tee_shm_pool *pool;
+};
+
+/**
+ * struct tee_driver_ops - driver operations vtable
+ * @get_version: returns version of driver
+ * @open: called when the device file is opened
+ * @release: release this open file
+ * @open_session: open a new session
+ * @close_session: close a session
+ * @system_session: declare session as a system session
+ * @invoke_func: invoke a trusted function
+ * @cancel_req: request cancel of an ongoing invoke or open
+ * @supp_recv: called for supplicant to get a command
+ * @supp_send: called for supplicant to send a response
+ * @shm_register: register shared memory buffer in TEE
+ * @shm_unregister: unregister shared memory buffer in TEE
+ */
+struct tee_driver_ops {
+ void (*get_version)(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers);
+ int (*open)(struct tee_context *ctx);
+ void (*release)(struct tee_context *ctx);
+ int (*open_session)(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param);
+ int (*close_session)(struct tee_context *ctx, u32 session);
+ int (*system_session)(struct tee_context *ctx, u32 session);
+ int (*invoke_func)(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param);
+ int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session);
+ int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params,
+ struct tee_param *param);
+ int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params,
+ struct tee_param *param);
+ int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start);
+ int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm);
+};
+
+/**
+ * struct tee_desc - Describes the TEE driver to the subsystem
+ * @name: name of driver
+ * @ops: driver operations vtable
+ * @owner: module providing the driver
+ * @flags: Extra properties of driver, defined by TEE_DESC_* below
+ */
+#define TEE_DESC_PRIVILEGED 0x1
+struct tee_desc {
+ const char *name;
+ const struct tee_driver_ops *ops;
+ struct module *owner;
+ u32 flags;
+};
+
+/**
+ * tee_device_alloc() - Allocate a new struct tee_device instance
+ * @teedesc: Descriptor for this driver
+ * @dev: Parent device for this device
+ * @pool: Shared memory pool, NULL if not used
+ * @driver_data: Private driver data for this device
+ *
+ * Allocates a new struct tee_device instance. The device is
+ * removed by tee_device_unregister().
+ *
+ * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
+ */
+struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
+ struct device *dev,
+ struct tee_shm_pool *pool,
+ void *driver_data);
+
+/**
+ * tee_device_register() - Registers a TEE device
+ * @teedev: Device to register
+ *
+ * tee_device_unregister() needs to be called to remove the @teedev if
+ * this function fails.
+ *
+ * @returns < 0 on failure
+ */
+int tee_device_register(struct tee_device *teedev);
+
+/**
+ * tee_device_unregister() - Removes a TEE device
+ * @teedev: Device to unregister
+ *
+ * This function should be called to remove the @teedev even if
+ * tee_device_register() hasn't been called yet. Does nothing if
+ * @teedev is NULL.
+ */
+void tee_device_unregister(struct tee_device *teedev);
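
A minimal registration sketch (the ops contents and all "example" names
are illustrative, not part of this header):

static const struct tee_driver_ops example_ops = {
	/* .get_version, .open, .release, ... supplied by a real driver */
};

static const struct tee_desc example_desc = {
	.name	= "example-tee",
	.ops	= &example_ops,
	.owner	= THIS_MODULE,
};

static int example_probe(struct device *dev)
{
	struct tee_device *teedev;
	int rc;

	teedev = tee_device_alloc(&example_desc, dev, NULL, NULL);
	if (IS_ERR(teedev))
		return PTR_ERR(teedev);

	rc = tee_device_register(teedev);
	if (rc)
		tee_device_unregister(teedev);	/* valid even before register */
	return rc;
}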
+
+/**
+ * tee_session_calc_client_uuid() - Calculates client UUID for session
+ * @uuid: Resulting UUID
+ * @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*)
+ * @connection_data: Connection data for opening session
+ *
+ * Based on connection method calculates UUIDv5 based client UUID.
+ *
+ * For group based logins verifies that calling process has specified
+ * credentials.
+ *
+ * @return < 0 on failure
+ */
+int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
+ const u8 connection_data[TEE_IOCTL_UUID_LEN]);
+
+/**
+ * struct tee_shm_pool - shared memory pool
+ * @ops: operations
+ * @private_data: private data for the shared memory manager
+ */
+struct tee_shm_pool {
+ const struct tee_shm_pool_ops *ops;
+ void *private_data;
+};
+
+/**
+ * struct tee_shm_pool_ops - shared memory pool operations
+ * @alloc: called when allocating shared memory
+ * @free: called when freeing shared memory
+ * @destroy_pool: called when destroying the pool
+ */
+struct tee_shm_pool_ops {
+ int (*alloc)(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align);
+ void (*free)(struct tee_shm_pool *pool, struct tee_shm *shm);
+ void (*destroy_pool)(struct tee_shm_pool *pool);
+};
+
+/*
+ * tee_shm_pool_alloc_res_mem() - Create a shm manager for reserved memory
+ * @vaddr: Virtual address of start of pool
+ * @paddr: Physical address of start of pool
+ * @size: Size in bytes of the pool
+ * @min_alloc_order: Log2 of the minimum allocation size
+ *
+ * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
+ */
+struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr,
+ phys_addr_t paddr, size_t size,
+ int min_alloc_order);
+
+/**
+ * tee_shm_pool_free() - Free a shared memory pool
+ * @pool: The shared memory pool to free
+ *
+ * There must be no remaining shared memory allocated from this pool when
+ * this function is called.
+ */
+static inline void tee_shm_pool_free(struct tee_shm_pool *pool)
+{
+ pool->ops->destroy_pool(pool);
+}
+
+/**
+ * tee_get_drvdata() - Return driver_data pointer
+ * @returns the driver_data pointer supplied to tee_device_alloc().
+ */
+void *tee_get_drvdata(struct tee_device *teedev);
+
+/**
+ * tee_shm_alloc_priv_buf() - Allocate shared memory for private use by a
+ *			      specific TEE driver
+ * @ctx: The TEE context for shared memory allocation
+ * @size: Shared memory allocation size
+ * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size);
+
+int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
+ int (*shm_register)(struct tee_context *ctx,
+ struct tee_shm *shm,
+ struct page **pages,
+ size_t num_pages,
+ unsigned long start));
+void tee_dyn_shm_free_helper(struct tee_shm *shm,
+ int (*shm_unregister)(struct tee_context *ctx,
+ struct tee_shm *shm));
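
A sketch of wiring these helpers into a pool's .alloc/.free hooks; the
example_* functions stand in for a driver's real register/unregister
implementations and are assumptions, not part of this header:

static int example_shm_register(struct tee_context *ctx, struct tee_shm *shm,
				struct page **pages, size_t num_pages,
				unsigned long start);
static int example_shm_unregister(struct tee_context *ctx,
				  struct tee_shm *shm);

static int example_pool_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
			      size_t size, size_t align)
{
	/* allocates pages and registers them with the TEE via the hook */
	return tee_dyn_shm_alloc_helper(shm, size, align,
					example_shm_register);
}

static void example_pool_free(struct tee_shm_pool *pool, struct tee_shm *shm)
{
	tee_dyn_shm_free_helper(shm, example_shm_unregister);
}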
+
+/**
+ * tee_shm_is_dynamic() - Check if shared memory object is of the dynamic kind
+ * @shm: Shared memory handle
+ * @returns true if object is dynamic shared memory
+ */
+static inline bool tee_shm_is_dynamic(struct tee_shm *shm)
+{
+ return shm && (shm->flags & TEE_SHM_DYNAMIC);
+}
+
+/**
+ * tee_shm_put() - Decrease reference count on a shared memory handle
+ * @shm: Shared memory handle
+ */
+void tee_shm_put(struct tee_shm *shm);
+
+/**
+ * tee_shm_get_id() - Get id of a shared memory object
+ * @shm: Shared memory handle
+ * @returns id
+ */
+static inline int tee_shm_get_id(struct tee_shm *shm)
+{
+ return shm->id;
+}
+
+/**
+ * tee_shm_get_from_id() - Find shared memory object and increase reference
+ * count
+ * @ctx: Context owning the shared memory
+ * @id: Id of shared memory object
+ * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
+
+static inline bool tee_param_is_memref(struct tee_param *param)
+{
+ switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * teedev_open() - Open a struct tee_device
+ * @teedev: Device to open
+ *
+ * @return a pointer to struct tee_context on success or an ERR_PTR on failure.
+ */
+struct tee_context *teedev_open(struct tee_device *teedev);
+
+/**
+ * teedev_close_context() - closes a struct tee_context
+ * @ctx: The struct tee_context to close
+ */
+void teedev_close_context(struct tee_context *ctx);
+
+#endif /*__TEE_CORE_H*/
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 71632e3c5f18..786b9ae6cf4d 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -1,40 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015-2022 Linaro Limited
+ * Copyright (c) 2015-2024 Linaro Limited
*/
#ifndef __TEE_DRV_H
#define __TEE_DRV_H
#include <linux/device.h>
-#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/tee.h>
#include <linux/types.h>
-#include <linux/uuid.h>
/*
- * The file describes the API provided by the generic TEE driver to the
- * specific TEE driver.
+ * The file describes the API provided by the TEE subsystem to the
+ * TEE client drivers.
*/
-#define TEE_SHM_DYNAMIC BIT(0) /* Dynamic shared memory registered */
- /* in secure world */
-#define TEE_SHM_USER_MAPPED BIT(1) /* Memory mapped in user space */
-#define TEE_SHM_POOL BIT(2) /* Memory allocated from pool */
-#define TEE_SHM_PRIV BIT(3) /* Memory private to TEE driver */
-
-struct device;
struct tee_device;
-struct tee_shm;
-struct tee_shm_pool;
/**
* struct tee_context - driver specific context on file pointer data
* @teedev: pointer to this drivers struct tee_device
- * @list_shm: List of shared memory object owned by this context
* @data: driver specific context data, managed by the driver
* @refcount: reference counter for this structure
* @releasing: flag that indicates if context is being released right now.
@@ -57,134 +45,6 @@ struct tee_context {
bool cap_memref_null;
};
-struct tee_param_memref {
- size_t shm_offs;
- size_t size;
- struct tee_shm *shm;
-};
-
-struct tee_param_value {
- u64 a;
- u64 b;
- u64 c;
-};
-
-struct tee_param {
- u64 attr;
- union {
- struct tee_param_memref memref;
- struct tee_param_value value;
- } u;
-};
-
-/**
- * struct tee_driver_ops - driver operations vtable
- * @get_version: returns version of driver
- * @open: called when the device file is opened
- * @release: release this open file
- * @open_session: open a new session
- * @close_session: close a session
- * @system_session: declare session as a system session
- * @invoke_func: invoke a trusted function
- * @cancel_req: request cancel of an ongoing invoke or open
- * @supp_recv: called for supplicant to get a command
- * @supp_send: called for supplicant to send a response
- * @shm_register: register shared memory buffer in TEE
- * @shm_unregister: unregister shared memory buffer in TEE
- */
-struct tee_driver_ops {
- void (*get_version)(struct tee_device *teedev,
- struct tee_ioctl_version_data *vers);
- int (*open)(struct tee_context *ctx);
- void (*release)(struct tee_context *ctx);
- int (*open_session)(struct tee_context *ctx,
- struct tee_ioctl_open_session_arg *arg,
- struct tee_param *param);
- int (*close_session)(struct tee_context *ctx, u32 session);
- int (*system_session)(struct tee_context *ctx, u32 session);
- int (*invoke_func)(struct tee_context *ctx,
- struct tee_ioctl_invoke_arg *arg,
- struct tee_param *param);
- int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session);
- int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params,
- struct tee_param *param);
- int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params,
- struct tee_param *param);
- int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm,
- struct page **pages, size_t num_pages,
- unsigned long start);
- int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm);
-};
-
-/**
- * struct tee_desc - Describes the TEE driver to the subsystem
- * @name: name of driver
- * @ops: driver operations vtable
- * @owner: module providing the driver
- * @flags: Extra properties of driver, defined by TEE_DESC_* below
- */
-#define TEE_DESC_PRIVILEGED 0x1
-struct tee_desc {
- const char *name;
- const struct tee_driver_ops *ops;
- struct module *owner;
- u32 flags;
-};
-
-/**
- * tee_device_alloc() - Allocate a new struct tee_device instance
- * @teedesc: Descriptor for this driver
- * @dev: Parent device for this device
- * @pool: Shared memory pool, NULL if not used
- * @driver_data: Private driver data for this device
- *
- * Allocates a new struct tee_device instance. The device is
- * removed by tee_device_unregister().
- *
- * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
- */
-struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
- struct device *dev,
- struct tee_shm_pool *pool,
- void *driver_data);
-
-/**
- * tee_device_register() - Registers a TEE device
- * @teedev: Device to register
- *
- * tee_device_unregister() need to be called to remove the @teedev if
- * this function fails.
- *
- * @returns < 0 on failure
- */
-int tee_device_register(struct tee_device *teedev);
-
-/**
- * tee_device_unregister() - Removes a TEE device
- * @teedev: Device to unregister
- *
- * This function should be called to remove the @teedev even if
- * tee_device_register() hasn't been called yet. Does nothing if
- * @teedev is NULL.
- */
-void tee_device_unregister(struct tee_device *teedev);
-
-/**
- * tee_session_calc_client_uuid() - Calculates client UUID for session
- * @uuid: Resulting UUID
- * @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*)
- * @connectuon_data: Connection data for opening session
- *
- * Based on connection method calculates UUIDv5 based client UUID.
- *
- * For group based logins verifies that calling process has specified
- * credentials.
- *
- * @return < 0 on failure
- */
-int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
- const u8 connection_data[TEE_IOCTL_UUID_LEN]);
-
/**
* struct tee_shm - shared memory object
* @ctx: context using the object
@@ -195,15 +55,12 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
* @pages: locked pages from userspace
* @num_pages: number of locked pages
* @refcount: reference counter
- * @flags: defined by TEE_SHM_* in tee_drv.h
+ * @flags: defined by TEE_SHM_* in tee_core.h
* @id: unique id of a shared memory object on this device, shared
* with user space
* @sec_world_id:
* secure world assigned id of this shared memory object, not
* used by all drivers
- *
- * This pool is only supposed to be accessed directly from the TEE
- * subsystem and from drivers that implements their own shm pool manager.
*/
struct tee_shm {
struct tee_context *ctx;
@@ -219,88 +76,53 @@ struct tee_shm {
u64 sec_world_id;
};
-/**
- * struct tee_shm_pool - shared memory pool
- * @ops: operations
- * @private_data: private data for the shared memory manager
- */
-struct tee_shm_pool {
- const struct tee_shm_pool_ops *ops;
- void *private_data;
+struct tee_param_memref {
+ size_t shm_offs;
+ size_t size;
+ struct tee_shm *shm;
};
-/**
- * struct tee_shm_pool_ops - shared memory pool operations
- * @alloc: called when allocating shared memory
- * @free: called when freeing shared memory
- * @destroy_pool: called when destroying the pool
- */
-struct tee_shm_pool_ops {
- int (*alloc)(struct tee_shm_pool *pool, struct tee_shm *shm,
- size_t size, size_t align);
- void (*free)(struct tee_shm_pool *pool, struct tee_shm *shm);
- void (*destroy_pool)(struct tee_shm_pool *pool);
+struct tee_param_value {
+ u64 a;
+ u64 b;
+ u64 c;
};
-/*
- * tee_shm_pool_alloc_res_mem() - Create a shm manager for reserved memory
- * @vaddr: Virtual address of start of pool
- * @paddr: Physical address of start of pool
- * @size: Size in bytes of the pool
- *
- * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
- */
-struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr,
- phys_addr_t paddr, size_t size,
- int min_alloc_order);
+struct tee_param {
+ u64 attr;
+ union {
+ struct tee_param_memref memref;
+ struct tee_param_value value;
+ } u;
+};
/**
- * tee_shm_pool_free() - Free a shared memory pool
- * @pool: The shared memory pool to free
- *
- * The must be no remaining shared memory allocated from this pool when
- * this function is called.
+ * tee_shm_alloc_kernel_buf() - Allocate kernel shared memory for a
+ * particular TEE client driver
+ * @ctx: The TEE context for shared memory allocation
+ * @size: Shared memory allocation size
+ * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
*/
-static inline void tee_shm_pool_free(struct tee_shm_pool *pool)
-{
- pool->ops->destroy_pool(pool);
-}
+struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size);
/**
- * tee_get_drvdata() - Return driver_data pointer
- * @returns the driver_data pointer supplied to tee_register().
+ * tee_shm_register_kernel_buf() - Register kernel shared memory for a
+ * particular TEE client driver
+ * @ctx: The TEE context for shared memory registration
+ * @addr: Kernel buffer address
+ * @length: Kernel buffer length
+ * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
*/
-void *tee_get_drvdata(struct tee_device *teedev);
-
-struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size);
-struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size);
-
struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
void *addr, size_t length);
/**
- * tee_shm_is_dynamic() - Check if shared memory object is of the dynamic kind
- * @shm: Shared memory handle
- * @returns true if object is dynamic shared memory
- */
-static inline bool tee_shm_is_dynamic(struct tee_shm *shm)
-{
- return shm && (shm->flags & TEE_SHM_DYNAMIC);
-}
-
-/**
* tee_shm_free() - Free shared memory
* @shm: Handle to shared memory to free
*/
void tee_shm_free(struct tee_shm *shm);
/**
- * tee_shm_put() - Decrease reference count on a shared memory handle
- * @shm: Shared memory handle
- */
-void tee_shm_put(struct tee_shm *shm);
-
-/**
* tee_shm_get_va() - Get virtual address of a shared memory plus an offset
* @shm: Shared memory handle
* @offs: Offset from start of this shared memory
@@ -353,25 +175,6 @@ static inline size_t tee_shm_get_page_offset(struct tee_shm *shm)
}
/**
- * tee_shm_get_id() - Get id of a shared memory object
- * @shm: Shared memory handle
- * @returns id
- */
-static inline int tee_shm_get_id(struct tee_shm *shm)
-{
- return shm->id;
-}
-
-/**
- * tee_shm_get_from_id() - Find shared memory object and increase reference
- * count
- * @ctx: Context owning the shared memory
- * @id: Id of shared memory object
- * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
- */
-struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
-
-/**
* tee_client_open_context() - Open a TEE context
* @start: if not NULL, continue search after this context
* @match: function to check TEE device
@@ -470,18 +273,6 @@ int tee_client_invoke_func(struct tee_context *ctx,
int tee_client_cancel_req(struct tee_context *ctx,
struct tee_ioctl_cancel_arg *arg);
-static inline bool tee_param_is_memref(struct tee_param *param)
-{
- switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
- return true;
- default:
- return false;
- }
-}
-
extern const struct bus_type tee_bus_type;
/**
@@ -509,18 +300,4 @@ struct tee_client_driver {
#define to_tee_client_driver(d) \
container_of(d, struct tee_client_driver, driver)
-/**
- * teedev_open() - Open a struct tee_device
- * @teedev: Device to open
- *
- * @return a pointer to struct tee_context on success or an ERR_PTR on failure.
- */
-struct tee_context *teedev_open(struct tee_device *teedev);
-
-/**
- * teedev_close_context() - closes a struct tee_context
- * @ctx: The struct tee_context to close
- */
-void teedev_close_context(struct tee_context *ctx);
-
#endif /*__TEE_DRV_H*/
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index c33f50177f51..f1155c0439c4 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -61,7 +61,6 @@ enum thermal_notify_event {
* struct thermal_trip - representation of a point in temperature domain
* @temperature: temperature value in miliCelsius
* @hysteresis: relative hysteresis in miliCelsius
- * @threshold: trip crossing notification threshold miliCelsius
* @type: trip point type
* @priv: pointer to driver data associated with this trip
* @flags: flags representing binary properties of the trip
@@ -69,7 +68,6 @@ enum thermal_notify_event {
struct thermal_trip {
int temperature;
int hysteresis;
- int threshold;
enum thermal_trip_type type;
u8 flags;
void *priv;
@@ -81,6 +79,8 @@ struct thermal_trip {
#define THERMAL_TRIP_FLAG_RW (THERMAL_TRIP_FLAG_RW_TEMP | \
THERMAL_TRIP_FLAG_RW_HYST)
+struct thermal_zone_device;
+
struct thermal_zone_device_ops {
int (*bind) (struct thermal_zone_device *,
struct thermal_cooling_device *);
@@ -126,111 +126,6 @@ struct thermal_cooling_device {
#endif
};
-/**
- * struct thermal_zone_device - structure for a thermal zone
- * @id: unique id number for each thermal zone
- * @type: the thermal zone device type
- * @device: &struct device for this thermal zone
- * @removal: removal completion
- * @trip_temp_attrs: attributes for trip points for sysfs: trip temperature
- * @trip_type_attrs: attributes for trip points for sysfs: trip type
- * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
- * @mode: current mode of this thermal zone
- * @devdata: private pointer for device private data
- * @num_trips: number of trip points the thermal zone supports
- * @passive_delay_jiffies: number of jiffies to wait between polls when
- * performing passive cooling.
- * @polling_delay_jiffies: number of jiffies to wait between polls when
- * checking whether trip points have been crossed (0 for
- * interrupt driven systems)
- * @temperature: current temperature. This is only for core code,
- * drivers should use thermal_zone_get_temp() to get the
- * current temperature
- * @last_temperature: previous temperature read
- * @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION
- * @passive: 1 if you've crossed a passive trip point, 0 otherwise.
- * @prev_low_trip: the low current temperature if you've crossed a passive
- trip point.
- * @prev_high_trip: the above current temperature if you've crossed a
- passive trip point.
- * @need_update: if equals 1, thermal_zone_device_update needs to be invoked.
- * @ops: operations this &thermal_zone_device supports
- * @tzp: thermal zone parameters
- * @governor: pointer to the governor for this thermal zone
- * @governor_data: private pointer for governor data
- * @thermal_instances: list of &struct thermal_instance of this thermal zone
- * @ida: &struct ida to generate unique id for this zone's cooling
- * devices
- * @lock: lock to protect thermal_instances list
- * @node: node in thermal_tz_list (in thermal_core.c)
- * @poll_queue: delayed work for polling
- * @notify_event: Last notification event
- * @suspended: thermal zone suspend indicator
- * @trips: array of struct thermal_trip objects
- */
-struct thermal_zone_device {
- int id;
- char type[THERMAL_NAME_LENGTH];
- struct device device;
- struct completion removal;
- struct attribute_group trips_attribute_group;
- struct thermal_attr *trip_temp_attrs;
- struct thermal_attr *trip_type_attrs;
- struct thermal_attr *trip_hyst_attrs;
- enum thermal_device_mode mode;
- void *devdata;
- int num_trips;
- unsigned long passive_delay_jiffies;
- unsigned long polling_delay_jiffies;
- int temperature;
- int last_temperature;
- int emul_temperature;
- int passive;
- int prev_low_trip;
- int prev_high_trip;
- atomic_t need_update;
- struct thermal_zone_device_ops ops;
- struct thermal_zone_params *tzp;
- struct thermal_governor *governor;
- void *governor_data;
- struct list_head thermal_instances;
- struct ida ida;
- struct mutex lock;
- struct list_head node;
- struct delayed_work poll_queue;
- enum thermal_notify_event notify_event;
- bool suspended;
-#ifdef CONFIG_THERMAL_DEBUGFS
- struct thermal_debugfs *debugfs;
-#endif
- struct thermal_trip trips[] __counted_by(num_trips);
-};
-
-/**
- * struct thermal_governor - structure that holds thermal governor information
- * @name: name of the governor
- * @bind_to_tz: callback called when binding to a thermal zone. If it
- * returns 0, the governor is bound to the thermal zone,
- * otherwise it fails.
- * @unbind_from_tz: callback called when a governor is unbound from a
- * thermal zone.
- * @throttle: callback called for every trip point even if temperature is
- * below the trip point temperature
- * @update_tz: callback called when thermal zone internals have changed, e.g.
- * thermal cooling instance was added/removed
- * @governor_list: node in thermal_governor_list (in thermal_core.c)
- */
-struct thermal_governor {
- const char *name;
- int (*bind_to_tz)(struct thermal_zone_device *tz);
- void (*unbind_from_tz)(struct thermal_zone_device *tz);
- int (*throttle)(struct thermal_zone_device *tz,
- const struct thermal_trip *trip);
- void (*update_tz)(struct thermal_zone_device *tz,
- enum thermal_notify_event reason);
- struct list_head governor_list;
-};
-
/* Structure to define Thermal Zone parameters */
struct thermal_zone_params {
const char *governor_name;
diff --git a/include/linux/threads.h b/include/linux/threads.h
index c34173e6c5f1..1674a471b0b4 100644
--- a/include/linux/threads.h
+++ b/include/linux/threads.h
@@ -25,13 +25,13 @@
/*
* This controls the default maximum pid allocated to a process
*/
-#define PID_MAX_DEFAULT (CONFIG_BASE_SMALL ? 0x1000 : 0x8000)
+#define PID_MAX_DEFAULT (IS_ENABLED(CONFIG_BASE_SMALL) ? 0x1000 : 0x8000)
/*
* A maximum of 4 million PIDs should be enough for a while.
* [NOTE: PID/TIDs are limited to 2^30 ~= 1 billion, see FUTEX_TID_MASK.]
*/
-#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \
+#define PID_MAX_LIMIT (IS_ENABLED(CONFIG_BASE_SMALL) ? PAGE_SIZE * 8 : \
(sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT))
/*
diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h
index 62973f7d4610..d306d9dd2207 100644
--- a/include/linux/timerqueue.h
+++ b/include/linux/timerqueue.h
@@ -37,11 +37,6 @@ static inline bool timerqueue_node_queued(struct timerqueue_node *node)
return !RB_EMPTY_NODE(&node->node);
}
-static inline bool timerqueue_node_expires(struct timerqueue_node *node)
-{
- return node->expires;
-}
-
static inline void timerqueue_init_head(struct timerqueue_head *head)
{
head->rb_root = RB_ROOT_CACHED;
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 4ee9d13749ad..c17e4efbb2e5 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -23,6 +23,7 @@
#include <linux/fs.h>
#include <linux/highmem.h>
#include <crypto/hash_info.h>
+#include <crypto/aes.h>
#define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */
#define TPM_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
@@ -30,17 +31,28 @@
struct tpm_chip;
struct trusted_key_payload;
struct trusted_key_options;
+/* opaque structure, holds auth session parameters like the session key */
+struct tpm2_auth;
+
+enum tpm2_session_types {
+ TPM2_SE_HMAC = 0x00,
+ TPM2_SE_POLICY = 0x01,
+ TPM2_SE_TRIAL = 0x02,
+};
/* if you add a new hash to this, increment TPM_MAX_HASHES below */
enum tpm_algorithms {
TPM_ALG_ERROR = 0x0000,
TPM_ALG_SHA1 = 0x0004,
+ TPM_ALG_AES = 0x0006,
TPM_ALG_KEYEDHASH = 0x0008,
TPM_ALG_SHA256 = 0x000B,
TPM_ALG_SHA384 = 0x000C,
TPM_ALG_SHA512 = 0x000D,
TPM_ALG_NULL = 0x0010,
TPM_ALG_SM3_256 = 0x0012,
+ TPM_ALG_ECC = 0x0023,
+ TPM_ALG_CFB = 0x0043,
};
/*
@@ -49,6 +61,11 @@ enum tpm_algorithms {
*/
#define TPM_MAX_HASHES 5
+enum tpm2_curves {
+ TPM2_ECC_NONE = 0x0000,
+ TPM2_ECC_NIST_P256 = 0x0003,
+};
+
struct tpm_digest {
u16 alg_id;
u8 digest[TPM_MAX_DIGEST_SIZE];
@@ -116,6 +133,20 @@ struct tpm_chip_seqops {
const struct seq_operations *seqops;
};
+/* fixed define for the curve we use which is NIST_P256 */
+#define EC_PT_SZ 32
+
+/*
+ * fixed define for the size of a name. This is actually HASHALG size
+ * plus 2, so 34 for SHA256
+ */
+#define TPM2_NAME_SIZE 34
+
+/*
+ * The maximum size for an object context
+ */
+#define TPM2_MAX_CONTEXT_SIZE 4096
+
struct tpm_chip {
struct device dev;
struct device devs;
@@ -170,6 +201,18 @@ struct tpm_chip {
/* active locality */
int locality;
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+ /* details for communication security via sessions */
+
+ /* saved context for NULL seed */
+ u8 null_key_context[TPM2_MAX_CONTEXT_SIZE];
+ /* name of NULL seed */
+ u8 null_key_name[TPM2_NAME_SIZE];
+ u8 null_ec_key_x[EC_PT_SZ];
+ u8 null_ec_key_y[EC_PT_SZ];
+ struct tpm2_auth *auth;
+#endif
};
#define TPM_HEADER_SIZE 10
@@ -194,6 +237,7 @@ enum tpm2_timeouts {
enum tpm2_structures {
TPM2_ST_NO_SESSIONS = 0x8001,
TPM2_ST_SESSIONS = 0x8002,
+ TPM2_ST_CREATION = 0x8021,
};
/* Indicates from what layer of the software stack the error comes from */
@@ -204,6 +248,7 @@ enum tpm2_return_codes {
TPM2_RC_SUCCESS = 0x0000,
TPM2_RC_HASH = 0x0083, /* RC_FMT1 */
TPM2_RC_HANDLE = 0x008B,
+ TPM2_RC_INTEGRITY = 0x009F,
TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */
TPM2_RC_FAILURE = 0x0101,
TPM2_RC_DISABLED = 0x0120,
@@ -231,6 +276,8 @@ enum tpm2_command_codes {
TPM2_CC_CONTEXT_LOAD = 0x0161,
TPM2_CC_CONTEXT_SAVE = 0x0162,
TPM2_CC_FLUSH_CONTEXT = 0x0165,
+ TPM2_CC_READ_PUBLIC = 0x0173,
+ TPM2_CC_START_AUTH_SESS = 0x0176,
TPM2_CC_VERIFY_SIGNATURE = 0x0177,
TPM2_CC_GET_CAPABILITY = 0x017A,
TPM2_CC_GET_RANDOM = 0x017B,
@@ -243,9 +290,25 @@ enum tpm2_command_codes {
};
enum tpm2_permanent_handles {
+ TPM2_RH_NULL = 0x40000007,
TPM2_RS_PW = 0x40000009,
};
+/* Most Significant Octet for key types */
+enum tpm2_mso_type {
+ TPM2_MSO_NVRAM = 0x01,
+ TPM2_MSO_SESSION = 0x02,
+ TPM2_MSO_POLICY = 0x03,
+ TPM2_MSO_PERMANENT = 0x40,
+ TPM2_MSO_VOLATILE = 0x80,
+ TPM2_MSO_PERSISTENT = 0x81,
+};
+
+static inline enum tpm2_mso_type tpm2_handle_mso(u32 handle)
+{
+ return handle >> 24;
+}
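
For example, the MSO of a session handle decodes as expected:

/* tpm2_handle_mso(0x02000001) == TPM2_MSO_SESSION */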
+
enum tpm2_capabilities {
TPM2_CAP_HANDLES = 1,
TPM2_CAP_COMMANDS = 2,
@@ -284,6 +347,7 @@ enum tpm_chip_flags {
TPM_CHIP_FLAG_FIRMWARE_UPGRADE = BIT(7),
TPM_CHIP_FLAG_SUSPENDED = BIT(8),
TPM_CHIP_FLAG_HWRNG_DISABLED = BIT(9),
+ TPM_CHIP_FLAG_DISABLE = BIT(10),
};
#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
@@ -297,28 +361,61 @@ struct tpm_header {
};
} __packed;
-/* A string buffer type for constructing TPM commands. This is based on the
- * ideas of string buffer code in security/keys/trusted.h but is heap based
- * in order to keep the stack usage minimal.
- */
-
enum tpm_buf_flags {
+ /* the capacity exceeded: */
TPM_BUF_OVERFLOW = BIT(0),
+ /* TPM2B format: */
+ TPM_BUF_TPM2B = BIT(1),
+ /* read out of boundary: */
+ TPM_BUF_BOUNDARY_ERROR = BIT(2),
};
+/*
+ * A string buffer type for constructing TPM commands.
+ */
struct tpm_buf {
- unsigned int flags;
+ u32 flags;
+ u32 length;
u8 *data;
+ u8 handles;
};
enum tpm2_object_attributes {
TPM2_OA_FIXED_TPM = BIT(1),
+ TPM2_OA_ST_CLEAR = BIT(2),
TPM2_OA_FIXED_PARENT = BIT(4),
+ TPM2_OA_SENSITIVE_DATA_ORIGIN = BIT(5),
TPM2_OA_USER_WITH_AUTH = BIT(6),
+ TPM2_OA_ADMIN_WITH_POLICY = BIT(7),
+ TPM2_OA_NO_DA = BIT(10),
+ TPM2_OA_ENCRYPTED_DUPLICATION = BIT(11),
+ TPM2_OA_RESTRICTED = BIT(16),
+ TPM2_OA_DECRYPT = BIT(17),
+ TPM2_OA_SIGN = BIT(18),
};
+/*
+ * definitions for the canonical template. These are mandated
+ * by the TCG key template documents
+ */
+
+#define AES_KEY_BYTES AES_KEYSIZE_128
+#define AES_KEY_BITS (AES_KEY_BYTES*8)
+#define TPM2_OA_TMPL (TPM2_OA_NO_DA | \
+ TPM2_OA_FIXED_TPM | \
+ TPM2_OA_FIXED_PARENT | \
+ TPM2_OA_SENSITIVE_DATA_ORIGIN | \
+ TPM2_OA_USER_WITH_AUTH | \
+ TPM2_OA_DECRYPT | \
+ TPM2_OA_RESTRICTED)
+
enum tpm2_session_attributes {
TPM2_SA_CONTINUE_SESSION = BIT(0),
+ TPM2_SA_AUDIT_EXCLUSIVE = BIT(1),
+ TPM2_SA_AUDIT_RESET = BIT(3),
+ TPM2_SA_DECRYPT = BIT(5),
+ TPM2_SA_ENCRYPT = BIT(6),
+ TPM2_SA_AUDIT = BIT(7),
};
struct tpm2_hash {
@@ -326,84 +423,21 @@ struct tpm2_hash {
unsigned int tpm_id;
};
-static inline void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- head->tag = cpu_to_be16(tag);
- head->length = cpu_to_be32(sizeof(*head));
- head->ordinal = cpu_to_be32(ordinal);
-}
-
-static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
-{
- buf->data = (u8 *)__get_free_page(GFP_KERNEL);
- if (!buf->data)
- return -ENOMEM;
-
- buf->flags = 0;
- tpm_buf_reset(buf, tag, ordinal);
- return 0;
-}
-
-static inline void tpm_buf_destroy(struct tpm_buf *buf)
-{
- free_page((unsigned long)buf->data);
-}
-
-static inline u32 tpm_buf_length(struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- return be32_to_cpu(head->length);
-}
-
-static inline u16 tpm_buf_tag(struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- return be16_to_cpu(head->tag);
-}
-
-static inline void tpm_buf_append(struct tpm_buf *buf,
- const unsigned char *new_data,
- unsigned int new_len)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
- u32 len = tpm_buf_length(buf);
-
- /* Return silently if overflow has already happened. */
- if (buf->flags & TPM_BUF_OVERFLOW)
- return;
-
- if ((len + new_len) > PAGE_SIZE) {
- WARN(1, "tpm_buf: overflow\n");
- buf->flags |= TPM_BUF_OVERFLOW;
- return;
- }
-
- memcpy(&buf->data[len], new_data, new_len);
- head->length = cpu_to_be32(len + new_len);
-}
-
-static inline void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value)
-{
- tpm_buf_append(buf, &value, 1);
-}
-
-static inline void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value)
-{
- __be16 value2 = cpu_to_be16(value);
-
- tpm_buf_append(buf, (u8 *) &value2, 2);
-}
-
-static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
-{
- __be32 value2 = cpu_to_be32(value);
-
- tpm_buf_append(buf, (u8 *) &value2, 4);
-}
+int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal);
+void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal);
+int tpm_buf_init_sized(struct tpm_buf *buf);
+void tpm_buf_reset_sized(struct tpm_buf *buf);
+void tpm_buf_destroy(struct tpm_buf *buf);
+u32 tpm_buf_length(struct tpm_buf *buf);
+void tpm_buf_append(struct tpm_buf *buf, const u8 *new_data, u16 new_length);
+void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value);
+void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value);
+void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value);
+u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset);
+u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset);
+u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset);
+
+u8 *tpm_buf_parameters(struct tpm_buf *buf);
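Editor's sketch (not part of the header): how a caller might build a command and read back the response with the out-of-line API above. tpm_transmit_cmd() is an assumed driver-internal transport helper, and the TPM2B size-prefixed response layout is used purely for illustration.

/* Assumes tpm_transmit_cmd() exists in the driver; the read helpers set
 * TPM_BUF_BOUNDARY_ERROR instead of reading past the end of the buffer. */
static int example_get_random(struct tpm_chip *chip, u8 *out, u16 num_bytes)
{
	struct tpm_buf buf;
	off_t offset = TPM_HEADER_SIZE;
	u16 len;
	int rc;

	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
	if (rc)
		return rc;

	tpm_buf_append_u16(&buf, num_bytes);
	rc = tpm_transmit_cmd(chip, &buf, 0, "get random"); /* assumed helper */
	if (!rc) {
		/* response body: TPM2B, i.e. a u16 size then the bytes */
		len = tpm_buf_read_u16(&buf, &offset);
		memcpy(out, &buf.data[offset], min_t(u16, len, num_bytes));
	}
	tpm_buf_destroy(&buf);
	return rc;
}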
/*
* Check if TPM device is in the firmware upgrade mode.
@@ -415,7 +449,7 @@ static inline bool tpm_is_firmware_upgrade(struct tpm_chip *chip)
static inline u32 tpm2_rc_value(u32 rc)
{
- return (rc & BIT(7)) ? rc & 0xff : rc;
+ return (rc & BIT(7)) ? rc & 0xbf : rc;
}
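The widened mask matters for format-one return codes, where bit 6 selects parameter versus handle encoding and bits 8-11 carry the parameter number. A worked example (editor's illustration of the mask change):

/*
 * TPM_RC_VALUE is 0x084. Reported against parameter 1 it arrives as
 * 0x1c4 (N = 1 in bits 8-11, P = bit 6, F = bit 7). Then:
 *
 *	0x1c4 & 0xbf == 0x084	// matches TPM_RC_VALUE
 *	0x1c4 & 0xff == 0x0c4	// the old mask left P set and did not
 *
 * so callers can compare against the bare return code regardless of
 * which parameter tripped the error.
 */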
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
@@ -429,10 +463,19 @@ extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digest);
extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digests);
-extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen);
extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max);
extern struct tpm_chip *tpm_default_chip(void);
void tpm2_flush_context(struct tpm_chip *chip, u32 handle);
+
+static inline void tpm_buf_append_empty_auth(struct tpm_buf *buf, u32 handle)
+{
+ /* simple authorization for empty auth */
+ tpm_buf_append_u32(buf, 9); /* total length of auth */
+ tpm_buf_append_u32(buf, handle);
+ tpm_buf_append_u16(buf, 0); /* nonce len */
+ tpm_buf_append_u8(buf, 0); /* attributes */
+ tpm_buf_append_u16(buf, 0); /* hmac len */
+}
#else
static inline int tpm_is_tpm2(struct tpm_chip *chip)
{
@@ -450,10 +493,6 @@ static inline int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
return -ENODEV;
}
-static inline int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
-{
- return -ENODEV;
-}
static inline int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max)
{
return -ENODEV;
@@ -463,5 +502,102 @@ static inline struct tpm_chip *tpm_default_chip(void)
{
return NULL;
}
+
+static inline void tpm_buf_append_empty_auth(struct tpm_buf *buf, u32 handle)
+{
+}
#endif
+#ifdef CONFIG_TCG_TPM2_HMAC
+
+int tpm2_start_auth_session(struct tpm_chip *chip);
+void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
+ u32 handle, u8 *name);
+void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
+ u8 attributes, u8 *passphrase,
+ int passphraselen);
+static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip,
+ struct tpm_buf *buf,
+ u8 attributes,
+ u8 *passphrase,
+ int passphraselen)
+{
+ tpm_buf_append_hmac_session(chip, buf, attributes, passphrase,
+ passphraselen);
+}
+void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf);
+int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
+ int rc);
+void tpm2_end_auth_session(struct tpm_chip *chip);
+#else
+#include <asm/unaligned.h>
+
+static inline int tpm2_start_auth_session(struct tpm_chip *chip)
+{
+ return 0;
+}
+static inline void tpm2_end_auth_session(struct tpm_chip *chip)
+{
+}
+static inline void tpm_buf_append_name(struct tpm_chip *chip,
+ struct tpm_buf *buf,
+ u32 handle, u8 *name)
+{
+ tpm_buf_append_u32(buf, handle);
+ /* track how many handles have been appended to the buffer */
+ buf->handles++;
+}
+static inline void tpm_buf_append_hmac_session(struct tpm_chip *chip,
+ struct tpm_buf *buf,
+ u8 attributes, u8 *passphrase,
+ int passphraselen)
+{
+ /* offset tells us where the sessions area begins */
+ int offset = buf->handles * 4 + TPM_HEADER_SIZE;
+ /* 9 = auth handle(4) + nonce size(2) + attributes(1) + hmac size(2) */
+ u32 len = 9 + passphraselen;
+
+ if (tpm_buf_length(buf) != offset) {
+ /* not the first session so update the existing length */
+ len += get_unaligned_be32(&buf->data[offset]);
+ put_unaligned_be32(len, &buf->data[offset]);
+ } else {
+ tpm_buf_append_u32(buf, len);
+ }
+ /* auth handle */
+ tpm_buf_append_u32(buf, TPM2_RS_PW);
+ /* nonce */
+ tpm_buf_append_u16(buf, 0);
+ /* attributes */
+ tpm_buf_append_u8(buf, 0);
+ /* passphrase */
+ tpm_buf_append_u16(buf, passphraselen);
+ tpm_buf_append(buf, passphrase, passphraselen);
+}
+static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip,
+ struct tpm_buf *buf,
+ u8 attributes,
+ u8 *passphrase,
+ int passphraselen)
+{
+ int offset = buf->handles * 4 + TPM_HEADER_SIZE;
+ struct tpm_header *head = (struct tpm_header *) buf->data;
+
+ /*
+ * if the only sessions are optional, the command tag
+ * must change to TPM2_ST_NO_SESSIONS
+ */
+ if (tpm_buf_length(buf) == offset)
+ head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+}
+static inline void tpm_buf_fill_hmac_session(struct tpm_chip *chip,
+ struct tpm_buf *buf)
+{
+}
+static inline int tpm_buf_check_hmac_response(struct tpm_chip *chip,
+ struct tpm_buf *buf,
+ int rc)
+{
+ return rc;
+}
+#endif /* CONFIG_TCG_TPM2_HMAC */
+
#endif
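Editor's sketch of the intended call sequence for the session interface above. Only the session helpers come from this header; tpm_transmit_cmd() is again an assumed transport helper, and the ordinal is illustrative. With CONFIG_TCG_TPM2_HMAC=n the identical sequence degrades to a plain password (TPM2_RS_PW) authorization via the inline fallbacks.

/* Sketch: authorizing a command with an HMAC session. Session teardown
 * on the success path is assumed to be handled via the response check
 * and TPM2_SA_CONTINUE_SESSION; error handling is abbreviated. */
static int example_authorized_cmd(struct tpm_chip *chip, u32 handle, u8 *name)
{
	struct tpm_buf buf;
	int rc;

	rc = tpm2_start_auth_session(chip);
	if (rc)
		return rc;

	rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
	if (rc) {
		tpm2_end_auth_session(chip);
		return rc;
	}

	tpm_buf_append_name(chip, &buf, handle, name);
	tpm_buf_append_hmac_session(chip, &buf, TPM2_SA_CONTINUE_SESSION,
				    NULL, 0);
	tpm_buf_fill_hmac_session(chip, &buf);
	rc = tpm_transmit_cmd(chip, &buf, 0, "unseal"); /* assumed helper */
	rc = tpm_buf_check_hmac_response(chip, &buf, rc);
	tpm_buf_destroy(&buf);
	return rc;
}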
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 6f9bdfb09d1d..9df3e2973626 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -765,8 +765,11 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
-int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
-int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
+
+struct bpf_raw_tp_link;
+int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link);
+int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link);
+
struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
@@ -794,11 +797,12 @@ perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
return -EOPNOTSUPP;
}
-static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p)
+struct bpf_raw_tp_link;
+static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{
return -EOPNOTSUPP;
}
-static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p)
+static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{
return -EOPNOTSUPP;
}
@@ -909,31 +913,31 @@ void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_free_bpf_prog(struct perf_event *event);
-void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
-void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
-void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run1(struct bpf_raw_tp_link *link, u64 arg1);
+void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2);
+void bpf_trace_run3(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3);
-void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run4(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4);
-void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run5(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5);
-void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run6(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6);
-void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run7(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
-void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run8(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
u64 arg8);
-void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run9(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
u64 arg8, u64 arg9);
-void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run10(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
u64 arg8, u64 arg9, u64 arg10);
-void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run11(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
u64 arg8, u64 arg9, u64 arg10, u64 arg11);
-void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run12(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
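Threading the link instead of the bare prog lets the run-time read per-link state (such as the BPF cookie) at call time. Editor's sketch of the approximate shape of a generated raw-tracepoint thunk under the new signature; it is modelled loosely on include/trace/bpf_probe.h, not copied from it.

/* The data registered with the tracepoint is now the link itself, and
 * bpf_trace_run2() digs the prog (and cookie) out of it. */
static notrace void
__bpf_trace_example(void *__data, unsigned long a1, unsigned long a2)
{
	struct bpf_raw_tp_link *link = __data;

	bpf_trace_run2(link, (u64)a1, (u64)a2);
}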
diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
index d48cd92d2364..24ea8ac049b4 100644
--- a/include/linux/trace_recursion.h
+++ b/include/linux/trace_recursion.h
@@ -135,7 +135,7 @@ extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
# define do_ftrace_record_recursion(ip, pip) do { } while (0)
#endif
-#ifdef CONFIG_ARCH_WANTS_NO_INSTR
+#ifdef CONFIG_FTRACE_VALIDATE_RCU_IS_WATCHING
# define trace_warn_on_no_rcu(ip) \
({ \
bool __ret = !rcu_is_watching(); \
diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h
index 7a5fe17b6bf9..d03f74658716 100644
--- a/include/linux/tracefs.h
+++ b/include/linux/tracefs.h
@@ -62,6 +62,8 @@ struct eventfs_file;
typedef int (*eventfs_callback)(const char *name, umode_t *mode, void **data,
const struct file_operations **fops);
+typedef void (*eventfs_release)(const char *name, void *data);
+
/**
* struct eventfs_entry - dynamically created eventfs file callback handler
* @name: The name of the dynamic file in an eventfs directory
@@ -72,6 +74,7 @@ typedef int (*eventfs_callback)(const char *name, umode_t *mode, void **data,
struct eventfs_entry {
const char *name;
eventfs_callback callback;
+ eventfs_release release;
};
struct eventfs_inode;
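The new release callback gives dynamically created entries a hook to drop their private data when the file goes away. Editor's sketch of wiring one up; the entry contents and the callback's return contract are assumptions for illustration.

/* Hypothetical entry: @callback hands out fops/data for the file, and
 * @release lets the owner free that data when the file is removed. */
static int example_callback(const char *name, umode_t *mode, void **data,
			    const struct file_operations **fops)
{
	/* fill in *fops/*data here; assumed to return nonzero when handled */
	return 1;
}

static void example_release(const char *name, void *data)
{
	kfree(data);
}

static struct eventfs_entry example_entries[] = {
	{
		.name		= "example",
		.callback	= example_callback,
		.release	= example_release,
	},
};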
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 2b2e6f0a54d6..2372f9357240 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -145,15 +145,12 @@ struct tty_operations;
* @count: count of open processes, reaching zero cancels all the work for
* this tty and drops a @kref too (but does not free this tty)
* @winsize: size of the terminal "window" (cf. @winsize_mutex)
- * @flow: flow settings grouped together, see also @flow.unused
+ * @flow: flow settings grouped together
* @flow.lock: lock for @flow members
* @flow.stopped: tty stopped/started by stop_tty()/start_tty()
* @flow.tco_stopped: tty stopped/started by %TCOOFF/%TCOON ioctls (it has
* precedence over @flow.stopped)
- * @flow.unused: alignment for Alpha, so that no members other than @flow.* are
- * modified by the same 64b word store. The @flow's __aligned is
- * there for the very same reason.
- * @ctrl: control settings grouped together, see also @ctrl.unused
+ * @ctrl: control settings grouped together
* @ctrl.lock: lock for @ctrl members
* @ctrl.pgrp: process group of this tty (setpgrp(2))
* @ctrl.session: session of this tty (setsid(2)). Writes are protected by both
@@ -161,7 +158,6 @@ struct tty_operations;
* them.
* @ctrl.pktstatus: packet mode status (bitwise OR of %TIOCPKT_ constants)
* @ctrl.packet: packet mode enabled
- * @ctrl.unused: alignment for Alpha, see @flow.unused for explanation
* @hw_stopped: not controlled by the tty layer, under @driver's control for CTS
* handling
* @receive_room: bytes permitted to feed to @ldisc without any being lost
@@ -216,8 +212,7 @@ struct tty_struct {
spinlock_t lock;
bool stopped;
bool tco_stopped;
- unsigned long unused[0];
- } __aligned(sizeof(unsigned long)) flow;
+ } flow;
struct {
struct pid *pgrp;
@@ -225,8 +220,7 @@ struct tty_struct {
spinlock_t lock;
unsigned char pktstatus;
bool packet;
- unsigned long unused[0];
- } __aligned(sizeof(unsigned long)) ctrl;
+ } ctrl;
bool hw_stopped;
bool closing;
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 17539d089666..3eb3f2b9a2a0 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -24,7 +24,7 @@ static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
}
#define UDP_HTABLE_SIZE_MIN_PERNET 128
-#define UDP_HTABLE_SIZE_MIN (CONFIG_BASE_SMALL ? 128 : 256)
+#define UDP_HTABLE_SIZE_MIN (IS_ENABLED(CONFIG_BASE_SMALL) ? 128 : 256)
#define UDP_HTABLE_SIZE_MAX 65536
static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
@@ -108,7 +108,7 @@ struct udp_sock {
#define udp_assign_bit(nr, sk, val) \
assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)
-#define UDP_MAX_SEGMENTS (1 << 6UL)
+#define UDP_MAX_SEGMENTS (1 << 7UL)
#define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 00cebe2b70de..7020adedfa08 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -206,6 +206,16 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
}
static __always_inline __must_check
+bool copy_to_iter_full(const void *addr, size_t bytes, struct iov_iter *i)
+{
+ size_t copied = copy_to_iter(addr, bytes, i);
+
+ if (likely(copied == bytes))
+ return true;
+ iov_iter_revert(i, copied);
+ return false;
+}
+
+static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
size_t copied = copy_from_iter(addr, bytes, i);
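copy_to_iter_full() mirrors the _from_ variant: all-or-nothing semantics, reverting the iterator on a short copy so a retry sees a clean state. Editor's usage sketch; the record type and fill helper are hypothetical.

/* Hypothetical read path: either the whole record reaches the iterator
 * or the iterator is left exactly where it started. */
static ssize_t example_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct example_record rec;	/* hypothetical record type */

	fill_example_record(&rec);	/* hypothetical helper */
	if (!copy_to_iter_full(&rec, sizeof(rec), to))
		return -EFAULT;
	return sizeof(rec);
}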
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 26c4325aa373..96fea920873b 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -126,6 +126,8 @@ struct virtio_admin_cmd {
* @vqs: the list of virtqueues for this device.
* @features: the features supported by both driver and device.
* @priv: private pointer for the driver's use.
+ * @debugfs_dir: debugfs directory entry.
+ * @debugfs_filter_features: features to be filtered set by debugfs.
*/
struct virtio_device {
int index;
@@ -141,6 +143,10 @@ struct virtio_device {
struct list_head vqs;
u64 features;
void *priv;
+#ifdef CONFIG_VIRTIO_DEBUG
+ struct dentry *debugfs_dir;
+ u64 debugfs_filter_features;
+#endif
};
#define dev_to_virtio(_dev) container_of_const(_dev, struct virtio_device, dev)
@@ -237,4 +243,33 @@ void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t a
void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr,
unsigned long offset, size_t size,
enum dma_data_direction dir);
+
+#ifdef CONFIG_VIRTIO_DEBUG
+void virtio_debug_device_init(struct virtio_device *dev);
+void virtio_debug_device_exit(struct virtio_device *dev);
+void virtio_debug_device_filter_features(struct virtio_device *dev);
+void virtio_debug_init(void);
+void virtio_debug_exit(void);
+#else
+static inline void virtio_debug_device_init(struct virtio_device *dev)
+{
+}
+
+static inline void virtio_debug_device_exit(struct virtio_device *dev)
+{
+}
+
+static inline void virtio_debug_device_filter_features(struct virtio_device *dev)
+{
+}
+
+static inline void virtio_debug_init(void)
+{
+}
+
+static inline void virtio_debug_exit(void)
+{
+}
+#endif
+
#endif /* _LINUX_VIRTIO_H */
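The stubs keep call sites unconditional. Editor's sketch of where the hooks would sit in device registration; this is a simplification, not the actual drivers/virtio/virtio.c flow.

/* The debug hooks compile to no-ops when CONFIG_VIRTIO_DEBUG=n, so the
 * register/unregister paths can call them without #ifdefs. */
static int example_register_virtio_device(struct virtio_device *dev)
{
	virtio_debug_device_init(dev);	/* create the per-device debugfs dir */
	/* ... feature negotiation would happen here ... */
	virtio_debug_device_filter_features(dev); /* apply the debugfs mask */
	return 0;
}

static void example_unregister_virtio_device(struct virtio_device *dev)
{
	virtio_debug_device_exit(dev);	/* tear down the debugfs dir */
}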
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index 3684487d01e1..29dd5b91dd7d 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -5,10 +5,6 @@
#include <linux/context_tracking_state.h>
#include <linux/sched.h>
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-#include <asm/vtime.h>
-#endif
-
/*
* Common vtime APIs
*/
@@ -18,7 +14,6 @@ extern void vtime_account_idle(struct task_struct *tsk);
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-extern void arch_vtime_task_switch(struct task_struct *tsk);
extern void vtime_user_enter(struct task_struct *tsk);
extern void vtime_user_exit(struct task_struct *tsk);
extern void vtime_guest_enter(struct task_struct *tsk);
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 158784dd189a..fb3993894536 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -51,20 +51,23 @@ enum work_bits {
* data contains off-queue information when !WORK_STRUCT_PWQ.
*
* MSB
- * [ pool ID ]                  [ OFFQ flags ] [ STRUCT flags ]
- *                                   1 bit       4 or 5 bits
+ * [ pool ID ] [ disable depth ] [ OFFQ flags ] [ STRUCT flags ]
+ *               16 bits              1 bit       4 or 5 bits
*/
WORK_OFFQ_FLAG_SHIFT = WORK_STRUCT_FLAG_BITS,
- WORK_OFFQ_CANCELING_BIT = WORK_OFFQ_FLAG_SHIFT,
+ WORK_OFFQ_BH_BIT = WORK_OFFQ_FLAG_SHIFT,
WORK_OFFQ_FLAG_END,
WORK_OFFQ_FLAG_BITS = WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT,
+ WORK_OFFQ_DISABLE_SHIFT = WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS,
+ WORK_OFFQ_DISABLE_BITS = 16,
+
/*
* When a work item is off queue, the high bits encode off-queue flags
* and the last pool it was on. Cap pool ID to 31 bits and use the
* highest number to indicate that no pool is associated.
*/
- WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS,
+ WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_DISABLE_SHIFT + WORK_OFFQ_DISABLE_BITS,
WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
};
@@ -96,7 +99,9 @@ enum wq_misc_consts {
};
/* Convenience constants - of type 'unsigned long', not 'enum'! */
-#define WORK_OFFQ_CANCELING (1ul << WORK_OFFQ_CANCELING_BIT)
+#define WORK_OFFQ_BH (1ul << WORK_OFFQ_BH_BIT)
+#define WORK_OFFQ_FLAG_MASK (((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT)
+#define WORK_OFFQ_DISABLE_MASK (((1ul << WORK_OFFQ_DISABLE_BITS) - 1) << WORK_OFFQ_DISABLE_SHIFT)
#define WORK_OFFQ_POOL_NONE ((1ul << WORK_OFFQ_POOL_BITS) - 1)
#define WORK_STRUCT_NO_POOL (WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)
#define WORK_STRUCT_PWQ_MASK (~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1))
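With the new field carved out, the disable depth is a plain 16-bit counter packed between the off-queue flags and the pool ID. Editor's sketch of how the masks compose; the helper name is illustrative and the real accessors live in kernel/workqueue.c.

/* Illustrative accessor: pull the disable depth out of an off-queue
 * work->data word using the masks defined above. */
static inline unsigned long example_offq_disable_depth(unsigned long data)
{
	return (data & WORK_OFFQ_DISABLE_MASK) >> WORK_OFFQ_DISABLE_SHIFT;
}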
@@ -180,6 +185,9 @@ struct workqueue_attrs {
* Below fields aren't properties of a worker_pool. They only modify how
* :c:func:`apply_workqueue_attrs` selects pools and thus don't
* participate in pool hash calculations or equality comparisons.
+ *
+ * If @affn_strict is set, @cpumask isn't a property of a worker_pool
+ * either.
*/
/**
@@ -465,7 +473,7 @@ void workqueue_softirq_dead(unsigned int cpu);
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags
* @max_active: max in-flight work items, 0 for default
- * remaining args: args for @fmt
+ * @...: args for @fmt
*
* For a per-cpu workqueue, @max_active limits the number of in-flight work
* items for each CPU. e.g. @max_active of 1 indicates that each CPU can be
@@ -559,6 +567,14 @@ extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
+extern bool disable_work(struct work_struct *work);
+extern bool disable_work_sync(struct work_struct *work);
+extern bool enable_work(struct work_struct *work);
+
+extern bool disable_delayed_work(struct delayed_work *dwork);
+extern bool disable_delayed_work_sync(struct delayed_work *dwork);
+extern bool enable_delayed_work(struct delayed_work *dwork);
+
extern bool flush_rcu_work(struct rcu_work *rwork);
extern void workqueue_set_max_active(struct workqueue_struct *wq,
@@ -666,6 +682,32 @@ static inline bool schedule_work(struct work_struct *work)
return queue_work(system_wq, work);
}
+/**
+ * enable_and_queue_work - Enable and queue a work item on a specific workqueue
+ * @wq: The target workqueue
+ * @work: The work item to be enabled and queued
+ *
+ * This function combines the operations of enable_work() and queue_work(),
+ * providing a convenient way to enable and queue a work item in a single call.
+ * It invokes enable_work() on @work and then queues it if the disable depth
+ * reached 0. Returns %true if the disable depth reached 0 and @work is queued,
+ * and %false otherwise.
+ *
+ * Note that @work is always queued when disable depth reaches zero. If the
+ * desired behavior is queueing only if certain events took place while @work
+ * was disabled, the user should implement the necessary state tracking and
+ * perform explicit conditional queueing after enable_work().
+ */
+static inline bool enable_and_queue_work(struct workqueue_struct *wq,
+ struct work_struct *work)
+{
+ if (enable_work(work)) {
+ queue_work(wq, work);
+ return true;
+ }
+ return false;
+}
+
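The kerneldoc above recommends explicit state tracking when unconditional queueing is not wanted. Editor's sketch of that pattern; the surrounding struct and the pending flag are hypothetical.

/* Queue on enable only if an event arrived while the work was disabled. */
struct example_dev {
	struct work_struct work;
	atomic_t event_pending;		/* set by the event source */
};

static void example_enable(struct example_dev *dev)
{
	/* enable_work() returns true once the disable depth reaches 0 */
	if (enable_work(&dev->work) &&
	    atomic_xchg(&dev->event_pending, 0))
		queue_work(system_wq, &dev->work);
}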
/*
* Detect attempt to flush system-wide workqueues at compile time when possible.
* Warn attempt to flush system-wide workqueues at runtime.
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index d2e4d3624689..0b618ec04115 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -1145,7 +1145,7 @@ static inline void xa_release(struct xarray *xa, unsigned long index)
* doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
*/
#ifndef XA_CHUNK_SHIFT
-#define XA_CHUNK_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
+#define XA_CHUNK_SHIFT (IS_ENABLED(CONFIG_BASE_SMALL) ? 4 : 6)
#endif
#define XA_CHUNK_SIZE (1UL << XA_CHUNK_SHIFT)
#define XA_CHUNK_MASK (XA_CHUNK_SIZE - 1)
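Editor's note: this hunk and the UDP_HTABLE_SIZE_MIN change above are the same cleanup, presumably tracking BASE_SMALL becoming a bool Kconfig symbol. A bool symbol is only #defined when enabled, so a bare "CONFIG_BASE_SMALL ?" expression would no longer compile in the =n case; IS_ENABLED() evaluates to 1 or 0 either way. A minimal illustration:

/* With a bool symbol, CONFIG_BASE_SMALL is defined (to 1) only when set;
 * IS_ENABLED() turns presence/absence into a constant 1 or 0. */
#include <linux/kconfig.h>

#define EXAMPLE_SHIFT	(IS_ENABLED(CONFIG_BASE_SMALL) ? 4 : 6)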