Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acconfig.h | 2
-rw-r--r--  include/acpi/acpi_bus.h | 2
-rw-r--r--  include/acpi/acpixf.h | 2
-rw-r--r--  include/acpi/actbl1.h | 151
-rw-r--r--  include/acpi/actbl2.h | 162
-rw-r--r--  include/acpi/actypes.h | 10
-rw-r--r--  include/acpi/acuuid.h | 3
-rw-r--r--  include/acpi/battery.h | 4
-rw-r--r--  include/acpi/ghes.h | 34
-rw-r--r--  include/acpi/processor.h | 10
-rw-r--r--  include/asm-generic/compat.h | 2
-rw-r--r--  include/asm-generic/hyperv-tlfs.h | 9
-rw-r--r--  include/asm-generic/msi.h | 4
-rw-r--r--  include/asm-generic/tlb.h | 4
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 29
-rw-r--r--  include/clocksource/hyperv_timer.h | 11
-rw-r--r--  include/clocksource/timer-ti-dm.h | 2
-rw-r--r--  include/drm/gpu_scheduler.h | 9
-rw-r--r--  include/dt-bindings/arm/qcom,ids.h | 170
-rw-r--r--  include/dt-bindings/clock/rk3399-cru.h | 6
-rw-r--r--  include/dt-bindings/clock/tegra234-clock.h | 639
-rw-r--r--  include/dt-bindings/firmware/imx/rsrc.h | 302
-rw-r--r--  include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h | 90
-rw-r--r--  include/dt-bindings/memory/tegra234-mc.h | 440
-rw-r--r--  include/dt-bindings/power/qcom-rpmpd.h | 42
-rw-r--r--  include/dt-bindings/power/tegra234-powergate.h | 15
-rw-r--r--  include/dt-bindings/reset/tegra234-reset.h | 111
-rw-r--r--  include/kunit/assert.h | 74
-rw-r--r--  include/kunit/test-bug.h | 53
-rw-r--r--  include/kunit/test.h | 118
-rw-r--r--  include/kunit/visibility.h | 33
-rw-r--r--  include/linux/acpi.h | 13
-rw-r--r--  include/linux/acpi_apmt.h | 19
-rw-r--r--  include/linux/arm_ffa.h | 85
-rw-r--r--  include/linux/bcm47xx_nvram.h | 6
-rw-r--r--  include/linux/blk-crypto.h | 1
-rw-r--r--  include/linux/blk-mq.h | 3
-rw-r--r--  include/linux/blkdev.h | 16
-rw-r--r--  include/linux/bpf.h | 50
-rw-r--r--  include/linux/can/dev.h | 16
-rw-r--r--  include/linux/can/platform/sja1000.h | 2
-rw-r--r--  include/linux/cgroup.h | 100
-rw-r--r--  include/linux/cgroup_refcnt.h | 96
-rw-r--r--  include/linux/compat.h | 2
-rw-r--r--  include/linux/configfs.h | 3
-rw-r--r--  include/linux/console.h | 129
-rw-r--r--  include/linux/coredump.h | 2
-rw-r--r--  include/linux/counter.h | 5
-rw-r--r--  include/linux/cpufreq.h | 28
-rw-r--r--  include/linux/damon.h | 2
-rw-r--r--  include/linux/debugfs.h | 19
-rw-r--r--  include/linux/devfreq.h | 7
-rw-r--r--  include/linux/device.h | 8
-rw-r--r--  include/linux/dsa/tag_qca.h | 8
-rw-r--r--  include/linux/efi.h | 6
-rw-r--r--  include/linux/elfcore.h | 13
-rw-r--r--  include/linux/eventfd.h | 2
-rw-r--r--  include/linux/evm.h | 49
-rw-r--r--  include/linux/fault-inject.h | 7
-rw-r--r--  include/linux/fb.h | 2
-rw-r--r--  include/linux/fortify-string.h | 30
-rw-r--r--  include/linux/fs.h | 103
-rw-r--r--  include/linux/fs_context.h | 16
-rw-r--r--  include/linux/fs_parser.h | 1
-rw-r--r--  include/linux/fscache.h | 2
-rw-r--r--  include/linux/fscrypt.h | 4
-rw-r--r--  include/linux/ftrace.h | 47
-rw-r--r--  include/linux/gfp.h | 18
-rw-r--r--  include/linux/gpio/driver.h | 2
-rw-r--r--  include/linux/hyperv.h | 4
-rw-r--r--  include/linux/ima.h | 24
-rw-r--r--  include/linux/init.h | 3
-rw-r--r--  include/linux/io-mapping.h | 4
-rw-r--r--  include/linux/io_uring.h | 3
-rw-r--r--  include/linux/iommu.h | 2
-rw-r--r--  include/linux/ioport.h | 2
-rw-r--r--  include/linux/irqdomain.h | 143
-rw-r--r--  include/linux/irqdomain_defs.h | 31
-rw-r--r--  include/linux/irqreturn.h | 8
-rw-r--r--  include/linux/jbd2.h | 2
-rw-r--r--  include/linux/kasan.h | 5
-rw-r--r--  include/linux/kcov.h | 2
-rw-r--r--  include/linux/kexec.h | 7
-rw-r--r--  include/linux/kmsan_string.h | 21
-rw-r--r--  include/linux/kvm_host.h | 29
-rw-r--r--  include/linux/libnvdimm.h | 7
-rw-r--r--  include/linux/lsm_hook_defs.h | 9
-rw-r--r--  include/linux/lsm_hooks.h | 263
-rw-r--r--  include/linux/maple_tree.h | 7
-rw-r--r--  include/linux/math64.h | 26
-rw-r--r--  include/linux/mbcache.h | 9
-rw-r--r--  include/linux/memregion.h | 38
-rw-r--r--  include/linux/minmax.h | 26
-rw-r--r--  include/linux/mlx5/driver.h | 3
-rw-r--r--  include/linux/mm.h | 29
-rw-r--r--  include/linux/mmc/mmc.h | 2
-rw-r--r--  include/linux/mnt_idmapping.h | 108
-rw-r--r--  include/linux/mount.h | 9
-rw-r--r--  include/linux/msi.h | 357
-rw-r--r--  include/linux/msi_api.h | 73
-rw-r--r--  include/linux/net.h | 1
-rw-r--r--  include/linux/netdevice.h | 10
-rw-r--r--  include/linux/netfs.h | 8
-rw-r--r--  include/linux/nfs4.h | 13
-rw-r--r--  include/linux/nfs_fs.h | 1
-rw-r--r--  include/linux/nodemask.h | 2
-rw-r--r--  include/linux/nsproxy.h | 1
-rw-r--r--  include/linux/overflow.h | 38
-rw-r--r--  include/linux/pci.h | 49
-rw-r--r--  include/linux/percpu.h | 9
-rw-r--r--  include/linux/perf/arm_pmu.h | 3
-rw-r--r--  include/linux/perf_event.h | 144
-rw-r--r--  include/linux/pgtable.h | 18
-rw-r--r--  include/linux/phylink.h | 2
-rw-r--r--  include/linux/platform_data/gpmc-omap.h | 8
-rw-r--r--  include/linux/platform_data/st33zp24.h | 16
-rw-r--r--  include/linux/pm_domain.h | 7
-rw-r--r--  include/linux/posix_acl.h | 41
-rw-r--r--  include/linux/posix_acl_xattr.h | 47
-rw-r--r--  include/linux/prandom.h | 19
-rw-r--r--  include/linux/psi_types.h | 4
-rw-r--r--  include/linux/pstore_ram.h | 99
-rw-r--r--  include/linux/ptrace.h | 9
-rw-r--r--  include/linux/random.h | 102
-rw-r--r--  include/linux/rcupdate.h | 14
-rw-r--r--  include/linux/rcutiny.h | 8
-rw-r--r--  include/linux/rcutree.h | 4
-rw-r--r--  include/linux/regset.h | 15
-rw-r--r--  include/linux/resctrl.h | 6
-rw-r--r--  include/linux/ring_buffer.h | 2
-rw-r--r--  include/linux/sched.h | 5
-rw-r--r--  include/linux/scs.h | 18
-rw-r--r--  include/linux/seccomp.h | 1
-rw-r--r--  include/linux/security.h | 46
-rw-r--r--  include/linux/serial_core.h | 10
-rw-r--r--  include/linux/skmsg.h | 2
-rw-r--r--  include/linux/slab.h | 98
-rw-r--r--  include/linux/slab_def.h | 2
-rw-r--r--  include/linux/slub_def.h | 8
-rw-r--r--  include/linux/soc/mediatek/mtk-mmsys.h | 7
-rw-r--r--  include/linux/soc/qcom/llcc-qcom.h | 12
-rw-r--r--  include/linux/spi/spi-mem.h | 2
-rw-r--r--  include/linux/srcu.h | 72
-rw-r--r--  include/linux/srcutree.h | 5
-rw-r--r--  include/linux/stackprotector.h | 19
-rw-r--r--  include/linux/sunrpc/svc.h | 8
-rw-r--r--  include/linux/swapops.h | 8
-rw-r--r--  include/linux/syscalls.h | 2
-rw-r--r--  include/linux/thermal.h | 1
-rw-r--r--  include/linux/time_namespace.h | 6
-rw-r--r--  include/linux/timer.h | 35
-rw-r--r--  include/linux/timerqueue.h | 2
-rw-r--r--  include/linux/trace.h | 4
-rw-r--r--  include/linux/uio.h | 3
-rw-r--r--  include/linux/userfaultfd_k.h | 6
-rw-r--r--  include/linux/utsname.h | 1
-rw-r--r--  include/linux/vfio.h | 1
-rw-r--r--  include/linux/xattr.h | 12
-rw-r--r--  include/media/i2c/ir-kbd-i2c.h | 1
-rw-r--r--  include/media/media-device.h | 15
-rw-r--r--  include/media/media-entity.h | 169
-rw-r--r--  include/media/v4l2-common.h | 3
-rw-r--r--  include/media/v4l2-ctrls.h | 28
-rw-r--r--  include/media/v4l2-dev.h | 102
-rw-r--r--  include/media/v4l2-fwnode.h | 4
-rw-r--r--  include/media/v4l2-subdev.h | 12
-rw-r--r--  include/memory/renesas-rpc-if.h | 1
-rw-r--r--  include/net/bluetooth/hci.h | 12
-rw-r--r--  include/net/genetlink.h | 18
-rw-r--r--  include/net/inet_hashtables.h | 3
-rw-r--r--  include/net/ip.h | 2
-rw-r--r--  include/net/ipv6.h | 2
-rw-r--r--  include/net/neighbour.h | 2
-rw-r--r--  include/net/netlink.h | 48
-rw-r--r--  include/net/ping.h | 3
-rw-r--r--  include/net/sctp/stream_sched.h | 2
-rw-r--r--  include/net/sock.h | 11
-rw-r--r--  include/net/sock_reuseport.h | 11
-rw-r--r--  include/soc/amlogic/meson_ddr_pmu.h | 66
-rw-r--r--  include/soc/at91/sama7-ddr.h | 5
-rw-r--r--  include/soc/fsl/qe/qe.h | 5
-rw-r--r--  include/soc/qcom/qcom-spmi-pmic.h | 17
-rw-r--r--  include/soc/tegra/bpmp-abi.h | 1796
-rw-r--r--  include/soc/tegra/bpmp.h | 17
-rw-r--r--  include/soc/tegra/fuse.h | 15
-rw-r--r--  include/soc/tegra/ivc.h | 12
-rw-r--r--  include/soc/tegra/pmc.h | 6
-rw-r--r--  include/sound/control.h | 1
-rw-r--r--  include/sound/simple_card_utils.h | 1
-rw-r--r--  include/sound/sof/dai.h | 2
-rw-r--r--  include/sound/sof/info.h | 4
-rw-r--r--  include/trace/events/btrfs.h | 27
-rw-r--r--  include/trace/events/cachefiles.h | 27
-rw-r--r--  include/trace/events/cxl.h | 112
-rw-r--r--  include/trace/events/dlm.h | 303
-rw-r--r--  include/trace/events/ext4.h | 64
-rw-r--r--  include/trace/events/fscache.h | 2
-rw-r--r--  include/trace/events/huge_memory.h | 8
-rw-r--r--  include/trace/events/jbd2.h | 44
-rw-r--r--  include/trace/events/rpcgss.h | 2
-rw-r--r--  include/trace/events/rpcrdma.h | 4
-rw-r--r--  include/trace/events/sunrpc.h | 6
-rw-r--r--  include/trace/events/watchdog.h | 66
-rw-r--r--  include/trace/misc/fs.h (renamed from include/trace/events/fs.h) | 0
-rw-r--r--  include/trace/misc/nfs.h (renamed from include/trace/events/nfs.h) | 12
-rw-r--r--  include/trace/misc/rdma.h (renamed from include/trace/events/rdma.h) | 0
-rw-r--r--  include/trace/misc/sunrpc.h (renamed from include/trace/events/sunrpc_base.h) | 0
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 2
-rw-r--r--  include/uapi/drm/panfrost_drm.h | 38
-rw-r--r--  include/uapi/linux/audit.h | 2
-rw-r--r--  include/uapi/linux/btrfs.h | 36
-rw-r--r--  include/uapi/linux/btrfs_tree.h | 235
-rw-r--r--  include/uapi/linux/capability.h | 2
-rw-r--r--  include/uapi/linux/cec-funcs.h | 14
-rw-r--r--  include/uapi/linux/cec.h | 2
-rw-r--r--  include/uapi/linux/elf.h | 14
-rw-r--r--  include/uapi/linux/fscrypt.h | 4
-rw-r--r--  include/uapi/linux/fuse.h | 16
-rw-r--r--  include/uapi/linux/idxd.h | 1
-rw-r--r--  include/uapi/linux/in.h | 1
-rw-r--r--  include/uapi/linux/io_uring.h | 2
-rw-r--r--  include/uapi/linux/ip.h | 6
-rw-r--r--  include/uapi/linux/ipv6.h | 6
-rw-r--r--  include/uapi/linux/landlock.h | 21
-rw-r--r--  include/uapi/linux/pci_regs.h | 1
-rw-r--r--  include/uapi/linux/perf_event.h | 2
-rw-r--r--  include/uapi/linux/rkisp1-config.h | 77
-rw-r--r--  include/uapi/linux/tdx-guest.h | 42
-rw-r--r--  include/uapi/linux/videodev2.h | 3
-rw-r--r--  include/xen/arm/xen-ops.h | 4
-rw-r--r--  include/xen/xen-ops.h | 16
-rw-r--r--  include/xen/xen.h | 4
232 files changed, 7409 insertions, 2032 deletions
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index c3ae3ea88e17..151e40385673 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -190,6 +190,8 @@
#define ACPI_PRM_INPUT_BUFFER_SIZE 26
+#define ACPI_FFH_INPUT_BUFFER_SIZE 256
+
/* _sx_d and _sx_w control methods */
#define ACPI_NUM_sx_d_METHODS 4
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index c09d72986968..cd3b75e08ec3 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -149,7 +149,7 @@ struct acpi_hotplug_context {
*/
typedef int (*acpi_op_add) (struct acpi_device * device);
-typedef int (*acpi_op_remove) (struct acpi_device * device);
+typedef void (*acpi_op_remove) (struct acpi_device *device);
typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event);
struct acpi_device_ops {
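With acpi_op_remove returning void, a driver's .remove callback stops propagating an error code. A minimal sketch of the updated shape (names hypothetical, not taken from the patch):

/* Hypothetical callback matching the new acpi_op_remove type: removal can
 * no longer fail, so all teardown must complete unconditionally. */
static void example_acpi_remove(struct acpi_device *device)
{
	/* release resources associated with @device */
}

The function is then assigned to the .remove member of struct acpi_device_ops exactly as before.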
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 67c0b9e734b6..9e49b37fc869 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20220331
+#define ACPI_CA_VERSION 0x20221020
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 15c78678c5d3..4175dce3967c 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -45,6 +45,7 @@
#define ACPI_SIG_HMAT "HMAT" /* Heterogeneous Memory Attributes Table */
#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */
#define ACPI_SIG_IBFT "IBFT" /* iSCSI Boot Firmware Table */
+#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */
#define ACPI_SIG_S3PT "S3PT" /* S3 Performance (sub)Table */
#define ACPI_SIG_PCCS "PCC" /* PCC Shared Memory Region */
@@ -305,10 +306,123 @@ struct acpi_table_boot {
/*******************************************************************************
*
+ * CDAT - Coherent Device Attribute Table
+ * Version 1
+ *
+ * Conforms to the "Coherent Device Attribute Table (CDAT) Specification
+ " (Revision 1.01, October 2020.)
+ *
+ ******************************************************************************/
+
+struct acpi_table_cdat {
+ u32 length; /* Length of table in bytes, including this header */
+ u8 revision; /* ACPI Specification minor version number */
+ u8 checksum; /* To make sum of entire table == 0 */
+ u8 reserved[6];
+ u32 sequence; /* Used to detect runtime CDAT table changes */
+};
+
+/* CDAT common subtable header */
+
+struct acpi_cdat_header {
+ u8 type;
+ u8 reserved;
+ u16 length;
+};
+
+/* Values for Type field above */
+
+enum acpi_cdat_type {
+ ACPI_CDAT_TYPE_DSMAS = 0,
+ ACPI_CDAT_TYPE_DSLBIS = 1,
+ ACPI_CDAT_TYPE_DSMSCIS = 2,
+ ACPI_CDAT_TYPE_DSIS = 3,
+ ACPI_CDAT_TYPE_DSEMTS = 4,
+ ACPI_CDAT_TYPE_SSLBIS = 5,
+ ACPI_CDAT_TYPE_RESERVED = 6 /* 6 through 0xFF are reserved */
+};
+
+/* Subtable 0: Device Scoped Memory Affinity Structure (DSMAS) */
+
+struct acpi_cadt_dsmas {
+ u8 dsmad_handle;
+ u8 flags;
+ u16 reserved;
+ u64 dpa_base_address;
+ u64 dpa_length;
+};
+
+/* Flags for subtable above */
+
+#define ACPI_CEDT_DSMAS_NON_VOLATILE (1 << 2)
+
+/* Subtable 1: Device scoped Latency and Bandwidth Information Structure (DSLBIS) */
+
+struct acpi_cdat_dslbis {
+ u8 handle;
+ u8 flags; /* If Handle matches a DSMAS handle, the definition of this field matches
+ * Flags field in HMAT System Locality Latency */
+ u8 data_type;
+ u8 reserved;
+ u64 entry_base_unit;
+ u16 entry[3];
+ u16 reserved2;
+};
+
+/* Subtable 2: Device Scoped Memory Side Cache Information Structure (DSMSCIS) */
+
+struct acpi_cdat_dsmscis {
+ u8 dsmas_handle;
+ u8 reserved[3];
+ u64 side_cache_size;
+ u32 cache_attributes;
+};
+
+/* Subtable 3: Device Scoped Initiator Structure (DSIS) */
+
+struct acpi_cdat_dsis {
+ u8 flags;
+ u8 handle;
+ u16 reserved;
+};
+
+/* Flags for above subtable */
+
+#define ACPI_CDAT_DSIS_MEM_ATTACHED (1 << 0)
+
+/* Subtable 4: Device Scoped EFI Memory Type Structure (DSEMTS) */
+
+struct acpi_cdat_dsemts {
+ u8 dsmas_handle;
+ u8 memory_type;
+ u16 reserved;
+ u64 dpa_offset;
+ u64 range_length;
+};
+
+/* Subtable 5: Switch Scoped Latency and Bandwidth Information Structure (SSLBIS) */
+
+struct acpi_cdat_sslbis {
+ u8 data_type;
+ u8 reserved[3];
+ u64 entry_base_unit;
+};
+
+/* Sub-subtable for above, sslbe_entries field */
+
+struct acpi_cdat_sslbe {
+ u16 portx_id;
+ u16 porty_id;
+ u16 latency_or_bandwidth;
+ u16 reserved;
+};
+
+/*******************************************************************************
+ *
* CEDT - CXL Early Discovery Table
* Version 1
*
- * Conforms to the "CXL Early Discovery Table" (CXL 2.0)
+ * Conforms to the "CXL Early Discovery Table" (CXL 2.0, October 2020)
*
******************************************************************************/
@@ -329,7 +443,9 @@ struct acpi_cedt_header {
enum acpi_cedt_type {
ACPI_CEDT_TYPE_CHBS = 0,
ACPI_CEDT_TYPE_CFMWS = 1,
- ACPI_CEDT_TYPE_RESERVED = 2,
+ ACPI_CEDT_TYPE_CXIMS = 2,
+ ACPI_CEDT_TYPE_RDPAS = 3,
+ ACPI_CEDT_TYPE_RESERVED = 4,
};
/* Values for version field above */
@@ -380,6 +496,7 @@ struct acpi_cedt_cfmws_target_element {
/* Values for Interleave Arithmetic field above */
#define ACPI_CEDT_CFMWS_ARITHMETIC_MODULO (0)
+#define ACPI_CEDT_CFMWS_ARITHMETIC_XOR (1)
/* Values for Restrictions field above */
@@ -389,6 +506,36 @@ struct acpi_cedt_cfmws_target_element {
#define ACPI_CEDT_CFMWS_RESTRICT_PMEM (1<<3)
#define ACPI_CEDT_CFMWS_RESTRICT_FIXED (1<<4)
+/* 2: CXL XOR Interleave Math Structure */
+
+struct acpi_cedt_cxims {
+ struct acpi_cedt_header header;
+ u16 reserved1;
+ u8 hbig;
+ u8 nr_xormaps;
+ u64 xormap_list[];
+};
+
+/* 3: CXL RCEC Downstream Port Association Structure */
+
+struct acpi_cedt_rdpas {
+ struct acpi_cedt_header header;
+ u8 reserved1;
+ u16 length;
+ u16 segment;
+ u16 bdf;
+ u8 protocol;
+ u64 address;
+};
+
+/* Masks for bdf field above */
+#define ACPI_CEDT_RDPAS_BUS_MASK 0xff00
+#define ACPI_CEDT_RDPAS_DEVICE_MASK 0x00f8
+#define ACPI_CEDT_RDPAS_FUNCTION_MASK 0x0007
+
+#define ACPI_CEDT_RDPAS_PROTOCOL_IO (0)
+#define ACPI_CEDT_RDPAS_PROTOCOL_CACHEMEM (1)
+
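+For illustration, the new RDPAS bdf field can be unpacked with the masks above; the helper below is a hypothetical sketch, not part of the patch:

/* Hypothetical helper: split the RDPAS bdf field into PCI bus, device and
 * function numbers using the ACPI_CEDT_RDPAS_* masks. */
static void example_rdpas_decode(const struct acpi_cedt_rdpas *rdpas,
				 u8 *bus, u8 *dev, u8 *fn)
{
	*bus = (rdpas->bdf & ACPI_CEDT_RDPAS_BUS_MASK) >> 8;
	*dev = (rdpas->bdf & ACPI_CEDT_RDPAS_DEVICE_MASK) >> 3;
	*fn  = rdpas->bdf & ACPI_CEDT_RDPAS_FUNCTION_MASK;
}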
/*******************************************************************************
*
* CPEP - Corrected Platform Error Polling table (ACPI 4.0)
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 655102bc6d14..b2973dbe37ee 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -27,6 +27,8 @@
#define ACPI_SIG_AGDI "AGDI" /* Arm Generic Diagnostic Dump and Reset Device Interface */
#define ACPI_SIG_APMT "APMT" /* Arm Performance Monitoring Unit table */
#define ACPI_SIG_BDAT "BDAT" /* BIOS Data ACPI Table */
+#define ACPI_SIG_CCEL "CCEL" /* CC Event Log Table */
+#define ACPI_SIG_CDAT "CDAT" /* Coherent Device Attribute Table */
#define ACPI_SIG_IORT "IORT" /* IO Remapping Table */
#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
#define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */
@@ -34,7 +36,6 @@
#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
#define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */
#define ACPI_SIG_MPST "MPST" /* Memory Power State Table */
-#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */
#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */
#define ACPI_SIG_NHLT "NHLT" /* Non HD Audio Link Table */
@@ -354,10 +355,27 @@ struct acpi_table_bdat {
/*******************************************************************************
*
+ * CCEL - CC-Event Log
+ * From: "Guest-Host-Communication Interface (GHCI) for Intel
+ * Trust Domain Extensions (Intel TDX)". Feb 2022
+ *
+ ******************************************************************************/
+
+struct acpi_table_ccel {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 CCtype;
+ u8 Ccsub_type;
+ u16 reserved;
+ u64 log_area_minimum_length;
+ u64 log_area_start_address;
+};
+
+/*******************************************************************************
+ *
* IORT - IO Remapping Table
*
* Conforms to "IO Remapping Table System Software on ARM Platforms",
- * Document number: ARM DEN 0049E.d, Feb 2022
+ * Document number: ARM DEN 0049E.e, Sep 2022
*
******************************************************************************/
@@ -528,6 +546,7 @@ struct acpi_iort_smmu_v3 {
#define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE (1)
#define ACPI_IORT_SMMU_V3_HTTU_OVERRIDE (3<<1)
#define ACPI_IORT_SMMU_V3_PXM_VALID (1<<3)
+#define ACPI_IORT_SMMU_V3_DEVICEID_VALID (1<<4)
struct acpi_iort_pmcg {
u64 page0_base_address;
@@ -865,7 +884,14 @@ enum acpi_madt_type {
ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14,
ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15,
ACPI_MADT_TYPE_MULTIPROC_WAKEUP = 16,
- ACPI_MADT_TYPE_RESERVED = 17, /* 17 to 0x7F are reserved */
+ ACPI_MADT_TYPE_CORE_PIC = 17,
+ ACPI_MADT_TYPE_LIO_PIC = 18,
+ ACPI_MADT_TYPE_HT_PIC = 19,
+ ACPI_MADT_TYPE_EIO_PIC = 20,
+ ACPI_MADT_TYPE_MSI_PIC = 21,
+ ACPI_MADT_TYPE_BIO_PIC = 22,
+ ACPI_MADT_TYPE_LPC_PIC = 23,
+ ACPI_MADT_TYPE_RESERVED = 24, /* 24 to 0x7F are reserved */
ACPI_MADT_TYPE_OEM_RESERVED = 0x80 /* 0x80 to 0xFF are reserved for OEM use */
};
@@ -1096,7 +1122,135 @@ struct acpi_madt_multiproc_wakeup_mailbox {
#define ACPI_MP_WAKE_COMMAND_WAKEUP 1
-/* 17: OEM data */
+/* 17: CPU Core Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_core_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u32 processor_id;
+ u32 core_id;
+ u32 flags;
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_core_pic_version {
+ ACPI_MADT_CORE_PIC_VERSION_NONE = 0,
+ ACPI_MADT_CORE_PIC_VERSION_V1 = 1,
+ ACPI_MADT_CORE_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 18: Legacy I/O Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_lio_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u64 address;
+ u16 size;
+ u8 cascade[2];
+ u32 cascade_map[2];
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_lio_pic_version {
+ ACPI_MADT_LIO_PIC_VERSION_NONE = 0,
+ ACPI_MADT_LIO_PIC_VERSION_V1 = 1,
+ ACPI_MADT_LIO_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 19: HT Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_ht_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u64 address;
+ u16 size;
+ u8 cascade[8];
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_ht_pic_version {
+ ACPI_MADT_HT_PIC_VERSION_NONE = 0,
+ ACPI_MADT_HT_PIC_VERSION_V1 = 1,
+ ACPI_MADT_HT_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 20: Extend I/O Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_eio_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 cascade;
+ u8 node;
+ u64 node_map;
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_eio_pic_version {
+ ACPI_MADT_EIO_PIC_VERSION_NONE = 0,
+ ACPI_MADT_EIO_PIC_VERSION_V1 = 1,
+ ACPI_MADT_EIO_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 21: MSI Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_msi_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u64 msg_address;
+ u32 start;
+ u32 count;
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_msi_pic_version {
+ ACPI_MADT_MSI_PIC_VERSION_NONE = 0,
+ ACPI_MADT_MSI_PIC_VERSION_V1 = 1,
+ ACPI_MADT_MSI_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 22: Bridge I/O Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_bio_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u64 address;
+ u16 size;
+ u16 id;
+ u16 gsi_base;
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_bio_pic_version {
+ ACPI_MADT_BIO_PIC_VERSION_NONE = 0,
+ ACPI_MADT_BIO_PIC_VERSION_V1 = 1,
+ ACPI_MADT_BIO_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 23: LPC Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_lpc_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u64 address;
+ u16 size;
+ u8 cascade;
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_lpc_pic_version {
+ ACPI_MADT_LPC_PIC_VERSION_NONE = 0,
+ ACPI_MADT_LPC_PIC_VERSION_V1 = 1,
+ ACPI_MADT_LPC_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 80: OEM data */
struct acpi_madt_oem_data {
u8 oem_data[0];
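All of the new interrupt-controller subtables reuse the common subtable header, so a MADT walker matches them on the new type values; a hypothetical sketch (function name assumed):

/* Hypothetical sketch: recognizing the new type-17 CPU core interrupt
 * controller entry while iterating MADT subtables. */
static void example_parse_core_pic(struct acpi_subtable_header *hdr)
{
	if (hdr->type == ACPI_MADT_TYPE_CORE_PIC) {
		struct acpi_madt_core_pic *pic = (struct acpi_madt_core_pic *)hdr;
		/* pic->processor_id, pic->core_id and pic->flags describe one core */
	}
}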
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 3491e454b2ab..95e4f56f9754 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -723,7 +723,8 @@ typedef u32 acpi_event_type;
#define ACPI_EVENT_POWER_BUTTON 2
#define ACPI_EVENT_SLEEP_BUTTON 3
#define ACPI_EVENT_RTC 4
-#define ACPI_EVENT_MAX 4
+#define ACPI_EVENT_PCIE_WAKE 5
+#define ACPI_EVENT_MAX 5
#define ACPI_NUM_FIXED_EVENTS ACPI_EVENT_MAX + 1
/*
@@ -1115,6 +1116,13 @@ struct acpi_pcc_info {
u8 *internal_buffer;
};
+/* Special Context data for FFH Opregion (ACPI 6.5) */
+
+struct acpi_ffh_info {
+ u64 offset;
+ u64 length;
+};
+
typedef
acpi_status (*acpi_adr_space_setup) (acpi_handle region_handle,
u32 function,
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
index 8f1e7c489df5..171bb0b708a2 100644
--- a/include/acpi/acuuid.h
+++ b/include/acpi/acuuid.h
@@ -69,5 +69,6 @@
#define UUID_HIERARCHICAL_DATA_EXTENSION "dbb8e3e6-5886-4ba6-8795-1319f52a966b"
#define UUID_CORESIGHT_GRAPH "3ecbc8b6-1d0e-4fb3-8107-e627f805c6cd"
#define UUID_USB4_CAPABILITIES "23a0d13a-26ab-486c-9c5f-0ffa525a575a"
-
+#define UUID_1ST_FUNCTION_ID "893f00a6-660c-494e-bcfd-3043f4fb67c0"
+#define UUID_2ND_FUNCTION_ID "107ededd-d381-4fd7-8da9-08e9a6c79644"
#endif /* __ACUUID_H__ */
diff --git a/include/acpi/battery.h b/include/acpi/battery.h
index b8d56b702c7a..611a2561a014 100644
--- a/include/acpi/battery.h
+++ b/include/acpi/battery.h
@@ -12,8 +12,8 @@
struct acpi_battery_hook {
const char *name;
- int (*add_battery)(struct power_supply *battery);
- int (*remove_battery)(struct power_supply *battery);
+ int (*add_battery)(struct power_supply *battery, struct acpi_battery_hook *hook);
+ int (*remove_battery)(struct power_supply *battery, struct acpi_battery_hook *hook);
struct list_head list;
};
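Hook implementations gain the extra hook argument in both callbacks; a minimal sketch (all names hypothetical):

/* Hypothetical battery hook updated for the new signatures: the hook
 * pointer is passed back, so one callback body can serve several hooks. */
static int example_add_battery(struct power_supply *battery,
			       struct acpi_battery_hook *hook)
{
	return 0;	/* attach extension attributes to @battery here */
}

static int example_remove_battery(struct power_supply *battery,
				  struct acpi_battery_hook *hook)
{
	return 0;
}

static struct acpi_battery_hook example_hook = {
	.name		= "example hook",
	.add_battery	= example_add_battery,
	.remove_battery	= example_remove_battery,
};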
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index 34fb3431a8f3..3c8bba9f1114 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -27,6 +27,8 @@ struct ghes {
struct timer_list timer;
unsigned int irq;
};
+ struct device *dev;
+ struct list_head elist;
};
struct ghes_estatus_node {
@@ -69,35 +71,14 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb);
* @nb: pointer to the notifier_block structure of the vendor record handler.
*/
void ghes_unregister_vendor_record_notifier(struct notifier_block *nb);
-#endif
-
-int ghes_estatus_pool_init(int num_ghes);
-
-/* From drivers/edac/ghes_edac.c */
-
-#ifdef CONFIG_EDAC_GHES
-void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err);
-
-int ghes_edac_register(struct ghes *ghes, struct device *dev);
-
-void ghes_edac_unregister(struct ghes *ghes);
+struct list_head *ghes_get_devices(void);
#else
-static inline void ghes_edac_report_mem_error(int sev,
- struct cper_sec_mem_err *mem_err)
-{
-}
-
-static inline int ghes_edac_register(struct ghes *ghes, struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline void ghes_edac_unregister(struct ghes *ghes)
-{
-}
+static inline struct list_head *ghes_get_devices(void) { return NULL; }
#endif
+int ghes_estatus_pool_init(unsigned int num_ghes);
+
static inline int acpi_hest_get_version(struct acpi_hest_generic_data *gdata)
{
return gdata->revision >> 8;
@@ -145,4 +126,7 @@ int ghes_notify_sea(void);
static inline int ghes_notify_sea(void) { return -ENOENT; }
#endif
+struct notifier_block;
+extern void ghes_register_report_chain(struct notifier_block *nb);
+extern void ghes_unregister_report_chain(struct notifier_block *nb);
#endif /* GHES_H */
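A consumer of the new report chain registers an ordinary notifier; the sketch below is illustrative only (callback name and payload handling are assumptions, not defined by this header):

/* Hypothetical notifier attached to the GHES report chain. */
static int example_ghes_report(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	/* inspect the reported hardware error record here */
	return NOTIFY_OK;
}

static struct notifier_block example_ghes_nb = {
	.notifier_call = example_ghes_report,
};

/* init:  ghes_register_report_chain(&example_ghes_nb);   */
/* exit:  ghes_unregister_report_chain(&example_ghes_nb); */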
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 9fa49686957a..94181fe9780a 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -460,4 +460,14 @@ static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
}
#endif /* CONFIG_CPU_FREQ */
+#ifdef CONFIG_ACPI_PROCESSOR_IDLE
+extern int acpi_processor_ffh_lpi_probe(unsigned int cpu);
+extern int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
+#endif
+
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+extern int arch_register_cpu(int cpu);
+extern void arch_unregister_cpu(int cpu);
+#endif
+
#endif
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
index aeb257ad3d1a..8392caea398f 100644
--- a/include/asm-generic/compat.h
+++ b/include/asm-generic/compat.h
@@ -15,7 +15,7 @@
#endif
#ifndef compat_arg_u64
-#ifdef CONFIG_CPU_BIG_ENDIAN
+#ifndef CONFIG_CPU_BIG_ENDIAN
#define compat_arg_u64(name) u32 name##_lo, u32 name##_hi
#define compat_arg_u64_dual(name) u32, name##_lo, u32, name##_hi
#else
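The corrected #ifndef means little-endian builds now declare the low word first; for example (syscall name hypothetical):

/* On a little-endian kernel, compat_arg_u64(offset) expands to
 * "u32 offset_lo, u32 offset_hi", matching how 32-bit userspace splits a
 * 64-bit argument across consecutive registers; big-endian gets hi first. */
long example_compat_sys_op(int fd, compat_arg_u64(offset));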
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index fdce7a4cfc6f..b17c6eeb9afa 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -102,6 +102,15 @@ struct ms_hyperv_tsc_page {
volatile s64 tsc_offset;
} __packed;
+union hv_reference_tsc_msr {
+ u64 as_uint64;
+ struct {
+ u64 enable:1;
+ u64 reserved:11;
+ u64 pfn:52;
+ } __packed;
+};
+
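+The union makes composing the MSR value explicit instead of open-coded shifting; a hypothetical helper as a sketch:

/* Hypothetical helper: build the reference TSC MSR value from the page
 * frame number of the TSC reference page. */
static u64 example_make_tsc_msr(u64 tsc_page_pfn)
{
	union hv_reference_tsc_msr tsc_msr = { .as_uint64 = 0 };

	tsc_msr.enable = 1;
	tsc_msr.pfn = tsc_page_pfn;
	return tsc_msr.as_uint64;
}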
/*
* The guest OS needs to register the guest ID with the hypervisor.
* The guest ID is a 64 bit entity and the structure of this ID is
diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h
index bf910d47e900..124c734ca5d9 100644
--- a/include/asm-generic/msi.h
+++ b/include/asm-generic/msi.h
@@ -4,7 +4,7 @@
#include <linux/types.h>
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+#ifdef CONFIG_GENERIC_MSI_IRQ
#ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS
# define NUM_MSI_ALLOC_SCRATCHPAD_REGS 2
@@ -36,6 +36,6 @@ typedef struct msi_alloc_info {
#define GENERIC_MSI_DOMAIN_OPS 1
-#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
+#endif /* CONFIG_GENERIC_MSI_IRQ */
#endif
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 492dce43236e..cab7cfebf40b 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -222,12 +222,16 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#define tlb_needs_table_invalidate() (true)
#endif
+void tlb_remove_table_sync_one(void);
+
#else
#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif
+static inline void tlb_remove_table_sync_one(void) { }
+
#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index c15de165ec8f..c8ab800652b5 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -162,6 +162,16 @@
#define PATCHABLE_DISCARDS *(__patchable_function_entries)
#endif
+#ifndef CONFIG_ARCH_SUPPORTS_CFI_CLANG
+/*
+ * Simply points to ftrace_stub, but with the proper protocol.
+ * Defined by the linker script in linux/vmlinux.lds.h
+ */
+#define FTRACE_STUB_HACK ftrace_stub_graph = ftrace_stub;
+#else
+#define FTRACE_STUB_HACK
+#endif
+
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/*
* The ftrace call sites are logged to a section whose name depends on the
@@ -169,10 +179,6 @@
* FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
* dependencies for FTRACE_CALLSITE_SECTION's definition.
*
- * Need to also make ftrace_stub_graph point to ftrace_stub
- * so that the same stub location may have different protocols
- * and not mess up with C verifiers.
- *
* ftrace_ops_list_func will be defined as arch_ftrace_ops_list_func
* as some archs will have a different prototype for that function
* but ftrace_ops_list_func() will have a single prototype.
@@ -182,11 +188,11 @@
KEEP(*(__mcount_loc)) \
KEEP_PATCHABLE \
__stop_mcount_loc = .; \
- ftrace_stub_graph = ftrace_stub; \
+ FTRACE_STUB_HACK \
ftrace_ops_list_func = arch_ftrace_ops_list_func;
#else
# ifdef CONFIG_FUNCTION_TRACER
-# define MCOUNT_REC() ftrace_stub_graph = ftrace_stub; \
+# define MCOUNT_REC() FTRACE_STUB_HACK \
ftrace_ops_list_func = arch_ftrace_ops_list_func;
# else
# define MCOUNT_REC()
@@ -341,6 +347,7 @@
#define DATA_DATA \
*(.xiptext) \
*(DATA_MAIN) \
+ *(.data..decrypted) \
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
MEM_KEEP(init.data*) \
@@ -989,7 +996,6 @@
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION \
. = ALIGN(PAGE_SIZE); \
- *(.data..decrypted) \
*(.data..percpu..decrypted) \
. = ALIGN(PAGE_SIZE);
#else
@@ -1021,14 +1027,19 @@
* keep any .init_array.* sections.
* https://bugs.llvm.org/show_bug.cgi?id=46478
*/
+#ifdef CONFIG_UNWIND_TABLES
+#define DISCARD_EH_FRAME
+#else
+#define DISCARD_EH_FRAME *(.eh_frame)
+#endif
#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN)
# ifdef CONFIG_CONSTRUCTORS
# define SANITIZER_DISCARDS \
- *(.eh_frame)
+ DISCARD_EH_FRAME
# else
# define SANITIZER_DISCARDS \
*(.init_array) *(.init_array.*) \
- *(.eh_frame)
+ DISCARD_EH_FRAME
# endif
#else
# define SANITIZER_DISCARDS
diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h
index b3f5d73ae1d6..536f897375d0 100644
--- a/include/clocksource/hyperv_timer.h
+++ b/include/clocksource/hyperv_timer.h
@@ -15,13 +15,15 @@
#include <linux/clocksource.h>
#include <linux/math64.h>
-#include <asm/mshyperv.h>
+#include <asm/hyperv-tlfs.h>
#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
#define HV_MIN_DELTA_TICKS 1
#ifdef CONFIG_HYPERV_TIMER
+#include <asm/hyperv_timer.h>
+
/* Routines called by the VMbus driver */
extern int hv_stimer_alloc(bool have_percpu_irqs);
extern int hv_stimer_cleanup(unsigned int cpu);
@@ -31,7 +33,9 @@ extern void hv_stimer_global_cleanup(void);
extern void hv_stimer0_isr(void);
extern void hv_init_clocksource(void);
+extern void hv_remap_tsc_clocksource(void);
+extern unsigned long hv_get_tsc_pfn(void);
extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
static inline notrace u64
@@ -90,6 +94,11 @@ hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
}
#else /* CONFIG_HYPERV_TIMER */
+static inline unsigned long hv_get_tsc_pfn(void)
+{
+ return 0;
+}
+
static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
return NULL;
diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h
index 77eceeae708c..dcc1712f75e7 100644
--- a/include/clocksource/timer-ti-dm.h
+++ b/include/clocksource/timer-ti-dm.h
@@ -62,8 +62,6 @@
struct omap_dm_timer {
};
-int omap_dm_timer_get_irq(struct omap_dm_timer *timer);
-
u32 omap_dm_timer_modify_idlect_mask(u32 inputmask);
/*
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 599855c6a672..2ae4fd62e01c 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -32,6 +32,15 @@
#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
+/**
+ * DRM_SCHED_FENCE_DONT_PIPELINE - Prefent dependency pipelining
+ *
+ * Setting this flag on a scheduler fence prevents pipelining of jobs depending
+ * on this fence. In other words we always insert a full CPU round trip before
+ * dependen jobs are pushed to the hw queue.
+ */
+#define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS
+
struct drm_gem_object;
struct drm_gpu_scheduler;
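Using the flag is a single bit operation on the scheduler fence; a hypothetical helper as a sketch:

/* Hypothetical helper: opt a scheduler fence out of dependency pipelining,
 * forcing a full CPU round trip before dependent jobs reach the hw queue. */
static void example_disable_pipelining(struct dma_fence *fence)
{
	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags);
}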
diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h
new file mode 100644
index 000000000000..f7aef3f310d7
--- /dev/null
+++ b/include/dt-bindings/arm/qcom,ids.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Linaro Ltd
+ * Author: Krzysztof Kozlowski <krzk@kernel.org> based on previous work of Kumar Gala.
+ */
+#ifndef _DT_BINDINGS_ARM_QCOM_IDS_H
+#define _DT_BINDINGS_ARM_QCOM_IDS_H
+
+/*
+ * The MSM chipset and hardware revision used by Qualcomm bootloaders, DTS for
+ * older chipsets (qcom,msm-id) and in socinfo driver:
+ */
+#define QCOM_ID_MSM8960 87
+#define QCOM_ID_APQ8064 109
+#define QCOM_ID_MSM8660A 122
+#define QCOM_ID_MSM8260A 123
+#define QCOM_ID_APQ8060A 124
+#define QCOM_ID_MSM8974 126
+#define QCOM_ID_MPQ8064 130
+#define QCOM_ID_MSM8960AB 138
+#define QCOM_ID_APQ8060AB 139
+#define QCOM_ID_MSM8260AB 140
+#define QCOM_ID_MSM8660AB 141
+#define QCOM_ID_MSM8626 145
+#define QCOM_ID_MSM8610 147
+#define QCOM_ID_APQ8064AB 153
+#define QCOM_ID_MSM8226 158
+#define QCOM_ID_MSM8526 159
+#define QCOM_ID_MSM8110 161
+#define QCOM_ID_MSM8210 162
+#define QCOM_ID_MSM8810 163
+#define QCOM_ID_MSM8212 164
+#define QCOM_ID_MSM8612 165
+#define QCOM_ID_MSM8112 166
+#define QCOM_ID_MSM8225Q 168
+#define QCOM_ID_MSM8625Q 169
+#define QCOM_ID_MSM8125Q 170
+#define QCOM_ID_APQ8064AA 172
+#define QCOM_ID_APQ8084 178
+#define QCOM_ID_APQ8074 184
+#define QCOM_ID_MSM8274 185
+#define QCOM_ID_MSM8674 186
+#define QCOM_ID_MSM8974PRO_AC 194
+#define QCOM_ID_MSM8126 198
+#define QCOM_ID_APQ8026 199
+#define QCOM_ID_MSM8926 200
+#define QCOM_ID_MSM8326 205
+#define QCOM_ID_MSM8916 206
+#define QCOM_ID_MSM8994 207
+#define QCOM_ID_APQ8074PRO_AA 208
+#define QCOM_ID_APQ8074PRO_AB 209
+#define QCOM_ID_APQ8074PRO_AC 210
+#define QCOM_ID_MSM8274PRO_AA 211
+#define QCOM_ID_MSM8274PRO_AB 212
+#define QCOM_ID_MSM8274PRO_AC 213
+#define QCOM_ID_MSM8674PRO_AA 214
+#define QCOM_ID_MSM8674PRO_AB 215
+#define QCOM_ID_MSM8674PRO_AC 216
+#define QCOM_ID_MSM8974PRO_AA 217
+#define QCOM_ID_MSM8974PRO_AB 218
+#define QCOM_ID_APQ8028 219
+#define QCOM_ID_MSM8128 220
+#define QCOM_ID_MSM8228 221
+#define QCOM_ID_MSM8528 222
+#define QCOM_ID_MSM8628 223
+#define QCOM_ID_MSM8928 224
+#define QCOM_ID_MSM8510 225
+#define QCOM_ID_MSM8512 226
+#define QCOM_ID_MSM8936 233
+#define QCOM_ID_MSM8939 239
+#define QCOM_ID_APQ8036 240
+#define QCOM_ID_APQ8039 241
+#define QCOM_ID_MSM8996 246
+#define QCOM_ID_APQ8016 247
+#define QCOM_ID_MSM8216 248
+#define QCOM_ID_MSM8116 249
+#define QCOM_ID_MSM8616 250
+#define QCOM_ID_MSM8992 251
+#define QCOM_ID_APQ8094 253
+#define QCOM_ID_MSM8956 266
+#define QCOM_ID_MSM8976 278
+#define QCOM_ID_MDM9607 290
+#define QCOM_ID_APQ8096 291
+#define QCOM_ID_MSM8998 292
+#define QCOM_ID_MSM8953 293
+#define QCOM_ID_MDM8207 296
+#define QCOM_ID_MDM9207 297
+#define QCOM_ID_MDM9307 298
+#define QCOM_ID_MDM9628 299
+#define QCOM_ID_APQ8053 304
+#define QCOM_ID_MSM8996SG 305
+#define QCOM_ID_MSM8996AU 310
+#define QCOM_ID_APQ8096AU 311
+#define QCOM_ID_APQ8096SG 312
+#define QCOM_ID_SDM660 317
+#define QCOM_ID_SDM630 318
+#define QCOM_ID_APQ8098 319
+#define QCOM_ID_SDM845 321
+#define QCOM_ID_MDM9206 322
+#define QCOM_ID_IPQ8074 323
+#define QCOM_ID_SDA660 324
+#define QCOM_ID_SDM658 325
+#define QCOM_ID_SDA658 326
+#define QCOM_ID_SDA630 327
+#define QCOM_ID_SDM450 338
+#define QCOM_ID_SM8150 339
+#define QCOM_ID_SDA845 341
+#define QCOM_ID_IPQ8072 342
+#define QCOM_ID_IPQ8076 343
+#define QCOM_ID_IPQ8078 344
+#define QCOM_ID_SDM636 345
+#define QCOM_ID_SDA636 346
+#define QCOM_ID_SDM632 349
+#define QCOM_ID_SDA632 350
+#define QCOM_ID_SDA450 351
+#define QCOM_ID_SM8250 356
+#define QCOM_ID_SA8155 362
+#define QCOM_ID_IPQ8070 375
+#define QCOM_ID_IPQ8071 376
+#define QCOM_ID_IPQ8072A 389
+#define QCOM_ID_IPQ8074A 390
+#define QCOM_ID_IPQ8076A 391
+#define QCOM_ID_IPQ8078A 392
+#define QCOM_ID_SM6125 394
+#define QCOM_ID_IPQ8070A 395
+#define QCOM_ID_IPQ8071A 396
+#define QCOM_ID_IPQ6018 402
+#define QCOM_ID_IPQ6028 403
+#define QCOM_ID_SM4250 417
+#define QCOM_ID_IPQ6000 421
+#define QCOM_ID_IPQ6010 422
+#define QCOM_ID_SC7180 425
+#define QCOM_ID_SM6350 434
+#define QCOM_ID_SM8350 439
+#define QCOM_ID_SM6115 444
+#define QCOM_ID_SC8280XP 449
+#define QCOM_ID_IPQ6005 453
+#define QCOM_ID_QRB5165 455
+#define QCOM_ID_SM8450 457
+#define QCOM_ID_SM7225 459
+#define QCOM_ID_SA8295P 460
+#define QCOM_ID_SA8540P 461
+#define QCOM_ID_QCM4290 469
+#define QCOM_ID_QCS4290 470
+#define QCOM_ID_SM8450_2 480
+#define QCOM_ID_SM8450_3 482
+#define QCOM_ID_SC7280 487
+#define QCOM_ID_SC7180P 495
+#define QCOM_ID_SM6375 507
+#define QCOM_ID_SM8550 519
+#define QCOM_ID_QRU1000 539
+#define QCOM_ID_QDU1000 545
+#define QCOM_ID_QDU1010 587
+#define QCOM_ID_QRU1032 588
+#define QCOM_ID_QRU1052 589
+#define QCOM_ID_QRU1062 590
+
+/*
+ * The board type and revision information, used by Qualcomm bootloaders and
+ * DTS for older chipsets (qcom,board-id):
+ */
+#define QCOM_BOARD_ID(a, major, minor) \
+ (((major & 0xff) << 16) | ((minor & 0xff) << 8) | QCOM_BOARD_ID_##a)
+
+#define QCOM_BOARD_ID_MTP 8
+#define QCOM_BOARD_ID_DRAGONBOARD 10
+#define QCOM_BOARD_ID_SBC 24
+
+#endif /* _DT_BINDINGS_ARM_QCOM_IDS_H */
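The QCOM_BOARD_ID() macro packs a major/minor hardware revision around the board type; a worked expansion for illustration:

/* QCOM_BOARD_ID(MTP, 1, 0)
 *   == ((1 & 0xff) << 16) | ((0 & 0xff) << 8) | QCOM_BOARD_ID_MTP
 *   == 0x00010008, i.e. board type MTP, major revision 1, minor revision 0. */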
diff --git a/include/dt-bindings/clock/rk3399-cru.h b/include/dt-bindings/clock/rk3399-cru.h
index 44e0a319f077..39169d94a44e 100644
--- a/include/dt-bindings/clock/rk3399-cru.h
+++ b/include/dt-bindings/clock/rk3399-cru.h
@@ -547,8 +547,8 @@
#define SRST_H_PERILP0 171
#define SRST_H_PERILP0_NOC 172
#define SRST_ROM 173
-#define SRST_CRYPTO_S 174
-#define SRST_CRYPTO_M 175
+#define SRST_CRYPTO0_S 174
+#define SRST_CRYPTO0_M 175
/* cru_softrst_con11 */
#define SRST_P_DCF 176
@@ -556,7 +556,7 @@
#define SRST_CM0S 178
#define SRST_CM0S_DBG 179
#define SRST_CM0S_PO 180
-#define SRST_CRYPTO 181
+#define SRST_CRYPTO0 181
#define SRST_P_PERILP1_SGRF 182
#define SRST_P_PERILP1_GRF 183
#define SRST_CRYPTO1_S 184
diff --git a/include/dt-bindings/clock/tegra234-clock.h b/include/dt-bindings/clock/tegra234-clock.h
index 173364a93381..c360455d02ee 100644
--- a/include/dt-bindings/clock/tegra234-clock.h
+++ b/include/dt-bindings/clock/tegra234-clock.h
@@ -9,6 +9,12 @@
* @defgroup bpmp_clock_ids Clock ID's
* @{
*/
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_ACTMON */
+#define TEGRA234_CLK_ACTMON 1U
+/** @brief output of gate CLK_ENB_ADSP */
+#define TEGRA234_CLK_ADSP 2U
+/** @brief output of gate CLK_ENB_ADSPNEON */
+#define TEGRA234_CLK_ADSPNEON 3U
/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AHUB */
#define TEGRA234_CLK_AHUB 4U
/** @brief output of gate CLK_ENB_APB2APE */
@@ -17,6 +23,18 @@
#define TEGRA234_CLK_APE 6U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AUD_MCLK */
#define TEGRA234_CLK_AUD_MCLK 7U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AXI_CBB */
+#define TEGRA234_CLK_AXI_CBB 8U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_CAN1 */
+#define TEGRA234_CLK_CAN1 9U
+/** @brief output of gate CLK_ENB_CAN1_HOST */
+#define TEGRA234_CLK_CAN1_HOST 10U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_CAN2 */
+#define TEGRA234_CLK_CAN2 11U
+/** @brief output of gate CLK_ENB_CAN2_HOST */
+#define TEGRA234_CLK_CAN2_HOST 12U
+/** @brief output of divider CLK_RST_CONTROLLER_CLK_M_DIVIDE */
+#define TEGRA234_CLK_CLK_M 14U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC1 */
#define TEGRA234_CLK_DMIC1 15U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC2 */
@@ -25,6 +43,28 @@
#define TEGRA234_CLK_DMIC3 17U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC4 */
#define TEGRA234_CLK_DMIC4 18U
+/** @brief output of gate CLK_ENB_DPAUX */
+#define TEGRA234_CLK_DPAUX 19U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVJPG1 */
+#define TEGRA234_CLK_NVJPG1 20U
+/**
+ * @brief output of mux controlled by CLK_RST_CONTROLLER_ACLK_BURST_POLICY
+ * divided by the divider controlled by ACLK_CLK_DIVISOR in
+ * CLK_RST_CONTROLLER_SUPER_ACLK_DIVIDER
+ */
+#define TEGRA234_CLK_ACLK 21U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_MSS_ENCRYPT switch divider output */
+#define TEGRA234_CLK_MSS_ENCRYPT 22U
+/** @brief clock recovered from EAVB input */
+#define TEGRA234_CLK_EQOS_RX_INPUT 23U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_AON_APB switch divider output */
+#define TEGRA234_CLK_AON_APB 25U
+/** @brief CLK_RST_CONTROLLER_AON_NIC_RATE divider output */
+#define TEGRA234_CLK_AON_NIC 26U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_AON_CPU_NIC switch divider output */
+#define TEGRA234_CLK_AON_CPU_NIC 27U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLA1_BASE for use by audio clocks */
+#define TEGRA234_CLK_PLLA1 28U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK1 */
#define TEGRA234_CLK_DSPK1 29U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK2 */
@@ -38,10 +78,33 @@
* throughput and memory controller power.
*/
#define TEGRA234_CLK_EMC 31U
-/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HOST1X */
-#define TEGRA234_CLK_HOST1X 46U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_AXI_CLK_0 divider gated output */
+#define TEGRA234_CLK_EQOS_AXI 32U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_PTP_REF_CLK_0 divider gated output */
+#define TEGRA234_CLK_EQOS_PTP_REF 33U
+/** @brief output of gate CLK_ENB_EQOS_RX */
+#define TEGRA234_CLK_EQOS_RX 34U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_TX_CLK divider gated output */
+#define TEGRA234_CLK_EQOS_TX 35U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH1 */
+#define TEGRA234_CLK_EXTPERIPH1 36U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH2 */
+#define TEGRA234_CLK_EXTPERIPH2 37U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH3 */
+#define TEGRA234_CLK_EXTPERIPH3 38U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH4 */
+#define TEGRA234_CLK_EXTPERIPH4 39U
/** @brief output of gate CLK_ENB_FUSE */
#define TEGRA234_CLK_FUSE 40U
+/** @brief output of GPU GPC0 clkGen (in 1x mode same rate as GPC0 MUX2 out) */
+#define TEGRA234_CLK_GPC0CLK 41U
+/** @brief TODO */
+#define TEGRA234_CLK_GPU_PWR 42U
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HDA2CODEC_2X */
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HOST1X */
+#define TEGRA234_CLK_HOST1X 46U
+/** @brief xusb_hs_hsicp_clk */
+#define TEGRA234_CLK_XUSB_HS_HSICP 47U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C1 */
#define TEGRA234_CLK_I2C1 48U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C2 */
@@ -82,10 +145,66 @@
#define TEGRA234_CLK_I2S6 66U
/** @brief clock recovered from I2S6 input */
#define TEGRA234_CLK_I2S6_SYNC_INPUT 67U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_ISP */
+#define TEGRA234_CLK_ISP 69U
+/** @brief Monitored branch of EQOS_RX clock */
+#define TEGRA234_CLK_EQOS_RX_M 70U
+/** @brief CLK_RST_CONTROLLER_MAUDCLK_OUT_SWITCH_DIVIDER switch divider output (maudclk) */
+#define TEGRA234_CLK_MAUD 71U
+/** @brief output of gate CLK_ENB_MIPI_CAL */
+#define TEGRA234_CLK_MIPI_CAL 72U
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_CORE_PLL_FIXED */
+#define TEGRA234_CLK_MPHY_CORE_PLL_FIXED 73U
+/** @brief output of gate CLK_ENB_MPHY_L0_RX_ANA */
+#define TEGRA234_CLK_MPHY_L0_RX_ANA 74U
+/** @brief output of gate CLK_ENB_MPHY_L0_RX_LS_BIT */
+#define TEGRA234_CLK_MPHY_L0_RX_LS_BIT 75U
+/** @brief output of gate CLK_ENB_MPHY_L0_RX_SYMB */
+#define TEGRA234_CLK_MPHY_L0_RX_SYMB 76U
+/** @brief output of gate CLK_ENB_MPHY_L0_TX_LS_3XBIT */
+#define TEGRA234_CLK_MPHY_L0_TX_LS_3XBIT 77U
+/** @brief output of gate CLK_ENB_MPHY_L0_TX_SYMB */
+#define TEGRA234_CLK_MPHY_L0_TX_SYMB 78U
+/** @brief output of gate CLK_ENB_MPHY_L1_RX_ANA */
+#define TEGRA234_CLK_MPHY_L1_RX_ANA 79U
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_TX_1MHZ_REF */
+#define TEGRA234_CLK_MPHY_TX_1MHZ_REF 80U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVCSI */
+#define TEGRA234_CLK_NVCSI 81U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVCSILP */
+#define TEGRA234_CLK_NVCSILP 82U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDEC */
+#define TEGRA234_CLK_NVDEC 83U
+/** @brief CLK_RST_CONTROLLER_HUBCLK_OUT_SWITCH_DIVIDER switch divider output (hubclk) */
+#define TEGRA234_CLK_HUB 84U
+/** @brief CLK_RST_CONTROLLER_DISPCLK_SWITCH_DIVIDER switch divider output (dispclk) */
+#define TEGRA234_CLK_DISP 85U
+/** @brief RG_CLK_CTRL__0_DIV divider output (nvdisplay_p0_clk) */
+#define TEGRA234_CLK_NVDISPLAY_P0 86U
+/** @brief RG_CLK_CTRL__1_DIV divider output (nvdisplay_p1_clk) */
+#define TEGRA234_CLK_NVDISPLAY_P1 87U
+/** @brief DSC_CLK (DISPCLK ÷ 3) */
+#define TEGRA234_CLK_DSC 88U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVENC */
+#define TEGRA234_CLK_NVENC 89U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVJPG */
+#define TEGRA234_CLK_NVJPG 90U
+/** @brief input from Tegra's XTAL_IN */
+#define TEGRA234_CLK_OSC 91U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_AON_TOUCH switch divider output */
+#define TEGRA234_CLK_AON_TOUCH 92U
/** PLL controlled by CLK_RST_CONTROLLER_PLLA_BASE for use by audio clocks */
#define TEGRA234_CLK_PLLA 93U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLAON_BASE for use by IP blocks in the AON domain */
+#define TEGRA234_CLK_PLLAON 94U
+/** Fixed 100MHz PLL for PCIe, SATA and superspeed USB */
+#define TEGRA234_CLK_PLLE 100U
+/** @brief PLLP vco output */
+#define TEGRA234_CLK_PLLP 101U
/** @brief PLLP clk output */
#define TEGRA234_CLK_PLLP_OUT0 102U
+/** Fixed frequency 960MHz PLL for USB and EAVB */
+#define TEGRA234_CLK_UTMIP_PLL 103U
/** @brief output of the divider CLK_RST_CONTROLLER_PLLA_OUT */
#define TEGRA234_CLK_PLLA_OUT0 104U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM1 */
@@ -104,8 +223,50 @@
#define TEGRA234_CLK_PWM7 111U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM8 */
#define TEGRA234_CLK_PWM8 112U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_RCE_CPU_NIC output */
+#define TEGRA234_CLK_RCE_CPU_NIC 113U
+/** @brief CLK_RST_CONTROLLER_RCE_NIC_RATE divider output */
+#define TEGRA234_CLK_RCE_NIC 114U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_AON_I2C_SLOW switch divider output */
+#define TEGRA234_CLK_AON_I2C_SLOW 117U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SCE_CPU_NIC */
+#define TEGRA234_CLK_SCE_CPU_NIC 118U
+/** @brief output of divider CLK_RST_CONTROLLER_SCE_NIC_RATE */
+#define TEGRA234_CLK_SCE_NIC 119U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC1 */
+#define TEGRA234_CLK_SDMMC1 120U
+/** @brief Logical clk for setting the UPHY PLL3 rate */
+#define TEGRA234_CLK_UPHY_PLL3 121U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC4 */
#define TEGRA234_CLK_SDMMC4 123U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SE switch divider gated output */
+#define TEGRA234_CLK_SE 124U
+/** @brief VPLL select for sor0_ref clk driven by disp_2clk_sor0_head_sel signal */
+#define TEGRA234_CLK_SOR0_PLL_REF 125U
+/** @brief Output of mux controlled by disp_2clk_sor0_pll_ref_clk_safe signal (sor0_ref_clk) */
+#define TEGRA234_CLK_SOR0_REF 126U
+/** @brief VPLL select for sor1_ref clk driven by disp_2clk_sor0_head_sel signal */
+#define TEGRA234_CLK_SOR1_PLL_REF 127U
+/** @brief SOR_PLL_REF_CLK_CTRL__0_DIV divider output */
+#define TEGRA234_CLK_PRE_SOR0_REF 128U
+/** @brief Output of mux controlled by disp_2clk_sor1_pll_ref_clk_safe signal (sor1_ref_clk) */
+#define TEGRA234_CLK_SOR1_REF 129U
+/** @brief SOR_PLL_REF_CLK_CTRL__1_DIV divider output */
+#define TEGRA234_CLK_PRE_SOR1_REF 130U
+/** @brief output of gate CLK_ENB_SOR_SAFE */
+#define TEGRA234_CLK_SOR_SAFE 131U
+/** @brief SOR_CLK_CTRL__0_DIV divider output */
+#define TEGRA234_CLK_SOR0_DIV 132U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC5 */
+#define TEGRA234_CLK_DMIC5 134U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI1 */
+#define TEGRA234_CLK_SPI1 135U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI2 */
+#define TEGRA234_CLK_SPI2 136U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI3 */
+#define TEGRA234_CLK_SPI3 137U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C_SLOW */
+#define TEGRA234_CLK_I2C_SLOW 138U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC1 */
#define TEGRA234_CLK_SYNC_DMIC1 139U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC2 */
@@ -130,28 +291,132 @@
#define TEGRA234_CLK_SYNC_I2S5 149U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S6 */
#define TEGRA234_CLK_SYNC_I2S6 150U
+/** @brief controls MPHY_FORCE_LS_MODE upon enable & disable */
+#define TEGRA234_CLK_MPHY_FORCE_LS_MODE 151U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TACH0 */
+#define TEGRA234_CLK_TACH0 152U
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSEC */
+#define TEGRA234_CLK_TSEC 153U
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PKA */
+#define TEGRA234_CLK_TSEC_PKA 154U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTA */
#define TEGRA234_CLK_UARTA 155U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTB */
+#define TEGRA234_CLK_UARTB 156U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTC */
+#define TEGRA234_CLK_UARTC 157U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTD */
+#define TEGRA234_CLK_UARTD 158U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTE */
+#define TEGRA234_CLK_UARTE 159U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTF */
+#define TEGRA234_CLK_UARTF 160U
/** @brief output of gate CLK_ENB_PEX1_CORE_6 */
#define TEGRA234_CLK_PEX1_C6_CORE 161U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UART_FST_MIPI_CAL */
+#define TEGRA234_CLK_UART_FST_MIPI_CAL 162U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UFSDEV_REF */
+#define TEGRA234_CLK_UFSDEV_REF 163U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UFSHC_CG_SYS */
+#define TEGRA234_CLK_UFSHC 164U
+/** @brief output of gate CLK_ENB_USB2_TRK */
+#define TEGRA234_CLK_USB2_TRK 165U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VI */
+#define TEGRA234_CLK_VI 166U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VIC */
-#define TEGRA234_CLK_VIC 167U
+#define TEGRA234_CLK_VIC 167U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_CSITE switch divider output */
+#define TEGRA234_CLK_CSITE 168U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_IST switch divider output */
+#define TEGRA234_CLK_IST 169U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_IST_JTAG_REG_CLK_SEL */
+#define TEGRA234_CLK_JTAG_INTFC_PRE_CG 170U
/** @brief output of gate CLK_ENB_PEX2_CORE_7 */
#define TEGRA234_CLK_PEX2_C7_CORE 171U
/** @brief output of gate CLK_ENB_PEX2_CORE_8 */
#define TEGRA234_CLK_PEX2_C8_CORE 172U
/** @brief output of gate CLK_ENB_PEX2_CORE_9 */
#define TEGRA234_CLK_PEX2_C9_CORE 173U
+/** @brief dla0_falcon_clk */
+#define TEGRA234_CLK_DLA0_FALCON 174U
+/** @brief dla0_core_clk */
+#define TEGRA234_CLK_DLA0_CORE 175U
+/** @brief dla1_falcon_clk */
+#define TEGRA234_CLK_DLA1_FALCON 176U
+/** @brief dla1_core_clk */
+#define TEGRA234_CLK_DLA1_CORE 177U
+/** @brief Output of mux controlled by disp_2clk_sor0_clk_safe signal (sor0_clk) */
+#define TEGRA234_CLK_SOR0 178U
+/** @brief Output of mux controlled by disp_2clk_sor1_clk_safe signal (sor1_clk) */
+#define TEGRA234_CLK_SOR1 179U
+/** @brief DP macro feedback clock (same as LINKA_SYM CLKOUT) */
+#define TEGRA234_CLK_SOR_PAD_INPUT 180U
+/** @brief Output of mux controlled by disp_2clk_h0_dsi_sel signal in sf0_clk path */
+#define TEGRA234_CLK_PRE_SF0 181U
+/** @brief Output of mux controlled by disp_2clk_sf0_clk_safe signal (sf0_clk) */
+#define TEGRA234_CLK_SF0 182U
+/** @brief Output of mux controlled by disp_2clk_sf1_clk_safe signal (sf1_clk) */
+#define TEGRA234_CLK_SF1 183U
+/** @brief CLKOUT_AB output from DSI BRICK A (dsi_clkout_ab) */
+#define TEGRA234_CLK_DSI_PAD_INPUT 184U
/** @brief output of gate CLK_ENB_PEX2_CORE_10 */
#define TEGRA234_CLK_PEX2_C10_CORE 187U
-/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_QSPI0 switch divider output */
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_UARTI switch divider output (uarti_r_clk) */
+#define TEGRA234_CLK_UARTI 188U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_UARTJ switch divider output (uartj_r_clk) */
+#define TEGRA234_CLK_UARTJ 189U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_UARTH switch divider output */
+#define TEGRA234_CLK_UARTH 190U
+/** @brief ungated version of fuse clk */
+#define TEGRA234_CLK_FUSE_SERIAL 191U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_QSPI0 switch divider output (qspi0_2x_pm_clk) */
#define TEGRA234_CLK_QSPI0_2X_PM 192U
-/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_QSPI1 switch divider output */
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_QSPI1 switch divider output (qspi1_2x_pm_clk) */
#define TEGRA234_CLK_QSPI1_2X_PM 193U
-/** @brief output of the divider QSPI_CLK_DIV2_SEL in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI0 */
+/** @brief output of the divider QSPI_CLK_DIV2_SEL in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI0 (qspi0_pm_clk) */
#define TEGRA234_CLK_QSPI0_PM 194U
-/** @brief output of the divider QSPI_CLK_DIV2_SEL in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI1 */
+/** @brief output of the divider QSPI_CLK_DIV2_SEL in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI1 (qspi1_pm_clk) */
#define TEGRA234_CLK_QSPI1_PM 195U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_VI_CONST switch divider output */
+#define TEGRA234_CLK_VI_CONST 196U
+/** @brief NAFLL clock source for BPMP */
+#define TEGRA234_CLK_NAFLL_BPMP 197U
+/** @brief NAFLL clock source for SCE */
+#define TEGRA234_CLK_NAFLL_SCE 198U
+/** @brief NAFLL clock source for NVDEC */
+#define TEGRA234_CLK_NAFLL_NVDEC 199U
+/** @brief NAFLL clock source for NVJPG */
+#define TEGRA234_CLK_NAFLL_NVJPG 200U
+/** @brief NAFLL clock source for TSEC */
+#define TEGRA234_CLK_NAFLL_TSEC 201U
+/** @brief NAFLL clock source for VI */
+#define TEGRA234_CLK_NAFLL_VI 203U
+/** @brief NAFLL clock source for SE */
+#define TEGRA234_CLK_NAFLL_SE 204U
+/** @brief NAFLL clock source for NVENC */
+#define TEGRA234_CLK_NAFLL_NVENC 205U
+/** @brief NAFLL clock source for ISP */
+#define TEGRA234_CLK_NAFLL_ISP 206U
+/** @brief NAFLL clock source for VIC */
+#define TEGRA234_CLK_NAFLL_VIC 207U
+/** @brief NAFLL clock source for AXICBB */
+#define TEGRA234_CLK_NAFLL_AXICBB 209U
+/** @brief NAFLL clock source for NVJPG1 */
+#define TEGRA234_CLK_NAFLL_NVJPG1 210U
+/** @brief NAFLL clock source for PVA core */
+#define TEGRA234_CLK_NAFLL_PVA0_CORE 211U
+/** @brief NAFLL clock source for PVA VPS */
+#define TEGRA234_CLK_NAFLL_PVA0_VPS 212U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_DBGAPB_0 switch divider output (dbgapb_clk) */
+#define TEGRA234_CLK_DBGAPB 213U
+/** @brief NAFLL clock source for RCE */
+#define TEGRA234_CLK_NAFLL_RCE 214U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_LA switch divider output (la_r_clk) */
+#define TEGRA234_CLK_LA 215U
+/** @brief output of the divider CLK_RST_CONTROLLER_PLLP_OUTD */
+#define TEGRA234_CLK_PLLP_OUT_JTAG 216U
+/** @brief AXI_CBB branch sharing gate control with SDMMC4 */
+#define TEGRA234_CLK_SDMMC4_AXICIF 217U
/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC_LEGACY_TM switch divider output */
#define TEGRA234_CLK_SDMMC_LEGACY_TM 219U
/** @brief output of gate CLK_ENB_PEX0_CORE_0 */
@@ -166,8 +431,42 @@
#define TEGRA234_CLK_PEX0_C4_CORE 224U
/** @brief output of gate CLK_ENB_PEX1_CORE_5 */
#define TEGRA234_CLK_PEX1_C5_CORE 225U
+/** @brief Monitored branch of PEX0_C0_CORE clock */
+#define TEGRA234_CLK_PEX0_C0_CORE_M 229U
+/** @brief Monitored branch of PEX0_C1_CORE clock */
+#define TEGRA234_CLK_PEX0_C1_CORE_M 230U
+/** @brief Monitored branch of PEX0_C2_CORE clock */
+#define TEGRA234_CLK_PEX0_C2_CORE_M 231U
+/** @brief Monitored branch of PEX0_C3_CORE clock */
+#define TEGRA234_CLK_PEX0_C3_CORE_M 232U
+/** @brief Monitored branch of PEX0_C4_CORE clock */
+#define TEGRA234_CLK_PEX0_C4_CORE_M 233U
+/** @brief Monitored branch of PEX1_C5_CORE clock */
+#define TEGRA234_CLK_PEX1_C5_CORE_M 234U
+/** @brief Monitored branch of PEX1_C6_CORE clock */
+#define TEGRA234_CLK_PEX1_C6_CORE_M 235U
+/** @brief output of GPU GPC1 clkGen (in 1x mode same rate as GPC1 MUX2 out) */
+#define TEGRA234_CLK_GPC1CLK 236U
/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC4_BASE */
#define TEGRA234_CLK_PLLC4 237U
+/** @brief PLLC4 VCO followed by DIV3 path */
+#define TEGRA234_CLK_PLLC4_OUT1 239U
+/** @brief PLLC4 VCO followed by DIV5 path */
+#define TEGRA234_CLK_PLLC4_OUT2 240U
+/** @brief output of the mux controlled by PLLC4_CLK_SEL */
+#define TEGRA234_CLK_PLLC4_MUXED 241U
+/** @brief PLLC4 VCO followed by DIV2 path */
+#define TEGRA234_CLK_PLLC4_VCO_DIV2 242U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLNVHS_BASE */
+#define TEGRA234_CLK_PLLNVHS 243U
+/** @brief Monitored branch of PEX2_C7_CORE clock */
+#define TEGRA234_CLK_PEX2_C7_CORE_M 244U
+/** @brief Monitored branch of PEX2_C8_CORE clock */
+#define TEGRA234_CLK_PEX2_C8_CORE_M 245U
+/** @brief Monitored branch of PEX2_C9_CORE clock */
+#define TEGRA234_CLK_PEX2_C9_CORE_M 246U
+/** @brief Monitored branch of PEX2_C10_CORE clock */
+#define TEGRA234_CLK_PEX2_C10_CORE_M 247U
/** @brief RX clock recovered from MGBE0 lane input */
#define TEGRA234_CLK_MGBE0_RX_INPUT 248U
/** @brief RX clock recovered from MGBE1 lane input */
@@ -176,8 +475,185 @@
#define TEGRA234_CLK_MGBE2_RX_INPUT 250U
/** @brief RX clock recovered from MGBE3 lane input */
#define TEGRA234_CLK_MGBE3_RX_INPUT 251U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PEX_SATA_USB_RX_BYP switch divider output */
+#define TEGRA234_CLK_PEX_SATA_USB_RX_BYP 254U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL0_MGMT switch divider output */
+#define TEGRA234_CLK_PEX_USB_PAD_PLL0_MGMT 255U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL1_MGMT switch divider output */
+#define TEGRA234_CLK_PEX_USB_PAD_PLL1_MGMT 256U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL2_MGMT switch divider output */
+#define TEGRA234_CLK_PEX_USB_PAD_PLL2_MGMT 257U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL3_MGMT switch divider output */
+#define TEGRA234_CLK_PEX_USB_PAD_PLL3_MGMT 258U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_NVHS_RX_BYP switch divider output */
+#define TEGRA234_CLK_NVHS_RX_BYP_REF 263U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_NVHS_PLL0_MGMT switch divider output */
+#define TEGRA234_CLK_NVHS_PLL0_MGMT 264U
+/** @brief xusb_core_dev_clk */
+#define TEGRA234_CLK_XUSB_CORE_DEV 265U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_XUSB_CORE_HOST switch divider output */
+#define TEGRA234_CLK_XUSB_CORE_MUX 266U
+/** @brief xusb_core_host_clk */
+#define TEGRA234_CLK_XUSB_CORE_HOST 267U
+/** @brief xusb_core_superspeed_clk */
+#define TEGRA234_CLK_XUSB_CORE_SS 268U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_XUSB_FALCON switch divider output */
+#define TEGRA234_CLK_XUSB_FALCON 269U
+/** @brief xusb_falcon_host_clk */
+#define TEGRA234_CLK_XUSB_FALCON_HOST 270U
+/** @brief xusb_falcon_superspeed_clk */
+#define TEGRA234_CLK_XUSB_FALCON_SS 271U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_XUSB_FS switch divider output */
+#define TEGRA234_CLK_XUSB_FS 272U
+/** @brief xusb_fs_host_clk */
+#define TEGRA234_CLK_XUSB_FS_HOST 273U
+/** @brief xusb_fs_dev_clk */
+#define TEGRA234_CLK_XUSB_FS_DEV 274U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_XUSB_SS switch divider output */
+#define TEGRA234_CLK_XUSB_SS 275U
+/** @brief xusb_ss_dev_clk */
+#define TEGRA234_CLK_XUSB_SS_DEV 276U
+/** @brief xusb_ss_superspeed_clk */
+#define TEGRA234_CLK_XUSB_SS_SUPERSPEED 277U
+/** @brief NAFLL clock source for CPU cluster 0 */
+#define TEGRA234_CLK_NAFLL_CLUSTER0 280U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER0_CORE 280U
+/** @brief NAFLL clock source for CPU cluster 1 */
+#define TEGRA234_CLK_NAFLL_CLUSTER1 281U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER1_CORE 281U
+/** @brief NAFLL clock source for CPU cluster 2 */
+#define TEGRA234_CLK_NAFLL_CLUSTER2 282U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER2_CORE 282U
+/** @brief CLK_RST_CONTROLLER_CAN1_CORE_RATE divider output */
+#define TEGRA234_CLK_CAN1_CORE 284U
+/** @brief CLK_RST_CONTROLLER_CAN2_CORE_RATE divider output */
+#define TEGRA234_CLK_CAN2_CORE 285U
+/** @brief CLK_RST_CONTROLLER_PLLA1_OUT1 switch divider output */
+#define TEGRA234_CLK_PLLA1_OUT1 286U
+/** @brief NVHS PLL hardware power sequencer (overrides 'manual' programming of PLL) */
+#define TEGRA234_CLK_PLLNVHS_HPS 287U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLREFE_BASE */
+#define TEGRA234_CLK_PLLREFE_VCOOUT 288U
/** @brief 32K input clock provided by PMIC */
#define TEGRA234_CLK_CLK_32K 289U
+/** @brief Fixed 48MHz clock divided down from utmipll */
+#define TEGRA234_CLK_UTMIPLL_CLKOUT48 291U
+/** @brief Fixed 480MHz clock divided down from utmipll */
+#define TEGRA234_CLK_UTMIPLL_CLKOUT480 292U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLNVCSI_BASE */
+#define TEGRA234_CLK_PLLNVCSI 294U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PVA0_CPU_AXI switch divider output */
+#define TEGRA234_CLK_PVA0_CPU_AXI 295U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PVA0_VPS switch divider output */
+#define TEGRA234_CLK_PVA0_VPS 297U
+/** @brief DLA0_CORE_NAFLL */
+#define TEGRA234_CLK_NAFLL_DLA0_CORE 299U
+/** @brief DLA0_FALCON_NAFLL */
+#define TEGRA234_CLK_NAFLL_DLA0_FALCON 300U
+/** @brief DLA1_CORE_NAFLL */
+#define TEGRA234_CLK_NAFLL_DLA1_CORE 301U
+/** @brief DLA1_FALCON_NAFLL */
+#define TEGRA234_CLK_NAFLL_DLA1_FALCON 302U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_UART_FST_MIPI_CAL */
+#define TEGRA234_CLK_AON_UART_FST_MIPI_CAL 303U
+/** @brief GPU system clock */
+#define TEGRA234_CLK_GPUSYS 304U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C5 */
+#define TEGRA234_CLK_I2C5 305U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SE switch divider free running clk */
+#define TEGRA234_CLK_FR_SE 306U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_BPMP_CPU_NIC switch divider output */
+#define TEGRA234_CLK_BPMP_CPU_NIC 307U
+/** @brief output of gate CLK_ENB_BPMP_CPU */
+#define TEGRA234_CLK_BPMP_CPU 308U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_TSC switch divider output */
+#define TEGRA234_CLK_TSC 309U
+/** @brief output of mem pll A sync mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EMC */
+#define TEGRA234_CLK_EMCSA_MPLL 310U
+/** @brief output of mem pll B sync mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EMCSB */
+#define TEGRA234_CLK_EMCSB_MPLL 311U
+/** @brief output of mem pll C sync mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EMCSC */
+#define TEGRA234_CLK_EMCSC_MPLL 312U
+/** @brief output of mem pll D sync mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EMCSD */
+#define TEGRA234_CLK_EMCSD_MPLL 313U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC_BASE */
+#define TEGRA234_CLK_PLLC 314U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC2_BASE */
+#define TEGRA234_CLK_PLLC2 315U
+/** @brief CLK_RST_CONTROLLER_TSC_HS_SUPER_CLK_DIVIDER skip divider output */
+#define TEGRA234_CLK_TSC_REF 317U
+/** @brief Dummy clock to ensure minimum SoC voltage for fuse burning */
+#define TEGRA234_CLK_FUSE_BURN 318U
+/** @brief GBE PLL */
+#define TEGRA234_CLK_PLLGBE 319U
+/** @brief GBE PLL hardware power sequencer */
+#define TEGRA234_CLK_PLLGBE_HPS 320U
+/** @brief output of EMC CDB side A fixed (DIV4) divider */
+#define TEGRA234_CLK_EMCSA_EMC 321U
+/** @brief output of EMC CDB side B fixed (DIV4) divider */
+#define TEGRA234_CLK_EMCSB_EMC 322U
+/** @brief output of EMC CDB side C fixed (DIV4) divider */
+#define TEGRA234_CLK_EMCSC_EMC 323U
+/** @brief output of EMC CDB side D fixed (DIV4) divider */
+#define TEGRA234_CLK_EMCSD_EMC 324U
+/** @brief PLLE hardware power sequencer (overrides 'manual' programming of PLL) */
+#define TEGRA234_CLK_PLLE_HPS 326U
+/** @brief CLK_ENB_PLLREFE_OUT gate output */
+#define TEGRA234_CLK_PLLREFE_VCOOUT_GATED 327U
+/** @brief TEGRA234_CLK_SOR_SAFE clk source (PLLP_OUT0 divided by 17) */
+#define TEGRA234_CLK_PLLP_DIV17 328U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SOC_THERM switch divider output */
+#define TEGRA234_CLK_SOC_THERM 329U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_TSENSE switch divider output */
+#define TEGRA234_CLK_TSENSE 330U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SEU1 switch divider free running clk */
+#define TEGRA234_CLK_FR_SEU1 331U
+/** @brief NAFLL clock source for OFA */
+#define TEGRA234_CLK_NAFLL_OFA 333U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_OFA switch divider output */
+#define TEGRA234_CLK_OFA 334U
+/** @brief NAFLL clock source for SEU1 */
+#define TEGRA234_CLK_NAFLL_SEU1 335U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SEU1 switch divider gated output */
+#define TEGRA234_CLK_SEU1 336U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI4 */
+#define TEGRA234_CLK_SPI4 337U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI5 */
+#define TEGRA234_CLK_SPI5 338U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DCE_CPU_NIC */
+#define TEGRA234_CLK_DCE_CPU_NIC 339U
+/** @brief output of divider CLK_RST_CONTROLLER_DCE_NIC_RATE */
+#define TEGRA234_CLK_DCE_NIC 340U
+/** @brief NAFLL clock source for DCE */
+#define TEGRA234_CLK_NAFLL_DCE 341U
+/** @brief Monitored branch of MPHY_L0_RX_ANA clock */
+#define TEGRA234_CLK_MPHY_L0_RX_ANA_M 342U
+/** @brief Monitored branch of MPHY_L1_RX_ANA clock */
+#define TEGRA234_CLK_MPHY_L1_RX_ANA_M 343U
+/** @brief ungated version of TX symbol clock after fixed 1/2 divider */
+#define TEGRA234_CLK_MPHY_L0_TX_PRE_SYMB 344U
+/** @brief output of divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_L0_TX_LS_SYMB */
+#define TEGRA234_CLK_MPHY_L0_TX_LS_SYMB_DIV 345U
+/** @brief output of gate CLK_ENB_MPHY_L0_TX_2X_SYMB */
+#define TEGRA234_CLK_MPHY_L0_TX_2X_SYMB 346U
+/** @brief output of SW_MPHY_L0_TX_HS_SYMB divider in CLK_RST_CONTROLLER_MPHY_L0_TX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_TX_HS_SYMB_DIV 347U
+/** @brief output of SW_MPHY_L0_TX_LS_3XBIT divider in CLK_RST_CONTROLLER_MPHY_L0_TX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_TX_LS_3XBIT_DIV 348U
+/** @brief LS/HS divider mux SW_MPHY_L0_TX_LS_HS_SEL in CLK_RST_CONTROLLER_MPHY_L0_TX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_TX_MUX_SYMB_DIV 349U
+/** @brief Monitored branch of MPHY_L0_TX_SYMB clock */
+#define TEGRA234_CLK_MPHY_L0_TX_SYMB_M 350U
+/** @brief output of divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_L0_RX_LS_SYMB */
+#define TEGRA234_CLK_MPHY_L0_RX_LS_SYMB_DIV 351U
+/** @brief output of SW_MPHY_L0_RX_HS_SYMB divider in CLK_RST_CONTROLLER_MPHY_L0_RX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_RX_HS_SYMB_DIV 352U
+/** @brief output of SW_MPHY_L0_RX_LS_BIT divider in CLK_RST_CONTROLLER_MPHY_L0_RX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_RX_LS_BIT_DIV 353U
+/** @brief LS/HS divider mux SW_MPHY_L0_RX_LS_HS_SEL in CLK_RST_CONTROLLER_MPHY_L0_RX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_RX_MUX_SYMB_DIV 354U
+/** @brief Monitored branch of MPHY_L0_RX_SYMB clock */
+#define TEGRA234_CLK_MPHY_L0_RX_SYMB_M 355U
/** @brief Monitored branch of MBGE0 RX input clock */
#define TEGRA234_CLK_MGBE0_RX_INPUT_M 357U
/** @brief Monitored branch of MBGE1 RX input clock */
@@ -194,6 +670,14 @@
#define TEGRA234_CLK_MGBE2_RX_PCS_M 363U
/** @brief Monitored branch of MGBE3 RX PCS mux output */
#define TEGRA234_CLK_MGBE3_RX_PCS_M 364U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TACH1 */
+#define TEGRA234_CLK_TACH1 365U
+/** @brief GBE_UPHY_MGBES_APP_CLK switch divider gated output */
+#define TEGRA234_CLK_MGBES_APP 366U
+/** @brief Logical clk for setting GBE UPHY PLL2 TX_REF rate */
+#define TEGRA234_CLK_UPHY_GBE_PLL2_TX_REF 367U
+/** @brief Logical clk for setting GBE UPHY PLL2 XDIG rate */
+#define TEGRA234_CLK_UPHY_GBE_PLL2_XDIG 368U
/** @brief RX PCS clock recovered from MGBE0 lane input */
#define TEGRA234_CLK_MGBE0_RX_PCS_INPUT 369U
/** @brief RX PCS clock recovered from MGBE1 lane input */
@@ -230,6 +714,8 @@
#define TEGRA234_CLK_MGBE1_MAC_DIVIDER 385U
/** @brief GBE_UPHY_MGBE1_MAC_CLK gate output */
#define TEGRA234_CLK_MGBE1_MAC 386U
+/** @brief GBE_UPHY_MGBE1_MACSEC_CLK gate output */
+#define TEGRA234_CLK_MGBE1_MACSEC 387U
/** @brief GBE_UPHY_MGBE1_EEE_PCS_CLK gate output */
#define TEGRA234_CLK_MGBE1_EEE_PCS 388U
/** @brief GBE_UPHY_MGBE1_APP_CLK gate output */
@@ -246,6 +732,8 @@
#define TEGRA234_CLK_MGBE2_MAC_DIVIDER 394U
/** @brief GBE_UPHY_MGBE2_MAC_CLK gate output */
#define TEGRA234_CLK_MGBE2_MAC 395U
+/** @brief GBE_UPHY_MGBE2_MACSEC_CLK gate output */
+#define TEGRA234_CLK_MGBE2_MACSEC 396U
/** @brief GBE_UPHY_MGBE2_EEE_PCS_CLK gate output */
#define TEGRA234_CLK_MGBE2_EEE_PCS 397U
/** @brief GBE_UPHY_MGBE2_APP_CLK gate output */
@@ -270,9 +758,146 @@
#define TEGRA234_CLK_MGBE3_APP 407U
/** @brief GBE_UPHY_MGBE3_PTP_REF_CLK divider gated output */
#define TEGRA234_CLK_MGBE3_PTP_REF 408U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_GBE_RX_BYP switch divider output */
+#define TEGRA234_CLK_GBE_RX_BYP_REF 409U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_GBE_PLL0_MGMT switch divider output */
+#define TEGRA234_CLK_GBE_PLL0_MGMT 410U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_GBE_PLL1_MGMT switch divider output */
+#define TEGRA234_CLK_GBE_PLL1_MGMT 411U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_GBE_PLL2_MGMT switch divider output */
+#define TEGRA234_CLK_GBE_PLL2_MGMT 412U
+/** @brief output of gate CLK_ENB_EQOS_MACSEC_RX */
+#define TEGRA234_CLK_EQOS_MACSEC_RX 413U
+/** @brief output of gate CLK_ENB_EQOS_MACSEC_TX */
+#define TEGRA234_CLK_EQOS_MACSEC_TX 414U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_TX_CLK divider ungated output */
+#define TEGRA234_CLK_EQOS_TX_DIVIDER 415U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_NVHS_PLL1_MGMT switch divider output */
+#define TEGRA234_CLK_NVHS_PLL1_MGMT 416U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_EMCHUB mux output */
+#define TEGRA234_CLK_EMCHUB 417U
+/** @brief clock recovered from I2S7 input */
+#define TEGRA234_CLK_I2S7_SYNC_INPUT 418U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S7 */
+#define TEGRA234_CLK_SYNC_I2S7 419U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S7 */
+#define TEGRA234_CLK_I2S7 420U
+/** @brief Monitored output of I2S7 pad macro mux */
+#define TEGRA234_CLK_I2S7_PAD_M 421U
+/** @brief clock recovered from I2S8 input */
+#define TEGRA234_CLK_I2S8_SYNC_INPUT 422U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S8 */
+#define TEGRA234_CLK_SYNC_I2S8 423U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S8 */
+#define TEGRA234_CLK_I2S8 424U
+/** @brief Monitored output of I2S8 pad macro mux */
+#define TEGRA234_CLK_I2S8_PAD_M 425U
+/** @brief NAFLL clock source for GPU GPC0 */
+#define TEGRA234_CLK_NAFLL_GPC0 426U
+/** @brief NAFLL clock source for GPU GPC1 */
+#define TEGRA234_CLK_NAFLL_GPC1 427U
+/** @brief NAFLL clock source for GPU SYSCLK */
+#define TEGRA234_CLK_NAFLL_GPUSYS 428U
+/** @brief NAFLL clock source for CPU cluster 0 DSUCLK */
+#define TEGRA234_CLK_NAFLL_DSU0 429U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER0_DSU 429U
+/** @brief NAFLL clock source for CPU cluster 1 DSUCLK */
+#define TEGRA234_CLK_NAFLL_DSU1 430U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER1_DSU 430U
+/** @brief NAFLL clock source for CPU cluster 2 DSUCLK */
+#define TEGRA234_CLK_NAFLL_DSU2 431U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER2_DSU 431U
+/** @brief output of gate CLK_ENB_SCE_CPU */
+#define TEGRA234_CLK_SCE_CPU 432U
+/** @brief output of gate CLK_ENB_RCE_CPU */
+#define TEGRA234_CLK_RCE_CPU 433U
+/** @brief output of gate CLK_ENB_DCE_CPU */
+#define TEGRA234_CLK_DCE_CPU 434U
+/** @brief DSIPLL VCO output */
+#define TEGRA234_CLK_DSIPLL_VCO 435U
+/** @brief DSIPLL SYNC_CLKOUTP/N differential output */
+#define TEGRA234_CLK_DSIPLL_CLKOUTPN 436U
+/** @brief DSIPLL SYNC_CLKOUTA output */
+#define TEGRA234_CLK_DSIPLL_CLKOUTA 437U
+/** @brief SPPLL0 VCO output */
+#define TEGRA234_CLK_SPPLL0_VCO 438U
+/** @brief SPPLL0 SYNC_CLKOUTP/N differential output */
+#define TEGRA234_CLK_SPPLL0_CLKOUTPN 439U
+/** @brief SPPLL0 SYNC_CLKOUTA output */
+#define TEGRA234_CLK_SPPLL0_CLKOUTA 440U
+/** @brief SPPLL0 SYNC_CLKOUTB output */
+#define TEGRA234_CLK_SPPLL0_CLKOUTB 441U
+/** @brief SPPLL0 CLKOUT_DIVBY10 output */
+#define TEGRA234_CLK_SPPLL0_DIV10 442U
+/** @brief SPPLL0 CLKOUT_DIVBY25 output */
+#define TEGRA234_CLK_SPPLL0_DIV25 443U
+/** @brief SPPLL0 CLKOUT_DIVBY27P/N differential output */
+#define TEGRA234_CLK_SPPLL0_DIV27PN 444U
+/** @brief SPPLL1 VCO output */
+#define TEGRA234_CLK_SPPLL1_VCO 445U
+/** @brief SPPLL1 SYNC_CLKOUTP/N differential output */
+#define TEGRA234_CLK_SPPLL1_CLKOUTPN 446U
+/** @brief SPPLL1 CLKOUT_DIVBY27P/N differential output */
+#define TEGRA234_CLK_SPPLL1_DIV27PN 447U
+/** @brief VPLL0 reference clock */
+#define TEGRA234_CLK_VPLL0_REF 448U
+/** @brief VPLL0 */
+#define TEGRA234_CLK_VPLL0 449U
+/** @brief VPLL1 */
+#define TEGRA234_CLK_VPLL1 450U
+/** @brief NVDISPLAY_P0_CLK reference select */
+#define TEGRA234_CLK_NVDISPLAY_P0_REF 451U
+/** @brief RG0_PCLK */
+#define TEGRA234_CLK_RG0 452U
+/** @brief RG1_PCLK */
+#define TEGRA234_CLK_RG1 453U
+/** @brief DISPPLL output */
+#define TEGRA234_CLK_DISPPLL 454U
+/** @brief DISPHUBPLL output */
+#define TEGRA234_CLK_DISPHUBPLL 455U
+/** @brief CLK_RST_CONTROLLER_DSI_LP_SWITCH_DIVIDER switch divider output (dsi_lp_clk) */
+#define TEGRA234_CLK_DSI_LP 456U
/** @brief CLK_RST_CONTROLLER_AZA2XBITCLK_OUT_SWITCH_DIVIDER switch divider output (aza_2xbitclk) */
#define TEGRA234_CLK_AZA_2XBIT 457U
/** @brief aza_2xbitclk / 2 (aza_bitclk) */
#define TEGRA234_CLK_AZA_BIT 458U
+/** @brief SWITCH_DSI_CORE_PIXEL_MISC_DSI_CORE_CLK_SRC switch output (dsi_core_clk) */
+#define TEGRA234_CLK_DSI_CORE 459U
+/** @brief Output of mux controlled by pkt_wr_fifo_signal from dsi (dsi_pixel_clk) */
+#define TEGRA234_CLK_DSI_PIXEL 460U
+/** @brief Output of mux controlled by disp_2clk_sor0_dp_sel (pre_sor0_clk) */
+#define TEGRA234_CLK_PRE_SOR0 461U
+/** @brief Output of mux controlled by disp_2clk_sor1_dp_sel (pre_sor1_clk) */
+#define TEGRA234_CLK_PRE_SOR1 462U
+/** @brief CLK_RST_CONTROLLER_LINK_REFCLK_CFG__0 output */
+#define TEGRA234_CLK_DP_LINK_REF 463U
+/** @brief Link clock input from DP macro brick PLL */
+#define TEGRA234_CLK_SOR_LINKA_INPUT 464U
+/** @brief SOR AFIFO clock output */
+#define TEGRA234_CLK_SOR_LINKA_AFIFO 465U
+/** @brief Monitored branch of linka_afifo_clk */
+#define TEGRA234_CLK_SOR_LINKA_AFIFO_M 466U
+/** @brief Monitored branch of rg0_pclk */
+#define TEGRA234_CLK_RG0_M 467U
+/** @brief Monitored branch of rg1_pclk */
+#define TEGRA234_CLK_RG1_M 468U
+/** @brief Monitored branch of sor0_clk */
+#define TEGRA234_CLK_SOR0_M 469U
+/** @brief Monitored branch of sor1_clk */
+#define TEGRA234_CLK_SOR1_M 470U
+/** @brief EMC PLLHUB output */
+#define TEGRA234_CLK_PLLHUB 471U
+/** @brief output of fixed (DIV2) MC HUB divider */
+#define TEGRA234_CLK_MCHUB 472U
+/** @brief output of divider controlled by EMC side A MC_EMC_SAFE_SAME_FREQ */
+#define TEGRA234_CLK_EMCSA_MC 473U
+/** @brief output of divider controlled by EMC side B MC_EMC_SAFE_SAME_FREQ */
+#define TEGRA234_CLK_EMCSB_MC 474U
+/** @brief output of divider controlled by EMC side C MC_EMC_SAFE_SAME_FREQ */
+#define TEGRA234_CLK_EMCSC_MC 475U
+/** @brief output of divider controlled by EMC side D MC_EMC_SAFE_SAME_FREQ */
+#define TEGRA234_CLK_EMCSD_MC 476U
+
+/** @} */
#endif
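
Several NAFLL CPU-cluster clock IDs above are intentionally defined twice: the legacy name (marked "TODO: remove") and the newer *_CORE/*_DSU name expand to the same value. A minimal consumer-side sketch of that equivalence (the static asserts are illustrative and not part of the header):

#include <dt-bindings/clock/tegra234-clock.h>

/* Both spellings resolve to the same clock ID (280U and 429U here), so
 * device trees and drivers can migrate to the new names incrementally. */
_Static_assert(TEGRA234_CLK_NAFLL_CLUSTER0 == TEGRA234_CLK_NAFLL_CLUSTER0_CORE,
	       "legacy clock alias keeps its value");
_Static_assert(TEGRA234_CLK_NAFLL_DSU0 == TEGRA234_CLK_NAFLL_CLUSTER0_DSU,
	       "legacy clock alias keeps its value");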
diff --git a/include/dt-bindings/firmware/imx/rsrc.h b/include/dt-bindings/firmware/imx/rsrc.h
index 1675de05ad33..1a8c025d77b8 100644
--- a/include/dt-bindings/firmware/imx/rsrc.h
+++ b/include/dt-bindings/firmware/imx/rsrc.h
@@ -13,30 +13,30 @@
* never be changed or removed (only added to at the end of the list).
*/
-#define IMX_SC_R_A53 0
-#define IMX_SC_R_A53_0 1
-#define IMX_SC_R_A53_1 2
-#define IMX_SC_R_A53_2 3
-#define IMX_SC_R_A53_3 4
-#define IMX_SC_R_A72 5
-#define IMX_SC_R_A72_0 6
-#define IMX_SC_R_A72_1 7
-#define IMX_SC_R_A72_2 8
-#define IMX_SC_R_A72_3 9
+#define IMX_SC_R_AP_0 0
+#define IMX_SC_R_AP_0_0 1
+#define IMX_SC_R_AP_0_1 2
+#define IMX_SC_R_AP_0_2 3
+#define IMX_SC_R_AP_0_3 4
+#define IMX_SC_R_AP_1 5
+#define IMX_SC_R_AP_1_0 6
+#define IMX_SC_R_AP_1_1 7
+#define IMX_SC_R_AP_1_2 8
+#define IMX_SC_R_AP_1_3 9
#define IMX_SC_R_CCI 10
#define IMX_SC_R_DB 11
#define IMX_SC_R_DRC_0 12
#define IMX_SC_R_DRC_1 13
#define IMX_SC_R_GIC_SMMU 14
-#define IMX_SC_R_IRQSTR_M4_0 15
-#define IMX_SC_R_IRQSTR_M4_1 16
-#define IMX_SC_R_SMMU 17
-#define IMX_SC_R_GIC 18
+#define IMX_SC_R_IRQSTR_MCU_0 15
+#define IMX_SC_R_IRQSTR_MCU_1 16
+#define IMX_SC_R_SMMU_0 17
+#define IMX_SC_R_GIC_0 18
#define IMX_SC_R_DC_0_BLIT0 19
#define IMX_SC_R_DC_0_BLIT1 20
#define IMX_SC_R_DC_0_BLIT2 21
#define IMX_SC_R_DC_0_BLIT_OUT 22
-#define IMX_SC_R_PERF 23
+#define IMX_SC_R_PERF_0 23
#define IMX_SC_R_USB_1_PHY 24
#define IMX_SC_R_DC_0_WARP 25
#define IMX_SC_R_V2X_MU_0 26
@@ -56,11 +56,14 @@
#define IMX_SC_R_V2X_MU_3 40
#define IMX_SC_R_V2X_MU_4 41
#define IMX_SC_R_DC_1_WARP 42
+#define IMX_SC_R_STM 43
#define IMX_SC_R_SECVIO 44
#define IMX_SC_R_DC_1_VIDEO0 45
#define IMX_SC_R_DC_1_VIDEO1 46
#define IMX_SC_R_DC_1_FRAC0 47
+#define IMX_SC_R_V2X 48
#define IMX_SC_R_DC_1 49
+#define IMX_SC_R_UNUSED14 50
#define IMX_SC_R_DC_1_PLL_0 51
#define IMX_SC_R_DC_1_PLL_1 52
#define IMX_SC_R_SPI_0 53
@@ -151,10 +154,10 @@
#define IMX_SC_R_DMA_1_CH29 137
#define IMX_SC_R_DMA_1_CH30 138
#define IMX_SC_R_DMA_1_CH31 139
-#define IMX_SC_R_UNUSED1 140
-#define IMX_SC_R_UNUSED2 141
-#define IMX_SC_R_UNUSED3 142
-#define IMX_SC_R_UNUSED4 143
+#define IMX_SC_R_V2X_PID0 140
+#define IMX_SC_R_V2X_PID1 141
+#define IMX_SC_R_V2X_PID2 142
+#define IMX_SC_R_V2X_PID3 143
#define IMX_SC_R_GPU_0_PID0 144
#define IMX_SC_R_GPU_0_PID1 145
#define IMX_SC_R_GPU_0_PID2 146
@@ -183,7 +186,7 @@
#define IMX_SC_R_PCIE_B 169
#define IMX_SC_R_SATA_0 170
#define IMX_SC_R_SERDES_1 171
-#define IMX_SC_R_HSIO_GPIO 172
+#define IMX_SC_R_HSIO_GPIO_0 172
#define IMX_SC_R_MATCH_15 173
#define IMX_SC_R_MATCH_16 174
#define IMX_SC_R_MATCH_17 175
@@ -250,15 +253,15 @@
#define IMX_SC_R_ROM_0 236
#define IMX_SC_R_FSPI_0 237
#define IMX_SC_R_FSPI_1 238
-#define IMX_SC_R_IEE 239
-#define IMX_SC_R_IEE_R0 240
-#define IMX_SC_R_IEE_R1 241
-#define IMX_SC_R_IEE_R2 242
-#define IMX_SC_R_IEE_R3 243
-#define IMX_SC_R_IEE_R4 244
-#define IMX_SC_R_IEE_R5 245
-#define IMX_SC_R_IEE_R6 246
-#define IMX_SC_R_IEE_R7 247
+#define IMX_SC_R_IEE_0 239
+#define IMX_SC_R_IEE_0_R0 240
+#define IMX_SC_R_IEE_0_R1 241
+#define IMX_SC_R_IEE_0_R2 242
+#define IMX_SC_R_IEE_0_R3 243
+#define IMX_SC_R_IEE_0_R4 244
+#define IMX_SC_R_IEE_0_R5 245
+#define IMX_SC_R_IEE_0_R6 246
+#define IMX_SC_R_IEE_0_R7 247
#define IMX_SC_R_SDHC_0 248
#define IMX_SC_R_SDHC_1 249
#define IMX_SC_R_SDHC_2 250
@@ -289,46 +292,50 @@
#define IMX_SC_R_LVDS_2_PWM_0 275
#define IMX_SC_R_LVDS_2_I2C_0 276
#define IMX_SC_R_LVDS_2_I2C_1 277
-#define IMX_SC_R_M4_0_PID0 278
-#define IMX_SC_R_M4_0_PID1 279
-#define IMX_SC_R_M4_0_PID2 280
-#define IMX_SC_R_M4_0_PID3 281
-#define IMX_SC_R_M4_0_PID4 282
-#define IMX_SC_R_M4_0_RGPIO 283
-#define IMX_SC_R_M4_0_SEMA42 284
-#define IMX_SC_R_M4_0_TPM 285
-#define IMX_SC_R_M4_0_PIT 286
-#define IMX_SC_R_M4_0_UART 287
-#define IMX_SC_R_M4_0_I2C 288
-#define IMX_SC_R_M4_0_INTMUX 289
-#define IMX_SC_R_M4_0_MU_0B 292
-#define IMX_SC_R_M4_0_MU_0A0 293
-#define IMX_SC_R_M4_0_MU_0A1 294
-#define IMX_SC_R_M4_0_MU_0A2 295
-#define IMX_SC_R_M4_0_MU_0A3 296
-#define IMX_SC_R_M4_0_MU_1A 297
-#define IMX_SC_R_M4_1_PID0 298
-#define IMX_SC_R_M4_1_PID1 299
-#define IMX_SC_R_M4_1_PID2 300
-#define IMX_SC_R_M4_1_PID3 301
-#define IMX_SC_R_M4_1_PID4 302
-#define IMX_SC_R_M4_1_RGPIO 303
-#define IMX_SC_R_M4_1_SEMA42 304
-#define IMX_SC_R_M4_1_TPM 305
-#define IMX_SC_R_M4_1_PIT 306
-#define IMX_SC_R_M4_1_UART 307
-#define IMX_SC_R_M4_1_I2C 308
-#define IMX_SC_R_M4_1_INTMUX 309
-#define IMX_SC_R_M4_1_MU_0B 312
-#define IMX_SC_R_M4_1_MU_0A0 313
-#define IMX_SC_R_M4_1_MU_0A1 314
-#define IMX_SC_R_M4_1_MU_0A2 315
-#define IMX_SC_R_M4_1_MU_0A3 316
-#define IMX_SC_R_M4_1_MU_1A 317
+#define IMX_SC_R_MCU_0_PID0 278
+#define IMX_SC_R_MCU_0_PID1 279
+#define IMX_SC_R_MCU_0_PID2 280
+#define IMX_SC_R_MCU_0_PID3 281
+#define IMX_SC_R_MCU_0_PID4 282
+#define IMX_SC_R_MCU_0_RGPIO 283
+#define IMX_SC_R_MCU_0_SEMA42 284
+#define IMX_SC_R_MCU_0_TPM 285
+#define IMX_SC_R_MCU_0_PIT 286
+#define IMX_SC_R_MCU_0_UART 287
+#define IMX_SC_R_MCU_0_I2C 288
+#define IMX_SC_R_MCU_0_INTMUX 289
+#define IMX_SC_R_ENET_0_A0 290
+#define IMX_SC_R_ENET_0_A1 291
+#define IMX_SC_R_MCU_0_MU_0B 292
+#define IMX_SC_R_MCU_0_MU_0A0 293
+#define IMX_SC_R_MCU_0_MU_0A1 294
+#define IMX_SC_R_MCU_0_MU_0A2 295
+#define IMX_SC_R_MCU_0_MU_0A3 296
+#define IMX_SC_R_MCU_0_MU_1A 297
+#define IMX_SC_R_MCU_1_PID0 298
+#define IMX_SC_R_MCU_1_PID1 299
+#define IMX_SC_R_MCU_1_PID2 300
+#define IMX_SC_R_MCU_1_PID3 301
+#define IMX_SC_R_MCU_1_PID4 302
+#define IMX_SC_R_MCU_1_RGPIO 303
+#define IMX_SC_R_MCU_1_SEMA42 304
+#define IMX_SC_R_MCU_1_TPM 305
+#define IMX_SC_R_MCU_1_PIT 306
+#define IMX_SC_R_MCU_1_UART 307
+#define IMX_SC_R_MCU_1_I2C 308
+#define IMX_SC_R_MCU_1_INTMUX 309
+#define IMX_SC_R_UNUSED17 310
+#define IMX_SC_R_UNUSED18 311
+#define IMX_SC_R_MCU_1_MU_0B 312
+#define IMX_SC_R_MCU_1_MU_0A0 313
+#define IMX_SC_R_MCU_1_MU_0A1 314
+#define IMX_SC_R_MCU_1_MU_0A2 315
+#define IMX_SC_R_MCU_1_MU_0A3 316
+#define IMX_SC_R_MCU_1_MU_1A 317
#define IMX_SC_R_SAI_0 318
#define IMX_SC_R_SAI_1 319
#define IMX_SC_R_SAI_2 320
-#define IMX_SC_R_IRQSTR_SCU2 321
+#define IMX_SC_R_IRQSTR_AP_0 321
#define IMX_SC_R_IRQSTR_DSP 322
#define IMX_SC_R_ELCDIF_PLL 323
#define IMX_SC_R_OCRAM 324
@@ -373,33 +380,33 @@
#define IMX_SC_R_VPU_PID5 363
#define IMX_SC_R_VPU_PID6 364
#define IMX_SC_R_VPU_PID7 365
-#define IMX_SC_R_VPU_UART 366
-#define IMX_SC_R_VPUCORE 367
-#define IMX_SC_R_VPUCORE_0 368
-#define IMX_SC_R_VPUCORE_1 369
-#define IMX_SC_R_VPUCORE_2 370
-#define IMX_SC_R_VPUCORE_3 371
+#define IMX_SC_R_ENET_0_A2 366
+#define IMX_SC_R_ENET_1_A0 367
+#define IMX_SC_R_ENET_1_A1 368
+#define IMX_SC_R_ENET_1_A2 369
+#define IMX_SC_R_ENET_1_A3 370
+#define IMX_SC_R_ENET_1_A4 371
#define IMX_SC_R_DMA_4_CH0 372
#define IMX_SC_R_DMA_4_CH1 373
#define IMX_SC_R_DMA_4_CH2 374
#define IMX_SC_R_DMA_4_CH3 375
#define IMX_SC_R_DMA_4_CH4 376
-#define IMX_SC_R_ISI_CH0 377
-#define IMX_SC_R_ISI_CH1 378
-#define IMX_SC_R_ISI_CH2 379
-#define IMX_SC_R_ISI_CH3 380
-#define IMX_SC_R_ISI_CH4 381
-#define IMX_SC_R_ISI_CH5 382
-#define IMX_SC_R_ISI_CH6 383
-#define IMX_SC_R_ISI_CH7 384
-#define IMX_SC_R_MJPEG_DEC_S0 385
-#define IMX_SC_R_MJPEG_DEC_S1 386
-#define IMX_SC_R_MJPEG_DEC_S2 387
-#define IMX_SC_R_MJPEG_DEC_S3 388
-#define IMX_SC_R_MJPEG_ENC_S0 389
-#define IMX_SC_R_MJPEG_ENC_S1 390
-#define IMX_SC_R_MJPEG_ENC_S2 391
-#define IMX_SC_R_MJPEG_ENC_S3 392
+#define IMX_SC_R_ISI_0_CH0 377
+#define IMX_SC_R_ISI_0_CH1 378
+#define IMX_SC_R_ISI_0_CH2 379
+#define IMX_SC_R_ISI_0_CH3 380
+#define IMX_SC_R_ISI_0_CH4 381
+#define IMX_SC_R_ISI_0_CH5 382
+#define IMX_SC_R_ISI_0_CH6 383
+#define IMX_SC_R_ISI_0_CH7 384
+#define IMX_SC_R_MJPEG_0_DEC_S0 385
+#define IMX_SC_R_MJPEG_0_DEC_S1 386
+#define IMX_SC_R_MJPEG_0_DEC_S2 387
+#define IMX_SC_R_MJPEG_0_DEC_S3 388
+#define IMX_SC_R_MJPEG_0_ENC_S0 389
+#define IMX_SC_R_MJPEG_0_ENC_S1 390
+#define IMX_SC_R_MJPEG_0_ENC_S2 391
+#define IMX_SC_R_MJPEG_0_ENC_S3 392
#define IMX_SC_R_MIPI_0 393
#define IMX_SC_R_MIPI_0_PWM_0 394
#define IMX_SC_R_MIPI_0_I2C_0 395
@@ -514,11 +521,11 @@
#define IMX_SC_R_SECO_MU_3 504
#define IMX_SC_R_SECO_MU_4 505
#define IMX_SC_R_HDMI_RX_PWM_0 506
-#define IMX_SC_R_A35 507
-#define IMX_SC_R_A35_0 508
-#define IMX_SC_R_A35_1 509
-#define IMX_SC_R_A35_2 510
-#define IMX_SC_R_A35_3 511
+#define IMX_SC_R_AP_2 507
+#define IMX_SC_R_AP_2_0 508
+#define IMX_SC_R_AP_2_1 509
+#define IMX_SC_R_AP_2_2 510
+#define IMX_SC_R_AP_2_3 511
#define IMX_SC_R_DSP 512
#define IMX_SC_R_DSP_RAM 513
#define IMX_SC_R_CAAM_JR1_OUT 514
@@ -539,8 +546,8 @@
#define IMX_SC_R_BOARD_R5 529
#define IMX_SC_R_BOARD_R6 530
#define IMX_SC_R_BOARD_R7 531
-#define IMX_SC_R_MJPEG_DEC_MP 532
-#define IMX_SC_R_MJPEG_ENC_MP 533
+#define IMX_SC_R_MJPEG_0_DEC_MP 532
+#define IMX_SC_R_MJPEG_0_ENC_MP 533
#define IMX_SC_R_VPU_TS_0 534
#define IMX_SC_R_VPU_MU_0 535
#define IMX_SC_R_VPU_MU_1 536
@@ -573,6 +580,105 @@
#define IMX_SC_PM_CLK_BYPASS 4 /* Bypass clock */
/*
+ * Compatibility defines for sc_rsrc_t
+ */
+#define IMX_SC_R_A35 IMX_SC_R_AP_2
+#define IMX_SC_R_A35_0 IMX_SC_R_AP_2_0
+#define IMX_SC_R_A35_1 IMX_SC_R_AP_2_1
+#define IMX_SC_R_A35_2 IMX_SC_R_AP_2_2
+#define IMX_SC_R_A35_3 IMX_SC_R_AP_2_3
+#define IMX_SC_R_A53 IMX_SC_R_AP_0
+#define IMX_SC_R_A53_0 IMX_SC_R_AP_0_0
+#define IMX_SC_R_A53_1 IMX_SC_R_AP_0_1
+#define IMX_SC_R_A53_2 IMX_SC_R_AP_0_2
+#define IMX_SC_R_A53_3 IMX_SC_R_AP_0_3
+#define IMX_SC_R_A72 IMX_SC_R_AP_1
+#define IMX_SC_R_A72_0 IMX_SC_R_AP_1_0
+#define IMX_SC_R_A72_1 IMX_SC_R_AP_1_1
+#define IMX_SC_R_A72_2 IMX_SC_R_AP_1_2
+#define IMX_SC_R_A72_3 IMX_SC_R_AP_1_3
+#define IMX_SC_R_GIC IMX_SC_R_GIC_0
+#define IMX_SC_R_HSIO_GPIO IMX_SC_R_HSIO_GPIO_0
+#define IMX_SC_R_IEE IMX_SC_R_IEE_0
+#define IMX_SC_R_IEE_R0 IMX_SC_R_IEE_0_R0
+#define IMX_SC_R_IEE_R1 IMX_SC_R_IEE_0_R1
+#define IMX_SC_R_IEE_R2 IMX_SC_R_IEE_0_R2
+#define IMX_SC_R_IEE_R3 IMX_SC_R_IEE_0_R3
+#define IMX_SC_R_IEE_R4 IMX_SC_R_IEE_0_R4
+#define IMX_SC_R_IEE_R5 IMX_SC_R_IEE_0_R5
+#define IMX_SC_R_IEE_R6 IMX_SC_R_IEE_0_R6
+#define IMX_SC_R_IEE_R7 IMX_SC_R_IEE_0_R7
+#define IMX_SC_R_IRQSTR_M4_0 IMX_SC_R_IRQSTR_MCU_0
+#define IMX_SC_R_IRQSTR_M4_1 IMX_SC_R_IRQSTR_MCU_1
+#define IMX_SC_R_IRQSTR_SCU2 IMX_SC_R_IRQSTR_AP_0
+#define IMX_SC_R_ISI_CH0 IMX_SC_R_ISI_0_CH0
+#define IMX_SC_R_ISI_CH1 IMX_SC_R_ISI_0_CH1
+#define IMX_SC_R_ISI_CH2 IMX_SC_R_ISI_0_CH2
+#define IMX_SC_R_ISI_CH3 IMX_SC_R_ISI_0_CH3
+#define IMX_SC_R_ISI_CH4 IMX_SC_R_ISI_0_CH4
+#define IMX_SC_R_ISI_CH5 IMX_SC_R_ISI_0_CH5
+#define IMX_SC_R_ISI_CH6 IMX_SC_R_ISI_0_CH6
+#define IMX_SC_R_ISI_CH7 IMX_SC_R_ISI_0_CH7
+#define IMX_SC_R_M4_0_I2C IMX_SC_R_MCU_0_I2C
+#define IMX_SC_R_M4_0_INTMUX IMX_SC_R_MCU_0_INTMUX
+#define IMX_SC_R_M4_0_MU_0A0 IMX_SC_R_MCU_0_MU_0A0
+#define IMX_SC_R_M4_0_MU_0A1 IMX_SC_R_MCU_0_MU_0A1
+#define IMX_SC_R_M4_0_MU_0A2 IMX_SC_R_MCU_0_MU_0A2
+#define IMX_SC_R_M4_0_MU_0A3 IMX_SC_R_MCU_0_MU_0A3
+#define IMX_SC_R_M4_0_MU_0B IMX_SC_R_MCU_0_MU_0B
+#define IMX_SC_R_M4_0_MU_1A IMX_SC_R_MCU_0_MU_1A
+#define IMX_SC_R_M4_0_PID0 IMX_SC_R_MCU_0_PID0
+#define IMX_SC_R_M4_0_PID1 IMX_SC_R_MCU_0_PID1
+#define IMX_SC_R_M4_0_PID2 IMX_SC_R_MCU_0_PID2
+#define IMX_SC_R_M4_0_PID3 IMX_SC_R_MCU_0_PID3
+#define IMX_SC_R_M4_0_PID4 IMX_SC_R_MCU_0_PID4
+#define IMX_SC_R_M4_0_PIT IMX_SC_R_MCU_0_PIT
+#define IMX_SC_R_M4_0_RGPIO IMX_SC_R_MCU_0_RGPIO
+#define IMX_SC_R_M4_0_SEMA42 IMX_SC_R_MCU_0_SEMA42
+#define IMX_SC_R_M4_0_TPM IMX_SC_R_MCU_0_TPM
+#define IMX_SC_R_M4_0_UART IMX_SC_R_MCU_0_UART
+#define IMX_SC_R_M4_1_I2C IMX_SC_R_MCU_1_I2C
+#define IMX_SC_R_M4_1_INTMUX IMX_SC_R_MCU_1_INTMUX
+#define IMX_SC_R_M4_1_MU_0A0 IMX_SC_R_MCU_1_MU_0A0
+#define IMX_SC_R_M4_1_MU_0A1 IMX_SC_R_MCU_1_MU_0A1
+#define IMX_SC_R_M4_1_MU_0A2 IMX_SC_R_MCU_1_MU_0A2
+#define IMX_SC_R_M4_1_MU_0A3 IMX_SC_R_MCU_1_MU_0A3
+#define IMX_SC_R_M4_1_MU_0B IMX_SC_R_MCU_1_MU_0B
+#define IMX_SC_R_M4_1_MU_1A IMX_SC_R_MCU_1_MU_1A
+#define IMX_SC_R_M4_1_PID0 IMX_SC_R_MCU_1_PID0
+#define IMX_SC_R_M4_1_PID1 IMX_SC_R_MCU_1_PID1
+#define IMX_SC_R_M4_1_PID2 IMX_SC_R_MCU_1_PID2
+#define IMX_SC_R_M4_1_PID3 IMX_SC_R_MCU_1_PID3
+#define IMX_SC_R_M4_1_PID4 IMX_SC_R_MCU_1_PID4
+#define IMX_SC_R_M4_1_PIT IMX_SC_R_MCU_1_PIT
+#define IMX_SC_R_M4_1_RGPIO IMX_SC_R_MCU_1_RGPIO
+#define IMX_SC_R_M4_1_SEMA42 IMX_SC_R_MCU_1_SEMA42
+#define IMX_SC_R_M4_1_TPM IMX_SC_R_MCU_1_TPM
+#define IMX_SC_R_M4_1_UART IMX_SC_R_MCU_1_UART
+#define IMX_SC_R_MJPEG_DEC_MP IMX_SC_R_MJPEG_0_DEC_MP
+#define IMX_SC_R_MJPEG_DEC_S0 IMX_SC_R_MJPEG_0_DEC_S0
+#define IMX_SC_R_MJPEG_DEC_S1 IMX_SC_R_MJPEG_0_DEC_S1
+#define IMX_SC_R_MJPEG_DEC_S2 IMX_SC_R_MJPEG_0_DEC_S2
+#define IMX_SC_R_MJPEG_DEC_S3 IMX_SC_R_MJPEG_0_DEC_S3
+#define IMX_SC_R_MJPEG_ENC_MP IMX_SC_R_MJPEG_0_ENC_MP
+#define IMX_SC_R_MJPEG_ENC_S0 IMX_SC_R_MJPEG_0_ENC_S0
+#define IMX_SC_R_MJPEG_ENC_S1 IMX_SC_R_MJPEG_0_ENC_S1
+#define IMX_SC_R_MJPEG_ENC_S2 IMX_SC_R_MJPEG_0_ENC_S2
+#define IMX_SC_R_MJPEG_ENC_S3 IMX_SC_R_MJPEG_0_ENC_S3
+#define IMX_SC_R_PERF IMX_SC_R_PERF_0
+#define IMX_SC_R_SMMU IMX_SC_R_SMMU_0
+#define IMX_SC_R_VPU_UART IMX_SC_R_ENET_0_A2
+#define IMX_SC_R_VPUCORE IMX_SC_R_ENET_1_A0
+#define IMX_SC_R_VPUCORE_0 IMX_SC_R_ENET_1_A1
+#define IMX_SC_R_VPUCORE_1 IMX_SC_R_ENET_1_A2
+#define IMX_SC_R_VPUCORE_2 IMX_SC_R_ENET_1_A3
+#define IMX_SC_R_VPUCORE_3 IMX_SC_R_ENET_1_A4
+#define IMX_SC_R_UNUSED1 IMX_SC_R_V2X_PID0
+#define IMX_SC_R_UNUSED2 IMX_SC_R_V2X_PID1
+#define IMX_SC_R_UNUSED3 IMX_SC_R_V2X_PID2
+#define IMX_SC_R_UNUSED4 IMX_SC_R_V2X_PID3
+
+/*
* Defines for SC CONTROL
*/
#define IMX_SC_C_TEMP 0
@@ -637,6 +743,10 @@
#define IMX_SC_C_INTF_SEL 59
#define IMX_SC_C_RXC_DLY 60
#define IMX_SC_C_TIMER_SEL 61
-#define IMX_SC_C_LAST 62
+#define IMX_SC_C_MISC0 62
+#define IMX_SC_C_MISC1 63
+#define IMX_SC_C_MISC2 64
+#define IMX_SC_C_MISC3 65
+#define IMX_SC_C_LAST 66
#endif /* __DT_BINDINGS_RSCRC_IMX_H */
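
The i.MX SCU resource renames above keep every numeric slot, and the compatibility block at the end of the header aliases each old name to its replacement. A small illustrative check (assumed consumer-side code, not part of the patch):

#include <dt-bindings/firmware/imx/rsrc.h>

/* Legacy spellings still compile and still select the same resource,
 * e.g. the Cortex-A53/M4 names now map onto the generic AP/MCU ones. */
_Static_assert(IMX_SC_R_A53_0 == IMX_SC_R_AP_0_0, "alias keeps the resource number");
_Static_assert(IMX_SC_R_M4_0_UART == IMX_SC_R_MCU_0_UART, "alias keeps the resource number");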
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
index 9426f27a1946..09fd169ad18e 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
@@ -6,62 +6,58 @@
#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H
#define _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H
-#ifndef PM8350_SID
-#define PM8350_SID 1
-#endif
-
/* ADC channels for PM8350_ADC for PMIC7 */
-#define PM8350_ADC7_REF_GND (PM8350_SID << 8 | 0x0)
-#define PM8350_ADC7_1P25VREF (PM8350_SID << 8 | 0x01)
-#define PM8350_ADC7_VREF_VADC (PM8350_SID << 8 | 0x02)
-#define PM8350_ADC7_DIE_TEMP (PM8350_SID << 8 | 0x03)
-
-#define PM8350_ADC7_AMUX_THM1 (PM8350_SID << 8 | 0x04)
-#define PM8350_ADC7_AMUX_THM2 (PM8350_SID << 8 | 0x05)
-#define PM8350_ADC7_AMUX_THM3 (PM8350_SID << 8 | 0x06)
-#define PM8350_ADC7_AMUX_THM4 (PM8350_SID << 8 | 0x07)
-#define PM8350_ADC7_AMUX_THM5 (PM8350_SID << 8 | 0x08)
-#define PM8350_ADC7_GPIO1 (PM8350_SID << 8 | 0x0a)
-#define PM8350_ADC7_GPIO2 (PM8350_SID << 8 | 0x0b)
-#define PM8350_ADC7_GPIO3 (PM8350_SID << 8 | 0x0c)
-#define PM8350_ADC7_GPIO4 (PM8350_SID << 8 | 0x0d)
+#define PM8350_ADC7_REF_GND(sid) ((sid) << 8 | 0x0)
+#define PM8350_ADC7_1P25VREF(sid) ((sid) << 8 | 0x01)
+#define PM8350_ADC7_VREF_VADC(sid) ((sid) << 8 | 0x02)
+#define PM8350_ADC7_DIE_TEMP(sid) ((sid) << 8 | 0x03)
+
+#define PM8350_ADC7_AMUX_THM1(sid) ((sid) << 8 | 0x04)
+#define PM8350_ADC7_AMUX_THM2(sid) ((sid) << 8 | 0x05)
+#define PM8350_ADC7_AMUX_THM3(sid) ((sid) << 8 | 0x06)
+#define PM8350_ADC7_AMUX_THM4(sid) ((sid) << 8 | 0x07)
+#define PM8350_ADC7_AMUX_THM5(sid) ((sid) << 8 | 0x08)
+#define PM8350_ADC7_GPIO1(sid) ((sid) << 8 | 0x0a)
+#define PM8350_ADC7_GPIO2(sid) ((sid) << 8 | 0x0b)
+#define PM8350_ADC7_GPIO3(sid) ((sid) << 8 | 0x0c)
+#define PM8350_ADC7_GPIO4(sid) ((sid) << 8 | 0x0d)
/* 30k pull-up1 */
-#define PM8350_ADC7_AMUX_THM1_30K_PU (PM8350_SID << 8 | 0x24)
-#define PM8350_ADC7_AMUX_THM2_30K_PU (PM8350_SID << 8 | 0x25)
-#define PM8350_ADC7_AMUX_THM3_30K_PU (PM8350_SID << 8 | 0x26)
-#define PM8350_ADC7_AMUX_THM4_30K_PU (PM8350_SID << 8 | 0x27)
-#define PM8350_ADC7_AMUX_THM5_30K_PU (PM8350_SID << 8 | 0x28)
-#define PM8350_ADC7_GPIO1_30K_PU (PM8350_SID << 8 | 0x2a)
-#define PM8350_ADC7_GPIO2_30K_PU (PM8350_SID << 8 | 0x2b)
-#define PM8350_ADC7_GPIO3_30K_PU (PM8350_SID << 8 | 0x2c)
-#define PM8350_ADC7_GPIO4_30K_PU (PM8350_SID << 8 | 0x2d)
+#define PM8350_ADC7_AMUX_THM1_30K_PU(sid) ((sid) << 8 | 0x24)
+#define PM8350_ADC7_AMUX_THM2_30K_PU(sid) ((sid) << 8 | 0x25)
+#define PM8350_ADC7_AMUX_THM3_30K_PU(sid) ((sid) << 8 | 0x26)
+#define PM8350_ADC7_AMUX_THM4_30K_PU(sid) ((sid) << 8 | 0x27)
+#define PM8350_ADC7_AMUX_THM5_30K_PU(sid) ((sid) << 8 | 0x28)
+#define PM8350_ADC7_GPIO1_30K_PU(sid) ((sid) << 8 | 0x2a)
+#define PM8350_ADC7_GPIO2_30K_PU(sid) ((sid) << 8 | 0x2b)
+#define PM8350_ADC7_GPIO3_30K_PU(sid) ((sid) << 8 | 0x2c)
+#define PM8350_ADC7_GPIO4_30K_PU(sid) ((sid) << 8 | 0x2d)
/* 100k pull-up2 */
-#define PM8350_ADC7_AMUX_THM1_100K_PU (PM8350_SID << 8 | 0x44)
-#define PM8350_ADC7_AMUX_THM2_100K_PU (PM8350_SID << 8 | 0x45)
-#define PM8350_ADC7_AMUX_THM3_100K_PU (PM8350_SID << 8 | 0x46)
-#define PM8350_ADC7_AMUX_THM4_100K_PU (PM8350_SID << 8 | 0x47)
-#define PM8350_ADC7_AMUX_THM5_100K_PU (PM8350_SID << 8 | 0x48)
-#define PM8350_ADC7_GPIO1_100K_PU (PM8350_SID << 8 | 0x4a)
-#define PM8350_ADC7_GPIO2_100K_PU (PM8350_SID << 8 | 0x4b)
-#define PM8350_ADC7_GPIO3_100K_PU (PM8350_SID << 8 | 0x4c)
-#define PM8350_ADC7_GPIO4_100K_PU (PM8350_SID << 8 | 0x4d)
+#define PM8350_ADC7_AMUX_THM1_100K_PU(sid) ((sid) << 8 | 0x44)
+#define PM8350_ADC7_AMUX_THM2_100K_PU(sid) ((sid) << 8 | 0x45)
+#define PM8350_ADC7_AMUX_THM3_100K_PU(sid) ((sid) << 8 | 0x46)
+#define PM8350_ADC7_AMUX_THM4_100K_PU(sid) ((sid) << 8 | 0x47)
+#define PM8350_ADC7_AMUX_THM5_100K_PU(sid) ((sid) << 8 | 0x48)
+#define PM8350_ADC7_GPIO1_100K_PU(sid) ((sid) << 8 | 0x4a)
+#define PM8350_ADC7_GPIO2_100K_PU(sid) ((sid) << 8 | 0x4b)
+#define PM8350_ADC7_GPIO3_100K_PU(sid) ((sid) << 8 | 0x4c)
+#define PM8350_ADC7_GPIO4_100K_PU(sid) ((sid) << 8 | 0x4d)
/* 400k pull-up3 */
-#define PM8350_ADC7_AMUX_THM1_400K_PU (PM8350_SID << 8 | 0x64)
-#define PM8350_ADC7_AMUX_THM2_400K_PU (PM8350_SID << 8 | 0x65)
-#define PM8350_ADC7_AMUX_THM3_400K_PU (PM8350_SID << 8 | 0x66)
-#define PM8350_ADC7_AMUX_THM4_400K_PU (PM8350_SID << 8 | 0x67)
-#define PM8350_ADC7_AMUX_THM5_400K_PU (PM8350_SID << 8 | 0x68)
-#define PM8350_ADC7_GPIO1_400K_PU (PM8350_SID << 8 | 0x6a)
-#define PM8350_ADC7_GPIO2_400K_PU (PM8350_SID << 8 | 0x6b)
-#define PM8350_ADC7_GPIO3_400K_PU (PM8350_SID << 8 | 0x6c)
-#define PM8350_ADC7_GPIO4_400K_PU (PM8350_SID << 8 | 0x6d)
+#define PM8350_ADC7_AMUX_THM1_400K_PU(sid) ((sid) << 8 | 0x64)
+#define PM8350_ADC7_AMUX_THM2_400K_PU(sid) ((sid) << 8 | 0x65)
+#define PM8350_ADC7_AMUX_THM3_400K_PU(sid) ((sid) << 8 | 0x66)
+#define PM8350_ADC7_AMUX_THM4_400K_PU(sid) ((sid) << 8 | 0x67)
+#define PM8350_ADC7_AMUX_THM5_400K_PU(sid) ((sid) << 8 | 0x68)
+#define PM8350_ADC7_GPIO1_400K_PU(sid) ((sid) << 8 | 0x6a)
+#define PM8350_ADC7_GPIO2_400K_PU(sid) ((sid) << 8 | 0x6b)
+#define PM8350_ADC7_GPIO3_400K_PU(sid) ((sid) << 8 | 0x6c)
+#define PM8350_ADC7_GPIO4_400K_PU(sid) ((sid) << 8 | 0x6d)
/* 1/3 Divider */
-#define PM8350_ADC7_GPIO4_DIV3 (PM8350_SID << 8 | 0x8d)
+#define PM8350_ADC7_GPIO4_DIV3(sid) ((sid) << 8 | 0x8d)
-#define PM8350_ADC7_VPH_PWR (PM8350_SID << 8 | 0x8e)
+#define PM8350_ADC7_VPH_PWR(sid) ((sid) << 8 | 0x8e)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H */
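
The PM8350 ADC7 channel macros now take the PMIC's SPMI slave ID (sid) as a parameter instead of a compile-time PM8350_SID, so one header serves boards that place the PMIC at different slave IDs. A sketch of the resulting encoding, derived from the shifts above:

#include <dt-bindings/iio/qcom,spmi-adc7-pm8350.h>

/* The SID lands in bits [15:8] and the ADC channel in bits [7:0];
 * with sid = 1 this reproduces the old fixed-SID value 0x144. */
_Static_assert(PM8350_ADC7_AMUX_THM1_100K_PU(1) == 0x144,
	       "sid is encoded in the upper byte of the channel number");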
diff --git a/include/dt-bindings/memory/tegra234-mc.h b/include/dt-bindings/memory/tegra234-mc.h
index bd71cc1d7990..347e55e89a2a 100644
--- a/include/dt-bindings/memory/tegra234-mc.h
+++ b/include/dt-bindings/memory/tegra234-mc.h
@@ -8,31 +8,158 @@
#define TEGRA234_SID_INVALID 0x00
#define TEGRA234_SID_PASSTHROUGH 0x7f
+/* ISO stream IDs */
+#define TEGRA234_SID_ISO_NVDISPLAY 0x01
+#define TEGRA234_SID_ISO_VI 0x02
+#define TEGRA234_SID_ISO_VIFALC 0x03
+#define TEGRA234_SID_ISO_VI2 0x04
+#define TEGRA234_SID_ISO_VI2FALC 0x05
+#define TEGRA234_SID_ISO_VI_VM2 0x06
+#define TEGRA234_SID_ISO_VI2_VM2 0x07
+
/* NISO0 stream IDs */
-#define TEGRA234_SID_APE 0x02
-#define TEGRA234_SID_HDA 0x03
-#define TEGRA234_SID_GPCDMA 0x04
-#define TEGRA234_SID_MGBE 0x06
-#define TEGRA234_SID_PCIE0 0x12
-#define TEGRA234_SID_PCIE4 0x13
-#define TEGRA234_SID_PCIE5 0x14
-#define TEGRA234_SID_PCIE6 0x15
-#define TEGRA234_SID_PCIE9 0x1f
-#define TEGRA234_SID_MGBE_VF1 0x49
-#define TEGRA234_SID_MGBE_VF2 0x4a
-#define TEGRA234_SID_MGBE_VF3 0x4b
+#define TEGRA234_SID_AON 0x01
+#define TEGRA234_SID_APE 0x02
+#define TEGRA234_SID_HDA 0x03
+#define TEGRA234_SID_GPCDMA 0x04
+#define TEGRA234_SID_ETR 0x05
+#define TEGRA234_SID_MGBE 0x06
+#define TEGRA234_SID_NVDISPLAY 0x07
+#define TEGRA234_SID_DCE 0x08
+#define TEGRA234_SID_PSC 0x09
+#define TEGRA234_SID_RCE 0x0a
+#define TEGRA234_SID_SCE 0x0b
+#define TEGRA234_SID_UFSHC 0x0c
+#define TEGRA234_SID_APE_1 0x0d
+#define TEGRA234_SID_GPCDMA_1 0x0e
+#define TEGRA234_SID_GPCDMA_2 0x0f
+#define TEGRA234_SID_GPCDMA_3 0x10
+#define TEGRA234_SID_GPCDMA_4 0x11
+#define TEGRA234_SID_PCIE0 0x12
+#define TEGRA234_SID_PCIE4 0x13
+#define TEGRA234_SID_PCIE5 0x14
+#define TEGRA234_SID_PCIE6 0x15
+#define TEGRA234_SID_RCE_VM2 0x16
+#define TEGRA234_SID_RCE_SERVER 0x17
+#define TEGRA234_SID_SMMU_TEST 0x18
+#define TEGRA234_SID_UFS_1 0x19
+#define TEGRA234_SID_UFS_2 0x1a
+#define TEGRA234_SID_UFS_3 0x1b
+#define TEGRA234_SID_UFS_4 0x1c
+#define TEGRA234_SID_UFS_5 0x1d
+#define TEGRA234_SID_UFS_6 0x1e
+#define TEGRA234_SID_PCIE9 0x1f
+#define TEGRA234_SID_VSE_GPCDMA_VM0 0x20
+#define TEGRA234_SID_VSE_GPCDMA_VM1 0x21
+#define TEGRA234_SID_VSE_GPCDMA_VM2 0x22
+#define TEGRA234_SID_NVDLA1 0x23
+#define TEGRA234_SID_NVENC 0x24
+#define TEGRA234_SID_NVJPG1 0x25
+#define TEGRA234_SID_OFA 0x26
+#define TEGRA234_SID_MGBE_VF1 0x49
+#define TEGRA234_SID_MGBE_VF2 0x4a
+#define TEGRA234_SID_MGBE_VF3 0x4b
+#define TEGRA234_SID_MGBE_VF4 0x4c
+#define TEGRA234_SID_MGBE_VF5 0x4d
+#define TEGRA234_SID_MGBE_VF6 0x4e
+#define TEGRA234_SID_MGBE_VF7 0x4f
+#define TEGRA234_SID_MGBE_VF8 0x50
+#define TEGRA234_SID_MGBE_VF9 0x51
+#define TEGRA234_SID_MGBE_VF10 0x52
+#define TEGRA234_SID_MGBE_VF11 0x53
+#define TEGRA234_SID_MGBE_VF12 0x54
+#define TEGRA234_SID_MGBE_VF13 0x55
+#define TEGRA234_SID_MGBE_VF14 0x56
+#define TEGRA234_SID_MGBE_VF15 0x57
+#define TEGRA234_SID_MGBE_VF16 0x58
+#define TEGRA234_SID_MGBE_VF17 0x59
+#define TEGRA234_SID_MGBE_VF18 0x5a
+#define TEGRA234_SID_MGBE_VF19 0x5b
+#define TEGRA234_SID_MGBE_VF20 0x5c
+#define TEGRA234_SID_APE_2 0x5e
+#define TEGRA234_SID_APE_3 0x5f
+#define TEGRA234_SID_UFS_7 0x60
+#define TEGRA234_SID_UFS_8 0x61
+#define TEGRA234_SID_UFS_9 0x62
+#define TEGRA234_SID_UFS_10 0x63
+#define TEGRA234_SID_UFS_11 0x64
+#define TEGRA234_SID_UFS_12 0x65
+#define TEGRA234_SID_UFS_13 0x66
+#define TEGRA234_SID_UFS_14 0x67
+#define TEGRA234_SID_UFS_15 0x68
+#define TEGRA234_SID_UFS_16 0x69
+#define TEGRA234_SID_UFS_17 0x6a
+#define TEGRA234_SID_UFS_18 0x6b
+#define TEGRA234_SID_UFS_19 0x6c
+#define TEGRA234_SID_UFS_20 0x6d
+#define TEGRA234_SID_GPCDMA_5 0x6e
+#define TEGRA234_SID_GPCDMA_6 0x6f
+#define TEGRA234_SID_GPCDMA_7 0x70
+#define TEGRA234_SID_GPCDMA_8 0x71
+#define TEGRA234_SID_GPCDMA_9 0x72
/* NISO1 stream IDs */
-#define TEGRA234_SID_SDMMC4 0x02
-#define TEGRA234_SID_PCIE1 0x05
-#define TEGRA234_SID_PCIE2 0x06
-#define TEGRA234_SID_PCIE3 0x07
-#define TEGRA234_SID_PCIE7 0x08
-#define TEGRA234_SID_PCIE8 0x09
-#define TEGRA234_SID_PCIE10 0x0b
-#define TEGRA234_SID_BPMP 0x10
-#define TEGRA234_SID_HOST1X 0x27
-#define TEGRA234_SID_VIC 0x34
+#define TEGRA234_SID_SDMMC1A 0x01
+#define TEGRA234_SID_SDMMC4 0x02
+#define TEGRA234_SID_EQOS 0x03
+#define TEGRA234_SID_HWMP_PMA 0x04
+#define TEGRA234_SID_PCIE1 0x05
+#define TEGRA234_SID_PCIE2 0x06
+#define TEGRA234_SID_PCIE3 0x07
+#define TEGRA234_SID_PCIE7 0x08
+#define TEGRA234_SID_PCIE8 0x09
+#define TEGRA234_SID_PCIE10 0x0b
+#define TEGRA234_SID_QSPI0 0x0c
+#define TEGRA234_SID_QSPI1 0x0d
+#define TEGRA234_SID_XUSB_HOST 0x0e
+#define TEGRA234_SID_XUSB_DEV 0x0f
+#define TEGRA234_SID_BPMP 0x10
+#define TEGRA234_SID_FSI 0x11
+#define TEGRA234_SID_PVA0_VM0 0x12
+#define TEGRA234_SID_PVA0_VM1 0x13
+#define TEGRA234_SID_PVA0_VM2 0x14
+#define TEGRA234_SID_PVA0_VM3 0x15
+#define TEGRA234_SID_PVA0_VM4 0x16
+#define TEGRA234_SID_PVA0_VM5 0x17
+#define TEGRA234_SID_PVA0_VM6 0x18
+#define TEGRA234_SID_PVA0_VM7 0x19
+#define TEGRA234_SID_XUSB_VF0 0x1a
+#define TEGRA234_SID_XUSB_VF1 0x1b
+#define TEGRA234_SID_XUSB_VF2 0x1c
+#define TEGRA234_SID_XUSB_VF3 0x1d
+#define TEGRA234_SID_EQOS_VF1 0x1e
+#define TEGRA234_SID_EQOS_VF2 0x1f
+#define TEGRA234_SID_EQOS_VF3 0x20
+#define TEGRA234_SID_EQOS_VF4 0x21
+#define TEGRA234_SID_ISP_VM2 0x22
+#define TEGRA234_SID_HOST1X 0x27
+#define TEGRA234_SID_ISP 0x28
+#define TEGRA234_SID_NVDEC 0x29
+#define TEGRA234_SID_NVJPG 0x2a
+#define TEGRA234_SID_NVDLA0 0x2b
+#define TEGRA234_SID_PVA0 0x2c
+#define TEGRA234_SID_SES_SE0 0x2d
+#define TEGRA234_SID_SES_SE1 0x2e
+#define TEGRA234_SID_SES_SE2 0x2f
+#define TEGRA234_SID_SEU1_SE0 0x30
+#define TEGRA234_SID_SEU1_SE1 0x31
+#define TEGRA234_SID_SEU1_SE2 0x32
+#define TEGRA234_SID_TSEC 0x33
+#define TEGRA234_SID_VIC 0x34
+#define TEGRA234_SID_HC_VM0 0x3d
+#define TEGRA234_SID_HC_VM1 0x3e
+#define TEGRA234_SID_HC_VM2 0x3f
+#define TEGRA234_SID_HC_VM3 0x40
+#define TEGRA234_SID_HC_VM4 0x41
+#define TEGRA234_SID_HC_VM5 0x42
+#define TEGRA234_SID_HC_VM6 0x43
+#define TEGRA234_SID_HC_VM7 0x44
+#define TEGRA234_SID_SE_VM0 0x45
+#define TEGRA234_SID_SE_VM1 0x46
+#define TEGRA234_SID_SE_VM2 0x47
+#define TEGRA234_SID_ISPFALC 0x48
+#define TEGRA234_SID_NISO1_SMMU_TEST 0x49
+#define TEGRA234_SID_TSEC_VM0 0x4a
/* Shared stream IDs */
#define TEGRA234_SID_HOST1X_CTX0 0x35
@@ -48,21 +175,81 @@
* memory client IDs
*/
+/* Misses from System Memory Management Unit (SMMU) Page Table Cache (PTC) */
+#define TEGRA234_MEMORY_CLIENT_PTCR 0x00
+/* MSS internal memqual MIU7 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU7R 0x01
+/* MSS internal memqual MIU7 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU7W 0x02
+/* MSS internal memqual MIU8 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU8R 0x03
+/* MSS internal memqual MIU8 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU8W 0x04
+/* MSS internal memqual MIU9 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU9R 0x05
+/* MSS internal memqual MIU9 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU9W 0x06
+/* MSS internal memqual MIU10 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU10R 0x07
+/* MSS internal memqual MIU10 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU10W 0x08
+/* MSS internal memqual MIU11 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU11R 0x09
+/* MSS internal memqual MIU11 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU11W 0x0a
+/* MSS internal memqual MIU12 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU12R 0x0b
+/* MSS internal memqual MIU12 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU12W 0x0c
+/* MSS internal memqual MIU13 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU13R 0x0d
+/* MSS internal memqual MIU13 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU13W 0x0e
+#define TEGRA234_MEMORY_CLIENT_NVL5RHP 0x13
+#define TEGRA234_MEMORY_CLIENT_NVL5R 0x14
/* High-definition audio (HDA) read clients */
#define TEGRA234_MEMORY_CLIENT_HDAR 0x15
+/* Host channel data read clients */
#define TEGRA234_MEMORY_CLIENT_HOST1XDMAR 0x16
+#define TEGRA234_MEMORY_CLIENT_NVL5W 0x17
+#define TEGRA234_MEMORY_CLIENT_NVL6RHP 0x18
+#define TEGRA234_MEMORY_CLIENT_NVL6R 0x19
+#define TEGRA234_MEMORY_CLIENT_NVL6W 0x1a
+#define TEGRA234_MEMORY_CLIENT_NVL7RHP 0x1b
+#define TEGRA234_MEMORY_CLIENT_NVENCSRD 0x1c
+#define TEGRA234_MEMORY_CLIENT_NVL7R 0x1d
+#define TEGRA234_MEMORY_CLIENT_NVL7W 0x1e
+#define TEGRA234_MEMORY_CLIENT_NVL8RHP 0x20
+#define TEGRA234_MEMORY_CLIENT_NVL8R 0x21
+#define TEGRA234_MEMORY_CLIENT_NVL8W 0x22
+#define TEGRA234_MEMORY_CLIENT_NVL9RHP 0x23
+#define TEGRA234_MEMORY_CLIENT_NVL9R 0x24
+#define TEGRA234_MEMORY_CLIENT_NVL9W 0x25
/* PCIE6 read clients */
#define TEGRA234_MEMORY_CLIENT_PCIE6AR 0x28
/* PCIE6 write clients */
#define TEGRA234_MEMORY_CLIENT_PCIE6AW 0x29
/* PCIE7 read clients */
#define TEGRA234_MEMORY_CLIENT_PCIE7AR 0x2a
+#define TEGRA234_MEMORY_CLIENT_NVENCSWR 0x2b
+/* DLA0ARDB read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0RDB 0x2c
+/* DLA0ARDB1 read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0RDB1 0x2d
+/* DLA0 writes */
+#define TEGRA234_MEMORY_CLIENT_DLA0WRB 0x2e
+/* DLA1ARDB read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1RDB 0x2f
/* PCIE7 write clients */
#define TEGRA234_MEMORY_CLIENT_PCIE7AW 0x30
/* PCIE8 read clients */
#define TEGRA234_MEMORY_CLIENT_PCIE8AR 0x32
/* High-definition audio (HDA) write clients */
#define TEGRA234_MEMORY_CLIENT_HDAW 0x35
+/* Writes from Cortex-A9 4 CPU cores via the L2 cache */
+#define TEGRA234_MEMORY_CLIENT_MPCOREW 0x39
+/* OFAA client */
+#define TEGRA234_MEMORY_CLIENT_OFAR1 0x3a
/* PCIE8 write clients */
#define TEGRA234_MEMORY_CLIENT_PCIE8AW 0x3b
/* PCIE9 read clients */
@@ -75,10 +262,32 @@
#define TEGRA234_MEMORY_CLIENT_PCIE10AR 0x3f
/* PCIE10 write clients */
#define TEGRA234_MEMORY_CLIENT_PCIE10AW 0x40
+/* ISP read client for Crossbar A */
+#define TEGRA234_MEMORY_CLIENT_ISPRA 0x44
+/* ISP read client 1 for Crossbar A */
+#define TEGRA234_MEMORY_CLIENT_ISPFALR 0x45
+/* ISP Write client for Crossbar A */
+#define TEGRA234_MEMORY_CLIENT_ISPWA 0x46
+/* ISP Write client for Crossbar B */
+#define TEGRA234_MEMORY_CLIENT_ISPWB 0x47
/* PCIE10r1 read clients */
#define TEGRA234_MEMORY_CLIENT_PCIE10AR1 0x48
/* PCIE7r1 read clients */
#define TEGRA234_MEMORY_CLIENT_PCIE7AR1 0x49
+/* XUSB_HOST read clients */
+#define TEGRA234_MEMORY_CLIENT_XUSB_HOSTR 0x4a
+/* XUSB_HOST write clients */
+#define TEGRA234_MEMORY_CLIENT_XUSB_HOSTW 0x4b
+/* XUSB read clients */
+#define TEGRA234_MEMORY_CLIENT_XUSB_DEVR 0x4c
+/* XUSB_DEV write clients */
+#define TEGRA234_MEMORY_CLIENT_XUSB_DEVW 0x4d
+/* TSEC Memory Return Data Client Description */
+#define TEGRA234_MEMORY_CLIENT_TSECSRD 0x54
+/* TSEC Memory Write Client Description */
+#define TEGRA234_MEMORY_CLIENT_TSECSWR 0x55
+/* XSPI writes */
+#define TEGRA234_MEMORY_CLIENT_XSPI1W 0x56
/* MGBE0 read client */
#define TEGRA234_MEMORY_CLIENT_MGBEARD 0x58
/* MGBEB read client */
@@ -89,18 +298,86 @@
#define TEGRA234_MEMORY_CLIENT_MGBEDRD 0x5b
/* MGBE0 write client */
#define TEGRA234_MEMORY_CLIENT_MGBEAWR 0x5c
+/* OFAA client */
+#define TEGRA234_MEMORY_CLIENT_OFAR 0x5d
+/* OFAA writes */
+#define TEGRA234_MEMORY_CLIENT_OFAW 0x5e
/* MGBEB write client */
#define TEGRA234_MEMORY_CLIENT_MGBEBWR 0x5f
+/* sdmmca memory read client */
+#define TEGRA234_MEMORY_CLIENT_SDMMCRA 0x60
/* MGBEC write client */
#define TEGRA234_MEMORY_CLIENT_MGBECWR 0x61
/* sdmmcd memory read client */
#define TEGRA234_MEMORY_CLIENT_SDMMCRAB 0x63
+/* sdmmca memory write client */
+#define TEGRA234_MEMORY_CLIENT_SDMMCWA 0x64
/* MGBED write client */
#define TEGRA234_MEMORY_CLIENT_MGBEDWR 0x65
/* sdmmcd memory write client */
#define TEGRA234_MEMORY_CLIENT_SDMMCWAB 0x67
+/* SE Memory Return Data Client Description */
+#define TEGRA234_MEMORY_CLIENT_SEU1RD 0x68
+/* SE Memory Write Client Description */
+#define TEGRA234_MEMORY_CLIENT_SUE1WR 0x69
#define TEGRA234_MEMORY_CLIENT_VICSRD 0x6c
#define TEGRA234_MEMORY_CLIENT_VICSWR 0x6d
+/* DLA1ARDB1 read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1RDB1 0x6e
+/* DLA1 writes */
+#define TEGRA234_MEMORY_CLIENT_DLA1WRB 0x6f
+/* VI FALCON read clients */
+#define TEGRA234_MEMORY_CLIENT_VI2FALR 0x71
+/* VI Write client */
+#define TEGRA234_MEMORY_CLIENT_VI2W 0x70
+/* VI Write client */
+#define TEGRA234_MEMORY_CLIENT_VIW 0x72
+/* NISO display read client */
+#define TEGRA234_MEMORY_CLIENT_NVDISPNISOR 0x73
+/* NVDISPNISO writes */
+#define TEGRA234_MEMORY_CLIENT_NVDISPNISOW 0x74
+/* XSPI client */
+#define TEGRA234_MEMORY_CLIENT_XSPI0R 0x75
+/* XSPI writes */
+#define TEGRA234_MEMORY_CLIENT_XSPI0W 0x76
+/* XSPI client */
+#define TEGRA234_MEMORY_CLIENT_XSPI1R 0x77
+#define TEGRA234_MEMORY_CLIENT_NVDECSRD 0x78
+#define TEGRA234_MEMORY_CLIENT_NVDECSWR 0x79
+/* Audio Processing (APE) engine read clients */
+#define TEGRA234_MEMORY_CLIENT_APER 0x7a
+/* Audio Processing (APE) engine write clients */
+#define TEGRA234_MEMORY_CLIENT_APEW 0x7b
+/* VI2FAL writes */
+#define TEGRA234_MEMORY_CLIENT_VI2FALW 0x7c
+#define TEGRA234_MEMORY_CLIENT_NVJPGSRD 0x7e
+#define TEGRA234_MEMORY_CLIENT_NVJPGSWR 0x7f
+/* SE Memory Return Data Client Description */
+#define TEGRA234_MEMORY_CLIENT_SESRD 0x80
+/* SE Memory Write Client Description */
+#define TEGRA234_MEMORY_CLIENT_SESWR 0x81
+/* AXI AP and DFD-AUX0/1 read clients; both share the same interface on the MSS */
+#define TEGRA234_MEMORY_CLIENT_AXIAPR 0x82
+/* AXI AP and DFD-AUX0/1 write clients; both share the same interface on the MSS */
+#define TEGRA234_MEMORY_CLIENT_AXIAPW 0x83
+/* ETR read clients */
+#define TEGRA234_MEMORY_CLIENT_ETRR 0x84
+/* ETR write clients */
+#define TEGRA234_MEMORY_CLIENT_ETRW 0x85
+/* AXI Switch read client */
+#define TEGRA234_MEMORY_CLIENT_AXISR 0x8c
+/* AXI Switch write client */
+#define TEGRA234_MEMORY_CLIENT_AXISW 0x8d
+/* EQOS read client */
+#define TEGRA234_MEMORY_CLIENT_EQOSR 0x8e
+/* EQOS write client */
+#define TEGRA234_MEMORY_CLIENT_EQOSW 0x8f
+/* UFSHC read client */
+#define TEGRA234_MEMORY_CLIENT_UFSHCR 0x90
+/* UFSHC write client */
+#define TEGRA234_MEMORY_CLIENT_UFSHCW 0x91
+/* NVDISPLAY read client */
+#define TEGRA234_MEMORY_CLIENT_NVDISPLAYR 0x92
/* BPMP read client */
#define TEGRA234_MEMORY_CLIENT_BPMPR 0x93
/* BPMP write client */
@@ -109,10 +386,97 @@
#define TEGRA234_MEMORY_CLIENT_BPMPDMAR 0x95
/* BPMPDMA write client */
#define TEGRA234_MEMORY_CLIENT_BPMPDMAW 0x96
+/* AON read client */
+#define TEGRA234_MEMORY_CLIENT_AONR 0x97
+/* AON write client */
+#define TEGRA234_MEMORY_CLIENT_AONW 0x98
+/* AONDMA read client */
+#define TEGRA234_MEMORY_CLIENT_AONDMAR 0x99
+/* AONDMA write client */
+#define TEGRA234_MEMORY_CLIENT_AONDMAW 0x9a
+/* SCE read client */
+#define TEGRA234_MEMORY_CLIENT_SCER 0x9b
+/* SCE write client */
+#define TEGRA234_MEMORY_CLIENT_SCEW 0x9c
+/* SCEDMA read client */
+#define TEGRA234_MEMORY_CLIENT_SCEDMAR 0x9d
+/* SCEDMA write client */
+#define TEGRA234_MEMORY_CLIENT_SCEDMAW 0x9e
/* APEDMA read client */
#define TEGRA234_MEMORY_CLIENT_APEDMAR 0x9f
/* APEDMA write client */
#define TEGRA234_MEMORY_CLIENT_APEDMAW 0xa0
+/* NVDISPLAY read client instance 2 */
+#define TEGRA234_MEMORY_CLIENT_NVDISPLAYR1 0xa1
+#define TEGRA234_MEMORY_CLIENT_VICSRD1 0xa2
+/* MSS internal memqual MIU0 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU0R 0xa6
+/* MSS internal memqual MIU0 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU0W 0xa7
+/* MSS internal memqual MIU1 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU1R 0xa8
+/* MSS internal memqual MIU1 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU1W 0xa9
+/* MSS internal memqual MIU2 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU2R 0xae
+/* MSS internal memqual MIU2 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU2W 0xaf
+/* MSS internal memqual MIU3 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU3R 0xb0
+/* MSS internal memqual MIU3 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU3W 0xb1
+/* MSS internal memqual MIU4 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU4R 0xb2
+/* MSS internal memqual MIU4 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU4W 0xb3
+#define TEGRA234_MEMORY_CLIENT_DPMUR 0xb4
+#define TEGRA234_MEMORY_CLIENT_DPMUW 0xb5
+#define TEGRA234_MEMORY_CLIENT_NVL0R 0xb6
+#define TEGRA234_MEMORY_CLIENT_NVL0W 0xb7
+#define TEGRA234_MEMORY_CLIENT_NVL1R 0xb8
+#define TEGRA234_MEMORY_CLIENT_NVL1W 0xb9
+#define TEGRA234_MEMORY_CLIENT_NVL2R 0xba
+#define TEGRA234_MEMORY_CLIENT_NVL2W 0xbb
+/* VI FALCON read clients */
+#define TEGRA234_MEMORY_CLIENT_VIFALR 0xbc
+/* VIFAL write clients */
+#define TEGRA234_MEMORY_CLIENT_VIFALW 0xbd
+/* DLA0ARDA read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0RDA 0xbe
+/* DLA0 Falcon read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0FALRDB 0xbf
+/* DLA0 write clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0WRA 0xc0
+/* DLA0 write clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0FALWRB 0xc1
+/* DLA1ARDA read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1RDA 0xc2
+/* DLA1 Falcon read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1FALRDB 0xc3
+/* DLA1 write clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1WRA 0xc4
+/* DLA1 write clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1FALWRB 0xc5
+/* PVA0RDA read clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0RDA 0xc6
+/* PVA0RDB read clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0RDB 0xc7
+/* PVA0RDC read clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0RDC 0xc8
+/* PVA0WRA write clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0WRA 0xc9
+/* PVA0WRB write clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0WRB 0xca
+/* PVA0WRC write clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0WRC 0xcb
+/* RCE read client */
+#define TEGRA234_MEMORY_CLIENT_RCER 0xd2
+/* RCE write client */
+#define TEGRA234_MEMORY_CLIENT_RCEW 0xd3
+/* RCEDMA read client */
+#define TEGRA234_MEMORY_CLIENT_RCEDMAR 0xd4
+/* RCEDMA write client */
+#define TEGRA234_MEMORY_CLIENT_RCEDMAW 0xd5
/* PCIE0 read clients */
#define TEGRA234_MEMORY_CLIENT_PCIE0R 0xd8
/* PCIE0 write clients */
@@ -137,7 +501,39 @@
#define TEGRA234_MEMORY_CLIENT_PCIE5R 0xe2
/* PCIE5 write clients */
#define TEGRA234_MEMORY_CLIENT_PCIE5W 0xe3
+/* ISP Falcon write client */
+#define TEGRA234_MEMORY_CLIENT_ISPFALW 0xe4
+#define TEGRA234_MEMORY_CLIENT_NVL3R 0xe5
+#define TEGRA234_MEMORY_CLIENT_NVL3W 0xe6
+#define TEGRA234_MEMORY_CLIENT_NVL4R 0xe7
+#define TEGRA234_MEMORY_CLIENT_NVL4W 0xe8
+/* DLA0ARDA1 read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0RDA1 0xe9
+/* DLA1ARDA1 read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1RDA1 0xea
+/* PVA0RDA1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0RDA1 0xeb
+/* PVA0RDB1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0RDB1 0xec
/* PCIE5r1 read clients */
#define TEGRA234_MEMORY_CLIENT_PCIE5R1 0xef
+#define TEGRA234_MEMORY_CLIENT_NVENCSRD1 0xf0
+/* ISP read client for Crossbar A */
+#define TEGRA234_MEMORY_CLIENT_ISPRA1 0xf2
+#define TEGRA234_MEMORY_CLIENT_NVL0RHP 0xf4
+#define TEGRA234_MEMORY_CLIENT_NVL1RHP 0xf5
+#define TEGRA234_MEMORY_CLIENT_NVL2RHP 0xf6
+#define TEGRA234_MEMORY_CLIENT_NVL3RHP 0xf7
+#define TEGRA234_MEMORY_CLIENT_NVL4RHP 0xf8
+/* MSS internal memqual MIU5 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU5R 0xfc
+/* MSS internal memqual MIU5 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU5W 0xfd
+/* MSS internal memqual MIU6 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU6R 0xfe
+/* MSS internal memqual MIU6 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU6W 0xff
+#define TEGRA234_MEMORY_CLIENT_NVJPG1SRD 0x123
+#define TEGRA234_MEMORY_CLIENT_NVJPG1SWR 0x124
#endif
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index f5f82dde7399..1e19e258a74d 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -4,6 +4,16 @@
#ifndef _DT_BINDINGS_POWER_QCOM_RPMPD_H
#define _DT_BINDINGS_POWER_QCOM_RPMPD_H
+/* SDM670 Power Domain Indexes */
+#define SDM670_MX 0
+#define SDM670_MX_AO 1
+#define SDM670_CX 2
+#define SDM670_CX_AO 3
+#define SDM670_LMX 4
+#define SDM670_LCX 5
+#define SDM670_GFX 6
+#define SDM670_MSS 7
+
/* SDM845 Power Domain Indexes */
#define SDM845_EBI 0
#define SDM845_MX 1
@@ -103,6 +113,28 @@
#define SM8450_MXC_AO 11
#define SM8450_MSS 12
+/* SM8550 Power Domain Indexes */
+#define SM8550_CX 0
+#define SM8550_CX_AO 1
+#define SM8550_EBI 2
+#define SM8550_GFX 3
+#define SM8550_LCX 4
+#define SM8550_LMX 5
+#define SM8550_MMCX 6
+#define SM8550_MMCX_AO 7
+#define SM8550_MX 8
+#define SM8550_MX_AO 9
+#define SM8550_MXC 10
+#define SM8550_MXC_AO 11
+#define SM8550_MSS 12
+#define SM8550_NSP 13
+
+/* QDU1000/QRU1000 Power Domain Indexes */
+#define QDU1000_EBI 0
+#define QDU1000_MSS 1
+#define QDU1000_CX 2
+#define QDU1000_MX 3
+
/* SC7180 Power Domain Indexes */
#define SC7180_CX 0
#define SC7180_CX_AO 1
@@ -274,6 +306,16 @@
#define SDM660_SSCMX 8
#define SDM660_SSCMX_VFL 9
+/* SM4250 Power Domains */
+#define SM4250_VDDCX 0
+#define SM4250_VDDCX_AO 1
+#define SM4250_VDDCX_VFL 2
+#define SM4250_VDDMX 3
+#define SM4250_VDDMX_AO 4
+#define SM4250_VDDMX_VFL 5
+#define SM4250_VDD_LPI_CX 6
+#define SM4250_VDD_LPI_MX 7
+
/* SM6115 Power Domains */
#define SM6115_VDDCX 0
#define SM6115_VDDCX_AO 1
diff --git a/include/dt-bindings/power/tegra234-powergate.h b/include/dt-bindings/power/tegra234-powergate.h
index ae9286cef85c..b0fec2ddec84 100644
--- a/include/dt-bindings/power/tegra234-powergate.h
+++ b/include/dt-bindings/power/tegra234-powergate.h
@@ -4,6 +4,7 @@
#ifndef __ABI_MACH_T234_POWERGATE_T234_H_
#define __ABI_MACH_T234_POWERGATE_T234_H_
+#define TEGRA234_POWER_DOMAIN_OFA 1U
#define TEGRA234_POWER_DOMAIN_AUD 2U
#define TEGRA234_POWER_DOMAIN_DISP 3U
#define TEGRA234_POWER_DOMAIN_PCIEX8A 5U
@@ -11,6 +12,9 @@
#define TEGRA234_POWER_DOMAIN_PCIEX4BA 7U
#define TEGRA234_POWER_DOMAIN_PCIEX4BB 8U
#define TEGRA234_POWER_DOMAIN_PCIEX1A 9U
+#define TEGRA234_POWER_DOMAIN_XUSBA 10U
+#define TEGRA234_POWER_DOMAIN_XUSBB 11U
+#define TEGRA234_POWER_DOMAIN_XUSBC 12U
#define TEGRA234_POWER_DOMAIN_PCIEX4CA 13U
#define TEGRA234_POWER_DOMAIN_PCIEX4CB 14U
#define TEGRA234_POWER_DOMAIN_PCIEX4CC 15U
@@ -19,6 +23,17 @@
#define TEGRA234_POWER_DOMAIN_MGBEB 18U
#define TEGRA234_POWER_DOMAIN_MGBEC 19U
#define TEGRA234_POWER_DOMAIN_MGBED 20U
+#define TEGRA234_POWER_DOMAIN_ISPA 22U
+#define TEGRA234_POWER_DOMAIN_NVDEC 23U
+#define TEGRA234_POWER_DOMAIN_NVJPGA 24U
+#define TEGRA234_POWER_DOMAIN_NVENC 25U
+#define TEGRA234_POWER_DOMAIN_VI 28U
#define TEGRA234_POWER_DOMAIN_VIC 29U
+#define TEGRA234_POWER_DOMAIN_PVA 30U
+#define TEGRA234_POWER_DOMAIN_DLAA 32U
+#define TEGRA234_POWER_DOMAIN_DLAB 33U
+#define TEGRA234_POWER_DOMAIN_CV 34U
+#define TEGRA234_POWER_DOMAIN_GPU 35U
+#define TEGRA234_POWER_DOMAIN_NVJPGB 36U
#endif
diff --git a/include/dt-bindings/reset/tegra234-reset.h b/include/dt-bindings/reset/tegra234-reset.h
index d48d22b2bc7f..85cc423a7bdf 100644
--- a/include/dt-bindings/reset/tegra234-reset.h
+++ b/include/dt-bindings/reset/tegra234-reset.h
@@ -10,14 +10,29 @@
* @brief Identifiers for Resets controllable by firmware
* @{
*/
+#define TEGRA234_RESET_ACTMON 1U
+#define TEGRA234_RESET_ADSP_ALL 2U
+#define TEGRA234_RESET_DSI_CORE 3U
+#define TEGRA234_RESET_CAN1 4U
+#define TEGRA234_RESET_CAN2 5U
+#define TEGRA234_RESET_DLA0 6U
+#define TEGRA234_RESET_DLA1 7U
+#define TEGRA234_RESET_DPAUX 8U
+#define TEGRA234_RESET_OFA 9U
+#define TEGRA234_RESET_NVJPG1 10U
#define TEGRA234_RESET_PEX1_CORE_6 11U
#define TEGRA234_RESET_PEX1_CORE_6_APB 12U
#define TEGRA234_RESET_PEX1_COMMON_APB 13U
#define TEGRA234_RESET_PEX2_CORE_7 14U
#define TEGRA234_RESET_PEX2_CORE_7_APB 15U
+#define TEGRA234_RESET_NVDISPLAY 16U
+#define TEGRA234_RESET_EQOS 17U
#define TEGRA234_RESET_GPCDMA 18U
+#define TEGRA234_RESET_GPU 19U
#define TEGRA234_RESET_HDA 20U
#define TEGRA234_RESET_HDACODEC 21U
+#define TEGRA234_RESET_EQOS_MACSEC 22U
+#define TEGRA234_RESET_EQOS_MACSEC_SECURE 23U
#define TEGRA234_RESET_I2C1 24U
#define TEGRA234_RESET_PEX2_CORE_8 25U
#define TEGRA234_RESET_PEX2_CORE_8_APB 26U
@@ -30,15 +45,36 @@
#define TEGRA234_RESET_I2C7 33U
#define TEGRA234_RESET_I2C8 34U
#define TEGRA234_RESET_I2C9 35U
+#define TEGRA234_RESET_ISP 36U
+#define TEGRA234_RESET_MIPI_CAL 37U
+#define TEGRA234_RESET_MPHY_CLK_CTL 38U
+#define TEGRA234_RESET_MPHY_L0_RX 39U
+#define TEGRA234_RESET_MPHY_L0_TX 40U
+#define TEGRA234_RESET_MPHY_L1_RX 41U
+#define TEGRA234_RESET_MPHY_L1_TX 42U
+#define TEGRA234_RESET_NVCSI 43U
+#define TEGRA234_RESET_NVDEC 44U
#define TEGRA234_RESET_MGBE0_PCS 45U
#define TEGRA234_RESET_MGBE0_MAC 46U
+#define TEGRA234_RESET_MGBE0_MACSEC 47U
+#define TEGRA234_RESET_MGBE0_MACSEC_SECURE 48U
#define TEGRA234_RESET_MGBE1_PCS 49U
#define TEGRA234_RESET_MGBE1_MAC 50U
+#define TEGRA234_RESET_MGBE1_MACSEC 51U
+#define TEGRA234_RESET_MGBE1_MACSEC_SECURE 52U
#define TEGRA234_RESET_MGBE2_PCS 53U
#define TEGRA234_RESET_MGBE2_MAC 54U
+#define TEGRA234_RESET_MGBE2_MACSEC 55U
#define TEGRA234_RESET_PEX2_CORE_10 56U
#define TEGRA234_RESET_PEX2_CORE_10_APB 57U
#define TEGRA234_RESET_PEX2_COMMON_APB 58U
+#define TEGRA234_RESET_NVENC 59U
+#define TEGRA234_RESET_MGBE2_MACSEC_SECURE 60U
+#define TEGRA234_RESET_NVJPG 61U
+#define TEGRA234_RESET_LA 64U
+#define TEGRA234_RESET_HWPM 65U
+#define TEGRA234_RESET_PVA0_ALL 66U
+#define TEGRA234_RESET_CEC 67U
#define TEGRA234_RESET_PWM1 68U
#define TEGRA234_RESET_PWM2 69U
#define TEGRA234_RESET_PWM3 70U
@@ -49,11 +85,43 @@
#define TEGRA234_RESET_PWM8 75U
#define TEGRA234_RESET_QSPI0 76U
#define TEGRA234_RESET_QSPI1 77U
+#define TEGRA234_RESET_I2S7 78U
+#define TEGRA234_RESET_I2S8 79U
+#define TEGRA234_RESET_SCE_ALL 80U
+#define TEGRA234_RESET_RCE_ALL 81U
+#define TEGRA234_RESET_SDMMC1 82U
+#define TEGRA234_RESET_RSVD_83 83U
+#define TEGRA234_RESET_RSVD_84 84U
#define TEGRA234_RESET_SDMMC4 85U
#define TEGRA234_RESET_MGBE3_PCS 87U
#define TEGRA234_RESET_MGBE3_MAC 88U
+#define TEGRA234_RESET_MGBE3_MACSEC 89U
+#define TEGRA234_RESET_MGBE3_MACSEC_SECURE 90U
+#define TEGRA234_RESET_SPI1 91U
+#define TEGRA234_RESET_SPI2 92U
+#define TEGRA234_RESET_SPI3 93U
+#define TEGRA234_RESET_SPI4 94U
+#define TEGRA234_RESET_TACH0 95U
+#define TEGRA234_RESET_TACH1 96U
+#define TEGRA234_RESET_SPI5 97U
+#define TEGRA234_RESET_TSEC 98U
+#define TEGRA234_RESET_UARTI 99U
#define TEGRA234_RESET_UARTA 100U
-#define TEGRA234_RESET_VIC 113U
+#define TEGRA234_RESET_UARTB 101U
+#define TEGRA234_RESET_UARTC 102U
+#define TEGRA234_RESET_UARTD 103U
+#define TEGRA234_RESET_UARTE 104U
+#define TEGRA234_RESET_UARTF 105U
+#define TEGRA234_RESET_UARTJ 106U
+#define TEGRA234_RESET_UARTH 107U
+#define TEGRA234_RESET_UFSHC 108U
+#define TEGRA234_RESET_UFSHC_AXI_M 109U
+#define TEGRA234_RESET_UFSHC_LP_SEQ 110U
+#define TEGRA234_RESET_RSVD_111 111U
+#define TEGRA234_RESET_VI 112U
+#define TEGRA234_RESET_VIC 113U
+#define TEGRA234_RESET_XUSB_PADCTL 114U
+#define TEGRA234_RESET_VI2 115U
#define TEGRA234_RESET_PEX0_CORE_0 116U
#define TEGRA234_RESET_PEX0_CORE_1 117U
#define TEGRA234_RESET_PEX0_CORE_2 118U
@@ -65,8 +133,49 @@
#define TEGRA234_RESET_PEX0_CORE_3_APB 124U
#define TEGRA234_RESET_PEX0_CORE_4_APB 125U
#define TEGRA234_RESET_PEX0_COMMON_APB 126U
+#define TEGRA234_RESET_RSVD_127 127U
+#define TEGRA234_RESET_NVHS_UPHY_PLL1 128U
#define TEGRA234_RESET_PEX1_CORE_5 129U
#define TEGRA234_RESET_PEX1_CORE_5_APB 130U
+#define TEGRA234_RESET_GBE_UPHY 131U
+#define TEGRA234_RESET_GBE_UPHY_PM 132U
+#define TEGRA234_RESET_NVHS_UPHY 133U
+#define TEGRA234_RESET_NVHS_UPHY_PLL0 134U
+#define TEGRA234_RESET_NVHS_UPHY_L0 135U
+#define TEGRA234_RESET_NVHS_UPHY_L1 136U
+#define TEGRA234_RESET_NVHS_UPHY_L2 137U
+#define TEGRA234_RESET_NVHS_UPHY_L3 138U
+#define TEGRA234_RESET_NVHS_UPHY_L4 139U
+#define TEGRA234_RESET_NVHS_UPHY_L5 140U
+#define TEGRA234_RESET_NVHS_UPHY_L6 141U
+#define TEGRA234_RESET_NVHS_UPHY_L7 142U
+#define TEGRA234_RESET_NVHS_UPHY_PM 143U
+#define TEGRA234_RESET_DMIC5 144U
+#define TEGRA234_RESET_APE 145U
+#define TEGRA234_RESET_PEX_USB_UPHY 146U
+#define TEGRA234_RESET_PEX_USB_UPHY_L0 147U
+#define TEGRA234_RESET_PEX_USB_UPHY_L1 148U
+#define TEGRA234_RESET_PEX_USB_UPHY_L2 149U
+#define TEGRA234_RESET_PEX_USB_UPHY_L3 150U
+#define TEGRA234_RESET_PEX_USB_UPHY_L4 151U
+#define TEGRA234_RESET_PEX_USB_UPHY_L5 152U
+#define TEGRA234_RESET_PEX_USB_UPHY_L6 153U
+#define TEGRA234_RESET_PEX_USB_UPHY_L7 154U
+#define TEGRA234_RESET_PEX_USB_UPHY_PLL0 159U
+#define TEGRA234_RESET_PEX_USB_UPHY_PLL1 160U
+#define TEGRA234_RESET_PEX_USB_UPHY_PLL2 161U
+#define TEGRA234_RESET_PEX_USB_UPHY_PLL3 162U
+#define TEGRA234_RESET_GBE_UPHY_L0 163U
+#define TEGRA234_RESET_GBE_UPHY_L1 164U
+#define TEGRA234_RESET_GBE_UPHY_L2 165U
+#define TEGRA234_RESET_GBE_UPHY_L3 166U
+#define TEGRA234_RESET_GBE_UPHY_L4 167U
+#define TEGRA234_RESET_GBE_UPHY_L5 168U
+#define TEGRA234_RESET_GBE_UPHY_L6 169U
+#define TEGRA234_RESET_GBE_UPHY_L7 170U
+#define TEGRA234_RESET_GBE_UPHY_PLL0 171U
+#define TEGRA234_RESET_GBE_UPHY_PLL1 172U
+#define TEGRA234_RESET_GBE_UPHY_PLL2 173U
/** @} */
diff --git a/include/kunit/assert.h b/include/kunit/assert.h
index ace3de8d1ee7..24c2b9fa61e8 100644
--- a/include/kunit/assert.h
+++ b/include/kunit/assert.h
@@ -91,19 +91,6 @@ void kunit_unary_assert_format(const struct kunit_assert *assert,
struct string_stream *stream);
/**
- * KUNIT_INIT_UNARY_ASSERT_STRUCT() - Initializes &struct kunit_unary_assert.
- * @cond: A string representation of the expression asserted true or false.
- * @expect_true: True if of type KUNIT_{EXPECT|ASSERT}_TRUE, false otherwise.
- *
- * Initializes a &struct kunit_unary_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
- */
-#define KUNIT_INIT_UNARY_ASSERT_STRUCT(cond, expect_true) { \
- .condition = cond, \
- .expected_true = expect_true \
-}
-
-/**
* struct kunit_ptr_not_err_assert - An expectation/assertion that a pointer is
* not NULL and not a -errno.
* @assert: The parent of this type.
@@ -124,20 +111,6 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
struct string_stream *stream);
/**
- * KUNIT_INIT_PTR_NOT_ERR_ASSERT_STRUCT() - Initializes a
- * &struct kunit_ptr_not_err_assert.
- * @txt: A string representation of the expression passed to the expectation.
- * @val: The actual evaluated pointer value of the expression.
- *
- * Initializes a &struct kunit_ptr_not_err_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
- */
-#define KUNIT_INIT_PTR_NOT_ERR_STRUCT(txt, val) { \
- .text = txt, \
- .value = val \
-}
-
-/**
* struct kunit_binary_assert_text - holds strings for &struct
* kunit_binary_assert and friends to try and make the structs smaller.
* @operation: A string representation of the comparison operator (e.g. "==").
@@ -174,27 +147,6 @@ void kunit_binary_assert_format(const struct kunit_assert *assert,
struct string_stream *stream);
/**
- * KUNIT_INIT_BINARY_ASSERT_STRUCT() - Initializes a binary assert like
- * kunit_binary_assert, kunit_binary_ptr_assert, etc.
- *
- * @text_: Pointer to a kunit_binary_assert_text.
- * @left_val: The actual evaluated value of the expression in the left slot.
- * @right_val: The actual evaluated value of the expression in the right slot.
- *
- * Initializes a binary assert like kunit_binary_assert,
- * kunit_binary_ptr_assert, etc. This relies on these structs having the same
- * fields but with different types for left_val/right_val.
- * This is ultimately used by binary assertion macros like KUNIT_EXPECT_EQ, etc.
- */
-#define KUNIT_INIT_BINARY_ASSERT_STRUCT(text_, \
- left_val, \
- right_val) { \
- .text = text_, \
- .left_value = left_val, \
- .right_value = right_val \
-}
-
-/**
* struct kunit_binary_ptr_assert - An expectation/assertion that compares two
* pointer values (for example, KUNIT_EXPECT_PTR_EQ(test, foo, bar)).
* @assert: The parent of this type.
@@ -240,4 +192,30 @@ void kunit_binary_str_assert_format(const struct kunit_assert *assert,
const struct va_format *message,
struct string_stream *stream);
+/**
+ * struct kunit_mem_assert - An expectation/assertion that compares two
+ * memory blocks.
+ * @assert: The parent of this type.
+ * @text: Holds the textual representations of the operands and comparator.
+ * @left_value: The actual evaluated value of the expression in the left slot.
+ * @right_value: The actual evaluated value of the expression in the right slot.
+ * @size: Size of the memory block analysed in bytes.
+ *
+ * Represents an expectation/assertion that compares two memory blocks. For
+ * example, to expect that the first three bytes of foo are equal to the
+ * first three bytes of bar, you can use the expectation
+ * KUNIT_EXPECT_MEMEQ(test, foo, bar, 3);
+ */
+struct kunit_mem_assert {
+ struct kunit_assert assert;
+ const struct kunit_binary_assert_text *text;
+ const void *left_value;
+ const void *right_value;
+ const size_t size;
+};
+
+void kunit_mem_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream);
+
#endif /* _KUNIT_ASSERT_H */
diff --git a/include/kunit/test-bug.h b/include/kunit/test-bug.h
index 5fc58081d511..c1b2e14eab64 100644
--- a/include/kunit/test-bug.h
+++ b/include/kunit/test-bug.h
@@ -9,16 +9,63 @@
#ifndef _KUNIT_TEST_BUG_H
#define _KUNIT_TEST_BUG_H
-#define kunit_fail_current_test(fmt, ...) \
- __kunit_fail_current_test(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
-
#if IS_BUILTIN(CONFIG_KUNIT)
+#include <linux/jump_label.h> /* For static branch */
+#include <linux/sched.h>
+
+/* Static key if KUnit is running any tests. */
+DECLARE_STATIC_KEY_FALSE(kunit_running);
+
+/**
+ * kunit_get_current_test() - Return a pointer to the currently running
+ * KUnit test.
+ *
+ * If a KUnit test is running in the current task, returns a pointer to its
+ * associated struct kunit. This pointer can then be passed to any KUnit
+ * function or assertion. If no test is running (or a test is running in a
+ * different task), returns NULL.
+ *
+ * This function is safe to call even when KUnit is disabled. If CONFIG_KUNIT
+ * is not enabled, it will compile down to nothing and will return quickly if
+ * no test is running.
+ */
+static inline struct kunit *kunit_get_current_test(void)
+{
+ if (!static_branch_unlikely(&kunit_running))
+ return NULL;
+
+ return current->kunit_test;
+}
+
+
+/**
+ * kunit_fail_current_test() - If a KUnit test is running, fail it.
+ *
+ * If a KUnit test is running in the current task, mark that test as failed.
+ *
+ * This macro will only work if KUnit is built-in (though the tests
+ * themselves can be modules). Otherwise, it compiles down to nothing.
+ */
+#define kunit_fail_current_test(fmt, ...) do { \
+ if (static_branch_unlikely(&kunit_running)) { \
+ __kunit_fail_current_test(__FILE__, __LINE__, \
+ fmt, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+
extern __printf(3, 4) void __kunit_fail_current_test(const char *file, int line,
const char *fmt, ...);
#else
+static inline struct kunit *kunit_get_current_test(void) { return NULL; }
+
+/* We define this with an empty helper function so format string warnings work */
+#define kunit_fail_current_test(fmt, ...) \
+ __kunit_fail_current_test(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
+
static inline __printf(3, 4) void __kunit_fail_current_test(const char *file, int line,
const char *fmt, ...)
{
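A minimal sketch of how the new kunit_get_current_test() accessor is meant to be used from non-test code: the hook example_check() and its logic are hypothetical; only kunit_get_current_test() and kunit_info() are taken from the KUnit API.

#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/errno.h>

/* Hypothetical production-code hook: behaves normally unless a KUnit test
 * is running in the current task, in which case it also logs to that test.
 */
static int example_check(int value)
{
	struct kunit *test = kunit_get_current_test();

	if (test)
		kunit_info(test, "example_check() called with %d\n", value);

	return value > 0 ? 0 : -EINVAL;
}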
diff --git a/include/kunit/test.h b/include/kunit/test.h
index b1ab6b32216d..87ea90576b50 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -16,6 +16,7 @@
#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/jump_label.h>
#include <linux/kconfig.h>
#include <linux/kref.h>
#include <linux/list.h>
@@ -27,6 +28,9 @@
#include <asm/rwonce.h>
+/* Static key: true if any KUnit tests are currently running */
+DECLARE_STATIC_KEY_FALSE(kunit_running);
+
struct kunit;
/* Size of log associated with test. */
@@ -515,22 +519,25 @@ void kunit_do_failed_assertion(struct kunit *test,
fmt, \
##__VA_ARGS__)
+/* Helper to safely pass around an initializer list to other macros. */
+#define KUNIT_INIT_ASSERT(initializers...) { initializers }
+
#define KUNIT_UNARY_ASSERTION(test, \
assert_type, \
- condition, \
- expected_true, \
+ condition_, \
+ expected_true_, \
fmt, \
...) \
do { \
- if (likely(!!(condition) == !!expected_true)) \
+ if (likely(!!(condition_) == !!expected_true_)) \
break; \
\
_KUNIT_FAILED(test, \
assert_type, \
kunit_unary_assert, \
kunit_unary_assert_format, \
- KUNIT_INIT_UNARY_ASSERT_STRUCT(#condition, \
- expected_true), \
+ KUNIT_INIT_ASSERT(.condition = #condition_, \
+ .expected_true = expected_true_), \
fmt, \
##__VA_ARGS__); \
} while (0)
@@ -590,9 +597,9 @@ do { \
assert_type, \
assert_class, \
format_func, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT(&__text, \
- __left, \
- __right), \
+ KUNIT_INIT_ASSERT(.text = &__text, \
+ .left_value = __left, \
+ .right_value = __right), \
fmt, \
##__VA_ARGS__); \
} while (0)
@@ -651,9 +658,42 @@ do { \
assert_type, \
kunit_binary_str_assert, \
kunit_binary_str_assert_format, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT(&__text, \
- __left, \
- __right), \
+ KUNIT_INIT_ASSERT(.text = &__text, \
+ .left_value = __left, \
+ .right_value = __right), \
+ fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define KUNIT_MEM_ASSERTION(test, \
+ assert_type, \
+ left, \
+ op, \
+ right, \
+ size_, \
+ fmt, \
+ ...) \
+do { \
+ const void *__left = (left); \
+ const void *__right = (right); \
+ const size_t __size = (size_); \
+ static const struct kunit_binary_assert_text __text = { \
+ .operation = #op, \
+ .left_text = #left, \
+ .right_text = #right, \
+ }; \
+ \
+ if (likely(memcmp(__left, __right, __size) op 0)) \
+ break; \
+ \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_mem_assert, \
+ kunit_mem_assert_format, \
+ KUNIT_INIT_ASSERT(.text = &__text, \
+ .left_value = __left, \
+ .right_value = __right, \
+ .size = __size), \
fmt, \
##__VA_ARGS__); \
} while (0)
@@ -673,7 +713,7 @@ do { \
assert_type, \
kunit_ptr_not_err_assert, \
kunit_ptr_not_err_assert_format, \
- KUNIT_INIT_PTR_NOT_ERR_STRUCT(#ptr, __ptr), \
+ KUNIT_INIT_ASSERT(.text = #ptr, .value = __ptr), \
fmt, \
##__VA_ARGS__); \
} while (0)
@@ -929,6 +969,60 @@ do { \
##__VA_ARGS__)
/**
+ * KUNIT_EXPECT_MEMEQ() - Expects that the first @size bytes of @left and @right are equal.
+ * @test: The test context object.
+ * @left: An arbitrary expression that evaluates to a pointer to a memory
+ *        block of at least @size bytes.
+ * @right: An arbitrary expression that evaluates to a pointer to a memory
+ *        block of at least @size bytes.
+ * @size: Number of bytes compared.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are
+ * equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, !memcmp((@left), (@right), (@size))). See
+ * KUNIT_EXPECT_TRUE() for more information.
+ *
+ * Although this expectation works for any memory block, it is not recommended
+ * for comparing more structured data, such as structs. This expectation is
+ * recommended for comparing, for example, data arrays.
+ */
+#define KUNIT_EXPECT_MEMEQ(test, left, right, size) \
+ KUNIT_EXPECT_MEMEQ_MSG(test, left, right, size, NULL)
+
+#define KUNIT_EXPECT_MEMEQ_MSG(test, left, right, size, fmt, ...) \
+ KUNIT_MEM_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, ==, right, \
+ size, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_MEMNEQ() - Expects that the first @size bytes of @left and @right are not equal.
+ * @test: The test context object.
+ * @left: An arbitrary expression that evaluates to the specified size.
+ * @right: An arbitrary expression that evaluates to the specified size.
+ * @size: Number of bytes compared.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are
+ * not equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, memcmp((@left), (@right), (@size))). See
+ * KUNIT_EXPECT_TRUE() for more information.
+ *
+ * Although this expectation works for any memory block, it is not recommended
+ * for comparing more structured data, such as structs. This expectation is
+ * recommended for comparing, for example, data arrays.
+ */
+#define KUNIT_EXPECT_MEMNEQ(test, left, right, size) \
+ KUNIT_EXPECT_MEMNEQ_MSG(test, left, right, size, NULL)
+
+#define KUNIT_EXPECT_MEMNEQ_MSG(test, left, right, size, fmt, ...) \
+ KUNIT_MEM_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, !=, right, \
+ size, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
* KUNIT_EXPECT_NULL() - Expects that @ptr is null.
* @test: The test context object.
* @ptr: an arbitrary pointer.
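A minimal sketch of a KUnit test exercising the new memory-block expectations; the suite and case names are made up, and only KUNIT_EXPECT_MEMEQ()/KUNIT_EXPECT_MEMNEQ() come from this patch.

#include <kunit/test.h>
#include <linux/string.h>

static void memcpy_example_test(struct kunit *test)
{
	static const u8 src[] = { 0xde, 0xad, 0xbe, 0xef };
	u8 dst[4] = { 0 };

	/* Before the copy, the first 4 bytes of the buffers differ. */
	KUNIT_EXPECT_MEMNEQ(test, dst, src, sizeof(src));

	memcpy(dst, src, sizeof(src));

	/* After the copy, the first 4 bytes must match. */
	KUNIT_EXPECT_MEMEQ(test, dst, src, sizeof(src));
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(memcpy_example_test),
	{}
};

static struct kunit_suite example_test_suite = {
	.name = "memeq-example",
	.test_cases = example_test_cases,
};
kunit_test_suite(example_test_suite);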
diff --git a/include/kunit/visibility.h b/include/kunit/visibility.h
new file mode 100644
index 000000000000..0dfe35feeec6
--- /dev/null
+++ b/include/kunit/visibility.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit API to allow symbols to be conditionally visible during KUnit
+ * testing
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: Rae Moar <rmoar@google.com>
+ */
+
+#ifndef _KUNIT_VISIBILITY_H
+#define _KUNIT_VISIBILITY_H
+
+#if IS_ENABLED(CONFIG_KUNIT)
+ /**
+ * VISIBLE_IF_KUNIT - A macro that makes symbols static if CONFIG_KUNIT is
+ * not enabled, and leaves the symbol definition unchanged if CONFIG_KUNIT
+ * is enabled.
+ */
+ #define VISIBLE_IF_KUNIT
+ /**
+ * EXPORT_SYMBOL_IF_KUNIT(symbol) - Exports @symbol into the
+ * EXPORTED_FOR_KUNIT_TESTING namespace only if CONFIG_KUNIT is
+ * enabled. The test file must use MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING)
+ * in order to use the symbol.
+ */
+ #define EXPORT_SYMBOL_IF_KUNIT(symbol) EXPORT_SYMBOL_NS(symbol, \
+ EXPORTED_FOR_KUNIT_TESTING)
+#else
+ #define VISIBLE_IF_KUNIT static
+ #define EXPORT_SYMBOL_IF_KUNIT(symbol)
+#endif
+
+#endif /* _KUNIT_VISIBILITY_H */
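A hedged sketch of the intended usage pattern: widget_prepare(), the file split and the doubling logic are hypothetical; only VISIBLE_IF_KUNIT, EXPORT_SYMBOL_IF_KUNIT() and the EXPORTED_FOR_KUNIT_TESTING namespace come from this header.

/* In the code under test: static in normal builds, visible and exported
 * into the KUnit namespace when CONFIG_KUNIT is enabled.
 */
#include <kunit/visibility.h>

VISIBLE_IF_KUNIT int widget_prepare(int arg)
{
	return arg * 2;
}
EXPORT_SYMBOL_IF_KUNIT(widget_prepare);

/* In the test module: import the namespace so the symbol can be linked. */
MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);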
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 3015235d65e3..5e6a876e17ba 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -586,6 +586,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000
#define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000
#define OSC_SB_PRM_SUPPORT 0x00200000
+#define OSC_SB_FFH_OPR_SUPPORT 0x00400000
extern bool osc_sb_apei_support_acked;
extern bool osc_pc_lpi_support_confirmed;
@@ -1136,6 +1137,7 @@ int acpi_subsys_freeze(struct device *dev);
int acpi_subsys_poweroff(struct device *dev);
void acpi_ec_mark_gpe_for_wake(void);
void acpi_ec_set_gpe_wake_mask(u8 action);
+int acpi_subsys_restore_early(struct device *dev);
#else
static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
static inline void acpi_subsys_complete(struct device *dev) {}
@@ -1144,6 +1146,7 @@ static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; }
static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
static inline int acpi_subsys_poweroff(struct device *dev) { return 0; }
+static inline int acpi_subsys_restore_early(struct device *dev) { return 0; }
static inline void acpi_ec_mark_gpe_for_wake(void) {}
static inline void acpi_ec_set_gpe_wake_mask(u8 action) {}
#endif
@@ -1488,6 +1491,16 @@ void acpi_init_pcc(void);
static inline void acpi_init_pcc(void) { }
#endif
+#ifdef CONFIG_ACPI_FFH
+void acpi_init_ffh(void);
+extern int acpi_ffh_address_space_arch_setup(void *handler_ctxt,
+ void **region_ctxt);
+extern int acpi_ffh_address_space_arch_handler(acpi_integer *value,
+ void *region_context);
+#else
+static inline void acpi_init_ffh(void) { }
+#endif
+
#ifdef CONFIG_ACPI
extern void acpi_device_notify(struct device *dev);
extern void acpi_device_notify_remove(struct device *dev);
diff --git a/include/linux/acpi_apmt.h b/include/linux/acpi_apmt.h
new file mode 100644
index 000000000000..40bd634d082f
--- /dev/null
+++ b/include/linux/acpi_apmt.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * ARM CoreSight PMU driver.
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES.
+ *
+ */
+
+#ifndef __ACPI_APMT_H__
+#define __ACPI_APMT_H__
+
+#include <linux/acpi.h>
+
+#ifdef CONFIG_ACPI_APMT
+void acpi_apmt_init(void);
+#else
+static inline void acpi_apmt_init(void) { }
+#endif /* CONFIG_ACPI_APMT */
+
+#endif /* __ACPI_APMT_H__ */
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index 5f02d2e6b9d9..c87aeecaa9b2 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -11,6 +11,89 @@
#include <linux/types.h>
#include <linux/uuid.h>
+#define FFA_SMC(calling_convention, func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, (calling_convention), \
+ ARM_SMCCC_OWNER_STANDARD, (func_num))
+
+#define FFA_SMC_32(func_num) FFA_SMC(ARM_SMCCC_SMC_32, (func_num))
+#define FFA_SMC_64(func_num) FFA_SMC(ARM_SMCCC_SMC_64, (func_num))
+
+#define FFA_ERROR FFA_SMC_32(0x60)
+#define FFA_SUCCESS FFA_SMC_32(0x61)
+#define FFA_INTERRUPT FFA_SMC_32(0x62)
+#define FFA_VERSION FFA_SMC_32(0x63)
+#define FFA_FEATURES FFA_SMC_32(0x64)
+#define FFA_RX_RELEASE FFA_SMC_32(0x65)
+#define FFA_RXTX_MAP FFA_SMC_32(0x66)
+#define FFA_FN64_RXTX_MAP FFA_SMC_64(0x66)
+#define FFA_RXTX_UNMAP FFA_SMC_32(0x67)
+#define FFA_PARTITION_INFO_GET FFA_SMC_32(0x68)
+#define FFA_ID_GET FFA_SMC_32(0x69)
+#define FFA_MSG_POLL FFA_SMC_32(0x6A)
+#define FFA_MSG_WAIT FFA_SMC_32(0x6B)
+#define FFA_YIELD FFA_SMC_32(0x6C)
+#define FFA_RUN FFA_SMC_32(0x6D)
+#define FFA_MSG_SEND FFA_SMC_32(0x6E)
+#define FFA_MSG_SEND_DIRECT_REQ FFA_SMC_32(0x6F)
+#define FFA_FN64_MSG_SEND_DIRECT_REQ FFA_SMC_64(0x6F)
+#define FFA_MSG_SEND_DIRECT_RESP FFA_SMC_32(0x70)
+#define FFA_FN64_MSG_SEND_DIRECT_RESP FFA_SMC_64(0x70)
+#define FFA_MEM_DONATE FFA_SMC_32(0x71)
+#define FFA_FN64_MEM_DONATE FFA_SMC_64(0x71)
+#define FFA_MEM_LEND FFA_SMC_32(0x72)
+#define FFA_FN64_MEM_LEND FFA_SMC_64(0x72)
+#define FFA_MEM_SHARE FFA_SMC_32(0x73)
+#define FFA_FN64_MEM_SHARE FFA_SMC_64(0x73)
+#define FFA_MEM_RETRIEVE_REQ FFA_SMC_32(0x74)
+#define FFA_FN64_MEM_RETRIEVE_REQ FFA_SMC_64(0x74)
+#define FFA_MEM_RETRIEVE_RESP FFA_SMC_32(0x75)
+#define FFA_MEM_RELINQUISH FFA_SMC_32(0x76)
+#define FFA_MEM_RECLAIM FFA_SMC_32(0x77)
+#define FFA_MEM_OP_PAUSE FFA_SMC_32(0x78)
+#define FFA_MEM_OP_RESUME FFA_SMC_32(0x79)
+#define FFA_MEM_FRAG_RX FFA_SMC_32(0x7A)
+#define FFA_MEM_FRAG_TX FFA_SMC_32(0x7B)
+#define FFA_NORMAL_WORLD_RESUME FFA_SMC_32(0x7C)
+
+/*
+ * For some calls it is necessary to use SMC64 to pass or return 64-bit values.
+ * For such calls FFA_FN_NATIVE(name) will choose the appropriate
+ * (native-width) function ID.
+ */
+#ifdef CONFIG_64BIT
+#define FFA_FN_NATIVE(name) FFA_FN64_##name
+#else
+#define FFA_FN_NATIVE(name) FFA_##name
+#endif
+
+/* FFA error codes. */
+#define FFA_RET_SUCCESS (0)
+#define FFA_RET_NOT_SUPPORTED (-1)
+#define FFA_RET_INVALID_PARAMETERS (-2)
+#define FFA_RET_NO_MEMORY (-3)
+#define FFA_RET_BUSY (-4)
+#define FFA_RET_INTERRUPTED (-5)
+#define FFA_RET_DENIED (-6)
+#define FFA_RET_RETRY (-7)
+#define FFA_RET_ABORTED (-8)
+
+/* FFA version encoding */
+#define FFA_MAJOR_VERSION_MASK GENMASK(30, 16)
+#define FFA_MINOR_VERSION_MASK GENMASK(15, 0)
+#define FFA_MAJOR_VERSION(x) ((u16)(FIELD_GET(FFA_MAJOR_VERSION_MASK, (x))))
+#define FFA_MINOR_VERSION(x) ((u16)(FIELD_GET(FFA_MINOR_VERSION_MASK, (x))))
+#define FFA_PACK_VERSION_INFO(major, minor) \
+ (FIELD_PREP(FFA_MAJOR_VERSION_MASK, (major)) | \
+ FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor)))
+#define FFA_VERSION_1_0 FFA_PACK_VERSION_INFO(1, 0)
+
+/**
+ * The FF-A specification explicitly refers to '4K pages'. This should not be
+ * confused with the kernel PAGE_SIZE, which is the translation granule the
+ * kernel is configured with and may be 4K, 16K or 64K.
+ */
+#define FFA_PAGE_SIZE SZ_4K
+
/* FFA Bus/Device/Driver related */
struct ffa_device {
int vm_id;
@@ -161,11 +244,11 @@ struct ffa_mem_region_attributes {
*/
#define FFA_MEM_RETRIEVE_SELF_BORROWER BIT(0)
u8 flag;
- u32 composite_off;
/*
* Offset in bytes from the start of the outer `ffa_memory_region` to
* an `struct ffa_mem_region_addr_range`.
*/
+ u32 composite_off;
u64 reserved;
};
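A small sketch of the new version-encoding helpers: ffa_version_is_supported() is a hypothetical caller, and <linux/bitfield.h> is assumed to be needed for the FIELD_GET()/FIELD_PREP() based macros.

#include <linux/arm_ffa.h>
#include <linux/bitfield.h>
#include <linux/printk.h>

/* FFA_PACK_VERSION_INFO(1, 0) packs major=1 into bits [30:16] and minor=0
 * into bits [15:0], i.e. 0x10000 == FFA_VERSION_1_0.
 */
static bool ffa_version_is_supported(u32 version)
{
	u16 major = FFA_MAJOR_VERSION(version);
	u16 minor = FFA_MINOR_VERSION(version);

	pr_info("FF-A version %u.%u\n", major, minor);

	return version >= FFA_VERSION_1_0;
}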
diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h
index 53b31f69b74a..7615f8d7b1ed 100644
--- a/include/linux/bcm47xx_nvram.h
+++ b/include/linux/bcm47xx_nvram.h
@@ -11,6 +11,7 @@
#include <linux/vmalloc.h>
#ifdef CONFIG_BCM47XX_NVRAM
+int bcm47xx_nvram_init_from_iomem(void __iomem *nvram_start, size_t res_size);
int bcm47xx_nvram_init_from_mem(u32 base, u32 lim);
int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len);
int bcm47xx_nvram_gpio_pin(const char *name);
@@ -20,6 +21,11 @@ static inline void bcm47xx_nvram_release_contents(char *nvram)
vfree(nvram);
};
#else
+static inline int bcm47xx_nvram_init_from_iomem(void __iomem *nvram_start,
+ size_t res_size)
+{
+ return -ENOTSUPP;
+}
static inline int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
{
return -ENOTSUPP;
diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
index 69b24fe92cbf..26b1b71c3091 100644
--- a/include/linux/blk-crypto.h
+++ b/include/linux/blk-crypto.h
@@ -13,6 +13,7 @@ enum blk_crypto_mode_num {
BLK_ENCRYPTION_MODE_AES_256_XTS,
BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV,
BLK_ENCRYPTION_MODE_ADIANTUM,
+ BLK_ENCRYPTION_MODE_SM4_XTS,
BLK_ENCRYPTION_MODE_MAX,
};
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index ba18e9bdb799..d6119c5d1069 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -853,7 +853,8 @@ static inline bool blk_mq_add_to_batch(struct request *req,
struct io_comp_batch *iob, int ioerror,
void (*complete)(struct io_comp_batch *))
{
- if (!iob || (req->rq_flags & RQF_ELV) || ioerror)
+ if (!iob || (req->rq_flags & RQF_ELV) || ioerror ||
+ (req->end_io && !blk_rq_is_passthrough(req)))
return false;
if (!iob->complete)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 50e358a19d98..891f8cbcd043 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -311,6 +311,13 @@ struct queue_limits {
unsigned char discard_misaligned;
unsigned char raid_partial_stripes_expensive;
enum blk_zoned_model zoned;
+
+ /*
+ * Drivers that set dma_alignment to less than 511 must be prepared to
+ * handle individual bvec's that are not a multiple of a SECTOR_SIZE
+ * due to possible offsets.
+ */
+ unsigned int dma_alignment;
};
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
@@ -456,12 +463,6 @@ struct request_queue {
unsigned long nr_requests; /* Max # of requests */
unsigned int dma_pad_mask;
- /*
- * Drivers that set dma_alignment to less than 511 must be prepared to
- * handle individual bvec's that are not a multiple of a SECTOR_SIZE
- * due to possible offsets.
- */
- unsigned int dma_alignment;
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct blk_crypto_profile *crypto_profile;
@@ -944,7 +945,6 @@ extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
-extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
@@ -1324,7 +1324,7 @@ static inline sector_t bdev_zone_sectors(struct block_device *bdev)
static inline int queue_dma_alignment(const struct request_queue *q)
{
- return q ? q->dma_alignment : 511;
+ return q ? q->limits.dma_alignment : 511;
}
static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
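A hedged sketch of the driver-side view of the relocated field: the 4-byte requirement is invented, but blk_queue_dma_alignment() and queue_dma_alignment() are the existing helpers that now operate on limits.dma_alignment.

#include <linux/blkdev.h>

/* Hypothetical driver that can only DMA to 4-byte-aligned buffers: record
 * the requirement as an alignment mask; queue_dma_alignment() now reads it
 * back from q->limits.
 */
static void example_set_dma_alignment(struct request_queue *q)
{
	blk_queue_dma_alignment(q, 4 - 1);

	WARN_ON(queue_dma_alignment(q) != 3);
}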
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 9e7d46d16032..c1bd1bd10506 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -27,6 +27,7 @@
#include <linux/bpfptr.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
+#include <linux/static_call.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -314,7 +315,7 @@ static inline void __copy_map_value(struct bpf_map *map, void *dst, void *src, b
u32 next_off = map->off_arr->field_off[i];
memcpy(dst + curr_off, src + curr_off, next_off - curr_off);
- curr_off += map->off_arr->field_sz[i];
+ curr_off = next_off + map->off_arr->field_sz[i];
}
memcpy(dst + curr_off, src + curr_off, map->value_size - curr_off);
}
@@ -343,7 +344,7 @@ static inline void zero_map_value(struct bpf_map *map, void *dst)
u32 next_off = map->off_arr->field_off[i];
memset(dst + curr_off, 0, next_off - curr_off);
- curr_off += map->off_arr->field_sz[i];
+ curr_off = next_off + map->off_arr->field_sz[i];
}
memset(dst + curr_off, 0, map->value_size - curr_off);
}
@@ -953,6 +954,10 @@ struct bpf_dispatcher {
void *rw_image;
u32 image_off;
struct bpf_ksym ksym;
+#ifdef CONFIG_HAVE_STATIC_CALL
+ struct static_call_key *sc_key;
+ void *sc_tramp;
+#endif
};
static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
@@ -970,6 +975,34 @@ struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
+
+/*
+ * When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn
+ * indirection with a direct call to the bpf program. If the architecture does
+ * not have STATIC_CALL, avoid a double-indirection.
+ */
+#ifdef CONFIG_HAVE_STATIC_CALL
+
+#define __BPF_DISPATCHER_SC_INIT(_name) \
+ .sc_key = &STATIC_CALL_KEY(_name), \
+ .sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),
+
+#define __BPF_DISPATCHER_SC(name) \
+ DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)
+
+#define __BPF_DISPATCHER_CALL(name) \
+ static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)
+
+#define __BPF_DISPATCHER_UPDATE(_d, _new) \
+ __static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))
+
+#else
+#define __BPF_DISPATCHER_SC_INIT(name)
+#define __BPF_DISPATCHER_SC(name)
+#define __BPF_DISPATCHER_CALL(name) bpf_func(ctx, insnsi)
+#define __BPF_DISPATCHER_UPDATE(_d, _new)
+#endif
+
#define BPF_DISPATCHER_INIT(_name) { \
.mutex = __MUTEX_INITIALIZER(_name.mutex), \
.func = &_name##_func, \
@@ -981,32 +1014,29 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
.name = #_name, \
.lnode = LIST_HEAD_INIT(_name.ksym.lnode), \
}, \
+ __BPF_DISPATCHER_SC_INIT(_name##_call) \
}
-#ifdef CONFIG_X86_64
-#define BPF_DISPATCHER_ATTRIBUTES __attribute__((patchable_function_entry(5)))
-#else
-#define BPF_DISPATCHER_ATTRIBUTES
-#endif
-
#define DEFINE_BPF_DISPATCHER(name) \
- notrace BPF_DISPATCHER_ATTRIBUTES \
+ __BPF_DISPATCHER_SC(name); \
noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
bpf_func_t bpf_func) \
{ \
- return bpf_func(ctx, insnsi); \
+ return __BPF_DISPATCHER_CALL(name); \
} \
EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \
struct bpf_dispatcher bpf_dispatcher_##name = \
BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
+
#define DECLARE_BPF_DISPATCHER(name) \
unsigned int bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
bpf_func_t bpf_func); \
extern struct bpf_dispatcher bpf_dispatcher_##name;
+
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 58f5431a5559..982ba245eb41 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -152,6 +152,22 @@ static inline bool can_is_canxl_dev_mtu(unsigned int mtu)
return (mtu >= CANXL_MIN_MTU && mtu <= CANXL_MAX_MTU);
}
+/* drop skb if it does not contain a valid CAN frame for sending */
+static inline bool can_dev_dropped_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (priv->ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ netdev_info_once(dev,
+ "interface in listen only mode, dropping skb\n");
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return true;
+ }
+
+ return can_dropped_invalid_skb(dev, skb);
+}
+
void can_setup(struct net_device *dev);
struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
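A sketch of the intended caller, following the usual CAN driver pattern: the xmit routine drops (and accounts) invalid frames and, with this helper, frames submitted while the interface is in listen-only mode; the driver name and the queueing step are hypothetical.

#include <linux/can/dev.h>
#include <linux/netdevice.h>

static netdev_tx_t example_can_start_xmit(struct sk_buff *skb,
					   struct net_device *dev)
{
	/* Replaces the former can_dropped_invalid_skb() call. */
	if (can_dev_dropped_skb(dev, skb))
		return NETDEV_TX_OK;

	/* ...hand the frame to the controller and queue the echo skb... */
	return NETDEV_TX_OK;
}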
diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h
index 5755ae5a4712..6a869682c120 100644
--- a/include/linux/can/platform/sja1000.h
+++ b/include/linux/can/platform/sja1000.h
@@ -14,7 +14,7 @@
#define OCR_MODE_TEST 0x01
#define OCR_MODE_NORMAL 0x02
#define OCR_MODE_CLOCK 0x03
-#define OCR_MODE_MASK 0x07
+#define OCR_MODE_MASK 0x03
#define OCR_TX0_INVERT 0x04
#define OCR_TX0_PULLDOWN 0x08
#define OCR_TX0_PULLUP 0x10
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index f2a9f2274c3b..3410aecffdb4 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -68,6 +68,7 @@ struct css_task_iter {
struct list_head iters_node; /* css_set->task_iters */
};
+extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
@@ -106,6 +107,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);
+struct cgroup *cgroup_v1v2_get_from_fd(int fd);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
@@ -308,72 +310,25 @@ void css_task_iter_end(struct css_task_iter *it);
* Inline functions.
*/
+#ifdef CONFIG_DEBUG_CGROUP_REF
+void css_get(struct cgroup_subsys_state *css);
+void css_get_many(struct cgroup_subsys_state *css, unsigned int n);
+bool css_tryget(struct cgroup_subsys_state *css);
+bool css_tryget_online(struct cgroup_subsys_state *css);
+void css_put(struct cgroup_subsys_state *css);
+void css_put_many(struct cgroup_subsys_state *css, unsigned int n);
+#else
+#define CGROUP_REF_FN_ATTRS static inline
+#define CGROUP_REF_EXPORT(fn)
+#include <linux/cgroup_refcnt.h>
+#endif
+
static inline u64 cgroup_id(const struct cgroup *cgrp)
{
return cgrp->kn->id;
}
/**
- * css_get - obtain a reference on the specified css
- * @css: target css
- *
- * The caller must already have a reference.
- */
-static inline void css_get(struct cgroup_subsys_state *css)
-{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_get(&css->refcnt);
-}
-
-/**
- * css_get_many - obtain references on the specified css
- * @css: target css
- * @n: number of references to get
- *
- * The caller must already have a reference.
- */
-static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
-{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_get_many(&css->refcnt, n);
-}
-
-/**
- * css_tryget - try to obtain a reference on the specified css
- * @css: target css
- *
- * Obtain a reference on @css unless it already has reached zero and is
- * being released. This function doesn't care whether @css is on or
- * offline. The caller naturally needs to ensure that @css is accessible
- * but doesn't have to be holding a reference on it - IOW, RCU protected
- * access is good enough for this function. Returns %true if a reference
- * count was successfully obtained; %false otherwise.
- */
-static inline bool css_tryget(struct cgroup_subsys_state *css)
-{
- if (!(css->flags & CSS_NO_REF))
- return percpu_ref_tryget(&css->refcnt);
- return true;
-}
-
-/**
- * css_tryget_online - try to obtain a reference on the specified css if online
- * @css: target css
- *
- * Obtain a reference on @css if it's online. The caller naturally needs
- * to ensure that @css is accessible but doesn't have to be holding a
- * reference on it - IOW, RCU protected access is good enough for this
- * function. Returns %true if a reference count was successfully obtained;
- * %false otherwise.
- */
-static inline bool css_tryget_online(struct cgroup_subsys_state *css)
-{
- if (!(css->flags & CSS_NO_REF))
- return percpu_ref_tryget_live(&css->refcnt);
- return true;
-}
-
-/**
* css_is_dying - test whether the specified css is dying
* @css: target css
*
@@ -393,31 +348,6 @@ static inline bool css_is_dying(struct cgroup_subsys_state *css)
return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}
-/**
- * css_put - put a css reference
- * @css: target css
- *
- * Put a reference obtained via css_get() and css_tryget_online().
- */
-static inline void css_put(struct cgroup_subsys_state *css)
-{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_put(&css->refcnt);
-}
-
-/**
- * css_put_many - put css references
- * @css: target css
- * @n: number of references to put
- *
- * Put references obtained via css_get() and css_tryget_online().
- */
-static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
-{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_put_many(&css->refcnt, n);
-}
-
static inline void cgroup_get(struct cgroup *cgrp)
{
css_get(&cgrp->self);
diff --git a/include/linux/cgroup_refcnt.h b/include/linux/cgroup_refcnt.h
new file mode 100644
index 000000000000..2eea0a69ecfc
--- /dev/null
+++ b/include/linux/cgroup_refcnt.h
@@ -0,0 +1,96 @@
+/**
+ * css_get - obtain a reference on the specified css
+ * @css: target css
+ *
+ * The caller must already have a reference.
+ */
+CGROUP_REF_FN_ATTRS
+void css_get(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_get(&css->refcnt);
+}
+CGROUP_REF_EXPORT(css_get)
+
+/**
+ * css_get_many - obtain references on the specified css
+ * @css: target css
+ * @n: number of references to get
+ *
+ * The caller must already have a reference.
+ */
+CGROUP_REF_FN_ATTRS
+void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_get_many(&css->refcnt, n);
+}
+CGROUP_REF_EXPORT(css_get_many)
+
+/**
+ * css_tryget - try to obtain a reference on the specified css
+ * @css: target css
+ *
+ * Obtain a reference on @css unless it already has reached zero and is
+ * being released. This function doesn't care whether @css is on or
+ * offline. The caller naturally needs to ensure that @css is accessible
+ * but doesn't have to be holding a reference on it - IOW, RCU protected
+ * access is good enough for this function. Returns %true if a reference
+ * count was successfully obtained; %false otherwise.
+ */
+CGROUP_REF_FN_ATTRS
+bool css_tryget(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ return percpu_ref_tryget(&css->refcnt);
+ return true;
+}
+CGROUP_REF_EXPORT(css_tryget)
+
+/**
+ * css_tryget_online - try to obtain a reference on the specified css if online
+ * @css: target css
+ *
+ * Obtain a reference on @css if it's online. The caller naturally needs
+ * to ensure that @css is accessible but doesn't have to be holding a
+ * reference on it - IOW, RCU protected access is good enough for this
+ * function. Returns %true if a reference count was successfully obtained;
+ * %false otherwise.
+ */
+CGROUP_REF_FN_ATTRS
+bool css_tryget_online(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ return percpu_ref_tryget_live(&css->refcnt);
+ return true;
+}
+CGROUP_REF_EXPORT(css_tryget_online)
+
+/**
+ * css_put - put a css reference
+ * @css: target css
+ *
+ * Put a reference obtained via css_get() and css_tryget_online().
+ */
+CGROUP_REF_FN_ATTRS
+void css_put(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_put(&css->refcnt);
+}
+CGROUP_REF_EXPORT(css_put)
+
+/**
+ * css_put_many - put css references
+ * @css: target css
+ * @n: number of references to put
+ *
+ * Put references obtained via css_get() and css_tryget_online().
+ */
+CGROUP_REF_FN_ATTRS
+void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_put_many(&css->refcnt, n);
+}
+CGROUP_REF_EXPORT(css_put_many)
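For contrast with the static-inline branch visible in the cgroup.h hunk above, a hedged sketch of how the CONFIG_DEBUG_CGROUP_REF side is expected to consume this template header; the noinline attribute and EXPORT_SYMBOL_GPL() choice are assumptions, since the real definitions live in kernel/cgroup/cgroup.c, outside this diff.

/* Out-of-line, exported definitions for the debug build (illustrative). */
#define CGROUP_REF_FN_ATTRS	noinline
#define CGROUP_REF_EXPORT(fn)	EXPORT_SYMBOL_GPL(fn);
#include <linux/cgroup_refcnt.h>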
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 594357881b0b..44b1736c95b5 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -126,11 +126,9 @@ struct compat_tms {
#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW)
-#ifndef compat_sigset_t
typedef struct {
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
} compat_sigset_t;
-#endif
int set_compat_user_sigmask(const compat_sigset_t __user *umask,
size_t sigsetsize);
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 97cfd13bae51..2606711adb18 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -204,8 +204,6 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \
 * group children. default_groups may coexist alongside make_group() or
* make_item(), but if the group wishes to have only default_groups
* children (disallowing mkdir(2)), it need not provide either function.
- * If the group has commit(), it supports pending and committed (active)
- * items.
*/
struct configfs_item_operations {
void (*release)(struct config_item *);
@@ -216,7 +214,6 @@ struct configfs_item_operations {
struct configfs_group_operations {
struct config_item *(*make_item)(struct config_group *group, const char *name);
struct config_group *(*make_group)(struct config_group *group, const char *name);
- int (*commit_item)(struct config_item *item);
void (*disconnect_notify)(struct config_group *group, struct config_item *item);
void (*drop_item)(struct config_group *group, struct config_item *item);
};
diff --git a/include/linux/console.h b/include/linux/console.h
index 8c1686e2c233..9cea254b34b8 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -15,6 +15,7 @@
#define _LINUX_CONSOLE_H_ 1
#include <linux/atomic.h>
+#include <linux/rculist.h>
#include <linux/types.h>
struct vc_data;
@@ -154,14 +155,132 @@ struct console {
u64 seq;
unsigned long dropped;
void *data;
- struct console *next;
+ struct hlist_node node;
};
+#ifdef CONFIG_LOCKDEP
+extern void lockdep_assert_console_list_lock_held(void);
+#else
+static inline void lockdep_assert_console_list_lock_held(void)
+{
+}
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern bool console_srcu_read_lock_is_held(void);
+#else
+static inline bool console_srcu_read_lock_is_held(void)
+{
+ return 1;
+}
+#endif
+
+extern int console_srcu_read_lock(void);
+extern void console_srcu_read_unlock(int cookie);
+
+extern void console_list_lock(void) __acquires(console_mutex);
+extern void console_list_unlock(void) __releases(console_mutex);
+
+extern struct hlist_head console_list;
+
+/**
+ * console_srcu_read_flags - Locklessly read the console flags
+ * @con: struct console pointer of console to read flags from
+ *
+ * This function provides the necessary READ_ONCE() and data_race()
+ * notation for locklessly reading the console flags. The READ_ONCE()
+ * in this function matches the WRITE_ONCE() when @flags are modified
+ * for registered consoles with console_srcu_write_flags().
+ *
+ * Only use this function to read console flags when locklessly
+ * iterating the console list via srcu.
+ *
+ * Context: Any context.
+ */
+static inline short console_srcu_read_flags(const struct console *con)
+{
+ WARN_ON_ONCE(!console_srcu_read_lock_is_held());
+
+ /*
+ * Locklessly reading console->flags provides a consistent
+ * read value because there is at most one CPU modifying
+ * console->flags and that CPU is using only read-modify-write
+ * operations to do so.
+ */
+ return data_race(READ_ONCE(con->flags));
+}
+
+/**
+ * console_srcu_write_flags - Write flags for a registered console
+ * @con: struct console pointer of console to write flags to
+ * @flags: new flags value to write
+ *
+ * Only use this function to write flags for registered consoles. It
+ * requires holding the console_list_lock.
+ *
+ * Context: Any context.
+ */
+static inline void console_srcu_write_flags(struct console *con, short flags)
+{
+ lockdep_assert_console_list_lock_held();
+
+ /* This matches the READ_ONCE() in console_srcu_read_flags(). */
+ WRITE_ONCE(con->flags, flags);
+}
+
+/* Variant of console_is_registered() when the console_list_lock is held. */
+static inline bool console_is_registered_locked(const struct console *con)
+{
+ lockdep_assert_console_list_lock_held();
+ return !hlist_unhashed(&con->node);
+}
+
/*
- * for_each_console() allows you to iterate on each console
+ * console_is_registered - Check if the console is registered
+ * @con: struct console pointer of console to check
+ *
+ * Context: Process context. May sleep while acquiring console list lock.
+ * Return: true if the console is in the console list, otherwise false.
+ *
+ * If false is returned for a console that was previously registered, it
+ * can be assumed that the console's unregistration is fully completed,
+ * including the exit() callback after console list removal.
+ */
+static inline bool console_is_registered(const struct console *con)
+{
+ bool ret;
+
+ console_list_lock();
+ ret = console_is_registered_locked(con);
+ console_list_unlock();
+ return ret;
+}
+
+/**
+ * for_each_console_srcu() - Iterator over registered consoles
+ * @con: struct console pointer used as loop cursor
+ *
+ * Although SRCU guarantees the console list will be consistent, the
+ * struct console fields may be updated by other CPUs while iterating.
+ *
+ * Requires console_srcu_read_lock to be held. Can be invoked from
+ * any context.
+ */
+#define for_each_console_srcu(con) \
+ hlist_for_each_entry_srcu(con, &console_list, node, \
+ console_srcu_read_lock_is_held())
+
+/**
+ * for_each_console() - Iterator over registered consoles
+ * @con: struct console pointer used as loop cursor
+ *
+ * The console list and the console->flags are immutable while iterating.
+ *
+ * Requires console_list_lock to be held.
*/
-#define for_each_console(con) \
- for (con = console_drivers; con != NULL; con = con->next)
+#define for_each_console(con) \
+ lockdep_assert_console_list_lock_held(); \
+ hlist_for_each_entry(con, &console_list, node)
extern int console_set_on_cmdline;
extern struct console *early_console;
@@ -172,9 +291,9 @@ enum con_flush_mode {
};
extern int add_preferred_console(char *name, int idx, char *options);
+extern void console_force_preferred_locked(struct console *con);
extern void register_console(struct console *);
extern int unregister_console(struct console *);
-extern struct console *console_drivers;
extern void console_lock(void);
extern int console_trylock(void);
extern void console_unlock(void);
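A short sketch of lockless iteration with the new SRCU console-list API: take the read lock for a cookie, walk the list with for_each_console_srcu(), and read flags only via console_srcu_read_flags(); the CON_ENABLED check is just an example predicate.

#include <linux/console.h>

static bool any_console_enabled(void)
{
	struct console *con;
	bool enabled = false;
	int cookie;

	cookie = console_srcu_read_lock();
	for_each_console_srcu(con) {
		if (console_srcu_read_flags(con) & CON_ENABLED) {
			enabled = true;
			break;
		}
	}
	console_srcu_read_unlock(cookie);

	return enabled;
}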
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 08a1d3e7e46d..d3eba4360150 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -18,10 +18,10 @@ struct core_vma_metadata {
struct coredump_params {
const kernel_siginfo_t *siginfo;
- struct pt_regs *regs;
struct file *file;
unsigned long limit;
unsigned long mm_flags;
+ int cpu;
loff_t written;
loff_t pos;
loff_t to_skip;
diff --git a/include/linux/counter.h b/include/linux/counter.h
index c41fa602ed28..b63746637de2 100644
--- a/include/linux/counter.h
+++ b/include/linux/counter.h
@@ -542,11 +542,10 @@ struct counter_array {
#define DEFINE_COUNTER_ARRAY_CAPTURE(_name, _length) \
DEFINE_COUNTER_ARRAY_U64(_name, _length)
-#define DEFINE_COUNTER_ARRAY_POLARITY(_name, _enums, _length) \
- DEFINE_COUNTER_AVAILABLE(_name##_available, _enums); \
+#define DEFINE_COUNTER_ARRAY_POLARITY(_name, _available, _length) \
struct counter_array _name = { \
.type = COUNTER_COMP_SIGNAL_POLARITY, \
- .avail = &(_name##_available), \
+ .avail = &(_available), \
.length = (_length), \
}
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index d5595d57f4e5..6a94a6eaad27 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1110,10 +1110,10 @@ cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
}
static inline int parse_perf_domain(int cpu, const char *list_name,
- const char *cell_name)
+ const char *cell_name,
+ struct of_phandle_args *args)
{
struct device_node *cpu_np;
- struct of_phandle_args args;
int ret;
cpu_np = of_cpu_device_node_get(cpu);
@@ -1121,41 +1121,44 @@ static inline int parse_perf_domain(int cpu, const char *list_name,
return -ENODEV;
ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0,
- &args);
+ args);
if (ret < 0)
return ret;
of_node_put(cpu_np);
- return args.args[0];
+ return 0;
}
static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
- const char *cell_name, struct cpumask *cpumask)
+ const char *cell_name, struct cpumask *cpumask,
+ struct of_phandle_args *pargs)
{
- int target_idx;
int cpu, ret;
+ struct of_phandle_args args;
- ret = parse_perf_domain(pcpu, list_name, cell_name);
+ ret = parse_perf_domain(pcpu, list_name, cell_name, pargs);
if (ret < 0)
return ret;
- target_idx = ret;
cpumask_set_cpu(pcpu, cpumask);
for_each_possible_cpu(cpu) {
if (cpu == pcpu)
continue;
- ret = parse_perf_domain(cpu, list_name, cell_name);
+ ret = parse_perf_domain(cpu, list_name, cell_name, &args);
if (ret < 0)
continue;
- if (target_idx == ret)
+ if (pargs->np == args.np && pargs->args_count == args.args_count &&
+ !memcmp(pargs->args, args.args, sizeof(args.args[0]) * args.args_count))
cpumask_set_cpu(cpu, cpumask);
+
+ of_node_put(args.np);
}
- return target_idx;
+ return 0;
}
#else
static inline int cpufreq_boost_trigger_state(int state)
@@ -1185,7 +1188,8 @@ cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
}
static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
- const char *cell_name, struct cpumask *cpumask)
+ const char *cell_name, struct cpumask *cpumask,
+ struct of_phandle_args *pargs)
{
return -EOPNOTSUPP;
}
diff --git a/include/linux/damon.h b/include/linux/damon.h
index 620ada094c3b..84525b9cdf6e 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -21,7 +21,7 @@
/* Get a random number in [l, r) */
static inline unsigned long damon_rand(unsigned long l, unsigned long r)
{
- return l + prandom_u32_max(r - l);
+ return l + get_random_u32_below(r - l);
}
/**
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index f60674692d36..ea2d919fd9c7 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -45,7 +45,7 @@ struct debugfs_u32_array {
extern struct dentry *arch_debugfs_dir;
-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+#define DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
__simple_attr_check_format(__fmt, 0ull); \
@@ -56,10 +56,16 @@ static const struct file_operations __fops = { \
.open = __fops ## _open, \
.release = simple_attr_release, \
.read = debugfs_attr_read, \
- .write = debugfs_attr_write, \
+ .write = (__is_signed) ? debugfs_attr_write_signed : debugfs_attr_write, \
.llseek = no_llseek, \
}
+#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
+
+#define DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
+ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
+
typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
#if defined(CONFIG_DEBUG_FS)
@@ -102,6 +108,8 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos);
ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
+ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, const char *new_name);
@@ -254,6 +262,13 @@ static inline ssize_t debugfs_attr_write(struct file *file,
return -ENODEV;
}
+static inline ssize_t debugfs_attr_write_signed(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, char *new_name)
{
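
The new _SIGNED variant keeps the familiar get/set prototypes (which still transport the value as a u64) but parses user input with a signed conversion, so negative writes work. A minimal sketch with illustrative names:

    static s64 my_threshold;

    static int my_threshold_get(void *data, u64 *val)
    {
            *val = *(s64 *)data;            /* sign-extended into the u64 */
            return 0;
    }

    static int my_threshold_set(void *data, u64 val)
    {
            *(s64 *)data = (s64)val;
            return 0;
    }

    DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(my_threshold_fops, my_threshold_get,
                                    my_threshold_set, "%lld\n");

    /* at init time: */
    debugfs_create_file_unsafe("threshold", 0644, parent_dentry,
                               &my_threshold, &my_threshold_fops);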
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 34aab4dd336c..4dc7cda4fd46 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -152,8 +152,8 @@ struct devfreq_stats {
* @max_state: count of entry present in the frequency table.
* @previous_freq: previously configured frequency value.
* @last_status: devfreq user device info, performance statistics
- * @data: Private data of the governor. The devfreq framework does not
- * touch this.
+ * @data: private data passed from the devfreq driver to governors; governors should not change it.
+ * @governor_data: private data for governors, devfreq core doesn't touch it.
* @user_min_freq_req: PM QoS minimum frequency request from user (via sysfs)
* @user_max_freq_req: PM QoS maximum frequency request from user (via sysfs)
* @scaling_min_freq: Limit minimum frequency requested by OPP interface
@@ -193,7 +193,8 @@ struct devfreq {
unsigned long previous_freq;
struct devfreq_dev_status last_status;
- void *data; /* private data for governors */
+ void *data;
+ void *governor_data;
struct dev_pm_qos_request user_min_freq_req;
struct dev_pm_qos_request user_max_freq_req;
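
The split separates driver-supplied tuning data (@data) from governor-private state (@governor_data), so a governor no longer clobbers what the driver passed in. A sketch of how a governor's start path might use the two fields (struct my_gov_state and its members are hypothetical):

    static int my_governor_start(struct devfreq *devfreq)
    {
            struct my_gov_state *state;

            state = kzalloc(sizeof(*state), GFP_KERNEL);
            if (!state)
                    return -ENOMEM;

            state->tunables = devfreq->data;        /* read-only driver input */
            devfreq->governor_data = state;         /* freed again on stop */
            return 0;
    }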
diff --git a/include/linux/device.h b/include/linux/device.h
index 424b55df0272..c90a444be1c4 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -378,10 +378,8 @@ struct dev_links_info {
* @data: Pointer to MSI device data
*/
struct dev_msi_info {
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- struct irq_domain *domain;
-#endif
#ifdef CONFIG_GENERIC_MSI_IRQ
+ struct irq_domain *domain;
struct msi_device_data *data;
#endif
};
@@ -742,7 +740,7 @@ static inline void set_dev_node(struct device *dev, int node)
static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
{
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+#ifdef CONFIG_GENERIC_MSI_IRQ
return dev->msi.domain;
#else
return NULL;
@@ -751,7 +749,7 @@ static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
{
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+#ifdef CONFIG_GENERIC_MSI_IRQ
dev->msi.domain = d;
#endif
}
diff --git a/include/linux/dsa/tag_qca.h b/include/linux/dsa/tag_qca.h
index 50be7cbd93a5..b1b5720d89a5 100644
--- a/include/linux/dsa/tag_qca.h
+++ b/include/linux/dsa/tag_qca.h
@@ -61,9 +61,9 @@ struct sk_buff;
/* Special struct emulating a Ethernet header */
struct qca_mgmt_ethhdr {
- u32 command; /* command bit 31:0 */
- u32 seq; /* seq 63:32 */
- u32 mdio_data; /* first 4byte mdio */
+ __le32 command; /* command bit 31:0 */
+ __le32 seq; /* seq 63:32 */
+ __le32 mdio_data; /* first 4byte mdio */
__be16 hdr; /* qca hdr */
} __packed;
@@ -73,7 +73,7 @@ enum mdio_cmd {
};
struct mib_ethhdr {
- u32 data[3]; /* first 3 mib counter */
+ __le32 data[3]; /* first 3 mib counter */
__be16 hdr; /* qca hdr */
} __packed;
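
Marking the fields __le32 documents that they arrive in little-endian wire order and lets sparse catch missing conversions; readers are expected to go through le32_to_cpu(). A sketch of how a tagger might read the command word (the helper name and cast are illustrative):

    static u32 my_mgmt_command(const struct sk_buff *skb)
    {
            const struct qca_mgmt_ethhdr *mgmt =
                    (const struct qca_mgmt_ethhdr *)skb_mac_header(skb);

            return le32_to_cpu(mgmt->command);      /* LE wire -> CPU order */
    }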
diff --git a/include/linux/efi.h b/include/linux/efi.h
index da3974bf05d3..7603fc58c47c 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -389,6 +389,7 @@ void efi_native_runtime_setup(void);
#define EFI_LOAD_FILE2_PROTOCOL_GUID EFI_GUID(0x4006c0c1, 0xfcb3, 0x403e, 0x99, 0x6d, 0x4a, 0x6c, 0x87, 0x24, 0xe0, 0x6d)
#define EFI_RT_PROPERTIES_TABLE_GUID EFI_GUID(0xeb66918a, 0x7eef, 0x402a, 0x84, 0x2e, 0x93, 0x1d, 0x21, 0xc3, 0x8a, 0xe9)
#define EFI_DXE_SERVICES_TABLE_GUID EFI_GUID(0x05ad34ba, 0x6f02, 0x4214, 0x95, 0x2e, 0x4d, 0xa0, 0x39, 0x8e, 0x2b, 0xb9)
+#define EFI_SMBIOS_PROTOCOL_GUID EFI_GUID(0x03583ff6, 0xcb36, 0x4940, 0x94, 0x7e, 0xb9, 0xb3, 0x9f, 0x4a, 0xfa, 0xf7)
#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
@@ -1085,9 +1086,6 @@ efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor,
efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size, void *data);
-efi_status_t check_var_size(u32 attributes, unsigned long size);
-efi_status_t check_var_size_nonblocking(u32 attributes, unsigned long size);
-
#if IS_ENABLED(CONFIG_EFI_CAPSULE_LOADER)
extern bool efi_capsule_pending(int *reset_type);
@@ -1225,7 +1223,7 @@ efi_status_t efi_random_get_seed(void);
arch_efi_call_virt_teardown(); \
})
-#define EFI_RANDOM_SEED_SIZE 64U
+#define EFI_RANDOM_SEED_SIZE 32U // BLAKE2S_HASH_SIZE
struct linux_efi_random_seed {
u32 size;
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 346a8b56cdc8..9ec81290e3c8 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -88,22 +88,13 @@ static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t*
{
#if defined (ELF_CORE_COPY_TASK_REGS)
return ELF_CORE_COPY_TASK_REGS(t, elfregs);
-#elif defined (task_pt_regs)
+#else
elf_core_copy_regs(elfregs, task_pt_regs(t));
#endif
return 0;
}
-extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
-
-static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_regs *regs, elf_fpregset_t *fpu)
-{
-#ifdef ELF_CORE_COPY_FPREGS
- return ELF_CORE_COPY_FPREGS(t, fpu);
-#else
- return dump_fpu(regs, fpu);
-#endif
-}
+int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
#ifdef CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS
/*
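
With the generic wrapper gone, each architecture provides elf_core_copy_task_fpregs() directly. A heavily hedged sketch of what an arch-side definition could look like; my_arch_fpu_valid() and my_arch_fpu_state() are placeholders for whatever the architecture actually uses:

    int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
    {
            if (!my_arch_fpu_valid(t))
                    return 0;               /* no FPU note is emitted */

            memcpy(fpu, my_arch_fpu_state(t), sizeof(*fpu));
            return 1;                       /* non-zero: *fpu was filled in */
    }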
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 30eb30d6909b..3cd202d3eefb 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -61,7 +61,7 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
return ERR_PTR(-ENOSYS);
}
-static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
+static inline int eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
return -ENOSYS;
}
diff --git a/include/linux/evm.h b/include/linux/evm.h
index aa63e0b3c0a2..7a9ee2157f69 100644
--- a/include/linux/evm.h
+++ b/include/linux/evm.h
@@ -35,6 +35,27 @@ extern int evm_inode_removexattr(struct user_namespace *mnt_userns,
struct dentry *dentry, const char *xattr_name);
extern void evm_inode_post_removexattr(struct dentry *dentry,
const char *xattr_name);
+static inline void evm_inode_post_remove_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ evm_inode_post_removexattr(dentry, acl_name);
+}
+extern int evm_inode_set_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *acl_name,
+ struct posix_acl *kacl);
+static inline int evm_inode_remove_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return evm_inode_set_acl(mnt_userns, dentry, acl_name, NULL);
+}
+static inline void evm_inode_post_set_acl(struct dentry *dentry,
+ const char *acl_name,
+ struct posix_acl *kacl)
+{
+ return evm_inode_post_setxattr(dentry, acl_name, NULL, 0);
+}
extern int evm_inode_init_security(struct inode *inode,
const struct xattr *xattr_array,
struct xattr *evm);
@@ -108,6 +129,34 @@ static inline void evm_inode_post_removexattr(struct dentry *dentry,
return;
}
+static inline void evm_inode_post_remove_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return;
+}
+
+static inline int evm_inode_set_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *acl_name,
+ struct posix_acl *kacl)
+{
+ return 0;
+}
+
+static inline int evm_inode_remove_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return 0;
+}
+
+static inline void evm_inode_post_set_acl(struct dentry *dentry,
+ const char *acl_name,
+ struct posix_acl *kacl)
+{
+ return;
+}
+
static inline int evm_inode_init_security(struct inode *inode,
const struct xattr *xattr_array,
struct xattr *evm)
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 9f6e25467844..444236dadcf0 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -20,7 +20,6 @@ struct fault_attr {
atomic_t space;
unsigned long verbose;
bool task_filter;
- bool no_warn;
unsigned long stacktrace_depth;
unsigned long require_start;
unsigned long require_end;
@@ -32,6 +31,10 @@ struct fault_attr {
struct dentry *dname;
};
+enum fault_flags {
+ FAULT_NOWARN = 1 << 0,
+};
+
#define FAULT_ATTR_INITIALIZER { \
.interval = 1, \
.times = ATOMIC_INIT(1), \
@@ -40,11 +43,11 @@ struct fault_attr {
.ratelimit_state = RATELIMIT_STATE_INIT_DISABLED, \
.verbose = 2, \
.dname = NULL, \
- .no_warn = false, \
}
#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
int setup_fault_attr(struct fault_attr *attr, char *str);
+bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags);
bool should_fail(struct fault_attr *attr, ssize_t size);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
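
should_fail_ex() is the flag-taking sibling of should_fail(); passing FAULT_NOWARN replaces the removed per-attr no_warn field for call sites where a failure is expected and the warning splat is just noise. A small sketch with illustrative names:

    static DECLARE_FAULT_ATTR(my_fail_attr);

    static void *my_alloc(size_t size)
    {
            /* expected failure path: suppress the warning and stack dump */
            if (should_fail_ex(&my_fail_attr, size, FAULT_NOWARN))
                    return NULL;

            return kmalloc(size, GFP_KERNEL);
    }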
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 0aff76bcbb00..bcb8658f5b64 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -555,7 +555,7 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || \
defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || \
- defined(__arm__) || defined(__aarch64__)
+ defined(__arm__) || defined(__aarch64__) || defined(__mips__)
#define fb_readb __raw_readb
#define fb_readw __raw_readw
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index 4029fe368a4f..1067a8450826 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -43,11 +43,24 @@ extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
#else
-#define __underlying_memchr __builtin_memchr
-#define __underlying_memcmp __builtin_memcmp
+
+#if defined(__SANITIZE_MEMORY__)
+/*
+ * For KMSAN builds all memcpy/memset/memmove calls should be replaced by the
+ * corresponding __msan_XXX functions.
+ */
+#include <linux/kmsan_string.h>
+#define __underlying_memcpy __msan_memcpy
+#define __underlying_memmove __msan_memmove
+#define __underlying_memset __msan_memset
+#else
#define __underlying_memcpy __builtin_memcpy
#define __underlying_memmove __builtin_memmove
#define __underlying_memset __builtin_memset
+#endif
+
+#define __underlying_memchr __builtin_memchr
+#define __underlying_memcmp __builtin_memcmp
#define __underlying_strcat __builtin_strcat
#define __underlying_strcpy __builtin_strcpy
#define __underlying_strlen __builtin_strlen
@@ -441,13 +454,18 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
#define __fortify_memcpy_chk(p, q, size, p_size, q_size, \
p_size_field, q_size_field, op) ({ \
- size_t __fortify_size = (size_t)(size); \
- WARN_ONCE(fortify_memcpy_chk(__fortify_size, p_size, q_size, \
- p_size_field, q_size_field, #op), \
+ const size_t __fortify_size = (size_t)(size); \
+ const size_t __p_size = (p_size); \
+ const size_t __q_size = (q_size); \
+ const size_t __p_size_field = (p_size_field); \
+ const size_t __q_size_field = (q_size_field); \
+ WARN_ONCE(fortify_memcpy_chk(__fortify_size, __p_size, \
+ __q_size, __p_size_field, \
+ __q_size_field, #op), \
#op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \
__fortify_size, \
"field \"" #p "\" at " __FILE__ ":" __stringify(__LINE__), \
- p_size_field); \
+ __p_size_field); \
__underlying_##op(p, q, __fortify_size); \
})
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e654435f1651..066555ad1bf8 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -560,8 +560,8 @@ struct posix_acl;
#define ACL_NOT_CACHED ((void *)(-1))
/*
* ACL_DONT_CACHE is for stacked filesystems, that rely on underlying fs to
- * cache the ACL. This also means that ->get_acl() can be called in RCU mode
- * with the LOOKUP_RCU flag.
+ * cache the ACL. This also means that ->get_inode_acl() can be called in RCU
+ * mode with the LOOKUP_RCU flag.
*/
#define ACL_DONT_CACHE ((void *)(-3))
@@ -1131,9 +1131,8 @@ struct file_lock_context {
/* The following constant reflects the upper bound of the file/locking space */
#ifndef OFFSET_MAX
-#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1)))
-#define OFFSET_MAX INT_LIMIT(loff_t)
-#define OFFT_OFFSET_MAX INT_LIMIT(off_t)
+#define OFFSET_MAX type_max(loff_t)
+#define OFFT_OFFSET_MAX type_max(off_t)
#endif
extern void send_sigio(struct fown_struct *fown, int fd, int band);
@@ -1170,6 +1169,7 @@ extern int locks_delete_block(struct file_lock *);
extern int vfs_test_lock(struct file *, struct file_lock *);
extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
+bool vfs_inode_has_locks(struct inode *inode);
extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
extern void lease_get_mtime(struct inode *, struct timespec64 *time);
@@ -1186,6 +1186,13 @@ extern void show_fd_locks(struct seq_file *f,
struct file *filp, struct files_struct *files);
extern bool locks_owner_has_blockers(struct file_lock_context *flctx,
fl_owner_t owner);
+
+static inline struct file_lock_context *
+locks_inode_context(const struct inode *inode)
+{
+ return smp_load_acquire(&inode->i_flctx);
+}
+
#else /* !CONFIG_FILE_LOCKING */
static inline int fcntl_getlk(struct file *file, unsigned int cmd,
struct flock __user *user)
@@ -1284,6 +1291,11 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
return 0;
}
+static inline bool vfs_inode_has_locks(struct inode *inode)
+{
+ return false;
+}
+
static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
return -ENOLCK;
@@ -1326,6 +1338,13 @@ static inline bool locks_owner_has_blockers(struct file_lock_context *flctx,
{
return false;
}
+
+static inline struct file_lock_context *
+locks_inode_context(const struct inode *inode)
+{
+ return NULL;
+}
+
#endif /* !CONFIG_FILE_LOCKING */
static inline struct inode *file_inode(const struct file *f)
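
locks_inode_context() centralizes the smp_load_acquire() that file-lock consumers previously open-coded, and vfs_inode_has_locks() gives callers a cheap existence check. A sketch of a filesystem-side helper built on the new accessor (the function name is illustrative; flc_lease is the lease list in struct file_lock_context):

    static bool myfs_inode_has_lease(struct inode *inode)
    {
            struct file_lock_context *ctx = locks_inode_context(inode);

            return ctx && !list_empty_careful(&ctx->flc_lease);
    }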
@@ -1613,23 +1632,6 @@ static inline void i_gid_write(struct inode *inode, gid_t gid)
}
/**
- * i_uid_into_mnt - map an inode's i_uid down into a mnt_userns
- * @mnt_userns: user namespace of the mount the inode was found from
- * @inode: inode to map
- *
- * Note, this will eventually be removed completely in favor of the type-safe
- * i_uid_into_vfsuid().
- *
- * Return: the inode's i_uid mapped down according to @mnt_userns.
- * If the inode's i_uid has no mapping INVALID_UID is returned.
- */
-static inline kuid_t i_uid_into_mnt(struct user_namespace *mnt_userns,
- const struct inode *inode)
-{
- return AS_KUIDT(make_vfsuid(mnt_userns, i_user_ns(inode), inode->i_uid));
-}
-
-/**
* i_uid_into_vfsuid - map an inode's i_uid down into a mnt_userns
* @mnt_userns: user namespace of the mount the inode was found from
* @inode: inode to map
@@ -1682,23 +1684,6 @@ static inline void i_uid_update(struct user_namespace *mnt_userns,
}
/**
- * i_gid_into_mnt - map an inode's i_gid down into a mnt_userns
- * @mnt_userns: user namespace of the mount the inode was found from
- * @inode: inode to map
- *
- * Note, this will eventually be removed completely in favor of the type-safe
- * i_gid_into_vfsgid().
- *
- * Return: the inode's i_gid mapped down according to @mnt_userns.
- * If the inode's i_gid has no mapping INVALID_GID is returned.
- */
-static inline kgid_t i_gid_into_mnt(struct user_namespace *mnt_userns,
- const struct inode *inode)
-{
- return AS_KGIDT(make_vfsgid(mnt_userns, i_user_ns(inode), inode->i_gid));
-}
-
-/**
* i_gid_into_vfsgid - map an inode's i_gid down into a mnt_userns
* @mnt_userns: user namespace of the mount the inode was found from
* @inode: inode to map
@@ -2089,6 +2074,14 @@ struct dir_context {
*/
#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN)
+/*
+ * These flags control the behavior of vfs_copy_file_range().
+ * They are not available to the user via syscall.
+ *
+ * COPY_FILE_SPLICE: call splice direct instead of fs clone/copy ops
+ */
+#define COPY_FILE_SPLICE (1 << 0)
+
struct iov_iter;
struct io_uring_cmd;
@@ -2142,7 +2135,7 @@ struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *);
int (*permission) (struct user_namespace *, struct inode *, int);
- struct posix_acl * (*get_acl)(struct inode *, int, bool);
+ struct posix_acl * (*get_inode_acl)(struct inode *, int, bool);
int (*readlink) (struct dentry *, char __user *,int);
@@ -2172,7 +2165,9 @@ struct inode_operations {
umode_t create_mode);
int (*tmpfile) (struct user_namespace *, struct inode *,
struct file *, umode_t);
- int (*set_acl)(struct user_namespace *, struct inode *,
+ struct posix_acl *(*get_acl)(struct user_namespace *, struct dentry *,
+ int);
+ int (*set_acl)(struct user_namespace *, struct dentry *,
struct posix_acl *, int);
int (*fileattr_set)(struct user_namespace *mnt_userns,
struct dentry *dentry, struct fileattr *fa);
@@ -2732,18 +2727,22 @@ static inline struct user_namespace *file_mnt_user_ns(struct file *file)
return mnt_user_ns(file->f_path.mnt);
}
+static inline struct mnt_idmap *file_mnt_idmap(struct file *file)
+{
+ return mnt_idmap(file->f_path.mnt);
+}
+
/**
* is_idmapped_mnt - check whether a mount is mapped
* @mnt: the mount to check
*
- * If @mnt has an idmapping attached different from the
- * filesystem's idmapping then @mnt is mapped.
+ * If @mnt has a non-@nop_mnt_idmap attached to it then @mnt is mapped.
*
* Return: true if mount is mapped, false if not.
*/
static inline bool is_idmapped_mnt(const struct vfsmount *mnt)
{
- return mnt_user_ns(mnt) != mnt->mnt_sb->s_user_ns;
+ return mnt_idmap(mnt) != &nop_mnt_idmap;
}
extern long vfs_truncate(const struct path *, loff_t);
@@ -3104,7 +3103,7 @@ extern void __destroy_inode(struct inode *);
extern struct inode *new_inode_pseudo(struct super_block *sb);
extern struct inode *new_inode(struct super_block *sb);
extern void free_inode_nonrcu(struct inode *inode);
-extern int should_remove_suid(struct dentry *);
+extern int setattr_should_drop_suidgid(struct user_namespace *, struct inode *);
extern int file_remove_privs(struct file *);
/*
@@ -3485,7 +3484,7 @@ void simple_transaction_set(struct file *file, size_t n);
* All attributes contain a text representation of a numeric value
* that are accessed with the get() and set() functions.
*/
-#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
+#define DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
__simple_attr_check_format(__fmt, 0ull); \
@@ -3496,10 +3495,16 @@ static const struct file_operations __fops = { \
.open = __fops ## _open, \
.release = simple_attr_release, \
.read = simple_attr_read, \
- .write = simple_attr_write, \
+ .write = (__is_signed) ? simple_attr_write_signed : simple_attr_write, \
.llseek = generic_file_llseek, \
}
+#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
+ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
+
+#define DEFINE_SIMPLE_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
+ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
+
static inline __printf(1, 2)
void __simple_attr_check_format(const char *fmt, ...)
{
@@ -3514,6 +3519,8 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos);
ssize_t simple_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
+ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
struct ctl_table;
int __init list_bdev_fs_names(char *buf, size_t size);
@@ -3527,7 +3534,7 @@ int __init list_bdev_fs_names(char *buf, size_t size);
static inline bool is_sxid(umode_t mode)
{
- return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
+ return mode & (S_ISUID | S_ISGID);
}
static inline int check_sticky(struct user_namespace *mnt_userns,
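
Taken together, the inode_operations changes split ACL handling in two: ->get_inode_acl() keeps the old inode-based signature for permission checking, while the new dentry-based ->get_acl()/->set_acl() back the user-visible ACL API. A sketch of how a filesystem might wire this up (the myfs_* callbacks are placeholders):

    static const struct inode_operations myfs_inode_ops = {
            .get_inode_acl  = myfs_get_inode_acl,   /* (inode, type, rcu) */
            .get_acl        = myfs_get_acl,         /* (mnt_userns, dentry, type) */
            .set_acl        = myfs_set_acl,         /* (mnt_userns, dentry, acl, type) */
    };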
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index 13fa6f3df8e4..5469ffee21c7 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -99,7 +99,7 @@ struct fs_context {
const struct cred *cred; /* The mounter's credentials */
struct p_log log; /* Logging buffer */
const char *source; /* The source name (eg. dev path) */
- void *security; /* Linux S&M options */
+ void *security; /* LSM options */
void *s_fs_info; /* Proposed s_fs_info */
unsigned int sb_flags; /* Proposed superblock flags (SB_*) */
unsigned int sb_flags_mask; /* Superblock flags that were changed */
@@ -145,20 +145,6 @@ extern void fc_drop_locked(struct fs_context *fc);
int reconfigure_single(struct super_block *s,
int flags, void *data);
-/*
- * sget() wrappers to be called from the ->get_tree() op.
- */
-enum vfs_get_super_keying {
- vfs_get_single_super, /* Only one such superblock may exist */
- vfs_get_single_reconf_super, /* As above, but reconfigure if it exists */
- vfs_get_keyed_super, /* Superblocks with different s_fs_info keys may exist */
- vfs_get_independent_super, /* Multiple independent superblocks may exist */
-};
-extern int vfs_get_super(struct fs_context *fc,
- enum vfs_get_super_keying keying,
- int (*fill_super)(struct super_block *sb,
- struct fs_context *fc));
-
extern int get_tree_nodev(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc));
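
With vfs_get_super() and its keying enum gone, filesystems use the remaining get_tree_*() helpers directly; a ->get_tree() that previously asked for an independent superblock reduces to something like the sketch below (myfs_* names are illustrative; filesystems needing keyed or single superblocks would use get_tree_keyed()/get_tree_single() instead):

    static int myfs_get_tree(struct fs_context *fc)
    {
            return get_tree_nodev(fc, myfs_fill_super);
    }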
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
index f103c91139d4..01542c4b87a2 100644
--- a/include/linux/fs_parser.h
+++ b/include/linux/fs_parser.h
@@ -76,6 +76,7 @@ static inline int fs_parse(struct fs_context *fc,
extern int fs_lookup_param(struct fs_context *fc,
struct fs_parameter *param,
bool want_bdev,
+ unsigned int flags,
struct path *_path);
extern int lookup_constant(const struct constant_table tbl[], const char *name, int not_found);
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 36e5dd84cf59..8e312c8323a8 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -75,7 +75,7 @@ struct fscache_volume {
atomic_t n_accesses; /* Number of cache accesses in progress */
unsigned int debug_id;
unsigned int key_hash; /* Hash of key string */
- char *key; /* Volume ID, eg. "afs@example.com@1234" */
+ u8 *key; /* Volume ID, eg. "afs@example.com@1234" */
struct list_head proc_link; /* Link in /proc/fs/fscache/volumes */
struct hlist_bl_node hash_link; /* Link in hash table */
struct work_struct work;
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index cad78b569c7e..4f5f8a651213 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -307,7 +307,7 @@ fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
}
/* keyring.c */
-void fscrypt_sb_delete(struct super_block *sb);
+void fscrypt_destroy_keyring(struct super_block *sb);
int fscrypt_ioctl_add_key(struct file *filp, void __user *arg);
int fscrypt_add_test_dummy_key(struct super_block *sb,
const struct fscrypt_dummy_policy *dummy_policy);
@@ -521,7 +521,7 @@ fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
}
/* keyring.c */
-static inline void fscrypt_sb_delete(struct super_block *sb)
+static inline void fscrypt_destroy_keyring(struct super_block *sb)
{
}
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 62557d4bffc2..99f1146614c0 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -37,9 +37,10 @@ extern void ftrace_boot_snapshot(void);
static inline void ftrace_boot_snapshot(void) { }
#endif
-#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops;
struct ftrace_regs;
+
+#ifdef CONFIG_FUNCTION_TRACER
/*
* If the arch's mcount caller does not support all of ftrace's
* features, then it must call an indirect function that
@@ -110,12 +111,11 @@ struct ftrace_regs {
#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)
/*
- * ftrace_instruction_pointer_set() is to be defined by the architecture
- * if to allow setting of the instruction pointer from the ftrace_regs
- * when HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports
- * live kernel patching.
+ * ftrace_regs_set_instruction_pointer() is to be defined by the architecture
+ * if to allow setting of the instruction pointer from the ftrace_regs when
+ * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports live kernel patching.
*/
-#define ftrace_instruction_pointer_set(fregs, ip) do { } while (0)
+#define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
@@ -126,6 +126,35 @@ static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs
return arch_ftrace_get_regs(fregs);
}
+/*
+ * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs.
+ * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs.
+ */
+static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
+{
+ if (IS_ENABLED(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS))
+ return true;
+
+ return ftrace_get_regs(fregs) != NULL;
+}
+
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+#define ftrace_regs_get_instruction_pointer(fregs) \
+ instruction_pointer(ftrace_get_regs(fregs))
+#define ftrace_regs_get_argument(fregs, n) \
+ regs_get_kernel_argument(ftrace_get_regs(fregs), n)
+#define ftrace_regs_get_stack_pointer(fregs) \
+ kernel_stack_pointer(ftrace_get_regs(fregs))
+#define ftrace_regs_return_value(fregs) \
+ regs_return_value(ftrace_get_regs(fregs))
+#define ftrace_regs_set_return_value(fregs, ret) \
+ regs_set_return_value(ftrace_get_regs(fregs), ret)
+#define ftrace_override_function_with_return(fregs) \
+ override_function_with_return(ftrace_get_regs(fregs))
+#define ftrace_regs_query_register_offset(name) \
+ regs_query_register_offset(name)
+#endif
+
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs);
@@ -427,9 +456,7 @@ static inline int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsi
{
return -ENODEV;
}
-#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
-#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
* This must be implemented by the architecture.
* It is the way the ftrace direct_ops helper, when called
@@ -443,9 +470,9 @@ static inline int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsi
* the return from the trampoline jump to the direct caller
* instead of going back to the function it just traced.
*/
-static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
+static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
unsigned long addr) { }
-#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
#ifdef CONFIG_STACK_TRACER
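
The new ftrace_regs_*() accessors let callbacks read arguments, stack pointer and return value without demanding a full pt_regs. A purely illustrative sketch of an ftrace_ops callback using them:

    static void my_callback(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct ftrace_regs *fregs)
    {
            if (!ftrace_regs_has_args(fregs))
                    return;

            pr_debug("%pS: arg0=%lx sp=%lx\n", (void *)ip,
                     ftrace_regs_get_argument(fregs, 0),
                     ftrace_regs_get_stack_pointer(fregs));
    }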
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index ef4aea3b356e..65a78773dcca 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -210,6 +210,20 @@ alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct p
return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array);
}
+static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
+{
+ gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
+
+ if (warn_gfp != (__GFP_THISNODE|__GFP_NOWARN))
+ return;
+
+ if (node_online(this_node))
+ return;
+
+ pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
+ dump_stack();
+}
+
/*
* Allocate pages, preferring the node given as nid. The node must be valid and
* online. For more general interface, see alloc_pages_node().
@@ -218,7 +232,7 @@ static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
- VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
+ warn_if_node_offline(nid, gfp_mask);
return __alloc_pages(gfp_mask, order, nid, NULL);
}
@@ -227,7 +241,7 @@ static inline
struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
{
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
- VM_WARN_ON((gfp & __GFP_THISNODE) && !node_online(nid));
+ warn_if_node_offline(nid, gfp);
return __folio_alloc(gfp, order, nid, NULL);
}
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 6aeea1071b1b..88ae4513abb5 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -27,7 +27,7 @@ struct gpio_chip;
union gpio_irq_fwspec {
struct irq_fwspec fwspec;
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+#ifdef CONFIG_GENERIC_MSI_IRQ
msi_alloc_info_t msiinfo;
#endif
};
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 3b42264333ef..85f7c5a63aa6 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -969,7 +969,7 @@ struct vmbus_channel {
* mechanism improves throughput by:
*
* A) Making the host more efficient - each time it wakes up,
- * potentially it will process morev number of packets. The
+ * potentially it will process a larger number of packets. The
* monitor latency allows a batch to build up.
* B) By deferring the hypercall to signal, we will also minimize
* the interrupts.
@@ -1341,6 +1341,8 @@ struct hv_ring_buffer_debug_info {
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);
+bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel);
+
/* Vmbus interface */
#define vmbus_driver_register(driver) \
__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 81708ca0ebc7..5a0b2a285a18 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -187,6 +187,15 @@ extern void ima_inode_post_setattr(struct user_namespace *mnt_userns,
struct dentry *dentry);
extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
const void *xattr_value, size_t xattr_value_len);
+extern int ima_inode_set_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *acl_name,
+ struct posix_acl *kacl);
+static inline int ima_inode_remove_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return ima_inode_set_acl(mnt_userns, dentry, acl_name, NULL);
+}
extern int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name);
#else
static inline bool is_ima_appraise_enabled(void)
@@ -208,11 +217,26 @@ static inline int ima_inode_setxattr(struct dentry *dentry,
return 0;
}
+static inline int ima_inode_set_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *acl_name,
+ struct posix_acl *kacl)
+{
+
+ return 0;
+}
+
static inline int ima_inode_removexattr(struct dentry *dentry,
const char *xattr_name)
{
return 0;
}
+
+static inline int ima_inode_remove_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return 0;
+}
#endif /* CONFIG_IMA_APPRAISE */
#if defined(CONFIG_IMA_APPRAISE) && defined(CONFIG_INTEGRITY_TRUSTED_KEYRING)
diff --git a/include/linux/init.h b/include/linux/init.h
index 077d7f93b402..c5fe6d26f5b1 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -2,7 +2,9 @@
#ifndef _LINUX_INIT_H
#define _LINUX_INIT_H
+#include <linux/build_bug.h>
#include <linux/compiler.h>
+#include <linux/stringify.h>
#include <linux/types.h>
/* Built-in __init functions needn't be compiled with retpoline */
@@ -143,6 +145,7 @@ struct file_system_type;
extern int do_one_initcall(initcall_t fn);
extern char __initdata boot_command_line[];
extern char *saved_command_line;
+extern unsigned int saved_command_line_len;
extern unsigned int reset_devices;
/* used by init/main.c */
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 66a774d2710e..09d4f17c8d3b 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -213,7 +213,7 @@ io_mapping_free(struct io_mapping *iomap)
kfree(iomap);
}
-#endif /* _LINUX_IO_MAPPING_H */
-
int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size);
+
+#endif /* _LINUX_IO_MAPPING_H */
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 43bc8a2edccf..0ded9e271523 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -16,6 +16,9 @@ enum io_uring_cmd_flags {
IO_URING_F_SQE128 = 4,
IO_URING_F_CQE32 = 8,
IO_URING_F_IOPOLL = 16,
+
+ /* the request is executed from poll, it should not be freed */
+ IO_URING_F_MULTISHOT = 32,
};
struct io_uring_cmd {
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a325532aeab5..3c9da1f8979e 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -455,7 +455,7 @@ extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
- enum iommu_resv_type type);
+ enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
struct list_head *head);
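
Callers of iommu_alloc_resv_region() now state the allocation context explicitly, so the helper can also be used where sleeping is not allowed. A sketch of a driver's ->get_resv_regions() adding a direct-mapped range (base, length and prot are stand-ins):

    struct iommu_resv_region *region;

    region = iommu_alloc_resv_region(base, length, prot,
                                     IOMMU_RESV_DIRECT, GFP_KERNEL);
    if (!region)
            return;
    list_add_tail(&region->list, head);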
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 27642ca15d93..4ae3c541ea6f 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -318,6 +318,8 @@ extern void __devm_release_region(struct device *dev, struct resource *parent,
resource_size_t start, resource_size_t n);
extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
extern bool iomem_is_exclusive(u64 addr);
+extern bool resource_is_exclusive(struct resource *resource, u64 addr,
+ resource_size_t size);
extern int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 00d577f90883..a372086750ca 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -31,6 +31,7 @@
#define _LINUX_IRQDOMAIN_H
#include <linux/types.h>
+#include <linux/irqdomain_defs.h>
#include <linux/irqhandler.h>
#include <linux/of.h>
#include <linux/mutex.h>
@@ -45,6 +46,7 @@ struct irq_desc;
struct cpumask;
struct seq_file;
struct irq_affinity_desc;
+struct msi_parent_ops;
#define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16
@@ -68,27 +70,6 @@ struct irq_fwspec {
void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
unsigned int count, struct irq_fwspec *fwspec);
-/*
- * Should several domains have the same device node, but serve
- * different purposes (for example one domain is for PCI/MSI, and the
- * other for wired IRQs), they can be distinguished using a
- * bus-specific token. Most domains are expected to only carry
- * DOMAIN_BUS_ANY.
- */
-enum irq_domain_bus_token {
- DOMAIN_BUS_ANY = 0,
- DOMAIN_BUS_WIRED,
- DOMAIN_BUS_GENERIC_MSI,
- DOMAIN_BUS_PCI_MSI,
- DOMAIN_BUS_PLATFORM_MSI,
- DOMAIN_BUS_NEXUS,
- DOMAIN_BUS_IPI,
- DOMAIN_BUS_FSL_MC_MSI,
- DOMAIN_BUS_TI_SCI_INTA_MSI,
- DOMAIN_BUS_WAKEUP,
- DOMAIN_BUS_VMD_MSI,
-};
-
/**
* struct irq_domain_ops - Methods for irq_domain objects
* @match: Match an interrupt controller device node to a host, returns
@@ -137,53 +118,61 @@ struct irq_domain_chip_generic;
/**
* struct irq_domain - Hardware interrupt number translation object
- * @link: Element in global irq_domain list.
- * @name: Name of interrupt domain
- * @ops: pointer to irq_domain methods
- * @host_data: private data pointer for use by owner. Not touched by irq_domain
- * core code.
- * @flags: host per irq_domain flags
- * @mapcount: The number of mapped interrupts
+ * @link: Element in global irq_domain list.
+ * @name: Name of interrupt domain
+ * @ops: Pointer to irq_domain methods
+ * @host_data: Private data pointer for use by owner. Not touched by irq_domain
+ * core code.
+ * @flags: Per irq_domain flags
+ * @mapcount: The number of mapped interrupts
*
- * Optional elements
- * @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy
- * to swap it for the of_node via the irq_domain_get_of_node accessor
- * @gc: Pointer to a list of generic chips. There is a helper function for
- * setting up one or more generic chips for interrupt controllers
- * drivers using the generic chip library which uses this pointer.
- * @dev: Pointer to a device that the domain represent, and that will be
- * used for power management purposes.
- * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
+ * Optional elements:
+ * @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy
+ * to swap it for the of_node via the irq_domain_get_of_node accessor
+ * @gc: Pointer to a list of generic chips. There is a helper function for
+ * setting up one or more generic chips for interrupt controllers
+ * drivers using the generic chip library which uses this pointer.
+ * @dev: Pointer to the device which instantiated the irqdomain
+ * With per device irq domains this is not necessarily the same
+ * as @pm_dev.
+ * @pm_dev: Pointer to a device that can be utilized for power management
+ * purposes related to the irq domain.
+ * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
+ * @msi_parent_ops: Pointer to MSI parent domain methods for per device domain init
*
- * Revmap data, used internally by irq_domain
- * @revmap_size: Size of the linear map table @revmap[]
- * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map
- * @revmap_mutex: Lock for the revmap
- * @revmap: Linear table of irq_data pointers
+ * Revmap data, used internally by the irq domain code:
+ * @revmap_size: Size of the linear map table @revmap[]
+ * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map
+ * @revmap_mutex: Lock for the revmap
+ * @revmap: Linear table of irq_data pointers
*/
struct irq_domain {
- struct list_head link;
- const char *name;
- const struct irq_domain_ops *ops;
- void *host_data;
- unsigned int flags;
- unsigned int mapcount;
+ struct list_head link;
+ const char *name;
+ const struct irq_domain_ops *ops;
+ void *host_data;
+ unsigned int flags;
+ unsigned int mapcount;
/* Optional data */
- struct fwnode_handle *fwnode;
- enum irq_domain_bus_token bus_token;
- struct irq_domain_chip_generic *gc;
- struct device *dev;
+ struct fwnode_handle *fwnode;
+ enum irq_domain_bus_token bus_token;
+ struct irq_domain_chip_generic *gc;
+ struct device *dev;
+ struct device *pm_dev;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
- struct irq_domain *parent;
+ struct irq_domain *parent;
+#endif
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ const struct msi_parent_ops *msi_parent_ops;
#endif
/* reverse map data. The linear map gets appended to the irq_domain */
- irq_hw_number_t hwirq_max;
- unsigned int revmap_size;
- struct radix_tree_root revmap_tree;
- struct mutex revmap_mutex;
- struct irq_data __rcu *revmap[];
+ irq_hw_number_t hwirq_max;
+ unsigned int revmap_size;
+ struct radix_tree_root revmap_tree;
+ struct mutex revmap_mutex;
+ struct irq_data __rcu *revmap[];
};
/* Irq domain flags */
@@ -206,15 +195,14 @@ enum {
/* Irq domain implements MSI remapping */
IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5),
- /*
- * Quirk to handle MSI implementations which do not provide
- * masking. Currently known to affect x86, but partially
- * handled in core code.
- */
- IRQ_DOMAIN_MSI_NOMASK_QUIRK = (1 << 6),
-
/* Irq domain doesn't translate anything */
- IRQ_DOMAIN_FLAG_NO_MAP = (1 << 7),
+ IRQ_DOMAIN_FLAG_NO_MAP = (1 << 6),
+
+ /* Irq domain is a MSI parent domain */
+ IRQ_DOMAIN_FLAG_MSI_PARENT = (1 << 8),
+
+ /* Irq domain is a MSI device domain */
+ IRQ_DOMAIN_FLAG_MSI_DEVICE = (1 << 9),
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
@@ -233,7 +221,7 @@ static inline void irq_domain_set_pm_device(struct irq_domain *d,
struct device *dev)
{
if (d)
- d->dev = dev;
+ d->pm_dev = dev;
}
#ifdef CONFIG_IRQ_DOMAIN
@@ -578,6 +566,16 @@ static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
+static inline bool irq_domain_is_msi_parent(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_MSI_PARENT;
+}
+
+static inline bool irq_domain_is_msi_device(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_MSI_DEVICE;
+}
+
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
unsigned int nr_irqs, int node, void *arg)
@@ -623,6 +621,17 @@ irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
{
return false;
}
+
+static inline bool irq_domain_is_msi_parent(struct irq_domain *domain)
+{
+ return false;
+}
+
+static inline bool irq_domain_is_msi_device(struct irq_domain *domain)
+{
+ return false;
+}
+
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
#else /* CONFIG_IRQ_DOMAIN */
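
The @dev/@pm_dev split means irq_domain_set_pm_device() now records the power-management device separately from the device that instantiated the domain, and the new MSI_PARENT/MSI_DEVICE flags let code query a domain's role. Two short sketches:

    /* irqchip driver: hand the device used for runtime PM to the domain */
    irq_domain_set_pm_device(domain, &pdev->dev);

    /* MSI core paths can now ask what kind of domain they are looking at */
    if (irq_domain_is_msi_parent(parent))
            /* per-device MSI domains may be created underneath */;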
diff --git a/include/linux/irqdomain_defs.h b/include/linux/irqdomain_defs.h
new file mode 100644
index 000000000000..c29921fd8cd1
--- /dev/null
+++ b/include/linux/irqdomain_defs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IRQDOMAIN_DEFS_H
+#define _LINUX_IRQDOMAIN_DEFS_H
+
+/*
+ * Should several domains have the same device node, but serve
+ * different purposes (for example one domain is for PCI/MSI, and the
+ * other for wired IRQs), they can be distinguished using a
+ * bus-specific token. Most domains are expected to only carry
+ * DOMAIN_BUS_ANY.
+ */
+enum irq_domain_bus_token {
+ DOMAIN_BUS_ANY = 0,
+ DOMAIN_BUS_WIRED,
+ DOMAIN_BUS_GENERIC_MSI,
+ DOMAIN_BUS_PCI_MSI,
+ DOMAIN_BUS_PLATFORM_MSI,
+ DOMAIN_BUS_NEXUS,
+ DOMAIN_BUS_IPI,
+ DOMAIN_BUS_FSL_MC_MSI,
+ DOMAIN_BUS_TI_SCI_INTA_MSI,
+ DOMAIN_BUS_WAKEUP,
+ DOMAIN_BUS_VMD_MSI,
+ DOMAIN_BUS_PCI_DEVICE_MSI,
+ DOMAIN_BUS_PCI_DEVICE_MSIX,
+ DOMAIN_BUS_DMAR,
+ DOMAIN_BUS_AMDVI,
+ DOMAIN_BUS_PCI_DEVICE_IMS,
+};
+
+#endif /* _LINUX_IRQDOMAIN_DEFS_H */
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index bd4c066ad39b..d426c7ad92bf 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -3,10 +3,10 @@
#define _LINUX_IRQRETURN_H
/**
- * enum irqreturn
- * @IRQ_NONE interrupt was not from this device or was not handled
- * @IRQ_HANDLED interrupt was handled by this device
- * @IRQ_WAKE_THREAD handler requests to wake the handler thread
+ * enum irqreturn - irqreturn type values
+ * @IRQ_NONE: interrupt was not from this device or was not handled
+ * @IRQ_HANDLED: interrupt was handled by this device
+ * @IRQ_WAKE_THREAD: handler requests to wake the handler thread
*/
enum irqreturn {
IRQ_NONE = (0 << 0),
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 0b7242370b56..2170e0cc279d 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1662,7 +1662,7 @@ int jbd2_fc_begin_commit(journal_t *journal, tid_t tid);
int jbd2_fc_end_commit(journal_t *journal);
int jbd2_fc_end_commit_fallback(journal_t *journal);
int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out);
-int jbd2_submit_inode_data(struct jbd2_inode *jinode);
+int jbd2_submit_inode_data(journal_t *journal, struct jbd2_inode *jinode);
int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode);
int jbd2_fc_wait_bufs(journal_t *journal, int num_blks);
int jbd2_fc_release_bufs(journal_t *journal);
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d811b3d7d2a1..96c9d56e5510 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -302,7 +302,7 @@ static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#ifdef CONFIG_KASAN_GENERIC
-size_t kasan_metadata_size(struct kmem_cache *cache);
+size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
slab_flags_t kasan_never_merge(void);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
slab_flags_t *flags);
@@ -315,7 +315,8 @@ void kasan_record_aux_stack_noalloc(void *ptr);
#else /* CONFIG_KASAN_GENERIC */
/* Tag-based KASAN modes do not use per-object metadata. */
-static inline size_t kasan_metadata_size(struct kmem_cache *cache)
+static inline size_t kasan_metadata_size(struct kmem_cache *cache,
+ bool in_object)
{
return 0;
}
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index 55dc338f6bcd..ee04256f28af 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -56,7 +56,7 @@ static inline void kcov_remote_start_usb(u64 id)
/*
* The softirq flavor of kcov_remote_*() functions is introduced as a temporary
* work around for kcov's lack of nested remote coverage sections support in
- * task context. Adding suport for nested sections is tracked in:
+ * task context. Adding support for nested sections is tracked in:
* https://bugzilla.kernel.org/show_bug.cgi?id=210337
*/
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 41a686996aaa..5dd4343c1bbe 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -17,6 +17,7 @@
#include <linux/crash_core.h>
#include <asm/io.h>
+#include <linux/range.h>
#include <uapi/linux/kexec.h>
#include <linux/verification.h>
@@ -240,14 +241,10 @@ static inline int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN 4096
-struct crash_mem_range {
- u64 start, end;
-};
-
struct crash_mem {
unsigned int max_nr_ranges;
unsigned int nr_ranges;
- struct crash_mem_range ranges[];
+ struct range ranges[];
};
extern int crash_exclude_mem_range(struct crash_mem *mem,
diff --git a/include/linux/kmsan_string.h b/include/linux/kmsan_string.h
new file mode 100644
index 000000000000..7287da6f52ef
--- /dev/null
+++ b/include/linux/kmsan_string.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN string functions API used in other headers.
+ *
+ * Copyright (C) 2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_STRING_H
+#define _LINUX_KMSAN_STRING_H
+
+/*
+ * KMSAN overrides the default memcpy/memset/memmove implementations in the
+ * kernel, which requires having __msan_XXX function prototypes in several other
+ * headers. Keep them in one place instead of open-coding.
+ */
+void *__msan_memcpy(void *dst, const void *src, size_t size);
+void *__msan_memset(void *s, int c, size_t n);
+void *__msan_memmove(void *dest, const void *src, size_t len);
+
+#endif /* _LINUX_KMSAN_STRING_H */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 32f259fa5801..915142abdf76 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -416,7 +416,7 @@ static __always_inline void guest_context_enter_irqoff(void)
*/
if (!context_tracking_guest_enter()) {
instrumentation_begin();
- rcu_virt_note_context_switch(smp_processor_id());
+ rcu_virt_note_context_switch();
instrumentation_end();
}
}
@@ -776,6 +776,7 @@ struct kvm {
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
pid_t userspace_pid;
+ bool override_halt_poll_ns;
unsigned int max_halt_poll_ns;
u32 dirty_ring_size;
bool vm_bugged;
@@ -1240,8 +1241,18 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
/**
- * kvm_gfn_to_pfn_cache_init - prepare a cached kernel mapping and HPA for a
- * given guest physical address.
+ * kvm_gpc_init - initialize gfn_to_pfn_cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ *
+ * This sets up a gfn_to_pfn_cache by initializing locks. Note, the cache must
+ * be zero-allocated (or zeroed by the caller before init).
+ */
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc);
+
+/**
+ * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
+ * physical address.
*
* @kvm: pointer to kvm instance.
* @gpc: struct gfn_to_pfn_cache object.
@@ -1265,9 +1276,9 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
* kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before
* accessing the target page.
*/
-int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
- struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
- gpa_t gpa, unsigned long len);
+int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+ struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+ gpa_t gpa, unsigned long len);
/**
* kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
@@ -1324,7 +1335,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
/**
- * kvm_gfn_to_pfn_cache_destroy - destroy and unlink a gfn_to_pfn_cache.
+ * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
*
* @kvm: pointer to kvm instance.
* @gpc: struct gfn_to_pfn_cache object.
@@ -1332,7 +1343,7 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
* This removes a cache from the @kvm's list to be processed on MMU notifier
* invocation.
*/
-void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
+void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
@@ -1390,6 +1401,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
struct kvm_enable_cap *cap);
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
+long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg);
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
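
The rename also splits construction from activation: kvm_gpc_init() only sets up locks on a zeroed cache, while kvm_gpc_activate()/kvm_gpc_deactivate() manage the mapping itself. A lifecycle sketch (the usage constant is whichever enum pfn_cache_usage value the caller needs; KVM_GUEST_USES_PFN is used here as an assumed example):

    struct gfn_to_pfn_cache gpc = {};       /* must start out zeroed */
    int ret;

    kvm_gpc_init(&gpc);
    ret = kvm_gpc_activate(kvm, &gpc, vcpu, KVM_GUEST_USES_PFN,
                           gpa, PAGE_SIZE);
    if (ret)
            return ret;

    /* ... use the mapping, revalidating with kvm_gfn_to_pfn_cache_check() ... */

    kvm_gpc_deactivate(kvm, &gpc);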
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index c74acfa1a3fe..af38252ad704 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -35,6 +35,11 @@ enum {
NDD_WORK_PENDING = 4,
/* dimm supports namespace labels */
NDD_LABELING = 6,
+ /*
+ * dimm contents have changed requiring invalidation of CPU caches prior
+ * to activation of a region that includes this device
+ */
+ NDD_INCOHERENT = 7,
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
@@ -183,6 +188,8 @@ struct nvdimm_security_ops {
int (*overwrite)(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data);
int (*query_overwrite)(struct nvdimm *nvdimm);
+ int (*disable_master)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data);
};
enum nvdimm_fwa_state {
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index ec119da1d89b..ed6cb2ac55fa 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -145,6 +145,12 @@ LSM_HOOK(int, 0, inode_getxattr, struct dentry *dentry, const char *name)
LSM_HOOK(int, 0, inode_listxattr, struct dentry *dentry)
LSM_HOOK(int, 0, inode_removexattr, struct user_namespace *mnt_userns,
struct dentry *dentry, const char *name)
+LSM_HOOK(int, 0, inode_set_acl, struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *acl_name, struct posix_acl *kacl)
+LSM_HOOK(int, 0, inode_get_acl, struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *acl_name)
+LSM_HOOK(int, 0, inode_remove_acl, struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *acl_name)
LSM_HOOK(int, 0, inode_need_killpriv, struct dentry *dentry)
LSM_HOOK(int, 0, inode_killpriv, struct user_namespace *mnt_userns,
struct dentry *dentry)
@@ -177,6 +183,7 @@ LSM_HOOK(int, 0, file_send_sigiotask, struct task_struct *tsk,
struct fown_struct *fown, int sig)
LSM_HOOK(int, 0, file_receive, struct file *file)
LSM_HOOK(int, 0, file_open, struct file *file)
+LSM_HOOK(int, 0, file_truncate, struct file *file)
LSM_HOOK(int, 0, task_alloc, struct task_struct *task,
unsigned long clone_flags)
LSM_HOOK(void, LSM_RET_VOID, task_free, struct task_struct *task)
@@ -302,7 +309,7 @@ LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname)
LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how)
LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb)
LSM_HOOK(int, 0, socket_getpeersec_stream, struct socket *sock,
- char __user *optval, int __user *optlen, unsigned len)
+ sockptr_t optval, sockptr_t optlen, unsigned int len)
LSM_HOOK(int, 0, socket_getpeersec_dgram, struct socket *sock,
struct sk_buff *skb, u32 *secid)
LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority)
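
The new hooks give security modules first-class ACL events plus a dedicated truncation hook instead of piggybacking on setxattr or open checks. A sketch of a module wiring up file_truncate (the mylsm_* names and the write-check helper are hypothetical):

    static int mylsm_file_truncate(struct file *file)
    {
            /* apply the same policy to truncation as to writes */
            return mylsm_check_write(file);
    }

    static struct security_hook_list mylsm_hooks[] = {
            LSM_HOOK_INIT(file_open,     mylsm_file_open),
            LSM_HOOK_INIT(file_truncate, mylsm_file_truncate),
    };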
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 4ec80b96c22e..0a5ba81f7367 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -92,13 +92,14 @@
* is initialised to NULL by the caller.
* @fc indicates the new filesystem context.
* @src_fc indicates the original filesystem context.
+ * Return 0 on success or a negative error code on failure.
* @fs_context_parse_param:
* Userspace provided a parameter to configure a superblock. The LSM may
* reject it with an error and may use it for itself, in which case it
* should return 0; otherwise it should return -ENOPARAM to pass it on to
* the filesystem.
* @fc indicates the filesystem context.
- * @param The parameter
+ * @param The parameter.
*
* Security hooks for filesystem operations.
*
@@ -118,6 +119,7 @@
* Free memory associated with @mnt_ops.
* @sb_eat_lsm_opts:
* Eat (scan @orig options) and save them in @mnt_opts.
+ * Return 0 on success, negative values on failure.
* @sb_statfs:
* Check permission before obtaining filesystem statistics for the @mnt
* mountpoint.
@@ -136,31 +138,24 @@
* @flags contains the mount flags.
* @data contains the filesystem-specific data.
* Return 0 if permission is granted.
- * @sb_copy_data:
- * Allow mount option data to be copied prior to parsing by the filesystem,
- * so that the security module can extract security-specific mount
- * options cleanly (a filesystem may modify the data e.g. with strsep()).
- * This also allows the original mount data to be stripped of security-
- * specific options to avoid having to make filesystems aware of them.
- * @orig the original mount data copied from userspace.
- * @copy copied data which will be passed to the security module.
- * Returns 0 if the copy was successful.
* @sb_mnt_opts_compat:
* Determine if the new mount options in @mnt_opts are allowed given
* the existing mounted filesystem at @sb.
- * @sb superblock being compared
- * @mnt_opts new mount options
+ * @sb superblock being compared.
+ * @mnt_opts new mount options.
* Return 0 if options are compatible.
* @sb_remount:
* Extracts security system specific mount options and verifies no changes
* are being made to those options.
- * @sb superblock being remounted
+ * @sb superblock being remounted.
* @data contains the filesystem-specific data.
* Return 0 if permission is granted.
* @sb_kern_mount:
- * Mount this @sb if allowed by permissions.
+ * Mount this @sb if allowed by permissions.
+ * Return 0 if permission is granted.
* @sb_show_options:
* Show (print on @m) mount options for this @sb.
+ * Return 0 on success, negative values on failure.
* @sb_umount:
* Check permission before the @mnt file system is unmounted.
* @mnt contains the mounted file system.
@@ -174,31 +169,31 @@
* Return 0 if permission is granted.
* @sb_set_mnt_opts:
* Set the security relevant mount options used for a superblock
- * @sb the superblock to set security mount options for
- * @opts binary data structure containing all lsm mount data
+ * @sb the superblock to set security mount options for.
+ * @opts binary data structure containing all lsm mount data.
+ * Return 0 on success, error on failure.
* @sb_clone_mnt_opts:
* Copy all security options from a given superblock to another
- * @oldsb old superblock which contain information to clone
- * @newsb new superblock which needs filled in
- * @sb_parse_opts_str:
- * Parse a string of security data filling in the opts structure
- * @options string containing all mount options known by the LSM
- * @opts binary data structure usable by the LSM
+ * @oldsb old superblock which contains information to clone.
+ * @newsb new superblock which needs to be filled in.
+ * Return 0 on success, error on failure.
* @move_mount:
* Check permission before a mount is moved.
* @from_path indicates the mount that is going to be moved.
* @to_path indicates the mountpoint that will be mounted upon.
+ * Return 0 if permission is granted.
* @dentry_init_security:
* Compute a context for a dentry as the inode is not yet available
* since NFSv4 has no label backed by an EA anyway.
* @dentry dentry to use in calculating the context.
* @mode mode used to determine resource type.
- * @name name of the last path component used to create file
+ * @name name of the last path component used to create file.
* @xattr_name pointer to place the pointer to security xattr name.
* Caller does not have to free the resulting pointer. Its
* a pointer to static string.
* @ctx pointer to place the pointer to the resulting context in.
* @ctxlen point to place the length of the resulting context.
+ * Return 0 on success, negative values on failure.
* @dentry_create_files_as:
* Compute a context for a dentry as the inode is not yet available
* and set that context in passed in creds so that new files are
@@ -206,9 +201,10 @@
* passed in creds and not the creds of the caller.
* @dentry dentry to use in calculating the context.
* @mode mode used to determine resource type.
- * @name name of the last path component used to create file
- * @old creds which should be used for context calculation
- * @new creds to modify
+ * @name name of the last path component used to create file.
+ * @old creds which should be used for context calculation.
+ * @new creds to modify.
+ * Return 0 on success, error on failure.
*
*
* Security hooks for inode operations.
@@ -236,7 +232,7 @@
* then it should return -EOPNOTSUPP to skip this processing.
* @inode contains the inode structure of the newly created inode.
* @dir contains the inode structure of the parent directory.
- * @qstr contains the last path component of the new object
+ * @qstr contains the last path component of the new object.
* @name will be set to the allocated name suffix (e.g. selinux).
* @value will be set to the allocated attribute value.
* @len will be set to the length of the value.
@@ -247,9 +243,9 @@
* Set up the incore security field for the new anonymous inode
* and return whether the inode creation is permitted by the security
* module or not.
- * @inode contains the inode structure
- * @name name of the anonymous inode class
- * @context_inode optional related inode
+ * @inode contains the inode structure.
+ * @name name of the anonymous inode class.
+ * @context_inode optional related inode.
* Returns 0 on success, -EACCES if the security module denies the
* creation of this inode, or another -errno upon other errors.
* @inode_create:
@@ -365,7 +361,7 @@
* mode is specified in @mode.
* @path contains the path structure of the file to change the mode.
* @mode contains the new DAC's permission, which is a bitmask of
- * constants from <include/uapi/linux/stat.h>
+ * constants from <include/uapi/linux/stat.h>.
* Return 0 if permission is granted.
* @path_chown:
* Check for permission to change owner/group of a file or directory.
@@ -380,6 +376,7 @@
* @path_notify:
* Check permissions before setting a watch on events as defined by @mask,
* on an object at @path, whose type is defined by @obj_type.
+ * Return 0 if permission is granted.
* @inode_readlink:
* Check the permission to read the symbolic link.
* @dentry contains the dentry structure for the file link.
@@ -387,7 +384,7 @@
* @inode_follow_link:
* Check permission to follow a symbolic link when looking up a pathname.
* @dentry contains the dentry structure for the link.
- * @inode contains the inode, which itself is not stable in RCU-walk
+ * @inode contains the inode, which itself is not stable in RCU-walk.
* @rcu indicates whether we are in RCU-walk mode.
* Return 0 if permission is granted.
* @inode_permission:
@@ -409,7 +406,9 @@
* @attr is the iattr structure containing the new file attributes.
* Return 0 if permission is granted.
* @path_truncate:
- * Check permission before truncating a file.
+ * Check permission before truncating the file indicated by path.
+ * Note that truncation permissions may also be checked based on
+ * already opened files, using the @file_truncate hook.
* @path contains the path structure for the file.
* Return 0 if permission is granted.
* @inode_getattr:
@@ -435,13 +434,25 @@
* Check permission before removing the extended attribute
* identified by @name for @dentry.
* Return 0 if permission is granted.
+ * @inode_set_acl:
+ * Check permission before setting posix acls.
+ * The posix acls in @kacl are identified by @acl_name.
+ * Return 0 if permission is granted.
+ * @inode_get_acl:
+ * Check permission before getting posix acls.
+ * The posix acls are identified by @acl_name.
+ * Return 0 if permission is granted.
+ * @inode_remove_acl:
+ * Check permission before removing posix acls.
+ * The posix acls are identified by @acl_name.
+ * Return 0 if permission is granted.
* @inode_getsecurity:
* Retrieve a copy of the extended attribute representation of the
* security label associated with @name for @inode via @buffer. Note that
* @name is the remainder of the attribute name after the security prefix
- * has been removed. @alloc is used to specify of the call should return a
- * value via the buffer or just the value length Return size of buffer on
- * success.
+ * has been removed. @alloc is used to specify if the call should return a
+ * value via the buffer or just the value length.
+ * Return size of buffer on success.
* @inode_setsecurity:
* Set the security label associated with @name for @inode from the
* extended attribute value @value. @size indicates the size of the
@@ -464,7 +475,7 @@
* @inode_killpriv:
* The setuid bit is being removed. Remove similar security labels.
* Called with the dentry->d_inode->i_mutex held.
- * @mnt_userns: user namespace of the mount
+ * @mnt_userns: user namespace of the mount.
* @dentry is the dentry being changed.
* Return 0 on success. If error is returned, then the operation
* causing setuid bit removal is failed.
@@ -491,20 +502,22 @@
* to abort the copy up. Note that the caller is responsible for reading
* and writing the xattrs as this hook is merely a filter.
* @d_instantiate:
- * Fill in @inode security information for a @dentry if allowed.
+ * Fill in @inode security information for a @dentry if allowed.
* @getprocattr:
- * Read attribute @name for process @p and store it into @value if allowed.
+ * Read attribute @name for process @p and store it into @value if allowed.
+ * Return the length of @value on success, a negative value otherwise.
* @setprocattr:
- * Write (set) attribute @name to @value, size @size if allowed.
+ * Write (set) attribute @name to @value, size @size if allowed.
+ * Return written bytes on success, a negative value otherwise.
*
* Security hooks for kernfs node operations
*
* @kernfs_init_security:
* Initialize the security context of a newly created kernfs node based
* on its own and its parent's attributes.
- *
- * @kn_dir the parent kernfs node
- * @kn the new child kernfs node
+ * @kn_dir the parent kernfs node.
+ * @kn the new child kernfs node.
+ * Return 0 if permission is granted.
*
* Security hooks for file operations
*
@@ -543,11 +556,11 @@
* simple integer value. When @arg represents a user space pointer, it
* should never be used by the security module.
* Return 0 if permission is granted.
- * @mmap_addr :
+ * @mmap_addr:
* Check permissions for a mmap operation at @addr.
* @addr contains virtual address that will be used for the operation.
* Return 0 if permission is granted.
- * @mmap_file :
+ * @mmap_file:
* Check permissions for a mmap operation. The @file may be NULL, e.g.
* if mapping anonymous memory.
* @file contains the file structure for file to map (may be NULL).
@@ -598,10 +611,17 @@
* to receive an open file descriptor via socket IPC.
* @file contains the file structure being received.
* Return 0 if permission is granted.
+ * @file_truncate:
+ * Check permission before truncating a file, i.e. using ftruncate.
+ * Note that truncation permission may also be checked based on the path,
+ * using the @path_truncate hook.
+ * @file contains the file structure for the file.
+ * Return 0 if permission is granted.
* @file_open:
* Save open-time permission checking state for later use upon
* file_permission, and recheck access if anything has changed
* since inode_permission.
+ * Return 0 if permission is granted.
*
* Security hooks for task operations.
*
@@ -619,6 +639,7 @@
* @gfp indicates the atomicity of any memory allocations.
* Only allocate sufficient memory and attach to @cred such that
* cred_transfer() will not get ENOMEM.
+ * Return 0 on success, negative values on failure.
* @cred_free:
* @cred points to the credentials.
* Deallocate and clear the cred->security field in a set of credentials.
@@ -627,6 +648,7 @@
* @old points to the original credentials.
* @gfp indicates the atomicity of any memory allocations.
* Prepare a new set of credentials by copying the data from the old set.
+ * Return 0 on success, negative values on failure.
* @cred_transfer:
* @new points to the new credentials.
* @old points to the original credentials.
@@ -638,7 +660,7 @@
* @kernel_act_as:
* Set the credentials for a kernel service to act as (subjective context).
* @new points to the credentials to be modified.
- * @secid specifies the security ID to be set
+ * @secid specifies the security ID to be set.
* The current task must be the one that nominated @secid.
* Return 0 if successful.
* @kernel_create_files_as:
@@ -651,19 +673,19 @@
* @kernel_module_request:
* Ability to trigger the kernel to automatically upcall to userspace for
* userspace to load a kernel module with the given name.
- * @kmod_name name of the module requested by the kernel
+ * @kmod_name name of the module requested by the kernel.
* Return 0 if successful.
* @kernel_load_data:
* Load data provided by userspace.
- * @id kernel load data identifier
+ * @id kernel load data identifier.
* @contents if a subsequent @kernel_post_load_data will be called.
* Return 0 if permission is granted.
* @kernel_post_load_data:
* Load data provided by a non-file source (usually userspace buffer).
* @buf pointer to buffer containing the data contents.
* @size length of the data contents.
- * @id kernel load data identifier
- * @description a text description of what was loaded, @id-specific
+ * @id kernel load data identifier.
+ * @description a text description of what was loaded, @id-specific.
* Return 0 if permission is granted.
* This must be paired with a prior @kernel_load_data call that had
* @contents set to true.
@@ -671,7 +693,7 @@
* Read a file specified by userspace.
* @file contains the file structure pointing to the file being read
* by the kernel.
- * @id kernel read file identifier
+ * @id kernel read file identifier.
* @contents if a subsequent @kernel_post_read_file will be called.
* Return 0 if permission is granted.
* @kernel_post_read_file:
@@ -680,7 +702,7 @@
* by the kernel.
* @buf pointer to buffer containing the file contents.
* @size length of the file contents.
- * @id kernel read file identifier
+ * @id kernel read file identifier.
* This must be paired with a prior @kernel_read_file call that had
* @contents set to true.
* Return 0 if permission is granted.
@@ -690,7 +712,7 @@
* indicates which of the set*uid system calls invoked this hook. If
* @new is the set of credentials that will be installed. Modifications
* should be made to this rather than to @current->cred.
- * @old is the set of credentials that are being replaces
+ * @old is the set of credentials that are being replaced.
* @flags contains one of the LSM_SETID_* values.
* Return 0 on success.
* @task_fix_setgid:
@@ -742,7 +764,7 @@
* @task_setioprio:
* Check permission before setting the ioprio value of @p to @ioprio.
* @p contains the task_struct of process.
- * @ioprio contains the new ioprio value
+ * @ioprio contains the new ioprio value.
* Return 0 if permission is granted.
* @task_getioprio:
* Check permission before getting the ioprio value of @p.
@@ -873,6 +895,7 @@
* @type contains the requested communications type.
* @protocol contains the requested protocol.
* @kern set to 1 if a kernel socket.
+ * Return 0 if permission is granted.
* @socket_socketpair:
* Check permissions before creating a fresh pair of sockets.
* @socka contains the first socket structure.
@@ -956,14 +979,15 @@
* Must not sleep inside this hook because some callers hold spinlocks.
* @sk contains the sock (not socket) associated with the incoming sk_buff.
* @skb contains the incoming network data.
+ * Return 0 if permission is granted.
* @socket_getpeersec_stream:
* This hook allows the security module to provide peer socket security
* state for unix or connected tcp sockets to userspace via getsockopt
* SO_GETPEERSEC. For tcp sockets this can be meaningful if the
* socket is associated with an ipsec SA.
* @sock is the local socket.
- * @optval userspace memory where the security state is to be copied.
- * @optlen userspace int where the module should copy the actual length
+ * @optval memory where the security state is to be copied.
+ * @optlen memory where the module should copy the actual length
* of the security state.
* @len as input is the maximum length to copy to userspace provided
* by the caller.
@@ -983,6 +1007,7 @@
* @sk_alloc_security:
* Allocate and attach a security structure to the sk->sk_security field,
* which is used to copy security attributes between local stream sockets.
+ * Return 0 on success, error on failure.
* @sk_free_security:
* Deallocate security structure.
* @sk_clone_security:
@@ -995,17 +1020,19 @@
* @inet_conn_request:
* Sets the openreq's sid to socket's sid with MLS portion taken
* from peer sid.
+ * Return 0 if permission is granted.
* @inet_csk_clone:
* Sets the new child socket's sid to the openreq sid.
* @inet_conn_established:
* Sets the connection's peersid to the secmark on skb.
* @secmark_relabel_packet:
- * check if the process should be allowed to relabel packets to
- * the given secid
+ * Check if the process should be allowed to relabel packets to
+ * the given secid.
+ * Return 0 if permission is granted.
* @secmark_refcount_inc:
- * tells the LSM to increment the number of secmark labeling rules loaded
+ * Tells the LSM to increment the number of secmark labeling rules loaded.
* @secmark_refcount_dec:
- * tells the LSM to decrement the number of secmark labeling rules loaded
+ * Tells the LSM to decrement the number of secmark labeling rules loaded.
* @req_classify_flow:
* Sets the flow's sid to the openreq sid.
* @tun_dev_alloc_security:
@@ -1016,21 +1043,25 @@
* @tun_dev_free_security:
* This hook allows a module to free the security structure for a TUN
* device.
- * @security pointer to the TUN device's security structure
+ * @security pointer to the TUN device's security structure.
* @tun_dev_create:
* Check permissions prior to creating a new TUN device.
+ * Return 0 if permission is granted.
* @tun_dev_attach_queue:
* Check permissions prior to attaching to a TUN device queue.
* @security pointer to the TUN device's security structure.
+ * Return 0 if permission is granted.
* @tun_dev_attach:
* This hook can be used by the module to update any security state
* associated with the TUN device's sock structure.
* @sk contains the existing sock structure.
* @security pointer to the TUN device's security structure.
+ * Return 0 if permission is granted.
* @tun_dev_open:
* This hook can be used by the module to update any security state
* associated with the TUN device's security structure.
* @security pointer to the TUN devices's security structure.
+ * Return 0 if permission is granted.
*
* Security hooks for SCTP
*
@@ -1063,6 +1094,7 @@
* to the security module.
* @asoc pointer to sctp association structure.
* @skb pointer to skbuff of association packet.
+ * Return 0 if permission is granted.
*
* Security hooks for Infiniband
*
@@ -1071,15 +1103,17 @@
* @subnet_prefix the subnet prefix of the port being used.
* @pkey the pkey to be accessed.
* @sec pointer to a security structure.
+ * Return 0 if permission is granted.
* @ib_endport_manage_subnet:
* Check permissions to send and receive SMPs on a end port.
* @dev_name the IB device name (i.e. mlx4_0).
* @port_num the port number.
* @sec pointer to a security structure.
+ * Return 0 if permission is granted.
* @ib_alloc_security:
* Allocate a security structure for Infiniband objects.
* @sec pointer to a security structure pointer.
- * Returns 0 on success, non-zero on failure
+ * Returns 0 on success, non-zero on failure.
* @ib_free_security:
* Deallocate an Infiniband security structure.
* @sec contains the security structure to be freed.
@@ -1091,10 +1125,11 @@
* Database used by the XFRM system.
* @sec_ctx contains the security context information being provided by
* the user-level policy update program (e.g., setkey).
+ * @gfp is to specify the context for the allocation.
* Allocate a security structure to the xp->security field; the security
* field is initialized to NULL when the xfrm_policy is allocated.
- * Return 0 if operation was successful (memory to allocate, legal context)
- * @gfp is to specify the context for the allocation
+ * Return 0 if operation was successful (memory to allocate, legal
+ * context).
* @xfrm_policy_clone_security:
* @old_ctx contains an existing xfrm_sec_ctx.
* @new_ctxp contains a new xfrm_sec_ctx being cloned from old.
@@ -1102,11 +1137,12 @@
* information from the old_ctx structure.
* Return 0 if operation was successful (memory to allocate).
* @xfrm_policy_free_security:
- * @ctx contains the xfrm_sec_ctx
+ * @ctx contains the xfrm_sec_ctx.
* Deallocate xp->security.
* @xfrm_policy_delete_security:
* @ctx contains the xfrm_sec_ctx.
* Authorize deletion of xp->security.
+ * Return 0 if permission is granted.
* @xfrm_state_alloc:
* @x contains the xfrm_state being added to the Security Association
* Database by the XFRM system.
@@ -1132,6 +1168,7 @@
* @xfrm_state_delete_security:
* @x contains the xfrm_state.
* Authorize deletion of x->security.
+ * Return 0 if permission is granted.
* @xfrm_policy_lookup:
* @ctx contains the xfrm_sec_ctx for which the access control is being
* checked.
@@ -1160,7 +1197,7 @@
* Permit allocation of a key and assign security data. Note that key does
* not have a serial number assigned at this point.
* @key points to the key.
- * @flags is the allocation flags
+ * @flags is the allocation flags.
* Return 0 if permission is granted, -ve error otherwise.
* @key_free:
* Notification of destruction; free security data.
@@ -1190,8 +1227,8 @@
*
* @ipc_permission:
* Check permissions for access to IPC
- * @ipcp contains the kernel IPC permission structure
- * @flag contains the desired (requested) permission set
+ * @ipcp contains the kernel IPC permission structure.
+ * @flag contains the desired (requested) permission set.
* Return 0 if permission is granted.
* @ipc_getsecid:
* Get the secid associated with the ipc object.
@@ -1336,15 +1373,18 @@
* to @to.
* @from contains the struct cred for the sending process.
* @to contains the struct cred for the receiving process.
+ * Return 0 if permission is granted.
* @binder_transfer_binder:
* Check whether @from is allowed to transfer a binder reference to @to.
* @from contains the struct cred for the sending process.
* @to contains the struct cred for the receiving process.
+ * Return 0 if permission is granted.
* @binder_transfer_file:
* Check whether @from is allowed to transfer @file to @to.
* @from contains the struct cred for the sending process.
* @file contains the struct file being transferred.
* @to contains the struct cred for the receiving process.
+ * Return 0 if permission is granted.
*
* @ptrace_access_check:
* Check permission before allowing the current process to trace the
@@ -1386,32 +1426,39 @@
* Check whether the @tsk process has the @cap capability in the indicated
* credentials.
* @cred contains the credentials to use.
- * @ns contains the user namespace we want the capability in
+ * @ns contains the user namespace we want the capability in.
* @cap contains the capability <include/linux/capability.h>.
- * @opts contains options for the capable check <include/linux/security.h>
+ * @opts contains options for the capable check <include/linux/security.h>.
* Return 0 if the capability is granted for @tsk.
* @quotactl:
- * Check whether the quotactl syscall is allowed for this @sb.
+ * Check whether the quotactl syscall is allowed for this @sb.
+ * Return 0 if permission is granted.
* @quota_on:
- * Check whether QUOTAON is allowed for this @dentry.
+ * Check whether QUOTAON is allowed for this @dentry.
+ * Return 0 if permission is granted.
* @syslog:
* Check permission before accessing the kernel message ring or changing
* logging to the console.
* See the syslog(2) manual page for an explanation of the @type values.
- * @type contains the SYSLOG_ACTION_* constant from <include/linux/syslog.h>
+ * @type contains the SYSLOG_ACTION_* constant from
+ * <include/linux/syslog.h>.
* Return 0 if permission is granted.
* @settime:
* Check permission to change the system time.
* struct timespec64 is defined in <include/linux/time64.h> and timezone
* is defined in <include/linux/time.h>
- * @ts contains new time
- * @tz contains new timezone
+ * @ts contains new time.
+ * @tz contains new timezone.
* Return 0 if permission is granted.
* @vm_enough_memory:
* Check permissions for allocating a new virtual mapping.
* @mm contains the mm struct it is being added to.
* @pages contains the number of pages.
- * Return 0 if permission is granted.
+ * Return 0 if permission is granted by the LSM infrastructure to the
+ * caller. If all LSMs return a positive value, __vm_enough_memory() will
+ * be called with cap_sys_admin set. If at least one LSM returns 0 or
+ * negative, __vm_enough_memory() will be called with cap_sys_admin
+ * cleared.
*
* @ismaclabel:
* Check if the extended attribute specified by @name
@@ -1429,11 +1476,13 @@
* @secid contains the security ID.
* @secdata contains the pointer that stores the converted security
* context.
- * @seclen pointer which contains the length of the data
+ * @seclen pointer which contains the length of the data.
+ * Return 0 on success, error on failure.
* @secctx_to_secid:
* Convert security context to secid.
* @secid contains the pointer to the generated security ID.
* @secdata contains the security context.
+ * Return 0 on success, error on failure.
*
* @release_secctx:
* Release the security context.
@@ -1470,7 +1519,7 @@
* @audit_rule_free:
* Deallocate the LSM audit rule structure previously allocated by
* audit_rule_init.
- * @lsmrule contains the allocated rule
+ * @lsmrule contains the allocated rule.
*
* @inode_invalidate_secctx:
* Notify the security module that it must revalidate the security context
@@ -1487,6 +1536,7 @@
* @inode we wish to set the security context of.
* @ctx contains the string which we wish to set in the inode.
* @ctxlen contains the length of @ctx.
+ * Return 0 on success, error on failure.
*
* @inode_setsecctx:
* Change the security context of an inode. Updates the
@@ -1500,6 +1550,7 @@
* @dentry contains the inode we wish to set the security context of.
* @ctx contains the string which we wish to set in the inode.
* @ctxlen contains the length of @ctx.
+ * Return 0 on success, error on failure.
*
* @inode_getsecctx:
* On success, returns 0 and fills out @ctx and @ctxlen with the security
@@ -1507,6 +1558,7 @@
* @inode we wish to get the security context of.
* @ctx is a pointer in which to place the allocated security context.
* @ctxlen points to the place to put the length of @ctx.
+ * Return 0 on success, error on failure.
*
* Security hooks for the general notification queue:
*
@@ -1514,13 +1566,15 @@
* Check to see if a watch notification can be posted to a particular
* queue.
* @w_cred: The credentials of the whoever set the watch.
- * @cred: The event-triggerer's credentials
- * @n: The notification being posted
+ * @cred: The event-triggerer's credentials.
+ * @n: The notification being posted.
+ * Return 0 if permission is granted.
*
* @watch_key:
* Check to see if a process is allowed to watch for event notifications
* from a key or keyring.
* @key: The key to watch.
+ * Return 0 if permission is granted.
*
* Security hooks for using the eBPF maps and programs functionalities through
* eBPF syscalls.
@@ -1529,65 +1583,74 @@
* Do a initial check for all bpf syscalls after the attribute is copied
* into the kernel. The actual security module can implement their own
* rules to check the specific cmd they need.
+ * Return 0 if permission is granted.
*
* @bpf_map:
* Do a check when the kernel generate and return a file descriptor for
* eBPF maps.
- *
- * @map: bpf map that we want to access
- * @mask: the access flags
+ * @map: bpf map that we want to access.
+ * @mask: the access flags.
+ * Return 0 if permission is granted.
*
* @bpf_prog:
* Do a check when the kernel generate and return a file descriptor for
* eBPF programs.
- *
* @prog: bpf prog that userspace want to use.
+ * Return 0 if permission is granted.
*
* @bpf_map_alloc_security:
* Initialize the security field inside bpf map.
+ * Return 0 on success, error on failure.
*
* @bpf_map_free_security:
* Clean up the security information stored inside bpf map.
*
* @bpf_prog_alloc_security:
* Initialize the security field inside bpf program.
+ * Return 0 on success, error on failure.
*
* @bpf_prog_free_security:
* Clean up the security information stored inside bpf prog.
*
* @locked_down:
- * Determine whether a kernel feature that potentially enables arbitrary
- * code execution in kernel space should be permitted.
- *
- * @what: kernel feature being accessed
+ * Determine whether a kernel feature that potentially enables arbitrary
+ * code execution in kernel space should be permitted.
+ * @what: kernel feature being accessed.
+ * Return 0 if permission is granted.
*
* Security hooks for perf events
*
* @perf_event_open:
- * Check whether the @type of perf_event_open syscall is allowed.
+ * Check whether the @type of perf_event_open syscall is allowed.
+ * Return 0 if permission is granted.
* @perf_event_alloc:
- * Allocate and save perf_event security info.
+ * Allocate and save perf_event security info.
+ * Return 0 on success, error on failure.
* @perf_event_free:
- * Release (free) perf_event security info.
+ * Release (free) perf_event security info.
* @perf_event_read:
- * Read perf_event security info if allowed.
+ * Read perf_event security info if allowed.
+ * Return 0 if permission is granted.
* @perf_event_write:
- * Write perf_event security info if allowed.
+ * Write perf_event security info if allowed.
+ * Return 0 if permission is granted.
*
* Security hooks for io_uring
*
* @uring_override_creds:
- * Check if the current task, executing an io_uring operation, is allowed
- * to override it's credentials with @new.
- *
- * @new: the new creds to use
+ * Check if the current task, executing an io_uring operation, is allowed
+ * to override its credentials with @new.
+ * @new: the new creds to use.
+ * Return 0 if permission is granted.
*
* @uring_sqpoll:
- * Check whether the current task is allowed to spawn a io_uring polling
- * thread (IORING_SETUP_SQPOLL).
+ * Check whether the current task is allowed to spawn an io_uring polling
+ * thread (IORING_SETUP_SQPOLL).
+ * Return 0 if permission is granted.
*
* @uring_cmd:
- * Check whether the file_operations uring_cmd is allowed to run.
+ * Check whether the file_operations uring_cmd is allowed to run.
+ * Return 0 if permission is granted.
*
*/
union security_list_options {
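The reworked comment block above repeatedly spells out the convention that 0 grants permission and a negative errno denies, including for the new @file_truncate hook. A minimal sketch of wiring one such hook into an LSM follows, assuming the usual LSM_HOOK_INIT()/security_add_hooks() registration with the 6.1-era three-argument signature; the "demo" name and its trivial policy are illustrative only, and the DEFINE_LSM boilerplate that normally invokes the init function is omitted.

#include <linux/lsm_hooks.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/fs.h>

/* Grant ftruncate() on regular files only; any negative errno denies. */
static int demo_file_truncate(struct file *file)
{
	if (S_ISREG(file_inode(file)->i_mode))
		return 0;
	return -EPERM;
}

static struct security_hook_list demo_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(file_truncate, demo_file_truncate),
};

static int __init demo_lsm_init(void)
{
	/* Normally referenced from a DEFINE_LSM() block (omitted here). */
	security_add_hooks(demo_hooks, ARRAY_SIZE(demo_hooks), "demo");
	return 0;
}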
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 2effab72add1..e594db58a0f1 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -638,6 +638,12 @@ static inline void mt_set_in_rcu(struct maple_tree *mt)
}
}
+static inline unsigned int mt_height(const struct maple_tree *mt)
+{
+ return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
+}
+
void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
void *mt_find_after(struct maple_tree *mt, unsigned long *index,
unsigned long max);
@@ -664,6 +670,7 @@ extern atomic_t maple_tree_tests_passed;
void mt_dump(const struct maple_tree *mt);
void mt_validate(struct maple_tree *mt);
+void mt_cache_shrink(void);
#define MT_BUG_ON(__tree, __x) do { \
atomic_inc(&maple_tree_tests_run); \
if (__x) { \
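mt_height() above simply decodes the height bits kept in ma_flags. A quick sketch of reading it around a store, assuming the regular DEFINE_MTREE()/mtree_store_range() API is available to the caller; the stored payload and the range are arbitrary.

#include <linux/maple_tree.h>
#include <linux/printk.h>
#include <linux/init.h>
#include <linux/gfp.h>

static DEFINE_MTREE(demo_tree);
static int demo_payload;

static int __init demo_mt_height(void)
{
	int ret;

	/* An empty tree has no nodes, so the encoded height is 0. */
	pr_info("height before: %u\n", mt_height(&demo_tree));

	/* Store one range; this allocates at least a root node. */
	ret = mtree_store_range(&demo_tree, 0, 15, &demo_payload, GFP_KERNEL);
	if (ret)
		return ret;

	pr_info("height after: %u\n", mt_height(&demo_tree));

	mtree_destroy(&demo_tree);
	return 0;
}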
diff --git a/include/linux/math64.h b/include/linux/math64.h
index a14f40de1dca..8958f4c005c1 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -29,7 +29,7 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
return dividend / divisor;
}
-/*
+/**
* div_s64_rem - signed 64bit divide with 32bit divisor with remainder
* @dividend: signed 64bit dividend
* @divisor: signed 32bit divisor
@@ -43,7 +43,7 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
return dividend / divisor;
}
-/*
+/**
* div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
* @dividend: unsigned 64bit dividend
* @divisor: unsigned 64bit divisor
@@ -57,7 +57,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
return dividend / divisor;
}
-/*
+/**
* div64_u64 - unsigned 64bit divide with 64bit divisor
* @dividend: unsigned 64bit dividend
* @divisor: unsigned 64bit divisor
@@ -69,7 +69,7 @@ static inline u64 div64_u64(u64 dividend, u64 divisor)
return dividend / divisor;
}
-/*
+/**
* div64_s64 - signed 64bit divide with 64bit divisor
* @dividend: signed 64bit dividend
* @divisor: signed 64bit divisor
@@ -120,6 +120,8 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
* This is the most common 64bit divide and should be used if possible,
* as many 32bit archs can optimize this variant better than a full 64bit
* divide.
+ *
+ * Return: dividend / divisor
*/
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
@@ -133,6 +135,8 @@ static inline u64 div_u64(u64 dividend, u32 divisor)
* div_s64 - signed 64bit divide with 32bit divisor
* @dividend: signed 64bit dividend
* @divisor: signed 32bit divisor
+ *
+ * Return: dividend / divisor
*/
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
@@ -284,6 +288,16 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
+/**
+ * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
+ * @ll: unsigned 64bit dividend
+ * @d: unsigned 64bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 64bit divisor
+ * and round up.
+ *
+ * Return: dividend / divisor rounded up
+ */
#define DIV64_U64_ROUND_UP(ll, d) \
({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
@@ -300,7 +314,7 @@ u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
#define DIV64_U64_ROUND_CLOSEST(dividend, divisor) \
({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })
-/*
+/**
* DIV_U64_ROUND_CLOSEST - unsigned 64bit divide with 32bit divisor rounded to nearest integer
* @dividend: unsigned 64bit dividend
* @divisor: unsigned 32bit divisor
@@ -313,7 +327,7 @@ u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
#define DIV_U64_ROUND_CLOSEST(dividend, divisor) \
({ u32 _tmp = (divisor); div_u64((u64)(dividend) + _tmp / 2, _tmp); })
-/*
+/**
* DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
* @dividend: signed 64bit dividend
* @divisor: signed 32bit divisor
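The new kernel-doc above covers the rounding helpers; a short sketch of the rounding behaviour, assuming nothing beyond <linux/math64.h>. The block-size and averaging uses are illustrative.

#include <linux/math64.h>

static u64 demo_bytes_to_blocks(u64 bytes, u64 block_size)
{
	/* Rounds up: 4097 bytes with 4096-byte blocks needs 2 blocks. */
	return DIV64_U64_ROUND_UP(bytes, block_size);
}

static u64 demo_average(u64 total, u32 samples)
{
	/* Rounds to nearest: DIV_U64_ROUND_CLOSEST(7, 2) == 4. */
	return DIV_U64_ROUND_CLOSEST(total, samples);
}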
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 2da63fd7b98f..97e64184767d 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -10,6 +10,12 @@
struct mb_cache;
+/* Cache entry flags */
+enum {
+ MBE_REFERENCED_B = 0,
+ MBE_REUSABLE_B
+};
+
struct mb_cache_entry {
/* List of entries in cache - protected by cache->c_list_lock */
struct list_head e_list;
@@ -26,8 +32,7 @@ struct mb_cache_entry {
atomic_t e_refcnt;
/* Key in hash - stable during lifetime of the entry */
u32 e_key;
- u32 e_referenced:1;
- u32 e_reusable:1;
+ unsigned long e_flags;
/* User provided value - stable during lifetime of the entry */
u64 e_value;
};
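The mbcache change above folds the two one-bit bitfields into a single unsigned long e_flags addressed via the new MBE_*_B bit numbers, so they can be updated with the standard atomic bitops. A hedged sketch of how a user of the cache might test and set them under that assumption; the helper names are hypothetical, not existing mbcache API.

#include <linux/mbcache.h>
#include <linux/bitops.h>

/* Mark an entry as recently used so the shrinker gives it a second pass. */
static void demo_touch_entry(struct mb_cache_entry *entry)
{
	set_bit(MBE_REFERENCED_B, &entry->e_flags);
}

/* Only hand out entries that may still be shared with new users. */
static bool demo_entry_is_reusable(struct mb_cache_entry *entry)
{
	return test_bit(MBE_REUSABLE_B, &entry->e_flags);
}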
diff --git a/include/linux/memregion.h b/include/linux/memregion.h
index c04c4fd2e209..bf83363807ac 100644
--- a/include/linux/memregion.h
+++ b/include/linux/memregion.h
@@ -3,6 +3,7 @@
#define _MEMREGION_H_
#include <linux/types.h>
#include <linux/errno.h>
+#include <linux/bug.h>
struct memregion_info {
int target_node;
@@ -20,4 +21,41 @@ static inline void memregion_free(int id)
{
}
#endif
+
+/**
+ * cpu_cache_invalidate_memregion - drop any CPU cached data for
+ * memregions described by @res_desc
+ * @res_desc: one of the IORES_DESC_* types
+ *
+ * Perform cache maintenance after a memory event / operation that
+ * changes the contents of physical memory in a cache-incoherent manner.
+ * For example, device memory technologies like NVDIMM and CXL have
+ * device secure erase, and dynamic region provision that can replace
+ * the memory mapped to a given physical address.
+ *
+ * Limit the functionality to architectures that have an efficient way
+ * to writeback and invalidate potentially terabytes of address space at
+ * once. Note that this routine may or may not write back any dirty
+ * contents while performing the invalidation. It is only exported for
+ * the explicit usage of the NVDIMM and CXL modules in the 'DEVMEM'
+ * symbol namespace on bare platforms.
+ *
+ * Returns 0 on success or negative error code on a failure to perform
+ * the cache maintenance.
+ */
+#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
+int cpu_cache_invalidate_memregion(int res_desc);
+bool cpu_cache_has_invalidate_memregion(void);
+#else
+static inline bool cpu_cache_has_invalidate_memregion(void)
+{
+ return false;
+}
+
+static inline int cpu_cache_invalidate_memregion(int res_desc)
+{
+ WARN_ON_ONCE("CPU cache invalidation required");
+ return -ENXIO;
+}
+#endif
#endif /* _MEMREGION_H_ */
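cpu_cache_invalidate_memregion() is documented above as the cache-maintenance step after an erase or re-provision event. A hedged sketch of the guard-then-invalidate pattern an NVDIMM/CXL-style driver might use, assuming IORES_DESC_PERSISTENT_MEMORY as the resource descriptor; the surrounding function is hypothetical.

#include <linux/memregion.h>
#include <linux/ioport.h>
#include <linux/errno.h>

static int demo_secure_erase_done(void)
{
	/*
	 * Refuse the operation if the architecture cannot flush the
	 * affected physical range, rather than serving stale cache
	 * lines afterwards.
	 */
	if (!cpu_cache_has_invalidate_memregion())
		return -EOPNOTSUPP;

	return cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
}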
diff --git a/include/linux/minmax.h b/include/linux/minmax.h
index 5433c08fcc68..396df1121bff 100644
--- a/include/linux/minmax.h
+++ b/include/linux/minmax.h
@@ -37,6 +37,28 @@
__cmp(x, y, op), \
__cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
+#define __clamp(val, lo, hi) \
+ ((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
+
+#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \
+ typeof(val) unique_val = (val); \
+ typeof(lo) unique_lo = (lo); \
+ typeof(hi) unique_hi = (hi); \
+ __clamp(unique_val, unique_lo, unique_hi); })
+
+#define __clamp_input_check(lo, hi) \
+ (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
+ __is_constexpr((lo) > (hi)), (lo) > (hi), false)))
+
+#define __careful_clamp(val, lo, hi) ({ \
+ __clamp_input_check(lo, hi) + \
+ __builtin_choose_expr(__typecheck(val, lo) && __typecheck(val, hi) && \
+ __typecheck(hi, lo) && __is_constexpr(val) && \
+ __is_constexpr(lo) && __is_constexpr(hi), \
+ __clamp(val, lo, hi), \
+ __clamp_once(val, lo, hi, __UNIQUE_ID(__val), \
+ __UNIQUE_ID(__lo), __UNIQUE_ID(__hi))); })
+
/**
* min - return minimum of two values of the same or compatible types
* @x: first value
@@ -86,7 +108,7 @@
* This macro does strict typechecking of @lo/@hi to make sure they are of the
* same type as @val. See the unnecessary pointer comparisons.
*/
-#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
+#define clamp(val, lo, hi) __careful_clamp(val, lo, hi)
/*
* ..and if you can't take the strict
@@ -121,7 +143,7 @@
* This macro does no typechecking and uses temporary variables of type
* @type to make all the comparisons.
*/
-#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
+#define clamp_t(type, val, lo, hi) __careful_clamp((type)(val), (type)(lo), (type)(hi))
/**
* clamp_val - return a value clamped to a given range using val's type
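The __careful_clamp() rework above keeps clamp()'s result but adds a build-time check that the low bound does not exceed the high bound when both are compile-time constants. A small sketch of what still compiles and what now fails, assuming only <linux/minmax.h>.

#include <linux/minmax.h>

static int demo_clamp(int requested)
{
	/* Fine: 0 <= 255; arguments are expanded only once. */
	int level = clamp(requested, 0, 255);

	/*
	 * A swapped constant range such as clamp(requested, 255, 0) would
	 * now trip BUILD_BUG_ON_ZERO() via __clamp_input_check() instead
	 * of silently returning 0.
	 */
	return level;
}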
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index a12929bc31b2..06cbad166225 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -970,7 +970,7 @@ void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
struct mlx5_async_ctx {
struct mlx5_core_dev *dev;
atomic_t num_inflight;
- struct wait_queue_head wait;
+ struct completion inflight_done;
};
struct mlx5_async_work;
@@ -981,6 +981,7 @@ struct mlx5_async_work {
struct mlx5_async_ctx *ctx;
mlx5_async_cbk_t user_callback;
u16 opcode; /* cmd opcode */
+ u16 op_mod; /* cmd op_mod */
void *out; /* pointer to the cmd output buffer */
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8bbcccbc5565..974ccca609d2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1852,6 +1852,25 @@ static void __maybe_unused show_free_areas(unsigned int flags, nodemask_t *nodem
__show_free_areas(flags, nodemask, MAX_NR_ZONES - 1);
}
+/*
+ * Parameter block passed down to zap_pte_range in exceptional cases.
+ */
+struct zap_details {
+ struct folio *single_folio; /* Locked folio to be unmapped */
+ bool even_cows; /* Zap COWed private pages too? */
+ zap_flags_t zap_flags; /* Extra flags for zapping */
+};
+
+/*
+ * Whether to drop the pte markers, for example, the uffd-wp information for
+ * file-backed memory. This should only be specified when we will completely
+ * drop the page in the mm, either by truncation or unmapping of the vma. By
+ * default, the flag is not set.
+ */
+#define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0))
+/* Set in unmap_vmas() to indicate a final unmap call. Only used by hugetlb */
+#define ZAP_FLAG_UNMAP ((__force zap_flags_t) BIT(1))
+
#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
@@ -1869,6 +1888,8 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
+void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size, struct zap_details *details);
void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
struct vm_area_struct *start_vma, unsigned long start,
unsigned long end);
@@ -3467,12 +3488,4 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
}
#endif
-/*
- * Whether to drop the pte markers, for example, the uffd-wp information for
- * file-backed memory. This should only be specified when we will completely
- * drop the page in the mm, either by truncation or unmapping of the vma. By
- * default, the flag is not set.
- */
-#define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0))
-
#endif /* _LINUX_MM_H */
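zap_page_range_single() above takes an optional struct zap_details so a single-VMA unmap can carry the new ZAP_FLAG_* bits. A minimal sketch of unmapping one page while asking for pte markers to be dropped, assuming the caller already holds the appropriate mmap lock; the function is hypothetical.

#include <linux/mm.h>

static void demo_zap_one_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct zap_details details = {
		.even_cows = true,			/* include private COWed pages */
		.zap_flags = ZAP_FLAG_DROP_MARKER,	/* also drop uffd-wp markers */
	};

	zap_page_range_single(vma, addr, PAGE_SIZE, &details);
}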
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 9c50bc40f8ff..6f7993803ee7 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -451,7 +451,7 @@ static inline bool mmc_ready_for_data(u32 status)
#define MMC_SECURE_TRIM1_ARG 0x80000001
#define MMC_SECURE_TRIM2_ARG 0x80008000
#define MMC_SECURE_ARGS 0x80000000
-#define MMC_TRIM_ARGS 0x00008001
+#define MMC_TRIM_OR_DISCARD_ARGS 0x00008003
#define mmc_driver_type_mask(n) (1 << (n))
diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h
index f6e5369d2928..092c52aa6c2c 100644
--- a/include/linux/mnt_idmapping.h
+++ b/include/linux/mnt_idmapping.h
@@ -5,12 +5,10 @@
#include <linux/types.h>
#include <linux/uidgid.h>
+struct mnt_idmap;
struct user_namespace;
-/*
- * Carries the initial idmapping of 0:0:4294967295 which is an identity
- * mapping. This means that {g,u}id 0 is mapped to {g,u}id 0, {g,u}id 1 is
- * mapped to {g,u}id 1, [...], {g,u}id 1000 to {g,u}id 1000, [...].
- */
+
+extern struct mnt_idmap nop_mnt_idmap;
extern struct user_namespace init_user_ns;
typedef struct {
@@ -98,6 +96,26 @@ static inline bool vfsgid_eq_kgid(vfsgid_t vfsgid, kgid_t kgid)
return vfsgid_valid(vfsgid) && __vfsgid_val(vfsgid) == __kgid_val(kgid);
}
+static inline bool vfsuid_gt_kuid(vfsuid_t vfsuid, kuid_t kuid)
+{
+ return __vfsuid_val(vfsuid) > __kuid_val(kuid);
+}
+
+static inline bool vfsgid_gt_kgid(vfsgid_t vfsgid, kgid_t kgid)
+{
+ return __vfsgid_val(vfsgid) > __kgid_val(kgid);
+}
+
+static inline bool vfsuid_lt_kuid(vfsuid_t vfsuid, kuid_t kuid)
+{
+ return __vfsuid_val(vfsuid) < __kuid_val(kuid);
+}
+
+static inline bool vfsgid_lt_kgid(vfsgid_t vfsgid, kgid_t kgid)
+{
+ return __vfsgid_val(vfsgid) < __kgid_val(kgid);
+}
+
/*
* vfs{g,u}ids are created from k{g,u}ids.
* We don't allow them to be created from regular {u,g}id.
@@ -208,13 +226,6 @@ static inline vfsuid_t make_vfsuid(struct user_namespace *mnt_userns,
return VFSUIDT_INIT(make_kuid(mnt_userns, uid));
}
-static inline kuid_t mapped_kuid_fs(struct user_namespace *mnt_userns,
- struct user_namespace *fs_userns,
- kuid_t kuid)
-{
- return AS_KUIDT(make_vfsuid(mnt_userns, fs_userns, kuid));
-}
-
/**
* make_vfsgid - map a filesystem kgid into a mnt_userns
* @mnt_userns: the mount's idmapping
@@ -253,13 +264,6 @@ static inline vfsgid_t make_vfsgid(struct user_namespace *mnt_userns,
return VFSGIDT_INIT(make_kgid(mnt_userns, gid));
}
-static inline kgid_t mapped_kgid_fs(struct user_namespace *mnt_userns,
- struct user_namespace *fs_userns,
- kgid_t kgid)
-{
- return AS_KGIDT(make_vfsgid(mnt_userns, fs_userns, kgid));
-}
-
/**
* from_vfsuid - map a vfsuid into the filesystem idmapping
* @mnt_userns: the mount's idmapping
@@ -288,33 +292,6 @@ static inline kuid_t from_vfsuid(struct user_namespace *mnt_userns,
}
/**
- * mapped_kuid_user - map a user kuid into a mnt_userns
- * @mnt_userns: the mount's idmapping
- * @fs_userns: the filesystem's idmapping
- * @kuid : kuid to be mapped
- *
- * Use the idmapping of @mnt_userns to remap a @kuid into @fs_userns. Use this
- * function when preparing a @kuid to be written to disk or inode.
- *
- * If no_idmapping() determines that this is not an idmapped mount we can
- * simply return @kuid unchanged.
- * If initial_idmapping() tells us that the filesystem is not mounted with an
- * idmapping we know the value of @kuid won't change when calling
- * make_kuid() so we can simply retrieve the value via KUIDT_INIT()
- * directly.
- *
- * Return: @kuid mapped according to @mnt_userns.
- * If @kuid has no mapping in either @mnt_userns or @fs_userns INVALID_UID is
- * returned.
- */
-static inline kuid_t mapped_kuid_user(struct user_namespace *mnt_userns,
- struct user_namespace *fs_userns,
- kuid_t kuid)
-{
- return from_vfsuid(mnt_userns, fs_userns, VFSUIDT_INIT(kuid));
-}
-
-/**
* vfsuid_has_fsmapping - check whether a vfsuid maps into the filesystem
* @mnt_userns: the mount's idmapping
* @fs_userns: the filesystem's idmapping
@@ -333,6 +310,12 @@ static inline bool vfsuid_has_fsmapping(struct user_namespace *mnt_userns,
return uid_valid(from_vfsuid(mnt_userns, fs_userns, vfsuid));
}
+static inline bool vfsuid_has_mapping(struct user_namespace *userns,
+ vfsuid_t vfsuid)
+{
+ return from_kuid(userns, AS_KUIDT(vfsuid)) != (uid_t)-1;
+}
+
/**
* vfsuid_into_kuid - convert vfsuid into kuid
* @vfsuid: the vfsuid to convert
@@ -374,33 +357,6 @@ static inline kgid_t from_vfsgid(struct user_namespace *mnt_userns,
}
/**
- * mapped_kgid_user - map a user kgid into a mnt_userns
- * @mnt_userns: the mount's idmapping
- * @fs_userns: the filesystem's idmapping
- * @kgid : kgid to be mapped
- *
- * Use the idmapping of @mnt_userns to remap a @kgid into @fs_userns. Use this
- * function when preparing a @kgid to be written to disk or inode.
- *
- * If no_idmapping() determines that this is not an idmapped mount we can
- * simply return @kgid unchanged.
- * If initial_idmapping() tells us that the filesystem is not mounted with an
- * idmapping we know the value of @kgid won't change when calling
- * make_kgid() so we can simply retrieve the value via KGIDT_INIT()
- * directly.
- *
- * Return: @kgid mapped according to @mnt_userns.
- * If @kgid has no mapping in either @mnt_userns or @fs_userns INVALID_GID is
- * returned.
- */
-static inline kgid_t mapped_kgid_user(struct user_namespace *mnt_userns,
- struct user_namespace *fs_userns,
- kgid_t kgid)
-{
- return from_vfsgid(mnt_userns, fs_userns, VFSGIDT_INIT(kgid));
-}
-
-/**
* vfsgid_has_fsmapping - check whether a vfsgid maps into the filesystem
* @mnt_userns: the mount's idmapping
* @fs_userns: the filesystem's idmapping
@@ -419,6 +375,12 @@ static inline bool vfsgid_has_fsmapping(struct user_namespace *mnt_userns,
return gid_valid(from_vfsgid(mnt_userns, fs_userns, vfsgid));
}
+static inline bool vfsgid_has_mapping(struct user_namespace *userns,
+ vfsgid_t vfsgid)
+{
+ return from_kgid(userns, AS_KGIDT(vfsgid)) != (gid_t)-1;
+}
+
/**
* vfsgid_into_kgid - convert vfsgid into kgid
* @vfsgid: the vfsgid to convert
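The new vfsuid/vfsgid comparison and mapping helpers above let filesystem code compare mount-mapped ids against kuids without open-coding __vfsuid_val(). A hedged sketch of a quota-style check, assuming the vfsuid has already been produced by make_vfsuid() or an i_uid_into_vfsuid()-style caller; the cutoff semantics are invented for illustration.

#include <linux/mnt_idmapping.h>

/*
 * Return true if the mount-mapped owner lies above a per-filesystem
 * cutoff and is actually representable in @userns.
 */
static bool demo_owner_above_cutoff(struct user_namespace *userns,
				    vfsuid_t vfsuid, kuid_t cutoff)
{
	if (!vfsuid_has_mapping(userns, vfsuid))
		return false;

	return vfsuid_gt_kuid(vfsuid, cutoff);
}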
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 55a4abaf6715..62475996fac6 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -16,6 +16,7 @@
struct super_block;
struct dentry;
struct user_namespace;
+struct mnt_idmap;
struct file_system_type;
struct fs_context;
struct file;
@@ -70,13 +71,15 @@ struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
struct super_block *mnt_sb; /* pointer to superblock */
int mnt_flags;
- struct user_namespace *mnt_userns;
+ struct mnt_idmap *mnt_idmap;
} __randomize_layout;
-static inline struct user_namespace *mnt_user_ns(const struct vfsmount *mnt)
+struct user_namespace *mnt_user_ns(const struct vfsmount *mnt);
+struct user_namespace *mnt_idmap_owner(const struct mnt_idmap *idmap);
+static inline struct mnt_idmap *mnt_idmap(const struct vfsmount *mnt)
{
/* Pairs with smp_store_release() in do_idmap_mount(). */
- return smp_load_acquire(&mnt->mnt_userns);
+ return smp_load_acquire(&mnt->mnt_idmap);
}
extern int mnt_want_write(struct vfsmount *mnt);
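With mnt_userns replaced by an opaque struct mnt_idmap in struct vfsmount, callers are expected to go through the mnt_idmap()/mnt_user_ns() accessors rather than poking the field. A small sketch of fetching the idmap for a path, under the assumption that helpers taking a struct mnt_idmap accept the result during the transition.

#include <linux/mount.h>
#include <linux/path.h>

static struct mnt_idmap *demo_idmap_of(const struct path *path)
{
	/* Pairs with smp_store_release() in do_idmap_mount(). */
	return mnt_idmap(path->mnt);
}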
diff --git a/include/linux/msi.h b/include/linux/msi.h
index fc918a658d48..a112b913fff9 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -13,13 +13,20 @@
*
* Regular device drivers have no business with any of these functions and
* especially storing MSI descriptor pointers in random code is considered
- * abuse. The only function which is relevant for drivers is msi_get_virq().
+ * abuse.
+ *
+ * Device driver relevant functions are available in <linux/msi_api.h>
*/
+#include <linux/irqdomain_defs.h>
#include <linux/cpumask.h>
+#include <linux/msi_api.h>
#include <linux/xarray.h>
#include <linux/mutex.h>
#include <linux/list.h>
+#include <linux/irq.h>
+#include <linux/bits.h>
+
#include <asm/msi.h>
/* Dummy shadow structures if an architecture does not define them */
@@ -68,19 +75,18 @@ struct msi_msg {
extern int pci_msi_ignore_mask;
/* Helper functions */
-struct irq_data;
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
struct device_attribute;
+struct irq_domain;
+struct irq_affinity_desc;
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
#else
-static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
-{
-}
+static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) { }
#endif
typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
@@ -120,6 +126,38 @@ struct pci_msi_desc {
};
};
+/**
+ * union msi_domain_cookie - Opaque MSI domain specific data
+ * @value: u64 value store
+ * @ptr: Pointer to domain specific data
+ * @iobase: Domain specific IOmem pointer
+ *
+ * The content of this data is implementation defined and used by the MSI
+ * domain to store domain specific information which is required for
+ * interrupt chip callbacks.
+ */
+union msi_domain_cookie {
+ u64 value;
+ void *ptr;
+ void __iomem *iobase;
+};
+
+/**
+ * struct msi_desc_data - Generic MSI descriptor data
+ * @dcookie: Cookie for MSI domain specific data which is required
+ * for irq_chip callbacks
+ * @icookie: Cookie for the MSI interrupt instance provided by
+ * the usage site to the allocation function
+ *
+ * The content of this data is implementation defined, e.g. PCI/IMS
+ * implementations define the meaning of the data. The MSI core ignores
+ * this data completely.
+ */
+struct msi_desc_data {
+ union msi_domain_cookie dcookie;
+ union msi_instance_cookie icookie;
+};
+
#define MSI_MAX_INDEX ((unsigned int)USHRT_MAX)
/**
@@ -137,6 +175,7 @@ struct pci_msi_desc {
*
* @msi_index: Index of the msi descriptor
* @pci: PCI specific msi descriptor data
+ * @data: Generic MSI descriptor data
*/
struct msi_desc {
/* Shared device/bus type independent data */
@@ -156,7 +195,10 @@ struct msi_desc {
void *write_msi_msg_data;
u16 msi_index;
- struct pci_msi_desc pci;
+ union {
+ struct pci_msi_desc pci;
+ struct msi_desc_data data;
+ };
};
/*
@@ -171,33 +213,80 @@ enum msi_desc_filter {
MSI_DESC_ASSOCIATED,
};
+
+/**
+ * struct msi_dev_domain - The internals of MSI domain info per device
+ * @store: Xarray for storing MSI descriptor pointers
+ * @domain: Pointer to a per device interrupt domain
+ */
+struct msi_dev_domain {
+ struct xarray store;
+ struct irq_domain *domain;
+};
+
/**
* msi_device_data - MSI per device data
* @properties: MSI properties which are interesting to drivers
* @platform_data: Platform-MSI specific data
* @mutex: Mutex protecting the MSI descriptor store
- * @__store: Xarray for storing MSI descriptor pointers
+ * @__domains: Internal data for per device MSI domains
* @__iter_idx: Index to search the next entry for iterators
*/
struct msi_device_data {
unsigned long properties;
struct platform_msi_priv_data *platform_data;
struct mutex mutex;
- struct xarray __store;
+ struct msi_dev_domain __domains[MSI_MAX_DEVICE_IRQDOMAINS];
unsigned long __iter_idx;
};
int msi_setup_device_data(struct device *dev);
-unsigned int msi_get_virq(struct device *dev, unsigned int index);
void msi_lock_descs(struct device *dev);
void msi_unlock_descs(struct device *dev);
-struct msi_desc *msi_first_desc(struct device *dev, enum msi_desc_filter filter);
-struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter);
+struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
+ enum msi_desc_filter filter);
/**
- * msi_for_each_desc - Iterate the MSI descriptors
+ * msi_first_desc - Get the first MSI descriptor of the default irqdomain
+ * @dev: Device to operate on
+ * @filter: Descriptor state filter
+ *
+ * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
+ * must be invoked before the call.
+ *
+ * Return: Pointer to the first MSI descriptor matching the search
+ * criteria, NULL if none found.
+ */
+static inline struct msi_desc *msi_first_desc(struct device *dev,
+ enum msi_desc_filter filter)
+{
+ return msi_domain_first_desc(dev, MSI_DEFAULT_DOMAIN, filter);
+}
+
+struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
+ enum msi_desc_filter filter);
+
+/**
+ * msi_domain_for_each_desc - Iterate the MSI descriptors in a specific domain
+ *
+ * @desc: struct msi_desc pointer used as iterator
+ * @dev: struct device pointer - device to iterate
+ * @domid: The id of the interrupt domain which should be walked.
+ * @filter: Filter for descriptor selection
+ *
+ * Notes:
+ * - The loop must be protected with a msi_lock_descs()/msi_unlock_descs()
+ * pair.
+ * - It is safe to remove a retrieved MSI descriptor in the loop.
+ */
+#define msi_domain_for_each_desc(desc, dev, domid, filter) \
+ for ((desc) = msi_domain_first_desc((dev), (domid), (filter)); (desc); \
+ (desc) = msi_next_desc((dev), (domid), (filter)))
+
+/**
+ * msi_for_each_desc - Iterate the MSI descriptors in the default irqdomain
*
* @desc: struct msi_desc pointer used as iterator
* @dev: struct device pointer - device to iterate
@@ -208,9 +297,8 @@ struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter);
* pair.
* - It is safe to remove a retrieved MSI descriptor in the loop.
*/
-#define msi_for_each_desc(desc, dev, filter) \
- for ((desc) = msi_first_desc((dev), (filter)); (desc); \
- (desc) = msi_next_desc((dev), (filter)))
+#define msi_for_each_desc(desc, dev, filter) \
+ msi_domain_for_each_desc((desc), (dev), MSI_DEFAULT_DOMAIN, (filter))
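The iterator notes above require the msi_lock_descs()/msi_unlock_descs() pair around any walk. A minimal sketch of iterating the default irqdomain with the reworked macro, assuming only the declarations added in this hunk; counting associated descriptors is an arbitrary example.

#include <linux/msi.h>
#include <linux/device.h>

/* Count how many MSI descriptors of @dev already have a Linux irq mapped. */
static unsigned int demo_count_mapped_descs(struct device *dev)
{
	struct msi_desc *desc;
	unsigned int mapped = 0;

	msi_lock_descs(dev);
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
		mapped++;
	msi_unlock_descs(dev);

	return mapped;
}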
#define msi_desc_to_dev(desc) ((desc)->dev)
@@ -237,34 +325,47 @@ static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
}
#endif
-#ifdef CONFIG_PCI_MSI
-struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
-void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
-#else /* CONFIG_PCI_MSI */
-static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
+int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid,
+ struct msi_desc *init_desc);
+/**
+ * msi_insert_msi_desc - Allocate and initialize a MSI descriptor in the
+ * default irqdomain and insert it at @init_desc->msi_index
+ * @dev: Pointer to the device for which the descriptor is allocated
+ * @init_desc: Pointer to an MSI descriptor to initialize the new descriptor
+ *
+ * Return: 0 on success or an appropriate failure code.
+ */
+static inline int msi_insert_msi_desc(struct device *dev, struct msi_desc *init_desc)
{
+ return msi_domain_insert_msi_desc(dev, MSI_DEFAULT_DOMAIN, init_desc);
}
-#endif /* CONFIG_PCI_MSI */
-int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc);
-void msi_free_msi_descs_range(struct device *dev, enum msi_desc_filter filter,
- unsigned int first_index, unsigned int last_index);
+void msi_domain_free_msi_descs_range(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+
+/**
+ * msi_free_msi_descs_range - Free a range of MSI descriptors of a device
+ * in the default irqdomain
+ *
+ * @dev: Device for which to free the descriptors
+ * @first: Index to start freeing from (inclusive)
+ * @last: Last index to be freed (inclusive)
+ */
+static inline void msi_free_msi_descs_range(struct device *dev, unsigned int first,
+ unsigned int last)
+{
+ msi_domain_free_msi_descs_range(dev, MSI_DEFAULT_DOMAIN, first, last);
+}
/**
- * msi_free_msi_descs - Free MSI descriptors of a device
+ * msi_free_msi_descs - Free all MSI descriptors of a device in the default irqdomain
* @dev: Device to free the descriptors
*/
static inline void msi_free_msi_descs(struct device *dev)
{
- msi_free_msi_descs_range(dev, MSI_DESC_ALL, 0, MSI_MAX_INDEX);
+ msi_free_msi_descs_range(dev, 0, MSI_MAX_INDEX);
}
-void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-
-void pci_msi_mask_irq(struct irq_data *data);
-void pci_msi_unmask_irq(struct irq_data *data);
-
/*
* The arch hooks to setup up msi irqs. Default functions are implemented
* as weak symbols so that they /can/ be overriden by architecture specific
@@ -293,7 +394,7 @@ static inline void msi_device_destroy_sysfs(struct device *dev) { }
*/
bool arch_restore_msi_irqs(struct pci_dev *dev);
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+#ifdef CONFIG_GENERIC_MSI_IRQ
#include <linux/irqhandler.h>
@@ -309,19 +410,22 @@ struct msi_domain_info;
* @get_hwirq: Retrieve the resulting hw irq number
* @msi_init: Domain specific init function for MSI interrupts
* @msi_free: Domain specific function to free a MSI interrupts
- * @msi_check: Callback for verification of the domain/info/dev data
* @msi_prepare: Prepare the allocation of the interrupts in the domain
+ * @prepare_desc: Optional function to prepare the allocated MSI descriptor
+ * in the domain
* @set_desc: Set the msi descriptor for an interrupt
* @domain_alloc_irqs: Optional function to override the default allocation
* function.
* @domain_free_irqs: Optional function to override the default free
* function.
+ * @msi_post_free: Optional function which is invoked after freeing
+ * all interrupts.
*
* @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying
* irqdomain.
*
- * @msi_check, @msi_prepare and @set_desc are callbacks used by
- * msi_domain_alloc/free_irqs().
+ * @msi_prepare, @prepare_desc and @set_desc are callbacks used by the
+ * msi_domain_alloc/free_irqs*() variants.
*
* @domain_alloc_irqs, @domain_free_irqs can be used to override the
* default allocation/free functions (__msi_domain_alloc/free_irqs). This
@@ -329,15 +433,6 @@ struct msi_domain_info;
* be wrapped into the regular irq domains concepts by mere mortals. This
* allows to universally use msi_domain_alloc/free_irqs without having to
* special case XEN all over the place.
- *
- * Contrary to other operations @domain_alloc_irqs and @domain_free_irqs
- * are set to the default implementation if NULL and even when
- * MSI_FLAG_USE_DEF_DOM_OPS is not set to avoid breaking existing users and
- * because these callbacks are obviously mandatory.
- *
- * This is NOT meant to be abused, but it can be useful to build wrappers
- * for specialized MSI irq domains which need extra work before and after
- * calling __msi_domain_alloc_irqs()/__msi_domain_free_irqs().
*/
struct msi_domain_ops {
irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info,
@@ -349,23 +444,29 @@ struct msi_domain_ops {
void (*msi_free)(struct irq_domain *domain,
struct msi_domain_info *info,
unsigned int virq);
- int (*msi_check)(struct irq_domain *domain,
- struct msi_domain_info *info,
- struct device *dev);
int (*msi_prepare)(struct irq_domain *domain,
struct device *dev, int nvec,
msi_alloc_info_t *arg);
+ void (*prepare_desc)(struct irq_domain *domain, msi_alloc_info_t *arg,
+ struct msi_desc *desc);
void (*set_desc)(msi_alloc_info_t *arg,
struct msi_desc *desc);
int (*domain_alloc_irqs)(struct irq_domain *domain,
struct device *dev, int nvec);
void (*domain_free_irqs)(struct irq_domain *domain,
struct device *dev);
+ void (*msi_post_free)(struct irq_domain *domain,
+ struct device *dev);
};
/**
* struct msi_domain_info - MSI interrupt domain data
 * @flags: Flags to describe features and capabilities
+ * @bus_token: The domain bus token
+ * @hwsize: The hardware table size or the software index limit.
+ * If 0 then the size is considered unlimited and
+ * gets initialized to the maximum software index limit
+ * by the domain creation code.
* @ops: The callback data structure
* @chip: Optional: associated interrupt chip
* @chip_data: Optional: associated interrupt chip data
@@ -375,17 +476,42 @@ struct msi_domain_ops {
* @data: Optional: domain specific data
*/
struct msi_domain_info {
- u32 flags;
- struct msi_domain_ops *ops;
- struct irq_chip *chip;
- void *chip_data;
- irq_flow_handler_t handler;
- void *handler_data;
- const char *handler_name;
- void *data;
+ u32 flags;
+ enum irq_domain_bus_token bus_token;
+ unsigned int hwsize;
+ struct msi_domain_ops *ops;
+ struct irq_chip *chip;
+ void *chip_data;
+ irq_flow_handler_t handler;
+ void *handler_data;
+ const char *handler_name;
+ void *data;
};
-/* Flags for msi_domain_info */
+/**
+ * struct msi_domain_template - Template for MSI device domains
+ * @name: Storage for the resulting name. Filled in by the core.
+ * @chip: Interrupt chip for this domain
+ * @ops: MSI domain ops
+ * @info: MSI domain info data
+ */
+struct msi_domain_template {
+ char name[48];
+ struct irq_chip chip;
+ struct msi_domain_ops ops;
+ struct msi_domain_info info;
+};
+
+/*
+ * Flags for msi_domain_info
+ *
+ * Bit 0-15: Generic MSI functionality which is not subject to restriction
+ * by parent domains
+ *
+ * Bit 16-31: Functionality which depends on the underlying parent domain and
+ * can be masked out by msi_parent_ops::init_dev_msi_info() when
+ * a device MSI domain is initialized.
+ */
enum {
/*
* Init non implemented ops callbacks with default MSI domain
@@ -397,44 +523,100 @@ enum {
* callbacks.
*/
MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1),
- /* Support multiple PCI MSI interrupts */
- MSI_FLAG_MULTI_PCI_MSI = (1 << 2),
- /* Support PCI MSIX interrupts */
- MSI_FLAG_PCI_MSIX = (1 << 3),
/* Needs early activate, required for PCI */
- MSI_FLAG_ACTIVATE_EARLY = (1 << 4),
+ MSI_FLAG_ACTIVATE_EARLY = (1 << 2),
/*
* Must reactivate when irq is started even when
* MSI_FLAG_ACTIVATE_EARLY has been set.
*/
- MSI_FLAG_MUST_REACTIVATE = (1 << 5),
- /* Is level-triggered capable, using two messages */
- MSI_FLAG_LEVEL_CAPABLE = (1 << 6),
+ MSI_FLAG_MUST_REACTIVATE = (1 << 3),
/* Populate sysfs on alloc() and destroy it on free() */
- MSI_FLAG_DEV_SYSFS = (1 << 7),
- /* MSI-X entries must be contiguous */
- MSI_FLAG_MSIX_CONTIGUOUS = (1 << 8),
+ MSI_FLAG_DEV_SYSFS = (1 << 4),
/* Allocate simple MSI descriptors */
- MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = (1 << 9),
+ MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = (1 << 5),
/* Free MSI descriptors */
- MSI_FLAG_FREE_MSI_DESCS = (1 << 10),
+ MSI_FLAG_FREE_MSI_DESCS = (1 << 6),
+ /*
+ * Quirk to handle MSI implementations which do not provide
+ * masking. Currently known to affect x86, but has to be partially
+ * handled in the core MSI code.
+ */
+ MSI_FLAG_NOMASK_QUIRK = (1 << 7),
+
+ /* Mask for the generic functionality */
+ MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0),
+
+ /* Mask for the domain specific functionality */
+ MSI_DOMAIN_FLAGS_MASK = GENMASK(31, 16),
+
+ /* Support multiple PCI MSI interrupts */
+ MSI_FLAG_MULTI_PCI_MSI = (1 << 16),
+ /* Support PCI MSIX interrupts */
+ MSI_FLAG_PCI_MSIX = (1 << 17),
+ /* Is level-triggered capable, using two messages */
+ MSI_FLAG_LEVEL_CAPABLE = (1 << 18),
+ /* MSI-X entries must be contiguous */
+ MSI_FLAG_MSIX_CONTIGUOUS = (1 << 19),
+ /* PCI/MSI-X vectors can be dynamically allocated/freed post MSI-X enable */
+ MSI_FLAG_PCI_MSIX_ALLOC_DYN = (1 << 20),
+ /* Support for PCI/IMS */
+ MSI_FLAG_PCI_IMS = (1 << 21),
};
+/**
+ * struct msi_parent_ops - MSI parent domain callbacks and configuration info
+ *
+ * @supported_flags: Required: The supported MSI flags of the parent domain
+ * @prefix: Optional: Prefix for the domain and chip name
+ * @init_dev_msi_info: Required: Callback for MSI parent domains to setup parent
+ * domain specific domain flags, domain ops and interrupt chip
+ * callbacks when a per device domain is created.
+ */
+struct msi_parent_ops {
+ u32 supported_flags;
+ const char *prefix;
+ bool (*init_dev_msi_info)(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *msi_parent_domain,
+ struct msi_domain_info *msi_child_info);
+};
+
+bool msi_parent_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *msi_parent_domain,
+ struct msi_domain_info *msi_child_info);
+
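+For context, a hedged sketch of the parent-domain side: a hypothetical intermediate parent that advertises PCI MSI/MSI-X support and simply forwards per-device domain initialization to its own parent via the msi_parent_init_dev_msi_info() helper declared above. Parents with hardware-specific needs would supply their own callback that adjusts msi_child_info; the structure name and prefix here are made up.
+
+	static const struct msi_parent_ops example_msi_parent_ops = {
+		.supported_flags	= MSI_GENERIC_FLAGS_MASK |
+					  MSI_FLAG_PCI_MSIX |
+					  MSI_FLAG_MULTI_PCI_MSI,
+		.prefix			= "EXAMPLE-",
+		/* Forward per-device domain setup to the next parent in the hierarchy */
+		.init_dev_msi_info	= msi_parent_init_dev_msi_info,
+	};
+
+The parent irqdomain would then point at this structure so that the core can consult it, and mask out unsupported flags, when a per-device child domain is created.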
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force);
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
-int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
- int nvec);
-int msi_domain_alloc_irqs_descs_locked(struct irq_domain *domain, struct device *dev,
- int nvec);
-int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
- int nvec);
-void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
-void msi_domain_free_irqs_descs_locked(struct irq_domain *domain, struct device *dev);
-void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
+
+bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
+ const struct msi_domain_template *template,
+ unsigned int hwsize, void *domain_data,
+ void *chip_data);
+void msi_remove_device_irq_domain(struct device *dev, unsigned int domid);
+
+bool msi_match_device_irq_domain(struct device *dev, unsigned int domid,
+ enum irq_domain_bus_token bus_token);
+
+int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+int msi_domain_alloc_irqs_all_locked(struct device *dev, unsigned int domid, int nirqs);
+
+struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index,
+ const struct irq_affinity_desc *affdesc,
+ union msi_instance_cookie *cookie);
+
+void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+void msi_domain_free_irqs_range(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid);
+void msi_domain_free_irqs_all(struct device *dev, unsigned int domid);
+
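+A hedged end-to-end sketch of the new per-device domain flow (not part of the patch): create a secondary device domain from a driver-provided template, then allocate one interrupt at any free index. The template contents, the hardware size of 32 and the NULL affinity/cookie arguments are illustrative; MSI_SECONDARY_DOMAIN and MSI_ANY_INDEX come from the new msi_api.h added later in this diff.
+
+	static int example_alloc(struct device *dev,
+				 const struct msi_domain_template *tmpl)
+	{
+		struct msi_map map;
+
+		if (!msi_create_device_irq_domain(dev, MSI_SECONDARY_DOMAIN, tmpl,
+						  32, NULL, NULL))
+			return -ENODEV;
+
+		map = msi_domain_alloc_irq_at(dev, MSI_SECONDARY_DOMAIN, MSI_ANY_INDEX,
+					      NULL, NULL);
+		if (map.index < 0)
+			return map.index;	/* a negative index carries the error code */
+
+		/* map.virq can now be handed to request_irq() */
+		return 0;
+	}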
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
@@ -467,20 +649,27 @@ int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int vir
void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
-#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
+#endif /* CONFIG_GENERIC_MSI_IRQ */
-#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
+/* PCI specific interfaces */
+#ifdef CONFIG_PCI_MSI
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
+void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void pci_msi_mask_irq(struct irq_data *data);
+void pci_msi_unmask_irq(struct irq_data *data);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
-bool pci_dev_has_special_msi_domain(struct pci_dev *pdev);
-#else
+#else /* CONFIG_PCI_MSI */
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
return NULL;
}
-#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
+static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) { }
+#endif /* !CONFIG_PCI_MSI */
#endif /* LINUX_MSI_H */
diff --git a/include/linux/msi_api.h b/include/linux/msi_api.h
new file mode 100644
index 000000000000..391087ad99b1
--- /dev/null
+++ b/include/linux/msi_api.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_MSI_API_H
+#define LINUX_MSI_API_H
+
+/*
+ * APIs which are relevant for device driver code for allocating and
+ * freeing MSI interrupts and querying the associations between
+ * hardware/software MSI indices and the Linux interrupt number.
+ */
+
+struct device;
+
+/*
+ * Per device interrupt domain related constants.
+ */
+enum msi_domain_ids {
+ MSI_DEFAULT_DOMAIN,
+ MSI_SECONDARY_DOMAIN,
+ MSI_MAX_DEVICE_IRQDOMAINS,
+};
+
+/**
+ * union msi_instance_cookie - MSI instance cookie
+ * @value: u64 value store
+ * @ptr: Pointer to usage site specific data
+ *
+ * This cookie is handed to the IMS allocation function and stored in the
+ * MSI descriptor for the interrupt chip callbacks.
+ *
+ * The content of this cookie is MSI domain implementation defined. For
+ * PCI/IMS implementations this could be a PASID or a pointer to queue
+ * memory.
+ */
+union msi_instance_cookie {
+ u64 value;
+ void *ptr;
+};
+
+/**
+ * struct msi_map - Mapping between MSI index and Linux interrupt number
+ * @index: The MSI index, e.g. slot in the MSI-X table or
+ * a software managed index if >= 0. If negative
+ * the allocation function failed and it contains
+ * the error code.
+ * @virq: The associated Linux interrupt number
+ */
+struct msi_map {
+ int index;
+ int virq;
+};
+
+/*
+ * Constant to be used for dynamic allocations when the allocation is any
+ * free MSI index, which is either an entry in a hardware table or a
+ * software managed index.
+ */
+#define MSI_ANY_INDEX UINT_MAX
+
+unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigned int index);
+
+/**
+ * msi_get_virq - Lookup the Linux interrupt number for a MSI index on the default interrupt domain
+ * @dev: Device for which the lookup happens
+ * @index: The MSI index to lookup
+ *
+ * Return: The Linux interrupt number on success (> 0), 0 if not found
+ */
+static inline unsigned int msi_get_virq(struct device *dev, unsigned int index)
+{
+ return msi_domain_get_virq(dev, MSI_DEFAULT_DOMAIN, index);
+}
+
+#endif
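
A brief usage sketch, assuming a device that already has MSI descriptors in the default domain; example_handler and the interrupt name are placeholders.

	static int example_request(struct device *dev)
	{
		unsigned int virq = msi_get_virq(dev, 0);

		if (!virq)
			return -ENOENT;
		return request_irq(virq, example_handler, 0, "example", dev);
	}
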
diff --git a/include/linux/net.h b/include/linux/net.h
index 711c3593c3b8..18d942bbdf6e 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -41,6 +41,7 @@ struct net;
#define SOCK_NOSPACE 2
#define SOCK_PASSCRED 3
#define SOCK_PASSSEC 4
+#define SOCK_SUPPORT_ZC 5
#ifndef ARCH_HAS_SOCKET_TYPES
/**
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a36edb0ec199..eddf8ee270e7 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3663,8 +3663,9 @@ static inline bool netif_attr_test_online(unsigned long j,
static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
unsigned int nr_bits)
{
- /* n is a prior cpu */
- cpu_max_bits_warn(n + 1, nr_bits);
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpu_max_bits_warn(n, nr_bits);
if (srcp)
return find_next_bit(srcp, nr_bits, n + 1);
@@ -3685,8 +3686,9 @@ static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
const unsigned long *src2p,
unsigned int nr_bits)
{
- /* n is a prior cpu */
- cpu_max_bits_warn(n + 1, nr_bits);
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpu_max_bits_warn(n, nr_bits);
if (src1p && src2p)
return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index f2402ddeafbf..4c76ddfb6a67 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -267,6 +267,14 @@ struct netfs_cache_ops {
loff_t *_start, size_t *_len, loff_t i_size,
bool no_space_allocated_yet);
+ /* Prepare an on-demand read operation, shortening it to a cached/uncached
+ * boundary as appropriate.
+ */
+ enum netfs_io_source (*prepare_ondemand_read)(struct netfs_cache_resources *cres,
+ loff_t start, size_t *_len,
+ loff_t i_size,
+ unsigned long *_flags, ino_t ino);
+
/* Query the occupancy of the cache in a region, returning where the
* next chunk of data starts and how long it is.
*/
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 8d04b6a5964c..730003c4f4af 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -732,4 +732,17 @@ enum nfs4_setxattr_options {
SETXATTR4_CREATE = 1,
SETXATTR4_REPLACE = 2,
};
+
+enum {
+ RCA4_TYPE_MASK_RDATA_DLG = 0,
+ RCA4_TYPE_MASK_WDATA_DLG = 1,
+ RCA4_TYPE_MASK_DIR_DLG = 2,
+ RCA4_TYPE_MASK_FILE_LAYOUT = 3,
+ RCA4_TYPE_MASK_BLK_LAYOUT = 4,
+ RCA4_TYPE_MASK_OBJ_LAYOUT_MIN = 8,
+ RCA4_TYPE_MASK_OBJ_LAYOUT_MAX = 9,
+ RCA4_TYPE_MASK_OTHER_LAYOUT_MIN = 12,
+ RCA4_TYPE_MASK_OTHER_LAYOUT_MAX = 15,
+};
+
#endif
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 7931fa472561..d92fdfd2444c 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -59,6 +59,7 @@ struct nfs_access_entry {
kuid_t fsuid;
kgid_t fsgid;
struct group_info *group_info;
+ u64 timestamp;
__u32 mask;
struct rcu_head rcu_head;
};
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index efef68c9352a..bb0ee80526b2 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -516,7 +516,7 @@ static inline int node_random(const nodemask_t *maskp)
bit = first_node(*maskp);
break;
default:
- bit = find_nth_bit(maskp->bits, MAX_NUMNODES, prandom_u32_max(w));
+ bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_u32_below(w));
break;
}
return bit;
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index cdb171efc7cb..fee881cded01 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -94,6 +94,7 @@ static inline struct cred *nsset_cred(struct nsset *set)
int copy_namespaces(unsigned long flags, struct task_struct *tsk);
void exit_task_namespaces(struct task_struct *tsk);
void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
+int exec_task_namespaces(void);
void free_nsproxy(struct nsproxy *ns);
int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **,
struct cred *, struct fs_struct *);
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 19dfdd74835e..1d3be1a2204c 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -51,8 +51,8 @@ static inline bool __must_check __must_check_overflow(bool overflow)
return unlikely(overflow);
}
-/** check_add_overflow() - Calculate addition with overflow checking
- *
+/**
+ * check_add_overflow() - Calculate addition with overflow checking
* @a: first addend
* @b: second addend
* @d: pointer to store sum
@@ -66,8 +66,8 @@ static inline bool __must_check __must_check_overflow(bool overflow)
#define check_add_overflow(a, b, d) \
__must_check_overflow(__builtin_add_overflow(a, b, d))
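
For illustration, a hedged usage sketch of the overflow-checked helpers documented here; the function and parameter names are hypothetical.

	static void *example_alloc_packet(size_t hdr_len, size_t payload_len)
	{
		size_t total;

		/* Refuse the allocation rather than wrapping around */
		if (check_add_overflow(hdr_len, payload_len, &total))
			return NULL;
		return kmalloc(total, GFP_KERNEL);
	}
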
-/** check_sub_overflow() - Calculate subtraction with overflow checking
- *
+/**
+ * check_sub_overflow() - Calculate subtraction with overflow checking
* @a: minuend; value to subtract from
* @b: subtrahend; value to subtract from @a
* @d: pointer to store difference
@@ -81,8 +81,8 @@ static inline bool __must_check __must_check_overflow(bool overflow)
#define check_sub_overflow(a, b, d) \
__must_check_overflow(__builtin_sub_overflow(a, b, d))
-/** check_mul_overflow() - Calculate multiplication with overflow checking
- *
+/**
+ * check_mul_overflow() - Calculate multiplication with overflow checking
* @a: first factor
* @b: second factor
* @d: pointer to store product
@@ -96,23 +96,24 @@ static inline bool __must_check __must_check_overflow(bool overflow)
#define check_mul_overflow(a, b, d) \
__must_check_overflow(__builtin_mul_overflow(a, b, d))
-/** check_shl_overflow() - Calculate a left-shifted value and check overflow
- *
+/**
+ * check_shl_overflow() - Calculate a left-shifted value and check overflow
* @a: Value to be shifted
* @s: How many bits left to shift
* @d: Pointer to where to store the result
*
* Computes *@d = (@a << @s)
*
- * Returns true if '*d' cannot hold the result or when 'a << s' doesn't
+ * Returns true if '*@d' cannot hold the result or when '@a << @s' doesn't
* make sense. Example conditions:
- * - 'a << s' causes bits to be lost when stored in *d.
- * - 's' is garbage (e.g. negative) or so large that the result of
- * 'a << s' is guaranteed to be 0.
- * - 'a' is negative.
- * - 'a << s' sets the sign bit, if any, in '*d'.
*
- * '*d' will hold the results of the attempted shift, but is not
+ * - '@a << @s' causes bits to be lost when stored in *@d.
+ * - '@s' is garbage (e.g. negative) or so large that the result of
+ * '@a << @s' is guaranteed to be 0.
+ * - '@a' is negative.
+ * - '@a << @s' sets the sign bit, if any, in '*@d'.
+ *
+ * '*@d' will hold the results of the attempted shift, but is not
* considered "safe for use" if true is returned.
*/
#define check_shl_overflow(a, s, d) __must_check_overflow(({ \
@@ -129,7 +130,6 @@ static inline bool __must_check __must_check_overflow(bool overflow)
/**
* size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX
- *
* @factor1: first factor
* @factor2: second factor
*
@@ -149,7 +149,6 @@ static inline size_t __must_check size_mul(size_t factor1, size_t factor2)
/**
* size_add() - Calculate size_t addition with saturation at SIZE_MAX
- *
* @addend1: first addend
* @addend2: second addend
*
@@ -169,7 +168,6 @@ static inline size_t __must_check size_add(size_t addend1, size_t addend2)
/**
* size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX
- *
* @minuend: value to subtract from
* @subtrahend: value to subtract from @minuend
*
@@ -192,7 +190,6 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
/**
* array_size() - Calculate size of 2-dimensional array.
- *
* @a: dimension one
* @b: dimension two
*
@@ -205,7 +202,6 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
/**
* array3_size() - Calculate size of 3-dimensional array.
- *
* @a: dimension one
* @b: dimension two
* @c: dimension three
@@ -220,7 +216,6 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
/**
* flex_array_size() - Calculate size of a flexible array member
* within an enclosing structure.
- *
* @p: Pointer to the structure.
* @member: Name of the flexible array member.
* @count: Number of elements in the array.
@@ -237,7 +232,6 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
/**
* struct_size() - Calculate size of structure with trailing flexible array.
- *
* @p: Pointer to the structure.
* @member: Name of the array member.
* @count: Number of elements in the array.
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 2bda4a4e47e8..c0d939f3169c 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -38,6 +38,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/resource_ext.h>
+#include <linux/msi_api.h>
#include <uapi/linux/pci.h>
#include <linux/pci_ids.h>
@@ -409,6 +410,7 @@ struct pci_dev {
*/
unsigned int irq;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
+ struct resource driver_exclusive_resource; /* driver exclusive resource ranges */
bool match_driver; /* Skip attaching driver */
@@ -843,6 +845,9 @@ struct pci_error_handlers {
/* Device driver may resume normal operations */
void (*resume)(struct pci_dev *dev);
+
+ /* Allow device driver to record more details of a correctable error */
+ void (*cor_error_detected)(struct pci_dev *dev);
};
@@ -1407,6 +1412,21 @@ int pci_request_selected_regions(struct pci_dev *, int, const char *);
int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
void pci_release_selected_regions(struct pci_dev *, int);
+static inline __must_check struct resource *
+pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
+ unsigned int len, const char *name)
+{
+ return __request_region(&pdev->driver_exclusive_resource, offset, len,
+ name, IORESOURCE_EXCLUSIVE);
+}
+
+static inline void pci_release_config_region(struct pci_dev *pdev,
+ unsigned int offset,
+ unsigned int len)
+{
+ __release_region(&pdev->driver_exclusive_resource, offset, len);
+}
+
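+A hedged sketch of the intended use: a driver that owns a vendor-specific capability marks that config-space range as exclusive so other in-kernel users keep their hands off it. The offset, length and resource name are made up for illustration.
+
+	static int example_claim_vsec(struct pci_dev *pdev)
+	{
+		struct resource *res;
+
+		res = pci_request_config_region_exclusive(pdev, 0x100, 0x10,
+							  "example-vsec");
+		if (!res)
+			return -EBUSY;
+		/* ... driver-private config accesses in [0x100, 0x110) ... */
+		pci_release_config_region(pdev, 0x100, 0x10);
+		return 0;
+	}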
/* drivers/pci/bus.c */
void pci_add_resource(struct list_head *resources, struct resource *res);
void pci_add_resource_offset(struct list_head *resources, struct resource *res,
@@ -1553,10 +1573,17 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
return rc;
return 0;
}
+int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags);
int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags,
struct irq_affinity *affd);
+bool pci_msix_can_alloc_dyn(struct pci_dev *dev);
+struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
+ const struct irq_affinity_desc *affdesc);
+void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
+
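+A hedged sketch combining the (now out-of-line) pci_alloc_irq_vectors() with the new dynamic post-enable MSI-X allocation; the vector counts are arbitrary and MSI_ANY_INDEX comes from the new msi_api.h.
+
+	static int example_enable_msix(struct pci_dev *pdev)
+	{
+		int nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_MSIX);
+
+		if (nvec < 0)
+			return nvec;
+
+		if (pci_msix_can_alloc_dyn(pdev)) {
+			struct msi_map map = pci_msix_alloc_irq_at(pdev, MSI_ANY_INDEX, NULL);
+
+			if (map.index >= 0) {
+				/* use map.virq, then release it when done */
+				pci_msix_free_irq(pdev, map);
+			}
+		}
+		return 0;
+	}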
void pci_free_irq_vectors(struct pci_dev *dev);
int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
@@ -1586,6 +1613,13 @@ pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
return 1;
return -ENOSPC;
}
+static inline int
+pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags)
+{
+ return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
+ flags, NULL);
+}
static inline void pci_free_irq_vectors(struct pci_dev *dev)
{
@@ -1898,15 +1932,13 @@ pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
{
return -ENOSPC;
}
-#endif /* CONFIG_PCI */
-
static inline int
pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags)
{
- return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
- NULL);
+ return -ENOSPC;
}
+#endif /* CONFIG_PCI */
/* Include architecture-dependent settings and functions */
@@ -2474,6 +2506,14 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#endif
+struct msi_domain_template;
+
+bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
+ unsigned int hwsize, void *data);
+struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie,
+ const struct irq_affinity_desc *affdesc);
+void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map);
+
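+A hedged sketch of the PCI/IMS flow: the driver creates an IMS domain from its own template, then allocates interrupts carrying a per-instance cookie (for example a PASID or queue address) that the template's irq chip uses to program the message. example_ims_template, the hwsize of 64 and the pasid parameter are assumptions.
+
+	static int example_ims_setup(struct pci_dev *pdev, u32 pasid)
+	{
+		union msi_instance_cookie icookie = { .value = pasid };
+		struct msi_map map;
+
+		if (!pci_create_ims_domain(pdev, &example_ims_template, 64, NULL))
+			return -ENODEV;
+
+		map = pci_ims_alloc_irq(pdev, &icookie, NULL);
+		if (map.index < 0)
+			return map.index;
+
+		/* ... use map.virq; later: pci_ims_free_irq(pdev, map); ... */
+		return 0;
+	}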
#include <linux/dma-mapping.h>
#define pci_printk(level, pdev, fmt, arg...) \
@@ -2484,6 +2524,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
+#define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg)
#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index f1ec5ad1351c..1338ea2aa720 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -37,12 +37,11 @@
/*
* Percpu allocator can serve percpu allocations before slab is
* initialized which allows slab to depend on the percpu allocator.
- * The following two parameters decide how much resource to
- * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
- * larger than PERCPU_DYNAMIC_EARLY_SIZE.
+ * The following parameter decides how much resource to preallocate
+ * for this. Keep PERCPU_DYNAMIC_RESERVE equal to or larger than
+ * PERCPU_DYNAMIC_EARLY_SIZE.
*/
-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
-#define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
+#define PERCPU_DYNAMIC_EARLY_SIZE (20 << 10)
/*
* PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 0356cb6a215d..ef914a600087 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -100,7 +100,7 @@ struct arm_pmu {
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
int (*map_event)(struct perf_event *event);
- int (*filter_match)(struct perf_event *event);
+ bool (*filter)(struct pmu *pmu, int cpu);
int num_events;
bool secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
@@ -174,7 +174,6 @@ void kvm_host_pmu_init(struct arm_pmu *pmu);
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
-struct arm_pmu *armpmu_alloc_atomic(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
int armpmu_request_irq(int irq, int cpu);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 853f64b6c8c2..c6a3bac76966 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -266,6 +266,7 @@ struct hw_perf_event {
};
struct perf_event;
+struct perf_event_pmu_context;
/*
* Common implementation detail of pmu::{start,commit,cancel}_txn
@@ -308,7 +309,7 @@ struct pmu {
int capabilities;
int __percpu *pmu_disable_count;
- struct perf_cpu_context __percpu *pmu_cpu_context;
+ struct perf_cpu_pmu_context __percpu *cpu_pmu_context;
atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
int task_ctx_nr;
int hrtimer_interval_ms;
@@ -443,7 +444,7 @@ struct pmu {
/*
* context-switches callback
*/
- void (*sched_task) (struct perf_event_context *ctx,
+ void (*sched_task) (struct perf_event_pmu_context *pmu_ctx,
bool sched_in);
/*
@@ -457,8 +458,8 @@ struct pmu {
* implementation and Perf core context switch handling callbacks for usage
* examples.
*/
- void (*swap_task_ctx) (struct perf_event_context *prev,
- struct perf_event_context *next);
+ void (*swap_task_ctx) (struct perf_event_pmu_context *prev_epc,
+ struct perf_event_pmu_context *next_epc);
/* optional */
/*
@@ -522,9 +523,10 @@ struct pmu {
/* optional */
/*
- * Filter events for PMU-specific reasons.
+ * Skip programming this PMU on the given CPU. Typically needed for
+ * big.LITTLE things.
*/
- int (*filter_match) (struct perf_event *event); /* optional */
+ bool (*filter) (struct pmu *pmu, int cpu); /* optional */
/*
* Check period value for PERF_EVENT_IOC_PERIOD ioctl.
@@ -695,6 +697,11 @@ struct perf_event {
int group_caps;
struct perf_event *group_leader;
+ /*
+	 * event->pmu always points to the pmu to which this event belongs.
+	 * By contrast, event->pmu_ctx->pmu may point to a different pmu when
+	 * a group of events from different pmus is created.
+ */
struct pmu *pmu;
void *pmu_private;
@@ -720,6 +727,12 @@ struct perf_event {
struct hw_perf_event hw;
struct perf_event_context *ctx;
+ /*
+	 * event->pmu_ctx points to the perf_event_pmu_context to which the
+	 * event is added. For a software event, this pmu_ctx can belong to a
+	 * different pmu when the software event is part of a group that also
+	 * contains non-software events.
+ */
+ struct perf_event_pmu_context *pmu_ctx;
atomic_long_t refcount;
/*
@@ -756,11 +769,14 @@ struct perf_event {
struct fasync_struct *fasync;
/* delayed work for NMIs and such */
- int pending_wakeup;
- int pending_kill;
- int pending_disable;
+ unsigned int pending_wakeup;
+ unsigned int pending_kill;
+ unsigned int pending_disable;
+ unsigned int pending_sigtrap;
unsigned long pending_addr; /* SIGTRAP */
- struct irq_work pending;
+ struct irq_work pending_irq;
+ struct callback_head pending_task;
+ unsigned int pending_work;
atomic_t event_limit;
@@ -809,19 +825,69 @@ struct perf_event {
#endif /* CONFIG_PERF_EVENTS */
};
+/*
+ * ,-----------------------[1:n]----------------------.
+ * V V
+ * perf_event_context <-[1:n]-> perf_event_pmu_context <--- perf_event
+ * ^ ^ | |
+ * `--------[1:n]---------' `-[n:1]-> pmu <-[1:n]-'
+ *
+ *
+ * struct perf_event_pmu_context lifetime is refcount based and RCU freed
+ * (similar to perf_event_context). Locking is as if it were a member of
+ * perf_event_context; specifically:
+ *
+ * modification, both: ctx->mutex && ctx->lock
+ * reading, either: ctx->mutex || ctx->lock
+ *
+ * There is one exception to this; namely put_pmu_ctx() isn't always called
+ * with ctx->mutex held; this means that as long as we can guarantee the epc
+ * has events the above rules hold.
+ *
+ * Specifically, sys_perf_event_open()'s group_leader case depends on
+ * ctx->mutex pinning the configuration. Since we hold a reference on
+ * group_leader (through the filedesc) it can't go away, therefore its
+ * associated pmu_ctx must exist and cannot change due to ctx->mutex.
+ */
+struct perf_event_pmu_context {
+ struct pmu *pmu;
+ struct perf_event_context *ctx;
+
+ struct list_head pmu_ctx_entry;
+
+ struct list_head pinned_active;
+ struct list_head flexible_active;
+
+ /* Used to avoid freeing per-cpu perf_event_pmu_context */
+ unsigned int embedded : 1;
+
+ unsigned int nr_events;
+
+ atomic_t refcount; /* event <-> epc */
+ struct rcu_head rcu_head;
+
+ void *task_ctx_data; /* pmu specific data */
+ /*
+ * Set when one or more (plausibly active) event can't be scheduled
+	 * Set when one or more (plausibly active) events can't be scheduled
+	 * due to pmu overcommit or pmu constraints; tolerant of events that
+	 * need not be active due to scheduling constraints, such as cgroups.
+ int rotate_necessary;
+};
struct perf_event_groups {
struct rb_root tree;
u64 index;
};
+
/**
* struct perf_event_context - event context structure
*
* Used as a container for task events and CPU events as well:
*/
struct perf_event_context {
- struct pmu *pmu;
/*
* Protect the states of the events in the list,
* nr_active, and the list:
@@ -834,27 +900,21 @@ struct perf_event_context {
*/
struct mutex mutex;
- struct list_head active_ctx_list;
+ struct list_head pmu_ctx_list;
struct perf_event_groups pinned_groups;
struct perf_event_groups flexible_groups;
struct list_head event_list;
- struct list_head pinned_active;
- struct list_head flexible_active;
-
int nr_events;
- int nr_active;
int nr_user;
int is_active;
+
+ int nr_task_data;
int nr_stat;
int nr_freq;
int rotate_disable;
- /*
- * Set when nr_events != nr_active, except tolerant to events not
- * necessary to be active due to scheduling constraints, such as cgroups.
- */
- int rotate_necessary;
- refcount_t refcount;
+
+ refcount_t refcount; /* event <-> ctx */
struct task_struct *task;
/*
@@ -875,8 +935,15 @@ struct perf_event_context {
#ifdef CONFIG_CGROUP_PERF
int nr_cgroups; /* cgroup evts */
#endif
- void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
+
+ /*
+ * Sum (event->pending_sigtrap + event->pending_work)
+ *
+	 * The SIGTRAP is targeted at ctx->task; as such, ctx->task must not
+	 * change until the signal is delivered.
+ */
+ local_t nr_pending;
};
/*
@@ -885,12 +952,13 @@ struct perf_event_context {
*/
#define PERF_NR_CONTEXTS 4
-/**
- * struct perf_cpu_context - per cpu event context structure
- */
-struct perf_cpu_context {
- struct perf_event_context ctx;
- struct perf_event_context *task_ctx;
+struct perf_cpu_pmu_context {
+ struct perf_event_pmu_context epc;
+ struct perf_event_pmu_context *task_epc;
+
+ struct list_head sched_cb_entry;
+ int sched_cb_usage;
+
int active_oncpu;
int exclusive;
@@ -898,16 +966,20 @@ struct perf_cpu_context {
struct hrtimer hrtimer;
ktime_t hrtimer_interval;
unsigned int hrtimer_active;
+};
+
+/**
+ * struct perf_cpu_context - per cpu event context structure
+ */
+struct perf_cpu_context {
+ struct perf_event_context ctx;
+ struct perf_event_context *task_ctx;
+ int online;
#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp;
- struct list_head cgrp_cpuctx_entry;
#endif
- struct list_head sched_cb_entry;
- int sched_cb_usage;
-
- int online;
/*
* Per-CPU storage for iterators used in visit_groups_merge. The default
* storage is of size 2 to hold the CPU and any CPU event iterators.
@@ -971,6 +1043,8 @@ perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
#ifdef CONFIG_PERF_EVENTS
+extern struct perf_event_context *perf_cpu_task_ctx(void);
+
extern void *perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
@@ -1176,7 +1250,7 @@ static inline int is_software_event(struct perf_event *event)
*/
static inline int in_software_context(struct perf_event *event)
{
- return event->ctx->pmu->task_ctx_nr == perf_sw_context;
+ return event->pmu_ctx->pmu->task_ctx_nr == perf_sw_context;
}
static inline int is_exclusive_pmu(struct pmu *pmu)
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index a108b60a6962..5f0d7d0b9471 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -165,6 +165,13 @@ static inline pte_t *virt_to_kpte(unsigned long vaddr)
return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}
+#ifndef pmd_young
+static inline int pmd_young(pmd_t pmd)
+{
+ return 0;
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
@@ -260,6 +267,17 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef arch_has_hw_nonleaf_pmd_young
+/*
+ * Return whether the accessed bit in non-leaf PMD entries is supported on the
+ * local CPU.
+ */
+static inline bool arch_has_hw_nonleaf_pmd_young(void)
+{
+ return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG);
+}
+#endif
+
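+Illustrative only: callers are expected to gate non-leaf PMD accessed-bit checks on the runtime capability, since the fallback pmd_young() above always returns 0. The helper name below is hypothetical.
+
+	static bool example_nonleaf_pmd_referenced(pmd_t pmd)
+	{
+		/* Only trust the A bit if the CPU maintains it for non-leaf PMDs */
+		return arch_has_hw_nonleaf_pmd_young() && pmd_young(pmd);
+	}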
#ifndef arch_has_hw_pte_young
/*
* Return whether the accessed bit is supported on the local CPU.
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 664dd409feb9..3f01ac8017e0 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -122,6 +122,7 @@ enum phylink_op_type {
* (See commit 7cceb599d15d ("net: phylink: avoid mac_config calls")
* @poll_fixed_state: if true, starts link_poll,
* if MAC link is at %MLO_AN_FIXED mode.
+ * @mac_managed_pm: if true, indicates that the MAC driver is responsible for PHY PM.
* @ovr_an_inband: if true, override PCS to MLO_AN_INBAND
* @get_fixed_state: callback to execute to determine the fixed link state,
* if MAC link is at %MLO_AN_FIXED mode.
@@ -134,6 +135,7 @@ struct phylink_config {
enum phylink_op_type type;
bool legacy_pre_march2020;
bool poll_fixed_state;
+ bool mac_managed_pm;
bool ovr_an_inband;
void (*get_fixed_state)(struct phylink_config *config,
struct phylink_link_state *state);
diff --git a/include/linux/platform_data/gpmc-omap.h b/include/linux/platform_data/gpmc-omap.h
index c9cc4e32435d..dcca6c5e23bb 100644
--- a/include/linux/platform_data/gpmc-omap.h
+++ b/include/linux/platform_data/gpmc-omap.h
@@ -136,6 +136,13 @@ struct gpmc_device_timings {
#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */
#define GPMC_MUX_AD 2 /* Addr-Data multiplex */
+/* Wait pin polarity values */
+#define GPMC_WAITPINPOLARITY_INVALID UINT_MAX
+#define GPMC_WAITPINPOLARITY_ACTIVE_LOW 0
+#define GPMC_WAITPINPOLARITY_ACTIVE_HIGH 1
+
+#define GPMC_WAITPIN_INVALID UINT_MAX
+
struct gpmc_settings {
bool burst_wrap; /* enables wrap bursting */
bool burst_read; /* enables read page/burst mode */
@@ -149,6 +156,7 @@ struct gpmc_settings {
u32 device_width; /* device bus width (8 or 16 bit) */
u32 mux_add_data; /* multiplex address & data */
u32 wait_pin; /* wait-pin to be used */
+ u32 wait_pin_polarity;
};
/* Data for each chip select */
diff --git a/include/linux/platform_data/st33zp24.h b/include/linux/platform_data/st33zp24.h
deleted file mode 100644
index 61db674f36cc..000000000000
--- a/include/linux/platform_data/st33zp24.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * STMicroelectronics TPM Linux driver for TPM 1.2 ST33ZP24
- * Copyright (C) 2009 - 2016 STMicroelectronics
- */
-#ifndef __ST33ZP24_H__
-#define __ST33ZP24_H__
-
-#define TPM_ST33_I2C "st33zp24-i2c"
-#define TPM_ST33_SPI "st33zp24-spi"
-
-struct st33zp24_platform_data {
- int io_lpcpd;
-};
-
-#endif /* __ST33ZP24_H__ */
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index ebc351698090..1cd41bdf73cf 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -17,6 +17,7 @@
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
+#include <linux/time64.h>
/*
* Flags to control the behaviour of a genpd.
@@ -95,6 +96,7 @@ struct genpd_governor_data {
s64 max_off_time_ns;
bool max_off_time_changed;
ktime_t next_wakeup;
+ ktime_t next_hrtimer;
bool cached_power_down_ok;
bool cached_power_down_state_idx;
};
@@ -232,6 +234,7 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb);
int dev_pm_genpd_remove_notifier(struct device *dev);
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next);
+ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev);
extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
@@ -293,6 +296,10 @@ static inline int dev_pm_genpd_remove_notifier(struct device *dev)
static inline void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{ }
+static inline ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
+{
+ return KTIME_MAX;
+}
#define simple_qos_governor (*(struct dev_power_governor *)(NULL))
#define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL))
#endif
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 7d1e604c1325..ee608d22ecb9 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -69,21 +69,21 @@ extern int __posix_acl_create(struct posix_acl **, gfp_t, umode_t *);
extern int __posix_acl_chmod(struct posix_acl **, gfp_t, umode_t);
extern struct posix_acl *get_posix_acl(struct inode *, int);
-extern int set_posix_acl(struct user_namespace *, struct inode *, int,
- struct posix_acl *);
+int set_posix_acl(struct user_namespace *, struct dentry *, int,
+ struct posix_acl *);
struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type);
struct posix_acl *posix_acl_clone(const struct posix_acl *acl, gfp_t flags);
#ifdef CONFIG_FS_POSIX_ACL
-int posix_acl_chmod(struct user_namespace *, struct inode *, umode_t);
+int posix_acl_chmod(struct user_namespace *, struct dentry *, umode_t);
extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **,
struct posix_acl **);
int posix_acl_update_mode(struct user_namespace *, struct inode *, umode_t *,
struct posix_acl **);
-extern int simple_set_acl(struct user_namespace *, struct inode *,
- struct posix_acl *, int);
+int simple_set_acl(struct user_namespace *, struct dentry *,
+ struct posix_acl *, int);
extern int simple_acl_create(struct inode *, struct inode *);
struct posix_acl *get_cached_acl(struct inode *inode, int type);
@@ -99,9 +99,16 @@ static inline void cache_no_acl(struct inode *inode)
inode->i_acl = NULL;
inode->i_default_acl = NULL;
}
+
+int vfs_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry,
+ const char *acl_name, struct posix_acl *kacl);
+struct posix_acl *vfs_get_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *acl_name);
+int vfs_remove_acl(struct user_namespace *mnt_userns, struct dentry *dentry,
+ const char *acl_name);
#else
static inline int posix_acl_chmod(struct user_namespace *mnt_userns,
- struct inode *inode, umode_t mode)
+ struct dentry *dentry, umode_t mode)
{
return 0;
}
@@ -126,8 +133,28 @@ static inline int posix_acl_create(struct inode *inode, umode_t *mode,
static inline void forget_all_cached_acls(struct inode *inode)
{
}
+
+static inline int vfs_set_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name,
+ struct posix_acl *acl)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct posix_acl *vfs_get_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int vfs_remove_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *acl_name)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_FS_POSIX_ACL */
-struct posix_acl *get_acl(struct inode *inode, int type);
+struct posix_acl *get_inode_acl(struct inode *inode, int type);
#endif /* __LINUX_POSIX_ACL_H */
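
A hedged sketch of the new dentry-based ACL API from a caller's perspective; mnt_userns and dentry are assumed to be in scope, the function name is made up, and error handling is trimmed.

	static int example_tweak_acl(struct user_namespace *mnt_userns,
				     struct dentry *dentry)
	{
		struct posix_acl *acl;
		int ret;

		acl = vfs_get_acl(mnt_userns, dentry, XATTR_NAME_POSIX_ACL_ACCESS);
		if (IS_ERR(acl))
			return PTR_ERR(acl);
		/* inspect or adjust the ACL here */
		ret = vfs_set_acl(mnt_userns, dentry, XATTR_NAME_POSIX_ACL_ACCESS, acl);
		posix_acl_release(acl);
		return ret;
	}
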
diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h
index 8163dd48c430..54cd7a14330d 100644
--- a/include/linux/posix_acl_xattr.h
+++ b/include/linux/posix_acl_xattr.h
@@ -33,33 +33,40 @@ posix_acl_xattr_count(size_t size)
}
#ifdef CONFIG_FS_POSIX_ACL
-void posix_acl_fix_xattr_from_user(void *value, size_t size);
-void posix_acl_fix_xattr_to_user(void *value, size_t size);
-void posix_acl_getxattr_idmapped_mnt(struct user_namespace *mnt_userns,
- const struct inode *inode,
- void *value, size_t size);
+struct posix_acl *posix_acl_from_xattr(struct user_namespace *user_ns,
+ const void *value, size_t size);
#else
-static inline void posix_acl_fix_xattr_from_user(void *value, size_t size)
-{
-}
-static inline void posix_acl_fix_xattr_to_user(void *value, size_t size)
-{
-}
-static inline void
-posix_acl_getxattr_idmapped_mnt(struct user_namespace *mnt_userns,
- const struct inode *inode, void *value,
- size_t size)
+static inline struct posix_acl *
+posix_acl_from_xattr(struct user_namespace *user_ns, const void *value,
+ size_t size)
{
+ return ERR_PTR(-EOPNOTSUPP);
}
#endif
-struct posix_acl *posix_acl_from_xattr(struct user_namespace *user_ns,
- const void *value, size_t size);
int posix_acl_to_xattr(struct user_namespace *user_ns,
const struct posix_acl *acl, void *buffer, size_t size);
-struct posix_acl *vfs_set_acl_prepare(struct user_namespace *mnt_userns,
- struct user_namespace *fs_userns,
- const void *value, size_t size);
+static inline const char *posix_acl_xattr_name(int type)
+{
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ return XATTR_NAME_POSIX_ACL_ACCESS;
+ case ACL_TYPE_DEFAULT:
+ return XATTR_NAME_POSIX_ACL_DEFAULT;
+ }
+
+ return "";
+}
+
+static inline int posix_acl_type(const char *name)
+{
+ if (strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS) == 0)
+ return ACL_TYPE_ACCESS;
+ else if (strcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT) == 0)
+ return ACL_TYPE_DEFAULT;
+
+ return -1;
+}
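+
+As a trivial usage sketch (fragment only), the two new helpers are inverses of each other for the two supported ACL types:
+
+	const char *name = posix_acl_xattr_name(ACL_TYPE_DEFAULT);
+
+	WARN_ON(posix_acl_type(name) != ACL_TYPE_DEFAULT);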
extern const struct xattr_handler posix_acl_access_xattr_handler;
extern const struct xattr_handler posix_acl_default_xattr_handler;
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
index e0a0759dd09c..c94c02ba065c 100644
--- a/include/linux/prandom.h
+++ b/include/linux/prandom.h
@@ -9,6 +9,7 @@
#define _LINUX_PRANDOM_H
#include <linux/types.h>
+#include <linux/once.h>
#include <linux/percpu.h>
#include <linux/random.h>
@@ -23,24 +24,10 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
#define prandom_init_once(pcpu_state) \
DO_ONCE(prandom_seed_full_state, (pcpu_state))
-/**
- * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
- * @ep_ro: right open interval endpoint
- *
- * Returns a pseudo-random number that is in interval [0, ep_ro). This is
- * useful when requesting a random index of an array containing ep_ro elements,
- * for example. The result is somewhat biased when ep_ro is not a power of 2,
- * so do not use this for cryptographic purposes.
- *
- * Returns: pseudo-random number in interval [0, ep_ro)
- */
+/* Deprecated: use get_random_u32_below() instead. */
static inline u32 prandom_u32_max(u32 ep_ro)
{
- if (__builtin_constant_p(ep_ro <= 1U << 8) && ep_ro <= 1U << 8)
- return (get_random_u8() * ep_ro) >> 8;
- if (__builtin_constant_p(ep_ro <= 1U << 16) && ep_ro <= 1U << 16)
- return (get_random_u16() * ep_ro) >> 16;
- return ((u64)get_random_u32() * ep_ro) >> 32;
+ return get_random_u32_below(ep_ro);
}
/*
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index 6e4372735068..1e0a0d7ace3a 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -72,6 +72,9 @@ enum psi_states {
/* Use one bit in the state mask to track TSK_ONCPU */
#define PSI_ONCPU (1 << NR_PSI_STATES)
+/* Flag whether to re-arm avgs_work, see details in get_recent_times() */
+#define PSI_STATE_RESCHEDULE (1 << (NR_PSI_STATES + 1))
+
enum psi_aggregators {
PSI_AVGS = 0,
PSI_POLL,
@@ -177,6 +180,7 @@ struct psi_group {
struct timer_list poll_timer;
wait_queue_head_t poll_wait;
atomic_t poll_wakeup;
+ atomic_t poll_scheduled;
/* Protects data used by the monitor */
struct mutex trigger_lock;
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 9f16afec7290..9d65ff94e216 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -8,28 +8,7 @@
#ifndef __LINUX_PSTORE_RAM_H__
#define __LINUX_PSTORE_RAM_H__
-#include <linux/compiler.h>
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
#include <linux/pstore.h>
-#include <linux/types.h>
-
-/*
- * Choose whether access to the RAM zone requires locking or not. If a zone
- * can be written to from different CPUs like with ftrace for example, then
- * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
- */
-#define PRZ_FLAG_NO_LOCK BIT(0)
-/*
- * If a PRZ should only have a single-boot lifetime, this marks it as
- * getting wiped after its contents get copied out after boot.
- */
-#define PRZ_FLAG_ZAP_OLD BIT(1)
-
-struct persistent_ram_buffer;
-struct rs_control;
struct persistent_ram_ecc_info {
int block_size;
@@ -39,84 +18,6 @@ struct persistent_ram_ecc_info {
uint16_t *par;
};
-/**
- * struct persistent_ram_zone - Details of a persistent RAM zone (PRZ)
- * used as a pstore backend
- *
- * @paddr: physical address of the mapped RAM area
- * @size: size of mapping
- * @label: unique name of this PRZ
- * @type: frontend type for this PRZ
- * @flags: holds PRZ_FLAGS_* bits
- *
- * @buffer_lock:
- * locks access to @buffer "size" bytes and "start" offset
- * @buffer:
- * pointer to actual RAM area managed by this PRZ
- * @buffer_size:
- * bytes in @buffer->data (not including any trailing ECC bytes)
- *
- * @par_buffer:
- * pointer into @buffer->data containing ECC bytes for @buffer->data
- * @par_header:
- * pointer into @buffer->data containing ECC bytes for @buffer header
- * (i.e. all fields up to @data)
- * @rs_decoder:
- * RSLIB instance for doing ECC calculations
- * @corrected_bytes:
- * ECC corrected bytes accounting since boot
- * @bad_blocks:
- * ECC uncorrectable bytes accounting since boot
- * @ecc_info:
- * ECC configuration details
- *
- * @old_log:
- * saved copy of @buffer->data prior to most recent wipe
- * @old_log_size:
- * bytes contained in @old_log
- *
- */
-struct persistent_ram_zone {
- phys_addr_t paddr;
- size_t size;
- void *vaddr;
- char *label;
- enum pstore_type_id type;
- u32 flags;
-
- raw_spinlock_t buffer_lock;
- struct persistent_ram_buffer *buffer;
- size_t buffer_size;
-
- char *par_buffer;
- char *par_header;
- struct rs_control *rs_decoder;
- int corrected_bytes;
- int bad_blocks;
- struct persistent_ram_ecc_info ecc_info;
-
- char *old_log;
- size_t old_log_size;
-};
-
-struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
- u32 sig, struct persistent_ram_ecc_info *ecc_info,
- unsigned int memtype, u32 flags, char *label);
-void persistent_ram_free(struct persistent_ram_zone *prz);
-void persistent_ram_zap(struct persistent_ram_zone *prz);
-
-int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
- unsigned int count);
-int persistent_ram_write_user(struct persistent_ram_zone *prz,
- const void __user *s, unsigned int count);
-
-void persistent_ram_save_old(struct persistent_ram_zone *prz);
-size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
-void *persistent_ram_old(struct persistent_ram_zone *prz);
-void persistent_ram_free_old(struct persistent_ram_zone *prz);
-ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
- char *str, size_t len);
-
/*
* Ramoops platform data
* @mem_size memory size for ramoops
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index c952c5ba8fab..eaaef3ffec22 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -389,15 +389,6 @@ static inline void user_single_step_report(struct pt_regs *regs)
#define current_pt_regs() task_pt_regs(current)
#endif
-/*
- * unlike current_pt_regs(), this one is equal to task_pt_regs(current)
- * on *all* architectures; the only reason to have a per-arch definition
- * is optimisation.
- */
-#ifndef signal_pt_regs
-#define signal_pt_regs() task_pt_regs(current)
-#endif
-
#ifndef current_user_stack_pointer
#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
#endif
diff --git a/include/linux/random.h b/include/linux/random.h
index 147a5e0d0b8e..4a2a1de423cd 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -6,7 +6,6 @@
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/once.h>
#include <uapi/linux/random.h>
@@ -17,16 +16,16 @@ void __init add_bootloader_randomness(const void *buf, size_t len);
void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value) __latent_entropy;
void add_interrupt_randomness(int irq) __latent_entropy;
-void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
+void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after);
-#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
static inline void add_latent_entropy(void)
{
+#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
-}
#else
-static inline void add_latent_entropy(void) { }
+ add_device_randomness(NULL, 0);
#endif
+}
#if IS_ENABLED(CONFIG_VMGENID)
void add_vmfork_randomness(const void *unique_vm_id, size_t len);
@@ -51,29 +50,76 @@ static inline unsigned long get_random_long(void)
#endif
}
+u32 __get_random_u32_below(u32 ceil);
+
/*
- * On 64-bit architectures, protect against non-terminated C string overflows
- * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
+ * Returns a random integer in the interval [0, ceil), with uniform
+ * distribution, suitable for all uses. Fastest when ceil is a constant, but
+ * still fast for variable ceil as well.
*/
-#ifdef CONFIG_64BIT
-# ifdef __LITTLE_ENDIAN
-# define CANARY_MASK 0xffffffffffffff00UL
-# else /* big endian, 64 bits: */
-# define CANARY_MASK 0x00ffffffffffffffUL
-# endif
-#else /* 32 bits: */
-# define CANARY_MASK 0xffffffffUL
-#endif
+static inline u32 get_random_u32_below(u32 ceil)
+{
+ if (!__builtin_constant_p(ceil))
+ return __get_random_u32_below(ceil);
+
+ /*
+ * For the fast path, below, all operations on ceil are precomputed by
+ * the compiler, so this incurs no overhead for checking pow2, doing
+ * divisions, or branching based on integer size. The resultant
+ * algorithm does traditional reciprocal multiplication (typically
+ * optimized by the compiler into shifts and adds), rejecting samples
+ * whose lower half would indicate a range indivisible by ceil.
+ */
+ BUILD_BUG_ON_MSG(!ceil, "get_random_u32_below() must take ceil > 0");
+ if (ceil <= 1)
+ return 0;
+ for (;;) {
+ if (ceil <= 1U << 8) {
+ u32 mult = ceil * get_random_u8();
+ if (likely(is_power_of_2(ceil) || (u8)mult >= (1U << 8) % ceil))
+ return mult >> 8;
+ } else if (ceil <= 1U << 16) {
+ u32 mult = ceil * get_random_u16();
+ if (likely(is_power_of_2(ceil) || (u16)mult >= (1U << 16) % ceil))
+ return mult >> 16;
+ } else {
+ u64 mult = (u64)ceil * get_random_u32();
+ if (likely(is_power_of_2(ceil) || (u32)mult >= -ceil % ceil))
+ return mult >> 32;
+ }
+ }
+}
+
+/*
+ * Returns a random integer in the interval (floor, U32_MAX], with uniform
+ * distribution, suitable for all uses. Fastest when floor is a constant, but
+ * still fast for variable floor as well.
+ */
+static inline u32 get_random_u32_above(u32 floor)
+{
+ BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && floor == U32_MAX,
+ "get_random_u32_above() must take floor < U32_MAX");
+ return floor + 1 + get_random_u32_below(U32_MAX - floor);
+}
-static inline unsigned long get_random_canary(void)
+/*
+ * Returns a random integer in the interval [floor, ceil], with uniform
+ * distribution, suitable for all uses. Fastest when floor and ceil are
+ * constant, but still fast for variable floor and ceil as well.
+ */
+static inline u32 get_random_u32_inclusive(u32 floor, u32 ceil)
{
- return get_random_long() & CANARY_MASK;
+ BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && __builtin_constant_p(ceil) &&
+ (floor > ceil || ceil - floor == U32_MAX),
+ "get_random_u32_inclusive() must take floor <= ceil");
+ return floor + get_random_u32_below(ceil - floor + 1);
}
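
A short usage sketch of the three bounded helpers; nr_entries and min_delay are hypothetical variables.

	size_t idx  = get_random_u32_below(nr_entries);       /* [0, nr_entries)      */
	u32 backoff = get_random_u32_above(min_delay);        /* (min_delay, U32_MAX] */
	u32 port    = get_random_u32_inclusive(49152, 65535); /* [49152, 65535]       */
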
void __init random_init_early(const char *command_line);
void __init random_init(void);
bool rng_is_initialized(void);
int wait_for_random_bytes(void);
+int execute_with_initialized_rng(struct notifier_block *nb);
/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
* Returns the result of the call to wait_for_random_bytes. */
@@ -108,26 +154,6 @@ declare_get_random_var_wait(long, unsigned long)
#include <asm/archrandom.h>
-/*
- * Called from the boot CPU during startup; not valid to call once
- * secondary CPUs are up and preemption is possible.
- */
-#ifndef arch_get_random_seed_longs_early
-static inline size_t __init arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
-{
- WARN_ON(system_state != SYSTEM_BOOTING);
- return arch_get_random_seed_longs(v, max_longs);
-}
-#endif
-
-#ifndef arch_get_random_longs_early
-static inline bool __init arch_get_random_longs_early(unsigned long *v, size_t max_longs)
-{
- WARN_ON(system_state != SYSTEM_BOOTING);
- return arch_get_random_longs(v, max_longs);
-}
-#endif
-
#ifdef CONFIG_SMP
int random_prepare_cpu(unsigned int cpu);
int random_online_cpu(unsigned int cpu);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 08605ce7379d..4da98ca6273e 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -108,6 +108,15 @@ static inline int rcu_preempt_depth(void)
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_RCU_LAZY
+void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func);
+#else
+static inline void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
+{
+ call_rcu(head, func);
+}
+#endif
+
/* Internal to kernel */
void rcu_init(void);
extern int rcu_scheduler_active;
@@ -340,6 +349,11 @@ static inline int rcu_read_lock_any_held(void)
return !preemptible();
}
+static inline int debug_lockdep_rcu_enabled(void)
+{
+ return 0;
+}
+
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
#ifdef CONFIG_PROVE_RCU
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 768196a5f39d..68f9070aa111 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -142,23 +142,17 @@ static inline int rcu_needs_cpu(void)
* Take advantage of the fact that there is only one CPU, which
* allows us to ignore virtualization-based context switches.
*/
-static inline void rcu_virt_note_context_switch(int cpu) { }
+static inline void rcu_virt_note_context_switch(void) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
static inline void rcu_irq_exit_check_preempt(void) { }
-#define rcu_is_idle_cpu(cpu) \
- (is_idle_task(current) && !in_nmi() && !in_hardirq() && !in_serving_softirq())
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
return false;
}
static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
-#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
-#else /* #ifndef CONFIG_SRCU */
-static inline void rcu_scheduler_starting(void) { }
-#endif /* #else #ifndef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 5efb51486e8a..4003bf6cfa1c 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -27,7 +27,7 @@ void rcu_cpu_stall_reset(void);
* wrapper around rcu_note_context_switch(), which allows TINY_RCU
* to save a few bytes. The caller must have disabled interrupts.
*/
-static inline void rcu_virt_note_context_switch(int cpu)
+static inline void rcu_virt_note_context_switch(void)
{
rcu_note_context_switch(false);
}
@@ -87,8 +87,6 @@ bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
void cond_synchronize_rcu(unsigned long oldstate);
void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
-bool rcu_is_idle_cpu(int cpu);
-
#ifdef CONFIG_PROVE_RCU
void rcu_irq_exit_check_preempt(void);
#else
diff --git a/include/linux/regset.h b/include/linux/regset.h
index a00765f0e8cf..9061266dd8de 100644
--- a/include/linux/regset.h
+++ b/include/linux/regset.h
@@ -275,15 +275,15 @@ static inline int user_regset_copyin(unsigned int *pos, unsigned int *count,
return 0;
}
-static inline int user_regset_copyin_ignore(unsigned int *pos,
- unsigned int *count,
- const void **kbuf,
- const void __user **ubuf,
- const int start_pos,
- const int end_pos)
+static inline void user_regset_copyin_ignore(unsigned int *pos,
+ unsigned int *count,
+ const void **kbuf,
+ const void __user **ubuf,
+ const int start_pos,
+ const int end_pos)
{
if (*count == 0)
- return 0;
+ return;
BUG_ON(*pos < start_pos);
if (end_pos < 0 || *pos < end_pos) {
unsigned int copy = (end_pos < 0 ? *count
@@ -295,7 +295,6 @@ static inline int user_regset_copyin_ignore(unsigned int *pos,
*pos += copy;
*count -= copy;
}
- return 0;
}
extern int regset_get(struct task_struct *target,
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 0cf5b20c6ddf..0cee154abc9f 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -89,11 +89,12 @@ struct rdt_domain {
/**
* struct resctrl_cache - Cache allocation related data
* @cbm_len: Length of the cache bit mask
- * @min_cbm_bits: Minimum number of consecutive bits to be set
+ * @min_cbm_bits: Minimum number of consecutive bits to be set.
+ * The value 0 means the architecture can support
+ * zero CBM.
* @shareable_bits: Bitmask of shareable resource with other
* executing entities
* @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid.
- * @arch_has_empty_bitmaps: True if the '0' bitmap is valid.
* @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache
* level has CPU scope.
*/
@@ -102,7 +103,6 @@ struct resctrl_cache {
unsigned int min_cbm_bits;
unsigned int shareable_bits;
bool arch_has_sparse_bitmaps;
- bool arch_has_empty_bitmaps;
bool arch_has_per_cpu_cfg;
};
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 2504df9a0453..3c7d295746f6 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -100,7 +100,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
- struct file *filp, poll_table *poll_table);
+ struct file *filp, poll_table *poll_table, int full);
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
#define RING_BUFFER_ALL_CPUS -1
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ffb6eb55cd13..5affff14993d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -888,9 +888,6 @@ struct task_struct {
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
unsigned sched_migrated:1;
-#ifdef CONFIG_PSI
- unsigned sched_psi_wake_requeue:1;
-#endif
/* Force alignment to the next boundary: */
unsigned :0;
@@ -1243,7 +1240,7 @@ struct task_struct {
unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
- struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
+ struct perf_event_context *perf_event_ctxp;
struct mutex perf_event_mutex;
struct list_head perf_event_list;
#endif
diff --git a/include/linux/scs.h b/include/linux/scs.h
index 18122d9e17ff..4ab5bdc898cf 100644
--- a/include/linux/scs.h
+++ b/include/linux/scs.h
@@ -53,6 +53,22 @@ static inline bool task_scs_end_corrupted(struct task_struct *tsk)
return sz >= SCS_SIZE - 1 || READ_ONCE_NOCHECK(*magic) != SCS_END_MAGIC;
}
+DECLARE_STATIC_KEY_FALSE(dynamic_scs_enabled);
+
+static inline bool scs_is_dynamic(void)
+{
+ if (!IS_ENABLED(CONFIG_DYNAMIC_SCS))
+ return false;
+ return static_branch_likely(&dynamic_scs_enabled);
+}
+
+static inline bool scs_is_enabled(void)
+{
+ if (!IS_ENABLED(CONFIG_DYNAMIC_SCS))
+ return true;
+ return scs_is_dynamic();
+}
+
#else /* CONFIG_SHADOW_CALL_STACK */
static inline void *scs_alloc(int node) { return NULL; }
@@ -62,6 +78,8 @@ static inline void scs_task_reset(struct task_struct *tsk) {}
static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
static inline void scs_release(struct task_struct *tsk) {}
static inline bool task_scs_end_corrupted(struct task_struct *tsk) { return false; }
+static inline bool scs_is_enabled(void) { return false; }
+static inline bool scs_is_dynamic(void) { return false; }
#endif /* CONFIG_SHADOW_CALL_STACK */
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index d31d76be4982..175079552f68 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -27,6 +27,7 @@ struct seccomp_filter;
*
* @mode: indicates one of the valid values above for controlled
* system calls available to a process.
+ * @filter_count: number of seccomp filters
* @filter: must always point to a valid seccomp-filter or NULL as it is
* accessed without locking during system call entry.
*
diff --git a/include/linux/security.h b/include/linux/security.h
index ca1b7109c0db..5b67f208f7de 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -31,6 +31,7 @@
#include <linux/err.h>
#include <linux/string.h>
#include <linux/mm.h>
+#include <linux/sockptr.h>
struct linux_binprm;
struct cred;
@@ -361,6 +362,13 @@ int security_inode_getattr(const struct path *path);
int security_inode_setxattr(struct user_namespace *mnt_userns,
struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
+int security_inode_set_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *acl_name,
+ struct posix_acl *kacl);
+int security_inode_get_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *acl_name);
+int security_inode_remove_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *acl_name);
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
int security_inode_getxattr(struct dentry *dentry, const char *name);
@@ -396,6 +404,7 @@ int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
int security_file_open(struct file *file);
+int security_file_truncate(struct file *file);
int security_task_alloc(struct task_struct *task, unsigned long clone_flags);
void security_task_free(struct task_struct *task);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
@@ -872,6 +881,28 @@ static inline int security_inode_setxattr(struct user_namespace *mnt_userns,
return cap_inode_setxattr(dentry, name, value, size, flags);
}
+static inline int security_inode_set_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry,
+ const char *acl_name,
+ struct posix_acl *kacl)
+{
+ return 0;
+}
+
+static inline int security_inode_get_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return 0;
+}
+
+static inline int security_inode_remove_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return 0;
+}
+
static inline void security_inode_post_setxattr(struct dentry *dentry,
const char *name, const void *value, size_t size, int flags)
{ }
@@ -1014,6 +1045,11 @@ static inline int security_file_open(struct file *file)
return 0;
}
+static inline int security_file_truncate(struct file *file)
+{
+ return 0;
+}
+
static inline int security_task_alloc(struct task_struct *task,
unsigned long clone_flags)
{
@@ -1411,8 +1447,8 @@ int security_socket_getsockopt(struct socket *sock, int level, int optname);
int security_socket_setsockopt(struct socket *sock, int level, int optname);
int security_socket_shutdown(struct socket *sock, int how);
int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb);
-int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
- int __user *optlen, unsigned len);
+int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval,
+ sockptr_t optlen, unsigned int len);
int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid);
int security_sk_alloc(struct sock *sk, int family, gfp_t priority);
void security_sk_free(struct sock *sk);
@@ -1548,8 +1584,10 @@ static inline int security_sock_rcv_skb(struct sock *sk,
return 0;
}
-static inline int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
- int __user *optlen, unsigned len)
+static inline int security_socket_getpeersec_stream(struct socket *sock,
+ sockptr_t optval,
+ sockptr_t optlen,
+ unsigned int len)
{
return -ENOPROTOOPT;
}
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index d657f2a42a7b..91871464b99d 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -743,9 +743,15 @@ static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED;
static inline int setup_earlycon(char *buf) { return 0; }
#endif
-static inline bool uart_console_enabled(struct uart_port *port)
+/* Variant of uart_console_registered() when the console_list_lock is held. */
+static inline bool uart_console_registered_locked(struct uart_port *port)
{
- return uart_console(port) && (port->cons->flags & CON_ENABLED);
+ return uart_console(port) && console_is_registered_locked(port->cons);
+}
+
+static inline bool uart_console_registered(struct uart_port *port)
+{
+ return uart_console(port) && console_is_registered(port->cons);
}
struct uart_port *uart_get_console(struct uart_port *ports, int nr,
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 48f4b645193b..70d6cb94e580 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -376,7 +376,7 @@ static inline void sk_psock_report_error(struct sk_psock *psock, int err)
}
struct sk_psock *sk_psock_init(struct sock *sk, int node);
-void sk_psock_stop(struct sk_psock *psock, bool wait);
+void sk_psock_stop(struct sk_psock *psock);
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 90877fcde70b..45af70315a94 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -76,6 +76,17 @@
* rcu_read_lock before reading the address, then rcu_read_unlock after
* taking the spinlock within the structure expected at that address.
*
+ * Note that it is not possible to acquire a lock within a structure
+ * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
+ * as described above. The reason is that SLAB_TYPESAFE_BY_RCU pages
+ * are not zeroed before being given to the slab, which means that any
+ * locks must be initialized after each and every kmem_cache_alloc().
+ * Alternatively, make the ctor passed to kmem_cache_create() initialize
+ * the locks at page-allocation time, as is done in __i915_request_ctor(),
+ * sighand_ctor(), and anon_vma_ctor(). Such a ctor permits readers
+ * to safely acquire those ctor-initialized locks under rcu_read_lock()
+ * protection.
+ *
* Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
*/
/* Defer freeing slabs to RCU */
@@ -129,7 +140,11 @@
/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
+#ifndef CONFIG_SLUB_TINY
#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
+#else
+#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0)
+#endif
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
/*
@@ -336,13 +351,18 @@ enum kmalloc_cache_type {
#endif
#ifndef CONFIG_MEMCG_KMEM
KMALLOC_CGROUP = KMALLOC_NORMAL,
-#else
- KMALLOC_CGROUP,
#endif
+#ifdef CONFIG_SLUB_TINY
+ KMALLOC_RECLAIM = KMALLOC_NORMAL,
+#else
KMALLOC_RECLAIM,
+#endif
#ifdef CONFIG_ZONE_DMA
KMALLOC_DMA,
#endif
+#ifdef CONFIG_MEMCG_KMEM
+ KMALLOC_CGROUP,
+#endif
NR_KMALLOC_TYPES
};
@@ -441,7 +461,18 @@ static_assert(PAGE_SHIFT <= 20);
#endif /* !CONFIG_SLOB */
void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
-void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
+
+/**
+ * kmem_cache_alloc - Allocate an object
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ *
+ * Allocate an object from this cache.
+ * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
+ *
+ * Return: pointer to the new object or %NULL in case of error
+ */
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
gfp_t gfpflags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *s, void *objp);
@@ -470,35 +501,12 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignm
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
__malloc;
-#ifdef CONFIG_TRACING
void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
__assume_kmalloc_alignment __alloc_size(3);
void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
int node, size_t size) __assume_kmalloc_alignment
__alloc_size(4);
-#else /* CONFIG_TRACING */
-/* Save a function call when CONFIG_TRACING=n */
-static __always_inline __alloc_size(3)
-void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
-{
- void *ret = kmem_cache_alloc(s, flags);
-
- ret = kasan_kmalloc(s, ret, size, flags);
- return ret;
-}
-
-static __always_inline __alloc_size(4)
-void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
- int node, size_t size)
-{
- void *ret = kmem_cache_alloc_node(s, gfpflags, node);
-
- ret = kasan_kmalloc(s, ret, size, gfpflags);
- return ret;
-}
-#endif /* CONFIG_TRACING */
-
void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
__alloc_size(1);
@@ -506,9 +514,9 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
__alloc_size(1);
/**
- * kmalloc - allocate memory
+ * kmalloc - allocate kernel memory
* @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
+ * @flags: describe the allocation context
*
* kmalloc is the normal method of allocating memory
* for objects smaller than page size in the kernel.
@@ -535,12 +543,12 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
* %GFP_ATOMIC
* Allocation will not sleep. May use emergency pools.
*
- * %GFP_HIGHUSER
- * Allocate memory from high memory on behalf of user.
- *
* Also it is possible to set different flags by OR'ing
* in one or more of the following additional @flags:
*
+ * %__GFP_ZERO
+ * Zero the allocated memory before returning. Also see kzalloc().
+ *
* %__GFP_HIGH
* This allocation has high priority and may use emergency pools.
*
@@ -559,42 +567,42 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
* Try really hard to succeed the allocation but fail
* eventually.
*/
+#ifndef CONFIG_SLOB
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
- if (__builtin_constant_p(size)) {
-#ifndef CONFIG_SLOB
+ if (__builtin_constant_p(size) && size) {
unsigned int index;
-#endif
+
if (size > KMALLOC_MAX_CACHE_SIZE)
return kmalloc_large(size, flags);
-#ifndef CONFIG_SLOB
- index = kmalloc_index(size);
-
- if (!index)
- return ZERO_SIZE_PTR;
+ index = kmalloc_index(size);
return kmalloc_trace(
kmalloc_caches[kmalloc_type(flags)][index],
flags, size);
-#endif
}
return __kmalloc(size, flags);
}
+#else
+static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
+{
+ if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large(size, flags);
+
+ return __kmalloc(size, flags);
+}
+#endif
#ifndef CONFIG_SLOB
static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
- if (__builtin_constant_p(size)) {
+ if (__builtin_constant_p(size) && size) {
unsigned int index;
if (size > KMALLOC_MAX_CACHE_SIZE)
return kmalloc_large_node(size, flags, node);
index = kmalloc_index(size);
-
- if (!index)
- return ZERO_SIZE_PTR;
-
return kmalloc_node_trace(
kmalloc_caches[kmalloc_type(flags)][index],
flags, node, size);
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index f0ffad6a3365..5834bad8ad78 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -80,8 +80,10 @@ struct kmem_cache {
unsigned int *random_seq;
#endif
+#ifdef CONFIG_HARDENED_USERCOPY
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
+#endif
struct kmem_cache_node *node[MAX_NUMNODES];
};
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index f9c68a9dac04..aa0ee1678d29 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -41,6 +41,7 @@ enum stat_item {
CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */
NR_SLUB_STAT_ITEMS };
+#ifndef CONFIG_SLUB_TINY
/*
* When changing the layout, make sure freelist and tid are still compatible
* with this_cpu_cmpxchg_double() alignment requirements.
@@ -57,6 +58,7 @@ struct kmem_cache_cpu {
unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
+#endif /* CONFIG_SLUB_TINY */
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c) ((c)->partial)
@@ -88,7 +90,9 @@ struct kmem_cache_order_objects {
* Slab cache management.
*/
struct kmem_cache {
+#ifndef CONFIG_SLUB_TINY
struct kmem_cache_cpu __percpu *cpu_slab;
+#endif
/* Used for retrieving partial slabs, etc. */
slab_flags_t flags;
unsigned long min_partial;
@@ -136,13 +140,15 @@ struct kmem_cache {
struct kasan_cache kasan_info;
#endif
+#ifdef CONFIG_HARDENED_USERCOPY
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
+#endif
struct kmem_cache_node *node[MAX_NUMNODES];
};
-#ifdef CONFIG_SYSFS
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h
index d2b02bb43768..b85f66db33e1 100644
--- a/include/linux/soc/mediatek/mtk-mmsys.h
+++ b/include/linux/soc/mediatek/mtk-mmsys.h
@@ -9,6 +9,13 @@
enum mtk_ddp_comp_id;
struct device;
+enum mtk_dpi_out_format_con {
+ MTK_DPI_RGB888_SDR_CON,
+ MTK_DPI_RGB888_DDR_CON,
+ MTK_DPI_RGB565_SDR_CON,
+ MTK_DPI_RGB565_DDR_CON
+};
+
enum mtk_ddp_comp_id {
DDP_COMPONENT_AAL0,
DDP_COMPONENT_AAL1,
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index bc2fb8343a94..ad1fd718169d 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -42,7 +42,19 @@
#define LLCC_CPUHWT 36
#define LLCC_MDMCLAD2 37
#define LLCC_CAMEXP1 38
+#define LLCC_CMPTHCP 39
+#define LLCC_LCPDARE 40
#define LLCC_AENPU 45
+#define LLCC_ISLAND1 46
+#define LLCC_ISLAND2 47
+#define LLCC_ISLAND3 48
+#define LLCC_ISLAND4 49
+#define LLCC_CAMEXP2 50
+#define LLCC_CAMEXP3 51
+#define LLCC_CAMEXP4 52
+#define LLCC_DISP_WB 53
+#define LLCC_DISP_1 54
+#define LLCC_VIDVSP 64
/**
* struct llcc_slice_desc - Cache slice descriptor
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 2ba044d0d5e5..8e984d75f5b6 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -225,7 +225,7 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
/**
* struct spi_controller_mem_ops - SPI memory operations
* @adjust_op_size: shrink the data xfer of an operation to match controller's
- * limitations (can be alignment of max RX/TX size
+ * limitations (can be alignment or max RX/TX size
* limitations)
* @supports_op: check if an operation is supported by the controller
* @exec_op: execute a SPI memory operation
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 01226e4d960a..9b9d0bbf1d3c 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -47,11 +47,8 @@ int init_srcu_struct(struct srcu_struct *ssp);
#include <linux/srcutiny.h>
#elif defined(CONFIG_TREE_SRCU)
#include <linux/srcutree.h>
-#elif defined(CONFIG_SRCU)
-#error "Unknown SRCU implementation specified to kernel configuration"
#else
-/* Dummy definition for things like notifiers. Actual use gets link error. */
-struct srcu_struct { };
+#error "Unknown SRCU implementation specified to kernel configuration"
#endif
void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
@@ -64,11 +61,21 @@ unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
-#ifdef CONFIG_SRCU
+#ifdef CONFIG_NEED_SRCU_NMI_SAFE
+int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
+void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
+#else
+static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+{
+ return __srcu_read_lock(ssp);
+}
+static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
+{
+ __srcu_read_unlock(ssp, idx);
+}
+#endif /* CONFIG_NEED_SRCU_NMI_SAFE */
+
void srcu_init(void);
-#else /* #ifdef CONFIG_SRCU */
-static inline void srcu_init(void) { }
-#endif /* #else #ifdef CONFIG_SRCU */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -104,6 +111,18 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#define SRCU_NMI_UNKNOWN 0x0
+#define SRCU_NMI_UNSAFE 0x1
+#define SRCU_NMI_SAFE 0x2
+
+#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_TREE_SRCU)
+void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe);
+#else
+static inline void srcu_check_nmi_safety(struct srcu_struct *ssp,
+ bool nmi_safe) { }
+#endif
+
+
/**
* srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
* @p: the pointer to fetch and protect for later dereferencing
@@ -161,17 +180,36 @@ static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
+ srcu_check_nmi_safety(ssp, false);
retval = __srcu_read_lock(ssp);
rcu_lock_acquire(&(ssp)->dep_map);
return retval;
}
+/**
+ * srcu_read_lock_nmisafe - register a new reader for an SRCU-protected structure.
+ * @ssp: srcu_struct in which to register the new reader.
+ *
+ * Enter an SRCU read-side critical section, but in an NMI-safe manner.
+ * See srcu_read_lock() for more information.
+ */
+static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
+{
+ int retval;
+
+ srcu_check_nmi_safety(ssp, true);
+ retval = __srcu_read_lock_nmisafe(ssp);
+ rcu_lock_acquire(&(ssp)->dep_map);
+ return retval;
+}
+
/* Used by tracing, cannot be traced and cannot invoke lockdep. */
static inline notrace int
srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
+ srcu_check_nmi_safety(ssp, false);
retval = __srcu_read_lock(ssp);
return retval;
}
@@ -187,14 +225,32 @@ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
__releases(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
+ srcu_check_nmi_safety(ssp, false);
rcu_lock_release(&(ssp)->dep_map);
__srcu_read_unlock(ssp, idx);
}
+/**
+ * srcu_read_unlock_nmisafe - unregister an old reader from an SRCU-protected structure.
+ * @ssp: srcu_struct in which to unregister the old reader.
+ * @idx: return value from corresponding srcu_read_lock().
+ *
+ * Exit an SRCU read-side critical section, but in an NMI-safe manner.
+ */
+static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
+ __releases(ssp)
+{
+ WARN_ON_ONCE(idx & ~0x1);
+ srcu_check_nmi_safety(ssp, true);
+ rcu_lock_release(&(ssp)->dep_map);
+ __srcu_read_unlock_nmisafe(ssp, idx);
+}
+
/* Used by tracing, cannot be traced and cannot call lockdep. */
static inline notrace void
srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
{
+ srcu_check_nmi_safety(ssp, false);
__srcu_read_unlock(ssp, idx);
}
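A sketch of the NMI-safe reader pairing introduced above, assuming an srcu_struct that is read from NMI context; nmi_srcu and the handler name are illustrative. All readers of a given srcu_struct must consistently use either the NMI-safe or the ordinary flavour, which srcu_check_nmi_safety() verifies under CONFIG_PROVE_RCU:

#include <linux/srcu.h>

DEFINE_SRCU(nmi_srcu);

/* May be called from NMI context, e.g. a hardlockup or perf handler. */
static void inspect_state_from_nmi(void)
{
        int idx;

        idx = srcu_read_lock_nmisafe(&nmi_srcu);
        /* ... dereference srcu-protected pointers here ... */
        srcu_read_unlock_nmisafe(&nmi_srcu, idx);
}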
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index e3014319d1ad..c689a81752c9 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -23,8 +23,9 @@ struct srcu_struct;
*/
struct srcu_data {
/* Read-side state. */
- unsigned long srcu_lock_count[2]; /* Locks per CPU. */
- unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */
+ atomic_long_t srcu_lock_count[2]; /* Locks per CPU. */
+ atomic_long_t srcu_unlock_count[2]; /* Unlocks per CPU. */
+ int srcu_nmi_safety; /* NMI-safe srcu_struct structure? */
/* Update-side state. */
spinlock_t __private lock ____cacheline_internodealigned_in_smp;
diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h
index 4c678c4fec58..9c88707d9a0f 100644
--- a/include/linux/stackprotector.h
+++ b/include/linux/stackprotector.h
@@ -6,6 +6,25 @@
#include <linux/sched.h>
#include <linux/random.h>
+/*
+ * On 64-bit architectures, protect against non-terminated C string overflows
+ * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
+ */
+#ifdef CONFIG_64BIT
+# ifdef __LITTLE_ENDIAN
+# define CANARY_MASK 0xffffffffffffff00UL
+# else /* big endian, 64 bits: */
+# define CANARY_MASK 0x00ffffffffffffffUL
+# endif
+#else /* 32 bits: */
+# define CANARY_MASK 0xffffffffUL
+#endif
+
+static inline unsigned long get_random_canary(void)
+{
+ return get_random_long() & CANARY_MASK;
+}
+
#if defined(CONFIG_STACKPROTECTOR) || defined(CONFIG_ARM64_PTR_AUTH)
# include <asm/stackprotector.h>
#else
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 88de45491376..ed722dd33b46 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -220,13 +220,6 @@ static inline __be32 svc_getu32(struct kvec *iov)
return val;
}
-static inline void svc_ungetu32(struct kvec *iov)
-{
- __be32 *vp = (__be32 *)iov->iov_base;
- iov->iov_base = (void *)(vp - 1);
- iov->iov_len += sizeof(*vp);
-}
-
static inline void svc_putu32(struct kvec *iov, __be32 val)
{
__be32 *vp = iov->iov_base + iov->iov_len;
@@ -311,7 +304,6 @@ struct svc_rqst {
struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
struct svc_cacherep * rq_cacherep; /* cache info */
struct task_struct *rq_task; /* service thread */
- spinlock_t rq_lock; /* per-request lock */
struct net *rq_bc_net; /* pointer to backchannel's
* net namespace
*/
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 86b95ccb81bb..b07b277d6a16 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -33,11 +33,13 @@
* can use the extra bits to store other information besides PFN.
*/
#ifdef MAX_PHYSMEM_BITS
-#define SWP_PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
+#define SWP_PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
#else /* MAX_PHYSMEM_BITS */
-#define SWP_PFN_BITS (BITS_PER_LONG - PAGE_SHIFT)
+#define SWP_PFN_BITS min_t(int, \
+ sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
+ SWP_TYPE_SHIFT)
#endif /* MAX_PHYSMEM_BITS */
-#define SWP_PFN_MASK (BIT(SWP_PFN_BITS) - 1)
+#define SWP_PFN_MASK (BIT(SWP_PFN_BITS) - 1)
/**
* Migration swap entry specific bitfield definitions. Layout:
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index a34b0f9a9972..33a0ee3bcb2e 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -264,6 +264,7 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
#define SC_VAL64(type, name) ((type) name##_hi << 32 | name##_lo)
#ifdef CONFIG_COMPAT
+#define SYSCALL32_DEFINE0 COMPAT_SYSCALL_DEFINE0
#define SYSCALL32_DEFINE1 COMPAT_SYSCALL_DEFINE1
#define SYSCALL32_DEFINE2 COMPAT_SYSCALL_DEFINE2
#define SYSCALL32_DEFINE3 COMPAT_SYSCALL_DEFINE3
@@ -271,6 +272,7 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
#define SYSCALL32_DEFINE5 COMPAT_SYSCALL_DEFINE5
#define SYSCALL32_DEFINE6 COMPAT_SYSCALL_DEFINE6
#else
+#define SYSCALL32_DEFINE0 SYSCALL_DEFINE0
#define SYSCALL32_DEFINE1 SYSCALL_DEFINE1
#define SYSCALL32_DEFINE2 SYSCALL_DEFINE2
#define SYSCALL32_DEFINE3 SYSCALL_DEFINE3
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 9ecc128944a1..5e093602e8fc 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -100,6 +100,7 @@ struct thermal_cooling_device_ops {
struct thermal_cooling_device {
int id;
char *type;
+ unsigned long max_state;
struct device device;
struct device_node *np;
void *devdata;
diff --git a/include/linux/time_namespace.h b/include/linux/time_namespace.h
index 3146f1c056c9..bb9d3f5542f8 100644
--- a/include/linux/time_namespace.h
+++ b/include/linux/time_namespace.h
@@ -45,6 +45,7 @@ struct time_namespace *copy_time_ns(unsigned long flags,
void free_time_ns(struct time_namespace *ns);
void timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk);
struct vdso_data *arch_get_vdso_data(void *vvar_page);
+struct page *find_timens_vvar_page(struct vm_area_struct *vma);
static inline void put_time_ns(struct time_namespace *ns)
{
@@ -141,6 +142,11 @@ static inline void timens_on_fork(struct nsproxy *nsproxy,
return;
}
+static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+
static inline void timens_add_monotonic(struct timespec64 *ts) { }
static inline void timens_add_boottime(struct timespec64 *ts) { }
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 648f00105f58..9162f275819a 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -169,7 +169,6 @@ static inline int timer_pending(const struct timer_list * timer)
}
extern void add_timer_on(struct timer_list *timer, int cpu);
-extern int del_timer(struct timer_list * timer);
extern int mod_timer(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
extern int timer_reduce(struct timer_list *timer, unsigned long expires);
@@ -183,14 +182,36 @@ extern int timer_reduce(struct timer_list *timer, unsigned long expires);
extern void add_timer(struct timer_list *timer);
extern int try_to_del_timer_sync(struct timer_list *timer);
+extern int timer_delete_sync(struct timer_list *timer);
+extern int timer_delete(struct timer_list *timer);
+extern int timer_shutdown_sync(struct timer_list *timer);
+extern int timer_shutdown(struct timer_list *timer);
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
- extern int del_timer_sync(struct timer_list *timer);
-#else
-# define del_timer_sync(t) del_timer(t)
-#endif
+/**
+ * del_timer_sync - Delete a pending timer and wait for a running callback
+ * @timer: The timer to be deleted
+ *
+ * See timer_delete_sync() for detailed explanation.
+ *
+ * Do not use in new code. Use timer_delete_sync() instead.
+ */
+static inline int del_timer_sync(struct timer_list *timer)
+{
+ return timer_delete_sync(timer);
+}
-#define del_singleshot_timer_sync(t) del_timer_sync(t)
+/**
+ * del_timer - Delete a pending timer
+ * @timer: The timer to be deleted
+ *
+ * See timer_delete() for detailed explanation.
+ *
+ * Do not use in new code. Use timer_delete() instead.
+ */
+static inline int del_timer(struct timer_list *timer)
+{
+ return timer_delete(timer);
+}
extern void init_timers(void);
struct hrtimer;
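A migration sketch for the renamed API: timer_delete_sync() is a one-for-one replacement for del_timer_sync(), while timer_shutdown_sync() additionally guarantees the timer cannot be re-armed afterwards, which suits final teardown. struct mydev and its helpers are illustrative:

#include <linux/timer.h>

struct mydev {
        struct timer_list watchdog;
};

static void mydev_pause(struct mydev *dev)
{
        /* Stop the timer and wait for a running callback; re-arming later is fine. */
        timer_delete_sync(&dev->watchdog);
}

static void mydev_remove(struct mydev *dev)
{
        /* Final teardown: the timer can never be queued again after this. */
        timer_shutdown_sync(&dev->watchdog);
}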
diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h
index 93884086f392..adc80e29168e 100644
--- a/include/linux/timerqueue.h
+++ b/include/linux/timerqueue.h
@@ -35,7 +35,7 @@ struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
{
struct rb_node *leftmost = rb_first_cached(&head->rb_root);
- return rb_entry(leftmost, struct timerqueue_node, node);
+ return rb_entry_safe(leftmost, struct timerqueue_node, node);
}
static inline void timerqueue_init(struct timerqueue_node *node)
diff --git a/include/linux/trace.h b/include/linux/trace.h
index b5e16e438448..80ffda871749 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -26,13 +26,13 @@ struct trace_export {
int flags;
};
+struct trace_array;
+
#ifdef CONFIG_TRACING
int register_ftrace_export(struct trace_export *export);
int unregister_ftrace_export(struct trace_export *export);
-struct trace_array;
-
void trace_printk_init_buffers(void);
__printf(3, 4)
int trace_array_printk(struct trace_array *tr, unsigned long ip,
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 2e3134b14ffd..87fc3d0dda98 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -29,6 +29,9 @@ enum iter_type {
ITER_UBUF,
};
+#define ITER_SOURCE 1 // == WRITE
+#define ITER_DEST 0 // == READ
+
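The two constants name the data direction from the iterator's point of view: ITER_DEST for an iterator that data will be copied into (a read), ITER_SOURCE for one that data is taken from (a write). A hedged sketch using the kvec variant; the helper and its arguments are illustrative:

#include <linux/uio.h>

static void fill_iter_for_read(struct iov_iter *iter, struct kvec *kv,
                               void *buf, size_t len)
{
        kv->iov_base = buf;
        kv->iov_len = len;
        /* The buffer is the destination of the I/O, hence ITER_DEST. */
        iov_iter_kvec(iter, ITER_DEST, kv, 1, len);
}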
struct iov_iter_state {
size_t iov_offset;
size_t count;
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index f07e6998bb68..9df0b9a762cc 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -146,9 +146,9 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma)
static inline bool vma_can_userfault(struct vm_area_struct *vma,
unsigned long vm_flags)
{
- if (vm_flags & VM_UFFD_MINOR)
- return is_vm_hugetlb_page(vma) || vma_is_shmem(vma);
-
+ if ((vm_flags & VM_UFFD_MINOR) &&
+ (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
+ return false;
#ifndef CONFIG_PTE_MARKER_UFFD_WP
/*
* If user requested uffd-wp but not enabled pte markers for
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 2b1737c9b244..bf7613ba412b 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -10,6 +10,7 @@
#include <uapi/linux/utsname.h>
enum uts_proc {
+ UTS_PROC_ARCH,
UTS_PROC_OSTYPE,
UTS_PROC_OSRELEASE,
UTS_PROC_VERSION,
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index e7cebeb875dd..fdd393f70b19 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -189,6 +189,7 @@ int vfio_register_emulated_iommu_dev(struct vfio_device *device);
void vfio_unregister_group_dev(struct vfio_device *device);
int vfio_assign_device_set(struct vfio_device *device, void *set_id);
+unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set);
int vfio_mig_get_next_state(struct vfio_device *device,
enum vfio_device_mig_state cur_fsm,
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index b559c6bbcad0..2e7dd44926e4 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -22,6 +22,12 @@
struct inode;
struct dentry;
+static inline bool is_posix_acl_xattr(const char *name)
+{
+ return (strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
+ (strcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT) == 0);
+}
+
/*
* struct xattr_handler: When @name is set, match attributes with exactly that
* name. When @prefix is set instead, match attributes with that prefix and
@@ -68,9 +74,9 @@ int __vfs_removexattr_locked(struct user_namespace *, struct dentry *,
int vfs_removexattr(struct user_namespace *, struct dentry *, const char *);
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
-ssize_t vfs_getxattr_alloc(struct user_namespace *mnt_userns,
- struct dentry *dentry, const char *name,
- char **xattr_value, size_t size, gfp_t flags);
+int vfs_getxattr_alloc(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name,
+ char **xattr_value, size_t size, gfp_t flags);
int xattr_supported_namespace(struct inode *inode, const char *prefix);
diff --git a/include/media/i2c/ir-kbd-i2c.h b/include/media/i2c/ir-kbd-i2c.h
index 9f47d6a48cff..0b58f8b9e7a4 100644
--- a/include/media/i2c/ir-kbd-i2c.h
+++ b/include/media/i2c/ir-kbd-i2c.h
@@ -35,6 +35,7 @@ enum ir_kbd_get_key_fn {
IR_KBD_GET_KEY_PIXELVIEW,
IR_KBD_GET_KEY_HAUP,
IR_KBD_GET_KEY_KNC1,
+ IR_KBD_GET_KEY_GENIATECH,
IR_KBD_GET_KEY_FUSIONHDTV,
IR_KBD_GET_KEY_HAUP_XVR,
IR_KBD_GET_KEY_AVERMEDIA_CARDBUS,
diff --git a/include/media/media-device.h b/include/media/media-device.h
index a10b30507524..86716ee7cc6c 100644
--- a/include/media/media-device.h
+++ b/include/media/media-device.h
@@ -192,21 +192,6 @@ struct usb_device;
#define MEDIA_DEV_NOTIFY_POST_LINK_CH 1
/**
- * media_entity_enum_init - Initialise an entity enumeration
- *
- * @ent_enum: Entity enumeration to be initialised
- * @mdev: The related media device
- *
- * Return: zero on success or a negative error code.
- */
-static inline __must_check int media_entity_enum_init(
- struct media_entity_enum *ent_enum, struct media_device *mdev)
-{
- return __media_entity_enum_init(ent_enum,
- mdev->entity_internal_idx_max + 1);
-}
-
-/**
* media_device_init() - Initializes a media device element
*
* @mdev: pointer to struct &media_device
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index f16ffe70f7a6..28c9de8a1f34 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -17,6 +17,7 @@
#include <linux/fwnode.h>
#include <linux/list.h>
#include <linux/media.h>
+#include <linux/minmax.h>
#include <linux/types.h>
/* Enums used internally at the media controller to represent graphs */
@@ -99,12 +100,34 @@ struct media_graph {
/**
* struct media_pipeline - Media pipeline related information
*
- * @streaming_count: Streaming start count - streaming stop count
- * @graph: Media graph walk during pipeline start / stop
+ * @allocated: Media pipeline allocated and freed by the framework
+ * @mdev: The media device the pipeline is part of
+ * @pads: List of media_pipeline_pad
+ * @start_count: Media pipeline start - stop count
*/
struct media_pipeline {
- int streaming_count;
- struct media_graph graph;
+ bool allocated;
+ struct media_device *mdev;
+ struct list_head pads;
+ int start_count;
+};
+
+/**
+ * struct media_pipeline_pad - A pad part of a media pipeline
+ *
+ * @list: Entry in the media_pad pads list
+ * @pipe: The media_pipeline that the pad is part of
+ * @pad: The media pad
+ *
+ * This structure associates a pad with a media pipeline. Instances of
+ * media_pipeline_pad are created by media_pipeline_start() when it builds the
+ * pipeline, and stored in the &media_pad.pads list. media_pipeline_stop()
+ * removes the entries from the list and deletes them.
+ */
+struct media_pipeline_pad {
+ struct list_head list;
+ struct media_pipeline *pipe;
+ struct media_pad *pad;
};
/**
@@ -186,6 +209,8 @@ enum media_pad_signal_type {
* @flags: Pad flags, as defined in
* :ref:`include/uapi/linux/media.h <media_header>`
* (seek for ``MEDIA_PAD_FL_*``)
+ * @pipe: Pipeline this pad belongs to. Use media_pad_pipeline() to
+ * access this field.
*/
struct media_pad {
struct media_gobj graph_obj; /* must be first field in struct */
@@ -193,6 +218,12 @@ struct media_pad {
u16 index;
enum media_pad_signal_type sig_type;
unsigned long flags;
+
+ /*
+ * The fields below are private, and should only be accessed via
+ * appropriate functions.
+ */
+ struct media_pipeline *pipe;
};
/**
@@ -206,6 +237,14 @@ struct media_pad {
* @link_validate: Return whether a link is valid from the entity point of
* view. The media_pipeline_start() function
* validates all links by calling this operation. Optional.
+ * @has_pad_interdep: Return whether two pads inside the entity are
+ * interdependent. If two pads are interdependent they are
+ * part of the same pipeline and enabling one of the pads
+ * means that the other pad will become "locked" and
+ * will not allow configuration changes. pad0 and pad1 are
+ * guaranteed to not both be sinks or sources.
+ * Optional: If the operation isn't implemented all pads
+ * will be considered as interdependent.
*
* .. note::
*
@@ -219,6 +258,8 @@ struct media_entity_operations {
const struct media_pad *local,
const struct media_pad *remote, u32 flags);
int (*link_validate)(struct media_link *link);
+ bool (*has_pad_interdep)(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1);
};
/**
@@ -269,7 +310,6 @@ enum media_entity_type {
* @links: List of data links.
* @ops: Entity operations.
* @use_count: Use count for the entity.
- * @pipe: Pipeline this entity belongs to.
* @info: Union with devnode information. Kept just for backward
* compatibility.
* @info.dev: Contains device major and minor info.
@@ -305,8 +345,6 @@ struct media_entity {
int use_count;
- struct media_pipeline *pipe;
-
union {
struct {
u32 major;
@@ -316,6 +354,18 @@ struct media_entity {
};
/**
+ * media_entity_for_each_pad - Iterate on all pads in an entity
+ * @entity: The entity the pads belong to
+ * @iter: The iterator pad
+ *
+ * Iterate on all pads in a media entity.
+ */
+#define media_entity_for_each_pad(entity, iter) \
+ for (iter = (entity)->pads; \
+ iter < &(entity)->pads[(entity)->num_pads]; \
+ ++iter)
+
+/**
* struct media_interface - A media interface graph object.
*
* @graph_obj: embedded graph object
@@ -426,15 +476,15 @@ static inline bool is_media_entity_v4l2_subdev(struct media_entity *entity)
}
/**
- * __media_entity_enum_init - Initialise an entity enumeration
+ * media_entity_enum_init - Initialise an entity enumeration
*
* @ent_enum: Entity enumeration to be initialised
- * @idx_max: Maximum number of entities in the enumeration
+ * @mdev: The related media device
*
- * Return: Returns zero on success or a negative error code.
+ * Return: zero on success or a negative error code.
*/
-__must_check int __media_entity_enum_init(struct media_entity_enum *ent_enum,
- int idx_max);
+__must_check int media_entity_enum_init(struct media_entity_enum *ent_enum,
+ struct media_device *mdev);
/**
* media_entity_enum_cleanup - Release resources of an entity enumeration
@@ -924,6 +974,18 @@ media_entity_remote_source_pad_unique(const struct media_entity *entity)
}
/**
+ * media_pad_is_streaming - Test if a pad is part of a streaming pipeline
+ * @pad: The pad
+ *
+ * Return: True if the pad is part of a pipeline started with the
+ * media_pipeline_start() function, false otherwise.
+ */
+static inline bool media_pad_is_streaming(const struct media_pad *pad)
+{
+ return pad->pipe;
+}
+
+/**
* media_entity_is_streaming - Test if an entity is part of a streaming pipeline
* @entity: The entity
*
@@ -932,10 +994,50 @@ media_entity_remote_source_pad_unique(const struct media_entity *entity)
*/
static inline bool media_entity_is_streaming(const struct media_entity *entity)
{
- return entity->pipe;
+ struct media_pad *pad;
+
+ media_entity_for_each_pad(entity, pad) {
+ if (media_pad_is_streaming(pad))
+ return true;
+ }
+
+ return false;
}
/**
+ * media_entity_pipeline - Get the media pipeline an entity is part of
+ * @entity: The entity
+ *
+ * DEPRECATED: use media_pad_pipeline() instead.
+ *
+ * This function returns the media pipeline that an entity has been associated
+ * with when constructing the pipeline with media_pipeline_start(). The pointer
+ * remains valid until media_pipeline_stop() is called.
+ *
+ * In general, entities can be part of multiple pipelines, when carrying
+ * multiple streams (either on different pads, or on the same pad using
+ * multiplexed streams). This function is to be used only for entities that
+ * do not support multiple pipelines.
+ *
+ * Return: The media_pipeline the entity is part of, or NULL if the entity is
+ * not part of any pipeline.
+ */
+struct media_pipeline *media_entity_pipeline(struct media_entity *entity);
+
+/**
+ * media_pad_pipeline - Get the media pipeline a pad is part of
+ * @pad: The pad
+ *
+ * This function returns the media pipeline that a pad has been associated
+ * with when constructing the pipeline with media_pipeline_start(). The pointer
+ * remains valid until media_pipeline_stop() is called.
+ *
+ * Return: The media_pipeline the pad is part of, or NULL if the pad is
+ * not part of any pipeline.
+ */
+struct media_pipeline *media_pad_pipeline(struct media_pad *pad);
+
+/**
* media_entity_get_fwnode_pad - Get pad number from fwnode
*
* @entity: The entity
@@ -1013,53 +1115,66 @@ struct media_entity *media_graph_walk_next(struct media_graph *graph);
/**
* media_pipeline_start - Mark a pipeline as streaming
- * @entity: Starting entity
- * @pipe: Media pipeline to be assigned to all entities in the pipeline.
+ * @pad: Starting pad
+ * @pipe: Media pipeline to be assigned to all pads in the pipeline.
*
- * Mark all entities connected to a given entity through enabled links, either
+ * Mark all pads connected to a given pad through enabled links, either
* directly or indirectly, as streaming. The given pipeline object is assigned
- * to every entity in the pipeline and stored in the media_entity pipe field.
+ * to every pad in the pipeline and stored in the media_pad pipe field.
*
* Calls to this function can be nested, in which case the same number of
* media_pipeline_stop() calls will be required to stop streaming. The
* pipeline pointer must be identical for all nested calls to
* media_pipeline_start().
*/
-__must_check int media_pipeline_start(struct media_entity *entity,
+__must_check int media_pipeline_start(struct media_pad *pad,
struct media_pipeline *pipe);
/**
* __media_pipeline_start - Mark a pipeline as streaming
*
- * @entity: Starting entity
- * @pipe: Media pipeline to be assigned to all entities in the pipeline.
+ * @pad: Starting pad
+ * @pipe: Media pipeline to be assigned to all pads in the pipeline.
*
* ..note:: This is the non-locking version of media_pipeline_start()
*/
-__must_check int __media_pipeline_start(struct media_entity *entity,
+__must_check int __media_pipeline_start(struct media_pad *pad,
struct media_pipeline *pipe);
/**
* media_pipeline_stop - Mark a pipeline as not streaming
- * @entity: Starting entity
+ * @pad: Starting pad
*
- * Mark all entities connected to a given entity through enabled links, either
- * directly or indirectly, as not streaming. The media_entity pipe field is
+ * Mark all pads connected to a given pad through enabled links, either
+ * directly or indirectly, as not streaming. The media_pad pipe field is
* reset to %NULL.
*
* If multiple calls to media_pipeline_start() have been made, the same
* number of calls to this function are required to mark the pipeline as not
* streaming.
*/
-void media_pipeline_stop(struct media_entity *entity);
+void media_pipeline_stop(struct media_pad *pad);
/**
* __media_pipeline_stop - Mark a pipeline as not streaming
*
- * @entity: Starting entity
+ * @pad: Starting pad
*
* .. note:: This is the non-locking version of media_pipeline_stop()
*/
-void __media_pipeline_stop(struct media_entity *entity);
+void __media_pipeline_stop(struct media_pad *pad);
+
+/**
+ * media_pipeline_alloc_start - Mark a pipeline as streaming
+ * @pad: Starting pad
+ *
+ * media_pipeline_alloc_start() is similar to media_pipeline_start(), but instead
+ * of working on a given pipeline it will use an existing pipeline if the pad is
+ * already part of one, or allocate a new pipeline.
+ *
+ * Calls to media_pipeline_alloc_start() must be matched with
+ * media_pipeline_stop().
+ */
+__must_check int media_pipeline_alloc_start(struct media_pad *pad);
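A hedged sketch of the pad-based pipeline API from a driver's stream-on/stream-off path; the pipeline object, function names and reduced error handling are illustrative:

#include <media/media-entity.h>

static struct media_pipeline my_pipe;

static int my_start_streaming(struct media_pad *source_pad)
{
        int ret;

        /* Associate every pad reachable over enabled links with my_pipe. */
        ret = media_pipeline_start(source_pad, &my_pipe);
        if (ret)
                return ret;

        /* ... program the hardware ... */
        return 0;
}

static void my_stop_streaming(struct media_pad *source_pad)
{
        media_pipeline_stop(source_pad);
}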
/**
* media_devnode_create() - creates and initializes a device node interface
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 725ff91b26e0..1bdaea248089 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -175,7 +175,8 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
*
* @sd: pointer to &struct v4l2_subdev
* @client: pointer to struct i2c_client
- * @devname: the name of the device; if NULL, the I²C device's name will be used
+ * @devname: the name of the device; if NULL, the I²C device driver's name
+ * will be used
* @postfix: sub-device specific string to put right after the I²C device name;
* may be NULL
*/
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index b76a0714d425..e59d9a234631 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -121,21 +121,19 @@ struct v4l2_ctrl_ops {
* struct v4l2_ctrl_type_ops - The control type operations that the driver
* has to provide.
*
- * @equal: return true if both values are equal.
- * @init: initialize the value.
+ * @equal: return true if all ctrl->elems array elements are equal.
+ * @init: initialize the value for array elements from @from_idx up to ctrl->elems.
* @log: log the value.
- * @validate: validate the value. Return 0 on success and a negative value
- * otherwise.
+ * @validate: validate the value for ctrl->new_elems array elements.
+ * Return 0 on success and a negative value otherwise.
*/
struct v4l2_ctrl_type_ops {
- bool (*equal)(const struct v4l2_ctrl *ctrl, u32 elems,
- union v4l2_ctrl_ptr ptr1,
- union v4l2_ctrl_ptr ptr2);
- void (*init)(const struct v4l2_ctrl *ctrl, u32 from_idx, u32 tot_elems,
+ bool (*equal)(const struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2);
+ void (*init)(const struct v4l2_ctrl *ctrl, u32 from_idx,
union v4l2_ctrl_ptr ptr);
void (*log)(const struct v4l2_ctrl *ctrl);
- int (*validate)(const struct v4l2_ctrl *ctrl, u32 elems,
- union v4l2_ctrl_ptr ptr);
+ int (*validate)(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr);
};
/**
@@ -1543,13 +1541,12 @@ int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl,
* v4l2_ctrl_type_op_equal - Default v4l2_ctrl_type_ops equal callback.
*
* @ctrl: The v4l2_ctrl pointer.
- * @elems: The number of elements to compare.
* @ptr1: A v4l2 control value.
* @ptr2: A v4l2 control value.
*
* Return: true if values are equal, otherwise false.
*/
-bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, u32 elems,
+bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl,
union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2);
/**
@@ -1557,13 +1554,12 @@ bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, u32 elems,
*
* @ctrl: The v4l2_ctrl pointer.
* @from_idx: Starting element index.
- * @elems: The number of elements to initialize.
* @ptr: The v4l2 control value.
*
* Return: void
*/
void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
- u32 elems, union v4l2_ctrl_ptr ptr);
+ union v4l2_ctrl_ptr ptr);
/**
* v4l2_ctrl_type_op_log - Default v4l2_ctrl_type_ops log callback.
@@ -1578,12 +1574,10 @@ void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl);
* v4l2_ctrl_type_op_validate - Default v4l2_ctrl_type_ops validate callback.
*
* @ctrl: The v4l2_ctrl pointer.
- * @elems: The number of elements in the control.
* @ptr: The v4l2 control value.
*
* Return: 0 on success, a negative error code on failure.
*/
-int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, u32 elems,
- union v4l2_ctrl_ptr ptr);
+int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr);
#endif
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 5cf1edefb822..e0a13505f88d 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -539,4 +539,106 @@ static inline int video_is_registered(struct video_device *vdev)
return test_bit(V4L2_FL_REGISTERED, &vdev->flags);
}
+#if defined(CONFIG_MEDIA_CONTROLLER)
+
+/**
+ * video_device_pipeline_start - Mark a pipeline as streaming
+ * @vdev: Starting video device
+ * @pipe: Media pipeline to be assigned to all entities in the pipeline.
+ *
+ * Mark all entities connected to a given video device through enabled links,
+ * either directly or indirectly, as streaming. The given pipeline object is
+ * assigned to every pad in the pipeline and stored in the media_pad pipe
+ * field.
+ *
+ * Calls to this function can be nested, in which case the same number of
+ * video_device_pipeline_stop() calls will be required to stop streaming. The
+ * pipeline pointer must be identical for all nested calls to
+ * video_device_pipeline_start().
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around media_pipeline_start().
+ */
+__must_check int video_device_pipeline_start(struct video_device *vdev,
+ struct media_pipeline *pipe);
+
+/**
+ * __video_device_pipeline_start - Mark a pipeline as streaming
+ * @vdev: Starting video device
+ * @pipe: Media pipeline to be assigned to all entities in the pipeline.
+ *
+ * .. note:: This is the non-locking version of video_device_pipeline_start()
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around __media_pipeline_start().
+ */
+__must_check int __video_device_pipeline_start(struct video_device *vdev,
+ struct media_pipeline *pipe);
+
+/**
+ * video_device_pipeline_stop - Mark a pipeline as not streaming
+ * @vdev: Starting video device
+ *
+ * Mark all entities connected to a given video device through enabled links,
+ * either directly or indirectly, as not streaming. The media_pad pipe field
+ * is reset to %NULL.
+ *
+ * If multiple calls to media_pipeline_start() have been made, the same
+ * number of calls to this function are required to mark the pipeline as not
+ * streaming.
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around media_pipeline_stop().
+ */
+void video_device_pipeline_stop(struct video_device *vdev);
+
+/**
+ * __video_device_pipeline_stop - Mark a pipeline as not streaming
+ * @vdev: Starting video device
+ *
+ * .. note:: This is the non-locking version of video_device_pipeline_stop()
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around __media_pipeline_stop().
+ */
+void __video_device_pipeline_stop(struct video_device *vdev);
+
+/**
+ * video_device_pipeline_alloc_start - Mark a pipeline as streaming
+ * @vdev: Starting video device
+ *
+ * video_device_pipeline_alloc_start() is similar to video_device_pipeline_start(),
+ * but instead of working on a given pipeline it will use an existing pipeline
+ * if the video device is already part of one, or allocate a new pipeline.
+ *
+ * Calls to video_device_pipeline_alloc_start() must be matched with
+ * video_device_pipeline_stop().
+ */
+__must_check int video_device_pipeline_alloc_start(struct video_device *vdev);
+
+/**
+ * video_device_pipeline - Get the media pipeline a video device is part of
+ * @vdev: The video device
+ *
+ * This function returns the media pipeline that a video device has been
+ * associated with when constructing the pipeline with
+ * video_device_pipeline_start(). The pointer remains valid until
+ * video_device_pipeline_stop() is called.
+ *
+ * Return: The media_pipeline the video device is part of, or NULL if the video
+ * device is not part of any pipeline.
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around media_entity_pipeline().
+ */
+struct media_pipeline *video_device_pipeline(struct video_device *vdev);
+
+#endif /* CONFIG_MEDIA_CONTROLLER */
+
#endif /* _V4L2_DEV_H */
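A sketch of the video-device convenience wrappers, e.g. from a vb2 start_streaming/stop_streaming pair in a driver whose video node has a single pad; names are illustrative:

#include <media/media-entity.h>
#include <media/v4l2-dev.h>

static struct media_pipeline cap_pipe;

static int cap_start_streaming(struct video_device *vdev)
{
        int ret;

        ret = video_device_pipeline_start(vdev, &cap_pipe);
        if (ret)
                return ret;

        /* ... start DMA ... */
        return 0;
}

static void cap_stop_streaming(struct video_device *vdev)
{
        video_device_pipeline_stop(vdev);
}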
diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h
index 15e4ab672223..394d798f3dfa 100644
--- a/include/media/v4l2-fwnode.h
+++ b/include/media/v4l2-fwnode.h
@@ -45,10 +45,6 @@ struct v4l2_async_subdev;
*/
struct v4l2_fwnode_endpoint {
struct fwnode_endpoint base;
- /*
- * Fields below this line will be zeroed by
- * v4l2_fwnode_endpoint_parse()
- */
enum v4l2_mbus_type bus_type;
struct {
struct v4l2_mbus_config_parallel parallel;
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 9689f38a0af1..2f80c9c818ed 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -358,7 +358,11 @@ struct v4l2_mbus_frame_desc_entry {
} bus;
};
-#define V4L2_FRAME_DESC_ENTRY_MAX 4
+ /*
+ * If this number is too small, it should be dropped altogether and the
+ * API switched to a dynamic number of frame descriptor entries.
+ */
+#define V4L2_FRAME_DESC_ENTRY_MAX 8
/**
* enum v4l2_mbus_frame_desc_type - media bus frame description type
@@ -1046,6 +1050,8 @@ v4l2_subdev_get_pad_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
unsigned int pad)
{
+ if (WARN_ON(!state))
+ return NULL;
if (WARN_ON(pad >= sd->entity.num_pads))
pad = 0;
return &state->pads[pad].try_fmt;
@@ -1064,6 +1070,8 @@ v4l2_subdev_get_pad_crop(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
unsigned int pad)
{
+ if (WARN_ON(!state))
+ return NULL;
if (WARN_ON(pad >= sd->entity.num_pads))
pad = 0;
return &state->pads[pad].try_crop;
@@ -1082,6 +1090,8 @@ v4l2_subdev_get_pad_compose(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
unsigned int pad)
{
+ if (WARN_ON(!state))
+ return NULL;
if (WARN_ON(pad >= sd->entity.num_pads))
pad = 0;
return &state->pads[pad].try_compose;
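With the NULL-state checks added above, the pad helpers return NULL instead of dereferencing a missing state; a hedged sketch of a caller that relies on this follows (my_get_fmt() and its error handling are assumptions, not taken from the patch).

#include <linux/errno.h>
#include <media/v4l2-subdev.h>

static int my_get_fmt(struct v4l2_subdev *sd,
		      struct v4l2_subdev_state *state,
		      struct v4l2_subdev_format *fmt)
{
	struct v4l2_mbus_framefmt *mf;

	mf = v4l2_subdev_get_pad_format(sd, state, fmt->pad);
	if (!mf)
		return -EINVAL;	/* state was NULL; the WARN has already fired */

	fmt->format = *mf;
	return 0;
}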
diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h
index 9c0ad64b8d29..862eff613dc7 100644
--- a/include/memory/renesas-rpc-if.h
+++ b/include/memory/renesas-rpc-if.h
@@ -59,6 +59,7 @@ struct rpcif_op {
enum rpcif_type {
RPCIF_RCAR_GEN3,
+ RPCIF_RCAR_GEN4,
RPCIF_RZ_G2L,
};
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index e004ba04a9ae..684f1cd28730 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -228,6 +228,17 @@ enum {
*/
HCI_QUIRK_VALID_LE_STATES,
+ /* When this quirk is set, then erroneous data reporting
+ * is ignored. This is mainly due to the fact that the HCI
+ * Read Default Erroneous Data Reporting command is advertised,
+ * but not supported; these controllers often reply with unknown
+ * command and tend to lock up randomly, needing a hard reset.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_ERR_DATA_REPORTING,
+
/*
* When this quirk is set, then the hci_suspend_notifier is not
* registered. This is intended for devices which drop completely
@@ -1424,7 +1435,6 @@ struct hci_std_codecs_v2 {
} __packed;
struct hci_vnd_codec_v2 {
- __u8 id;
__le16 cid;
__le16 vid;
__u8 transport;
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 8f780170e2f8..9f97f73615b6 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -37,16 +37,25 @@ struct genl_info;
* do additional, common, filtering and return an error
* @post_doit: called after an operation's doit callback, it may
* undo operations done by pre_doit, for example release locks
+ * @module: pointer to the owning module (set to THIS_MODULE)
* @mcgrps: multicast groups used by this family
* @n_mcgrps: number of multicast groups
* @resv_start_op: first operation for which reserved fields of the header
- * can be validated, new families should leave this field at zero
+ * can be validated and policies are required (see below);
+ * new families should leave this field at zero
* @mcgrp_offset: starting number of multicast group IDs in this family
* (private)
* @ops: the operations supported by this family
* @n_ops: number of operations supported by this family
* @small_ops: the small-struct operations supported by this family
* @n_small_ops: number of small-struct operations supported by this family
+ *
+ * Attribute policies (the combination of @policy and @maxattr fields)
+ * can be attached at the family level or at the operation level.
+ * If both are present, the per-operation policy takes precedence.
+ * For operations before @resv_start_op, lack of a policy means that the core
+ * will perform no attribute parsing or validation. For newer operations,
+ * if a policy is not provided, the core will reject all TLV attributes.
*/
struct genl_family {
int id; /* private */
@@ -173,9 +182,9 @@ struct genl_ops {
};
/**
- * struct genl_info - info that is available during dumpit op call
+ * struct genl_dumpit_info - info that is available during dumpit op call
* @family: generic netlink family - for internal genl code usage
- * @ops: generic netlink ops - for internal genl code usage
+ * @op: generic netlink ops - for internal genl code usage
* @attrs: netlink attributes
*/
struct genl_dumpit_info {
@@ -354,6 +363,7 @@ int genlmsg_multicast_allns(const struct genl_family *family,
/**
* genlmsg_unicast - unicast a netlink message
+ * @net: network namespace to look up @portid in
* @skb: netlink message as socket buffer
* @portid: netlink portid of the destination socket
*/
@@ -373,7 +383,7 @@ static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info)
}
/**
- * gennlmsg_data - head of message payload
+ * genlmsg_data - head of message payload
* @gnlh: genetlink message header
*/
static inline void *genlmsg_data(const struct genlmsghdr *gnlh)
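The policy-attachment rules documented in the genl_family comment above can be made concrete with a family-level policy; the sketch below is an editor's illustration (family name, attribute enum and handler are made up) showing @policy, @maxattr and @resv_start_op used together.

#include <linux/module.h>
#include <net/genetlink.h>

enum {
	DEMO_ATTR_UNSPEC,
	DEMO_ATTR_VALUE,
	__DEMO_ATTR_MAX,
};
#define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

#define DEMO_CMD_GET 1

static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
	[DEMO_ATTR_VALUE] = { .type = NLA_U32 },
};

static int demo_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;
}

static const struct genl_ops demo_ops[] = {
	{
		.cmd	= DEMO_CMD_GET,
		.doit	= demo_get_doit,
		/* No per-op policy: the family policy above applies. */
	},
};

static struct genl_family demo_family = {
	.name		= "demo",
	.version	= 1,
	.maxattr	= DEMO_ATTR_MAX,
	.policy		= demo_policy,		/* family-level policy */
	.module		= THIS_MODULE,
	.ops		= demo_ops,
	.n_ops		= ARRAY_SIZE(demo_ops),
	.resv_start_op	= DEMO_CMD_GET + 1,	/* strict header validation from here on */
};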
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 3af1e927247d..69174093078f 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -281,7 +281,8 @@ inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, in
* sk_v6_rcv_saddr (ipv6) changes after it has been binded. The socket's
* rcv_saddr field should already have been updated when this is called.
*/
-int inet_bhash2_update_saddr(struct inet_bind_hashbucket *prev_saddr, struct sock *sk);
+int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family);
+void inet_bhash2_reset_saddr(struct sock *sk);
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
struct inet_bind2_bucket *tb2, unsigned short port);
diff --git a/include/net/ip.h b/include/net/ip.h
index 038097c2a152..144bdfbb25af 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -563,7 +563,7 @@ static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
offsetof(typeof(flow->addrs), v4addrs.src) +
sizeof(flow->addrs.v4addrs.src));
- memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
+ memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs));
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 37943ba3a73c..d383c895592a 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -897,7 +897,7 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
offsetof(typeof(flow->addrs), v6addrs.src) +
sizeof(flow->addrs.v6addrs.src));
- memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
+ memcpy(&flow->addrs.v6addrs, &iph->addrs, sizeof(flow->addrs.v6addrs));
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
}
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 20745cf7ae1a..2f2a6023fb0e 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -83,7 +83,7 @@ struct neigh_parms {
struct rcu_head rcu_head;
int reachable_time;
- int qlen;
+ u32 qlen;
int data[NEIGH_VAR_DATA_MAX];
DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
};
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 4418b1981e31..6bfa972f2fbf 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -181,6 +181,8 @@ enum {
NLA_S64,
NLA_BITFIELD32,
NLA_REJECT,
+ NLA_BE16,
+ NLA_BE32,
__NLA_TYPE_MAX,
};
@@ -231,6 +233,7 @@ enum nla_policy_validation {
* NLA_U32, NLA_U64,
* NLA_S8, NLA_S16,
* NLA_S32, NLA_S64,
+ * NLA_BE16, NLA_BE32,
* NLA_MSECS Leaving the length field zero will verify the
* given type fits, using it verifies minimum length
* just like "All other"
@@ -261,6 +264,8 @@ enum nla_policy_validation {
* NLA_U16,
* NLA_U32,
* NLA_U64,
+ * NLA_BE16,
+ * NLA_BE32,
* NLA_S8,
* NLA_S16,
* NLA_S32,
@@ -317,19 +322,10 @@ struct nla_policy {
u8 validation_type;
u16 len;
union {
- const u32 bitfield32_valid;
- const u32 mask;
- const char *reject_message;
- const struct nla_policy *nested_policy;
- struct netlink_range_validation *range;
- struct netlink_range_validation_signed *range_signed;
- struct {
- s16 min, max;
- u8 network_byte_order:1;
- };
- int (*validate)(const struct nlattr *attr,
- struct netlink_ext_ack *extack);
- /* This entry is special, and used for the attribute at index 0
+ /**
+ * @strict_start_type: first attribute to validate strictly
+ *
+ * This entry is special, and used for the attribute at index 0
* only, and specifies special data about the policy, namely it
* specifies the "boundary type" where strict length validation
* starts for any attribute types >= this value, also, strict
@@ -348,6 +344,19 @@ struct nla_policy {
* was added to enforce strict validation from thereon.
*/
u16 strict_start_type;
+
+ /* private: use NLA_POLICY_*() to set */
+ const u32 bitfield32_valid;
+ const u32 mask;
+ const char *reject_message;
+ const struct nla_policy *nested_policy;
+ struct netlink_range_validation *range;
+ struct netlink_range_validation_signed *range_signed;
+ struct {
+ s16 min, max;
+ };
+ int (*validate)(const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
};
};
@@ -369,6 +378,8 @@ struct nla_policy {
(tp == NLA_U8 || tp == NLA_U16 || tp == NLA_U32 || tp == NLA_U64)
#define __NLA_IS_SINT_TYPE(tp) \
(tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64)
+#define __NLA_IS_BEINT_TYPE(tp) \
+ (tp == NLA_BE16 || tp == NLA_BE32)
#define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition))
#define NLA_ENSURE_UINT_TYPE(tp) \
@@ -382,6 +393,7 @@ struct nla_policy {
#define NLA_ENSURE_INT_OR_BINARY_TYPE(tp) \
(__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp) || \
__NLA_IS_SINT_TYPE(tp) || \
+ __NLA_IS_BEINT_TYPE(tp) || \
tp == NLA_MSECS || \
tp == NLA_BINARY) + tp)
#define NLA_ENSURE_NO_VALIDATION_PTR(tp) \
@@ -389,6 +401,8 @@ struct nla_policy {
tp != NLA_REJECT && \
tp != NLA_NESTED && \
tp != NLA_NESTED_ARRAY) + tp)
+#define NLA_ENSURE_BEINT_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_BEINT_TYPE(tp)) + tp)
#define NLA_POLICY_RANGE(tp, _min, _max) { \
.type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
@@ -419,14 +433,6 @@ struct nla_policy {
.type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_MAX, \
.max = _max, \
- .network_byte_order = 0, \
-}
-
-#define NLA_POLICY_MAX_BE(tp, _max) { \
- .type = NLA_ENSURE_UINT_TYPE(tp), \
- .validation_type = NLA_VALIDATE_MAX, \
- .max = _max, \
- .network_byte_order = 1, \
}
#define NLA_POLICY_MASK(tp, _mask) { \
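With the big-endian attribute types introduced above, range validation of network-order attributes goes through the regular NLA_POLICY_MAX() macro rather than the removed NLA_POLICY_MAX_BE(); a hedged sketch (attribute names are illustrative only):

#include <net/netlink.h>

enum {
	DEMO_ATTR_UNSPEC,
	DEMO_ATTR_FLOWLABEL,	/* 32-bit big-endian value, illustrative */
	DEMO_ATTR_PORT,		/* 16-bit big-endian value, illustrative */
	__DEMO_ATTR_MAX,
};

static const struct nla_policy demo_be_policy[__DEMO_ATTR_MAX] = {
	/* The core compares the host-order value of the attribute against .max. */
	[DEMO_ATTR_FLOWLABEL]	= NLA_POLICY_MAX(NLA_BE32, 0xfffff),
	[DEMO_ATTR_PORT]	= { .type = NLA_BE16 },
};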
diff --git a/include/net/ping.h b/include/net/ping.h
index e4ff3911cbf5..9233ad3de0ad 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -16,9 +16,6 @@
#define PING_HTABLE_SIZE 64
#define PING_HTABLE_MASK (PING_HTABLE_SIZE-1)
-#define ping_portaddr_for_each_entry(__sk, node, list) \
- hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
-
/*
* gid_t is either uint or ushort. We want to pass it to
* proc_dointvec_minmax(), so it must not be larger than MAX_INT
diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h
index 01a70b27e026..65058faea4db 100644
--- a/include/net/sctp/stream_sched.h
+++ b/include/net/sctp/stream_sched.h
@@ -26,6 +26,8 @@ struct sctp_sched_ops {
int (*init)(struct sctp_stream *stream);
/* Init a stream */
int (*init_sid)(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
+ /* free a stream */
+ void (*free_sid)(struct sctp_stream *stream, __u16 sid);
/* Frees the entire thing */
void (*free)(struct sctp_stream *stream);
diff --git a/include/net/sock.h b/include/net/sock.h
index 9e464f6409a7..e0517ecc6531 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -323,7 +323,7 @@ struct sk_filter;
* @sk_tskey: counter to disambiguate concurrent tstamp requests
* @sk_zckey: counter to order MSG_ZEROCOPY notifications
* @sk_socket: Identd and reporting IO signals
- * @sk_user_data: RPC layer private data
+ * @sk_user_data: RPC layer private data. Write-protected by @sk_callback_lock.
* @sk_frag: cached page frag
* @sk_peek_off: current peek_offset value
* @sk_send_head: front of stuff to transmit
@@ -1889,6 +1889,13 @@ void sock_kfree_s(struct sock *sk, void *mem, int size);
void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);
+static inline void sock_replace_proto(struct sock *sk, struct proto *proto)
+{
+ if (sk->sk_socket)
+ clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
+ WRITE_ONCE(sk->sk_prot, proto);
+}
+
struct sockcm_cookie {
u64 transmit_time;
u32 mark;
@@ -2585,7 +2592,7 @@ static inline gfp_t gfp_any(void)
static inline gfp_t gfp_memcg_charge(void)
{
- return in_softirq() ? GFP_NOWAIT : GFP_KERNEL;
+ return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
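sock_replace_proto() above centralizes the sk_prot swap done by ULPs such as TLS and sockmap; a hedged sketch of a caller follows (demo_proto and the locking shown are editor's assumptions; the exact serialization depends on the call site).

#include <net/sock.h>

extern struct proto demo_proto;	/* assumed to carry the ULP's callbacks */

static void demo_install_ulp(struct sock *sk)
{
	/* Serialize against other users of sk->sk_prot; the socket lock is
	 * used here purely as an example.
	 */
	lock_sock(sk);
	sock_replace_proto(sk, &demo_proto);
	release_sock(sk);
}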
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index 473b0b0fa4ab..efc9085c6892 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -43,21 +43,20 @@ struct sock *reuseport_migrate_sock(struct sock *sk,
extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
extern int reuseport_detach_prog(struct sock *sk);
-static inline bool reuseport_has_conns(struct sock *sk, bool set)
+static inline bool reuseport_has_conns(struct sock *sk)
{
struct sock_reuseport *reuse;
bool ret = false;
rcu_read_lock();
reuse = rcu_dereference(sk->sk_reuseport_cb);
- if (reuse) {
- if (set)
- reuse->has_conns = 1;
- ret = reuse->has_conns;
- }
+ if (reuse && reuse->has_conns)
+ ret = true;
rcu_read_unlock();
return ret;
}
+void reuseport_has_conns_set(struct sock *sk);
+
#endif /* _SOCK_REUSEPORT_H */
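The split above replaces the old read/write helper with a read-only check plus an explicit setter; a sketch of the intended calling pattern (function names are illustrative, not from the patch):

#include <net/sock_reuseport.h>

/* Called once a socket in the group becomes connected, e.g. at connect() time. */
static void demo_note_connected(struct sock *sk)
{
	reuseport_has_conns_set(sk);
}

/* A connected socket in the group means the shared reuseport lookup result
 * cannot be reused blindly for this packet.
 */
static bool demo_reuseport_result_usable(struct sock *sk)
{
	return !reuseport_has_conns(sk);
}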
diff --git a/include/soc/amlogic/meson_ddr_pmu.h b/include/soc/amlogic/meson_ddr_pmu.h
new file mode 100644
index 000000000000..4a33e4ab8ada
--- /dev/null
+++ b/include/soc/amlogic/meson_ddr_pmu.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 Amlogic, Inc. All rights reserved.
+ */
+
+#ifndef __MESON_DDR_PMU_H__
+#define __MESON_DDR_PMU_H__
+
+#define MAX_CHANNEL_NUM 8
+
+enum {
+ ALL_CHAN_COUNTER_ID,
+ CHAN1_COUNTER_ID,
+ CHAN2_COUNTER_ID,
+ CHAN3_COUNTER_ID,
+ CHAN4_COUNTER_ID,
+ CHAN5_COUNTER_ID,
+ CHAN6_COUNTER_ID,
+ CHAN7_COUNTER_ID,
+ CHAN8_COUNTER_ID,
+ COUNTER_MAX_ID,
+};
+
+struct dmc_info;
+
+struct dmc_counter {
+ u64 all_cnt; /* Count of all requests entering/leaving the DDR controller */
+ union {
+ u64 all_req;
+ struct {
+ u64 all_idle_cnt;
+ u64 all_16bit_cnt;
+ };
+ };
+ u64 channel_cnt[MAX_CHANNEL_NUM]; /* Per-channel counters of the DMC bandwidth monitor */
+};
+
+struct dmc_hw_info {
+ void (*enable)(struct dmc_info *info);
+ void (*disable)(struct dmc_info *info);
+ /* Bind an axi line to a bandwidth-monitor channel */
+ void (*set_axi_filter)(struct dmc_info *info, int axi_id, int chann);
+ int (*irq_handler)(struct dmc_info *info,
+ struct dmc_counter *counter);
+ void (*get_counters)(struct dmc_info *info,
+ struct dmc_counter *counter);
+
+ int dmc_nr; /* The number of DMC controllers */
+ int chann_nr; /* The number of DMC bandwidth-monitor channels */
+ struct attribute **fmt_attr;
+ const u64 capability[2];
+};
+
+struct dmc_info {
+ const struct dmc_hw_info *hw_info;
+
+ void __iomem *ddr_reg[4];
+ unsigned long timer_value; /* Timer value in TIMER register */
+ void __iomem *pll_reg;
+ int irq_num; /* irq vector number */
+};
+
+int meson_ddr_pmu_create(struct platform_device *pdev);
+int meson_ddr_pmu_remove(struct platform_device *pdev);
+
+#endif /* __MESON_DDR_PMU_H__ */
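A SoC backend is expected to describe its DMC through dmc_hw_info; the sketch below shows the general shape with hypothetical g12_* callbacks and counts (all names and values are editor's assumptions).

#include <soc/amlogic/meson_ddr_pmu.h>

static void g12_enable(struct dmc_info *info)
{
	/* Unmask and start the bandwidth counters. */
}

static void g12_disable(struct dmc_info *info)
{
	/* Stop counting. */
}

static void g12_set_axi_filter(struct dmc_info *info, int axi_id, int chann)
{
	/* Route the given AXI master to bandwidth-monitor channel @chann. */
}

static int g12_irq_handler(struct dmc_info *info, struct dmc_counter *counter)
{
	/* Acknowledge the interrupt and fill @counter from the hardware. */
	return 0;
}

static void g12_get_counters(struct dmc_info *info, struct dmc_counter *counter)
{
	/* Snapshot the hardware counters into @counter. */
}

static const struct dmc_hw_info g12a_dmc_info = {
	.enable		= g12_enable,
	.disable	= g12_disable,
	.set_axi_filter	= g12_set_axi_filter,
	.irq_handler	= g12_irq_handler,
	.get_counters	= g12_get_counters,
	.dmc_nr		= 1,
	.chann_nr	= 4,
};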
diff --git a/include/soc/at91/sama7-ddr.h b/include/soc/at91/sama7-ddr.h
index 6ce3bd22f6c6..5ad7ac2e3a7c 100644
--- a/include/soc/at91/sama7-ddr.h
+++ b/include/soc/at91/sama7-ddr.h
@@ -26,7 +26,10 @@
#define DDR3PHY_PGSR (0x0C) /* DDR3PHY PHY General Status Register */
#define DDR3PHY_PGSR_IDONE (1 << 0) /* Initialization Done */
-#define DDR3PHY_ACIOCR (0x24) /* DDR3PHY AC I/O Configuration Register */
+#define DDR3PHY_ACDLLCR (0x14) /* DDR3PHY AC DLL Control Register */
+#define DDR3PHY_ACDLLCR_DLLSRST (1 << 30) /* DLL Soft Reset */
+
+#define DDR3PHY_ACIOCR (0x24) /* DDR3PHY AC I/O Configuration Register */
#define DDR3PHY_ACIOCR_CSPDD_CS0 (1 << 18) /* CS#[0] Power Down Driver */
#define DDR3PHY_ACIOCR_CKPDD_CK0 (1 << 8) /* CK[0] Power Down Driver */
#define DDR3PHY_ACIORC_ACPDD (1 << 3) /* AC Power Down Driver */
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index b02e9fe69146..eb5079904cc8 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -172,14 +172,15 @@ static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; }
/*
* Pin multiplexing functions.
*/
+struct device;
struct qe_pin;
#ifdef CONFIG_QE_GPIO
-extern struct qe_pin *qe_pin_request(struct device_node *np, int index);
+extern struct qe_pin *qe_pin_request(struct device *dev, int index);
extern void qe_pin_free(struct qe_pin *qe_pin);
extern void qe_pin_set_gpio(struct qe_pin *qe_pin);
extern void qe_pin_set_dedicated(struct qe_pin *pin);
#else
-static inline struct qe_pin *qe_pin_request(struct device_node *np, int index)
+static inline struct qe_pin *qe_pin_request(struct device *dev, int index)
{
return ERR_PTR(-ENOSYS);
}
diff --git a/include/soc/qcom/qcom-spmi-pmic.h b/include/soc/qcom/qcom-spmi-pmic.h
index 72398ff44719..c47cc71a999e 100644
--- a/include/soc/qcom/qcom-spmi-pmic.h
+++ b/include/soc/qcom/qcom-spmi-pmic.h
@@ -26,18 +26,29 @@
#define PM8901_SUBTYPE 0x0f
#define PM8950_SUBTYPE 0x10
#define PMI8950_SUBTYPE 0x11
+#define PMK8001_SUBTYPE 0x12
+#define PMI8996_SUBTYPE 0x13
#define PM8998_SUBTYPE 0x14
#define PMI8998_SUBTYPE 0x15
#define PM8005_SUBTYPE 0x18
-#define PM660L_SUBTYPE 0x1A
-#define PM660_SUBTYPE 0x1B
-#define PM8150_SUBTYPE 0x1E
+#define PM660L_SUBTYPE 0x1a
+#define PM660_SUBTYPE 0x1b
+#define PM8150_SUBTYPE 0x1e
#define PM8150L_SUBTYPE 0x1f
#define PM8150B_SUBTYPE 0x20
#define PMK8002_SUBTYPE 0x21
#define PM8009_SUBTYPE 0x24
+#define PMI632_SUBTYPE 0x25
#define PM8150C_SUBTYPE 0x26
+#define PM6150_SUBTYPE 0x28
#define SMB2351_SUBTYPE 0x29
+#define PM8008_SUBTYPE 0x2c
+#define PM6125_SUBTYPE 0x2d
+#define PM7250B_SUBTYPE 0x2e
+#define PMK8350_SUBTYPE 0x2f
+#define PMR735B_SUBTYPE 0x34
+#define PM6350_SUBTYPE 0x36
+#define PM2250_SUBTYPE 0x37
#define PMI8998_FAB_ID_SMIC 0x11
#define PMI8998_FAB_ID_GF 0x30
diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h
index 53171e324d1c..ecefcaec7e66 100644
--- a/include/soc/tegra/bpmp-abi.h
+++ b/include/soc/tegra/bpmp-abi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef ABI_BPMP_ABI_H
@@ -74,6 +74,32 @@
/**
* @ingroup MRQ_Format
+ * Request an answer from the peer.
+ * This should be set in mrq_request::flags for all requests targeted
+ * at BPMP. For requests originating in BPMP, this flag is optional except
+ * for messages targeting MCE, for which the flag must be set.
+ * When this flag is not set, the remote peer must not send a response
+ * back.
+ */
+#define BPMP_MAIL_DO_ACK (1U << 0U)
+
+/**
+ * @ingroup MRQ_Format
+ * Ring the sender's doorbell when responding. This should be set unless
+ * the sender wants to poll the underlying communications layer directly.
+ *
+ * An optional direction that can be specified in mrq_request::flags.
+ */
+#define BPMP_MAIL_RING_DB (1U << 1U)
+
+/**
+ * @ingroup MRQ_Format
+ * CRC present
+ */
+#define BPMP_MAIL_CRC_PRESENT (1U << 2U)
+
+/**
+ * @ingroup MRQ_Format
* @brief Header for an MRQ message
*
* Provides the MRQ number for the MRQ message: #mrq. The remainder of
@@ -85,12 +111,139 @@ struct mrq_request {
uint32_t mrq;
/**
- * @brief Flags providing follow up directions to the receiver
+ * @brief 32-bit word containing a number of fields as follows:
+ *
+ * struct {
+ * uint8_t options:4;
+ * uint8_t xid:4;
+ * uint8_t payload_length;
+ * uint16_t crc16;
+ * };
+ *
+ * **options** provides directions to the receiver and indicates CRC presence.
+ *
+ * For #BPMP_MAIL_DO_ACK and #BPMP_MAIL_RING_DB, see the documentation of the respective options.
+ * #BPMP_MAIL_CRC_PRESENT is supported on T234 and later platforms. It indicates the
+ * crc16, xid and length fields are present when set.
+ * Some platform configurations, especially those targeted at applications requiring
+ * functional safety, mandate that this option be set; otherwise BPMP will respond
+ * with -BPMP_EBADMSG and ignore the request.
+ *
+ * **xid** is a transaction ID.
+ *
+ * Only used when #BPMP_MAIL_CRC_PRESENT is set.
+ *
+ * **payload_length** of the message expressed in bytes without the size of this header.
+ * See table below for minimum accepted payload lengths for each MRQ.
+ * Note: For DMCE communication, this field expresses the length as a multiple of 4 bytes
+ * rather than bytes.
+ *
+ * Only used when #BPMP_MAIL_CRC_PRESENT is set.
+ *
+ * | MRQ | CMD | minimum payload length
+ * | -------------------- | ------------------------------------ | ------------------------------------------ |
+ * | MRQ_PING | | 4 |
+ * | MRQ_THREADED_PING | | 4 |
+ * | MRQ_RESET | any | 8 |
+ * | MRQ_I2C | | 12 + cmd_i2c_xfer_request.data_size |
+ * | MRQ_CLK | CMD_CLK_GET_RATE | 4 |
+ * | MRQ_CLK | CMD_CLK_SET_RATE | 16 |
+ * | MRQ_CLK | CMD_CLK_ROUND_RATE | 16 |
+ * | MRQ_CLK | CMD_CLK_GET_PARENT | 4 |
+ * | MRQ_CLK | CMD_CLK_SET_PARENT | 8 |
+ * | MRQ_CLK | CMD_CLK_ENABLE | 4 |
+ * | MRQ_CLK | CMD_CLK_DISABLE | 4 |
+ * | MRQ_CLK | CMD_CLK_IS_ENABLED | 4 |
+ * | MRQ_CLK | CMD_CLK_GET_ALL_INFO | 4 |
+ * | MRQ_CLK | CMD_CLK_GET_MAX_CLK_ID | 4 |
+ * | MRQ_CLK | CMD_CLK_GET_FMAX_AT_VMIN | 4 |
+ * | MRQ_QUERY_ABI | | 4 |
+ * | MRQ_PG | CMD_PG_QUERY_ABI | 12 |
+ * | MRQ_PG | CMD_PG_SET_STATE | 12 |
+ * | MRQ_PG | CMD_PG_GET_STATE | 8 |
+ * | MRQ_PG | CMD_PG_GET_NAME | 8 |
+ * | MRQ_PG | CMD_PG_GET_MAX_ID | 8 |
+ * | MRQ_THERMAL | CMD_THERMAL_QUERY_ABI | 8 |
+ * | MRQ_THERMAL | CMD_THERMAL_GET_TEMP | 8 |
+ * | MRQ_THERMAL | CMD_THERMAL_SET_TRIP | 20 |
+ * | MRQ_THERMAL | CMD_THERMAL_GET_NUM_ZONES | 4 |
+ * | MRQ_THERMAL | CMD_THERMAL_GET_THERMTRIP | 8 |
+ * | MRQ_CPU_VHINT | | 8 |
+ * | MRQ_ABI_RATCHET | | 2 |
+ * | MRQ_EMC_DVFS_LATENCY | | 8 |
+ * | MRQ_EMC_DVFS_EMCHUB | | 8 |
+ * | MRQ_EMC_DISP_RFL | | 4 |
+ * | MRQ_BWMGR | CMD_BWMGR_QUERY_ABI | 8 |
+ * | MRQ_BWMGR | CMD_BWMGR_CALC_RATE | 8 + 8 * bwmgr_rate_req.num_iso_clients |
+ * | MRQ_ISO_CLIENT | CMD_ISO_CLIENT_QUERY_ABI | 8 |
+ * | MRQ_ISO_CLIENT | CMD_ISO_CLIENT_CALCULATE_LA | 16 |
+ * | MRQ_ISO_CLIENT | CMD_ISO_CLIENT_SET_LA | 16 |
+ * | MRQ_ISO_CLIENT | CMD_ISO_CLIENT_GET_MAX_BW | 8 |
+ * | MRQ_CPU_NDIV_LIMITS | | 4 |
+ * | MRQ_CPU_AUTO_CC3 | | 4 |
+ * | MRQ_RINGBUF_CONSOLE | CMD_RINGBUF_CONSOLE_QUERY_ABI | 8 |
+ * | MRQ_RINGBUF_CONSOLE | CMD_RINGBUF_CONSOLE_READ | 5 |
+ * | MRQ_RINGBUF_CONSOLE | CMD_RINGBUF_CONSOLE_WRITE | 5 + cmd_ringbuf_console_write_req.len |
+ * | MRQ_RINGBUF_CONSOLE | CMD_RINGBUF_CONSOLE_GET_FIFO | 4 |
+ * | MRQ_STRAP | STRAP_SET | 12 |
+ * | MRQ_UPHY | CMD_UPHY_PCIE_LANE_MARGIN_CONTROL | 24 |
+ * | MRQ_UPHY | CMD_UPHY_PCIE_LANE_MARGIN_STATUS | 4 |
+ * | MRQ_UPHY | CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT | 5 |
+ * | MRQ_UPHY | CMD_UPHY_PCIE_CONTROLLER_STATE | 6 |
+ * | MRQ_UPHY | CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF | 5 |
+ * | MRQ_FMON | CMD_FMON_GEAR_CLAMP | 16 |
+ * | MRQ_FMON | CMD_FMON_GEAR_FREE | 4 |
+ * | MRQ_FMON | CMD_FMON_GEAR_GET | 4 |
+ * | MRQ_FMON | CMD_FMON_FAULT_STS_GET | 8 |
+ * | MRQ_EC | CMD_EC_STATUS_EX_GET | 12 |
+ * | MRQ_QUERY_FW_TAG | | 0 |
+ * | MRQ_DEBUG | CMD_DEBUG_OPEN_RO | 4 + length of cmd_debug_fopen_request.name |
+ * | MRQ_DEBUG | CMD_DEBUG_OPEN_WO | 4 + length of cmd_debug_fopen_request.name |
+ * | MRQ_DEBUG | CMD_DEBUG_READ | 8 |
+ * | MRQ_DEBUG | CMD_DEBUG_WRITE | 12 + cmd_debug_fwrite_request.datalen |
+ * | MRQ_DEBUG | CMD_DEBUG_CLOSE | 8 |
+ * | MRQ_TELEMETRY | | 8 |
+ * | MRQ_PWR_LIMIT | CMD_PWR_LIMIT_QUERY_ABI | 8 |
+ * | MRQ_PWR_LIMIT | CMD_PWR_LIMIT_SET | 20 |
+ * | MRQ_PWR_LIMIT | CMD_PWR_LIMIT_GET | 16 |
+ * | MRQ_PWR_LIMIT | CMD_PWR_LIMIT_CURR_CAP | 8 |
+ * | MRQ_GEARS | | 0 |
+ * | MRQ_BWMGR_INT | CMD_BWMGR_INT_QUERY_ABI | 8 |
+ * | MRQ_BWMGR_INT | CMD_BWMGR_INT_CALC_AND_SET | 16 |
+ * | MRQ_BWMGR_INT | CMD_BWMGR_INT_CAP_SET | 8 |
+ * | MRQ_OC_STATUS | | 0 |
+ *
+ * **crc16**
+ *
+ * CRC16 using polynomial x^16 + x^14 + x^12 + x^11 + x^8 + x^5 + x^4 + x^2 + 1
+ * and initialization value 0x4657. The CRC is calculated over all bytes of the message
+ * including this header. However the crc16 field is considered to be set to 0 when
+ * calculating the CRC. Only used when #BPMP_MAIL_CRC_PRESENT is set. If
+ * #BPMP_MAIL_CRC_PRESENT is set and this field does not match the CRC as
+ * calculated by BPMP, -BPMP_EBADMSG will be returned and the request will
+ * be ignored. See code snippet below on how to calculate the CRC.
*
- * | Bit | Description |
- * |-----|--------------------------------------------|
- * | 1 | ring the sender's doorbell when responding |
- * | 0 | should be 1 |
+ * @code
+ * uint16_t calc_crc_digest(uint16_t crc, uint8_t *data, size_t size)
+ * {
+ * for (size_t i = 0; i < size; i++) {
+ * crc ^= data[i] << 8;
+ * for (size_t j = 0; j < 8; j++) {
+ * if ((crc & 0x8000) == 0x8000) {
+ * crc = (crc << 1) ^ 0xAC9A;
+ * } else {
+ * crc = (crc << 1);
+ * }
+ * }
+ * }
+ * return crc;
+ * }
+ *
+ * uint16_t calc_crc(uint8_t *data, size_t size)
+ * {
+ * return calc_crc_digest(0x4657, data, size);
+ * }
+ * @endcode
*/
uint32_t flags;
} BPMP_ABI_PACKED;
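To make the header layout above concrete, here is a hedged sender-side sketch that fills the flags word and computes the CRC with the calc_crc() routine from the snippet above; the packing helper assumes a little-endian host and relies on flags sitting at byte offset 4, right after the 32-bit mrq field.

#include <stdint.h>
#include <string.h>

static uint32_t bpmp_pack_flags(uint8_t options, uint8_t xid,
				uint8_t payload_length, uint16_t crc16)
{
	/* Little-endian view of the bitfield layout documented above. */
	return (uint32_t)(options & 0xfU) |
	       ((uint32_t)(xid & 0xfU) << 4) |
	       ((uint32_t)payload_length << 8) |
	       ((uint32_t)crc16 << 16);
}

static void bpmp_finalize_request(uint8_t *msg, size_t msg_len,
				  uint8_t xid, uint8_t payload_length)
{
	uint8_t options = BPMP_MAIL_DO_ACK | BPMP_MAIL_RING_DB |
			  BPMP_MAIL_CRC_PRESENT;
	uint32_t flags;
	uint16_t crc;

	/* First pass: the crc16 field is zero while the CRC is computed. */
	flags = bpmp_pack_flags(options, xid, payload_length, 0);
	memcpy(msg + 4, &flags, sizeof(flags));

	crc = calc_crc(msg, msg_len);	/* calc_crc() as shown in the @code block */

	flags = bpmp_pack_flags(options, xid, payload_length, crc);
	memcpy(msg + 4, &flags, sizeof(flags));
}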
@@ -107,7 +260,35 @@ struct mrq_request {
struct mrq_response {
/** @brief Error code for the MRQ request itself */
int32_t err;
- /** @brief Reserved for future use */
+
+ /**
+ * @brief 32-bit word containing a number of fields as follows:
+ *
+ * struct {
+ * uint8_t options:4;
+ * uint8_t xid:4;
+ * uint8_t payload_length;
+ * uint16_t crc16;
+ * };
+ *
+ * **options** indicates CRC presence.
+ *
+ * #BPMP_MAIL_CRC_PRESENT is supported on T234 and later platforms and
+ * indicates the crc16 related fields are present when set.
+ *
+ * **xid** is the transaction ID as sent by the requestor.
+ *
+ * **payload_length** of the message expressed in bytes without the size of this header.
+ * Note: For DMCE communication, this field expresses the length as a multiple of 4 bytes
+ * rather than bytes.
+ *
+ * **crc16**
+ *
+ * CRC16 using polynomial x^16 + x^14 + x^12 + x^11 + x^8 + x^5 + x^4 + x^2 + 1
+ * and initialization value 0x4657. The CRC is calculated over all bytes of the message
+ * including this header. However the crc16 field is considered to be set to 0 when
+ * calculating the CRC. Only used when #BPMP_MAIL_CRC_PRESENT is set.
+ */
uint32_t flags;
} BPMP_ABI_PACKED;
@@ -131,24 +312,16 @@ struct mrq_response {
#define MRQ_PING 0U
#define MRQ_QUERY_TAG 1U
-#define MRQ_MODULE_LOAD 4U
-#define MRQ_MODULE_UNLOAD 5U
-#define MRQ_TRACE_MODIFY 7U
-#define MRQ_WRITE_TRACE 8U
#define MRQ_THREADED_PING 9U
-#define MRQ_MODULE_MAIL 11U
#define MRQ_DEBUGFS 19U
#define MRQ_RESET 20U
#define MRQ_I2C 21U
#define MRQ_CLK 22U
#define MRQ_QUERY_ABI 23U
-#define MRQ_PG_READ_STATE 25U
-#define MRQ_PG_UPDATE_STATE 26U
#define MRQ_THERMAL 27U
#define MRQ_CPU_VHINT 28U
#define MRQ_ABI_RATCHET 29U
#define MRQ_EMC_DVFS_LATENCY 31U
-#define MRQ_TRACE_ITER 64U
#define MRQ_RINGBUF_CONSOLE 65U
#define MRQ_PG 66U
#define MRQ_CPU_NDIV_LIMITS 67U
@@ -159,6 +332,40 @@ struct mrq_response {
#define MRQ_FMON 72U
#define MRQ_EC 73U
#define MRQ_DEBUG 75U
+#define MRQ_EMC_DVFS_EMCHUB 76U
+#define MRQ_BWMGR 77U
+#define MRQ_ISO_CLIENT 78U
+#define MRQ_EMC_DISP_RFL 79U
+#define MRQ_TELEMETRY 80U
+#define MRQ_PWR_LIMIT 81U
+#define MRQ_GEARS 82U
+#define MRQ_BWMGR_INT 83U
+#define MRQ_OC_STATUS 84U
+
+/** @cond DEPRECATED */
+#define MRQ_RESERVED_2 2U
+#define MRQ_RESERVED_3 3U
+#define MRQ_RESERVED_4 4U
+#define MRQ_RESERVED_5 5U
+#define MRQ_RESERVED_6 6U
+#define MRQ_RESERVED_7 7U
+#define MRQ_RESERVED_8 8U
+#define MRQ_RESERVED_10 10U
+#define MRQ_RESERVED_11 11U
+#define MRQ_RESERVED_12 12U
+#define MRQ_RESERVED_13 13U
+#define MRQ_RESERVED_14 14U
+#define MRQ_RESERVED_15 15U
+#define MRQ_RESERVED_16 16U
+#define MRQ_RESERVED_17 17U
+#define MRQ_RESERVED_18 18U
+#define MRQ_RESERVED_24 24U
+#define MRQ_RESERVED_25 25U
+#define MRQ_RESERVED_26 26U
+#define MRQ_RESERVED_30 30U
+#define MRQ_RESERVED_64 64U
+#define MRQ_RESERVED_74 74U
+/** @endcond DEPRECATED */
/** @} */
@@ -167,7 +374,7 @@ struct mrq_response {
* @brief Maximum MRQ code to be sent by CPU software to
* BPMP. Subject to change in future
*/
-#define MAX_CPU_MRQ_ID 75U
+#define MAX_CPU_MRQ_ID 84U
/**
* @addtogroup MRQ_Payloads
@@ -183,8 +390,11 @@ struct mrq_response {
* @defgroup ABI_info ABI Info
* @defgroup Powergating Power Gating
* @defgroup Thermal Thermal
+ * @defgroup OC_status OC status
* @defgroup Vhint CPU Voltage hint
* @defgroup EMC EMC
+ * @defgroup BWMGR BWMGR
+ * @defgroup ISO_CLIENT ISO_CLIENT
* @defgroup CPU NDIV Limits
* @defgroup RingbufConsole Ring Buffer Console
* @defgroup Strap Straps
@@ -192,8 +402,11 @@ struct mrq_response {
* @defgroup CC3 Auto-CC3
* @defgroup FMON FMON
* @defgroup EC EC
- * @defgroup Fbvolt_status Fuse Burn Voltage Status
- * @}
+ * @defgroup Telemetry Telemetry
+ * @defgroup Pwrlimit PWR_LIMIT
+ * @defgroup Gears Gears
+ * @defgroup BWMGR_INT Bandwidth Manager Integrated
+ * @} MRQ_Payloads
*/
/**
@@ -304,190 +517,6 @@ struct mrq_query_fw_tag_response {
uint8_t tag[32];
} BPMP_ABI_PACKED;
-/**
- * @ingroup MRQ_Codes
- * @def MRQ_MODULE_LOAD
- * @brief Dynamically load a BPMP code module
- *
- * * Platforms: T210, T210B01, T186
- * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186)
- * * Initiators: CCPLEX
- * * Targets: BPMP
- * * Request Payload: @ref mrq_module_load_request
- * * Response Payload: @ref mrq_module_load_response
- *
- * @note This MRQ is disabled on production systems
- *
- */
-
-/**
- * @ingroup Module
- * @brief Request with #MRQ_MODULE_LOAD
- *
- * Used by #MRQ_MODULE_LOAD calls to ask the recipient to dynamically
- * load the code located at #phys_addr and having size #size
- * bytes. #phys_addr is treated as a void pointer.
- *
- * The recipient copies the code from #phys_addr to locally allocated
- * memory prior to responding to this message.
- *
- * @todo document the module header format
- *
- * The sender is responsible for ensuring that the code is mapped in
- * the recipient's address map.
- *
- */
-struct mrq_module_load_request {
- /** @brief Base address of the code to load */
- uint32_t phys_addr;
- /** @brief Size in bytes of code to load */
- uint32_t size;
-} BPMP_ABI_PACKED;
-
-/**
- * @ingroup Module
- * @brief Response to #MRQ_MODULE_LOAD
- *
- * @todo document mrq_response::err
- */
-struct mrq_module_load_response {
- /** @brief Handle to the loaded module */
- uint32_t base;
-} BPMP_ABI_PACKED;
-/** @endcond*/
-
-/**
- * @ingroup MRQ_Codes
- * @def MRQ_MODULE_UNLOAD
- * @brief Unload a previously loaded code module
- *
- * * Platforms: T210, T210B01, T186
- * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186)
- * * Initiators: CCPLEX
- * * Targets: BPMP
- * * Request Payload: @ref mrq_module_unload_request
- * * Response Payload: N/A
- *
- * @note This MRQ is disabled on production systems
- */
-
-/**
- * @ingroup Module
- * @brief Request with #MRQ_MODULE_UNLOAD
- *
- * Used by #MRQ_MODULE_UNLOAD calls to request that a previously loaded
- * module be unloaded.
- */
-struct mrq_module_unload_request {
- /** @brief Handle of the module to unload */
- uint32_t base;
-} BPMP_ABI_PACKED;
-/** @endcond*/
-
-/**
- * @ingroup MRQ_Codes
- * @def MRQ_TRACE_MODIFY
- * @brief Modify the set of enabled trace events
- *
- * @deprecated
- *
- * * Platforms: All
- * * Initiators: CCPLEX
- * * Targets: BPMP
- * * Request Payload: @ref mrq_trace_modify_request
- * * Response Payload: @ref mrq_trace_modify_response
- *
- * @note This MRQ is disabled on production systems
- */
-
-/**
- * @ingroup Trace
- * @brief Request with #MRQ_TRACE_MODIFY
- *
- * Used by %MRQ_TRACE_MODIFY calls to enable or disable specify trace
- * events. #set takes precedence for any bit set in both #set and
- * #clr.
- */
-struct mrq_trace_modify_request {
- /** @brief Bit mask of trace events to disable */
- uint32_t clr;
- /** @brief Bit mask of trace events to enable */
- uint32_t set;
-} BPMP_ABI_PACKED;
-
-/**
- * @ingroup Trace
- * @brief Response to #MRQ_TRACE_MODIFY
- *
- * Sent in repsonse to an #MRQ_TRACE_MODIFY message. #mask reflects the
- * state of which events are enabled after the recipient acted on the
- * message.
- *
- */
-struct mrq_trace_modify_response {
- /** @brief Bit mask of trace event enable states */
- uint32_t mask;
-} BPMP_ABI_PACKED;
-
-/**
- * @ingroup MRQ_Codes
- * @def MRQ_WRITE_TRACE
- * @brief Write trace data to a buffer
- *
- * @deprecated
- *
- * * Platforms: All
- * * Initiators: CCPLEX
- * * Targets: BPMP
- * * Request Payload: @ref mrq_write_trace_request
- * * Response Payload: @ref mrq_write_trace_response
- *
- * mrq_response::err depends on the @ref mrq_write_trace_request field
- * values. err is -#BPMP_EINVAL if size is zero or area is NULL or
- * area is in an illegal range. A positive value for err indicates the
- * number of bytes written to area.
- *
- * @note This MRQ is disabled on production systems
- */
-
-/**
- * @ingroup Trace
- * @brief Request with #MRQ_WRITE_TRACE
- *
- * Used by MRQ_WRITE_TRACE calls to ask the recipient to copy trace
- * data from the recipient's local buffer to the output buffer. #area
- * is treated as a byte-aligned pointer in the recipient's address
- * space.
- *
- * The sender is responsible for ensuring that the output
- * buffer is mapped in the recipient's address map. The recipient is
- * responsible for protecting its own code and data from accidental
- * overwrites.
- */
-struct mrq_write_trace_request {
- /** @brief Base address of output buffer */
- uint32_t area;
- /** @brief Size in bytes of the output buffer */
- uint32_t size;
-} BPMP_ABI_PACKED;
-
-/**
- * @ingroup Trace
- * @brief Response to #MRQ_WRITE_TRACE
- *
- * Once this response is sent, the respondent will not access the
- * output buffer further.
- */
-struct mrq_write_trace_response {
- /**
- * @brief Flag whether more data remains in local buffer
- *
- * Value is 1 if the entire local trace buffer has been
- * drained to the outputbuffer. Value is 0 otherwise.
- */
- uint32_t eof;
-} BPMP_ABI_PACKED;
-
/** @private */
struct mrq_threaded_ping_request {
uint32_t challenge;
@@ -500,50 +529,6 @@ struct mrq_threaded_ping_response {
/**
* @ingroup MRQ_Codes
- * @def MRQ_MODULE_MAIL
- * @brief Send a message to a loadable module
- *
- * * Platforms: T210, T210B01, T186
- * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186)
- * * Initiators: Any
- * * Targets: BPMP
- * * Request Payload: @ref mrq_module_mail_request
- * * Response Payload: @ref mrq_module_mail_response
- *
- * @note This MRQ is disabled on production systems
- */
-
-/**
- * @ingroup Module
- * @brief Request with #MRQ_MODULE_MAIL
- */
-struct mrq_module_mail_request {
- /** @brief Handle to the previously loaded module */
- uint32_t base;
- /** @brief Module-specific mail payload
- *
- * The length of data[ ] is unknown to the BPMP core firmware
- * but it is limited to the size of an IPC message.
- */
- uint8_t data[BPMP_ABI_EMPTY_ARRAY];
-} BPMP_ABI_PACKED;
-
-/**
- * @ingroup Module
- * @brief Response to #MRQ_MODULE_MAIL
- */
-struct mrq_module_mail_response {
- /** @brief Module-specific mail payload
- *
- * The length of data[ ] is unknown to the BPMP core firmware
- * but it is limited to the size of an IPC message.
- */
- uint8_t data[BPMP_ABI_EMPTY_ARRAY];
-} BPMP_ABI_PACKED;
-/** @endcond */
-
-/**
- * @ingroup MRQ_Codes
* @def MRQ_DEBUGFS
* @brief Interact with BPMP's debugfs file nodes
*
@@ -686,7 +671,7 @@ struct mrq_debugfs_response {
#define DEBUGFS_S_ISDIR (1 << 9)
#define DEBUGFS_S_IRUSR (1 << 8)
#define DEBUGFS_S_IWUSR (1 << 7)
-/** @} */
+/** @} Debugfs */
/**
* @ingroup MRQ_Codes
@@ -931,7 +916,7 @@ enum mrq_reset_commands {
* @brief Request with MRQ_RESET
*
* Used by the sender of an #MRQ_RESET message to request BPMP to
- * assert or deassert a given reset line.
+ * assert or deassert a given reset line.
*/
struct mrq_reset_request {
/** @brief Reset action to perform (@ref mrq_reset_commands) */
@@ -970,7 +955,7 @@ struct mrq_reset_response {
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/** @} */
+/** @} Reset */
/**
* @ingroup MRQ_Codes
@@ -1032,7 +1017,17 @@ struct serial_i2c_request {
* @brief Trigger one or more i2c transactions
*/
struct cmd_i2c_xfer_request {
- /** @brief Valid bus number from @ref bpmp_i2c_ids*/
+ /**
+ * @brief Tegra PWR_I2C bus identifier
+ *
+ * @cond (bpmp_t234 || bpmp_t239 || bpmp_t194)
+ * Must be set to 5.
+ * @endcond (bpmp_t234 || bpmp_t239 || bpmp_t194)
+ * @cond bpmp_th500
+ * Must be set to 1.
+ * @endcond bpmp_th500
+ *
+ */
uint32_t bus_id;
/** @brief Count of valid bytes in #data_buf*/
@@ -1084,7 +1079,7 @@ struct mrq_i2c_response {
struct cmd_i2c_xfer_response xfer;
} BPMP_ABI_PACKED;
-/** @} */
+/** @} I2C */
/**
* @ingroup MRQ_Codes
@@ -1109,6 +1104,13 @@ enum {
CMD_CLK_IS_ENABLED = 6,
CMD_CLK_ENABLE = 7,
CMD_CLK_DISABLE = 8,
+/** @cond DEPRECATED */
+ CMD_CLK_PROPERTIES = 9,
+ CMD_CLK_POSSIBLE_PARENTS = 10,
+ CMD_CLK_NUM_POSSIBLE_PARENTS = 11,
+ CMD_CLK_GET_POSSIBLE_PARENT = 12,
+ CMD_CLK_RESET_REFCOUNTS = 13,
+/** @endcond DEPRECATED */
CMD_CLK_GET_ALL_INFO = 14,
CMD_CLK_GET_MAX_CLK_ID = 15,
CMD_CLK_GET_FMAX_AT_VMIN = 16,
@@ -1119,6 +1121,21 @@ enum {
#define BPMP_CLK_HAS_SET_RATE (1U << 1U)
#define BPMP_CLK_IS_ROOT (1U << 2U)
#define BPMP_CLK_IS_VAR_ROOT (1U << 3U)
+/**
+ * @brief Protection against rate and parent changes
+ *
+ * #MRQ_CLK command #CMD_CLK_SET_RATE or #MRQ_CLK command #CMD_CLK_SET_PARENT will return
+ * -#BPMP_EACCES.
+ */
+#define BPMP_CLK_RATE_PARENT_CHANGE_DENIED (1U << 30)
+
+/**
+ * @brief Protection against state changes
+ *
+ * #MRQ_CLK command #CMD_CLK_ENABLE or #MRQ_CLK command #CMD_CLK_DISABLE will return
+ * -#BPMP_EACCES.
+ */
+#define BPMP_CLK_STATE_CHANGE_DENIED (1U << 31)
#define MRQ_CLK_NAME_MAXLEN 40U
#define MRQ_CLK_MAX_PARENTS 16U
@@ -1210,6 +1227,46 @@ struct cmd_clk_disable_response {
BPMP_ABI_EMPTY
} BPMP_ABI_PACKED;
+/** @cond DEPRECATED */
+/** @private */
+struct cmd_clk_properties_request {
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
+
+/** @todo flags need to be spelled out here */
+struct cmd_clk_properties_response {
+ uint32_t flags;
+} BPMP_ABI_PACKED;
+
+/** @private */
+struct cmd_clk_possible_parents_request {
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
+
+struct cmd_clk_possible_parents_response {
+ uint8_t num_parents;
+ uint8_t reserved[3];
+ uint32_t parent_id[MRQ_CLK_MAX_PARENTS];
+} BPMP_ABI_PACKED;
+
+/** @private */
+struct cmd_clk_num_possible_parents_request {
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
+
+struct cmd_clk_num_possible_parents_response {
+ uint8_t num_parents;
+} BPMP_ABI_PACKED;
+
+struct cmd_clk_get_possible_parent_request {
+ uint8_t parent_idx;
+} BPMP_ABI_PACKED;
+
+struct cmd_clk_get_possible_parent_response {
+ uint32_t parent_id;
+} BPMP_ABI_PACKED;
+/** @endcond DEPRECATED */
+
/** @private */
struct cmd_clk_get_all_info_request {
BPMP_ABI_EMPTY
@@ -1241,6 +1298,7 @@ struct cmd_clk_get_fmax_at_vmin_response {
int64_t rate;
} BPMP_ABI_PACKED;
+
/**
* @ingroup Clocks
* @brief Request with #MRQ_CLK
@@ -1267,6 +1325,17 @@ struct cmd_clk_get_fmax_at_vmin_response {
*
*/
+/** @cond DEPRECATED
+ *
+ * Older versions of firmware also supported following sub-commands:
+ * |CMD_CLK_PROPERTIES |- |
+ * |CMD_CLK_POSSIBLE_PARENTS |- |
+ * |CMD_CLK_NUM_POSSIBLE_PARENTS|- |
+ * |CMD_CLK_GET_POSSIBLE_PARENT |clk_get_possible_parent|
+ * |CMD_CLK_RESET_REFCOUNTS |- |
+ *
+ * @endcond DEPRECATED */
+
struct mrq_clk_request {
/** @brief Sub-command and clock id concatenated to 32-bit word.
* - bits[31..24] is the sub-cmd.
@@ -1288,6 +1357,15 @@ struct mrq_clk_request {
struct cmd_clk_disable_request clk_disable;
/** @private */
struct cmd_clk_is_enabled_request clk_is_enabled;
+ /** @cond DEPRECATED */
+ /** @private */
+ struct cmd_clk_properties_request clk_properties;
+ /** @private */
+ struct cmd_clk_possible_parents_request clk_possible_parents;
+ /** @private */
+ struct cmd_clk_num_possible_parents_request clk_num_possible_parents;
+ struct cmd_clk_get_possible_parent_request clk_get_possible_parent;
+ /** @endcond DEPRECATED */
/** @private */
struct cmd_clk_get_all_info_request clk_get_all_info;
/** @private */
@@ -1321,6 +1399,17 @@ struct mrq_clk_request {
*
*/
+/** @cond DEPRECATED
+ *
+ * Older versions of firmware also supported following sub-commands:
+ * |CMD_CLK_PROPERTIES |clk_properties |
+ * |CMD_CLK_POSSIBLE_PARENTS |clk_possible_parents |
+ * |CMD_CLK_NUM_POSSIBLE_PARENTS|clk_num_possible_parents|
+ * |CMD_CLK_GET_POSSIBLE_PARENT |clk_get_possible_parents|
+ * |CMD_CLK_RESET_REFCOUNTS |- |
+ *
+ * @endcond DEPRECATED */
+
struct mrq_clk_response {
union {
struct cmd_clk_get_rate_response clk_get_rate;
@@ -1333,13 +1422,19 @@ struct mrq_clk_response {
/** @private */
struct cmd_clk_disable_response clk_disable;
struct cmd_clk_is_enabled_response clk_is_enabled;
+ /** @cond DEPRECATED */
+ struct cmd_clk_properties_response clk_properties;
+ struct cmd_clk_possible_parents_response clk_possible_parents;
+ struct cmd_clk_num_possible_parents_response clk_num_possible_parents;
+ struct cmd_clk_get_possible_parent_response clk_get_possible_parent;
+ /** @endcond DEPRECATED */
struct cmd_clk_get_all_info_response clk_get_all_info;
struct cmd_clk_get_max_clk_id_response clk_get_max_clk_id;
struct cmd_clk_get_fmax_at_vmin_response clk_get_fmax_at_vmin;
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/** @} */
+/** @} Clocks */
/**
* @ingroup MRQ_Codes
@@ -1378,107 +1473,20 @@ struct mrq_query_abi_response {
} BPMP_ABI_PACKED;
/**
- * @ingroup MRQ_Codes
- * @def MRQ_PG_READ_STATE
- * @brief Read the power-gating state of a partition
*
- * * Platforms: T186
- * @cond bpmp_t186
- * * Initiators: Any
- * * Targets: BPMP
- * * Request Payload: @ref mrq_pg_read_state_request
- * * Response Payload: @ref mrq_pg_read_state_response
- */
-
-/**
- * @ingroup Powergating
- * @brief Request with #MRQ_PG_READ_STATE
- *
- * Used by MRQ_PG_READ_STATE call to read the current state of a
- * partition.
- */
-struct mrq_pg_read_state_request {
- /** @brief ID of partition */
- uint32_t partition_id;
-} BPMP_ABI_PACKED;
-
-/**
- * @ingroup Powergating
- * @brief Response to MRQ_PG_READ_STATE
- * @todo define possible errors.
- */
-struct mrq_pg_read_state_response {
- /** @brief Read as don't care */
- uint32_t sram_state;
- /** @brief State of power partition
- * * 0 : off
- * * 1 : on
- */
- uint32_t logic_state;
-} BPMP_ABI_PACKED;
-/** @endcond*/
-/** @} */
-
-/**
- * @ingroup MRQ_Codes
- * @def MRQ_PG_UPDATE_STATE
- * @brief Modify the power-gating state of a partition. In contrast to
- * MRQ_PG calls, the operations that change state (on/off) of power
- * partition are reference counted.
- *
- * * Platforms: T186
- * @cond bpmp_t186
- * * Initiators: Any
- * * Targets: BPMP
- * * Request Payload: @ref mrq_pg_update_state_request
- * * Response Payload: N/A
- */
-
-/**
- * @ingroup Powergating
- * @brief Request with mrq_pg_update_state_request
- *
- * Used by #MRQ_PG_UPDATE_STATE call to request BPMP to change the
- * state of a power partition #partition_id.
- */
-struct mrq_pg_update_state_request {
- /** @brief ID of partition */
- uint32_t partition_id;
- /** @brief Secondary control of power partition
- * @details Ignored by many versions of the BPMP
- * firmware. For maximum compatibility, set the value
- * according to @ref logic_state
- * * 0x1: power ON partition (@ref logic_state == 0x3)
- * * 0x3: power OFF partition (@ref logic_state == 0x1)
- */
- uint32_t sram_state;
- /** @brief Controls state of power partition, legal values are
- * * 0x1 : power OFF partition
- * * 0x3 : power ON partition
- */
- uint32_t logic_state;
- /** @brief Change state of clocks of the power partition, legal values
- * * 0x0 : do not change clock state
- * * 0x1 : disable partition clocks (only applicable when
- * @ref logic_state == 0x1)
- * * 0x3 : enable partition clocks (only applicable when
- * @ref logic_state == 0x3)
- */
- uint32_t clock_state;
-} BPMP_ABI_PACKED;
-/** @endcond*/
-
-/**
* @ingroup MRQ_Codes
* @def MRQ_PG
* @brief Control power-gating state of a partition. In contrast to
* MRQ_PG_UPDATE_STATE, operations that change the power partition
* state are NOT reference counted
*
- * @note BPMP-FW forcefully turns off some partitions as part of SC7 entry
- * because their state cannot be adequately restored on exit. Therefore,
- * it is recommended to power off all domains via MRQ_PG prior to SC7 entry.
+ * @cond (bpmp_t194 || bpmp_t186)
+ * @note On T194 and earlier BPMP-FW forcefully turns off some partitions as
+ * part of SC7 entry because their state cannot be adequately restored on exit.
+ * Therefore, it is recommended to power off all domains via MRQ_PG prior to SC7
+ * entry.
* See @ref bpmp_pdomain_ids for further detail.
+ * @endcond (bpmp_t194 || bpmp_t186)
*
* * Platforms: T186, T194
* * Initiators: Any
@@ -1643,7 +1651,7 @@ struct mrq_pg_response {
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/** @} */
+/** @} Powergating */
/**
* @ingroup MRQ_Codes
@@ -1889,7 +1897,44 @@ union mrq_thermal_bpmp_to_host_response {
struct cmd_thermal_get_thermtrip_response get_thermtrip;
struct cmd_thermal_get_num_zones_response get_num_zones;
} BPMP_ABI_PACKED;
-/** @} */
+
+/** @} Thermal */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_OC_STATUS
+ * @brief Query over current status
+ *
+ * * Platforms: T234
+ * @cond bpmp_t234
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: N/A
+ * * Response Payload: @ref mrq_oc_status_response
+ *
+ * @addtogroup OC_status
+ * @{
+ */
+
+#define OC_STATUS_MAX_SIZE 24U
+
+/*
+ * @brief Response to #MRQ_OC_STATUS
+ *
+ * throt_en: Value for each OC alarm where zero signifies that throttling is
+ * disabled, and non-zero that it is enabled.
+ * event_cnt: Total number of OC events for each OC alarm.
+ *
+ * mrq_response::err is 0 if the operation was successful and
+ * -#BPMP_ENODEV otherwise.
+ */
+struct mrq_oc_status_response {
+ uint8_t throt_en[OC_STATUS_MAX_SIZE];
+ uint32_t event_cnt[OC_STATUS_MAX_SIZE];
+} BPMP_ABI_PACKED;
+
+/** @} OC_status */
+/** @endcond bpmp_t234 */
/**
* @ingroup MRQ_Codes
@@ -1948,8 +1993,9 @@ struct cpu_vhint_data {
/** reserved for future use */
uint16_t reserved[328];
} BPMP_ABI_PACKED;
-/** @endcond */
-/** @} */
+
+/** @} Vhint */
+/** @endcond bpmp_t186 */
/**
* @ingroup MRQ_Codes
@@ -2016,14 +2062,15 @@ struct mrq_abi_ratchet_response {
/** @brief BPMP's ratchet value */
uint16_t ratchet;
};
-/** @} */
+
+/** @} ABI_info */
/**
* @ingroup MRQ_Codes
* @def MRQ_EMC_DVFS_LATENCY
* @brief Query frequency dependent EMC DVFS latency
*
- * * Platforms: T186, T194
+ * * Platforms: T186, T194, T234
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: N/A
@@ -2053,7 +2100,543 @@ struct mrq_emc_dvfs_latency_response {
struct emc_dvfs_latency pairs[EMC_DVFS_LATENCY_MAX_SIZE];
} BPMP_ABI_PACKED;
-/** @} */
+/** @} EMC */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_EMC_DVFS_EMCHUB
+ * @brief Query EMC HUB frequencies
+ *
+ * * Platforms: T234 onwards
+ * @cond (bpmp_t234 || bpmp_t239 || bpmp_th500)
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: N/A
+ * * Response Payload: @ref mrq_emc_dvfs_emchub_response
+ * @addtogroup EMC
+ * @{
+ */
+
+/**
+ * @brief Used by @ref mrq_emc_dvfs_emchub_response
+ */
+struct emc_dvfs_emchub {
+ /** @brief EMC DVFS node frequency in kHz */
+ uint32_t freq;
+ /** @brief EMC HUB frequency in kHz */
+ uint32_t hub_freq;
+} BPMP_ABI_PACKED;
+
+#define EMC_DVFS_EMCHUB_MAX_SIZE EMC_DVFS_LATENCY_MAX_SIZE
+/**
+ * @brief Response to #MRQ_EMC_DVFS_EMCHUB
+ */
+struct mrq_emc_dvfs_emchub_response {
+ /** @brief The number of valid entries in #pairs */
+ uint32_t num_pairs;
+ /** @brief EMC DVFS node <frequency, hub frequency> information */
+ struct emc_dvfs_emchub pairs[EMC_DVFS_EMCHUB_MAX_SIZE];
+} BPMP_ABI_PACKED;
+
+/** @} EMC */
+/** @endcond (bpmp_t234 || bpmp_t239 || bpmp_th500) */
+
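A hedged sketch of consuming the EMCHUB response above (the lookup helper is illustrative only):

#include <stdint.h>

static uint32_t demo_emchub_rate_for(const struct mrq_emc_dvfs_emchub_response *resp,
				     uint32_t emc_khz)
{
	uint32_t i;

	for (i = 0; i < resp->num_pairs && i < EMC_DVFS_EMCHUB_MAX_SIZE; i++) {
		if (resp->pairs[i].freq == emc_khz)
			return resp->pairs[i].hub_freq;
	}

	return 0;	/* no matching EMC DVFS node */
}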
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_EMC_DISP_RFL
+ * @brief Set EMC display RFL handshake mode of operations
+ *
+ * * Platforms: T234 onwards
+ * @cond (bpmp_t234 || bpmp_t239 || bpmp_th500)
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_emc_disp_rfl_request
+ * * Response Payload: N/A
+ *
+ * @addtogroup EMC
+ * @{
+ */
+
+enum mrq_emc_disp_rfl_mode {
+ /** @brief EMC display RFL handshake disabled */
+ EMC_DISP_RFL_MODE_DISABLED = 0,
+ /** @brief EMC display RFL handshake enabled */
+ EMC_DISP_RFL_MODE_ENABLED = 1,
+};
+
+/**
+ * @ingroup EMC
+ * @brief Request with #MRQ_EMC_DISP_RFL
+ *
+ * Used by the sender of an #MRQ_EMC_DISP_RFL message to
+ * request the mode of EMC display RFL handshake.
+ *
+ * mrq_response::err is
+ * * 0: RFL mode is set successfully
+ * * -#BPMP_EINVAL: invalid mode requested
+ * * -#BPMP_ENOSYS: RFL handshake is not supported
+ * * -#BPMP_EACCES: Permission denied
+ * * -#BPMP_ENODEV: if disp rfl mrq is not supported by BPMP-FW
+ */
+struct mrq_emc_disp_rfl_request {
+ /** @brief EMC display RFL mode (@ref mrq_emc_disp_rfl_mode) */
+ uint32_t mode;
+} BPMP_ABI_PACKED;
+
+/** @} EMC */
+/** @endcond (bpmp_t234 || bpmp_t239 || bpmp_th500) */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_BWMGR
+ * @brief bwmgr requests
+ *
+ * * Platforms: T234 onwards
+ * @cond (bpmp_t234 || bpmp_t239 || bpmp_th500)
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_bwmgr_request
+ * * Response Payload: @ref mrq_bwmgr_response
+ *
+ * @addtogroup BWMGR
+ *
+ * @{
+ */
+
+enum mrq_bwmgr_cmd {
+ /**
+ * @brief Check whether the BPMP driver supports the specified
+ * request type
+ *
+ * mrq_response::err is 0 if the specified request is
+ * supported and -#BPMP_ENODEV otherwise.
+ */
+ CMD_BWMGR_QUERY_ABI = 0,
+
+ /**
+ * @brief Determine dram rate to satisfy iso/niso bw requests
+ *
+ * mrq_response::err is
+ * * 0: calc_rate succeeded.
+ * * -#BPMP_EINVAL: Invalid request parameters.
+ * * -#BPMP_ENOTSUP: Requested bw is not available.
+ */
+ CMD_BWMGR_CALC_RATE = 1
+};
+
+/*
+ * request data for request type CMD_BWMGR_QUERY_ABI
+ *
+ * type: Request type for which to check existence.
+ */
+struct cmd_bwmgr_query_abi_request {
+ uint32_t type;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Used by @ref cmd_bwmgr_calc_rate_request
+ */
+struct iso_req {
+ /* @brief bwmgr client ID @ref bpmp_bwmgr_ids */
+ uint32_t id;
+ /* @brief bw in kBps requested by client */
+ uint32_t iso_bw;
+} BPMP_ABI_PACKED;
+
+#define MAX_ISO_CLIENTS 13U
+/*
+ * request data for request type CMD_BWMGR_CALC_RATE
+ */
+struct cmd_bwmgr_calc_rate_request {
+ /* @brief total bw in kBps requested by all niso clients */
+ uint32_t sum_niso_bw;
+ /* @brief The number of iso clients */
+ uint32_t num_iso_clients;
+ /* @brief iso_req <id, iso_bw> information */
+ struct iso_req isobw_reqs[MAX_ISO_CLIENTS];
+} BPMP_ABI_PACKED;
+
+/*
+ * response data for request type CMD_BWMGR_CALC_RATE
+ *
+ * iso_rate_min: min dram data clk rate in kHz to satisfy all iso bw reqs
+ * total_rate_min: min dram data clk rate in kHz to satisfy all bw reqs
+ */
+struct cmd_bwmgr_calc_rate_response {
+ uint32_t iso_rate_min;
+ uint32_t total_rate_min;
+} BPMP_ABI_PACKED;
+
+/*
+ * @brief Request with #MRQ_BWMGR
+ *
+ *
+ * |sub-command |payload |
+ * |----------------------------|------------------------------|
+ * |CMD_BWMGR_QUERY_ABI | cmd_bwmgr_query_abi_request |
+ * |CMD_BWMGR_CALC_RATE | cmd_bwmgr_calc_rate_request |
+ *
+ */
+struct mrq_bwmgr_request {
+ uint32_t cmd;
+ union {
+ struct cmd_bwmgr_query_abi_request query_abi;
+ struct cmd_bwmgr_calc_rate_request bwmgr_rate_req;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/*
+ * @brief Response to MRQ_BWMGR
+ *
+ * |sub-command |payload |
+ * |----------------------------|------------------------------|
+ * |CMD_BWMGR_CALC_RATE | cmd_bwmgr_calc_rate_response |
+ */
+struct mrq_bwmgr_response {
+ union {
+ struct cmd_bwmgr_calc_rate_response bwmgr_rate_resp;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/** @} BWMGR */
+/** @endcond (bpmp_t234 || bpmp_t239 || bpmp_th500) */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_BWMGR_INT
+ * @brief bpmp-integrated bwmgr requests
+ *
+ * * Platforms: T234 onwards
+ * @cond (bpmp_t234 || bpmp_t239 || bpmp_th500)
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_bwmgr_int_request
+ * * Response Payload: @ref mrq_bwmgr_int_response
+ *
+ * @addtogroup BWMGR_INT
+ * @{
+ */
+
+enum mrq_bwmgr_int_cmd {
+ /**
+ * @brief Check whether the BPMP-FW supports the specified
+ * request type
+ *
+ * mrq_response::err is 0 if the specified request is
+ * supported and -#BPMP_ENODEV otherwise.
+ */
+ CMD_BWMGR_INT_QUERY_ABI = 1,
+
+ /**
+ * @brief Determine and set dram rate to satisfy iso/niso bw request
+ *
+ * mrq_response::err is
+ * * 0: request succeeded.
+ * * -#BPMP_EINVAL: Invalid request parameters.
+ * set_frequency in @ref cmd_bwmgr_int_calc_and_set_response
+ * will not be set.
+ * * -#BPMP_ENOTSUP: Requested bw is not available.
+ * set_frequency in @ref cmd_bwmgr_int_calc_and_set_response
+ * will be current dram-clk rate.
+ */
+ CMD_BWMGR_INT_CALC_AND_SET = 2,
+
+ /**
+ * @brief Set a max DRAM frequency for the bandwidth-manager
+ *
+ * mrq_response::err is
+ * * 0: request succeeded.
+ * * -#BPMP_ENOTSUP: Requested cap frequency is not possible.
+ */
+ CMD_BWMGR_INT_CAP_SET = 3
+};
+
+/*
+ * request structure for request type CMD_BWMGR_QUERY_ABI
+ *
+ * type: Request type for which to check existence.
+ */
+struct cmd_bwmgr_int_query_abi_request {
+ /* @brief request type determined by @ref mrq_bwmgr_int_cmd */
+ uint32_t type;
+} BPMP_ABI_PACKED;
+
+/**
+ * @defgroup bwmgr_int_unit_type BWMGR_INT floor unit-types
+ * @addtogroup bwmgr_int_unit_type
+ * @{
+ */
+/** @brief kilobytes per second unit-type */
+#define BWMGR_INT_UNIT_KBPS 0U
+/** @brief kilohertz unit-type */
+#define BWMGR_INT_UNIT_KHZ 1U
+
+/** @} bwmgr_int_unit_type */
+
+/*
+ * request data for request type CMD_BWMGR_INT_CALC_AND_SET
+ */
+struct cmd_bwmgr_int_calc_and_set_request {
+ /* @brief bwmgr client ID @ref bpmp_bwmgr_ids */
+ uint32_t client_id;
+ /* @brief average niso bw usage in kBps requested by client. */
+ uint32_t niso_bw;
+ /*
+ * @brief average iso bw usage in kBps requested by client.
+ * Value is ignored if client is niso. Determined by client_id.
+ */
+ uint32_t iso_bw;
+ /*
+ * @brief memory clock floor requested by client.
+ * Unit determined by floor_unit.
+ */
+ uint32_t mc_floor;
+ /*
+ * @brief toggle to determine the unit-type of floor value.
+ * See @ref bwmgr_int_unit_type definitions for unit-type mappings.
+ */
+ uint8_t floor_unit;
+} BPMP_ABI_PACKED;
+
+struct cmd_bwmgr_int_cap_set_request {
+ /* @brief requested cap frequency in Hz. */
+ uint64_t rate;
+} BPMP_ABI_PACKED;
+
+/*
+ * response data for request type CMD_BWMGR_CALC_AND_SET
+ */
+struct cmd_bwmgr_int_calc_and_set_response {
+ /* @brief current set memory clock frequency in Hz */
+ uint64_t rate;
+} BPMP_ABI_PACKED;
+
+/*
+ * @brief Request with #MRQ_BWMGR_INT
+ *
+ *
+ * |sub-command |payload |
+ * |----------------------------|-----------------------------------|
+ * |CMD_BWMGR_INT_QUERY_ABI | cmd_bwmgr_int_query_abi_request |
+ * |CMD_BWMGR_INT_CALC_AND_SET | cmd_bwmgr_int_calc_and_set_request|
+ * |CMD_BWMGR_INT_CAP_SET | cmd_bwmgr_int_cap_set_request |
+ *
+ */
+struct mrq_bwmgr_int_request {
+ uint32_t cmd;
+ union {
+ struct cmd_bwmgr_int_query_abi_request query_abi;
+ struct cmd_bwmgr_int_calc_and_set_request bwmgr_calc_set_req;
+ struct cmd_bwmgr_int_cap_set_request bwmgr_cap_set_req;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/*
+ * @brief Response to MRQ_BWMGR_INT
+ *
+ * |sub-command |payload |
+ * |----------------------------|---------------------------------------|
+ * |CMD_BWMGR_INT_CALC_AND_SET | cmd_bwmgr_int_calc_and_set_response |
+ */
+struct mrq_bwmgr_int_response {
+ union {
+ struct cmd_bwmgr_int_calc_and_set_response bwmgr_calc_set_resp;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/** @} BWMGR_INT */
+/** @endcond (bpmp_t234 || bpmp_t239 || bpmp_th500) */
+
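The BWMGR_INT request layout above can be illustrated by filling a CMD_BWMGR_INT_CALC_AND_SET request; the client ID and bandwidth numbers below are placeholders, not values taken from the ABI.

#include <string.h>

static void demo_fill_bwmgr_int_request(struct mrq_bwmgr_int_request *req)
{
	memset(req, 0, sizeof(*req));

	req->cmd = CMD_BWMGR_INT_CALC_AND_SET;
	req->bwmgr_calc_set_req.client_id = 1;		/* placeholder @ref bpmp_bwmgr_ids entry */
	req->bwmgr_calc_set_req.niso_bw = 800000;	/* average non-ISO bandwidth, kBps */
	req->bwmgr_calc_set_req.iso_bw = 0;		/* ignored for a non-ISO client */
	req->bwmgr_calc_set_req.mc_floor = 204000;	/* floor value, unit selected below */
	req->bwmgr_calc_set_req.floor_unit = BWMGR_INT_UNIT_KHZ;
}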
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_ISO_CLIENT
+ * @brief ISO client requests
+ *
+ * * Platforms: T234 onwards
+ * @cond (bpmp_t234 || bpmp_t239 || bpmp_th500)
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_iso_client_request
+ * * Response Payload: @ref mrq_iso_client_response
+ *
+ * @addtogroup ISO_CLIENT
+ * @{
+ */
+
+enum mrq_iso_client_cmd {
+ /**
+ * @brief Check whether the BPMP driver supports the specified
+ * request type
+ *
+ * mrq_response::err is 0 if the specified request is
+ * supported and -#BPMP_ENODEV otherwise.
+ */
+ CMD_ISO_CLIENT_QUERY_ABI = 0,
+
+ /*
+ * @brief Check whether a legal LA is possible for the ISO client. Without
+ * programming the LA MC registers, calculate and verify that a legal LA is
+ * possible for the iso bw requested by the ISO client.
+ *
+ * mrq_response::err is
+ * * 0: check la succeeded.
+ * * -#BPMP_EINVAL: Invalid request parameters.
+ * * -#BPMP_EFAULT: Legal LA is not possible for client requested iso_bw
+ */
+ CMD_ISO_CLIENT_CALCULATE_LA = 1,
+
+ /*
+ * @brief Set LA for the ISO client. Calculate and program the LA/PTSA
+ * MC registers corresponding to the client making the bw request.
+ *
+ * mrq_response::err is
+ * * 0: set la succeeded.
+ * * -#BPMP_EINVAL: Invalid request parameters.
+ * * -#BPMP_EFAULT: Failed to calculate or program MC registers.
+ */
+ CMD_ISO_CLIENT_SET_LA = 2,
+
+ /*
+ * @brief Get max possible bw for iso client
+ *
+ * mrq_response::err is
+ * * 0: get_max_bw succeeded.
+ * * -#BPMP_EINVAL: Invalid request parameters.
+ */
+ CMD_ISO_CLIENT_GET_MAX_BW = 3
+};
+
+/*
+ * request data for request type CMD_ISO_CLIENT_QUERY_ABI
+ *
+ * type: Request type for which to check existence.
+ */
+struct cmd_iso_client_query_abi_request {
+ uint32_t type;
+} BPMP_ABI_PACKED;
+
+/*
+ * request data for request type CMD_ISO_CLIENT_CALCULATE_LA
+ *
+ * id: client ID in @ref bpmp_bwmgr_ids
+ * bw: bw requested in kBps by client ID.
+ * init_bw_floor: initial dram_bw_floor in kBps passed by client ID.
+ * ISO client will perform mempool allocation and DVFS buffering based
+ * on this dram_bw_floor.
+ */
+struct cmd_iso_client_calculate_la_request {
+ uint32_t id;
+ uint32_t bw;
+ uint32_t init_bw_floor;
+} BPMP_ABI_PACKED;
+
+/*
+ * request data for request type CMD_ISO_CLIENT_SET_LA
+ *
+ * id: client ID in @ref bpmp_bwmgr_ids
+ * bw: bw requested in kBps by client ID.
+ * final_bw_floor: final dram_bw_floor in kBps.
+ * Sometimes the initial dram_bw_floor passed by the ISO client may need to be
+ * updated to account for higher dram frequencies. This is the final dram_bw_floor
+ * used to calculate and program the MC registers.
+ */
+struct cmd_iso_client_set_la_request {
+ uint32_t id;
+ uint32_t bw;
+ uint32_t final_bw_floor;
+} BPMP_ABI_PACKED;
+
+/*
+ * request data for request type CMD_ISO_CLIENT_GET_MAX_BW
+ *
+ * id: client ID in @ref bpmp_bwmgr_ids
+ */
+struct cmd_iso_client_get_max_bw_request {
+ uint32_t id;
+} BPMP_ABI_PACKED;
+
+/*
+ * response data for request type CMD_ISO_CLIENT_CALCULATE_LA
+ *
+ * la_rate_floor: minimum dram_rate_floor in kHz at which a legal la is possible
+ * iso_client_only_rate: Minimum dram freq in kHz required to satisfy this client's
+ * iso bw request, assuming all other iso clients are inactive.
+ */
+struct cmd_iso_client_calculate_la_response {
+ uint32_t la_rate_floor;
+ uint32_t iso_client_only_rate;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Used by @ref cmd_iso_client_get_max_bw_response
+ */
+struct iso_max_bw {
+ /* @brief dram frequency in kHz */
+ uint32_t freq;
+ /* @brief max possible iso-bw in kBps */
+ uint32_t iso_bw;
+} BPMP_ABI_PACKED;
+
+#define ISO_MAX_BW_MAX_SIZE 14U
+/*
+ * response data for request type CMD_ISO_CLIENT_GET_MAX_BW
+ */
+struct cmd_iso_client_get_max_bw_response {
+	/* @brief The number of valid entries in the iso_max_bw pairs array */
+ uint32_t num_pairs;
+ /* @brief max ISOBW <dram freq, max bw> information */
+ struct iso_max_bw pairs[ISO_MAX_BW_MAX_SIZE];
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request with #MRQ_ISO_CLIENT
+ *
+ * Used by the sender of an #MRQ_ISO_CLIENT message.
+ *
+ * |sub-command |payload |
+ * |------------------------------------ |----------------------------------------|
+ * |CMD_ISO_CLIENT_QUERY_ABI |cmd_iso_client_query_abi_request |
+ * |CMD_ISO_CLIENT_CALCULATE_LA |cmd_iso_client_calculate_la_request |
+ * |CMD_ISO_CLIENT_SET_LA |cmd_iso_client_set_la_request |
+ * |CMD_ISO_CLIENT_GET_MAX_BW |cmd_iso_client_get_max_bw_request |
+ *
+ */
+
+struct mrq_iso_client_request {
+ /* Type of request. Values listed in enum mrq_iso_client_cmd */
+ uint32_t cmd;
+ union {
+ struct cmd_iso_client_query_abi_request query_abi;
+ struct cmd_iso_client_calculate_la_request calculate_la_req;
+ struct cmd_iso_client_set_la_request set_la_req;
+ struct cmd_iso_client_get_max_bw_request max_isobw_req;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response to MRQ_ISO_CLIENT
+ *
+ * Each sub-command supported by @ref mrq_iso_client_request may return
+ * sub-command-specific data. Some do and some do not, as indicated in
+ * the following table:
+ *
+ * |sub-command |payload |
+ * |---------------------------- |------------------------------------|
+ * |CMD_ISO_CLIENT_CALCULATE_LA |cmd_iso_client_calculate_la_response|
+ * |CMD_ISO_CLIENT_SET_LA |N/A |
+ * |CMD_ISO_CLIENT_GET_MAX_BW |cmd_iso_client_get_max_bw_response |
+ *
+ */
+
+struct mrq_iso_client_response {
+ union {
+ struct cmd_iso_client_calculate_la_response calculate_la_resp;
+ struct cmd_iso_client_get_max_bw_response max_isobw_resp;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
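
A minimal sketch (not part of this patch) of querying and walking the max ISO bandwidth table; EXAMPLE_ISO_CLIENT_ID is a placeholder and the BPMP transfer is elided:

/* Illustrative sketch only. */
static void example_iso_client_get_max_bw(void)
{
	struct mrq_iso_client_request req = { 0 };
	struct mrq_iso_client_response resp = { 0 };
	uint32_t i;

	req.cmd = CMD_ISO_CLIENT_GET_MAX_BW;
	req.max_isobw_req.id = EXAMPLE_ISO_CLIENT_ID;   /* from bpmp_bwmgr_ids */

	/* ... transfer req/resp over the BPMP channel (elided) ... */

	for (i = 0; i < resp.max_isobw_resp.num_pairs && i < ISO_MAX_BW_MAX_SIZE; i++)
		pr_info("dram %u kHz -> max iso bw %u kBps\n",
			resp.max_isobw_resp.pairs[i].freq,
			resp.max_isobw_resp.pairs[i].iso_bw);
}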
+
+/** @} ISO_CLIENT */
+/** @endcond (bpmp_t234 || bpmp_t239 || bpmp_th500) */
/**
* @ingroup MRQ_Codes
@@ -2061,7 +2644,7 @@ struct mrq_emc_dvfs_latency_response {
* @brief CPU freq. limits in ndiv
*
* * Platforms: T194 onwards
- * @cond bpmp_t194
+ * @cond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_cpu_ndiv_limits_request
@@ -2094,15 +2677,15 @@ struct mrq_cpu_ndiv_limits_response {
uint16_t ndiv_min;
} BPMP_ABI_PACKED;
-/** @} */
-/** @endcond */
+/** @} CPU */
+/** @endcond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500) */
/**
* @ingroup MRQ_Codes
* @def MRQ_CPU_AUTO_CC3
* @brief Query CPU cluster auto-CC3 configuration
*
- * * Platforms: T194 onwards
+ * * Platforms: T194
* @cond bpmp_t194
* * Initiators: CCPLEX
* * Targets: BPMP
@@ -2140,40 +2723,8 @@ struct mrq_cpu_auto_cc3_response {
uint32_t auto_cc3_config;
} BPMP_ABI_PACKED;
-/** @} */
-/** @endcond */
-
-/**
- * @ingroup MRQ_Codes
- * @def MRQ_TRACE_ITER
- * @brief Manage the trace iterator
- *
- * @deprecated
- *
- * * Platforms: All
- * * Initiators: CCPLEX
- * * Targets: BPMP
- * * Request Payload: N/A
- * * Response Payload: @ref mrq_trace_iter_request
- * @addtogroup Trace
- * @{
- */
-enum {
- /** @brief (re)start the tracing now. Ignore older events */
- TRACE_ITER_INIT = 0,
- /** @brief Clobber all events in the trace buffer */
- TRACE_ITER_CLEAN = 1
-};
-
-/**
- * @brief Request with #MRQ_TRACE_ITER
- */
-struct mrq_trace_iter_request {
- /** @brief TRACE_ITER_INIT or TRACE_ITER_CLEAN */
- uint32_t cmd;
-} BPMP_ABI_PACKED;
-
-/** @} */
+/** @} CC3 */
+/** @endcond bpmp_t194 */
/**
* @ingroup MRQ_Codes
@@ -2351,7 +2902,8 @@ union mrq_ringbuf_console_bpmp_to_host_response {
struct cmd_ringbuf_console_write_resp write;
struct cmd_ringbuf_console_get_fifo_resp get_fifo;
} BPMP_ABI_PACKED;
-/** @} */
+
+/** @} RingbufConsole */
/**
* @ingroup MRQ_Codes
@@ -2359,7 +2911,7 @@ union mrq_ringbuf_console_bpmp_to_host_response {
* @brief Set a strap value controlled by BPMP
*
* * Platforms: T194 onwards
- * @cond bpmp_t194
+ * @cond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_strap_request
@@ -2390,17 +2942,14 @@ enum mrq_strap_cmd {
struct mrq_strap_request {
/** @brief @ref mrq_strap_cmd */
uint32_t cmd;
- /** @brief Strap ID from @ref Strap_Ids */
+ /** @brief Strap ID from @ref Strap_Identifiers */
uint32_t id;
/** @brief Desired value for strap (if cmd is #STRAP_SET) */
uint32_t value;
} BPMP_ABI_PACKED;
-/**
- * @defgroup Strap_Ids Strap Identifiers
- * @}
- */
-/** @endcond */
+/** @} Strap */
+/** @endcond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500) */
/**
* @ingroup MRQ_Codes
@@ -2408,7 +2957,7 @@ struct mrq_strap_request {
* @brief Perform a UPHY operation
*
* * Platforms: T194 onwards
- * @cond bpmp_t194
+ * @cond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_uphy_request
@@ -2423,6 +2972,9 @@ enum {
CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT = 3,
CMD_UPHY_PCIE_CONTROLLER_STATE = 4,
CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF = 5,
+ CMD_UPHY_DISPLAY_PORT_INIT = 6,
+ CMD_UPHY_DISPLAY_PORT_OFF = 7,
+ CMD_UPHY_XUSB_DYN_LANES_RESTORE = 8,
CMD_UPHY_MAX,
};
@@ -2445,28 +2997,41 @@ struct cmd_uphy_margin_status_response {
} BPMP_ABI_PACKED;
struct cmd_uphy_ep_controller_pll_init_request {
- /** @brief EP controller number, valid: 0, 4, 5 */
+ /** @brief EP controller number, T194 valid: 0, 4, 5; T234 valid: 5, 6, 7, 10; T239 valid: 0 */
uint8_t ep_controller;
} BPMP_ABI_PACKED;
struct cmd_uphy_pcie_controller_state_request {
- /** @brief PCIE controller number, valid: 0, 1, 2, 3, 4 */
+ /** @brief PCIE controller number, T194 valid: 0-4; T234 valid: 0-10; T239 valid: 0-3 */
uint8_t pcie_controller;
uint8_t enable;
} BPMP_ABI_PACKED;
struct cmd_uphy_ep_controller_pll_off_request {
- /** @brief EP controller number, valid: 0, 4, 5 */
+ /** @brief EP controller number, T194 valid: 0, 4, 5; T234 valid: 5, 6, 7, 10; T239 valid: 0 */
uint8_t ep_controller;
} BPMP_ABI_PACKED;
+struct cmd_uphy_display_port_init_request {
+ /** @brief DisplayPort link rate, T239 valid: 1620, 2700, 5400, 8100, 2160, 2430, 3240, 4320, 6750 */
+ uint16_t link_rate;
+ /** @brief 1: lane 0; 2: lane 1; 3: lane 0 and 1 */
+ uint16_t lanes_bitmap;
+} BPMP_ABI_PACKED;
+
+struct cmd_uphy_xusb_dyn_lanes_restore_request {
+ /** @brief 1: lane 0; 2: lane 1; 3: lane 0 and 1 */
+ uint16_t lanes_bitmap;
+} BPMP_ABI_PACKED;
+
/**
* @ingroup UPHY
* @brief Request with #MRQ_UPHY
*
- * Used by the sender of an #MRQ_UPHY message to control UPHY Lane RX margining.
- * The uphy_request is split into several sub-commands. Some sub-commands
- * require no additional data. Others have a sub-command specific payload
+ * Used by the sender of an #MRQ_UPHY message to control UPHY.
+ * The uphy_request is split into several sub-commands. CMD_UPHY_PCIE_LANE_MARGIN_STATUS
+ * requires no additional data. Others carry a sub-command-specific payload. The table
+ * below lists each sub-command with its corresponding payload data.
*
* |sub-command |payload |
* |------------------------------------ |----------------------------------------|
@@ -2475,6 +3040,9 @@ struct cmd_uphy_ep_controller_pll_off_request {
* |CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT |cmd_uphy_ep_controller_pll_init_request |
* |CMD_UPHY_PCIE_CONTROLLER_STATE |cmd_uphy_pcie_controller_state_request |
* |CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF |cmd_uphy_ep_controller_pll_off_request |
+ * |CMD_UPHY_DISPLAY_PORT_INIT           |cmd_uphy_display_port_init_request       |
+ * |CMD_UPHY_DISPLAY_PORT_OFF            |                                         |
+ * |CMD_UPHY_XUSB_DYN_LANES_RESTORE |cmd_uphy_xusb_dyn_lanes_restore_request |
*
*/
@@ -2489,6 +3057,8 @@ struct mrq_uphy_request {
struct cmd_uphy_ep_controller_pll_init_request ep_ctrlr_pll_init;
struct cmd_uphy_pcie_controller_state_request controller_state;
struct cmd_uphy_ep_controller_pll_off_request ep_ctrlr_pll_off;
+ struct cmd_uphy_display_port_init_request display_port_init;
+ struct cmd_uphy_xusb_dyn_lanes_restore_request xusb_dyn_lanes_restore;
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
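
A rough sketch (not part of this patch) of building a DisplayPort init request. The leading members of mrq_uphy_request are not shown in this hunk; a sub-command field named "cmd" is assumed here:

/* Illustrative sketch only; the "cmd" member name is an assumption. */
static void example_uphy_display_port_init(void)
{
	struct mrq_uphy_request req = { 0 };

	req.cmd = CMD_UPHY_DISPLAY_PORT_INIT;
	req.display_port_init.link_rate = 2700;    /* one of the listed DP rates */
	req.display_port_init.lanes_bitmap = 0x3;  /* lane 0 and lane 1 */

	/* ... transfer over the BPMP channel (elided) ... */
}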
@@ -2513,8 +3083,8 @@ struct mrq_uphy_response {
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/** @} */
-/** @endcond */
+/** @} UPHY */
+/** @endcond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500) */
/**
* @ingroup MRQ_Codes
@@ -2522,14 +3092,16 @@ struct mrq_uphy_response {
* @brief Perform a frequency monitor configuration operations
*
* * Platforms: T194 onwards
- * @cond bpmp_t194
+ * @cond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_fmon_request
* * Response Payload: @ref mrq_fmon_response
+ * @endcond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500)
*
* @addtogroup FMON
* @{
+ * @cond (bpmp_t194 || bpmp_t234)
*/
enum {
/**
@@ -2538,6 +3110,20 @@ enum {
* The monitored clock must be running for clamp to succeed. If
* clamped, FMON configuration is preserved when clock rate
* and/or state is changed.
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_EACCES: FMON access error @n
+ * -#BPMP_EBADCMD if subcommand is not supported @n
+ * -#BPMP_EBADSLT: clamp FMON on cluster with auto-CC3 enabled @n
+ * -#BPMP_EBUSY: fmon is already clamped at different rate @n
+ * -#BPMP_EFAULT: self-diagnostic error @n
+ * -#BPMP_EINVAL: invalid FMON configuration @n
+ * -#BPMP_EOPNOTSUPP: not in production mode @n
+ * -#BPMP_ENODEV: invalid clk_id @n
+ * -#BPMP_ENOENT: no calibration data, uninitialized @n
+ * -#BPMP_ENOTSUP: avfs config not set @n
+ * -#BPMP_ENOSYS: clamp FMON on cluster clock w/ no NAFLL @n
+ * -#BPMP_ETIMEDOUT: operation timed out @n
*/
CMD_FMON_GEAR_CLAMP = 1,
/**
@@ -2545,6 +3131,13 @@ enum {
*
* Allow FMON configuration to follow monitored clock rate
* and/or state changes.
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_EBADCMD if subcommand is not supported @n
+ * -#BPMP_ENODEV: invalid clk_id @n
+ * -#BPMP_ENOENT: no calibration data, uninitialized @n
+ * -#BPMP_ENOTSUP: avfs config not set @n
+ * -#BPMP_EOPNOTSUPP: not in production mode @n
*/
CMD_FMON_GEAR_FREE = 2,
/**
@@ -2553,11 +3146,54 @@ enum {
*
* Inherently racy, since clamp state can be changed
* concurrently. Useful for testing.
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_EBADCMD if subcommand is not supported @n
+ * -#BPMP_ENODEV: invalid clk_id @n
+ * -#BPMP_ENOENT: no calibration data, uninitialized @n
+ * -#BPMP_ENOTSUP: avfs config not set @n
+ * -#BPMP_EOPNOTSUPP: not in production mode @n
*/
CMD_FMON_GEAR_GET = 3,
- CMD_FMON_NUM,
+ /**
+ * @brief Return current status of FMON faults detected by FMON
+ * h/w or s/w since last invocation of this command.
+ * Clears fault status.
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_EBADCMD if subcommand is not supported @n
+ * -#BPMP_EINVAL: invalid fault type @n
+ * -#BPMP_ENODEV: invalid clk_id @n
+ * -#BPMP_ENOENT: no calibration data, uninitialized @n
+ * -#BPMP_ENOTSUP: avfs config not set @n
+ * -#BPMP_EOPNOTSUPP: not in production mode @n
+ */
+ CMD_FMON_FAULT_STS_GET = 4,
};
+/**
+ * @cond DEPRECATED
+ * Kept for backward compatibility
+ */
+#define CMD_FMON_NUM 4
+
+/** @endcond DEPRECATED */
+
+/**
+ * @defgroup fmon_fault_type FMON fault type
+ * @addtogroup fmon_fault_type
+ * @{
+ */
+/** @brief All detected FMON faults (h/w or s/w) */
+#define FMON_FAULT_TYPE_ALL 0U
+/** @brief FMON faults detected by h/w */
+#define FMON_FAULT_TYPE_HW 1U
+/** @brief FMON faults detected by s/w */
+#define FMON_FAULT_TYPE_SW 2U
+
+/** @} fmon_fault_type */
+
struct cmd_fmon_gear_clamp_request {
int32_t unused;
int64_t rate;
@@ -2587,6 +3223,14 @@ struct cmd_fmon_gear_get_response {
int64_t rate;
} BPMP_ABI_PACKED;
+struct cmd_fmon_fault_sts_get_request {
+ uint32_t fault_type; /**< @ref fmon_fault_type */
+} BPMP_ABI_PACKED;
+
+struct cmd_fmon_fault_sts_get_response {
+ uint32_t fault_sts;
+} BPMP_ABI_PACKED;
+
/**
* @ingroup FMON
* @brief Request with #MRQ_FMON
@@ -2601,9 +3245,9 @@ struct cmd_fmon_gear_get_response {
* |CMD_FMON_GEAR_CLAMP |fmon_gear_clamp |
* |CMD_FMON_GEAR_FREE |- |
* |CMD_FMON_GEAR_GET |- |
+ * |CMD_FMON_FAULT_STS_GET |fmon_fault_sts_get |
*
*/
-
struct mrq_fmon_request {
/** @brief Sub-command and clock id concatenated to 32-bit word.
* - bits[31..24] is the sub-cmd.
@@ -2618,6 +3262,7 @@ struct mrq_fmon_request {
struct cmd_fmon_gear_free_request fmon_gear_free;
/** @private */
struct cmd_fmon_gear_get_request fmon_gear_get;
+ struct cmd_fmon_fault_sts_get_request fmon_fault_sts_get;
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
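
A minimal sketch (not part of this patch) of issuing CMD_FMON_FAULT_STS_GET. The name of the packed command word is not visible in this hunk and is assumed to be "cmd_and_id"; EXAMPLE_CLK_ID and the bit layout of the low bits are likewise assumptions based on the comment above:

/* Illustrative sketch only. */
static void example_fmon_fault_sts_get(void)
{
	struct mrq_fmon_request req = { 0 };
	uint32_t clk_id = EXAMPLE_CLK_ID;           /* monitored clock id */

	/* bits [31..24]: sub-command; remaining bits assumed to carry the clock id */
	req.cmd_and_id = (CMD_FMON_FAULT_STS_GET << 24) | (clk_id & 0xffffffU);
	req.fmon_fault_sts_get.fault_type = FMON_FAULT_TYPE_ALL;

	/* ... transfer over the BPMP channel (elided); the response's
	 * fmon_fault_sts_get.fault_sts then reports (and clears) the faults.
	 */
}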
@@ -2633,6 +3278,7 @@ struct mrq_fmon_request {
* |CMD_FMON_GEAR_CLAMP |- |
* |CMD_FMON_GEAR_FREE |- |
* |CMD_FMON_GEAR_GET |fmon_gear_get |
+ * |CMD_FMON_FAULT_STS_GET |fmon_fault_sts_get |
*
*/
@@ -2643,11 +3289,12 @@ struct mrq_fmon_response {
/** @private */
struct cmd_fmon_gear_free_response fmon_gear_free;
struct cmd_fmon_gear_get_response fmon_gear_get;
+ struct cmd_fmon_fault_sts_get_response fmon_fault_sts_get;
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/** @} */
-/** @endcond */
+/** @endcond (bpmp_t194 || bpmp_t234) */
+/** @} FMON */
/**
* @ingroup MRQ_Codes
@@ -2655,7 +3302,7 @@ struct mrq_fmon_response {
* @brief Provide status information on faults reported by Error
* Collator (EC) to HSM.
*
- * * Platforms: T194 onwards
+ * * Platforms: T194
* @cond bpmp_t194
* * Initiators: CCPLEX
* * Targets: BPMP
@@ -2664,8 +3311,10 @@ struct mrq_fmon_response {
*
* @note This MRQ ABI is under construction, and subject to change
*
+ * @endcond bpmp_t194
* @addtogroup EC
* @{
+ * @cond bpmp_t194
*/
enum {
/**
@@ -2676,7 +3325,7 @@ enum {
* -#BPMP_ENODEV if target EC is not owned by BPMP @n
* -#BPMP_EACCES if target EC power domain is turned off @n
* -#BPMP_EBADCMD if subcommand is not supported
- * @endcond
+ * @endcond DEPRECATED
*/
CMD_EC_STATUS_GET = 1, /* deprecated */
@@ -2787,7 +3436,8 @@ enum ec_registers_group {
#define EC_STATUS_FLAG_LAST_ERROR 0x0002U
/** @brief EC latent error flag */
#define EC_STATUS_FLAG_LATENT_ERROR 0x0004U
-/** @} */
+
+/** @} bpmp_ec_status_flags */
/**
* @defgroup bpmp_ec_desc_flags EC Descriptor Flags
@@ -2798,7 +3448,8 @@ enum ec_registers_group {
#define EC_DESC_FLAG_RESOLVED 0x0001U
/** @brief EC descriptor failed to retrieve id flag */
#define EC_DESC_FLAG_NO_ID 0x0002U
-/** @} */
+
+/** @} bpmp_ec_desc_flags */
/**
* |error type | fmon_clk_id values |
@@ -2810,14 +3461,18 @@ struct ec_err_fmon_desc {
uint16_t desc_flags;
/** @brief FMON monitored clock id */
uint16_t fmon_clk_id;
- /** @brief Bitmask of @ref bpmp_fmon_faults_flags */
+ /**
+ * @brief Bitmask of fault flags
+ *
+ * @ref bpmp_fmon_faults_flags
+ */
uint32_t fmon_faults;
/** @brief FMON faults access error */
int32_t fmon_access_error;
} BPMP_ABI_PACKED;
/**
- * |error type | vmon_adc_id values |
+ * | error type | vmon_adc_id values |
* |---------------------------------|---------------------------|
* |@ref EC_ERR_TYPE_VOLTAGE_MONITOR |@ref bpmp_adc_ids |
*/
@@ -2826,16 +3481,16 @@ struct ec_err_vmon_desc {
uint16_t desc_flags;
/** @brief VMON rail adc id */
uint16_t vmon_adc_id;
- /** @brief Bitmask of @ref bpmp_vmon_faults_flags */
+ /** @brief Bitmask of bpmp_vmon_faults_flags */
uint32_t vmon_faults;
/** @brief VMON faults access error */
int32_t vmon_access_error;
} BPMP_ABI_PACKED;
/**
- * |error type | reg_id values |
- * |---------------------------------|---------------------------|
- * |@ref EC_ERR_TYPE_REGISTER_PARITY |@ref bpmp_ec_registers_ids |
+ * |error type | reg_id values |
+ * |---------------------------------|-----------------------|
+ * |@ref EC_ERR_TYPE_REGISTER_PARITY | bpmp_ec_registers_ids |
*/
struct ec_err_reg_parity_desc {
/** @brief Bitmask of @ref bpmp_ec_desc_flags */
@@ -2847,10 +3502,10 @@ struct ec_err_reg_parity_desc {
} BPMP_ABI_PACKED;
/**
- * |error type | err_source_id values |
- * |--------------------------------- |--------------------------|
- * |@ref EC_ERR_TYPE_SW_CORRECTABLE | @ref bpmp_ec_ce_swd_ids |
- * |@ref EC_ERR_TYPE_SW_UNCORRECTABLE | @ref bpmp_ec_ue_swd_ids |
+ * |error type | err_source_id values |
+ * |--------------------------------- |----------------------|
+ * |@ref EC_ERR_TYPE_SW_CORRECTABLE | bpmp_ec_ce_swd_ids |
+ * |@ref EC_ERR_TYPE_SW_UNCORRECTABLE | bpmp_ec_ue_swd_ids |
*/
struct ec_err_sw_error_desc {
/** @brief Bitmask of @ref bpmp_ec_desc_flags */
@@ -2862,15 +3517,15 @@ struct ec_err_sw_error_desc {
} BPMP_ABI_PACKED;
/**
- * |error type | err_source_id values |
- * |----------------------------------------|---------------------------|
- * |@ref EC_ERR_TYPE_PARITY_INTERNAL |@ref bpmp_ec_ipath_ids |
- * |@ref EC_ERR_TYPE_ECC_SEC_INTERNAL |@ref bpmp_ec_ipath_ids |
- * |@ref EC_ERR_TYPE_ECC_DED_INTERNAL |@ref bpmp_ec_ipath_ids |
- * |@ref EC_ERR_TYPE_COMPARATOR |@ref bpmp_ec_comparator_ids|
- * |@ref EC_ERR_TYPE_PARITY_SRAM |@ref bpmp_clock_ids |
- * |@ref EC_ERR_TYPE_OTHER_HW_CORRECTABLE |@ref bpmp_ec_misc_hwd_ids |
- * |@ref EC_ERR_TYPE_OTHER_HW_UNCORRECTABLE |@ref bpmp_ec_misc_hwd_ids |
+ * |error type | err_source_id values |
+ * |----------------------------------------|------------------------|
+ * |@ref EC_ERR_TYPE_PARITY_INTERNAL | bpmp_ec_ipath_ids |
+ * |@ref EC_ERR_TYPE_ECC_SEC_INTERNAL | bpmp_ec_ipath_ids |
+ * |@ref EC_ERR_TYPE_ECC_DED_INTERNAL | bpmp_ec_ipath_ids |
+ * |@ref EC_ERR_TYPE_COMPARATOR | bpmp_ec_comparator_ids|
+ * |@ref EC_ERR_TYPE_OTHER_HW_CORRECTABLE | bpmp_ec_misc_hwd_ids |
+ * |@ref EC_ERR_TYPE_OTHER_HW_UNCORRECTABLE | bpmp_ec_misc_hwd_ids |
+ * |@ref EC_ERR_TYPE_PARITY_SRAM | bpmp_clock_ids |
*/
struct ec_err_simple_desc {
/** @brief Bitmask of @ref bpmp_ec_desc_flags */
@@ -2917,7 +3572,7 @@ struct cmd_ec_status_get_response {
/** @brief EC error descriptors */
union ec_err_desc error_descs[EC_ERR_STATUS_DESC_MAX_NUM];
} BPMP_ABI_PACKED;
-/** @endcond */
+/** @endcond DEPRECATED */
struct cmd_ec_status_ex_get_response {
/** @brief Target EC id (the same id received with request). */
@@ -2955,7 +3610,7 @@ struct cmd_ec_status_ex_get_response {
* |sub-command |payload |
* |----------------------------|-----------------------|
* |@ref CMD_EC_STATUS_GET |ec_status_get |
- * @endcond
+ * @endcond DEPRECATED
*
* |sub-command |payload |
* |----------------------------|-----------------------|
@@ -2983,7 +3638,7 @@ struct mrq_ec_request {
* |sub-command |payload |
* |----------------------------|------------------------|
* |@ref CMD_EC_STATUS_GET |ec_status_get |
- * @endcond
+ * @endcond DEPRECATED
*
* |sub-command |payload |
* |----------------------------|------------------------|
@@ -2997,13 +3652,264 @@ struct mrq_ec_response {
* @cond DEPRECATED
*/
struct cmd_ec_status_get_response ec_status_get;
- /** @endcond */
+ /** @endcond DEPRECATED */
struct cmd_ec_status_ex_get_response ec_status_ex_get;
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/** @} */
-/** @endcond */
+/** @endcond bpmp_t194 */
+/** @} EC */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_TELEMETRY
+ * @brief Get address of memory buffer refreshed with recently sampled
+ * telemetry data
+ *
+ * * Platforms: TH500 onwards
+ * @cond bpmp_th500
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: N/A
+ * * Response Payload: @ref mrq_telemetry_response
+ * @addtogroup Telemetry
+ * @{
+ */
+
+/**
+ * @brief Response to #MRQ_TELEMETRY
+ *
+ * mrq_response::err is
+ * * 0: Telemetry data is available at returned address
+ * * -#BPMP_EACCES: MRQ master is not allowed to request buffer refresh
+ * * -#BPMP_ENAVAIL: Telemetry buffer cannot be refreshed via this MRQ channel
+ * * -#BPMP_ENOTSUP: Telemetry buffer is not supported by BPMP-FW
+ * * -#BPMP_ENODEV: Telemetry mrq is not supported by BPMP-FW
+ */
+struct mrq_telemetry_response {
+ /** @brief Physical address of telemetry data buffer */
+ uint64_t data_buf_addr; /**< see @ref bpmp_telemetry_layout */
+} BPMP_ABI_PACKED;
+
+/** @} Telemetry */
+/** @endcond bpmp_th500 */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_PWR_LIMIT
+ * @brief Control power limits.
+ *
+ * * Platforms: TH500 onwards
+ * @cond bpmp_th500
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_pwr_limit_request
+ * * Response Payload: @ref mrq_pwr_limit_response
+ *
+ * @addtogroup Pwrlimit
+ * @{
+ */
+enum mrq_pwr_limit_cmd {
+ /**
+ * @brief Check whether the BPMP-FW supports the specified
+ * command
+ *
+ * mrq_response::err is 0 if the specified request is
+ * supported and -#BPMP_ENODEV otherwise.
+ */
+ CMD_PWR_LIMIT_QUERY_ABI = 0,
+
+ /**
+ * @brief Set power limit
+ *
+ * mrq_response::err is
+ * * 0: Success
+ * * -#BPMP_ENODEV: Pwr limit mrq is not supported by BPMP-FW
+ * * -#BPMP_ENAVAIL: Invalid request parameters
+ * * -#BPMP_EACCES: Request is not accepted
+ */
+ CMD_PWR_LIMIT_SET = 1,
+
+ /**
+ * @brief Get power limit setting
+ *
+ * mrq_response::err is
+ * * 0: Success
+ * * -#BPMP_ENODEV: Pwr limit mrq is not supported by BPMP-FW
+ * * -#BPMP_ENAVAIL: Invalid request parameters
+ */
+ CMD_PWR_LIMIT_GET = 2,
+
+ /**
+ * @brief Get current power cap
+ *
+ * mrq_response::err is
+ * * 0: Success
+ * * -#BPMP_ENODEV: Pwr limit mrq is not supported by BPMP-FW
+ * * -#BPMP_ENAVAIL: Invalid request parameters
+ */
+ CMD_PWR_LIMIT_CURR_CAP = 3,
+};
+
+/**
+ * @defgroup bpmp_pwr_limit_type PWR_LIMIT TYPEs
+ * @{
+ */
+/** @brief Limit value specifies target cap */
+#define PWR_LIMIT_TYPE_TARGET_CAP 0U
+/** @brief Limit value specifies maximum possible target cap */
+#define PWR_LIMIT_TYPE_BOUND_MAX 1U
+/** @brief Limit value specifies minimum possible target cap */
+#define PWR_LIMIT_TYPE_BOUND_MIN 2U
+/** @brief Number of limit types supported by mrq interface */
+#define PWR_LIMIT_TYPE_NUM 3U
+
+/** @} bpmp_pwr_limit_type */
+
+/**
+ * @brief Request data for #MRQ_PWR_LIMIT command CMD_PWR_LIMIT_QUERY_ABI
+ */
+struct cmd_pwr_limit_query_abi_request {
+ uint32_t cmd_code; /**< @ref mrq_pwr_limit_cmd */
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_PWR_LIMIT command CMD_PWR_LIMIT_SET
+ *
+ * Set the specified limit of the specified type from the specified source. A
+ * successful request means that the specified value has been accepted as an input
+ * to arbitration with other sources' settings for the same limit of the same type.
+ * A zero limit is ignored by the arbitration (i.e., it indicates "no limit set").
+ */
+struct cmd_pwr_limit_set_request {
+ uint32_t limit_id; /**< @ref bpmp_pwr_limit_id */
+ uint32_t limit_src; /**< @ref bpmp_pwr_limit_src */
+ uint32_t limit_type; /**< @ref bpmp_pwr_limit_type */
+ uint32_t limit_setting;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_PWR_LIMIT command CMD_PWR_LIMIT_GET
+ *
+ * Get the limit value of the specified type that was previously set from the
+ * specified source.
+ */
+struct cmd_pwr_limit_get_request {
+ uint32_t limit_id; /**< @ref bpmp_pwr_limit_id */
+ uint32_t limit_src; /**< @ref bpmp_pwr_limit_src */
+ uint32_t limit_type; /**< @ref bpmp_pwr_limit_type */
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response data for #MRQ_PWR_LIMIT command CMD_PWR_LIMIT_GET
+ */
+struct cmd_pwr_limit_get_response {
+ uint32_t limit_setting;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_PWR_LIMIT command CMD_PWR_LIMIT_CURR_CAP
+ *
+ * For the specified limit, get the current power cap aggregated from all sources.
+ */
+struct cmd_pwr_limit_curr_cap_request {
+ uint32_t limit_id; /**< @ref bpmp_pwr_limit_id */
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response data for #MRQ_PWR_LIMIT command CMD_PWR_LIMIT_CURR_CAP
+ */
+struct cmd_pwr_limit_curr_cap_response {
+ uint32_t curr_cap;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request with #MRQ_PWR_LIMIT
+ *
+ * |sub-command |payload |
+ * |----------------------------|---------------------------------|
+ * |CMD_PWR_LIMIT_QUERY_ABI | cmd_pwr_limit_query_abi_request |
+ * |CMD_PWR_LIMIT_SET | cmd_pwr_limit_set_request |
+ * |CMD_PWR_LIMIT_GET | cmd_pwr_limit_get_request |
+ * |CMD_PWR_LIMIT_CURR_CAP | cmd_pwr_limit_curr_cap_request |
+ */
+struct mrq_pwr_limit_request {
+ uint32_t cmd;
+ union {
+ struct cmd_pwr_limit_query_abi_request pwr_limit_query_abi_req;
+ struct cmd_pwr_limit_set_request pwr_limit_set_req;
+ struct cmd_pwr_limit_get_request pwr_limit_get_req;
+ struct cmd_pwr_limit_curr_cap_request pwr_limit_curr_cap_req;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response to MRQ_PWR_LIMIT
+ *
+ * |sub-command |payload |
+ * |----------------------------|---------------------------------|
+ * |CMD_PWR_LIMIT_QUERY_ABI | - |
+ * |CMD_PWR_LIMIT_SET | - |
+ * |CMD_PWR_LIMIT_GET | cmd_pwr_limit_get_response |
+ * |CMD_PWR_LIMIT_CURR_CAP | cmd_pwr_limit_curr_cap_response |
+ */
+struct mrq_pwr_limit_response {
+ union {
+ struct cmd_pwr_limit_get_response pwr_limit_get_rsp;
+ struct cmd_pwr_limit_curr_cap_response pwr_limit_curr_cap_rsp;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
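
A minimal sketch (not part of this patch) of requesting a target power cap. EXAMPLE_LIMIT_ID and EXAMPLE_LIMIT_SRC are placeholders (see bpmp_pwr_limit_id / bpmp_pwr_limit_src), and the limit_setting value is arbitrary, in whatever unit the limit defines:

/* Illustrative sketch only. */
static void example_pwr_limit_set(void)
{
	struct mrq_pwr_limit_request req = { 0 };

	req.cmd = CMD_PWR_LIMIT_SET;
	req.pwr_limit_set_req.limit_id = EXAMPLE_LIMIT_ID;
	req.pwr_limit_set_req.limit_src = EXAMPLE_LIMIT_SRC;
	req.pwr_limit_set_req.limit_type = PWR_LIMIT_TYPE_TARGET_CAP;
	req.pwr_limit_set_req.limit_setting = 250000;  /* 0 would mean "no limit set" */

	/* ... transfer over the BPMP channel (elided) ... */
}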
+
+/** @} Pwrlimit */
+/** @endcond bpmp_th500 */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_GEARS
+ * @brief Get thresholds for NDIV offset switching
+ *
+ * * Platforms: TH500 onwards
+ * @cond bpmp_th500
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: N/A
+ * * Response Payload: @ref mrq_gears_response
+ * @addtogroup Gears
+ * @{
+ */
+
+/**
+ * @brief Response to #MRQ_GEARS
+ *
+ * Used by the sender of an #MRQ_GEARS message to request thresholds
+ * for NDIV offset switching.
+ *
+ * The mrq_gears_response::ncpu array defines four thresholds, in units of
+ * the number of online CPUs, used for choosing between five different
+ * NDIV offset settings for the CCPLEX cluster NAFLLs:
+ *
+ * 1. If number of online CPUs < ncpu[0] use offset0
+ * 2. If number of online CPUs < ncpu[1] use offset1
+ * 3. If number of online CPUs < ncpu[2] use offset2
+ * 4. If number of online CPUs < ncpu[3] use offset3
+ * 5. If number of online CPUs >= ncpu[3] disable offsetting
+ *
+ * For TH500, the mrq_gears_response::ncpu array has four valid entries.
+ *
+ * mrq_response::err is
+ * * 0: gears defined and response data valid
+ * * -#BPMP_ENODEV: MRQ is not supported by BPMP-FW
+ * * -#BPMP_EACCES: Operation not permitted for the MRQ master
+ * * -#BPMP_ENAVAIL: NDIV offsetting is disabled
+ */
+struct mrq_gears_response {
+ /** @brief number of online CPUs for each gear */
+ uint32_t ncpu[16];
+} BPMP_ABI_PACKED;
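
A minimal sketch (not part of this patch) of the selection rule spelled out above, mapping the number of online CPUs to one of the five NDIV offset "gears"; a negative return value stands for "disable offsetting":

/* Illustrative sketch only. */
static int example_pick_ndiv_offset(const struct mrq_gears_response *resp,
				    uint32_t online_cpus)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		if (online_cpus < resp->ncpu[i])
			return i;       /* use offset<i> */

	return -1;                      /* >= ncpu[3]: disable offsetting */
}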
+
+/** @} Gears */
+/** @endcond bpmp_th500 */
/**
* @addtogroup Error_Codes
@@ -3047,12 +3953,18 @@ struct mrq_ec_response {
#define BPMP_ENOSYS 38
/** @brief Invalid slot */
#define BPMP_EBADSLT 57
+/** @brief Invalid message */
+#define BPMP_EBADMSG 77
+/** @brief Operation not supported */
+#define BPMP_EOPNOTSUPP 95
+/** @brief Targeted resource not available */
+#define BPMP_ENAVAIL 119
/** @brief Not supported */
#define BPMP_ENOTSUP 134
/** @brief No such device or address */
#define BPMP_ENXIO 140
-/** @} */
+/** @} Error_Codes */
#if defined(BPMP_ABI_CHECKS)
#include "bpmp_abi_checks.h"
diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h
index f2604e99af09..5842e38bb288 100644
--- a/include/soc/tegra/bpmp.h
+++ b/include/soc/tegra/bpmp.h
@@ -6,6 +6,7 @@
#ifndef __SOC_TEGRA_BPMP_H
#define __SOC_TEGRA_BPMP_H
+#include <linux/iosys-map.h>
#include <linux/mailbox_client.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
@@ -36,10 +37,22 @@ struct tegra_bpmp_mb_data {
u8 data[MSG_DATA_MIN_SZ];
} __packed;
+#define tegra_bpmp_mb_read(dst, mb, size) \
+ iosys_map_memcpy_from(dst, mb, offsetof(struct tegra_bpmp_mb_data, data), size)
+
+#define tegra_bpmp_mb_write(mb, src, size) \
+ iosys_map_memcpy_to(mb, offsetof(struct tegra_bpmp_mb_data, data), src, size)
+
+#define tegra_bpmp_mb_read_field(mb, field) \
+ iosys_map_rd_field(mb, 0, struct tegra_bpmp_mb_data, field)
+
+#define tegra_bpmp_mb_write_field(mb, field, value) \
+ iosys_map_wr_field(mb, 0, struct tegra_bpmp_mb_data, field, value)
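
A rough usage sketch (not from this patch): with the channel buffers now described by iosys_map, message words and payloads are accessed through the helpers above rather than by dereferencing tegra_bpmp_mb_data pointers directly. Here "channel", "data" and "size" stand in for a driver's own state:

	u32 mrq_code = tegra_bpmp_mb_read_field(&channel->ib, code);

	tegra_bpmp_mb_write_field(&channel->ob, code, MRQ_PING);
	tegra_bpmp_mb_write(&channel->ob, data, size);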
+
struct tegra_bpmp_channel {
struct tegra_bpmp *bpmp;
- struct tegra_bpmp_mb_data *ib;
- struct tegra_bpmp_mb_data *ob;
+ struct iosys_map ib;
+ struct iosys_map ob;
struct completion completion;
struct tegra_ivc *ivc;
unsigned int index;
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index 977c334136e9..a63de5da8124 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -34,6 +34,20 @@ enum tegra_revision {
TEGRA_REVISION_MAX,
};
+enum tegra_platform {
+ TEGRA_PLATFORM_SILICON = 0,
+ TEGRA_PLATFORM_QT,
+ TEGRA_PLATFORM_SYSTEM_FPGA,
+ TEGRA_PLATFORM_UNIT_FPGA,
+ TEGRA_PLATFORM_ASIM_QT,
+ TEGRA_PLATFORM_ASIM_LINSIM,
+ TEGRA_PLATFORM_DSIM_ASIM_LINSIM,
+ TEGRA_PLATFORM_VERIFICATION_SIMULATION,
+ TEGRA_PLATFORM_VDK,
+ TEGRA_PLATFORM_VSP,
+ TEGRA_PLATFORM_MAX,
+};
+
struct tegra_sku_info {
int sku_id;
int cpu_process_id;
@@ -47,6 +61,7 @@ struct tegra_sku_info {
int gpu_speedo_id;
int gpu_speedo_value;
enum tegra_revision revision;
+ enum tegra_platform platform;
};
#ifdef CONFIG_ARCH_TEGRA
diff --git a/include/soc/tegra/ivc.h b/include/soc/tegra/ivc.h
index 4aeb77cc22c5..be45d5f5adea 100644
--- a/include/soc/tegra/ivc.h
+++ b/include/soc/tegra/ivc.h
@@ -4,9 +4,11 @@
*/
#ifndef __TEGRA_IVC_H
+#define __TEGRA_IVC_H
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/iosys-map.h>
#include <linux/types.h>
struct tegra_ivc_header;
@@ -15,7 +17,7 @@ struct tegra_ivc {
struct device *peer;
struct {
- struct tegra_ivc_header *channel;
+ struct iosys_map map;
unsigned int position;
dma_addr_t phys;
} rx, tx;
@@ -36,7 +38,7 @@ struct tegra_ivc {
*
 * Returns 0 on success, with the frame made available via the provided iosys_map, or a negative error code.
*/
-void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc);
+int tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map);
/**
* tegra_ivc_read_advance - Advance the read queue
@@ -56,7 +58,7 @@ int tegra_ivc_read_advance(struct tegra_ivc *ivc);
*
 * Returns 0 on success, with the frame made available via the provided iosys_map, or a negative error code.
*/
-void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc);
+int tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map);
/**
* tegra_ivc_write_advance - Advance the write queue
@@ -91,8 +93,8 @@ void tegra_ivc_reset(struct tegra_ivc *ivc);
size_t tegra_ivc_align(size_t size);
unsigned tegra_ivc_total_queue_size(unsigned queue_size);
-int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
- dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
+int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, const struct iosys_map *rx,
+ dma_addr_t rx_phys, const struct iosys_map *tx, dma_addr_t tx_phys,
unsigned int num_frames, size_t frame_size,
void (*notify)(struct tegra_ivc *ivc, void *data),
void *data);
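
An illustrative sketch (not from this patch) of the reworked frame API: frames are now reached through an iosys_map instead of a raw pointer, and the helpers from linux/iosys-map.h are used to copy data out:

/* Illustrative sketch only. */
static int example_ivc_read(struct tegra_ivc *ivc, void *buf, size_t len)
{
	struct iosys_map frame;
	int err;

	err = tegra_ivc_read_get_next_frame(ivc, &frame);
	if (err < 0)
		return err;

	iosys_map_memcpy_from(buf, &frame, 0, len);

	return tegra_ivc_read_advance(ivc);
}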
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index d186bccd125d..aadb845d281d 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -118,9 +118,9 @@ enum tegra_io_pad {
TEGRA_IO_PAD_PEX_CLK_2,
TEGRA_IO_PAD_PEX_CNTRL,
TEGRA_IO_PAD_PEX_CTL2,
- TEGRA_IO_PAD_PEX_L0_RST_N,
- TEGRA_IO_PAD_PEX_L1_RST_N,
- TEGRA_IO_PAD_PEX_L5_RST_N,
+ TEGRA_IO_PAD_PEX_L0_RST,
+ TEGRA_IO_PAD_PEX_L1_RST,
+ TEGRA_IO_PAD_PEX_L5_RST,
TEGRA_IO_PAD_PWR_CTL,
TEGRA_IO_PAD_SDMMC1,
TEGRA_IO_PAD_SDMMC1_HV,
diff --git a/include/sound/control.h b/include/sound/control.h
index eae443ba79ba..cc3dcc6cfb0f 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -138,6 +138,7 @@ int snd_ctl_remove(struct snd_card * card, struct snd_kcontrol * kcontrol);
int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol, bool add_on_replace);
int snd_ctl_remove_id(struct snd_card * card, struct snd_ctl_elem_id *id);
int snd_ctl_rename_id(struct snd_card * card, struct snd_ctl_elem_id *src_id, struct snd_ctl_elem_id *dst_id);
+void snd_ctl_rename(struct snd_card *card, struct snd_kcontrol *kctl, const char *name);
int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id, int active);
struct snd_kcontrol *snd_ctl_find_numid(struct snd_card * card, unsigned int numid);
struct snd_kcontrol *snd_ctl_find_id(struct snd_card * card, struct snd_ctl_elem_id *id);
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index a0b827f0c2f6..25e049f44178 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -177,6 +177,7 @@ void asoc_simple_convert_fixup(struct asoc_simple_data *data,
struct snd_pcm_hw_params *params);
void asoc_simple_parse_convert(struct device_node *np, char *prefix,
struct asoc_simple_data *data);
+bool asoc_simple_is_convert_required(const struct asoc_simple_data *data);
int asoc_simple_parse_routing(struct snd_soc_card *card,
char *prefix);
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
index 83fd81c82e4c..9fbd3832bcdc 100644
--- a/include/sound/sof/dai.h
+++ b/include/sound/sof/dai.h
@@ -84,8 +84,8 @@ enum sof_ipc_dai_type {
SOF_DAI_AMD_BT, /**< AMD ACP BT*/
SOF_DAI_AMD_SP, /**< AMD ACP SP */
SOF_DAI_AMD_DMIC, /**< AMD ACP DMIC */
- SOF_DAI_AMD_HS, /**< Amd HS */
SOF_DAI_MEDIATEK_AFE, /**< Mediatek AFE */
+ SOF_DAI_AMD_HS, /**< Amd HS */
};
/* general purpose DAI configuration */
diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h
index 65e86e4e9fd8..75193850ead0 100644
--- a/include/sound/sof/info.h
+++ b/include/sound/sof/info.h
@@ -36,6 +36,10 @@ enum sof_ipc_ext_data {
SOF_IPC_EXT_USER_ABI_INFO = 4,
};
+/* Build u32 number in format MMmmmppp */
+#define SOF_FW_VER(MAJOR, MINOR, PATCH) ((uint32_t)( \
+ ((MAJOR) << 24) | ((MINOR) << 12) | (PATCH)))
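
For example (illustrative only), with the MMmmmppp layout above a 2.1.0 firmware version packs as 0x02001000:

/* Illustrative only: 2 -> MM, 1 -> mmm, 0 -> ppp */
#define SOF_FW_VER_EXAMPLE	SOF_FW_VER(2, 1, 0)	/* == 0x02001000 */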
+
/* FW version - SOF_IPC_GLB_VERSION */
struct sof_ipc_fw_version {
struct sof_ipc_hdr hdr;
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index ed50e81174bf..0bce0b4ff2fa 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1993,12 +1993,11 @@ TRACE_EVENT(btrfs_set_extent_bit,
TP_fast_assign_btrfs(tree->fs_info,
__entry->owner = tree->owner;
- if (tree->private_data) {
- const struct inode *inode = tree->private_data;
+ if (tree->inode) {
+ const struct btrfs_inode *inode = tree->inode;
- __entry->ino = btrfs_ino(BTRFS_I(inode));
- __entry->rootid =
- BTRFS_I(inode)->root->root_key.objectid;
+ __entry->ino = btrfs_ino(inode);
+ __entry->rootid = inode->root->root_key.objectid;
} else {
__entry->ino = 0;
__entry->rootid = 0;
@@ -2032,12 +2031,11 @@ TRACE_EVENT(btrfs_clear_extent_bit,
TP_fast_assign_btrfs(tree->fs_info,
__entry->owner = tree->owner;
- if (tree->private_data) {
- const struct inode *inode = tree->private_data;
+ if (tree->inode) {
+ const struct btrfs_inode *inode = tree->inode;
- __entry->ino = btrfs_ino(BTRFS_I(inode));
- __entry->rootid =
- BTRFS_I(inode)->root->root_key.objectid;
+ __entry->ino = btrfs_ino(inode);
+ __entry->rootid = inode->root->root_key.objectid;
} else {
__entry->ino = 0;
__entry->rootid = 0;
@@ -2072,12 +2070,11 @@ TRACE_EVENT(btrfs_convert_extent_bit,
TP_fast_assign_btrfs(tree->fs_info,
__entry->owner = tree->owner;
- if (tree->private_data) {
- const struct inode *inode = tree->private_data;
+ if (tree->inode) {
+ const struct btrfs_inode *inode = tree->inode;
- __entry->ino = btrfs_ino(BTRFS_I(inode));
- __entry->rootid =
- BTRFS_I(inode)->root->root_key.objectid;
+ __entry->ino = btrfs_ino(inode);
+ __entry->rootid = inode->root->root_key.objectid;
} else {
__entry->ino = 0;
__entry->rootid = 0;
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
index d8d4d73fe7b6..cf4b98b9a9ed 100644
--- a/include/trace/events/cachefiles.h
+++ b/include/trace/events/cachefiles.h
@@ -428,16 +428,18 @@ TRACE_EVENT(cachefiles_vol_coherency,
);
TRACE_EVENT(cachefiles_prep_read,
- TP_PROTO(struct netfs_io_subrequest *sreq,
+ TP_PROTO(struct cachefiles_object *obj,
+ loff_t start,
+ size_t len,
+ unsigned short flags,
enum netfs_io_source source,
enum cachefiles_prepare_read_trace why,
- ino_t cache_inode),
+ ino_t cache_inode, ino_t netfs_inode),
- TP_ARGS(sreq, source, why, cache_inode),
+ TP_ARGS(obj, start, len, flags, source, why, cache_inode, netfs_inode),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(unsigned short, index )
+ __field(unsigned int, obj )
__field(unsigned short, flags )
__field(enum netfs_io_source, source )
__field(enum cachefiles_prepare_read_trace, why )
@@ -448,19 +450,18 @@ TRACE_EVENT(cachefiles_prep_read,
),
TP_fast_assign(
- __entry->rreq = sreq->rreq->debug_id;
- __entry->index = sreq->debug_index;
- __entry->flags = sreq->flags;
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->flags = flags;
__entry->source = source;
__entry->why = why;
- __entry->len = sreq->len;
- __entry->start = sreq->start;
- __entry->netfs_inode = sreq->rreq->inode->i_ino;
+ __entry->len = len;
+ __entry->start = start;
+ __entry->netfs_inode = netfs_inode;
__entry->cache_inode = cache_inode;
),
- TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx ni=%x B=%x",
- __entry->rreq, __entry->index,
+ TP_printk("o=%08x %s %s f=%02x s=%llx %zx ni=%x B=%x",
+ __entry->obj,
__print_symbolic(__entry->source, netfs_sreq_sources),
__print_symbolic(__entry->why, cachefiles_prepare_read_traces),
__entry->flags,
diff --git a/include/trace/events/cxl.h b/include/trace/events/cxl.h
new file mode 100644
index 000000000000..ad085a2534ef
--- /dev/null
+++ b/include/trace/events/cxl.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cxl
+
+#if !defined(_CXL_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _CXL_EVENTS_H
+
+#include <linux/tracepoint.h>
+
+#define CXL_HEADERLOG_SIZE SZ_512
+#define CXL_HEADERLOG_SIZE_U32 SZ_512 / sizeof(u32)
+
+#define CXL_RAS_UC_CACHE_DATA_PARITY BIT(0)
+#define CXL_RAS_UC_CACHE_ADDR_PARITY BIT(1)
+#define CXL_RAS_UC_CACHE_BE_PARITY BIT(2)
+#define CXL_RAS_UC_CACHE_DATA_ECC BIT(3)
+#define CXL_RAS_UC_MEM_DATA_PARITY BIT(4)
+#define CXL_RAS_UC_MEM_ADDR_PARITY BIT(5)
+#define CXL_RAS_UC_MEM_BE_PARITY BIT(6)
+#define CXL_RAS_UC_MEM_DATA_ECC BIT(7)
+#define CXL_RAS_UC_REINIT_THRESH BIT(8)
+#define CXL_RAS_UC_RSVD_ENCODE BIT(9)
+#define CXL_RAS_UC_POISON BIT(10)
+#define CXL_RAS_UC_RECV_OVERFLOW BIT(11)
+#define CXL_RAS_UC_INTERNAL_ERR BIT(14)
+#define CXL_RAS_UC_IDE_TX_ERR BIT(15)
+#define CXL_RAS_UC_IDE_RX_ERR BIT(16)
+
+#define show_uc_errs(status) __print_flags(status, " | ", \
+ { CXL_RAS_UC_CACHE_DATA_PARITY, "Cache Data Parity Error" }, \
+ { CXL_RAS_UC_CACHE_ADDR_PARITY, "Cache Address Parity Error" }, \
+ { CXL_RAS_UC_CACHE_BE_PARITY, "Cache Byte Enable Parity Error" }, \
+ { CXL_RAS_UC_CACHE_DATA_ECC, "Cache Data ECC Error" }, \
+ { CXL_RAS_UC_MEM_DATA_PARITY, "Memory Data Parity Error" }, \
+ { CXL_RAS_UC_MEM_ADDR_PARITY, "Memory Address Parity Error" }, \
+ { CXL_RAS_UC_MEM_BE_PARITY, "Memory Byte Enable Parity Error" }, \
+ { CXL_RAS_UC_MEM_DATA_ECC, "Memory Data ECC Error" }, \
+ { CXL_RAS_UC_REINIT_THRESH, "REINIT Threshold Hit" }, \
+ { CXL_RAS_UC_RSVD_ENCODE, "Received Unrecognized Encoding" }, \
+ { CXL_RAS_UC_POISON, "Received Poison From Peer" }, \
+ { CXL_RAS_UC_RECV_OVERFLOW, "Receiver Overflow" }, \
+ { CXL_RAS_UC_INTERNAL_ERR, "Component Specific Error" }, \
+ { CXL_RAS_UC_IDE_TX_ERR, "IDE Tx Error" }, \
+ { CXL_RAS_UC_IDE_RX_ERR, "IDE Rx Error" } \
+)
+
+TRACE_EVENT(cxl_aer_uncorrectable_error,
+ TP_PROTO(const struct device *dev, u32 status, u32 fe, u32 *hl),
+ TP_ARGS(dev, status, fe, hl),
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name(dev))
+ __field(u32, status)
+ __field(u32, first_error)
+ __array(u32, header_log, CXL_HEADERLOG_SIZE_U32)
+ ),
+ TP_fast_assign(
+ __assign_str(dev_name, dev_name(dev));
+ __entry->status = status;
+ __entry->first_error = fe;
+ /*
+ * Embed the 512B headerlog data for user app retrieval and
+ * parsing, but no need to print this in the trace buffer.
+ */
+ memcpy(__entry->header_log, hl, CXL_HEADERLOG_SIZE);
+ ),
+ TP_printk("%s: status: '%s' first_error: '%s'",
+ __get_str(dev_name),
+ show_uc_errs(__entry->status),
+ show_uc_errs(__entry->first_error)
+ )
+);
+
+#define CXL_RAS_CE_CACHE_DATA_ECC BIT(0)
+#define CXL_RAS_CE_MEM_DATA_ECC BIT(1)
+#define CXL_RAS_CE_CRC_THRESH BIT(2)
+#define CXL_RAS_CE_RETRY_THRESH			BIT(3)
+#define CXL_RAS_CE_CACHE_POISON BIT(4)
+#define CXL_RAS_CE_MEM_POISON BIT(5)
+#define CXL_RAS_CE_PHYS_LAYER_ERR BIT(6)
+
+#define show_ce_errs(status) __print_flags(status, " | ", \
+ { CXL_RAS_CE_CACHE_DATA_ECC, "Cache Data ECC Error" }, \
+ { CXL_RAS_CE_MEM_DATA_ECC, "Memory Data ECC Error" }, \
+ { CXL_RAS_CE_CRC_THRESH, "CRC Threshold Hit" }, \
+	{ CXL_RAS_CE_RETRY_THRESH, "Retry Threshold" }, \
+ { CXL_RAS_CE_CACHE_POISON, "Received Cache Poison From Peer" }, \
+ { CXL_RAS_CE_MEM_POISON, "Received Memory Poison From Peer" }, \
+ { CXL_RAS_CE_PHYS_LAYER_ERR, "Received Error From Physical Layer" } \
+)
+
+TRACE_EVENT(cxl_aer_correctable_error,
+ TP_PROTO(const struct device *dev, u32 status),
+ TP_ARGS(dev, status),
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name(dev))
+ __field(u32, status)
+ ),
+ TP_fast_assign(
+ __assign_str(dev_name, dev_name(dev));
+ __entry->status = status;
+ ),
+ TP_printk("%s: status: '%s'",
+ __get_str(dev_name), show_ce_errs(__entry->status)
+ )
+);
+
+#endif /* _CXL_EVENTS_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cxl
+#include <trace/define_trace.h>
diff --git a/include/trace/events/dlm.h b/include/trace/events/dlm.h
index da0eaae98fa3..37eb79e29b28 100644
--- a/include/trace/events/dlm.h
+++ b/include/trace/events/dlm.h
@@ -46,6 +46,56 @@
{ DLM_SBF_VALNOTVALID, "VALNOTVALID" }, \
{ DLM_SBF_ALTMODE, "ALTMODE" })
+#define show_lkb_flags(flags) __print_flags(flags, "|", \
+ { DLM_IFL_MSTCPY, "MSTCPY" }, \
+ { DLM_IFL_RESEND, "RESEND" }, \
+ { DLM_IFL_DEAD, "DEAD" }, \
+ { DLM_IFL_OVERLAP_UNLOCK, "OVERLAP_UNLOCK" }, \
+ { DLM_IFL_OVERLAP_CANCEL, "OVERLAP_CANCEL" }, \
+ { DLM_IFL_ENDOFLIFE, "ENDOFLIFE" }, \
+ { DLM_IFL_DEADLOCK_CANCEL, "DEADLOCK_CANCEL" }, \
+ { DLM_IFL_STUB_MS, "STUB_MS" }, \
+ { DLM_IFL_USER, "USER" }, \
+ { DLM_IFL_ORPHAN, "ORPHAN" })
+
+#define show_header_cmd(cmd) __print_symbolic(cmd, \
+ { DLM_MSG, "MSG"}, \
+ { DLM_RCOM, "RCOM"}, \
+ { DLM_OPTS, "OPTS"}, \
+ { DLM_ACK, "ACK"}, \
+ { DLM_FIN, "FIN"})
+
+#define show_message_version(version) __print_symbolic(version, \
+ { DLM_VERSION_3_1, "3.1"}, \
+ { DLM_VERSION_3_2, "3.2"})
+
+#define show_message_type(type) __print_symbolic(type, \
+ { DLM_MSG_REQUEST, "REQUEST"}, \
+ { DLM_MSG_CONVERT, "CONVERT"}, \
+ { DLM_MSG_UNLOCK, "UNLOCK"}, \
+ { DLM_MSG_CANCEL, "CANCEL"}, \
+ { DLM_MSG_REQUEST_REPLY, "REQUEST_REPLY"}, \
+ { DLM_MSG_CONVERT_REPLY, "CONVERT_REPLY"}, \
+ { DLM_MSG_UNLOCK_REPLY, "UNLOCK_REPLY"}, \
+ { DLM_MSG_CANCEL_REPLY, "CANCEL_REPLY"}, \
+ { DLM_MSG_GRANT, "GRANT"}, \
+ { DLM_MSG_BAST, "BAST"}, \
+ { DLM_MSG_LOOKUP, "LOOKUP"}, \
+ { DLM_MSG_REMOVE, "REMOVE"}, \
+ { DLM_MSG_LOOKUP_REPLY, "LOOKUP_REPLY"}, \
+ { DLM_MSG_PURGE, "PURGE"})
+
+#define show_rcom_type(type) __print_symbolic(type, \
+ { DLM_RCOM_STATUS, "STATUS"}, \
+ { DLM_RCOM_NAMES, "NAMES"}, \
+ { DLM_RCOM_LOOKUP, "LOOKUP"}, \
+ { DLM_RCOM_LOCK, "LOCK"}, \
+ { DLM_RCOM_STATUS_REPLY, "STATUS_REPLY"}, \
+ { DLM_RCOM_NAMES_REPLY, "NAMES_REPLY"}, \
+ { DLM_RCOM_LOOKUP_REPLY, "LOOKUP_REPLY"}, \
+ { DLM_RCOM_LOCK_REPLY, "LOCK_REPLY"})
+
+
/* note: we begin tracing dlm_lock_start() only if ls and lkb are found */
TRACE_EVENT(dlm_lock_start,
@@ -290,6 +340,259 @@ TRACE_EVENT(dlm_unlock_end,
);
+DECLARE_EVENT_CLASS(dlm_rcom_template,
+
+ TP_PROTO(uint32_t dst, uint32_t h_seq, const struct dlm_rcom *rc),
+
+ TP_ARGS(dst, h_seq, rc),
+
+ TP_STRUCT__entry(
+ __field(uint32_t, dst)
+ __field(uint32_t, h_seq)
+ __field(uint32_t, h_version)
+ __field(uint32_t, h_lockspace)
+ __field(uint32_t, h_nodeid)
+ __field(uint16_t, h_length)
+ __field(uint8_t, h_cmd)
+ __field(uint32_t, rc_type)
+ __field(int32_t, rc_result)
+ __field(uint64_t, rc_id)
+ __field(uint64_t, rc_seq)
+ __field(uint64_t, rc_seq_reply)
+ __dynamic_array(unsigned char, rc_buf,
+ le16_to_cpu(rc->rc_header.h_length) - sizeof(*rc))
+ ),
+
+ TP_fast_assign(
+ __entry->dst = dst;
+ __entry->h_seq = h_seq;
+ __entry->h_version = le32_to_cpu(rc->rc_header.h_version);
+ __entry->h_lockspace = le32_to_cpu(rc->rc_header.u.h_lockspace);
+ __entry->h_nodeid = le32_to_cpu(rc->rc_header.h_nodeid);
+ __entry->h_length = le16_to_cpu(rc->rc_header.h_length);
+ __entry->h_cmd = rc->rc_header.h_cmd;
+ __entry->rc_type = le32_to_cpu(rc->rc_type);
+ __entry->rc_result = le32_to_cpu(rc->rc_result);
+ __entry->rc_id = le64_to_cpu(rc->rc_id);
+ __entry->rc_seq = le64_to_cpu(rc->rc_seq);
+ __entry->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply);
+ memcpy(__get_dynamic_array(rc_buf), rc->rc_buf,
+ __get_dynamic_array_len(rc_buf));
+ ),
+
+ TP_printk("dst=%u h_seq=%u h_version=%s h_lockspace=%u h_nodeid=%u "
+ "h_length=%u h_cmd=%s rc_type=%s rc_result=%d "
+ "rc_id=%llu rc_seq=%llu rc_seq_reply=%llu "
+ "rc_buf=0x%s", __entry->dst, __entry->h_seq,
+ show_message_version(__entry->h_version),
+ __entry->h_lockspace, __entry->h_nodeid, __entry->h_length,
+ show_header_cmd(__entry->h_cmd),
+ show_rcom_type(__entry->rc_type),
+ __entry->rc_result, __entry->rc_id, __entry->rc_seq,
+ __entry->rc_seq_reply,
+ __print_hex_str(__get_dynamic_array(rc_buf),
+ __get_dynamic_array_len(rc_buf)))
+
+);
+
+DEFINE_EVENT(dlm_rcom_template, dlm_send_rcom,
+ TP_PROTO(uint32_t dst, uint32_t h_seq, const struct dlm_rcom *rc),
+ TP_ARGS(dst, h_seq, rc));
+
+DEFINE_EVENT(dlm_rcom_template, dlm_recv_rcom,
+ TP_PROTO(uint32_t dst, uint32_t h_seq, const struct dlm_rcom *rc),
+ TP_ARGS(dst, h_seq, rc));
+
+TRACE_EVENT(dlm_send_message,
+
+ TP_PROTO(uint32_t dst, uint32_t h_seq, const struct dlm_message *ms,
+ const void *name, int namelen),
+
+ TP_ARGS(dst, h_seq, ms, name, namelen),
+
+ TP_STRUCT__entry(
+ __field(uint32_t, dst)
+ __field(uint32_t, h_seq)
+ __field(uint32_t, h_version)
+ __field(uint32_t, h_lockspace)
+ __field(uint32_t, h_nodeid)
+ __field(uint16_t, h_length)
+ __field(uint8_t, h_cmd)
+ __field(uint32_t, m_type)
+ __field(uint32_t, m_nodeid)
+ __field(uint32_t, m_pid)
+ __field(uint32_t, m_lkid)
+ __field(uint32_t, m_remid)
+ __field(uint32_t, m_parent_lkid)
+ __field(uint32_t, m_parent_remid)
+ __field(uint32_t, m_exflags)
+ __field(uint32_t, m_sbflags)
+ __field(uint32_t, m_flags)
+ __field(uint32_t, m_lvbseq)
+ __field(uint32_t, m_hash)
+ __field(int32_t, m_status)
+ __field(int32_t, m_grmode)
+ __field(int32_t, m_rqmode)
+ __field(int32_t, m_bastmode)
+ __field(int32_t, m_asts)
+ __field(int32_t, m_result)
+ __dynamic_array(unsigned char, m_extra,
+ le16_to_cpu(ms->m_header.h_length) - sizeof(*ms))
+ __dynamic_array(unsigned char, res_name, namelen)
+ ),
+
+ TP_fast_assign(
+ __entry->dst = dst;
+ __entry->h_seq = h_seq;
+ __entry->h_version = le32_to_cpu(ms->m_header.h_version);
+ __entry->h_lockspace = le32_to_cpu(ms->m_header.u.h_lockspace);
+ __entry->h_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
+ __entry->h_length = le16_to_cpu(ms->m_header.h_length);
+ __entry->h_cmd = ms->m_header.h_cmd;
+ __entry->m_type = le32_to_cpu(ms->m_type);
+ __entry->m_nodeid = le32_to_cpu(ms->m_nodeid);
+ __entry->m_pid = le32_to_cpu(ms->m_pid);
+ __entry->m_lkid = le32_to_cpu(ms->m_lkid);
+ __entry->m_remid = le32_to_cpu(ms->m_remid);
+ __entry->m_parent_lkid = le32_to_cpu(ms->m_parent_lkid);
+ __entry->m_parent_remid = le32_to_cpu(ms->m_parent_remid);
+ __entry->m_exflags = le32_to_cpu(ms->m_exflags);
+ __entry->m_sbflags = le32_to_cpu(ms->m_sbflags);
+ __entry->m_flags = le32_to_cpu(ms->m_flags);
+ __entry->m_lvbseq = le32_to_cpu(ms->m_lvbseq);
+ __entry->m_hash = le32_to_cpu(ms->m_hash);
+ __entry->m_status = le32_to_cpu(ms->m_status);
+ __entry->m_grmode = le32_to_cpu(ms->m_grmode);
+ __entry->m_rqmode = le32_to_cpu(ms->m_rqmode);
+ __entry->m_bastmode = le32_to_cpu(ms->m_bastmode);
+ __entry->m_asts = le32_to_cpu(ms->m_asts);
+ __entry->m_result = le32_to_cpu(ms->m_result);
+ memcpy(__get_dynamic_array(m_extra), ms->m_extra,
+ __get_dynamic_array_len(m_extra));
+ memcpy(__get_dynamic_array(res_name), name,
+ __get_dynamic_array_len(res_name));
+ ),
+
+ TP_printk("dst=%u h_seq=%u h_version=%s h_lockspace=%u h_nodeid=%u "
+ "h_length=%u h_cmd=%s m_type=%s m_nodeid=%u "
+ "m_pid=%u m_lkid=%u m_remid=%u m_parent_lkid=%u "
+ "m_parent_remid=%u m_exflags=%s m_sbflags=%s m_flags=%s "
+ "m_lvbseq=%u m_hash=%u m_status=%d m_grmode=%s "
+ "m_rqmode=%s m_bastmode=%s m_asts=%d m_result=%d "
+ "m_extra=0x%s res_name=0x%s", __entry->dst,
+ __entry->h_seq, show_message_version(__entry->h_version),
+ __entry->h_lockspace, __entry->h_nodeid, __entry->h_length,
+ show_header_cmd(__entry->h_cmd),
+ show_message_type(__entry->m_type),
+ __entry->m_nodeid, __entry->m_pid, __entry->m_lkid,
+ __entry->m_remid, __entry->m_parent_lkid,
+ __entry->m_parent_remid, show_lock_flags(__entry->m_exflags),
+ show_dlm_sb_flags(__entry->m_sbflags),
+ show_lkb_flags(__entry->m_flags), __entry->m_lvbseq,
+ __entry->m_hash, __entry->m_status,
+ show_lock_mode(__entry->m_grmode),
+ show_lock_mode(__entry->m_rqmode),
+ show_lock_mode(__entry->m_bastmode),
+ __entry->m_asts, __entry->m_result,
+ __print_hex_str(__get_dynamic_array(m_extra),
+ __get_dynamic_array_len(m_extra)),
+ __print_hex_str(__get_dynamic_array(res_name),
+ __get_dynamic_array_len(res_name)))
+
+);
+
+TRACE_EVENT(dlm_recv_message,
+
+ TP_PROTO(uint32_t dst, uint32_t h_seq, const struct dlm_message *ms),
+
+ TP_ARGS(dst, h_seq, ms),
+
+ TP_STRUCT__entry(
+ __field(uint32_t, dst)
+ __field(uint32_t, h_seq)
+ __field(uint32_t, h_version)
+ __field(uint32_t, h_lockspace)
+ __field(uint32_t, h_nodeid)
+ __field(uint16_t, h_length)
+ __field(uint8_t, h_cmd)
+ __field(uint32_t, m_type)
+ __field(uint32_t, m_nodeid)
+ __field(uint32_t, m_pid)
+ __field(uint32_t, m_lkid)
+ __field(uint32_t, m_remid)
+ __field(uint32_t, m_parent_lkid)
+ __field(uint32_t, m_parent_remid)
+ __field(uint32_t, m_exflags)
+ __field(uint32_t, m_sbflags)
+ __field(uint32_t, m_flags)
+ __field(uint32_t, m_lvbseq)
+ __field(uint32_t, m_hash)
+ __field(int32_t, m_status)
+ __field(int32_t, m_grmode)
+ __field(int32_t, m_rqmode)
+ __field(int32_t, m_bastmode)
+ __field(int32_t, m_asts)
+ __field(int32_t, m_result)
+ __dynamic_array(unsigned char, m_extra,
+ le16_to_cpu(ms->m_header.h_length) - sizeof(*ms))
+ ),
+
+ TP_fast_assign(
+ __entry->dst = dst;
+ __entry->h_seq = h_seq;
+ __entry->h_version = le32_to_cpu(ms->m_header.h_version);
+ __entry->h_lockspace = le32_to_cpu(ms->m_header.u.h_lockspace);
+ __entry->h_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
+ __entry->h_length = le16_to_cpu(ms->m_header.h_length);
+ __entry->h_cmd = ms->m_header.h_cmd;
+ __entry->m_type = le32_to_cpu(ms->m_type);
+ __entry->m_nodeid = le32_to_cpu(ms->m_nodeid);
+ __entry->m_pid = le32_to_cpu(ms->m_pid);
+ __entry->m_lkid = le32_to_cpu(ms->m_lkid);
+ __entry->m_remid = le32_to_cpu(ms->m_remid);
+ __entry->m_parent_lkid = le32_to_cpu(ms->m_parent_lkid);
+ __entry->m_parent_remid = le32_to_cpu(ms->m_parent_remid);
+ __entry->m_exflags = le32_to_cpu(ms->m_exflags);
+ __entry->m_sbflags = le32_to_cpu(ms->m_sbflags);
+ __entry->m_flags = le32_to_cpu(ms->m_flags);
+ __entry->m_lvbseq = le32_to_cpu(ms->m_lvbseq);
+ __entry->m_hash = le32_to_cpu(ms->m_hash);
+ __entry->m_status = le32_to_cpu(ms->m_status);
+ __entry->m_grmode = le32_to_cpu(ms->m_grmode);
+ __entry->m_rqmode = le32_to_cpu(ms->m_rqmode);
+ __entry->m_bastmode = le32_to_cpu(ms->m_bastmode);
+ __entry->m_asts = le32_to_cpu(ms->m_asts);
+ __entry->m_result = le32_to_cpu(ms->m_result);
+ memcpy(__get_dynamic_array(m_extra), ms->m_extra,
+ __get_dynamic_array_len(m_extra));
+ ),
+
+ TP_printk("dst=%u h_seq=%u h_version=%s h_lockspace=%u h_nodeid=%u "
+ "h_length=%u h_cmd=%s m_type=%s m_nodeid=%u "
+ "m_pid=%u m_lkid=%u m_remid=%u m_parent_lkid=%u "
+ "m_parent_remid=%u m_exflags=%s m_sbflags=%s m_flags=%s "
+ "m_lvbseq=%u m_hash=%u m_status=%d m_grmode=%s "
+ "m_rqmode=%s m_bastmode=%s m_asts=%d m_result=%d "
+ "m_extra=0x%s", __entry->dst,
+ __entry->h_seq, show_message_version(__entry->h_version),
+ __entry->h_lockspace, __entry->h_nodeid, __entry->h_length,
+ show_header_cmd(__entry->h_cmd),
+ show_message_type(__entry->m_type),
+ __entry->m_nodeid, __entry->m_pid, __entry->m_lkid,
+ __entry->m_remid, __entry->m_parent_lkid,
+ __entry->m_parent_remid, show_lock_flags(__entry->m_exflags),
+ show_dlm_sb_flags(__entry->m_sbflags),
+ show_lkb_flags(__entry->m_flags), __entry->m_lvbseq,
+ __entry->m_hash, __entry->m_status,
+ show_lock_mode(__entry->m_grmode),
+ show_lock_mode(__entry->m_rqmode),
+ show_lock_mode(__entry->m_bastmode),
+ __entry->m_asts, __entry->m_result,
+ __print_hex_str(__get_dynamic_array(m_extra),
+ __get_dynamic_array_len(m_extra)))
+
+);
+
TRACE_EVENT(dlm_send,
TP_PROTO(int nodeid, int ret),
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 229e8fae66a3..77b426ae0064 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -104,6 +104,7 @@ TRACE_DEFINE_ENUM(EXT4_FC_REASON_RESIZE);
TRACE_DEFINE_ENUM(EXT4_FC_REASON_RENAME_DIR);
TRACE_DEFINE_ENUM(EXT4_FC_REASON_FALLOC_RANGE);
TRACE_DEFINE_ENUM(EXT4_FC_REASON_INODE_JOURNAL_DATA);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_ENCRYPTED_FILENAME);
TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX);
#define show_fc_reason(reason) \
@@ -116,7 +117,8 @@ TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX);
{ EXT4_FC_REASON_RESIZE, "RESIZE"}, \
{ EXT4_FC_REASON_RENAME_DIR, "RENAME_DIR"}, \
{ EXT4_FC_REASON_FALLOC_RANGE, "FALLOC_RANGE"}, \
- { EXT4_FC_REASON_INODE_JOURNAL_DATA, "INODE_JOURNAL_DATA"})
+ { EXT4_FC_REASON_INODE_JOURNAL_DATA, "INODE_JOURNAL_DATA"}, \
+ { EXT4_FC_REASON_ENCRYPTED_FILENAME, "ENCRYPTED_FILENAME"})
TRACE_EVENT(ext4_other_inode_update_time,
TP_PROTO(struct inode *inode, ino_t orig_ino),
@@ -1744,18 +1746,19 @@ TRACE_EVENT(ext4_load_inode,
(unsigned long) __entry->ino)
);
-TRACE_EVENT(ext4_journal_start,
+TRACE_EVENT(ext4_journal_start_sb,
TP_PROTO(struct super_block *sb, int blocks, int rsv_blocks,
- int revoke_creds, unsigned long IP),
+ int revoke_creds, int type, unsigned long IP),
- TP_ARGS(sb, blocks, rsv_blocks, revoke_creds, IP),
+ TP_ARGS(sb, blocks, rsv_blocks, revoke_creds, type, IP),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field(unsigned long, ip )
- __field( int, blocks )
- __field( int, rsv_blocks )
- __field( int, revoke_creds )
+ __field( dev_t, dev )
+ __field( unsigned long, ip )
+ __field( int, blocks )
+ __field( int, rsv_blocks )
+ __field( int, revoke_creds )
+ __field( int, type )
),
TP_fast_assign(
@@ -1764,11 +1767,45 @@ TRACE_EVENT(ext4_journal_start,
__entry->blocks = blocks;
__entry->rsv_blocks = rsv_blocks;
__entry->revoke_creds = revoke_creds;
+ __entry->type = type;
+ ),
+
+ TP_printk("dev %d,%d blocks %d, rsv_blocks %d, revoke_creds %d,"
+ " type %d, caller %pS", MAJOR(__entry->dev),
+ MINOR(__entry->dev), __entry->blocks, __entry->rsv_blocks,
+ __entry->revoke_creds, __entry->type, (void *)__entry->ip)
+);
+
+TRACE_EVENT(ext4_journal_start_inode,
+ TP_PROTO(struct inode *inode, int blocks, int rsv_blocks,
+ int revoke_creds, int type, unsigned long IP),
+
+ TP_ARGS(inode, blocks, rsv_blocks, revoke_creds, type, IP),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, ino )
+ __field( dev_t, dev )
+ __field( unsigned long, ip )
+ __field( int, blocks )
+ __field( int, rsv_blocks )
+ __field( int, revoke_creds )
+ __field( int, type )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ip = IP;
+ __entry->blocks = blocks;
+ __entry->rsv_blocks = rsv_blocks;
+ __entry->revoke_creds = revoke_creds;
+ __entry->type = type;
+ __entry->ino = inode->i_ino;
),
- TP_printk("dev %d,%d blocks %d, rsv_blocks %d, revoke_creds %d, "
- "caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->blocks, __entry->rsv_blocks, __entry->revoke_creds,
+ TP_printk("dev %d,%d blocks %d, rsv_blocks %d, revoke_creds %d,"
+ " type %d, ino %lu, caller %pS", MAJOR(__entry->dev),
+ MINOR(__entry->dev), __entry->blocks, __entry->rsv_blocks,
+ __entry->revoke_creds, __entry->type, __entry->ino,
(void *)__entry->ip)
);
@@ -2764,7 +2801,7 @@ TRACE_EVENT(ext4_fc_stats,
),
TP_printk("dev %d,%d fc ineligible reasons:\n"
- "%s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u "
+ "%s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u"
"num_commits:%lu, ineligible: %lu, numblks: %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR),
@@ -2776,6 +2813,7 @@ TRACE_EVENT(ext4_fc_stats,
FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR),
FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE),
FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_ENCRYPTED_FILENAME),
__entry->fc_commits, __entry->fc_ineligible_commits,
__entry->fc_numblks)
);
diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h
index c078c48a8e6d..a6190aa1b406 100644
--- a/include/trace/events/fscache.h
+++ b/include/trace/events/fscache.h
@@ -66,6 +66,7 @@ enum fscache_cookie_trace {
fscache_cookie_put_work,
fscache_cookie_see_active,
fscache_cookie_see_lru_discard,
+ fscache_cookie_see_lru_discard_clear,
fscache_cookie_see_lru_do_one,
fscache_cookie_see_relinquish,
fscache_cookie_see_withdraw,
@@ -149,6 +150,7 @@ enum fscache_access_trace {
EM(fscache_cookie_put_work, "PQ work ") \
EM(fscache_cookie_see_active, "- activ") \
EM(fscache_cookie_see_lru_discard, "- x-lru") \
+ EM(fscache_cookie_see_lru_discard_clear,"- lrudc") \
EM(fscache_cookie_see_lru_do_one, "- lrudo") \
EM(fscache_cookie_see_relinquish, "- x-rlq") \
EM(fscache_cookie_see_withdraw, "- x-wth") \
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 935af4947917..760455dfa860 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -171,15 +171,15 @@ TRACE_EVENT(mm_collapse_huge_page_swapin,
TRACE_EVENT(mm_khugepaged_scan_file,
- TP_PROTO(struct mm_struct *mm, struct page *page, const char *filename,
+ TP_PROTO(struct mm_struct *mm, struct page *page, struct file *file,
int present, int swap, int result),
- TP_ARGS(mm, page, filename, present, swap, result),
+ TP_ARGS(mm, page, file, present, swap, result),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
__field(unsigned long, pfn)
- __string(filename, filename)
+ __string(filename, file->f_path.dentry->d_iname)
__field(int, present)
__field(int, swap)
__field(int, result)
@@ -188,7 +188,7 @@ TRACE_EVENT(mm_khugepaged_scan_file,
TP_fast_assign(
__entry->mm = mm;
__entry->pfn = page ? page_to_pfn(page) : -1;
- __assign_str(filename, filename);
+ __assign_str(filename, file->f_path.dentry->d_iname);
__entry->present = present;
__entry->swap = swap;
__entry->result = result;
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index 99f783c384bb..8f5ee380d309 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -40,7 +40,7 @@ DECLARE_EVENT_CLASS(jbd2_commit,
TP_STRUCT__entry(
__field( dev_t, dev )
__field( char, sync_commit )
- __field( int, transaction )
+ __field( tid_t, transaction )
),
TP_fast_assign(
@@ -49,7 +49,7 @@ DECLARE_EVENT_CLASS(jbd2_commit,
__entry->transaction = commit_transaction->t_tid;
),
- TP_printk("dev %d,%d transaction %d sync %d",
+ TP_printk("dev %d,%d transaction %u sync %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->transaction, __entry->sync_commit)
);
@@ -97,8 +97,8 @@ TRACE_EVENT(jbd2_end_commit,
TP_STRUCT__entry(
__field( dev_t, dev )
__field( char, sync_commit )
- __field( int, transaction )
- __field( int, head )
+ __field( tid_t, transaction )
+ __field( tid_t, head )
),
TP_fast_assign(
@@ -108,7 +108,7 @@ TRACE_EVENT(jbd2_end_commit,
__entry->head = journal->j_tail_sequence;
),
- TP_printk("dev %d,%d transaction %d sync %d head %d",
+ TP_printk("dev %d,%d transaction %u sync %d head %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->transaction, __entry->sync_commit, __entry->head)
);
@@ -134,14 +134,14 @@ TRACE_EVENT(jbd2_submit_inode_data,
);
DECLARE_EVENT_CLASS(jbd2_handle_start_class,
- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
unsigned int line_no, int requested_blocks),
TP_ARGS(dev, tid, type, line_no, requested_blocks),
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( unsigned long, tid )
+ __field( tid_t, tid )
__field( unsigned int, type )
__field( unsigned int, line_no )
__field( int, requested_blocks)
@@ -155,28 +155,28 @@ DECLARE_EVENT_CLASS(jbd2_handle_start_class,
__entry->requested_blocks = requested_blocks;
),
- TP_printk("dev %d,%d tid %lu type %u line_no %u "
+ TP_printk("dev %d,%d tid %u type %u line_no %u "
"requested_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
__entry->type, __entry->line_no, __entry->requested_blocks)
);
DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_start,
- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
unsigned int line_no, int requested_blocks),
TP_ARGS(dev, tid, type, line_no, requested_blocks)
);
DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_restart,
- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
unsigned int line_no, int requested_blocks),
TP_ARGS(dev, tid, type, line_no, requested_blocks)
);
TRACE_EVENT(jbd2_handle_extend,
- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
unsigned int line_no, int buffer_credits,
int requested_blocks),
@@ -184,7 +184,7 @@ TRACE_EVENT(jbd2_handle_extend,
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( unsigned long, tid )
+ __field( tid_t, tid )
__field( unsigned int, type )
__field( unsigned int, line_no )
__field( int, buffer_credits )
@@ -200,7 +200,7 @@ TRACE_EVENT(jbd2_handle_extend,
__entry->requested_blocks = requested_blocks;
),
- TP_printk("dev %d,%d tid %lu type %u line_no %u "
+ TP_printk("dev %d,%d tid %u type %u line_no %u "
"buffer_credits %d requested_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
__entry->type, __entry->line_no, __entry->buffer_credits,
@@ -208,7 +208,7 @@ TRACE_EVENT(jbd2_handle_extend,
);
TRACE_EVENT(jbd2_handle_stats,
- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
unsigned int line_no, int interval, int sync,
int requested_blocks, int dirtied_blocks),
@@ -217,7 +217,7 @@ TRACE_EVENT(jbd2_handle_stats,
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( unsigned long, tid )
+ __field( tid_t, tid )
__field( unsigned int, type )
__field( unsigned int, line_no )
__field( int, interval )
@@ -237,7 +237,7 @@ TRACE_EVENT(jbd2_handle_stats,
__entry->dirtied_blocks = dirtied_blocks;
),
- TP_printk("dev %d,%d tid %lu type %u line_no %u interval %d "
+ TP_printk("dev %d,%d tid %u type %u line_no %u interval %d "
"sync %d requested_blocks %d dirtied_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
__entry->type, __entry->line_no, __entry->interval,
@@ -246,14 +246,14 @@ TRACE_EVENT(jbd2_handle_stats,
);
TRACE_EVENT(jbd2_run_stats,
- TP_PROTO(dev_t dev, unsigned long tid,
+ TP_PROTO(dev_t dev, tid_t tid,
struct transaction_run_stats_s *stats),
TP_ARGS(dev, tid, stats),
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( unsigned long, tid )
+ __field( tid_t, tid )
__field( unsigned long, wait )
__field( unsigned long, request_delay )
__field( unsigned long, running )
@@ -279,7 +279,7 @@ TRACE_EVENT(jbd2_run_stats,
__entry->blocks_logged = stats->rs_blocks_logged;
),
- TP_printk("dev %d,%d tid %lu wait %u request_delay %u running %u "
+ TP_printk("dev %d,%d tid %u wait %u request_delay %u running %u "
"locked %u flushing %u logging %u handle_count %u "
"blocks %u blocks_logged %u",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
@@ -294,14 +294,14 @@ TRACE_EVENT(jbd2_run_stats,
);
TRACE_EVENT(jbd2_checkpoint_stats,
- TP_PROTO(dev_t dev, unsigned long tid,
+ TP_PROTO(dev_t dev, tid_t tid,
struct transaction_chp_stats_s *stats),
TP_ARGS(dev, tid, stats),
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( unsigned long, tid )
+ __field( tid_t, tid )
__field( unsigned long, chp_time )
__field( __u32, forced_to_close )
__field( __u32, written )
@@ -317,7 +317,7 @@ TRACE_EVENT(jbd2_checkpoint_stats,
__entry->dropped = stats->cs_dropped;
),
- TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u "
+ TP_printk("dev %d,%d tid %u chp_time %u forced_to_close %u "
"written %u dropped %u",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
jiffies_to_msecs(__entry->chp_time),
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index c9048f3e471b..3f121eed369e 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -13,7 +13,7 @@
#include <linux/tracepoint.h>
-#include <trace/events/sunrpc_base.h>
+#include <trace/misc/sunrpc.h>
/**
** GSS-API related trace events
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index fcd3b3f1020a..8f461e04e5f0 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -15,8 +15,8 @@
#include <linux/tracepoint.h>
#include <rdma/ib_cm.h>
-#include <trace/events/rdma.h>
-#include <trace/events/sunrpc_base.h>
+#include <trace/misc/rdma.h>
+#include <trace/misc/sunrpc.h>
/**
** Event classes
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index f48f2ab9d238..37604e0e5963 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -14,7 +14,7 @@
#include <linux/net.h>
#include <linux/tracepoint.h>
-#include <trace/events/sunrpc_base.h>
+#include <trace/misc/sunrpc.h>
TRACE_DEFINE_ENUM(SOCK_STREAM);
TRACE_DEFINE_ENUM(SOCK_DGRAM);
@@ -1666,11 +1666,13 @@ TRACE_DEFINE_ENUM(SVC_COMPLETE);
#define SVC_RQST_ENDPOINT_VARARGS \
__entry->xid, __get_sockaddr(server), __get_sockaddr(client)
-TRACE_EVENT(svc_authenticate,
+TRACE_EVENT_CONDITION(svc_authenticate,
TP_PROTO(const struct svc_rqst *rqst, int auth_res),
TP_ARGS(rqst, auth_res),
+ TP_CONDITION(auth_res != SVC_OK && auth_res != SVC_COMPLETE),
+
TP_STRUCT__entry(
SVC_RQST_ENDPOINT_FIELDS(rqst)
diff --git a/include/trace/events/watchdog.h b/include/trace/events/watchdog.h
new file mode 100644
index 000000000000..beb9bb3424c8
--- /dev/null
+++ b/include/trace/events/watchdog.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM watchdog
+
+#if !defined(_TRACE_WATCHDOG_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WATCHDOG_H
+
+#include <linux/watchdog.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(watchdog_template,
+
+ TP_PROTO(struct watchdog_device *wdd, int err),
+
+ TP_ARGS(wdd, err),
+
+ TP_STRUCT__entry(
+ __field(int, id)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->id = wdd->id;
+ __entry->err = err;
+ ),
+
+ TP_printk("watchdog%d err=%d", __entry->id, __entry->err)
+);
+
+DEFINE_EVENT(watchdog_template, watchdog_start,
+ TP_PROTO(struct watchdog_device *wdd, int err),
+ TP_ARGS(wdd, err));
+
+DEFINE_EVENT(watchdog_template, watchdog_ping,
+ TP_PROTO(struct watchdog_device *wdd, int err),
+ TP_ARGS(wdd, err));
+
+DEFINE_EVENT(watchdog_template, watchdog_stop,
+ TP_PROTO(struct watchdog_device *wdd, int err),
+ TP_ARGS(wdd, err));
+
+TRACE_EVENT(watchdog_set_timeout,
+
+ TP_PROTO(struct watchdog_device *wdd, unsigned int timeout, int err),
+
+ TP_ARGS(wdd, timeout, err),
+
+ TP_STRUCT__entry(
+ __field(int, id)
+ __field(unsigned int, timeout)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->id = wdd->id;
+ __entry->timeout = timeout;
+ __entry->err = err;
+ ),
+
+ TP_printk("watchdog%d timeout=%u err=%d", __entry->id, __entry->timeout, __entry->err)
+);
+
+#endif /* !defined(_TRACE_WATCHDOG_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
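
The DEFINE_EVENT() and TRACE_EVENT() definitions above generate trace_watchdog_start(), trace_watchdog_ping(), trace_watchdog_stop() and trace_watchdog_set_timeout() hooks that take the watchdog device and the return code of the matching driver callback. A minimal sketch of how core code could emit one of them around a driver call; the wrapper below is illustrative, not the actual drivers/watchdog change:

#define CREATE_TRACE_POINTS
#include <trace/events/watchdog.h>

static int watchdog_ping_traced(struct watchdog_device *wdd)
{
	int err;

	/* Prefer the dedicated ping op, fall back to start. */
	if (wdd->ops->ping)
		err = wdd->ops->ping(wdd);
	else
		err = wdd->ops->start(wdd);

	/* Record the watchdog id and the driver's return code. */
	trace_watchdog_ping(wdd, err);

	return err;
}

Recording the return value after the call lets one tracepoint cover both successful pings and driver errors.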
diff --git a/include/trace/events/fs.h b/include/trace/misc/fs.h
index 738b97f22f36..738b97f22f36 100644
--- a/include/trace/events/fs.h
+++ b/include/trace/misc/fs.h
diff --git a/include/trace/events/nfs.h b/include/trace/misc/nfs.h
index 09ffdbb04134..0d9d48dca38a 100644
--- a/include/trace/events/nfs.h
+++ b/include/trace/misc/nfs.h
@@ -360,6 +360,18 @@ TRACE_DEFINE_ENUM(IOMODE_ANY);
{ IOMODE_RW, "RW" }, \
{ IOMODE_ANY, "ANY" })
+#define show_rca_mask(x) \
+ __print_flags(x, "|", \
+ { BIT(RCA4_TYPE_MASK_RDATA_DLG), "RDATA_DLG" }, \
+ { BIT(RCA4_TYPE_MASK_WDATA_DLG), "WDATA_DLG" }, \
+ { BIT(RCA4_TYPE_MASK_DIR_DLG), "DIR_DLG" }, \
+ { BIT(RCA4_TYPE_MASK_FILE_LAYOUT), "FILE_LAYOUT" }, \
+ { BIT(RCA4_TYPE_MASK_BLK_LAYOUT), "BLK_LAYOUT" }, \
+ { BIT(RCA4_TYPE_MASK_OBJ_LAYOUT_MIN), "OBJ_LAYOUT_MIN" }, \
+ { BIT(RCA4_TYPE_MASK_OBJ_LAYOUT_MAX), "OBJ_LAYOUT_MAX" }, \
+ { BIT(RCA4_TYPE_MASK_OTHER_LAYOUT_MIN), "OTHER_LAYOUT_MIN" }, \
+ { BIT(RCA4_TYPE_MASK_OTHER_LAYOUT_MAX), "OTHER_LAYOUT_MAX" })
+
#define show_nfs4_seq4_status(x) \
__print_flags(x, "|", \
{ SEQ4_STATUS_CB_PATH_DOWN, "CB_PATH_DOWN" }, \
diff --git a/include/trace/events/rdma.h b/include/trace/misc/rdma.h
index 81bb454fc288..81bb454fc288 100644
--- a/include/trace/events/rdma.h
+++ b/include/trace/misc/rdma.h
diff --git a/include/trace/events/sunrpc_base.h b/include/trace/misc/sunrpc.h
index 588557d07ea8..588557d07ea8 100644
--- a/include/trace/events/sunrpc_base.h
+++ b/include/trace/misc/sunrpc.h
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 7ee65c0b4f70..0d93ec132ebb 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -763,6 +763,8 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_MES_KIQ 0x19
/* Subquery id: Query MES firmware version */
#define AMDGPU_INFO_FW_MES 0x1a
+ /* Subquery id: Query IMU firmware version */
+ #define AMDGPU_INFO_FW_IMU 0x1b
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index eac87310b348..9f231d40a146 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -235,25 +235,29 @@ struct drm_panfrost_madvise {
#define PANFROSTDUMP_BUF_BO (PANFROSTDUMP_BUF_BOMAP + 1)
#define PANFROSTDUMP_BUF_TRAILER (PANFROSTDUMP_BUF_BO + 1)
+/*
+ * This structure is in the native endianness of the dumping machine; tools
+ * can detect the endianness by looking at the value in 'magic'.
+ */
struct panfrost_dump_object_header {
- __le32 magic;
- __le32 type;
- __le32 file_size;
- __le32 file_offset;
+ __u32 magic;
+ __u32 type;
+ __u32 file_size;
+ __u32 file_offset;
union {
- struct pan_reg_hdr {
- __le64 jc;
- __le32 gpu_id;
- __le32 major;
- __le32 minor;
- __le64 nbos;
+ struct {
+ __u64 jc;
+ __u32 gpu_id;
+ __u32 major;
+ __u32 minor;
+ __u64 nbos;
} reghdr;
- struct pan_bomap_hdr {
- __le32 valid;
- __le64 iova;
- __le32 data[2];
+ struct {
+ __u32 valid;
+ __u64 iova;
+ __u32 data[2];
} bomap;
/*
@@ -261,14 +265,14 @@ struct panfrost_dump_object_header {
* with new fields and also keep it 512-byte aligned
*/
- __le32 sizer[496];
+ __u32 sizer[496];
};
};
/* Registers object, an array of these */
struct panfrost_dump_registers {
- __le32 reg;
- __le32 value;
+ __u32 reg;
+ __u32 value;
};
#if defined(__cplusplus)
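
Because the header is now written in the dumping machine's byte order, a decoder has to check 'magic' in both orientations before interpreting any other field. A hedged userspace sketch; PANFROSTDUMP_MAGIC is assumed to be the dump magic constant this header defines, and the helper name and return convention are illustrative:

#include <stdint.h>
#include <byteswap.h>
#include "panfrost_drm.h"	/* this header */

/*
 * Returns 0 if the dump matches the reader's endianness, 1 if every field
 * must be byte-swapped on read, and -1 if the header is not a valid dump.
 */
static int panfrost_dump_needs_bswap(const struct panfrost_dump_object_header *hdr)
{
	if (hdr->magic == PANFROSTDUMP_MAGIC)
		return 0;
	if (hdr->magic == bswap_32(PANFROSTDUMP_MAGIC))
		return 1;
	return -1;
}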
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 7c1dc818b1d5..d676ed2b246e 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -187,7 +187,7 @@
#define AUDIT_MAX_KEY_LEN 256
#define AUDIT_BITMASK_SIZE 64
#define AUDIT_WORD(nr) ((__u32)((nr)/32))
-#define AUDIT_BIT(nr) (1 << ((nr) - AUDIT_WORD(nr)*32))
+#define AUDIT_BIT(nr) (1U << ((nr) - AUDIT_WORD(nr)*32))
#define AUDIT_SYSCALL_CLASSES 16
#define AUDIT_CLASS_DIR_WRITE 0
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 5655e89b962b..b4f0f9531119 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -19,8 +19,14 @@
#ifndef _UAPI_LINUX_BTRFS_H
#define _UAPI_LINUX_BTRFS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <linux/types.h>
#include <linux/ioctl.h>
+#include <linux/fs.h>
#define BTRFS_IOCTL_MAGIC 0x94
#define BTRFS_VOL_NAME_MAX 255
@@ -333,6 +339,12 @@ struct btrfs_ioctl_feature_flags {
*/
struct btrfs_balance_args {
__u64 profiles;
+
+ /*
+ * usage filter
+ * BTRFS_BALANCE_ARGS_USAGE with a single value means '0..N'
+ * BTRFS_BALANCE_ARGS_USAGE_RANGE - range syntax, min..max
+ */
union {
__u64 usage;
struct {
@@ -549,7 +561,7 @@ struct btrfs_ioctl_search_header {
__u64 offset;
__u32 type;
__u32 len;
-};
+} __attribute__ ((__may_alias__));
#define BTRFS_SEARCH_ARGS_BUFSIZE (4096 - sizeof(struct btrfs_ioctl_search_key))
/*
@@ -562,6 +574,10 @@ struct btrfs_ioctl_search_args {
char buf[BTRFS_SEARCH_ARGS_BUFSIZE];
};
+/*
+ * Extended version of TREE_SEARCH ioctl that can return more than 4k of bytes.
+ * The allocated size of the buffer is set in buf_size.
+ */
struct btrfs_ioctl_search_args_v2 {
struct btrfs_ioctl_search_key key; /* in/out - search parameters */
__u64 buf_size; /* in - size of buffer
@@ -570,10 +586,11 @@ struct btrfs_ioctl_search_args_v2 {
__u64 buf[]; /* out - found items */
};
+/* With a @src_length of zero, the range from @src_offset->EOF is cloned! */
struct btrfs_ioctl_clone_range_args {
- __s64 src_fd;
- __u64 src_offset, src_length;
- __u64 dest_offset;
+ __s64 src_fd;
+ __u64 src_offset, src_length;
+ __u64 dest_offset;
};
/*
@@ -677,8 +694,11 @@ struct btrfs_ioctl_logical_ino_args {
/* struct btrfs_data_container *inodes; out */
__u64 inodes;
};
-/* Return every ref to the extent, not just those containing logical block.
- * Requires logical == extent bytenr. */
+
+/*
+ * Return every ref to the extent, not just those containing logical block.
+ * Requires logical == extent bytenr.
+ */
#define BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET (1ULL << 0)
enum btrfs_dev_stat_values {
@@ -1144,4 +1164,8 @@ enum btrfs_err_code {
#define BTRFS_IOC_ENCODED_WRITE _IOW(BTRFS_IOCTL_MAGIC, 64, \
struct btrfs_ioctl_encoded_io_args)
+#ifdef __cplusplus
+}
+#endif
+
#endif /* _UAPI_LINUX_BTRFS_H */
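
The new comment on struct btrfs_ioctl_search_args_v2 captures the key difference from the original TREE_SEARCH ioctl: the caller allocates the result buffer itself and reports its size in buf_size. A hedged userspace sketch (the 16 KiB size and the helper name are arbitrary; error handling is trimmed):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

static int tree_search_v2(int fd, const struct btrfs_ioctl_search_key *key)
{
	const size_t buf_size = 16 * 1024;	/* caller-chosen, > 4 KiB is fine */
	struct btrfs_ioctl_search_args_v2 *args;
	int ret;

	args = calloc(1, sizeof(*args) + buf_size);
	if (!args)
		return -1;

	args->key = *key;		/* in/out search parameters */
	args->buf_size = buf_size;	/* allocated size of args->buf */

	ret = ioctl(fd, BTRFS_IOC_TREE_SEARCH_V2, args);
	/* On success, args->key.nr_items is the number of items in args->buf. */

	free(args);
	return ret;
}

The extern "C" guards added at the top of the header make the same call sequence usable from C++ tools without extra wrapping.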
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index 1f7a38ec6ac3..ab38d0f411fa 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -10,6 +10,23 @@
#include <stddef.h>
#endif
+/* ASCII for _BHRfS_M, no terminating nul */
+#define BTRFS_MAGIC 0x4D5F53665248425FULL
+
+#define BTRFS_MAX_LEVEL 8
+
+/*
+ * We can actually store much bigger names, but let's not confuse the rest of
+ * Linux.
+ */
+#define BTRFS_NAME_LEN 255
+
+/*
+ * The theoretical limit is larger, but we keep this down to a sane value.
+ * That should greatly limit the possibility of collisions on inode ref items.
+ */
+#define BTRFS_LINK_MAX 65535U
+
/*
* This header contains the structure definitions and constants used
* by file system objects that can be retrieved using
@@ -359,6 +376,50 @@ enum btrfs_csum_type {
#define BTRFS_FT_SYMLINK 7
#define BTRFS_FT_XATTR 8
#define BTRFS_FT_MAX 9
+/* Directory contains encrypted data */
+#define BTRFS_FT_ENCRYPTED 0x80
+
+static inline __u8 btrfs_dir_flags_to_ftype(__u8 flags)
+{
+ return flags & ~BTRFS_FT_ENCRYPTED;
+}
+
+/*
+ * Inode flags
+ */
+#define BTRFS_INODE_NODATASUM (1U << 0)
+#define BTRFS_INODE_NODATACOW (1U << 1)
+#define BTRFS_INODE_READONLY (1U << 2)
+#define BTRFS_INODE_NOCOMPRESS (1U << 3)
+#define BTRFS_INODE_PREALLOC (1U << 4)
+#define BTRFS_INODE_SYNC (1U << 5)
+#define BTRFS_INODE_IMMUTABLE (1U << 6)
+#define BTRFS_INODE_APPEND (1U << 7)
+#define BTRFS_INODE_NODUMP (1U << 8)
+#define BTRFS_INODE_NOATIME (1U << 9)
+#define BTRFS_INODE_DIRSYNC (1U << 10)
+#define BTRFS_INODE_COMPRESS (1U << 11)
+
+#define BTRFS_INODE_ROOT_ITEM_INIT (1U << 31)
+
+#define BTRFS_INODE_FLAG_MASK \
+ (BTRFS_INODE_NODATASUM | \
+ BTRFS_INODE_NODATACOW | \
+ BTRFS_INODE_READONLY | \
+ BTRFS_INODE_NOCOMPRESS | \
+ BTRFS_INODE_PREALLOC | \
+ BTRFS_INODE_SYNC | \
+ BTRFS_INODE_IMMUTABLE | \
+ BTRFS_INODE_APPEND | \
+ BTRFS_INODE_NODUMP | \
+ BTRFS_INODE_NOATIME | \
+ BTRFS_INODE_DIRSYNC | \
+ BTRFS_INODE_COMPRESS | \
+ BTRFS_INODE_ROOT_ITEM_INIT)
+
+#define BTRFS_INODE_RO_VERITY (1U << 0)
+
+#define BTRFS_INODE_RO_FLAG_MASK (BTRFS_INODE_RO_VERITY)
/*
* The key defines the order in the tree, and so it also defines (optimal)
@@ -389,6 +450,109 @@ struct btrfs_key {
__u64 offset;
} __attribute__ ((__packed__));
+/*
+ * Every tree block (leaf or node) starts with this header.
+ */
+struct btrfs_header {
+ /* These first four must match the super block */
+ __u8 csum[BTRFS_CSUM_SIZE];
+ /* FS specific uuid */
+ __u8 fsid[BTRFS_FSID_SIZE];
+ /* Which block this node is supposed to live in */
+ __le64 bytenr;
+ __le64 flags;
+
+ /* Allowed to be different from the super from here on down */
+ __u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
+ __le64 generation;
+ __le64 owner;
+ __le32 nritems;
+ __u8 level;
+} __attribute__ ((__packed__));
+
+/*
+ * This is a very generous portion of the super block, giving us room to
+ * translate 14 chunks with 3 stripes each.
+ */
+#define BTRFS_SYSTEM_CHUNK_ARRAY_SIZE 2048
+
+/*
+ * Just in case we somehow lose the roots and are not able to mount, we store
+ * an array of the roots from previous transactions in the super.
+ */
+#define BTRFS_NUM_BACKUP_ROOTS 4
+struct btrfs_root_backup {
+ __le64 tree_root;
+ __le64 tree_root_gen;
+
+ __le64 chunk_root;
+ __le64 chunk_root_gen;
+
+ __le64 extent_root;
+ __le64 extent_root_gen;
+
+ __le64 fs_root;
+ __le64 fs_root_gen;
+
+ __le64 dev_root;
+ __le64 dev_root_gen;
+
+ __le64 csum_root;
+ __le64 csum_root_gen;
+
+ __le64 total_bytes;
+ __le64 bytes_used;
+ __le64 num_devices;
+ /* future */
+ __le64 unused_64[4];
+
+ __u8 tree_root_level;
+ __u8 chunk_root_level;
+ __u8 extent_root_level;
+ __u8 fs_root_level;
+ __u8 dev_root_level;
+ __u8 csum_root_level;
+ /* future and to align */
+ __u8 unused_8[10];
+} __attribute__ ((__packed__));
+
+/*
+ * A leaf is full of items. offset and size tell us where to find the item in
+ * the leaf (relative to the start of the data area)
+ */
+struct btrfs_item {
+ struct btrfs_disk_key key;
+ __le32 offset;
+ __le32 size;
+} __attribute__ ((__packed__));
+
+/*
+ * Leaves have an item area and a data area:
+ * [item0, item1....itemN] [free space] [dataN...data1, data0]
+ *
+ * The data is separate from the items to get the keys closer together during
+ * searches.
+ */
+struct btrfs_leaf {
+ struct btrfs_header header;
+ struct btrfs_item items[];
+} __attribute__ ((__packed__));
+
+/*
+ * All non-leaf blocks are nodes, they hold only keys and pointers to other
+ * blocks.
+ */
+struct btrfs_key_ptr {
+ struct btrfs_disk_key key;
+ __le64 blockptr;
+ __le64 generation;
+} __attribute__ ((__packed__));
+
+struct btrfs_node {
+ struct btrfs_header header;
+ struct btrfs_key_ptr ptrs[];
+} __attribute__ ((__packed__));
+
struct btrfs_dev_item {
/* the internal btrfs device id */
__le64 devid;
@@ -472,6 +636,69 @@ struct btrfs_chunk {
/* additional stripes go here */
} __attribute__ ((__packed__));
+/*
+ * The super block basically lists the main trees of the FS.
+ */
+struct btrfs_super_block {
+ /* The first 4 fields must match struct btrfs_header */
+ __u8 csum[BTRFS_CSUM_SIZE];
+ /* FS specific UUID, visible to user */
+ __u8 fsid[BTRFS_FSID_SIZE];
+ /* This block number */
+ __le64 bytenr;
+ __le64 flags;
+
+ /* Allowed to be different from the btrfs_header from here on down */
+ __le64 magic;
+ __le64 generation;
+ __le64 root;
+ __le64 chunk_root;
+ __le64 log_root;
+
+ /*
+ * This member has never been utilized since the very beginning, thus
+ * it's always 0 regardless of kernel version. We always use
+ * generation + 1 to read log tree root. So here we mark it deprecated.
+ */
+ __le64 __unused_log_root_transid;
+ __le64 total_bytes;
+ __le64 bytes_used;
+ __le64 root_dir_objectid;
+ __le64 num_devices;
+ __le32 sectorsize;
+ __le32 nodesize;
+ __le32 __unused_leafsize;
+ __le32 stripesize;
+ __le32 sys_chunk_array_size;
+ __le64 chunk_root_generation;
+ __le64 compat_flags;
+ __le64 compat_ro_flags;
+ __le64 incompat_flags;
+ __le16 csum_type;
+ __u8 root_level;
+ __u8 chunk_root_level;
+ __u8 log_root_level;
+ struct btrfs_dev_item dev_item;
+
+ char label[BTRFS_LABEL_SIZE];
+
+ __le64 cache_generation;
+ __le64 uuid_tree_generation;
+
+ /* The UUID written into btree blocks */
+ __u8 metadata_uuid[BTRFS_FSID_SIZE];
+
+ __u64 nr_global_roots;
+
+ /* Future expansion */
+ __le64 reserved[27];
+ __u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE];
+ struct btrfs_root_backup super_roots[BTRFS_NUM_BACKUP_ROOTS];
+
+ /* Padded to 4096 bytes */
+ __u8 padding[565];
+} __attribute__ ((__packed__));
+
#define BTRFS_FREE_SPACE_EXTENT 1
#define BTRFS_FREE_SPACE_BITMAP 2
@@ -526,6 +753,14 @@ struct btrfs_extent_item_v0 {
/* use full backrefs for extent pointers in the block */
#define BTRFS_BLOCK_FLAG_FULL_BACKREF (1ULL << 8)
+#define BTRFS_BACKREF_REV_MAX 256
+#define BTRFS_BACKREF_REV_SHIFT 56
+#define BTRFS_BACKREF_REV_MASK (((u64)BTRFS_BACKREF_REV_MAX - 1) << \
+ BTRFS_BACKREF_REV_SHIFT)
+
+#define BTRFS_OLD_BACKREF_REV 0
+#define BTRFS_MIXED_BACKREF_REV 1
+
/*
* this flag is only used internally by scrub and may be changed at any time
* it is only declared here to avoid collisions
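
With struct btrfs_super_block and BTRFS_MAGIC exported to userspace, a tool can read the primary super block, which lives at byte offset 65536 on the device, and validate it before trusting any other field. A minimal sketch, assuming a readable block-device fd; the BTRFS_SUPER_OFFSET macro is local to the example:

#include <endian.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/btrfs_tree.h>

#define BTRFS_SUPER_OFFSET	65536	/* primary super block location */

static int check_btrfs_super(int fd)
{
	struct btrfs_super_block sb;

	if (pread(fd, &sb, sizeof(sb), BTRFS_SUPER_OFFSET) != sizeof(sb))
		return -1;

	/* On-disk values are little-endian. */
	if (le64toh(sb.magic) != BTRFS_MAGIC)
		return -1;

	printf("btrfs, label \"%.*s\", %llu bytes used\n",
	       BTRFS_LABEL_SIZE, sb.label,
	       (unsigned long long)le64toh(sb.bytes_used));
	return 0;
}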
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 463d1ba2232a..3d61a0ae055d 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -426,7 +426,7 @@ struct vfs_ns_cap_data {
*/
#define CAP_TO_INDEX(x) ((x) >> 5) /* 1 << 5 == bits in __u32 */
-#define CAP_TO_MASK(x) (1 << ((x) & 31)) /* mask for indexed __u32 */
+#define CAP_TO_MASK(x) (1U << ((x) & 31)) /* mask for indexed __u32 */
#endif /* _UAPI_LINUX_CAPABILITY_H */
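
This change mirrors the AUDIT_BIT fix above: with a plain 1, the shift for bit 31 lands in the sign bit of a signed int, which is undefined behaviour and, with typical compilers, sign-extends when the mask is widened to 64 bits, while 1U keeps the result an unsigned 32-bit mask. A small standalone illustration, not part of either header:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Old-style signed shift (undefined behaviour, shown for illustration):
	 * typically sign-extends when widened. */
	uint64_t signed_mask   = (uint64_t)(1 << 31);	/* 0xffffffff80000000 */
	/* New-style unsigned shift: zero-extends as intended. */
	uint64_t unsigned_mask = (uint64_t)(1U << 31);	/* 0x0000000080000000 */

	printf("%#llx vs %#llx\n",
	       (unsigned long long)signed_mask,
	       (unsigned long long)unsigned_mask);
	return 0;
}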
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
index c3baaea0b8ef..d58fa1cdcb08 100644
--- a/include/uapi/linux/cec-funcs.h
+++ b/include/uapi/linux/cec-funcs.h
@@ -1568,6 +1568,20 @@ static inline void cec_ops_request_short_audio_descriptor(const struct cec_msg *
}
}
+static inline void cec_msg_set_audio_volume_level(struct cec_msg *msg,
+ __u8 audio_volume_level)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_SET_AUDIO_VOLUME_LEVEL;
+ msg->msg[2] = audio_volume_level;
+}
+
+static inline void cec_ops_set_audio_volume_level(const struct cec_msg *msg,
+ __u8 *audio_volume_level)
+{
+ *audio_volume_level = msg->msg[2];
+}
+
/* Audio Rate Control Feature */
static inline void cec_msg_set_audio_rate(struct cec_msg *msg,
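
Paired with the new CEC_MSG_SET_AUDIO_VOLUME_LEVEL opcode added to cec.h below, the helper lets an application request an absolute volume from an audio system. A hedged sketch over an already-open /dev/cecX file descriptor; the logical addresses chosen here are illustrative:

#include <sys/ioctl.h>
#include <linux/cec.h>
#include <linux/cec-funcs.h>

static int set_abs_volume(int cec_fd, __u8 volume)
{
	struct cec_msg msg;

	cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_AUDIOSYSTEM);
	cec_msg_set_audio_volume_level(&msg, volume);

	/* Transmit the message on the CEC bus. */
	return ioctl(cec_fd, CEC_TRANSMIT, &msg);
}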
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index 1d48da926216..b8e071abaea5 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -768,6 +768,7 @@ struct cec_event {
#define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE 0x08
#define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX 0x04
#define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX 0x02
+#define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_VOLUME_LEVEL 0x01
#define CEC_MSG_GIVE_FEATURES 0xa5 /* HDMI 2.0 */
@@ -1059,6 +1060,7 @@ struct cec_event {
#define CEC_OP_AUD_FMT_ID_CEA861 0
#define CEC_OP_AUD_FMT_ID_CEA861_CXT 1
+#define CEC_MSG_SET_AUDIO_VOLUME_LEVEL 0x73
/* Audio Rate Control Feature */
#define CEC_MSG_SET_AUDIO_RATE 0x9a
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index c7b056af9ef0..4c6a8fa5e7ed 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -91,7 +91,7 @@ typedef __s64 Elf64_Sxword;
#define DT_INIT 12
#define DT_FINI 13
#define DT_SONAME 14
-#define DT_RPATH 15
+#define DT_RPATH 15
#define DT_SYMBOLIC 16
#define DT_REL 17
#define DT_RELSZ 18
@@ -140,9 +140,9 @@ typedef __s64 Elf64_Sxword;
#define ELF64_ST_BIND(x) ELF_ST_BIND(x)
#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x)
-typedef struct dynamic{
+typedef struct dynamic {
Elf32_Sword d_tag;
- union{
+ union {
Elf32_Sword d_val;
Elf32_Addr d_ptr;
} d_un;
@@ -173,7 +173,7 @@ typedef struct elf64_rel {
Elf64_Xword r_info; /* index and type of relocation */
} Elf64_Rel;
-typedef struct elf32_rela{
+typedef struct elf32_rela {
Elf32_Addr r_offset;
Elf32_Word r_info;
Elf32_Sword r_addend;
@@ -185,7 +185,7 @@ typedef struct elf64_rela {
Elf64_Sxword r_addend; /* Constant addend used to compute value */
} Elf64_Rela;
-typedef struct elf32_sym{
+typedef struct elf32_sym {
Elf32_Word st_name;
Elf32_Addr st_value;
Elf32_Word st_size;
@@ -206,7 +206,7 @@ typedef struct elf64_sym {
#define EI_NIDENT 16
-typedef struct elf32_hdr{
+typedef struct elf32_hdr {
unsigned char e_ident[EI_NIDENT];
Elf32_Half e_type;
Elf32_Half e_machine;
@@ -246,7 +246,7 @@ typedef struct elf64_hdr {
#define PF_W 0x2
#define PF_X 0x1
-typedef struct elf32_phdr{
+typedef struct elf32_phdr {
Elf32_Word p_type;
Elf32_Off p_offset;
Elf32_Addr p_vaddr;
diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h
index a756b29afcc2..fd1fb0d5389d 100644
--- a/include/uapi/linux/fscrypt.h
+++ b/include/uapi/linux/fscrypt.h
@@ -26,6 +26,8 @@
#define FSCRYPT_MODE_AES_256_CTS 4
#define FSCRYPT_MODE_AES_128_CBC 5
#define FSCRYPT_MODE_AES_128_CTS 6
+#define FSCRYPT_MODE_SM4_XTS 7
+#define FSCRYPT_MODE_SM4_CTS 8
#define FSCRYPT_MODE_ADIANTUM 9
#define FSCRYPT_MODE_AES_256_HCTR2 10
/* If adding a mode number > 10, update FSCRYPT_MODE_MAX in fscrypt_private.h */
@@ -185,8 +187,6 @@ struct fscrypt_get_key_status_arg {
#define FS_ENCRYPTION_MODE_AES_256_CTS FSCRYPT_MODE_AES_256_CTS
#define FS_ENCRYPTION_MODE_AES_128_CBC FSCRYPT_MODE_AES_128_CBC
#define FS_ENCRYPTION_MODE_AES_128_CTS FSCRYPT_MODE_AES_128_CTS
-#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* removed */
-#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* removed */
#define FS_ENCRYPTION_MODE_ADIANTUM FSCRYPT_MODE_ADIANTUM
#define FS_KEY_DESC_PREFIX FSCRYPT_KEY_DESC_PREFIX
#define FS_KEY_DESC_PREFIX_SIZE FSCRYPT_KEY_DESC_PREFIX_SIZE
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 76ee8f9e024a..e3c54109bae9 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -197,6 +197,10 @@
*
* 7.37
* - add FUSE_TMPFILE
+ *
+ * 7.38
+ * - add FUSE_EXPIRE_ONLY flag to fuse_notify_inval_entry
+ * - add FOPEN_PARALLEL_DIRECT_WRITES
*/
#ifndef _LINUX_FUSE_H
@@ -232,7 +236,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 37
+#define FUSE_KERNEL_MINOR_VERSION 38
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -304,6 +308,7 @@ struct fuse_file_lock {
* FOPEN_CACHE_DIR: allow caching this directory
* FOPEN_STREAM: the file is stream-like (no file position at all)
* FOPEN_NOFLUSH: don't flush data cache on close (unless FUSE_WRITEBACK_CACHE)
+ * FOPEN_PARALLEL_DIRECT_WRITES: Allow concurrent direct writes on the same inode
*/
#define FOPEN_DIRECT_IO (1 << 0)
#define FOPEN_KEEP_CACHE (1 << 1)
@@ -311,6 +316,7 @@ struct fuse_file_lock {
#define FOPEN_CACHE_DIR (1 << 3)
#define FOPEN_STREAM (1 << 4)
#define FOPEN_NOFLUSH (1 << 5)
+#define FOPEN_PARALLEL_DIRECT_WRITES (1 << 6)
/**
* INIT request/reply flags
@@ -491,6 +497,12 @@ struct fuse_file_lock {
*/
#define FUSE_SETXATTR_ACL_KILL_SGID (1 << 0)
+/**
+ * notify_inval_entry flags
+ * FUSE_EXPIRE_ONLY
+ */
+#define FUSE_EXPIRE_ONLY (1 << 0)
+
enum fuse_opcode {
FUSE_LOOKUP = 1,
FUSE_FORGET = 2, /* no reply */
@@ -919,7 +931,7 @@ struct fuse_notify_inval_inode_out {
struct fuse_notify_inval_entry_out {
uint64_t parent;
uint32_t namelen;
- uint32_t padding;
+ uint32_t flags;
};
struct fuse_notify_delete_out {
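
FUSE_EXPIRE_ONLY in the new flags field asks the kernel to merely expire a cached dentry, so the next lookup revalidates it, rather than invalidate it outright. A hedged sketch of how a low-level server could build the payload that follows the fuse_out_header of a FUSE_NOTIFY_INVAL_ENTRY notification; filling the header and writing the message to /dev/fuse are omitted:

#include <stdint.h>
#include <string.h>
#include <linux/fuse.h>

static size_t fill_expire_only_entry(char *buf, uint64_t parent_nodeid,
				     const char *name)
{
	struct fuse_notify_inval_entry_out out = {
		.parent  = parent_nodeid,
		.namelen = (uint32_t)strlen(name),
		.flags   = FUSE_EXPIRE_ONLY,	/* expire, do not invalidate */
	};

	/* Fixed part, then the name including its terminating NUL. */
	memcpy(buf, &out, sizeof(out));
	memcpy(buf + sizeof(out), name, out.namelen + 1);

	return sizeof(out) + out.namelen + 1;
}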
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
index 095299c75828..2b9e7feba3f3 100644
--- a/include/uapi/linux/idxd.h
+++ b/include/uapi/linux/idxd.h
@@ -29,6 +29,7 @@ enum idxd_scmd_stat {
IDXD_SCMD_WQ_NO_SIZE = 0x800e0000,
IDXD_SCMD_WQ_NO_PRIV = 0x800f0000,
IDXD_SCMD_WQ_IRQ_ERR = 0x80100000,
+ IDXD_SCMD_WQ_USER_NO_IOMMU = 0x80110000,
};
#define IDXD_SCMD_SOFTERR_MASK 0x80000000
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index f243ce665f74..07a4cb149305 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -20,6 +20,7 @@
#define _UAPI_LINUX_IN_H
#include <linux/types.h>
+#include <linux/stddef.h>
#include <linux/libc-compat.h>
#include <linux/socket.h>
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index ab7458033ee3..2df3225b562f 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -222,7 +222,7 @@ enum io_uring_op {
/*
* sqe->uring_cmd_flags
- * IORING_URING_CMD_FIXED use registered buffer; pass thig flag
+ * IORING_URING_CMD_FIXED use registered buffer; pass this flag
* along with setting sqe->buf_index.
*/
#define IORING_URING_CMD_FIXED (1U << 0)
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index 961ec16a26b8..874a92349bf5 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -100,8 +100,10 @@ struct iphdr {
__u8 ttl;
__u8 protocol;
__sum16 check;
- __be32 saddr;
- __be32 daddr;
+ __struct_group(/* no tag */, addrs, /* no attrs */,
+ __be32 saddr;
+ __be32 daddr;
+ );
/*The options start here. */
};
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 03cdbe798fe3..81f4243bebb1 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -130,8 +130,10 @@ struct ipv6hdr {
__u8 nexthdr;
__u8 hop_limit;
- struct in6_addr saddr;
- struct in6_addr daddr;
+ __struct_group(/* no tag */, addrs, /* no attrs */,
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ );
};
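
Grouping saddr and daddr behind an addrs member lets code that treats the pair as one block (flow hashing, header rewrites, checksum folding) copy it through a member of the correct size instead of overrunning saddr, which is exactly what FORTIFY_SOURCE-style bounds checks flag. A minimal sketch for the IPv4 case; the destination struct is illustrative:

#include <string.h>
#include <linux/ip.h>

struct v4_flow_key {
	__be32 saddr;
	__be32 daddr;
};

static void extract_v4_flow_key(struct v4_flow_key *key,
				const struct iphdr *iph)
{
	/* One 8-byte copy bounded by the addrs group, not by saddr alone. */
	memcpy(key, &iph->addrs, sizeof(iph->addrs));
}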
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
index 9c4bcc37a455..f3223f964691 100644
--- a/include/uapi/linux/landlock.h
+++ b/include/uapi/linux/landlock.h
@@ -95,8 +95,19 @@ struct landlock_path_beneath_attr {
* A file can only receive these access rights:
*
* - %LANDLOCK_ACCESS_FS_EXECUTE: Execute a file.
- * - %LANDLOCK_ACCESS_FS_WRITE_FILE: Open a file with write access.
+ * - %LANDLOCK_ACCESS_FS_WRITE_FILE: Open a file with write access. Note that
+ * you might additionally need the %LANDLOCK_ACCESS_FS_TRUNCATE right in order
+ * to overwrite files with :manpage:`open(2)` using ``O_TRUNC`` or
+ * :manpage:`creat(2)`.
* - %LANDLOCK_ACCESS_FS_READ_FILE: Open a file with read access.
+ * - %LANDLOCK_ACCESS_FS_TRUNCATE: Truncate a file with :manpage:`truncate(2)`,
+ * :manpage:`ftruncate(2)`, :manpage:`creat(2)`, or :manpage:`open(2)` with
+ * ``O_TRUNC``. Whether an opened file can be truncated with
+ * :manpage:`ftruncate(2)` is determined during :manpage:`open(2)`, in the
+ * same way as read and write permissions are checked during
+ * :manpage:`open(2)` using %LANDLOCK_ACCESS_FS_READ_FILE and
+ * %LANDLOCK_ACCESS_FS_WRITE_FILE. This access right is available since the
+ * third version of the Landlock ABI.
*
* A directory can receive access rights related to files or directories. The
* following access right is applied to the directory itself, and the
@@ -139,10 +150,9 @@ struct landlock_path_beneath_attr {
*
* It is currently not possible to restrict some file-related actions
* accessible through these syscall families: :manpage:`chdir(2)`,
- * :manpage:`truncate(2)`, :manpage:`stat(2)`, :manpage:`flock(2)`,
- * :manpage:`chmod(2)`, :manpage:`chown(2)`, :manpage:`setxattr(2)`,
- * :manpage:`utime(2)`, :manpage:`ioctl(2)`, :manpage:`fcntl(2)`,
- * :manpage:`access(2)`.
+ * :manpage:`stat(2)`, :manpage:`flock(2)`, :manpage:`chmod(2)`,
+ * :manpage:`chown(2)`, :manpage:`setxattr(2)`, :manpage:`utime(2)`,
+ * :manpage:`ioctl(2)`, :manpage:`fcntl(2)`, :manpage:`access(2)`.
* Future Landlock evolutions will enable to restrict them.
*/
/* clang-format off */
@@ -160,6 +170,7 @@ struct landlock_path_beneath_attr {
#define LANDLOCK_ACCESS_FS_MAKE_BLOCK (1ULL << 11)
#define LANDLOCK_ACCESS_FS_MAKE_SYM (1ULL << 12)
#define LANDLOCK_ACCESS_FS_REFER (1ULL << 13)
+#define LANDLOCK_ACCESS_FS_TRUNCATE (1ULL << 14)
/* clang-format on */
#endif /* _UAPI_LINUX_LANDLOCK_H */
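
As the updated documentation notes, LANDLOCK_ACCESS_FS_TRUNCATE (available from ABI version 3) must be both handled by the ruleset and granted on a hierarchy for truncation to keep working there. A hedged sketch of a ruleset that handles writes and truncation and then grants them beneath one directory, assuming a libc that exposes the SYS_landlock_* syscall numbers; error handling is trimmed:

#include <fcntl.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/landlock.h>

/* Allow writing and truncating beneath one directory, deny it elsewhere. */
static int allow_write_truncate(const char *dir)
{
	struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_WRITE_FILE |
				     LANDLOCK_ACCESS_FS_TRUNCATE,
	};
	struct landlock_path_beneath_attr path_beneath = {
		.allowed_access = LANDLOCK_ACCESS_FS_WRITE_FILE |
				  LANDLOCK_ACCESS_FS_TRUNCATE,
		.parent_fd = open(dir, O_PATH | O_CLOEXEC),
	};
	int ruleset_fd;

	ruleset_fd = syscall(SYS_landlock_create_ruleset, &ruleset_attr,
			     sizeof(ruleset_attr), 0);
	syscall(SYS_landlock_add_rule, ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
		&path_beneath, 0);
	close(path_beneath.parent_fd);

	/* Mandatory before enforcing a ruleset without CAP_SYS_ADMIN. */
	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	return syscall(SYS_landlock_restrict_self, ruleset_fd, 0);
}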
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 57b8e2ffb1dd..82a03ea954af 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -1119,6 +1119,7 @@
#define PCI_DOE_STATUS_DATA_OBJECT_READY 0x80000000 /* Data Object Ready */
#define PCI_DOE_WRITE 0x10 /* DOE Write Data Mailbox Register */
#define PCI_DOE_READ 0x14 /* DOE Read Data Mailbox Register */
+#define PCI_DOE_CAP_SIZEOF 0x18 /* Size of DOE register block */
/* DOE Data Object - note not actually registers */
#define PCI_DOE_DATA_OBJECT_HEADER_1_VID 0x0000ffff
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 85be78e0e7f6..ccb7f5dad59b 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -1337,7 +1337,7 @@ union perf_mem_data_src {
#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
/* 5-0x8 available */
-#define PERF_MEM_LVLNUM_EXTN_MEM 0x09 /* Extension memory */
+#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */
#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB */
diff --git a/include/uapi/linux/rkisp1-config.h b/include/uapi/linux/rkisp1-config.h
index 583ca0d9a79d..730673ecc63d 100644
--- a/include/uapi/linux/rkisp1-config.h
+++ b/include/uapi/linux/rkisp1-config.h
@@ -117,7 +117,46 @@
/*
* Defect Pixel Cluster Correction
*/
-#define RKISP1_CIF_ISP_DPCC_METHODS_MAX 3
+#define RKISP1_CIF_ISP_DPCC_METHODS_MAX 3
+
+#define RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE (1U << 2)
+
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_G_CENTER (1U << 0)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_RB_CENTER (1U << 1)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_G_3X3 (1U << 2)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_RB_3X3 (1U << 3)
+
+/* 0-2 for sets 1-3 */
+#define RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_SET(n) ((n) << 0)
+#define RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_FIX_SET (1U << 3)
+
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_GREEN_ENABLE (1U << 0)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_GREEN_ENABLE (1U << 1)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_GREEN_ENABLE (1U << 2)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_GREEN_ENABLE (1U << 3)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_GREEN_ENABLE (1U << 4)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_RED_BLUE_ENABLE (1U << 8)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_RED_BLUE_ENABLE (1U << 9)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_RED_BLUE_ENABLE (1U << 10)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_RED_BLUE_ENABLE (1U << 11)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_RED_BLUE_ENABLE (1U << 12)
+
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_RB(v) ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_RB(v) ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_RB(v) ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_RB(v) ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_RB(v) ((v) << 8)
+
+#define RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_G(n, v) ((v) << ((n) * 4))
+#define RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_RB(n, v) ((v) << ((n) * 4 + 2))
+
+#define RKISP1_CIF_ISP_DPCC_RND_OFFS_n_G(n, v) ((v) << ((n) * 4))
+#define RKISP1_CIF_ISP_DPCC_RND_OFFS_n_RB(n, v) ((v) << ((n) * 4 + 2))
/*
* Denoising pre filter
@@ -249,16 +288,20 @@ struct rkisp1_cif_isp_bls_config {
};
/**
- * struct rkisp1_cif_isp_dpcc_methods_config - Methods Configuration used by DPCC
+ * struct rkisp1_cif_isp_dpcc_methods_config - DPCC methods set configuration
*
- * Methods Configuration used by Defect Pixel Cluster Correction
+ * This structure stores the configuration of one set of methods for the DPCC
+ * algorithm. Multiple methods can be selected in each set (independently for
+ * the Green and Red/Blue components) through the @method field; the result is
+ * the logical AND of all enabled methods. The remaining fields set thresholds
+ * and factors for each method.
*
- * @method: Method enable bits
- * @line_thresh: Line threshold
- * @line_mad_fac: Line MAD factor
- * @pg_fac: Peak gradient factor
- * @rnd_thresh: Rank Neighbor Difference threshold
- * @rg_fac: Rank gradient factor
+ * @method: Method enable bits (RKISP1_CIF_ISP_DPCC_METHODS_SET_*)
+ * @line_thresh: Line threshold (RKISP1_CIF_ISP_DPCC_LINE_THRESH_*)
+ * @line_mad_fac: Line Mean Absolute Difference factor (RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_*)
+ * @pg_fac: Peak gradient factor (RKISP1_CIF_ISP_DPCC_PG_FAC_*)
+ * @rnd_thresh: Rank Neighbor Difference threshold (RKISP1_CIF_ISP_DPCC_RND_THRESH_*)
+ * @rg_fac: Rank gradient factor (RKISP1_CIF_ISP_DPCC_RG_FAC_*)
*/
struct rkisp1_cif_isp_dpcc_methods_config {
__u32 method;
@@ -272,14 +315,16 @@ struct rkisp1_cif_isp_dpcc_methods_config {
/**
* struct rkisp1_cif_isp_dpcc_config - Configuration used by DPCC
*
- * Configuration used by Defect Pixel Cluster Correction
+ * Configuration used by Defect Pixel Cluster Correction. Three sets of methods
+ * can be configured and selected through the @set_use field. The result is the
+ * logical OR of all enabled sets.
*
- * @mode: dpcc output mode
- * @output_mode: whether use hard coded methods
- * @set_use: stage1 methods set
- * @methods: methods config
- * @ro_limits: rank order limits
- * @rnd_offs: differential rank offsets for rank neighbor difference
+ * @mode: DPCC mode (RKISP1_CIF_ISP_DPCC_MODE_*)
+ * @output_mode: Interpolation output mode (RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_*)
+ * @set_use: Methods sets selection (RKISP1_CIF_ISP_DPCC_SET_USE_*)
+ * @methods: Methods sets configuration
+ * @ro_limits: Rank order limits (RKISP1_CIF_ISP_DPCC_RO_LIMITS_*)
+ * @rnd_offs: Differential rank offsets for rank neighbor difference (RKISP1_CIF_ISP_DPCC_RND_OFFS_*)
*/
struct rkisp1_cif_isp_dpcc_config {
__u32 mode;
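
With the bit-layout macros above exported, the per-set method configuration described in the updated kernel-doc can be built symbolically instead of hand-packing bit fields. A hedged sketch of filling one methods set; the enabled methods and the threshold/factor values are arbitrary placeholders, not recommended tunings:

#include <linux/rkisp1-config.h>

static void fill_dpcc_methods_set(struct rkisp1_cif_isp_dpcc_methods_config *m)
{
	/* Enable the peak-gradient and line checks for both components. */
	m->method = RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_GREEN_ENABLE |
		    RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_GREEN_ENABLE |
		    RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_RED_BLUE_ENABLE |
		    RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_RED_BLUE_ENABLE;

	/* Placeholder thresholds/factors, packed per component. */
	m->line_thresh = RKISP1_CIF_ISP_DPCC_LINE_THRESH_G(8) |
			 RKISP1_CIF_ISP_DPCC_LINE_THRESH_RB(8);
	m->line_mad_fac = RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_G(4) |
			  RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_RB(4);
	m->pg_fac = RKISP1_CIF_ISP_DPCC_PG_FAC_G(8) |
		    RKISP1_CIF_ISP_DPCC_PG_FAC_RB(8);
}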
diff --git a/include/uapi/linux/tdx-guest.h b/include/uapi/linux/tdx-guest.h
new file mode 100644
index 000000000000..a6a2098c08ff
--- /dev/null
+++ b/include/uapi/linux/tdx-guest.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Userspace interface for TDX guest driver
+ *
+ * Copyright (C) 2022 Intel Corporation
+ */
+
+#ifndef _UAPI_LINUX_TDX_GUEST_H_
+#define _UAPI_LINUX_TDX_GUEST_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* Length of the REPORTDATA used in TDG.MR.REPORT TDCALL */
+#define TDX_REPORTDATA_LEN 64
+
+/* Length of TDREPORT used in TDG.MR.REPORT TDCALL */
+#define TDX_REPORT_LEN 1024
+
+/**
+ * struct tdx_report_req - Request struct for TDX_CMD_GET_REPORT0 IOCTL.
+ *
+ * @reportdata: User buffer with REPORTDATA to be included into TDREPORT.
+ *              Typically this is a nonce provided by the attestation
+ *              service, so that the generated TDREPORT can be uniquely verified.
+ * @tdreport: User buffer to store TDREPORT output from TDCALL[TDG.MR.REPORT].
+ */
+struct tdx_report_req {
+ __u8 reportdata[TDX_REPORTDATA_LEN];
+ __u8 tdreport[TDX_REPORT_LEN];
+};
+
+/*
+ * TDX_CMD_GET_REPORT0 - Get TDREPORT0 (a.k.a. TDREPORT subtype 0) using
+ * TDCALL[TDG.MR.REPORT]
+ *
+ * Return 0 on success, -EIO on TDCALL execution failure, and
+ * standard errno on other general error cases.
+ */
+#define TDX_CMD_GET_REPORT0 _IOWR('T', 1, struct tdx_report_req)
+
+#endif /* _UAPI_LINUX_TDX_GUEST_H_ */
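
A hedged userspace sketch of requesting a TDREPORT0 through this interface; /dev/tdx_guest is assumed to be the misc device node registered by the TDX guest driver, and nonce generation plus error reporting are trimmed:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tdx-guest.h>

static int get_tdreport0(const uint8_t *nonce, size_t nonce_len,
			 uint8_t tdreport[TDX_REPORT_LEN])
{
	struct tdx_report_req req = { 0 };
	int fd, ret;

	/* REPORTDATA is typically a verifier-supplied nonce. */
	memcpy(req.reportdata, nonce,
	       nonce_len < TDX_REPORTDATA_LEN ? nonce_len : TDX_REPORTDATA_LEN);

	fd = open("/dev/tdx_guest", O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, TDX_CMD_GET_REPORT0, &req);
	if (!ret)
		memcpy(tdreport, req.tdreport, TDX_REPORT_LEN);

	close(fd);
	return ret;
}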
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 86cae23cc446..29da1f4b4578 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -1601,7 +1601,8 @@ struct v4l2_bt_timings {
((bt)->width + V4L2_DV_BT_BLANKING_WIDTH(bt))
#define V4L2_DV_BT_BLANKING_HEIGHT(bt) \
((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \
- (bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch)
+ ((bt)->interlaced ? \
+ ((bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch) : 0))
#define V4L2_DV_BT_FRAME_HEIGHT(bt) \
((bt)->height + V4L2_DV_BT_BLANKING_HEIGHT(bt))
diff --git a/include/xen/arm/xen-ops.h b/include/xen/arm/xen-ops.h
index b0766a660338..70073f5a2b54 100644
--- a/include/xen/arm/xen-ops.h
+++ b/include/xen/arm/xen-ops.h
@@ -8,9 +8,7 @@
static inline void xen_setup_dma_ops(struct device *dev)
{
#ifdef CONFIG_XEN
- if (xen_is_grant_dma_device(dev))
- xen_grant_setup_dma_ops(dev);
- else if (xen_swiotlb_detect())
+ if (xen_swiotlb_detect())
dev->dma_ops = &xen_swiotlb_dma_ops;
#endif
}
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index a34f4271a2e9..47f11bec5e90 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -216,26 +216,10 @@ static inline void xen_preemptible_hcall_end(void) { }
#endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
#ifdef CONFIG_XEN_GRANT_DMA_OPS
-void xen_grant_setup_dma_ops(struct device *dev);
-bool xen_is_grant_dma_device(struct device *dev);
-bool xen_virtio_mem_acc(struct virtio_device *dev);
bool xen_virtio_restricted_mem_acc(struct virtio_device *dev);
#else
-static inline void xen_grant_setup_dma_ops(struct device *dev)
-{
-}
-static inline bool xen_is_grant_dma_device(struct device *dev)
-{
- return false;
-}
-
struct virtio_device;
-static inline bool xen_virtio_mem_acc(struct virtio_device *dev)
-{
- return false;
-}
-
static inline bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
{
return false;
diff --git a/include/xen/xen.h b/include/xen/xen.h
index a99bab817523..7adf59837c25 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -2,6 +2,8 @@
#ifndef _XEN_XEN_H
#define _XEN_XEN_H
+#include <linux/types.h>
+
enum xen_domain_type {
XEN_NATIVE, /* running on bare hardware */
XEN_PV_DOMAIN, /* running in a PV domain */
@@ -25,8 +27,6 @@ extern bool xen_pvh;
#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
#define xen_pvh_domain() (xen_pvh)
-#include <linux/types.h>
-
extern uint32_t xen_start_flags;
#include <xen/interface/hvm/start_info.h>