Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acpi_bus.h | 8
-rw-r--r--  include/asm-generic/barrier.h | 8
-rw-r--r--  include/asm-generic/bug.h | 5
-rw-r--r--  include/asm-generic/export.h | 11
-rw-r--r--  include/asm-generic/hyperv-tlfs.h | 19
-rw-r--r--  include/asm-generic/mshyperv.h | 14
-rw-r--r--  include/asm-generic/sections.h | 5
-rw-r--r--  include/crypto/acompress.h | 73
-rw-r--r--  include/crypto/aead.h | 21
-rw-r--r--  include/crypto/aes.h | 5
-rw-r--r--  include/crypto/akcipher.h | 78
-rw-r--r--  include/crypto/algapi.h | 3
-rw-r--r--  include/crypto/ecc_curve.h | 2
-rw-r--r--  include/crypto/ecdh.h | 1
-rw-r--r--  include/crypto/hash.h | 22
-rw-r--r--  include/crypto/internal/acompress.h | 1
-rw-r--r--  include/crypto/internal/cryptouser.h | 16
-rw-r--r--  include/crypto/internal/ecc.h | 27
-rw-r--r--  include/crypto/internal/scompress.h | 1
-rw-r--r--  include/crypto/kpp.h | 58
-rw-r--r--  include/crypto/rng.h | 51
-rw-r--r--  include/crypto/skcipher.h | 25
-rw-r--r--  include/dt-bindings/arm/qcom,ids.h | 1
-rw-r--r--  include/dt-bindings/clock/google,gs101.h | 116
-rw-r--r--  include/dt-bindings/clock/r8a73a4-clock.h | 4
-rw-r--r--  include/keys/trusted_dcp.h | 11
-rw-r--r--  include/keys/trusted_tpm.h | 2
-rw-r--r--  include/kvm/arm_pmu.h | 2
-rw-r--r--  include/linux/anon_inodes.h | 5
-rw-r--r--  include/linux/arm_ffa.h | 27
-rw-r--r--  include/linux/bio.h | 11
-rw-r--r--  include/linux/blk-mq.h | 85
-rw-r--r--  include/linux/blk_types.h | 36
-rw-r--r--  include/linux/blkdev.h | 129
-rw-r--r--  include/linux/bootconfig.h | 8
-rw-r--r--  include/linux/bpf.h | 16
-rw-r--r--  include/linux/bus/stm32_firewall_device.h | 142
-rw-r--r--  include/linux/cc_platform.h | 12
-rw-r--r--  include/linux/clk.h | 5
-rw-r--r--  include/linux/cmpxchg-emu.h | 15
-rw-r--r--  include/linux/compiler.h | 2
-rw-r--r--  include/linux/compiler_types.h | 18
-rw-r--r--  include/linux/coredump.h | 2
-rw-r--r--  include/linux/cpu.h | 11
-rw-r--r--  include/linux/device.h | 1
-rw-r--r--  include/linux/elf.h | 2
-rw-r--r--  include/linux/energy_model.h | 1
-rw-r--r--  include/linux/etherdevice.h | 25
-rw-r--r--  include/linux/file.h | 1
-rw-r--r--  include/linux/filter.h | 1
-rw-r--r--  include/linux/firmware/qcom/qcom_qseecom.h | 55
-rw-r--r--  include/linux/firmware/qcom/qcom_scm.h | 10
-rw-r--r--  include/linux/fortify-string.h | 9
-rw-r--r--  include/linux/framer/framer.h | 4
-rw-r--r--  include/linux/fs.h | 94
-rw-r--r--  include/linux/fs_parser.h | 4
-rw-r--r--  include/linux/fscache.h | 22
-rw-r--r--  include/linux/gfp_types.h | 2
-rw-r--r--  include/linux/gpio/driver.h | 17
-rw-r--r--  include/linux/gpio/property.h | 1
-rw-r--r--  include/linux/hugetlb.h | 8
-rw-r--r--  include/linux/hyperv.h | 1
-rw-r--r--  include/linux/interrupt.h | 3
-rw-r--r--  include/linux/io_uring.h | 6
-rw-r--r--  include/linux/io_uring/cmd.h | 24
-rw-r--r--  include/linux/io_uring/net.h | 18
-rw-r--r--  include/linux/io_uring_types.h | 22
-rw-r--r--  include/linux/irqflags.h | 2
-rw-r--r--  include/linux/jump_label.h | 3
-rw-r--r--  include/linux/libata.h | 1
-rw-r--r--  include/linux/mm.h | 18
-rw-r--r--  include/linux/mman.h | 8
-rw-r--r--  include/linux/namei.h | 1
-rw-r--r--  include/linux/netfs.h | 197
-rw-r--r--  include/linux/oid_registry.h | 5
-rw-r--r--  include/linux/page-flags.h | 144
-rw-r--r--  include/linux/pagemap.h | 2
-rw-r--r--  include/linux/pagevec.h | 4
-rw-r--r--  include/linux/pci_ids.h | 1
-rw-r--r--  include/linux/peci.h | 1
-rw-r--r--  include/linux/perf_event.h | 37
-rw-r--r--  include/linux/platform_data/ti-sysc.h | 1
-rw-r--r--  include/linux/profile.h | 5
-rw-r--r--  include/linux/qat/qat_mig_dev.h | 31
-rw-r--r--  include/linux/randomize_kstack.h | 2
-rw-r--r--  include/linux/rcupdate.h | 22
-rw-r--r--  include/linux/rcupdate_wait.h | 18
-rw-r--r--  include/linux/regmap.h | 8
-rw-r--r--  include/linux/regulator/consumer.h | 4
-rw-r--r--  include/linux/rwbase_rt.h | 4
-rw-r--r--  include/linux/rwsem.h | 6
-rw-r--r--  include/linux/scmi_protocol.h | 86
-rw-r--r--  include/linux/secretmem.h | 4
-rw-r--r--  include/linux/seq_file.h | 13
-rw-r--r--  include/linux/shm.h | 5
-rw-r--r--  include/linux/shmem_fs.h | 9
-rw-r--r--  include/linux/skbuff.h | 43
-rw-r--r--  include/linux/skmsg.h | 2
-rw-r--r--  include/linux/slab.h | 4
-rw-r--r--  include/linux/soc/mediatek/mtk-cmdq.h | 139
-rw-r--r--  include/linux/sockptr.h | 25
-rw-r--r--  include/linux/srcutiny.h | 2
-rw-r--r--  include/linux/stackdepot.h | 7
-rw-r--r--  include/linux/stat.h | 1
-rw-r--r--  include/linux/string.h | 62
-rw-r--r--  include/linux/sunrpc/svc_rdma.h | 13
-rw-r--r--  include/linux/swapops.h | 65
-rw-r--r--  include/linux/tee_core.h | 306
-rw-r--r--  include/linux/tee_drv.h | 285
-rw-r--r--  include/linux/timecounter.h | 11
-rw-r--r--  include/linux/timekeeping.h | 49
-rw-r--r--  include/linux/timer.h | 12
-rw-r--r--  include/linux/tpm.h | 316
-rw-r--r--  include/linux/tracefs.h | 3
-rw-r--r--  include/linux/tty.h | 14
-rw-r--r--  include/linux/u64_stats_sync.h | 9
-rw-r--r--  include/linux/udp.h | 30
-rw-r--r--  include/linux/uio.h | 10
-rw-r--r--  include/linux/virtio.h | 7
-rw-r--r--  include/net/9p/client.h | 2
-rw-r--r--  include/net/addrconf.h | 4
-rw-r--r--  include/net/af_unix.h | 3
-rw-r--r--  include/net/bluetooth/bluetooth.h | 9
-rw-r--r--  include/net/bluetooth/hci.h | 9
-rw-r--r--  include/net/bluetooth/hci_core.h | 8
-rw-r--r--  include/net/cfg80211.h | 2
-rw-r--r--  include/net/gro.h | 9
-rw-r--r--  include/net/inet_connection_sock.h | 1
-rw-r--r--  include/net/ip_tunnels.h | 33
-rw-r--r--  include/net/mac80211.h | 3
-rw-r--r--  include/net/macsec.h | 2
-rw-r--r--  include/net/mana/mana.h | 1
-rw-r--r--  include/net/netfilter/nf_flow_table.h | 12
-rw-r--r--  include/net/netfilter/nf_tables.h | 14
-rw-r--r--  include/net/sch_generic.h | 1
-rw-r--r--  include/net/sock.h | 45
-rw-r--r--  include/net/tls.h | 3
-rw-r--r--  include/net/udp.h | 9
-rw-r--r--  include/net/xdp_sock.h | 2
-rw-r--r--  include/net/xfrm.h | 3
-rw-r--r--  include/scsi/scsi_driver.h | 1
-rw-r--r--  include/scsi/scsi_host.h | 1
-rw-r--r--  include/soc/fsl/dcp.h | 20
-rw-r--r--  include/soc/qcom/cmd-db.h | 10
-rw-r--r--  include/sound/cs35l56.h | 2
-rw-r--r--  include/sound/emu10k1.h | 7
-rw-r--r--  include/sound/hdaudio_ext.h | 3
-rw-r--r--  include/sound/intel-nhlt.h | 10
-rw-r--r--  include/sound/tas2781-tlv.h | 2
-rw-r--r--  include/trace/events/mmflags.h | 1
-rw-r--r--  include/trace/events/netfs.h | 250
-rw-r--r--  include/trace/events/rcu.h | 27
-rw-r--r--  include/trace/events/rpcgss.h | 4
-rw-r--r--  include/trace/events/sched.h | 35
-rw-r--r--  include/trace/events/scmi.h | 6
-rw-r--r--  include/uapi/drm/etnaviv_drm.h | 5
-rw-r--r--  include/uapi/linux/cryptouser.h | 30
-rw-r--r--  include/uapi/linux/fcntl.h | 14
-rw-r--r--  include/uapi/linux/io_uring.h | 38
-rw-r--r--  include/uapi/linux/kfd_ioctl.h | 17
-rw-r--r--  include/uapi/linux/stat.h | 4
-rw-r--r--  include/uapi/linux/stddef.h | 8
-rw-r--r--  include/uapi/linux/tee.h | 1
-rw-r--r--  include/uapi/linux/vdpa.h | 6
-rw-r--r--  include/uapi/linux/vhost.h | 15
-rw-r--r--  include/uapi/linux/xfrm.h | 2
-rw-r--r--  include/uapi/scsi/scsi_bsg_mpi3mr.h | 2
-rw-r--r--  include/ufs/ufshcd.h | 1
-rw-r--r--  include/vdso/datapage.h | 8
169 files changed, 2775 insertions, 1432 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 5de954e2b18a..e7796f373d0d 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -911,17 +911,19 @@ static inline bool acpi_int_uid_match(struct acpi_device *adev, u64 uid2)
* acpi_dev_hid_uid_match - Match device by supplied HID and UID
* @adev: ACPI device to match.
* @hid2: Hardware ID of the device.
- * @uid2: Unique ID of the device, pass 0 or NULL to not check _UID.
+ * @uid2: Unique ID of the device, pass NULL to not check _UID.
*
* Matches HID and UID in @adev with given @hid2 and @uid2. Absence of @uid2
* will be treated as a match. If user wants to validate @uid2, it should be
* done before calling this function.
*
- * Returns: %true if matches or @uid2 is 0 or NULL, %false otherwise.
+ * Returns: %true if matches or @uid2 is NULL, %false otherwise.
*/
#define acpi_dev_hid_uid_match(adev, hid2, uid2) \
(acpi_dev_hid_match(adev, hid2) && \
- (!(uid2) || acpi_dev_uid_match(adev, uid2)))
+ /* Distinguish integer 0 from NULL @uid2 */ \
+ (_Generic(uid2, ACPI_STR_TYPES(!(uid2)), default: 0) || \
+ acpi_dev_uid_match(adev, uid2)))
void acpi_dev_clear_dependencies(struct acpi_device *supplier);
bool acpi_dev_ready_for_enumeration(const struct acpi_device *device);
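A minimal user-space sketch of the _Generic selection this macro now relies on (a hypothetical demo, not the kernel macro itself): a string-typed @uid2 selects a pointer branch where NULL means "skip the _UID check", while an integer 0 falls through to default and is no longer treated as absent.

	#include <stdio.h>

	#define uid_absent(uid)				\
		_Generic((uid),				\
			 char *:       !(uid),		\
			 const char *: !(uid),		\
			 default:      0)

	int main(void)
	{
		const char *uid = NULL;

		printf("%d\n", uid_absent(uid)); /* 1: NULL string, _UID check skipped */
		printf("%d\n", uid_absent("0")); /* 0: a real UID string must match */
		printf("%d\n", uid_absent(0));   /* 0: integer 0 is no longer "absent" */
		return 0;
	}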
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 0c0695763bea..d4f581c1e21d 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -294,5 +294,13 @@ do { \
#define io_stop_wc() do { } while (0)
#endif
+/*
+ * Architectures that guarantee an implicit smp_mb() in switch_mm()
+ * can override smp_mb__after_switch_mm.
+ */
+#ifndef smp_mb__after_switch_mm
+# define smp_mb__after_switch_mm() smp_mb()
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
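A hypothetical arch-side sketch of the override this hook enables: an architecture whose switch_mm() already implies a full barrier could define, in its own asm/barrier.h before the generic header is pulled in (name and placement assumed for illustration),

	#define smp_mb__after_switch_mm()	barrier()

leaving only a compiler barrier on that architecture.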
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 6e794420bd39..b7de3a4eade1 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -156,7 +156,10 @@ extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
#else /* !CONFIG_BUG */
#ifndef HAVE_ARCH_BUG
-#define BUG() do {} while (1)
+#define BUG() do { \
+ do {} while (1); \
+ unreachable(); \
+} while (0)
#endif
#ifndef HAVE_ARCH_BUG_ON
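Why the added unreachable() matters, as a hypothetical sketch: compilers that assume loops make forward progress (e.g. under -ffinite-loops) cannot treat the bare empty loop as non-returning, so a caller like this could trip a missing-return warning without it.

	int pick_positive(int x)
	{
		if (x > 0)
			return x;
		BUG();	/* unreachable() tells the compiler this path never returns */
	}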
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
deleted file mode 100644
index 570cd4da7210..000000000000
--- a/include/asm-generic/export.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef __ASM_GENERIC_EXPORT_H
-#define __ASM_GENERIC_EXPORT_H
-
-/*
- * <asm/export.h> and <asm-generic/export.h> are deprecated.
- * Please include <linux/export.h> directly.
- */
-#include <linux/export.h>
-
-#endif
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index 87e3d49a4e29..814207e7c37f 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -512,13 +512,9 @@ struct hv_proximity_domain_flags {
u32 proximity_info_valid : 1;
} __packed;
-/* Not a union in windows but useful for zeroing */
-union hv_proximity_domain_info {
- struct {
- u32 domain_id;
- struct hv_proximity_domain_flags flags;
- };
- u64 as_uint64;
+struct hv_proximity_domain_info {
+ u32 domain_id;
+ struct hv_proximity_domain_flags flags;
} __packed;
struct hv_lp_startup_status {
@@ -532,14 +528,13 @@ struct hv_lp_startup_status {
} __packed;
/* HvAddLogicalProcessor hypercall */
-struct hv_add_logical_processor_in {
+struct hv_input_add_logical_processor {
u32 lp_index;
u32 apic_id;
- union hv_proximity_domain_info proximity_domain_info;
- u64 flags;
+ struct hv_proximity_domain_info proximity_domain_info;
} __packed;
-struct hv_add_logical_processor_out {
+struct hv_output_add_logical_processor {
struct hv_lp_startup_status startup_status;
} __packed;
@@ -560,7 +555,7 @@ struct hv_create_vp {
u8 padding[3];
u8 subnode_type;
u64 subnode_id;
- union hv_proximity_domain_info proximity_domain_info;
+ struct hv_proximity_domain_info proximity_domain_info;
u64 flags;
} __packed;
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index 99935779682d..8fe7aaab2599 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
+#include <acpi/acpi_numa.h>
#include <linux/cpumask.h>
#include <linux/nmi.h>
#include <asm/ptrace.h>
@@ -67,6 +68,19 @@ extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
bool hv_isolation_type_snp(void);
bool hv_isolation_type_tdx(void);
+static inline struct hv_proximity_domain_info hv_numa_node_to_pxm_info(int node)
+{
+ struct hv_proximity_domain_info pxm_info = {};
+
+ if (node != NUMA_NO_NODE) {
+ pxm_info.domain_id = node_to_pxm(node);
+ pxm_info.flags.proximity_info_valid = 1;
+ pxm_info.flags.proximity_preferred = 1;
+ }
+
+ return pxm_info;
+}
+
/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
static inline int hv_result(u64 status)
{
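A hypothetical caller sketch for the helper above, filling the renamed hypercall input (hypercall plumbing and the input-page, lp_index, apic_id and cpu names are assumed for illustration):

	struct hv_input_add_logical_processor *input = hypercall_input_page;

	input->lp_index = lp_index;
	input->apic_id = apic_id;
	input->proximity_domain_info = hv_numa_node_to_pxm_info(cpu_to_node(cpu));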
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index db13bb620f52..c768de6f19a9 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -180,6 +180,11 @@ static inline bool is_kernel_rodata(unsigned long addr)
addr < (unsigned long)__end_rodata;
}
+static inline bool is_kernel_ro_after_init(unsigned long addr)
+{
+ return addr >= (unsigned long)__start_ro_after_init &&
+ addr < (unsigned long)__end_ro_after_init;
+}
/**
* is_kernel_inittext - checks if the pointer address is located in the
* .init.text section
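A hypothetical check sketch using the new helper alongside the existing one, e.g. in code that must refuse to patch write-protected data:

	static bool can_patch(unsigned long addr)
	{
		return !is_kernel_rodata(addr) && !is_kernel_ro_after_init(addr);
	}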
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index 80e243611fe2..54937b615239 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -56,31 +56,7 @@ struct crypto_acomp {
struct crypto_tfm base;
};
-/*
- * struct crypto_istat_compress - statistics for compress algorithm
- * @compress_cnt: number of compress requests
- * @compress_tlen: total data size handled by compress requests
- * @decompress_cnt: number of decompress requests
- * @decompress_tlen: total data size handled by decompress requests
- * @err_cnt: number of error for compress requests
- */
-struct crypto_istat_compress {
- atomic64_t compress_cnt;
- atomic64_t compress_tlen;
- atomic64_t decompress_cnt;
- atomic64_t decompress_tlen;
- atomic64_t err_cnt;
-};
-
-#ifdef CONFIG_CRYPTO_STATS
-#define COMP_ALG_COMMON_STATS struct crypto_istat_compress stat;
-#else
-#define COMP_ALG_COMMON_STATS
-#endif
-
#define COMP_ALG_COMMON { \
- COMP_ALG_COMMON_STATS \
- \
struct crypto_alg base; \
}
struct comp_alg_common COMP_ALG_COMMON;
@@ -261,27 +237,6 @@ static inline void acomp_request_set_params(struct acomp_req *req,
req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
}
-static inline struct crypto_istat_compress *comp_get_stat(
- struct comp_alg_common *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
- return &alg->stat;
-#else
- return NULL;
-#endif
-}
-
-static inline int crypto_comp_errstat(struct comp_alg_common *alg, int err)
-{
- if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
- return err;
-
- if (err && err != -EINPROGRESS && err != -EBUSY)
- atomic64_inc(&comp_get_stat(alg)->err_cnt);
-
- return err;
-}
-
/**
* crypto_acomp_compress() -- Invoke asynchronous compress operation
*
@@ -293,19 +248,7 @@ static inline int crypto_comp_errstat(struct comp_alg_common *alg, int err)
*/
static inline int crypto_acomp_compress(struct acomp_req *req)
{
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- struct comp_alg_common *alg;
-
- alg = crypto_comp_alg_common(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_compress *istat = comp_get_stat(alg);
-
- atomic64_inc(&istat->compress_cnt);
- atomic64_add(req->slen, &istat->compress_tlen);
- }
-
- return crypto_comp_errstat(alg, tfm->compress(req));
+ return crypto_acomp_reqtfm(req)->compress(req);
}
/**
@@ -319,19 +262,7 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
*/
static inline int crypto_acomp_decompress(struct acomp_req *req)
{
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- struct comp_alg_common *alg;
-
- alg = crypto_comp_alg_common(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_compress *istat = comp_get_stat(alg);
-
- atomic64_inc(&istat->decompress_cnt);
- atomic64_add(req->slen, &istat->decompress_tlen);
- }
-
- return crypto_comp_errstat(alg, tfm->decompress(req));
+ return crypto_acomp_reqtfm(req)->decompress(req);
}
#endif
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 51382befbe37..0e8a41638678 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -101,22 +101,6 @@ struct aead_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
-/*
- * struct crypto_istat_aead - statistics for AEAD algorithm
- * @encrypt_cnt: number of encrypt requests
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @err_cnt: number of error for AEAD requests
- */
-struct crypto_istat_aead {
- atomic64_t encrypt_cnt;
- atomic64_t encrypt_tlen;
- atomic64_t decrypt_cnt;
- atomic64_t decrypt_tlen;
- atomic64_t err_cnt;
-};
-
/**
* struct aead_alg - AEAD cipher definition
* @maxauthsize: Set the maximum authentication tag size supported by the
@@ -135,7 +119,6 @@ struct crypto_istat_aead {
* @setkey: see struct skcipher_alg
* @encrypt: see struct skcipher_alg
* @decrypt: see struct skcipher_alg
- * @stat: statistics for AEAD algorithm
* @ivsize: see struct skcipher_alg
* @chunksize: see struct skcipher_alg
* @init: Initialize the cryptographic transformation object. This function
@@ -162,10 +145,6 @@ struct aead_alg {
int (*init)(struct crypto_aead *tfm);
void (*exit)(struct crypto_aead *tfm);
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_istat_aead stat;
-#endif
-
unsigned int ivsize;
unsigned int maxauthsize;
unsigned int chunksize;
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index 2090729701ab..9339da7c20a8 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -87,4 +87,9 @@ void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
extern const u8 crypto_aes_sbox[];
extern const u8 crypto_aes_inv_sbox[];
+void aescfb_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ int len, const u8 iv[AES_BLOCK_SIZE]);
+void aescfb_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ int len, const u8 iv[AES_BLOCK_SIZE]);
+
#endif
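A hypothetical one-shot sketch of the new AES-CFB library calls (buffer and key names assumed; a real caller would use a random per-message IV and check the aes_expandkey() return value):

	struct crypto_aes_ctx ctx;
	u8 key[AES_KEYSIZE_128] = {};
	u8 iv[AES_BLOCK_SIZE] = {};
	u8 src[64] = "example payload", dst[64];

	aes_expandkey(&ctx, key, sizeof(key));
	aescfb_encrypt(&ctx, dst, src, sizeof(src), iv);
	aescfb_decrypt(&ctx, src, dst, sizeof(src), iv);	/* round-trips */
	memzero_explicit(&ctx, sizeof(ctx));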
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 31c111bebb68..18a10cad07aa 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -54,26 +54,6 @@ struct crypto_akcipher {
struct crypto_tfm base;
};
-/*
- * struct crypto_istat_akcipher - statistics for akcipher algorithm
- * @encrypt_cnt: number of encrypt requests
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @verify_cnt: number of verify operation
- * @sign_cnt: number of sign requests
- * @err_cnt: number of error for akcipher requests
- */
-struct crypto_istat_akcipher {
- atomic64_t encrypt_cnt;
- atomic64_t encrypt_tlen;
- atomic64_t decrypt_cnt;
- atomic64_t decrypt_tlen;
- atomic64_t verify_cnt;
- atomic64_t sign_cnt;
- atomic64_t err_cnt;
-};
-
/**
* struct akcipher_alg - generic public key algorithm
*
@@ -110,7 +90,6 @@ struct crypto_istat_akcipher {
* @exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @init, used to remove various changes set in
* @init.
- * @stat: Statistics for akcipher algorithm
*
* @base: Common crypto API algorithm data structure
*/
@@ -127,10 +106,6 @@ struct akcipher_alg {
int (*init)(struct crypto_akcipher *tfm);
void (*exit)(struct crypto_akcipher *tfm);
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_istat_akcipher stat;
-#endif
-
struct crypto_alg base;
};
@@ -302,27 +277,6 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
return alg->max_size(tfm);
}
-static inline struct crypto_istat_akcipher *akcipher_get_stat(
- struct akcipher_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
- return &alg->stat;
-#else
- return NULL;
-#endif
-}
-
-static inline int crypto_akcipher_errstat(struct akcipher_alg *alg, int err)
-{
- if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
- return err;
-
- if (err && err != -EINPROGRESS && err != -EBUSY)
- atomic64_inc(&akcipher_get_stat(alg)->err_cnt);
-
- return err;
-}
-
/**
* crypto_akcipher_encrypt() - Invoke public key encrypt operation
*
@@ -336,16 +290,8 @@ static inline int crypto_akcipher_errstat(struct akcipher_alg *alg, int err)
static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
-
- atomic64_inc(&istat->encrypt_cnt);
- atomic64_add(req->src_len, &istat->encrypt_tlen);
- }
-
- return crypto_akcipher_errstat(alg, alg->encrypt(req));
+ return crypto_akcipher_alg(tfm)->encrypt(req);
}
/**
@@ -361,16 +307,8 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
-
- atomic64_inc(&istat->decrypt_cnt);
- atomic64_add(req->src_len, &istat->decrypt_tlen);
- }
-
- return crypto_akcipher_errstat(alg, alg->decrypt(req));
+ return crypto_akcipher_alg(tfm)->decrypt(req);
}
/**
@@ -422,12 +360,8 @@ int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm,
static inline int crypto_akcipher_sign(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_inc(&akcipher_get_stat(alg)->sign_cnt);
-
- return crypto_akcipher_errstat(alg, alg->sign(req));
+ return crypto_akcipher_alg(tfm)->sign(req);
}
/**
@@ -447,12 +381,8 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
static inline int crypto_akcipher_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_inc(&akcipher_get_stat(alg)->verify_cnt);
- return crypto_akcipher_errstat(alg, alg->verify(req));
+ return crypto_akcipher_alg(tfm)->verify(req);
}
/**
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 7a4a71af653f..156de41ca760 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -61,9 +61,6 @@ struct crypto_type {
void (*show)(struct seq_file *m, struct crypto_alg *alg);
int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
void (*free)(struct crypto_instance *inst);
-#ifdef CONFIG_CRYPTO_STATS
- int (*report_stat)(struct sk_buff *skb, struct crypto_alg *alg);
-#endif
unsigned int type;
unsigned int maskclear;
diff --git a/include/crypto/ecc_curve.h b/include/crypto/ecc_curve.h
index 70964781eb68..7d90c5e82266 100644
--- a/include/crypto/ecc_curve.h
+++ b/include/crypto/ecc_curve.h
@@ -23,6 +23,7 @@ struct ecc_point {
* struct ecc_curve - definition of elliptic curve
*
* @name: Short name of the curve.
+ * @nbits: The number of bits of a curve.
* @g: Generator point of the curve.
* @p: Prime number, if Barrett's reduction is used for this curve
* pre-calculated value 'mu' is appended to the @p after ndigits.
@@ -34,6 +35,7 @@ struct ecc_point {
*/
struct ecc_curve {
char *name;
+ u32 nbits;
struct ecc_point g;
u64 *p;
u64 *n;
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h
index a9f98078d29c..9784ecdd2fb4 100644
--- a/include/crypto/ecdh.h
+++ b/include/crypto/ecdh.h
@@ -26,6 +26,7 @@
#define ECC_CURVE_NIST_P192 0x0001
#define ECC_CURVE_NIST_P256 0x0002
#define ECC_CURVE_NIST_P384 0x0003
+#define ECC_CURVE_NIST_P521 0x0004
/**
* struct ecdh - define an ECDH private key
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 5d61f576cfc8..0014bdd81ab7 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -24,26 +24,7 @@ struct crypto_ahash;
*/
/*
- * struct crypto_istat_hash - statistics for has algorithm
- * @hash_cnt: number of hash requests
- * @hash_tlen: total data size hashed
- * @err_cnt: number of error for hash requests
- */
-struct crypto_istat_hash {
- atomic64_t hash_cnt;
- atomic64_t hash_tlen;
- atomic64_t err_cnt;
-};
-
-#ifdef CONFIG_CRYPTO_STATS
-#define HASH_ALG_COMMON_STAT struct crypto_istat_hash stat;
-#else
-#define HASH_ALG_COMMON_STAT
-#endif
-
-/*
* struct hash_alg_common - define properties of message digest
- * @stat: Statistics for hash algorithm.
* @digestsize: Size of the result of the transformation. A buffer of this size
* must be available to the @final and @finup calls, so they can
* store the resulting hash into it. For various predefined sizes,
@@ -60,8 +41,6 @@ struct crypto_istat_hash {
* information.
*/
#define HASH_ALG_COMMON { \
- HASH_ALG_COMMON_STAT \
- \
unsigned int digestsize; \
unsigned int statesize; \
\
@@ -243,7 +222,6 @@ struct shash_alg {
};
};
#undef HASH_ALG_COMMON
-#undef HASH_ALG_COMMON_STAT
struct crypto_ahash {
bool using_shash; /* Underlying algorithm is shash, not ahash */
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 4ac46bafba9d..d00392d1988e 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -31,7 +31,6 @@
* @init.
*
* @reqsize: Context size for (de)compression requests
- * @stat: Statistics for compress algorithm
* @base: Common crypto API algorithm data structure
* @calg: Common algorithm data structure shared with scomp
*/
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
deleted file mode 100644
index fd54074332f5..000000000000
--- a/include/crypto/internal/cryptouser.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/cryptouser.h>
-#include <net/netlink.h>
-
-struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
-
-#ifdef CONFIG_CRYPTO_STATS
-int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
-#else
-static inline int crypto_reportstat(struct sk_buff *in_skb,
- struct nlmsghdr *in_nlh,
- struct nlattr **attrs)
-{
- return -ENOTSUPP;
-}
-#endif
diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h
index 4f6c1a68882f..7ca1f463d1ec 100644
--- a/include/crypto/internal/ecc.h
+++ b/include/crypto/internal/ecc.h
@@ -33,7 +33,8 @@
#define ECC_CURVE_NIST_P192_DIGITS 3
#define ECC_CURVE_NIST_P256_DIGITS 4
#define ECC_CURVE_NIST_P384_DIGITS 6
-#define ECC_MAX_DIGITS (512 / 64) /* due to ecrdsa */
+#define ECC_CURVE_NIST_P521_DIGITS 9
+#define ECC_MAX_DIGITS DIV_ROUND_UP(521, 64) /* NIST P521 */
#define ECC_DIGITS_TO_BYTES_SHIFT 3
@@ -57,6 +58,27 @@ static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigit
}
/**
+ * ecc_digits_from_bytes() - Create ndigits-sized digits array from byte array
+ * @in: Input byte array
+ * @nbytes: Size of input byte array
+ * @out: Output digits array
+ * @ndigits: Number of digits to create from byte array
+ */
+static inline void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
+ u64 *out, unsigned int ndigits)
+{
+ unsigned int o = nbytes & 7;
+ __be64 msd = 0;
+
+ if (o) {
+ memcpy((u8 *)&msd + sizeof(msd) - o, in, o);
+ out[--ndigits] = be64_to_cpu(msd);
+ in += o;
+ }
+ ecc_swap_digits(in, out, ndigits);
+}
+
+/**
* ecc_is_key_valid() - Validate a given ECDH private key
*
* @curve_id: id representing the curve to use
@@ -81,7 +103,8 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
* Returns 0 if the private key was generated successfully, a negative value
* if an error occurred.
*/
-int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey);
+int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits,
+ u64 *private_key);
/**
* ecc_make_pub_key() - Compute an ECC public key
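A hypothetical usage sketch for ecc_digits_from_bytes(): a NIST P-521 scalar is 66 big-endian bytes, not a multiple of 8, so the helper folds the two leading bytes into a short most-significant digit before swapping the remaining 64 bytes:

	u8 raw[66];	/* big-endian scalar, e.g. decoded from ASN.1 */
	u64 digits[ECC_CURVE_NIST_P521_DIGITS];	/* 9 little-endian digits */

	ecc_digits_from_bytes(raw, sizeof(raw), digits, ECC_CURVE_NIST_P521_DIGITS);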
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index 858fe3965ae3..07a10fd2d321 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -27,7 +27,6 @@ struct crypto_scomp {
* @free_ctx: Function frees context allocated with alloc_ctx
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
- * @stat: Statistics for compress algorithm
* @base: Common crypto API algorithm data structure
* @calg: Common algorithm data structure shared with acomp
*/
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 1988e24a0d1d..2d9c4de57b69 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -51,20 +51,6 @@ struct crypto_kpp {
struct crypto_tfm base;
};
-/*
- * struct crypto_istat_kpp - statistics for KPP algorithm
- * @setsecret_cnt: number of setsecrey operation
- * @generate_public_key_cnt: number of generate_public_key operation
- * @compute_shared_secret_cnt: number of compute_shared_secret operation
- * @err_cnt: number of error for KPP requests
- */
-struct crypto_istat_kpp {
- atomic64_t setsecret_cnt;
- atomic64_t generate_public_key_cnt;
- atomic64_t compute_shared_secret_cnt;
- atomic64_t err_cnt;
-};
-
/**
* struct kpp_alg - generic key-agreement protocol primitives
*
@@ -87,7 +73,6 @@ struct crypto_istat_kpp {
* @exit: Undo everything @init did.
*
* @base: Common crypto API algorithm data structure
- * @stat: Statistics for KPP algorithm
*/
struct kpp_alg {
int (*set_secret)(struct crypto_kpp *tfm, const void *buffer,
@@ -100,10 +85,6 @@ struct kpp_alg {
int (*init)(struct crypto_kpp *tfm);
void (*exit)(struct crypto_kpp *tfm);
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_istat_kpp stat;
-#endif
-
struct crypto_alg base;
};
@@ -291,26 +272,6 @@ struct kpp_secret {
unsigned short len;
};
-static inline struct crypto_istat_kpp *kpp_get_stat(struct kpp_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
- return &alg->stat;
-#else
- return NULL;
-#endif
-}
-
-static inline int crypto_kpp_errstat(struct kpp_alg *alg, int err)
-{
- if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
- return err;
-
- if (err && err != -EINPROGRESS && err != -EBUSY)
- atomic64_inc(&kpp_get_stat(alg)->err_cnt);
-
- return err;
-}
-
/**
* crypto_kpp_set_secret() - Invoke kpp operation
*
@@ -329,12 +290,7 @@ static inline int crypto_kpp_errstat(struct kpp_alg *alg, int err)
static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
const void *buffer, unsigned int len)
{
- struct kpp_alg *alg = crypto_kpp_alg(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_inc(&kpp_get_stat(alg)->setsecret_cnt);
-
- return crypto_kpp_errstat(alg, alg->set_secret(tfm, buffer, len));
+ return crypto_kpp_alg(tfm)->set_secret(tfm, buffer, len);
}
/**
@@ -353,12 +309,8 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct kpp_alg *alg = crypto_kpp_alg(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_inc(&kpp_get_stat(alg)->generate_public_key_cnt);
- return crypto_kpp_errstat(alg, alg->generate_public_key(req));
+ return crypto_kpp_alg(tfm)->generate_public_key(req);
}
/**
@@ -374,12 +326,8 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct kpp_alg *alg = crypto_kpp_alg(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_inc(&kpp_get_stat(alg)->compute_shared_secret_cnt);
- return crypto_kpp_errstat(alg, alg->compute_shared_secret(req));
+ return crypto_kpp_alg(tfm)->compute_shared_secret(req);
}
/**
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index 6abe5102e5fb..5ac4388f50e1 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -15,20 +15,6 @@
struct crypto_rng;
-/*
- * struct crypto_istat_rng: statistics for RNG algorithm
- * @generate_cnt: number of RNG generate requests
- * @generate_tlen: total data size of generated data by the RNG
- * @seed_cnt: number of times the RNG was seeded
- * @err_cnt: number of error for RNG requests
- */
-struct crypto_istat_rng {
- atomic64_t generate_cnt;
- atomic64_t generate_tlen;
- atomic64_t seed_cnt;
- atomic64_t err_cnt;
-};
-
/**
* struct rng_alg - random number generator definition
*
@@ -46,7 +32,6 @@ struct crypto_istat_rng {
* size of the seed is defined with @seedsize .
* @set_ent: Set entropy that would otherwise be obtained from
* entropy source. Internal use only.
- * @stat: Statistics for rng algorithm
* @seedsize: The seed size required for a random number generator
* initialization defined with this variable. Some
* random number generators does not require a seed
@@ -63,10 +48,6 @@ struct rng_alg {
void (*set_ent)(struct crypto_rng *tfm, const u8 *data,
unsigned int len);
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_istat_rng stat;
-#endif
-
unsigned int seedsize;
struct crypto_alg base;
@@ -144,26 +125,6 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
}
-static inline struct crypto_istat_rng *rng_get_stat(struct rng_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
- return &alg->stat;
-#else
- return NULL;
-#endif
-}
-
-static inline int crypto_rng_errstat(struct rng_alg *alg, int err)
-{
- if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
- return err;
-
- if (err && err != -EINPROGRESS && err != -EBUSY)
- atomic64_inc(&rng_get_stat(alg)->err_cnt);
-
- return err;
-}
-
/**
* crypto_rng_generate() - get random number
* @tfm: cipher handle
@@ -182,17 +143,7 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
- struct rng_alg *alg = crypto_rng_alg(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_rng *istat = rng_get_stat(alg);
-
- atomic64_inc(&istat->generate_cnt);
- atomic64_add(dlen, &istat->generate_tlen);
- }
-
- return crypto_rng_errstat(alg,
- alg->generate(tfm, src, slen, dst, dlen));
+ return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
}
/**
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index c8857d7bdb37..74d47e23374e 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -65,28 +65,6 @@ struct crypto_lskcipher {
};
/*
- * struct crypto_istat_cipher - statistics for cipher algorithm
- * @encrypt_cnt: number of encrypt requests
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @err_cnt: number of error for cipher requests
- */
-struct crypto_istat_cipher {
- atomic64_t encrypt_cnt;
- atomic64_t encrypt_tlen;
- atomic64_t decrypt_cnt;
- atomic64_t decrypt_tlen;
- atomic64_t err_cnt;
-};
-
-#ifdef CONFIG_CRYPTO_STATS
-#define SKCIPHER_ALG_COMMON_STAT struct crypto_istat_cipher stat;
-#else
-#define SKCIPHER_ALG_COMMON_STAT
-#endif
-
-/*
* struct skcipher_alg_common - common properties of skcipher_alg
* @min_keysize: Minimum key size supported by the transformation. This is the
* smallest key length supported by this transformation algorithm.
@@ -103,7 +81,6 @@ struct crypto_istat_cipher {
* @chunksize: Equal to the block size except for stream ciphers such as
* CTR where it is set to the underlying block size.
* @statesize: Size of the internal state for the algorithm.
- * @stat: Statistics for cipher algorithm
* @base: Definition of a generic crypto algorithm.
*/
#define SKCIPHER_ALG_COMMON { \
@@ -113,8 +90,6 @@ struct crypto_istat_cipher {
unsigned int chunksize; \
unsigned int statesize; \
\
- SKCIPHER_ALG_COMMON_STAT \
- \
struct crypto_alg base; \
}
struct skcipher_alg_common SKCIPHER_ALG_COMMON;
diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h
index 19ac7b36f608..d040033dc8ee 100644
--- a/include/dt-bindings/arm/qcom,ids.h
+++ b/include/dt-bindings/arm/qcom,ids.h
@@ -258,6 +258,7 @@
#define QCOM_ID_QRU1000 539
#define QCOM_ID_SM8475_2 540
#define QCOM_ID_QDU1000 545
+#define QCOM_ID_X1E80100 555
#define QCOM_ID_SM8650 557
#define QCOM_ID_SM4450 568
#define QCOM_ID_QDU1010 587
diff --git a/include/dt-bindings/clock/google,gs101.h b/include/dt-bindings/clock/google,gs101.h
index 3dac3577788a..442f9e9037dc 100644
--- a/include/dt-bindings/clock/google,gs101.h
+++ b/include/dt-bindings/clock/google,gs101.h
@@ -313,6 +313,122 @@
#define CLK_APM_PLL_DIV4_APM 70
#define CLK_APM_PLL_DIV16_APM 71
+/* CMU_HSI0 */
+#define CLK_FOUT_USB_PLL 1
+#define CLK_MOUT_PLL_USB 2
+#define CLK_MOUT_HSI0_ALT_USER 3
+#define CLK_MOUT_HSI0_BUS_USER 4
+#define CLK_MOUT_HSI0_DPGTC_USER 5
+#define CLK_MOUT_HSI0_TCXO_USER 6
+#define CLK_MOUT_HSI0_USB20_USER 7
+#define CLK_MOUT_HSI0_USB31DRD_USER 8
+#define CLK_MOUT_HSI0_USBDPDBG_USER 9
+#define CLK_MOUT_HSI0_BUS 10
+#define CLK_MOUT_HSI0_USB20_REF 11
+#define CLK_MOUT_HSI0_USB31DRD 12
+#define CLK_DOUT_HSI0_USB31DRD 13
+#define CLK_GOUT_HSI0_PCLK 14
+#define CLK_GOUT_HSI0_USB31DRD_I_USB31DRD_SUSPEND_CLK_26 15
+#define CLK_GOUT_HSI0_CLK_HSI0_ALT 16
+#define CLK_GOUT_HSI0_DP_LINK_I_DP_GTC_CLK 17
+#define CLK_GOUT_HSI0_DP_LINK_I_PCLK 18
+#define CLK_GOUT_HSI0_D_TZPC_HSI0_PCLK 19
+#define CLK_GOUT_HSI0_ETR_MIU_I_ACLK 20
+#define CLK_GOUT_HSI0_ETR_MIU_I_PCLK 21
+#define CLK_GOUT_HSI0_GPC_HSI0_PCLK 22
+#define CLK_GOUT_HSI0_LHM_AXI_G_ETR_HSI0_I_CLK 23
+#define CLK_GOUT_HSI0_LHM_AXI_P_AOCHSI0_I_CLK 24
+#define CLK_GOUT_HSI0_LHM_AXI_P_HSI0_I_CLK 25
+#define CLK_GOUT_HSI0_LHS_ACEL_D_HSI0_I_CLK 26
+#define CLK_GOUT_HSI0_LHS_AXI_D_HSI0AOC_I_CLK 27
+#define CLK_GOUT_HSI0_PPMU_HSI0_AOC_ACLK 28
+#define CLK_GOUT_HSI0_PPMU_HSI0_AOC_PCLK 29
+#define CLK_GOUT_HSI0_PPMU_HSI0_BUS0_ACLK 30
+#define CLK_GOUT_HSI0_PPMU_HSI0_BUS0_PCLK 31
+#define CLK_GOUT_HSI0_CLK_HSI0_BUS_CLK 32
+#define CLK_GOUT_HSI0_SSMT_USB_ACLK 33
+#define CLK_GOUT_HSI0_SSMT_USB_PCLK 34
+#define CLK_GOUT_HSI0_SYSMMU_USB_CLK_S2 35
+#define CLK_GOUT_HSI0_SYSREG_HSI0_PCLK 36
+#define CLK_GOUT_HSI0_UASC_HSI0_CTRL_ACLK 37
+#define CLK_GOUT_HSI0_UASC_HSI0_CTRL_PCLK 38
+#define CLK_GOUT_HSI0_UASC_HSI0_LINK_ACLK 39
+#define CLK_GOUT_HSI0_UASC_HSI0_LINK_PCLK 40
+#define CLK_GOUT_HSI0_USB31DRD_ACLK_PHYCTRL 41
+#define CLK_GOUT_HSI0_USB31DRD_BUS_CLK_EARLY 42
+#define CLK_GOUT_HSI0_USB31DRD_I_USB20_PHY_REFCLK_26 43
+#define CLK_GOUT_HSI0_USB31DRD_I_USB31DRD_REF_CLK_40 44
+#define CLK_GOUT_HSI0_USB31DRD_I_USBDPPHY_REF_SOC_PLL 45
+#define CLK_GOUT_HSI0_USB31DRD_I_USBDPPHY_SCL_APB_PCLK 46
+#define CLK_GOUT_HSI0_USB31DRD_I_USBPCS_APB_CLK 47
+#define CLK_GOUT_HSI0_USB31DRD_USBDPPHY_I_ACLK 48
+#define CLK_GOUT_HSI0_USB31DRD_USBDPPHY_UDBG_I_APB_PCLK 49
+#define CLK_GOUT_HSI0_XIU_D0_HSI0_ACLK 50
+#define CLK_GOUT_HSI0_XIU_D1_HSI0_ACLK 51
+#define CLK_GOUT_HSI0_XIU_P_HSI0_ACLK 52
+
+/* CMU_HSI2 */
+#define CLK_MOUT_HSI2_BUS_USER 1
+#define CLK_MOUT_HSI2_MMC_CARD_USER 2
+#define CLK_MOUT_HSI2_PCIE_USER 3
+#define CLK_MOUT_HSI2_UFS_EMBD_USER 4
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_PHY_REFCLK_IN 5
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_PHY_REFCLK_IN 6
+#define CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4A_1_ACLK 7
+#define CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4A_1_PCLK 8
+#define CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4B_1_ACLK 9
+#define CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4B_1_PCLK 10
+#define CLK_GOUT_HSI2_D_TZPC_HSI2_PCLK 11
+#define CLK_GOUT_HSI2_GPC_HSI2_PCLK 12
+#define CLK_GOUT_HSI2_GPIO_HSI2_PCLK 13
+#define CLK_GOUT_HSI2_HSI2_CMU_HSI2_PCLK 14
+#define CLK_GOUT_HSI2_LHM_AXI_P_HSI2_I_CLK 15
+#define CLK_GOUT_HSI2_LHS_ACEL_D_HSI2_I_CLK 16
+#define CLK_GOUT_HSI2_MMC_CARD_I_ACLK 17
+#define CLK_GOUT_HSI2_MMC_CARD_SDCLKIN 18
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_DBI_ACLK_UG 19
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_MSTR_ACLK_UG 20
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_SLV_ACLK_UG 21
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_I_DRIVER_APB_CLK 22
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_DBI_ACLK_UG 23
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_MSTR_ACLK_UG 24
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_SLV_ACLK_UG 25
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_I_DRIVER_APB_CLK 26
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCS_PMA_PHY_UDBG_I_APB_PCLK 27
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCS_PMA_PIPE_PAL_PCIE_I_APB_PCLK 28
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCS_PMA_PCIEPHY210X2_QCH_I_APB_PCLK 29
+#define CLK_GOUT_HSI2_PCIE_IA_GEN4A_1_I_CLK 30
+#define CLK_GOUT_HSI2_PCIE_IA_GEN4B_1_I_CLK 31
+#define CLK_GOUT_HSI2_PPMU_HSI2_ACLK 32
+#define CLK_GOUT_HSI2_PPMU_HSI2_PCLK 33
+#define CLK_GOUT_HSI2_QE_MMC_CARD_HSI2_ACLK 34
+#define CLK_GOUT_HSI2_QE_MMC_CARD_HSI2_PCLK 35
+#define CLK_GOUT_HSI2_QE_PCIE_GEN4A_HSI2_ACLK 36
+#define CLK_GOUT_HSI2_QE_PCIE_GEN4A_HSI2_PCLK 37
+#define CLK_GOUT_HSI2_QE_PCIE_GEN4B_HSI2_ACLK 38
+#define CLK_GOUT_HSI2_QE_PCIE_GEN4B_HSI2_PCLK 39
+#define CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_ACLK 40
+#define CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_PCLK 41
+#define CLK_GOUT_HSI2_CLK_HSI2_BUS_CLK 42
+#define CLK_GOUT_HSI2_CLK_HSI2_OSCCLK_CLK 43
+#define CLK_GOUT_HSI2_SSMT_HSI2_ACLK 44
+#define CLK_GOUT_HSI2_SSMT_HSI2_PCLK 45
+#define CLK_GOUT_HSI2_SYSMMU_HSI2_CLK_S2 46
+#define CLK_GOUT_HSI2_SYSREG_HSI2_PCLK 47
+#define CLK_GOUT_HSI2_UASC_PCIE_GEN4A_DBI_1_ACLK 48
+#define CLK_GOUT_HSI2_UASC_PCIE_GEN4A_DBI_1_PCLK 49
+#define CLK_GOUT_HSI2_UASC_PCIE_GEN4A_SLV_1_ACLK 50
+#define CLK_GOUT_HSI2_UASC_PCIE_GEN4A_SLV_1_PCLK 51
+#define CLK_GOUT_HSI2_UASC_PCIE_GEN4B_DBI_1_ACLK 52
+#define CLK_GOUT_HSI2_UASC_PCIE_GEN4B_DBI_1_PCLK 53
+#define CLK_GOUT_HSI2_UASC_PCIE_GEN4B_SLV_1_ACLK 54
+#define CLK_GOUT_HSI2_UASC_PCIE_GEN4B_SLV_1_PCLK 55
+#define CLK_GOUT_HSI2_UFS_EMBD_I_ACLK 56
+#define CLK_GOUT_HSI2_UFS_EMBD_I_CLK_UNIPRO 57
+#define CLK_GOUT_HSI2_UFS_EMBD_I_FMP_CLK 58
+#define CLK_GOUT_HSI2_XIU_D_HSI2_ACLK 59
+#define CLK_GOUT_HSI2_XIU_P_HSI2_ACLK 60
+
/* CMU_MISC */
#define CLK_MOUT_MISC_BUS_USER 1
#define CLK_MOUT_MISC_SSS_USER 2
diff --git a/include/dt-bindings/clock/r8a73a4-clock.h b/include/dt-bindings/clock/r8a73a4-clock.h
index 1ec4827b8091..655440a3e7c6 100644
--- a/include/dt-bindings/clock/r8a73a4-clock.h
+++ b/include/dt-bindings/clock/r8a73a4-clock.h
@@ -24,6 +24,10 @@
#define R8A73A4_CLK_ZS 14
#define R8A73A4_CLK_HP 15
+/* MSTP1 */
+#define R8A73A4_CLK_TMU0 25
+#define R8A73A4_CLK_TMU3 21
+
/* MSTP2 */
#define R8A73A4_CLK_DMAC 18
#define R8A73A4_CLK_SCIFB3 17
diff --git a/include/keys/trusted_dcp.h b/include/keys/trusted_dcp.h
new file mode 100644
index 000000000000..9aaa42075b40
--- /dev/null
+++ b/include/keys/trusted_dcp.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 sigma star gmbh
+ */
+
+#ifndef TRUSTED_DCP_H
+#define TRUSTED_DCP_H
+
+extern struct trusted_key_ops dcp_trusted_key_ops;
+
+#endif
diff --git a/include/keys/trusted_tpm.h b/include/keys/trusted_tpm.h
index 7769b726863a..a088b33fd0e3 100644
--- a/include/keys/trusted_tpm.h
+++ b/include/keys/trusted_tpm.h
@@ -6,8 +6,6 @@
#include <linux/tpm_command.h>
/* implementation specific TPM constants */
-#define MAX_BUF_SIZE 1024
-#define TPM_GETRANDOM_SIZE 14
#define TPM_SIZE_OFFSET 2
#define TPM_RETURN_OFFSET 6
#define TPM_DATA_OFFSET 10
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index eb4c369a79eb..35d4ca4f6122 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -86,7 +86,7 @@ void kvm_vcpu_pmu_resync_el0(void);
*/
#define kvm_pmu_update_vcpu_events(vcpu) \
do { \
- if (!has_vhe() && kvm_vcpu_has_pmu(vcpu)) \
+ if (!has_vhe() && kvm_arm_support_pmu_v3()) \
vcpu->arch.pmu.events = *kvm_get_pmu_events(); \
} while (0)
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h
index 93a5f16d03f3..edef565c2a1a 100644
--- a/include/linux/anon_inodes.h
+++ b/include/linux/anon_inodes.h
@@ -9,12 +9,17 @@
#ifndef _LINUX_ANON_INODES_H
#define _LINUX_ANON_INODES_H
+#include <linux/types.h>
+
struct file_operations;
struct inode;
struct file *anon_inode_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags);
+struct file *anon_inode_getfile_fmode(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags, fmode_t f_mode);
struct file *anon_inode_create_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags,
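A hypothetical caller sketch for the new _fmode variant (demo_fops and priv assumed), adding extra f_mode bits on top of those derived from the open flags:

	struct file *file;

	file = anon_inode_getfile_fmode("[demo]", &demo_fops, priv,
					O_RDWR | O_CLOEXEC,
					FMODE_READ | FMODE_WRITE);
	if (IS_ERR(file))
		return PTR_ERR(file);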
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index c906f666ff5d..c82d56768101 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -126,6 +126,7 @@
/* FFA Bus/Device/Driver related */
struct ffa_device {
u32 id;
+ u32 properties;
int vm_id;
bool mode_32bit;
uuid_t uuid;
@@ -221,12 +222,29 @@ struct ffa_partition_info {
#define FFA_PARTITION_DIRECT_SEND BIT(1)
/* partition can send and receive indirect messages. */
#define FFA_PARTITION_INDIRECT_MSG BIT(2)
+/* partition can receive notifications */
+#define FFA_PARTITION_NOTIFICATION_RECV BIT(3)
/* partition runs in the AArch64 execution state. */
#define FFA_PARTITION_AARCH64_EXEC BIT(8)
u32 properties;
u32 uuid[4];
};
+static inline
+bool ffa_partition_check_property(struct ffa_device *dev, u32 property)
+{
+ return dev->properties & property;
+}
+
+#define ffa_partition_supports_notify_recv(dev) \
+ ffa_partition_check_property(dev, FFA_PARTITION_NOTIFICATION_RECV)
+
+#define ffa_partition_supports_indirect_msg(dev) \
+ ffa_partition_check_property(dev, FFA_PARTITION_INDIRECT_MSG)
+
+#define ffa_partition_supports_direct_recv(dev) \
+ ffa_partition_check_property(dev, FFA_PARTITION_DIRECT_RECV)
+
/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */
struct ffa_send_direct_data {
unsigned long data0; /* w3/x3 */
@@ -236,6 +254,14 @@ struct ffa_send_direct_data {
unsigned long data4; /* w7/x7 */
};
+struct ffa_indirect_msg_hdr {
+ u32 flags;
+ u32 res0;
+ u32 offset;
+ u32 send_recv_id;
+ u32 size;
+};
+
struct ffa_mem_region_addr_range {
/* The base IPA of the constituent memory region, aligned to 4 kiB */
u64 address;
@@ -396,6 +422,7 @@ struct ffa_msg_ops {
void (*mode_32bit_set)(struct ffa_device *dev);
int (*sync_send_receive)(struct ffa_device *dev,
struct ffa_send_direct_data *data);
+ int (*indirect_send)(struct ffa_device *dev, void *buf, size_t sz);
};
struct ffa_mem_ops {
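A hypothetical driver-probe sketch combining the new property helpers with the new indirect_send() op (buffer contents assumed):

	static int demo_ffa_probe(struct ffa_device *ffa_dev)
	{
		char buf[64] = "ping";

		if (!ffa_partition_supports_indirect_msg(ffa_dev))
			return -EOPNOTSUPP;

		return ffa_dev->ops->msg_ops->indirect_send(ffa_dev, buf,
							    sizeof(buf));
	}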
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 875d792bffff..d5379548d684 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -615,6 +615,13 @@ static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
bl->tail = bl2->tail;
}
+static inline void bio_list_merge_init(struct bio_list *bl,
+ struct bio_list *bl2)
+{
+ bio_list_merge(bl, bl2);
+ bio_list_init(bl2);
+}
+
static inline void bio_list_merge_head(struct bio_list *bl,
struct bio_list *bl2)
{
@@ -824,5 +831,9 @@ static inline void bio_clear_polled(struct bio *bio)
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
+struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new);
+
+struct bio *blk_alloc_discard_bio(struct block_device *bdev,
+ sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask);
#endif /* __LINUX_BIO_H */
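A hypothetical flush sketch for bio_list_merge_init(): the source list is drained and re-initialized in one step, so it stays usable under the lock afterwards:

	static void demo_flush(struct bio_list *shared, spinlock_t *lock)
	{
		struct bio_list local = BIO_EMPTY_LIST;
		struct bio *bio;

		spin_lock(lock);
		bio_list_merge_init(&local, shared);	/* shared is now empty */
		spin_unlock(lock);

		while ((bio = bio_list_pop(&local)))
			bio_endio(bio);
	}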
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index d3d8fd8e229b..89ba6b16fe8b 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -54,8 +54,8 @@ typedef __u32 __bitwise req_flags_t;
/* Look at ->special_vec for the actual data payload instead of the
bio chain. */
#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
-/* The per-zone write lock is held for this request */
-#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
+/* The request completion needs to be signaled to zone write plugging. */
+#define RQF_ZONE_WRITE_PLUGGING ((__force req_flags_t)(1 << 20))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
#define RQF_RESV ((__force req_flags_t)(1 << 23))
@@ -1150,85 +1150,4 @@ static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
}
void blk_dump_rq_flags(struct request *, char *);
-#ifdef CONFIG_BLK_DEV_ZONED
-static inline unsigned int blk_rq_zone_no(struct request *rq)
-{
- return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
-}
-
-static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
-{
- return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
-}
-
-/**
- * blk_rq_is_seq_zoned_write() - Check if @rq requires write serialization.
- * @rq: Request to examine.
- *
- * Note: REQ_OP_ZONE_APPEND requests do not require serialization.
- */
-static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
-{
- return op_needs_zoned_write_locking(req_op(rq)) &&
- blk_rq_zone_is_seq(rq);
-}
-
-bool blk_req_needs_zone_write_lock(struct request *rq);
-bool blk_req_zone_write_trylock(struct request *rq);
-void __blk_req_zone_write_lock(struct request *rq);
-void __blk_req_zone_write_unlock(struct request *rq);
-
-static inline void blk_req_zone_write_lock(struct request *rq)
-{
- if (blk_req_needs_zone_write_lock(rq))
- __blk_req_zone_write_lock(rq);
-}
-
-static inline void blk_req_zone_write_unlock(struct request *rq)
-{
- if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
- __blk_req_zone_write_unlock(rq);
-}
-
-static inline bool blk_req_zone_is_write_locked(struct request *rq)
-{
- return rq->q->disk->seq_zones_wlock &&
- test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
-}
-
-static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
-{
- if (!blk_req_needs_zone_write_lock(rq))
- return true;
- return !blk_req_zone_is_write_locked(rq);
-}
-#else /* CONFIG_BLK_DEV_ZONED */
-static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
-{
- return false;
-}
-
-static inline bool blk_req_needs_zone_write_lock(struct request *rq)
-{
- return false;
-}
-
-static inline void blk_req_zone_write_lock(struct request *rq)
-{
-}
-
-static inline void blk_req_zone_write_unlock(struct request *rq)
-{
-}
-static inline bool blk_req_zone_is_write_locked(struct request *rq)
-{
- return false;
-}
-
-static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
-{
- return true;
-}
-#endif /* CONFIG_BLK_DEV_ZONED */
-
#endif /* BLK_MQ_H */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index cb1526ec44b5..25dbf1097085 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -88,15 +88,9 @@ struct block_device {
/*
* Block error status values. See block/blk-core:blk_errors for the details.
- * Alpha cannot write a byte atomically, so we need to use 32-bit value.
*/
-#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
-typedef u32 __bitwise blk_status_t;
-typedef u32 blk_short_t;
-#else
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
-#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP ((__force blk_status_t)1)
#define BLK_STS_TIMEOUT ((__force blk_status_t)2)
@@ -137,25 +131,13 @@ typedef u16 blk_short_t;
#define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13)
/*
- * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
- * related resources are unavailable, but the driver can guarantee the queue
- * will be rerun in the future once the resources become available again.
- *
- * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
- * a zone specific resource and IO to a different zone on the same device could
- * still be served. Examples of that are zones that are write-locked, but a read
- * to the same zone could be served.
- */
-#define BLK_STS_ZONE_RESOURCE ((__force blk_status_t)14)
-
-/*
* BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
* path if the device returns a status indicating that too many zone resources
* are currently open. The same command should be successful if resubmitted
* after the number of open zones decreases below the device's limits, which is
* reported in the request_queue's max_open_zones.
*/
-#define BLK_STS_ZONE_OPEN_RESOURCE ((__force blk_status_t)15)
+#define BLK_STS_ZONE_OPEN_RESOURCE ((__force blk_status_t)14)
/*
* BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
@@ -164,20 +146,20 @@ typedef u16 blk_short_t;
* after the number of active zones decreases below the device's limits, which
* is reported in the request_queue's max_active_zones.
*/
-#define BLK_STS_ZONE_ACTIVE_RESOURCE ((__force blk_status_t)16)
+#define BLK_STS_ZONE_ACTIVE_RESOURCE ((__force blk_status_t)15)
/*
* BLK_STS_OFFLINE is returned from the driver when the target device is offline
* or is being taken offline. This could help differentiate the case where a
* device is intentionally being shut down from a real I/O error.
*/
-#define BLK_STS_OFFLINE ((__force blk_status_t)17)
+#define BLK_STS_OFFLINE ((__force blk_status_t)16)
/*
* BLK_STS_DURATION_LIMIT is returned from the driver when the target device
* aborted the command because it exceeded one of its Command Duration Limits.
*/
-#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)18)
+#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)17)
/**
* blk_path_error - returns true if error may be path related
@@ -234,7 +216,12 @@ struct bio {
struct bvec_iter bi_iter;
- blk_qc_t bi_cookie;
+ union {
+ /* for polled bios: */
+ blk_qc_t bi_cookie;
+ /* for plugged zoned writes only: */
+ unsigned int __bi_nr_segments;
+ };
bio_end_io_t *bi_end_io;
void *bi_private;
#ifdef CONFIG_BLK_CGROUP
@@ -304,7 +291,8 @@ enum {
BIO_QOS_THROTTLED, /* bio went through rq_qos throttle path */
BIO_QOS_MERGED, /* but went through rq_qos merge path */
BIO_REMAPPED,
- BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */
+ BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
+ BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */
BIO_FLAG_LAST
};
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c3e8f7cf96be..69c4f113db42 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -128,6 +128,8 @@ typedef unsigned int __bitwise blk_mode_t;
#define BLK_OPEN_WRITE_IOCTL ((__force blk_mode_t)(1 << 4))
/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
#define BLK_OPEN_RESTRICT_WRITES ((__force blk_mode_t)(1 << 5))
+/* return partition scanning errors */
+#define BLK_OPEN_STRICT_SCAN ((__force blk_mode_t)(1 << 6))
struct gendisk {
/*
@@ -177,22 +179,21 @@ struct gendisk {
#ifdef CONFIG_BLK_DEV_ZONED
/*
- * Zoned block device information for request dispatch control.
- * nr_zones is the total number of zones of the device. This is always
- * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
- * bits which indicates if a zone is conventional (bit set) or
- * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
- * bits which indicates if a zone is write locked, that is, if a write
- * request targeting the zone was dispatched.
- *
- * Reads of this information must be protected with blk_queue_enter() /
- * blk_queue_exit(). Modifying this information is only allowed while
- * no requests are being processed. See also blk_mq_freeze_queue() and
- * blk_mq_unfreeze_queue().
+ * Zoned block device information. Reads of this information must be
+ * protected with blk_queue_enter() / blk_queue_exit(). Modifying this
+ * information is only allowed while no requests are being processed.
+ * See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue().
*/
unsigned int nr_zones;
+ unsigned int zone_capacity;
unsigned long *conv_zones_bitmap;
- unsigned long *seq_zones_wlock;
+ unsigned int zone_wplugs_hash_bits;
+ spinlock_t zone_wplugs_lock;
+ struct mempool_s *zone_wplugs_pool;
+ struct hlist_head *zone_wplugs_hash;
+ struct list_head zone_wplugs_err_list;
+ struct work_struct zone_wplugs_work;
+ struct workqueue_struct *zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */
#if IS_ENABLED(CONFIG_CDROM)
@@ -231,6 +232,19 @@ static inline unsigned int disk_openers(struct gendisk *disk)
return atomic_read(&disk->part0->bd_openers);
}
+/**
+ * disk_has_partscan - return %true if partition scanning is enabled on a disk
+ * @disk: disk to check
+ *
+ * Returns %true if partitions scanning is enabled for @disk, or %false if
+ * partition scanning is disabled either permanently or temporarily.
+ */
+static inline bool disk_has_partscan(struct gendisk *disk)
+{
+ return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
+ !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
+}
+
/*
* The gendisk is refcounted by the part0 block_device, and the bd_device
* therein is also used for device model presentation in sysfs.
@@ -329,8 +343,7 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
sector_t sectors, sector_t nr_sectors);
-int blk_revalidate_disk_zones(struct gendisk *disk,
- void (*update_driver_data)(struct gendisk *disk));
+int blk_revalidate_disk_zones(struct gendisk *disk);
/*
* Independent access ranges: struct blk_independent_access_range describes
@@ -447,8 +460,6 @@ struct request_queue {
atomic_t nr_active_requests_shared_tags;
- unsigned int required_elevator_features;
-
struct blk_mq_tags *sched_shared_tags;
struct list_head icq_list;
@@ -631,15 +642,6 @@ static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
return sector >> ilog2(disk->queue->limits.chunk_sectors);
}
-static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
-{
- if (!blk_queue_is_zoned(disk->queue))
- return false;
- if (!disk->conv_zones_bitmap)
- return true;
- return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
-}
-
static inline void disk_set_max_open_zones(struct gendisk *disk,
unsigned int max_open_zones)
{
@@ -662,6 +664,7 @@ static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
return bdev->bd_disk->queue->limits.max_active_zones;
}
+bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
@@ -672,10 +675,6 @@ static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
return 0;
}
-static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
-{
- return false;
-}
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
return 0;
@@ -689,6 +688,10 @@ static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
return 0;
}
+static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+{
+ return false;
+}
#endif /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blk_queue_depth(struct request_queue *q)
@@ -853,9 +856,11 @@ static inline unsigned int bio_zone_no(struct bio *bio)
return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}
-static inline unsigned int bio_zone_is_seq(struct bio *bio)
+static inline bool bio_straddles_zones(struct bio *bio)
{
- return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
+ return bio_sectors(bio) &&
+ bio_zone_no(bio) !=
+ disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);
}
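/*
 * Worked example for the helper above, assuming a hypothetical zoned disk
 * with 524288-sector zones (chunk_sectors = 524288, so ilog2() = 19): a
 * bio starting at sector 524280 and spanning 16 sectors ends at sector
 * 524295, so disk_zone_no() yields 0 for the first sector but 1 for the
 * last one, and bio_straddles_zones() returns true.
 */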
/*
@@ -940,14 +945,6 @@ disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
struct blk_independent_access_ranges *iars);
-/*
- * Elevator features for blk_queue_required_elevator_features:
- */
-/* Supports zoned block devices sequential write constraint */
-#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0)
-
-extern void blk_queue_required_elevator_features(struct request_queue *q,
- unsigned int features);
extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
struct device *dev);
@@ -1154,12 +1151,29 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
return q->limits.max_segment_size;
}
-static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
+static inline unsigned int queue_limits_max_zone_append_sectors(struct queue_limits *l)
{
+ unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors);
- const struct queue_limits *l = &q->limits;
+ return min_not_zero(l->max_zone_append_sectors, max_sectors);
+}
- return min(l->max_zone_append_sectors, l->max_sectors);
+static inline unsigned int queue_max_zone_append_sectors(struct request_queue *q)
+{
+ if (!blk_queue_is_zoned(q))
+ return 0;
+
+ return queue_limits_max_zone_append_sectors(&q->limits);
+}
+
+static inline bool queue_emulates_zone_append(struct request_queue *q)
+{
+ return blk_queue_is_zoned(q) && !q->limits.max_zone_append_sectors;
+}
+
+static inline bool bdev_emulates_zone_append(struct block_device *bdev)
+{
+ return queue_emulates_zone_append(bdev_get_queue(bdev));
}
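/*
 * Worked example, assuming hypothetical limits: with chunk_sectors = 524288,
 * max_hw_sectors = 2048 and max_zone_append_sectors = 0 (no native zone
 * append support), queue_limits_max_zone_append_sectors() returns
 * min_not_zero(0, min(524288, 2048)) = 2048, while
 * queue_emulates_zone_append() reports true for such a zoned queue.
 */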
static inline unsigned int
@@ -1301,18 +1315,6 @@ static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
return disk_zone_no(bdev->bd_disk, sec);
}
-/* Whether write serialization is required for @op on zoned devices. */
-static inline bool op_needs_zoned_write_locking(enum req_op op)
-{
- return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES;
-}
-
-static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
- enum req_op op)
-{
- return bdev_is_zoned(bdev) && op_needs_zoned_write_locking(op);
-}
-
static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
@@ -1328,6 +1330,12 @@ static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
return sector & (bdev_zone_sectors(bdev) - 1);
}
+static inline sector_t bio_offset_from_zone_start(struct bio *bio)
+{
+ return bdev_offset_from_zone_start(bio->bi_bdev,
+ bio->bi_iter.bi_sector);
+}
+
static inline bool bdev_is_zone_start(struct block_device *bdev,
sector_t sector)
{
@@ -1505,16 +1513,6 @@ struct blk_holder_ops {
* Thaw the file system mounted on the block device.
*/
int (*thaw)(struct block_device *bdev);
-
- /*
- * If needed, get a reference to the holder.
- */
- void (*get_holder)(void *holder);
-
- /*
- * Release the holder.
- */
- void (*put_holder)(void *holder);
};
/*
@@ -1585,6 +1583,7 @@ static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
int bdev_freeze(struct block_device *bdev);
int bdev_thaw(struct block_device *bdev);
+void bdev_fput(struct file *bdev_file);
struct io_comp_batch {
struct request *req_list;
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index ca73940e26df..3f4b4ac527ca 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -10,6 +10,7 @@
#ifdef __KERNEL__
#include <linux/kernel.h>
#include <linux/types.h>
+bool __init cmdline_has_extra_options(void);
#else /* !__KERNEL__ */
/*
* NOTE: This is only for tools/bootconfig, because tools/bootconfig will
@@ -287,7 +288,12 @@ int __init xbc_init(const char *buf, size_t size, const char **emsg, int *epos);
int __init xbc_get_info(int *node_size, size_t *data_size);
/* XBC cleanup data structures */
-void __init xbc_exit(void);
+void __init _xbc_exit(bool early);
+
+static inline void xbc_exit(void)
+{
+ _xbc_exit(false);
+}
/* XBC embedded bootconfig data in kernel */
#ifdef CONFIG_BOOT_CONFIG_EMBED
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 4f20f62f9d63..890e152d553e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1574,12 +1574,26 @@ struct bpf_link {
enum bpf_link_type type;
const struct bpf_link_ops *ops;
struct bpf_prog *prog;
- struct work_struct work;
+	/* rcu is used before freeing, while work can be used to schedule the
+	 * RCU-based freeing before that, so the two never overlap
+	 */
+ union {
+ struct rcu_head rcu;
+ struct work_struct work;
+ };
};
struct bpf_link_ops {
void (*release)(struct bpf_link *link);
+ /* deallocate link resources callback, called without RCU grace period
+ * waiting
+ */
void (*dealloc)(struct bpf_link *link);
+ /* deallocate link resources callback, called after RCU grace period;
+ * if underlying BPF program is sleepable we go through tasks trace
+ * RCU GP and then "classic" RCU GP
+ */
+ void (*dealloc_deferred)(struct bpf_link *link);
int (*detach)(struct bpf_link *link);
int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
struct bpf_prog *old_prog);
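/*
 * A minimal sketch of a link type opting into RCU-deferred freeing via the
 * new callback; the struct and function names below are hypothetical, not
 * part of the kernel API.
 */
struct example_link {
	struct bpf_link link;
};

static void example_link_release(struct bpf_link *link)
{
	/* Drop references to whatever the link was attached to. */
}

static void example_link_dealloc_deferred(struct bpf_link *link)
{
	/* Only called once the relevant RCU grace period(s) have elapsed. */
	kfree(container_of(link, struct example_link, link));
}

static const struct bpf_link_ops example_link_ops = {
	.release		= example_link_release,
	.dealloc_deferred	= example_link_dealloc_deferred,
};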
diff --git a/include/linux/bus/stm32_firewall_device.h b/include/linux/bus/stm32_firewall_device.h
new file mode 100644
index 000000000000..18e0a2fc3816
--- /dev/null
+++ b/include/linux/bus/stm32_firewall_device.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef STM32_FIREWALL_DEVICE_H
+#define STM32_FIREWALL_DEVICE_H
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define STM32_FIREWALL_MAX_EXTRA_ARGS 5
+
+/* Opaque reference to stm32_firewall_controller */
+struct stm32_firewall_controller;
+
+/**
+ * struct stm32_firewall - Information on a device's firewall. Each device can have more than one
+ * firewall.
+ *
+ * @firewall_ctrl: Pointer referencing a firewall controller of the device. It is
+ * opaque so a device cannot manipulate the controller's ops or access
+ * the controller's data
+ * @extra_args: Extra arguments that are implementation dependent
+ * @entry: Name of the firewall entry
+ * @extra_args_size: Number of extra arguments
+ * @firewall_id:	Firewall ID associated with the device for this firewall controller
+ */
+struct stm32_firewall {
+ struct stm32_firewall_controller *firewall_ctrl;
+ u32 extra_args[STM32_FIREWALL_MAX_EXTRA_ARGS];
+ const char *entry;
+ size_t extra_args_size;
+ u32 firewall_id;
+};
+
+#if IS_ENABLED(CONFIG_STM32_FIREWALL)
+/**
+ * stm32_firewall_get_firewall - Get the firewall(s) associated with a given device.
+ * The firewall controller reference is always the first argument
+ * of each of the access-controller property entries.
+ * The firewall ID is always the second argument of each of the
+ * access-controller property entries.
+ * If there's no argument linked to the phandle, then the firewall ID
+ * field is set to U32_MAX, which is an invalid ID.
+ *
+ * @np: Device node to parse
+ * @firewall: Array of firewall references
+ * @nb_firewall: Number of firewall references to get. Must be at least 1.
+ *
+ * Returns 0 on success, -ENODEV if there's no match with a firewall controller, or an
+ * appropriate errno code if an error occurred.
+ */
+int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall,
+ unsigned int nb_firewall);
+
+/**
+ * stm32_firewall_grant_access - Request firewall access rights and grant access.
+ *
+ * @firewall: Firewall reference containing the ID to check against its firewall
+ * controller
+ *
+ * Returns 0 if access is granted, -EACCES if access is denied, -ENODEV if @firewall is NULL, or
+ * an appropriate errno code if an error occurred
+ */
+int stm32_firewall_grant_access(struct stm32_firewall *firewall);
+
+/**
+ * stm32_firewall_release_access - Release access granted from a call to
+ * stm32_firewall_grant_access().
+ *
+ * @firewall: Firewall reference containing the ID to check against its firewall
+ * controller
+ */
+void stm32_firewall_release_access(struct stm32_firewall *firewall);
+
+/**
+ * stm32_firewall_grant_access_by_id - Request firewall access rights of a given device
+ * based on a specific firewall ID
+ *
+ * Warnings:
+ * There is no way to ensure that the given ID will correspond to the firewall referenced in the
+ * device node if the ID did not come from stm32_firewall_get_firewall(). In that case, this
+ * function must be used with caution.
+ * This function should be used for subsystem resources that do not have the same firewall ID
+ * as their parent.
+ * U32_MAX is an invalid ID.
+ *
+ * @firewall: Firewall reference containing the firewall controller
+ * @subsystem_id: Firewall ID of the subsystem resource
+ *
+ * Returns 0 if access is granted, -EACCES if access is denied, -ENODEV if @firewall is NULL, or
+ * an appropriate errno code if an error occurred
+ */
+int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id);
+
+/**
+ * stm32_firewall_release_access_by_id - Release access granted from a call to
+ * stm32_firewall_grant_access_by_id().
+ *
+ * Warnings:
+ * There is no way to ensure that the given ID will correspond to the firewall referenced in the
+ * device node if the ID did not come from stm32_firewall_get_firewall(). In that case, this
+ * function must be used with caution.
+ * This function should be used for subsystem resources that do not have the same firewall ID
+ * as their parent.
+ * U32_MAX is an invalid ID.
+ *
+ * @firewall: Firewall reference containing the firewall controller
+ * @subsystem_id: Firewall ID of the subsystem resource
+ */
+void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id);
+
+#else /* CONFIG_STM32_FIREWALL */
+
+static inline int stm32_firewall_get_firewall(struct device_node *np,
+					      struct stm32_firewall *firewall,
+					      unsigned int nb_firewall)
+{
+	return -ENODEV;
+}
+
+static inline int stm32_firewall_grant_access(struct stm32_firewall *firewall)
+{
+ return -ENODEV;
+}
+
+static inline void stm32_firewall_release_access(struct stm32_firewall *firewall)
+{
+}
+
+static inline int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id)
+{
+ return -ENODEV;
+}
+
+static inline void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id)
+{
+}
+
+#endif /* CONFIG_STM32_FIREWALL */
+#endif /* STM32_FIREWALL_DEVICE_H */
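/*
 * A minimal usage sketch for the API above, assuming a hypothetical platform
 * driver whose device node carries an access-controllers property; error
 * handling is abbreviated.
 */
static int example_probe(struct platform_device *pdev)
{
	struct stm32_firewall fw;
	int ret;

	ret = stm32_firewall_get_firewall(pdev->dev.of_node, &fw, 1);
	if (ret)
		return ret;

	ret = stm32_firewall_grant_access(&fw);
	if (ret)
		return ret;	/* e.g. -EACCES: resource firewalled away */

	/* ... use the protected resource ... */

	stm32_firewall_release_access(&fw);
	return 0;
}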
diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h
index cb0d6cd1c12f..60693a145894 100644
--- a/include/linux/cc_platform.h
+++ b/include/linux/cc_platform.h
@@ -90,6 +90,14 @@ enum cc_attr {
* Examples include TDX Guest.
*/
CC_ATTR_HOTPLUG_DISABLED,
+
+ /**
+ * @CC_ATTR_HOST_SEV_SNP: AMD SNP enabled on the host.
+ *
+ * The host kernel is running with the necessary features
+ * enabled to run SEV-SNP guests.
+ */
+ CC_ATTR_HOST_SEV_SNP,
};
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
@@ -107,10 +115,14 @@ enum cc_attr {
* * FALSE - Specified Confidential Computing attribute is not active
*/
bool cc_platform_has(enum cc_attr attr);
+void cc_platform_set(enum cc_attr attr);
+void cc_platform_clear(enum cc_attr attr);
#else /* !CONFIG_ARCH_HAS_CC_PLATFORM */
static inline bool cc_platform_has(enum cc_attr attr) { return false; }
+static inline void cc_platform_set(enum cc_attr attr) { }
+static inline void cc_platform_clear(enum cc_attr attr) { }
#endif /* CONFIG_ARCH_HAS_CC_PLATFORM */
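/*
 * Usage sketch: architecture setup code can now flip attributes at runtime,
 * e.g. once host SNP support has been verified (snp_host_setup() below is
 * hypothetical):
 *
 *	cc_platform_set(CC_ATTR_HOST_SEV_SNP);
 *	...
 *	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 *		snp_host_setup();
 */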
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 00623f4de5e1..0fa56d672532 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -286,6 +286,11 @@ static inline int clk_rate_exclusive_get(struct clk *clk)
return 0;
}
+static inline int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk)
+{
+ return 0;
+}
+
static inline void clk_rate_exclusive_put(struct clk *clk) {}
#endif
diff --git a/include/linux/cmpxchg-emu.h b/include/linux/cmpxchg-emu.h
new file mode 100644
index 000000000000..998deec67740
--- /dev/null
+++ b/include/linux/cmpxchg-emu.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Emulated 1-byte and 2-byte cmpxchg operations for architectures
+ * lacking direct support for these sizes. These are implemented in terms
+ * of 4-byte cmpxchg operations.
+ *
+ * Copyright (C) 2024 Paul E. McKenney.
+ */
+
+#ifndef __LINUX_CMPXCHG_EMU_H
+#define __LINUX_CMPXCHG_EMU_H
+
+uintptr_t cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new);
+
+#endif /* __LINUX_CMPXCHG_EMU_H */
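/*
 * A minimal sketch of the emulation idea (not necessarily the exact lib/
 * implementation), assuming a little-endian byte layout and the generic
 * READ_ONCE()/cmpxchg() primitives: splice the byte into its containing
 * aligned 32-bit word and retry the 4-byte cmpxchg until no neighbouring
 * byte has changed underneath us.
 */
static inline uintptr_t cmpxchg_emu_u8_sketch(volatile u8 *p, uintptr_t old,
					      uintptr_t new)
{
	volatile u32 *p32 = (volatile u32 *)((uintptr_t)p & ~(uintptr_t)0x3);
	unsigned int shift = ((uintptr_t)p & 0x3) * 8;
	u32 mask = 0xffU << shift;
	u32 old32, new32, seen;

	for (;;) {
		old32 = READ_ONCE(*p32);
		if (((old32 & mask) >> shift) != old)
			return (old32 & mask) >> shift;	/* comparison failed */
		new32 = (old32 & ~mask) | ((u32)new << shift);
		seen = cmpxchg(p32, old32, new32);
		if (seen == old32)
			return old;			/* success */
		/* A neighbouring byte (or ours) changed; retry. */
	}
}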
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c00cc6c0878a..8c252e073bd8 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -268,7 +268,7 @@ static inline void *offset_to_ptr(const int *off)
* - When one operand is a null pointer constant (i.e. when x is an integer
* constant expression) and the other is an object pointer (i.e. our
* third operand), the conditional operator returns the type of the
- * object pointer operand (i.e. "int *). Here, within the sizeof(), we
+ * object pointer operand (i.e. "int *"). Here, within the sizeof(), we
* would then get:
* sizeof(*((int *)(...)) == sizeof(int) == 4
* - When one operand is a void pointer (i.e. when x is not an integer
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 2abaa3a825a9..774c2570fe51 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -273,9 +273,27 @@ struct ftrace_likely_data {
* disable all instrumentation. See Kconfig.kcsan where this is mandatory.
*/
# define __no_kcsan __no_sanitize_thread __disable_sanitizer_instrumentation
+/*
+ * Type qualifier to mark variables where all data-racy accesses should be
+ * ignored by KCSAN. Note, the implementation simply marks these variables as
+ * volatile, since KCSAN will treat such accesses as "marked".
+ */
+# define __data_racy volatile
# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
#else
# define __no_kcsan
+# define __data_racy
+#endif
+
+#ifdef __SANITIZE_MEMORY__
+/*
+ * Similarly to KASAN and KCSAN, KMSAN loses function attributes of inlined
+ * functions, therefore disabling KMSAN checks also requires disabling inlining.
+ *
+ * __no_sanitize_or_inline effectively prevents KMSAN from reporting errors
+ * within the function and marks all its outputs as initialized.
+ */
+# define __no_sanitize_or_inline __no_kmsan_checks notrace __maybe_unused
#endif
#ifndef __no_sanitize_or_inline
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index d3eba4360150..0904ba010341 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -30,6 +30,8 @@ struct coredump_params {
struct core_vma_metadata *vma_meta;
};
+extern unsigned int core_file_note_size_limit;
+
/*
* These are the only things you should do on a core-file: use only these
* functions to write out all the necessary info.
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 272e4e79e15c..861c3bfc5f17 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -221,7 +221,18 @@ void cpuhp_report_idle_dead(void);
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+#ifdef CONFIG_CPU_MITIGATIONS
extern bool cpu_mitigations_off(void);
extern bool cpu_mitigations_auto_nosmt(void);
+#else
+static inline bool cpu_mitigations_off(void)
+{
+ return true;
+}
+static inline bool cpu_mitigations_auto_nosmt(void)
+{
+ return false;
+}
+#endif
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/device.h b/include/linux/device.h
index 97c4b046c09d..b9f5464f44ed 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1247,6 +1247,7 @@ void device_link_del(struct device_link *link);
void device_link_remove(void *consumer, struct device *supplier);
void device_links_supplier_sync_state_pause(void);
void device_links_supplier_sync_state_resume(void);
+void device_link_wait_removal(void);
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
diff --git a/include/linux/elf.h b/include/linux/elf.h
index c9a46c4e183b..5c402788da19 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -65,7 +65,7 @@ extern Elf64_Dyn _DYNAMIC [];
struct file;
struct coredump_params;
-#ifndef ARCH_HAVE_EXTRA_ELF_NOTES
+#ifndef CONFIG_ARCH_HAVE_EXTRA_ELF_NOTES
static inline int elf_coredump_extra_notes_size(void) { return 0; }
static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm) { return 0; }
#else
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index 770755df852f..70cd7258cd29 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -245,7 +245,6 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
* max utilization to the allowed CPU capacity before calculating
* effective performance.
*/
- max_util = map_util_perf(max_util);
max_util = min(max_util, allowed_cpu_cap);
/*
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 224645f17c33..297231854ada 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -608,6 +608,31 @@ static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
}
/**
+ * eth_skb_pkt_type - Assign packet type if destination address does not match
+ * @skb: Buffer to assign the packet type to
+ * @dev: Network device to compare the packet's destination address against
+ *
+ * If the destination MAC address of the packet does not match the network
+ * device address, assign an appropriate packet type.
+ */
+static inline void eth_skb_pkt_type(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ const struct ethhdr *eth = eth_hdr(skb);
+
+ if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) {
+ if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
+ if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+ } else {
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+ }
+}
+
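/*
 * Usage sketch for eth_skb_pkt_type(): receive paths that build an sk_buff
 * without going through eth_type_trans() (which performs this classification
 * itself) can reuse the helper once the MAC header is set, e.g.:
 *
 *	skb_reset_mac_header(skb);
 *	eth_skb_pkt_type(skb, dev);
 *	skb->protocol = eth_hdr(skb)->h_proto;
 */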
+/**
* eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
* @skb: Buffer to pad
*
diff --git a/include/linux/file.h b/include/linux/file.h
index 169692cb1906..45d0f4800abd 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -84,6 +84,7 @@ static inline void fdput_pos(struct fd f)
}
DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd)
+DEFINE_CLASS(fd_raw, struct fd, fdput(_T), fdget_raw(fd), int fd)
extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
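/*
 * Usage sketch for the scope-based classes above (the caller is
 * hypothetical): the fd reference is dropped automatically when the
 * variable goes out of scope.
 */
static int example_check_fd(int fd)
{
	CLASS(fd_raw, f)(fd);

	if (!f.file)
		return -EBADF;
	/* ... use f.file; fdput() happens implicitly on return ... */
	return 0;
}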
diff --git a/include/linux/filter.h b/include/linux/filter.h
index c99bc3df2d28..219ee7a76874 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -963,6 +963,7 @@ bool bpf_jit_supports_far_kfunc_call(void);
bool bpf_jit_supports_exceptions(void);
bool bpf_jit_supports_ptr_xchg(void);
bool bpf_jit_supports_arena(void);
+u64 bpf_arch_uaddress_limit(void);
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
bool bpf_helper_changes_pkt_data(void *func);
diff --git a/include/linux/firmware/qcom/qcom_qseecom.h b/include/linux/firmware/qcom/qcom_qseecom.h
index 5c28298a98be..366243ee9609 100644
--- a/include/linux/firmware/qcom/qcom_qseecom.h
+++ b/include/linux/firmware/qcom/qcom_qseecom.h
@@ -10,6 +10,7 @@
#define __QCOM_QSEECOM_H
#include <linux/auxiliary_bus.h>
+#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/firmware/qcom/qcom_scm.h>
@@ -25,11 +26,56 @@ struct qseecom_client {
};
/**
+ * qseecom_scm_dev() - Get the SCM device associated with the QSEECOM client.
+ * @client: The QSEECOM client device.
+ *
+ * Returns the SCM device under which the provided QSEECOM client device
+ * operates. This function is intended to be used for DMA allocations.
+ */
+static inline struct device *qseecom_scm_dev(struct qseecom_client *client)
+{
+ return client->aux_dev.dev.parent->parent;
+}
+
+/**
+ * qseecom_dma_alloc() - Allocate DMA memory for a QSEECOM client.
+ * @client: The QSEECOM client to allocate the memory for.
+ * @size: The number of bytes to allocate.
+ * @dma_handle: Pointer to where the DMA address should be stored.
+ * @gfp: Allocation flags.
+ *
+ * Wrapper function for dma_alloc_coherent(), allocating DMA memory usable for
+ * TZ/QSEECOM communication. Refer to dma_alloc_coherent() for details.
+ */
+static inline void *qseecom_dma_alloc(struct qseecom_client *client, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ return dma_alloc_coherent(qseecom_scm_dev(client), size, dma_handle, gfp);
+}
+
+/**
+ * qseecom_dma_free() - Free QSEECOM DMA memory.
+ * @client: The QSEECOM client for which the memory has been allocated.
+ * @size: The number of bytes allocated.
+ * @cpu_addr: Virtual memory address to free.
+ * @dma_handle: DMA memory address to free.
+ *
+ * Wrapper function for dma_free_coherent(), freeing memory previously
+ * allocated with qseecom_dma_alloc(). Refer to dma_free_coherent() for
+ * details.
+ */
+static inline void qseecom_dma_free(struct qseecom_client *client, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ return dma_free_coherent(qseecom_scm_dev(client), size, cpu_addr, dma_handle);
+}
+
+/**
* qcom_qseecom_app_send() - Send to and receive data from a given QSEE app.
* @client: The QSEECOM client associated with the target app.
- * @req: Request buffer sent to the app (must be DMA-mappable).
+ * @req: DMA address of the request buffer sent to the app.
* @req_size: Size of the request buffer.
- * @rsp: Response buffer, written to by the app (must be DMA-mappable).
+ * @rsp: DMA address of the response buffer, written to by the app.
* @rsp_size: Size of the response buffer.
*
* Sends a request to the QSEE app associated with the given client and reads
* back its response.
*
*
* Return: Zero on success, nonzero on failure.
*/
-static inline int qcom_qseecom_app_send(struct qseecom_client *client, void *req, size_t req_size,
- void *rsp, size_t rsp_size)
+static inline int qcom_qseecom_app_send(struct qseecom_client *client,
+ dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size)
{
return qcom_scm_qseecom_app_send(client->app_id, req, req_size, rsp, rsp_size);
}
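/*
 * Usage sketch for the DMA-based interface above; the buffer layout and
 * sizes are app-specific and hypothetical here.
 */
static int example_qseecom_call(struct qseecom_client *client,
				size_t req_size, size_t rsp_size)
{
	dma_addr_t req_dma, rsp_dma;
	void *req, *rsp;
	int ret;

	req = qseecom_dma_alloc(client, req_size, &req_dma, GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	rsp = qseecom_dma_alloc(client, rsp_size, &rsp_dma, GFP_KERNEL);
	if (!rsp) {
		ret = -ENOMEM;
		goto free_req;
	}

	/* ... fill the request buffer at req ... */
	ret = qcom_qseecom_app_send(client, req_dma, req_size,
				    rsp_dma, rsp_size);
	/* ... on success, parse the response at rsp ... */

	qseecom_dma_free(client, rsp_size, rsp, rsp_dma);
free_req:
	qseecom_dma_free(client, req_size, req, req_dma);
	return ret;
}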
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index ccaf28846054..aaa19f93ac43 100644
--- a/include/linux/firmware/qcom/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -118,8 +118,8 @@ bool qcom_scm_lmh_dcvsh_available(void);
#ifdef CONFIG_QCOM_QSEECOM
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id);
-int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
- size_t rsp_size);
+int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size);
#else /* CONFIG_QCOM_QSEECOM */
@@ -128,9 +128,9 @@ static inline int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
return -EINVAL;
}
-static inline int qcom_scm_qseecom_app_send(u32 app_id, void *req,
- size_t req_size, void *rsp,
- size_t rsp_size)
+static inline int qcom_scm_qseecom_app_send(u32 app_id,
+ dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size)
{
return -EINVAL;
}
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index 6aeebe0a6777..85fc0e6f0f7f 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -15,10 +15,14 @@
#define FORTIFY_REASON(func, write) (FIELD_PREP(BIT(0), write) | \
FIELD_PREP(GENMASK(7, 1), func))
+/* Overridden by KUnit tests. */
#ifndef fortify_panic
# define fortify_panic(func, write, avail, size, retfail) \
__fortify_panic(FORTIFY_REASON(func, write), avail, size)
#endif
+#ifndef fortify_warn_once
+# define fortify_warn_once(x...) WARN_ONCE(x)
+#endif
#define FORTIFY_READ 0
#define FORTIFY_WRITE 1
@@ -609,7 +613,7 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
const size_t __q_size = (q_size); \
const size_t __p_size_field = (p_size_field); \
const size_t __q_size_field = (q_size_field); \
- WARN_ONCE(fortify_memcpy_chk(__fortify_size, __p_size, \
+ fortify_warn_once(fortify_memcpy_chk(__fortify_size, __p_size, \
__q_size, __p_size_field, \
__q_size_field, FORTIFY_FUNC_ ##op), \
#op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \
@@ -734,7 +738,8 @@ __FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp
if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
- fortify_panic(FORTIFY_FUNC_kmemdup, FORTIFY_READ, p_size, size, NULL);
+ fortify_panic(FORTIFY_FUNC_kmemdup, FORTIFY_READ, p_size, size,
+ __real_kmemdup(p, 0, gfp));
return __real_kmemdup(p, size, gfp);
}
diff --git a/include/linux/framer/framer.h b/include/linux/framer/framer.h
index 9a9b88962c29..2b85fe9e7f9a 100644
--- a/include/linux/framer/framer.h
+++ b/include/linux/framer/framer.h
@@ -181,12 +181,12 @@ static inline int framer_notifier_unregister(struct framer *framer,
return -ENOSYS;
}
-struct framer *framer_get(struct device *dev, const char *con_id)
+static inline struct framer *framer_get(struct device *dev, const char *con_id)
{
return ERR_PTR(-ENOSYS);
}
-void framer_put(struct device *dev, struct framer *framer)
+static inline void framer_put(struct device *dev, struct framer *framer)
{
}
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 00fc429b0af0..de946a1fd845 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -110,21 +110,26 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
*/
/* file is open for reading */
-#define FMODE_READ ((__force fmode_t)0x1)
+#define FMODE_READ ((__force fmode_t)(1 << 0))
/* file is open for writing */
-#define FMODE_WRITE ((__force fmode_t)0x2)
+#define FMODE_WRITE ((__force fmode_t)(1 << 1))
/* file is seekable */
-#define FMODE_LSEEK ((__force fmode_t)0x4)
+#define FMODE_LSEEK ((__force fmode_t)(1 << 2))
/* file can be accessed using pread */
-#define FMODE_PREAD ((__force fmode_t)0x8)
+#define FMODE_PREAD ((__force fmode_t)(1 << 3))
/* file can be accessed using pwrite */
-#define FMODE_PWRITE ((__force fmode_t)0x10)
+#define FMODE_PWRITE ((__force fmode_t)(1 << 4))
/* File is opened for execution with sys_execve / sys_uselib */
-#define FMODE_EXEC ((__force fmode_t)0x20)
+#define FMODE_EXEC ((__force fmode_t)(1 << 5))
+/* File writes are restricted (block device specific) */
+#define FMODE_WRITE_RESTRICTED ((__force fmode_t)(1 << 6))
+
+/* FMODE_* bits 7 to 8 */
+
/* 32bit hashes as llseek() offset (for directories) */
-#define FMODE_32BITHASH ((__force fmode_t)0x200)
+#define FMODE_32BITHASH ((__force fmode_t)(1 << 9))
/* 64bit hashes as llseek() offset (for directories) */
-#define FMODE_64BITHASH ((__force fmode_t)0x400)
+#define FMODE_64BITHASH ((__force fmode_t)(1 << 10))
/*
* Don't update ctime and mtime.
@@ -132,60 +137,53 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
* Currently a special hack for the XFS open_by_handle ioctl, but we'll
* hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon.
*/
-#define FMODE_NOCMTIME ((__force fmode_t)0x800)
+#define FMODE_NOCMTIME ((__force fmode_t)(1 << 11))
/* Expect random access pattern */
-#define FMODE_RANDOM ((__force fmode_t)0x1000)
+#define FMODE_RANDOM ((__force fmode_t)(1 << 12))
/* File is huge (eg. /dev/mem): treat loff_t as unsigned */
-#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
+#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)(1 << 13))
/* File is opened with O_PATH; almost nothing can be done with it */
-#define FMODE_PATH ((__force fmode_t)0x4000)
+#define FMODE_PATH ((__force fmode_t)(1 << 14))
/* File needs atomic accesses to f_pos */
-#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000)
+#define FMODE_ATOMIC_POS ((__force fmode_t)(1 << 15))
/* Write access to underlying fs */
-#define FMODE_WRITER ((__force fmode_t)0x10000)
+#define FMODE_WRITER ((__force fmode_t)(1 << 16))
/* Has read method(s) */
-#define FMODE_CAN_READ ((__force fmode_t)0x20000)
+#define FMODE_CAN_READ ((__force fmode_t)(1 << 17))
/* Has write method(s) */
-#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
+#define FMODE_CAN_WRITE ((__force fmode_t)(1 << 18))
-#define FMODE_OPENED ((__force fmode_t)0x80000)
-#define FMODE_CREATED ((__force fmode_t)0x100000)
+#define FMODE_OPENED ((__force fmode_t)(1 << 19))
+#define FMODE_CREATED ((__force fmode_t)(1 << 20))
/* File is stream-like */
-#define FMODE_STREAM ((__force fmode_t)0x200000)
+#define FMODE_STREAM ((__force fmode_t)(1 << 21))
/* File supports DIRECT IO */
-#define FMODE_CAN_ODIRECT ((__force fmode_t)0x400000)
+#define FMODE_CAN_ODIRECT ((__force fmode_t)(1 << 22))
-#define FMODE_NOREUSE ((__force fmode_t)0x800000)
+#define FMODE_NOREUSE ((__force fmode_t)(1 << 23))
-/* File supports non-exclusive O_DIRECT writes from multiple threads */
-#define FMODE_DIO_PARALLEL_WRITE ((__force fmode_t)0x1000000)
+/* FMODE_* bit 24 */
/* File is embedded in backing_file object */
-#define FMODE_BACKING ((__force fmode_t)0x2000000)
+#define FMODE_BACKING ((__force fmode_t)(1 << 25))
/* File was opened by fanotify and shouldn't generate fanotify events */
-#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
+#define FMODE_NONOTIFY ((__force fmode_t)(1 << 26))
/* File is capable of returning -EAGAIN if I/O will block */
-#define FMODE_NOWAIT ((__force fmode_t)0x8000000)
+#define FMODE_NOWAIT ((__force fmode_t)(1 << 27))
/* File represents mount that needs unmounting */
-#define FMODE_NEED_UNMOUNT ((__force fmode_t)0x10000000)
+#define FMODE_NEED_UNMOUNT ((__force fmode_t)(1 << 28))
/* File does not contribute to nr_files count */
-#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000)
-
-/* File supports async buffered reads */
-#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000)
-
-/* File supports async nowait buffered writes */
-#define FMODE_BUF_WASYNC ((__force fmode_t)0x80000000)
+#define FMODE_NOACCOUNT ((__force fmode_t)(1 << 29))
/*
* Attribute flags. These should be or-ed together to figure out what
@@ -1033,12 +1031,13 @@ struct file_handle {
__u32 handle_bytes;
int handle_type;
/* file identifier */
- unsigned char f_handle[];
+ unsigned char f_handle[] __counted_by(handle_bytes);
};
static inline struct file *get_file(struct file *f)
{
- atomic_long_inc(&f->f_count);
+ long prior = atomic_long_fetch_inc_relaxed(&f->f_count);
+ WARN_ONCE(!prior, "struct file::f_count incremented from zero; use-after-free condition present!\n");
return f;
}
@@ -2001,8 +2000,11 @@ struct iov_iter;
struct io_uring_cmd;
struct offset_ctx;
+typedef unsigned int __bitwise fop_flags_t;
+
struct file_operations {
struct module *owner;
+ fop_flags_t fop_flags;
loff_t (*llseek) (struct file *, loff_t, int);
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
@@ -2015,7 +2017,6 @@ struct file_operations {
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
- unsigned long mmap_supported_flags;
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
@@ -2046,6 +2047,17 @@ struct file_operations {
unsigned int poll_flags);
} __randomize_layout;
+/* Supports async buffered reads */
+#define FOP_BUFFER_RASYNC ((__force fop_flags_t)(1 << 0))
+/* Supports async buffered writes */
+#define FOP_BUFFER_WASYNC ((__force fop_flags_t)(1 << 1))
+/* Supports synchronous page faults for mappings */
+#define FOP_MMAP_SYNC ((__force fop_flags_t)(1 << 2))
+/* Supports non-exclusive O_DIRECT writes from multiple threads */
+#define FOP_DIO_PARALLEL_WRITE ((__force fop_flags_t)(1 << 3))
+/* Contains huge pages */
+#define FOP_HUGE_PAGES ((__force fop_flags_t)(1 << 4))
+
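/*
 * Sketch of a driver using the new per-file_operations flags that replace
 * the removed per-file FMODE_BUF_RASYNC/FMODE_DIO_PARALLEL_WRITE bits
 * (handler names hypothetical):
 *
 *	static const struct file_operations example_fops = {
 *		.owner		= THIS_MODULE,
 *		.read_iter	= example_read_iter,
 *		.write_iter	= example_write_iter,
 *		.fop_flags	= FOP_BUFFER_RASYNC | FOP_DIO_PARALLEL_WRITE,
 *	};
 */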
/* Wrap a directory iterator that needs exclusive inode access */
int wrap_directory_iterator(struct file *, struct dir_context *,
int (*) (struct file *, struct dir_context *));
@@ -2251,7 +2263,13 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
+
+#ifdef CONFIG_SWAP
#define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE)
+#else
+#define IS_SWAPFILE(inode) ((void)(inode), 0U)
+#endif
+
#define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE)
#define IS_IMA(inode) ((inode)->i_flags & S_IMA)
#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
@@ -3338,6 +3356,8 @@ void simple_offset_init(struct offset_ctx *octx);
int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry);
void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry);
int simple_offset_empty(struct dentry *dentry);
+int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
int simple_offset_rename_exchange(struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
index 01542c4b87a2..d3350979115f 100644
--- a/include/linux/fs_parser.h
+++ b/include/linux/fs_parser.h
@@ -132,4 +132,8 @@ static inline bool fs_validate_description(const char *name,
#define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0, NULL)
#define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0, NULL)
+/* String parameter that allows empty argument */
+#define fsparam_string_empty(NAME, OPT) \
+ __fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL)
+
#endif /* _LINUX_FS_PARSER_H */
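/*
 * Usage sketch (the table and option names are hypothetical): accept a
 * string option whose value may be empty.
 */
static const struct fs_parameter_spec example_fs_parameters[] = {
	fsparam_string_empty("source", Opt_source),
	{}
};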
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 6e8562cbcc43..9de27643607f 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -172,9 +172,12 @@ extern void __fscache_invalidate(struct fscache_cookie *, const void *, loff_t,
extern int __fscache_begin_read_operation(struct netfs_cache_resources *, struct fscache_cookie *);
extern int __fscache_begin_write_operation(struct netfs_cache_resources *, struct fscache_cookie *);
-extern void __fscache_write_to_cache(struct fscache_cookie *, struct address_space *,
- loff_t, size_t, loff_t, netfs_io_terminated_t, void *,
- bool);
+void __fscache_write_to_cache(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ loff_t start, size_t len, loff_t i_size,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv,
+ bool using_pgpriv2, bool cond);
extern void __fscache_clear_page_bits(struct address_space *, loff_t, size_t);
/**
@@ -597,7 +600,8 @@ static inline void fscache_clear_page_bits(struct address_space *mapping,
* @i_size: The new size of the inode
* @term_func: The function to call upon completion
* @term_func_priv: The private data for @term_func
- * @caching: If PG_fscache has been set
+ * @using_pgpriv2: If we're using PG_private_2 to mark in-progress write
+ * @caching: If we actually want to do the caching
*
* Helper function for a netfs to write dirty data from an inode into the cache
* object that's backing it.
@@ -608,19 +612,21 @@ static inline void fscache_clear_page_bits(struct address_space *mapping,
* marked with PG_fscache.
*
* If given, @term_func will be called upon completion and supplied with
- * @term_func_priv. Note that the PG_fscache flags will have been cleared by
- * this point, so the netfs must retain its own pin on the mapping.
+ * @term_func_priv. Note that if @using_pgpriv2 is set, the PG_private_2 flags
+ * will have been cleared by this point, so the netfs must retain its own pin
+ * on the mapping.
*/
static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
struct address_space *mapping,
loff_t start, size_t len, loff_t i_size,
netfs_io_terminated_t term_func,
void *term_func_priv,
- bool caching)
+ bool using_pgpriv2, bool caching)
{
if (caching)
__fscache_write_to_cache(cookie, mapping, start, len, i_size,
- term_func, term_func_priv, caching);
+ term_func, term_func_priv,
+ using_pgpriv2, caching);
else if (term_func)
term_func(term_func_priv, -ENOBUFS, false);
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 868c8fb1bbc1..13becafe41df 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -2,6 +2,8 @@
#ifndef __LINUX_GFP_TYPES_H
#define __LINUX_GFP_TYPES_H
+#include <linux/bits.h>
+
/* The typedef is in types.h but we want the documentation here */
#if 0
/**
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index dc75f802e284..f8617eaf08ba 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -646,8 +646,6 @@ int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc,
struct gpio_device *gpio_device_find(const void *data,
int (*match)(struct gpio_chip *gc,
const void *data));
-struct gpio_device *gpio_device_find_by_label(const char *label);
-struct gpio_device *gpio_device_find_by_fwnode(const struct fwnode_handle *fwnode);
struct gpio_device *gpio_device_get(struct gpio_device *gdev);
void gpio_device_put(struct gpio_device *gdev);
@@ -814,6 +812,9 @@ struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc);
int gpio_device_get_base(struct gpio_device *gdev);
const char *gpio_device_get_label(struct gpio_device *gdev);
+struct gpio_device *gpio_device_find_by_label(const char *label);
+struct gpio_device *gpio_device_find_by_fwnode(const struct fwnode_handle *fwnode);
+
#else /* CONFIG_GPIOLIB */
#include <asm/bug.h>
@@ -843,6 +844,18 @@ static inline const char *gpio_device_get_label(struct gpio_device *gdev)
return NULL;
}
+static inline struct gpio_device *gpio_device_find_by_label(const char *label)
+{
+ WARN_ON(1);
+ return NULL;
+}
+
+static inline struct gpio_device *gpio_device_find_by_fwnode(const struct fwnode_handle *fwnode)
+{
+ WARN_ON(1);
+ return NULL;
+}
+
static inline int gpiochip_lock_as_irq(struct gpio_chip *gc,
unsigned int offset)
{
diff --git a/include/linux/gpio/property.h b/include/linux/gpio/property.h
index 6c75c8bd44a0..1a14e239221f 100644
--- a/include/linux/gpio/property.h
+++ b/include/linux/gpio/property.h
@@ -2,7 +2,6 @@
#ifndef __LINUX_GPIO_PROPERTY_H
#define __LINUX_GPIO_PROPERTY_H
-#include <dt-bindings/gpio/gpio.h> /* for GPIO_* flags */
#include <linux/property.h>
#define PROPERTY_ENTRY_GPIO(_name_, _chip_node_, _idx_, _flags_) \
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 77b30a8c6076..b06f7c426d38 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -554,17 +554,13 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}
-extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
int creat_flags, int page_size_log);
-static inline bool is_file_hugepages(struct file *file)
+static inline bool is_file_hugepages(const struct file *file)
{
- if (file->f_op == &hugetlbfs_file_operations)
- return true;
-
- return is_file_shm_hugepages(file);
+ return file->f_op->fop_flags & FOP_HUGE_PAGES;
}
static inline struct hstate *hstate_inode(struct inode *i)
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 6ef0557b4bff..96ceb4095425 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -832,6 +832,7 @@ struct vmbus_gpadl {
u32 gpadl_handle;
u32 size;
void *buffer;
+ bool decrypted;
};
struct vmbus_channel {
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 76121c2bb4f8..5c9bdd3ffccc 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -67,6 +67,8 @@
* later.
* IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
* depends on IRQF_PERCPU.
+ * IRQF_COND_ONESHOT - Agree to do IRQF_ONESHOT if already set for a shared
+ * interrupt.
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
@@ -82,6 +84,7 @@
#define IRQF_COND_SUSPEND 0x00040000
#define IRQF_NO_AUTOEN 0x00080000
#define IRQF_NO_DEBUG 0x00100000
+#define IRQF_COND_ONESHOT 0x00200000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
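/*
 * Usage sketch for IRQF_COND_ONESHOT (handler and device token are
 * hypothetical): a shared-interrupt driver that tolerates, but does not
 * require, oneshot handling agrees to it if the first sharer already
 * requested it:
 *
 *	ret = request_irq(irq, example_handler,
 *			  IRQF_SHARED | IRQF_COND_ONESHOT,
 *			  "example", dev);
 */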
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 68ed6697fece..e123d5e17b52 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -11,7 +11,6 @@ void __io_uring_cancel(bool cancel_all);
void __io_uring_free(struct task_struct *tsk);
void io_uring_unreg_ringfd(void);
const char *io_uring_get_opcode(u8 opcode);
-int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
bool io_is_uring_fops(struct file *file);
static inline void io_uring_files_cancel(void)
@@ -45,11 +44,6 @@ static inline const char *io_uring_get_opcode(u8 opcode)
{
return "";
}
-static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
-{
- return -EOPNOTSUPP;
-}
static inline bool io_is_uring_fops(struct file *file)
{
return false;
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index e453a997c060..447fbfd32215 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -26,12 +26,25 @@ static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
struct iov_iter *iter, void *ioucmd);
+
+/*
+ * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
+ * and the corresponding io_uring request.
+ *
+ * Note: the caller should never hard code @issue_flags and is only allowed
+ * to pass the mask provided by the core io_uring code.
+ */
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
unsigned issue_flags);
+
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
void (*task_work_cb)(struct io_uring_cmd *, unsigned),
unsigned flags);
+/*
+ * Note: the caller should never hard code @issue_flags and only use the
+ * mask provided by the core io_uring code.
+ */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
unsigned int issue_flags);
@@ -56,6 +69,17 @@ static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
}
#endif
+/*
+ * Polled completions must ensure they are coming from a poll queue, and
+ * hence are completed inside the usual poll handling loops.
+ */
+static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
+ ssize_t ret, ssize_t res2)
+{
+ lockdep_assert(in_task());
+ io_uring_cmd_done(ioucmd, ret, res2, 0);
+}
+
/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
void (*task_work_cb)(struct io_uring_cmd *, unsigned))
diff --git a/include/linux/io_uring/net.h b/include/linux/io_uring/net.h
new file mode 100644
index 000000000000..b58f39fed4d5
--- /dev/null
+++ b/include/linux/io_uring/net.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_IO_URING_NET_H
+#define _LINUX_IO_URING_NET_H
+
+struct io_uring_cmd;
+
+#if defined(CONFIG_IO_URING)
+int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
+
+#else
+static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index e24893625085..7a6b190c7da7 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -13,7 +13,7 @@ enum {
* A hint to not wake right away but delay until there are enough of
* tw's queued to match the number of CQEs the task is waiting for.
*
- * Must not be used wirh requests generating more than one CQE.
+ * Must not be used with requests generating more than one CQE.
* It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
*/
IOU_F_TWQ_LAZY_WAKE = 1,
@@ -205,6 +205,7 @@ struct io_submit_state {
bool plug_started;
bool need_plug;
+ bool cq_flush;
unsigned short submit_nr;
unsigned int cqes_count;
struct blk_plug plug;
@@ -219,7 +220,7 @@ struct io_ev_fd {
};
struct io_alloc_cache {
- struct io_wq_work_node list;
+ void **entries;
unsigned int nr_cached;
unsigned int max_cached;
size_t elem_size;
@@ -294,12 +295,13 @@ struct io_ring_ctx {
struct io_submit_state submit_state;
- struct io_buffer_list *io_bl;
struct xarray io_bl_xa;
struct io_hash_table cancel_table_locked;
struct io_alloc_cache apoll_cache;
struct io_alloc_cache netmsg_cache;
+ struct io_alloc_cache rw_cache;
+ struct io_alloc_cache uring_cache;
/*
* Any cancelable uring_cmd is added to this list in
@@ -342,14 +344,8 @@ struct io_ring_ctx {
unsigned cq_last_tm_flush;
} ____cacheline_aligned_in_smp;
- struct io_uring_cqe completion_cqes[16];
-
spinlock_t completion_lock;
- /* IRQ completion list, under ->completion_lock */
- unsigned int locked_free_nr;
- struct io_wq_work_list locked_free_list;
-
struct list_head io_buffers_comp;
struct list_head cq_overflow_list;
struct io_hash_table cancel_table;
@@ -372,9 +368,6 @@ struct io_ring_ctx {
struct list_head io_buffers_cache;
- /* deferred free list, protected by ->uring_lock */
- struct hlist_head io_buf_list;
-
/* Keep this last, we don't need it for the fast path */
struct wait_queue_head poll_wq;
struct io_restriction restrictions;
@@ -439,8 +432,6 @@ struct io_ring_ctx {
};
struct io_tw_state {
- /* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */
- bool locked;
};
enum {
@@ -481,6 +472,7 @@ enum {
REQ_F_CAN_POLL_BIT,
REQ_F_BL_EMPTY_BIT,
REQ_F_BL_NO_RECYCLE_BIT,
+ REQ_F_BUFFERS_COMMIT_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
@@ -559,6 +551,8 @@ enum {
REQ_F_BL_EMPTY = IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT),
/* don't recycle provided buffers for this request */
REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
+ /* buffer ring head needs incrementing on put */
+ REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
};
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 147feebd508c..3f003d5fde53 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -114,7 +114,7 @@ do { \
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
# define lockdep_hrtimer_enter(__hrtimer) false
-# define lockdep_hrtimer_exit(__context) do { } while (0)
+# define lockdep_hrtimer_exit(__context) do { (void)(__context); } while (0)
# define lockdep_posixtimer_enter() do { } while (0)
# define lockdep_posixtimer_exit() do { } while (0)
# define lockdep_irq_work_enter(__work) do { } while (0)
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index f0a949b7c973..f5a2727ca4a9 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -216,6 +216,7 @@ extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];
extern void jump_label_init(void);
+extern void jump_label_init_ro(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
@@ -265,6 +266,8 @@ static __always_inline void jump_label_init(void)
static_key_initialized = true;
}
+static __always_inline void jump_label_init_ro(void) { }
+
static __always_inline bool static_key_false(struct static_key *key)
{
if (unlikely_notrace(static_key_count(key) > 0))
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 26d68115afb8..324d792e7c78 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -107,6 +107,7 @@ enum {
ATA_DFLAG_NCQ_PRIO_ENABLED = (1 << 20), /* Priority cmds sent to dev */
ATA_DFLAG_CDL_ENABLED = (1 << 21), /* cmd duration limits is enabled */
+ ATA_DFLAG_RESUMING = (1 << 22), /* Device is resuming */
ATA_DFLAG_DETACH = (1 << 24),
ATA_DFLAG_DETACHED = (1 << 25),
ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0436b919f1c7..b6bdaa18b9e9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1223,14 +1223,16 @@ static inline void page_mapcount_reset(struct page *page)
* a large folio, it includes the number of times this page is mapped
* as part of that folio.
*
- * The result is undefined for pages which cannot be mapped into userspace.
- * For example SLAB or special types of pages. See function page_has_type().
- * They use this field in struct page differently.
+ * Will report 0 for pages which cannot be mapped into userspace, e.g.
+ * slab, page tables and similar.
*/
static inline int page_mapcount(struct page *page)
{
int mapcount = atomic_read(&page->_mapcount) + 1;
+ /* Handle page_has_type() pages */
+ if (mapcount < 0)
+ mapcount = 0;
if (unlikely(PageCompound(page)))
mapcount += folio_entire_mapcount(page_folio(page));
@@ -2207,11 +2209,6 @@ static inline int arch_make_folio_accessible(struct folio *folio)
*/
#include <linux/vmstat.h>
-static __always_inline void *lowmem_page_address(const struct page *page)
-{
- return page_to_virt(page);
-}
-
#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif
@@ -2234,6 +2231,11 @@ void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif
+static __always_inline void *lowmem_page_address(const struct page *page)
+{
+ return page_to_virt(page);
+}
+
#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address) do { } while(0)
diff --git a/include/linux/mman.h b/include/linux/mman.h
index dc7048824be8..bcb201ab7a41 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -162,6 +162,14 @@ calc_vm_flag_bits(unsigned long flags)
unsigned long vm_commit_limit(void);
+#ifndef arch_memory_deny_write_exec_supported
+static inline bool arch_memory_deny_write_exec_supported(void)
+{
+ return true;
+}
+#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
+#endif
+
/*
* Denies creating a writable executable mapping or gaining executable permissions.
*
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 74e0cc14ebf8..967aa9ea9f96 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -44,6 +44,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT};
#define LOOKUP_BENEATH 0x080000 /* No escaping from starting point. */
#define LOOKUP_IN_ROOT 0x100000 /* Treat dirfd as fs root. */
#define LOOKUP_CACHED 0x200000 /* Only do cached lookup */
+#define LOOKUP_LINKAT_EMPTY 0x400000 /* Linkat request with empty path. */
/* LOOKUP_* flags which do scope-related checks based on the dirfd. */
#define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT)
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 100cbb261269..f45d06284f2f 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -20,95 +20,24 @@
#include <linux/uio.h>
enum netfs_sreq_ref_trace;
-
-/*
- * Overload PG_private_2 to give us PG_fscache - this is used to indicate that
- * a page is currently backed by a local disk cache
- */
-#define folio_test_fscache(folio) folio_test_private_2(folio)
-#define PageFsCache(page) PagePrivate2((page))
-#define SetPageFsCache(page) SetPagePrivate2((page))
-#define ClearPageFsCache(page) ClearPagePrivate2((page))
-#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
-#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
+typedef struct mempool_s mempool_t;
/**
- * folio_start_fscache - Start an fscache write on a folio.
+ * folio_start_private_2 - Start an fscache write on a folio. [DEPRECATED]
* @folio: The folio.
*
* Call this function before writing a folio to a local cache. Starting a
* second write before the first one finishes is not allowed.
+ *
+ * Note that this should no longer be used.
*/
-static inline void folio_start_fscache(struct folio *folio)
+static inline void folio_start_private_2(struct folio *folio)
{
VM_BUG_ON_FOLIO(folio_test_private_2(folio), folio);
folio_get(folio);
folio_set_private_2(folio);
}
-/**
- * folio_end_fscache - End an fscache write on a folio.
- * @folio: The folio.
- *
- * Call this function after the folio has been written to the local cache.
- * This will wake any sleepers waiting on this folio.
- */
-static inline void folio_end_fscache(struct folio *folio)
-{
- folio_end_private_2(folio);
-}
-
-/**
- * folio_wait_fscache - Wait for an fscache write on this folio to end.
- * @folio: The folio.
- *
- * If this folio is currently being written to a local cache, wait for
- * the write to finish. Another write may start after this one finishes,
- * unless the caller holds the folio lock.
- */
-static inline void folio_wait_fscache(struct folio *folio)
-{
- folio_wait_private_2(folio);
-}
-
-/**
- * folio_wait_fscache_killable - Wait for an fscache write on this folio to end.
- * @folio: The folio.
- *
- * If this folio is currently being written to a local cache, wait
- * for the write to finish or for a fatal signal to be received.
- * Another write may start after this one finishes, unless the caller
- * holds the folio lock.
- *
- * Return:
- * - 0 if successful.
- * - -EINTR if a fatal signal was encountered.
- */
-static inline int folio_wait_fscache_killable(struct folio *folio)
-{
- return folio_wait_private_2_killable(folio);
-}
-
-static inline void set_page_fscache(struct page *page)
-{
- folio_start_fscache(page_folio(page));
-}
-
-static inline void end_page_fscache(struct page *page)
-{
- folio_end_private_2(page_folio(page));
-}
-
-static inline void wait_on_page_fscache(struct page *page)
-{
- folio_wait_private_2(page_folio(page));
-}
-
-static inline int wait_on_page_fscache_killable(struct page *page)
-{
- return folio_wait_private_2_killable(page_folio(page));
-}
-
/* Marks used on xarray-based buffers */
#define NETFS_BUF_PUT_MARK XA_MARK_0 /* - Page needs putting */
#define NETFS_BUF_PAGECACHE_MARK XA_MARK_1 /* - Page needs wb/dirty flag wrangling */
@@ -135,6 +64,7 @@ struct netfs_inode {
#if IS_ENABLED(CONFIG_FSCACHE)
struct fscache_cookie *cache;
#endif
+ struct mutex wb_lock; /* Writeback serialisation */
loff_t remote_i_size; /* Size of the remote file */
loff_t zero_point; /* Size after which we assume there's no data
* on the server */
@@ -142,7 +72,8 @@ struct netfs_inode {
#define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */
#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
#define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */
-#define NETFS_ICTX_NO_WRITE_STREAMING 3 /* Don't engage in write-streaming */
+#define NETFS_ICTX_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark
+ * write to cache on read */
};
/*
@@ -165,16 +96,25 @@ struct netfs_folio {
unsigned int dirty_len; /* Write-streaming dirty data length */
};
#define NETFS_FOLIO_INFO 0x1UL /* OR'd with folio->private. */
+#define NETFS_FOLIO_COPY_TO_CACHE ((struct netfs_group *)0x356UL) /* Write to the cache only */
-static inline struct netfs_folio *netfs_folio_info(struct folio *folio)
+static inline bool netfs_is_folio_info(const void *priv)
{
- void *priv = folio_get_private(folio);
+ return (unsigned long)priv & NETFS_FOLIO_INFO;
+}
- if ((unsigned long)priv & NETFS_FOLIO_INFO)
+static inline struct netfs_folio *__netfs_folio_info(const void *priv)
+{
+ if (netfs_is_folio_info(priv))
return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO);
return NULL;
}
+static inline struct netfs_folio *netfs_folio_info(struct folio *folio)
+{
+ return __netfs_folio_info(folio_get_private(folio));
+}
+
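As an editorial sketch of how this tagging works (not part of the patch): bit 0 of folio->private distinguishes a struct netfs_folio record from a bare netfs_group pointer, so a hypothetical caller would tag the pointer on attach and strip it again on detach:

static inline void example_attach_finfo(struct folio *folio,
					struct netfs_folio *finfo)
{
	/* Tag bit 0 so netfs_is_folio_info() recognises the record. */
	folio_attach_private(folio,
			     (void *)((unsigned long)finfo | NETFS_FOLIO_INFO));
}

static inline struct netfs_folio *example_detach_finfo(struct folio *folio)
{
	/* Strips the tag; NULL if ->private held a bare group pointer. */
	return __netfs_folio_info(folio_detach_private(folio));
}
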
static inline struct netfs_group *netfs_folio_group(struct folio *folio)
{
struct netfs_folio *finfo;
@@ -187,6 +127,33 @@ static inline struct netfs_group *netfs_folio_group(struct folio *folio)
}
/*
+ * Stream of I/O subrequests going to a particular destination, such as the
+ * server or the local cache. This is mainly intended for writing where we may
+ * have to write to multiple destinations concurrently.
+ */
+struct netfs_io_stream {
+ /* Submission tracking */
+ struct netfs_io_subrequest *construct; /* Op being constructed */
+ unsigned int submit_off; /* Folio offset we're submitting from */
+ unsigned int submit_len; /* Amount of data left to submit */
+ unsigned int submit_max_len; /* Amount I/O can be rounded up to */
+ void (*prepare_write)(struct netfs_io_subrequest *subreq);
+ void (*issue_write)(struct netfs_io_subrequest *subreq);
+ /* Collection tracking */
+ struct list_head subrequests; /* Contributory I/O operations */
+ struct netfs_io_subrequest *front; /* Op being collected */
+ unsigned long long collected_to; /* Position we've collected results to */
+ size_t transferred; /* The amount transferred from this stream */
+ enum netfs_io_source source; /* Where to read from/write to */
+ unsigned short error; /* Aggregate error for the stream */
+ unsigned char stream_nr; /* Index of stream in parent table */
+ bool avail; /* T if stream is available */
+ bool active; /* T if stream is active */
+ bool need_retry; /* T if this stream needs retrying */
+ bool failed; /* T if this stream failed */
+};
+
+/*
* Resources required to do operations on a cache.
*/
struct netfs_cache_resources {
@@ -209,14 +176,17 @@ struct netfs_io_subrequest {
struct work_struct work;
struct list_head rreq_link; /* Link in rreq->subrequests */
struct iov_iter io_iter; /* Iterator for this subrequest */
- loff_t start; /* Where to start the I/O */
+ unsigned long long start; /* Where to start the I/O */
+ size_t max_len; /* Maximum size of the I/O */
size_t len; /* Size of the I/O */
size_t transferred; /* Amount of data transferred */
refcount_t ref;
short error; /* 0 or error that occurred */
unsigned short debug_index; /* Index in list (for debugging output) */
+ unsigned int nr_segs; /* Number of segs in io_iter */
unsigned int max_nr_segs; /* 0 or max number of segments in an iterator */
enum netfs_io_source source; /* Where to read from/write to */
+ unsigned char stream_nr; /* I/O stream this belongs to */
unsigned long flags;
#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */
#define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */
@@ -224,15 +194,20 @@ struct netfs_io_subrequest {
#define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */
#define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */
#define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */
+#define NETFS_SREQ_BOUNDARY 6 /* Set if ends on hard boundary (eg. ceph object) */
+#define NETFS_SREQ_IN_PROGRESS 8 /* Unlocked when the subrequest completes */
+#define NETFS_SREQ_NEED_RETRY 9 /* Set if the filesystem requests a retry */
+#define NETFS_SREQ_RETRYING 10 /* Set if we're retrying */
+#define NETFS_SREQ_FAILED 11 /* Set if the subreq failed unretryably */
};
enum netfs_io_origin {
NETFS_READAHEAD, /* This read was triggered by readahead */
NETFS_READPAGE, /* This read is a synchronous read */
NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
+ NETFS_COPY_TO_CACHE, /* This write is to copy a read to the cache */
NETFS_WRITEBACK, /* This write was triggered by writepages */
NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */
- NETFS_LAUNDER_WRITE, /* This is triggered by ->launder_folio() */
NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */
NETFS_DIO_READ, /* This is a direct I/O read */
NETFS_DIO_WRITE, /* This is a direct I/O write */
@@ -254,26 +229,36 @@ struct netfs_io_request {
struct netfs_cache_resources cache_resources;
struct list_head proc_link; /* Link in netfs_iorequests */
struct list_head subrequests; /* Contributory I/O operations */
+ struct netfs_io_stream io_streams[2]; /* Streams of parallel I/O operations */
+#define NR_IO_STREAMS 2 /* wreq->nr_io_streams */
+ struct netfs_group *group; /* Writeback group being written back */
struct iov_iter iter; /* Unencrypted-side iterator */
struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */
void *netfs_priv; /* Private data for the netfs */
+ void *netfs_priv2; /* Private data for the netfs */
struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */
unsigned int direct_bv_count; /* Number of elements in direct_bv[] */
unsigned int debug_id;
unsigned int rsize; /* Maximum read size (0 for none) */
unsigned int wsize; /* Maximum write size (0 for none) */
- unsigned int subreq_counter; /* Next subreq->debug_index */
+ atomic_t subreq_counter; /* Next subreq->debug_index */
+ unsigned int nr_group_rel; /* Number of refs to release on ->group */
+ spinlock_t lock; /* Lock for queuing subreqs */
atomic_t nr_outstanding; /* Number of ops in progress */
atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */
- size_t submitted; /* Amount submitted for I/O so far */
- size_t len; /* Length of the request */
size_t upper_len; /* Length can be extended to here */
+ unsigned long long submitted; /* Amount submitted for I/O so far */
+ unsigned long long len; /* Length of the request */
size_t transferred; /* Amount to be indicated as transferred */
short error; /* 0 or error that occurred */
enum netfs_io_origin origin; /* Origin of the request */
bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */
- loff_t i_size; /* Size of the file */
- loff_t start; /* Start position */
+ unsigned long long i_size; /* Size of the file */
+ unsigned long long start; /* Start position */
+ atomic64_t issued_to; /* Write issuer folio cursor */
+ unsigned long long contiguity; /* Tracking for gaps in the writeback sequence */
+ unsigned long long collected_to; /* Point we've collected to */
+ unsigned long long cleaned_to; /* Position we've cleaned folios to */
pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
refcount_t ref;
unsigned long flags;
@@ -287,6 +272,11 @@ struct netfs_io_request {
#define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */
#define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */
#define NETFS_RREQ_BLOCKED 10 /* We blocked */
+#define NETFS_RREQ_PAUSE 11 /* Pause subrequest generation */
+#define NETFS_RREQ_USE_IO_ITER 12 /* Use ->io_iter rather than ->i_pages */
+#define NETFS_RREQ_ALL_QUEUED 13 /* All subreqs are now queued */
+#define NETFS_RREQ_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark
+ * write to cache on read */
const struct netfs_request_ops *netfs_ops;
void (*cleanup)(struct netfs_io_request *req);
};
@@ -295,8 +285,8 @@ struct netfs_io_request {
* Operations the network filesystem can/must provide to the helpers.
*/
struct netfs_request_ops {
- unsigned int io_request_size; /* Alloc size for netfs_io_request struct */
- unsigned int io_subrequest_size; /* Alloc size for netfs_io_subrequest struct */
+ mempool_t *request_pool;
+ mempool_t *subrequest_pool;
int (*init_request)(struct netfs_io_request *rreq, struct file *file);
void (*free_request)(struct netfs_io_request *rreq);
void (*free_subrequest)(struct netfs_io_subrequest *rreq);
@@ -312,10 +302,13 @@ struct netfs_request_ops {
/* Modification handling */
void (*update_i_size)(struct inode *inode, loff_t i_size);
+ void (*post_modify)(struct inode *inode);
/* Write request handling */
- void (*create_write_requests)(struct netfs_io_request *wreq,
- loff_t start, size_t len);
+ void (*begin_writeback)(struct netfs_io_request *wreq);
+ void (*prepare_write)(struct netfs_io_subrequest *subreq);
+ void (*issue_write)(struct netfs_io_subrequest *subreq);
+ void (*retry_request)(struct netfs_io_request *wreq, struct netfs_io_stream *stream);
void (*invalidate_cache)(struct netfs_io_request *wreq);
};
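
To make the new hook split concrete, a minimal sketch of how a filesystem might fill in the write side of this table (all "myfs_" names are hypothetical; the actual RPC machinery is elided):

static void myfs_issue_write(struct netfs_io_subrequest *subreq)
{
	/* Send subreq->len bytes at subreq->start to the server, then
	 * report completion via netfs_write_subrequest_terminated()
	 * from the RPC completion path.
	 */
}

static const struct netfs_request_ops myfs_request_ops = {
	.issue_write	= myfs_issue_write,
	/* .begin_writeback, .prepare_write, .retry_request as needed */
};
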
@@ -350,15 +343,27 @@ struct netfs_cache_ops {
netfs_io_terminated_t term_func,
void *term_func_priv);
+ /* Write data to the cache from a netfs subrequest. */
+ void (*issue_write)(struct netfs_io_subrequest *subreq);
+
/* Expand readahead request */
void (*expand_readahead)(struct netfs_cache_resources *cres,
- loff_t *_start, size_t *_len, loff_t i_size);
+ unsigned long long *_start,
+ unsigned long long *_len,
+ unsigned long long i_size);
/* Prepare a read operation, shortening it to a cached/uncached
* boundary as appropriate.
*/
enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
- loff_t i_size);
+ unsigned long long i_size);
+
+ /* Prepare a write subrequest, working out if we're allowed to do it
+ * and finding out the maximum amount of data to gather before
+ * attempting to submit. If we're not permitted to do it, the
+ * subrequest should be marked failed.
+ */
+ void (*prepare_write_subreq)(struct netfs_io_subrequest *subreq);
/* Prepare a write operation, working out what part of the write we can
* actually do.
@@ -410,7 +415,6 @@ int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
bool netfs_release_folio(struct folio *folio, gfp_t gfp);
-int netfs_launder_folio(struct folio *folio);
/* VMA operations API. */
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
@@ -426,9 +430,7 @@ ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
iov_iter_extraction_t extraction_flags);
size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
size_t max_size, size_t max_segs);
-struct netfs_io_subrequest *netfs_create_write_request(
- struct netfs_io_request *wreq, enum netfs_io_source dest,
- loff_t start, size_t len, work_func_t worker);
+void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq);
void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
bool was_async);
void netfs_queue_write_request(struct netfs_io_subrequest *subreq);
@@ -472,6 +474,7 @@ static inline void netfs_inode_init(struct netfs_inode *ctx,
#if IS_ENABLED(CONFIG_FSCACHE)
ctx->cache = NULL;
#endif
+ mutex_init(&ctx->wb_lock);
/* ->releasepage() drives zero_point */
if (use_zero_point) {
ctx->zero_point = ctx->remote_i_size;
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 3921fbed0b28..6f9242259edc 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -17,10 +17,12 @@
* build_OID_registry.pl to generate the data for look_up_OID().
*/
enum OID {
+	OID_id_dsa_with_sha1,		/* 1.2.840.10040.4.3 */
OID_id_dsa, /* 1.2.840.10040.4.1 */
OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */
OID_id_prime192v1, /* 1.2.840.10045.3.1.1 */
OID_id_prime256v1, /* 1.2.840.10045.3.1.7 */
+ OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */
OID_id_ecdsa_with_sha224, /* 1.2.840.10045.4.3.1 */
OID_id_ecdsa_with_sha256, /* 1.2.840.10045.4.3.2 */
OID_id_ecdsa_with_sha384, /* 1.2.840.10045.4.3.3 */
@@ -28,6 +30,7 @@ enum OID {
/* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */
OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */
+ OID_sha1WithRSAEncryption, /* 1.2.840.113549.1.1.5 */
OID_sha256WithRSAEncryption, /* 1.2.840.113549.1.1.11 */
OID_sha384WithRSAEncryption, /* 1.2.840.113549.1.1.12 */
OID_sha512WithRSAEncryption, /* 1.2.840.113549.1.1.13 */
@@ -64,7 +67,9 @@ enum OID {
OID_PKU2U, /* 1.3.5.1.5.2.7 */
OID_Scram, /* 1.3.6.1.5.5.14 */
OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
+ OID_sha1, /* 1.3.14.3.2.26 */
OID_id_ansip384r1, /* 1.3.132.0.34 */
+ OID_id_ansip521r1, /* 1.3.132.0.35 */
OID_sha256, /* 2.16.840.1.101.3.4.2.1 */
OID_sha384, /* 2.16.840.1.101.3.4.2.2 */
OID_sha512, /* 2.16.840.1.101.3.4.2.3 */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 652d77805e99..4bf1c25fd1dc 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -190,7 +190,6 @@ enum pageflags {
/* At least one page in this folio has the hwpoison flag set */
PG_has_hwpoisoned = PG_error,
- PG_hugetlb = PG_active,
PG_large_rmappable = PG_workingset, /* anon or file-backed */
};
@@ -458,30 +457,51 @@ static __always_inline int TestClearPage##uname(struct page *page) \
TESTSETFLAG(uname, lname, policy) \
TESTCLEARFLAG(uname, lname, policy)
+#define FOLIO_TEST_FLAG_FALSE(name) \
+static inline bool folio_test_##name(const struct folio *folio) \
+{ return false; }
+#define FOLIO_SET_FLAG_NOOP(name) \
+static inline void folio_set_##name(struct folio *folio) { }
+#define FOLIO_CLEAR_FLAG_NOOP(name) \
+static inline void folio_clear_##name(struct folio *folio) { }
+#define __FOLIO_SET_FLAG_NOOP(name) \
+static inline void __folio_set_##name(struct folio *folio) { }
+#define __FOLIO_CLEAR_FLAG_NOOP(name) \
+static inline void __folio_clear_##name(struct folio *folio) { }
+#define FOLIO_TEST_SET_FLAG_FALSE(name) \
+static inline bool folio_test_set_##name(struct folio *folio) \
+{ return false; }
+#define FOLIO_TEST_CLEAR_FLAG_FALSE(name) \
+static inline bool folio_test_clear_##name(struct folio *folio) \
+{ return false; }
+
+#define FOLIO_FLAG_FALSE(name) \
+FOLIO_TEST_FLAG_FALSE(name) \
+FOLIO_SET_FLAG_NOOP(name) \
+FOLIO_CLEAR_FLAG_NOOP(name)
+
#define TESTPAGEFLAG_FALSE(uname, lname) \
-static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
+FOLIO_TEST_FLAG_FALSE(lname) \
static inline int Page##uname(const struct page *page) { return 0; }
#define SETPAGEFLAG_NOOP(uname, lname) \
-static inline void folio_set_##lname(struct folio *folio) { } \
+FOLIO_SET_FLAG_NOOP(lname) \
static inline void SetPage##uname(struct page *page) { }
#define CLEARPAGEFLAG_NOOP(uname, lname) \
-static inline void folio_clear_##lname(struct folio *folio) { } \
+FOLIO_CLEAR_FLAG_NOOP(lname) \
static inline void ClearPage##uname(struct page *page) { }
#define __CLEARPAGEFLAG_NOOP(uname, lname) \
-static inline void __folio_clear_##lname(struct folio *folio) { } \
+__FOLIO_CLEAR_FLAG_NOOP(lname) \
static inline void __ClearPage##uname(struct page *page) { }
#define TESTSETFLAG_FALSE(uname, lname) \
-static inline bool folio_test_set_##lname(struct folio *folio) \
-{ return 0; } \
+FOLIO_TEST_SET_FLAG_FALSE(lname) \
static inline int TestSetPage##uname(struct page *page) { return 0; }
#define TESTCLEARFLAG_FALSE(uname, lname) \
-static inline bool folio_test_clear_##lname(struct folio *folio) \
-{ return 0; } \
+FOLIO_TEST_CLEAR_FLAG_FALSE(lname) \
static inline int TestClearPage##uname(struct page *page) { return 0; }
#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname) \
@@ -855,29 +875,6 @@ TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)
#define PG_head_mask ((1UL << PG_head))
-#ifdef CONFIG_HUGETLB_PAGE
-int PageHuge(const struct page *page);
-SETPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
-CLEARPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
-
-/**
- * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs
- * @folio: The folio to test.
- *
- * Context: Any context. Caller should have a reference on the folio to
- * prevent it from being turned into a tail page.
- * Return: True for hugetlbfs folios, false for anon folios or folios
- * belonging to other filesystems.
- */
-static inline bool folio_test_hugetlb(const struct folio *folio)
-{
- return folio_test_large(folio) &&
- test_bit(PG_hugetlb, const_folio_flags(folio, 1));
-}
-#else
-TESTPAGEFLAG_FALSE(Huge, hugetlb)
-#endif
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* PageHuge() only returns true for hugetlbfs pages, but not for
@@ -934,33 +931,22 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif
/*
- * Check if a page is currently marked HWPoisoned. Note that this check is
- * best effort only and inherently racy: there is no way to synchronize with
- * failing hardware.
- */
-static inline bool is_page_hwpoison(struct page *page)
-{
- if (PageHWPoison(page))
- return true;
- return PageHuge(page) && PageHWPoison(compound_head(page));
-}
-
-/*
* For pages that are never mapped to userspace (and aren't PageSlab),
* page_type may be used. Because it is initialised to -1, we invert the
* sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
* __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
- * low bits so that an underflow or overflow of page_mapcount() won't be
+ * low bits so that an underflow or overflow of _mapcount won't be
* mistaken for a page type value.
*/
#define PAGE_TYPE_BASE 0xf0000000
-/* Reserve 0x0000007f to catch underflows of page_mapcount */
+/* Reserve 0x0000007f to catch underflows of _mapcount */
#define PAGE_MAPCOUNT_RESERVE -128
#define PG_buddy 0x00000080
#define PG_offline 0x00000100
#define PG_table 0x00000200
#define PG_guard 0x00000400
+#define PG_hugetlb 0x00000800
#define PageType(page, flag) \
((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
@@ -977,35 +963,38 @@ static inline int page_has_type(const struct page *page)
return page_type_has_type(page->page_type);
}
+#define FOLIO_TYPE_OPS(lname, fname) \
+static __always_inline bool folio_test_##fname(const struct folio *folio)\
+{ \
+ return folio_test_type(folio, PG_##lname); \
+} \
+static __always_inline void __folio_set_##fname(struct folio *folio) \
+{ \
+ VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \
+ folio->page.page_type &= ~PG_##lname; \
+} \
+static __always_inline void __folio_clear_##fname(struct folio *folio) \
+{ \
+ VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
+ folio->page.page_type |= PG_##lname; \
+}
+
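For reference, a worked expansion of this macro (editorial sketch): FOLIO_TYPE_OPS(hugetlb, hugetlb), used further down, generates roughly:

	folio_test_hugetlb(f)    /* -> folio_test_type(f, PG_hugetlb)   */
	__folio_set_hugetlb(f)   /* -> f->page.page_type &= ~PG_hugetlb */
	__folio_clear_hugetlb(f) /* -> f->page.page_type |=  PG_hugetlb */

The set and clear senses are inverted because page_type is initialised to -1, as the comment above explains.
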
#define PAGE_TYPE_OPS(uname, lname, fname) \
+FOLIO_TYPE_OPS(lname, fname) \
static __always_inline int Page##uname(const struct page *page) \
{ \
return PageType(page, PG_##lname); \
} \
-static __always_inline int folio_test_##fname(const struct folio *folio)\
-{ \
- return folio_test_type(folio, PG_##lname); \
-} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!PageType(page, 0), page); \
page->page_type &= ~PG_##lname; \
} \
-static __always_inline void __folio_set_##fname(struct folio *folio) \
-{ \
- VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \
- folio->page.page_type &= ~PG_##lname; \
-} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
page->page_type |= PG_##lname; \
-} \
-static __always_inline void __folio_clear_##fname(struct folio *folio) \
-{ \
- VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
- folio->page.page_type |= PG_##lname; \
-} \
+}
/*
* PageBuddy() indicates that the page is free and in the buddy system
@@ -1052,6 +1041,37 @@ PAGE_TYPE_OPS(Table, table, pgtable)
*/
PAGE_TYPE_OPS(Guard, guard, guard)
+#ifdef CONFIG_HUGETLB_PAGE
+FOLIO_TYPE_OPS(hugetlb, hugetlb)
+#else
+FOLIO_TEST_FLAG_FALSE(hugetlb)
+#endif
+
+/**
+ * PageHuge - Determine if the page belongs to hugetlbfs
+ * @page: The page to test.
+ *
+ * Context: Any context.
+ * Return: True for hugetlbfs pages, false for anon pages or pages
+ * belonging to other filesystems.
+ */
+static inline bool PageHuge(const struct page *page)
+{
+ return folio_test_hugetlb(page_folio(page));
+}
+
+/*
+ * Check if a page is currently marked HWPoisoned. Note that this check is
+ * best effort only and inherently racy: there is no way to synchronize with
+ * failing hardware.
+ */
+static inline bool is_page_hwpoison(struct page *page)
+{
+ if (PageHWPoison(page))
+ return true;
+ return PageHuge(page) && PageHWPoison(compound_head(page));
+}
+
extern bool is_free_buddy_page(struct page *page);
PAGEFLAG(Isolated, isolated, PF_ANY);
@@ -1118,7 +1138,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
*/
#define PAGE_FLAGS_SECOND \
(0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \
- 1UL << PG_hugetlb | 1UL << PG_large_rmappable)
+ 1UL << PG_large_rmappable)
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 2df35e65557d..c5e33e2ca48a 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -40,6 +40,8 @@ int filemap_fdatawait_keep_errors(struct address_space *mapping);
int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
loff_t start_byte, loff_t end_byte);
+int filemap_invalidate_inode(struct inode *inode, bool flush,
+ loff_t start, loff_t end);
static inline int filemap_fdatawait(struct address_space *mapping)
{
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index fcc06c300a72..5d3a0cccc6bf 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -11,8 +11,8 @@
#include <linux/types.h>
-/* 15 pointers + header align the folio_batch structure to a power of two */
-#define PAGEVEC_SIZE 15
+/* 31 pointers + header align the folio_batch structure to a power of two */
+#define PAGEVEC_SIZE 31
struct folio;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a0c75e467df3..c547d1d4feb1 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -580,6 +580,7 @@
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb
#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3
#define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb
+#define PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3 0x12bb
#define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3
#define PCI_DEVICE_ID_AMD_MI300_DF_F3 0x152b
#define PCI_DEVICE_ID_AMD_VANGOGH_USB 0x163a
diff --git a/include/linux/peci.h b/include/linux/peci.h
index 9b3d36aff431..90e241458ef6 100644
--- a/include/linux/peci.h
+++ b/include/linux/peci.h
@@ -58,7 +58,6 @@ static inline struct peci_controller *to_peci_controller(void *d)
/**
* struct peci_device - PECI device
* @dev: device object to register PECI device to the device model
- * @controller: manages the bus segment hosting this PECI device
* @info: PECI device characteristics
* @info.family: device family
* @info.model: device model
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index d2a15c0c6f8a..a5304ae8c654 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -809,11 +809,8 @@ struct perf_event {
u64 (*clock)(void);
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
-#ifdef CONFIG_BPF_SYSCALL
- perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
u64 bpf_cookie;
-#endif
#ifdef CONFIG_EVENT_TRACING
struct trace_event_call *tp_event;
@@ -883,6 +880,7 @@ struct perf_event_pmu_context {
unsigned int nr_events;
unsigned int nr_cgroups;
+ unsigned int nr_freq;
atomic_t refcount; /* event <-> epc */
struct rcu_head rcu_head;
@@ -897,6 +895,11 @@ struct perf_event_pmu_context {
int rotate_necessary;
};
+static inline bool perf_pmu_ctx_is_active(struct perf_event_pmu_context *epc)
+{
+ return !list_empty(&epc->flexible_active) || !list_empty(&epc->pinned_active);
+}
+
struct perf_event_groups {
struct rb_root tree;
u64 index;
@@ -1342,8 +1345,10 @@ extern int perf_event_output(struct perf_event *event,
struct pt_regs *regs);
static inline bool
-__is_default_overflow_handler(perf_overflow_handler_t overflow_handler)
+is_default_overflow_handler(struct perf_event *event)
{
+ perf_overflow_handler_t overflow_handler = event->overflow_handler;
+
if (likely(overflow_handler == perf_event_output_forward))
return true;
if (unlikely(overflow_handler == perf_event_output_backward))
@@ -1351,22 +1356,6 @@ __is_default_overflow_handler(perf_overflow_handler_t overflow_handler)
return false;
}
-#define is_default_overflow_handler(event) \
- __is_default_overflow_handler((event)->overflow_handler)
-
-#ifdef CONFIG_BPF_SYSCALL
-static inline bool uses_default_overflow_handler(struct perf_event *event)
-{
- if (likely(is_default_overflow_handler(event)))
- return true;
-
- return __is_default_overflow_handler(event->orig_overflow_handler);
-}
-#else
-#define uses_default_overflow_handler(event) \
- is_default_overflow_handler(event)
-#endif
-
extern void
perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
@@ -1697,6 +1686,14 @@ perf_event_addr_filters(struct perf_event *event)
return ifh;
}
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+ /* Only the parent has fasync state */
+ if (event->parent)
+ event = event->parent;
+ return &event->fasync;
+}
+
extern void perf_event_addr_filters_sync(struct perf_event *event);
extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id);
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index eb556f988d57..d8f15770a522 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -71,7 +71,6 @@ struct sysc_regbits {
#define SYSC_QUIRK_SWSUP_SIDLE_ACT BIT(12)
#define SYSC_QUIRK_SWSUP_SIDLE BIT(11)
#define SYSC_QUIRK_EXT_OPT_CLOCK BIT(10)
-#define SYSC_QUIRK_LEGACY_IDLE BIT(9)
#define SYSC_QUIRK_RESET_STATUS BIT(8)
#define SYSC_QUIRK_NO_IDLE BIT(7)
#define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6)
diff --git a/include/linux/profile.h b/include/linux/profile.h
index 11db1ec516e2..04ae5ebcb637 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -18,13 +18,8 @@ struct proc_dir_entry;
struct notifier_block;
#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS)
-void create_prof_cpu_mask(void);
int create_proc_profile(void);
#else
-static inline void create_prof_cpu_mask(void)
-{
-}
-
static inline int create_proc_profile(void)
{
return 0;
diff --git a/include/linux/qat/qat_mig_dev.h b/include/linux/qat/qat_mig_dev.h
new file mode 100644
index 000000000000..dbbb6a063dd2
--- /dev/null
+++ b/include/linux/qat/qat_mig_dev.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation */
+#ifndef QAT_MIG_DEV_H_
+#define QAT_MIG_DEV_H_
+
+struct pci_dev;
+
+struct qat_mig_dev {
+ void *parent_accel_dev;
+ u8 *state;
+ u32 setup_size;
+ u32 remote_setup_size;
+ u32 state_size;
+ s32 vf_id;
+};
+
+struct qat_mig_dev *qat_vfmig_create(struct pci_dev *pdev, int vf_id);
+int qat_vfmig_init(struct qat_mig_dev *mdev);
+void qat_vfmig_cleanup(struct qat_mig_dev *mdev);
+void qat_vfmig_reset(struct qat_mig_dev *mdev);
+int qat_vfmig_open(struct qat_mig_dev *mdev);
+void qat_vfmig_close(struct qat_mig_dev *mdev);
+int qat_vfmig_suspend(struct qat_mig_dev *mdev);
+int qat_vfmig_resume(struct qat_mig_dev *mdev);
+int qat_vfmig_save_state(struct qat_mig_dev *mdev);
+int qat_vfmig_save_setup(struct qat_mig_dev *mdev);
+int qat_vfmig_load_state(struct qat_mig_dev *mdev);
+int qat_vfmig_load_setup(struct qat_mig_dev *mdev, int size);
+void qat_vfmig_destroy(struct qat_mig_dev *mdev);
+
+#endif /*QAT_MIG_DEV_H_*/
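
A hedged sketch of the save-side lifecycle these exports suggest (the ordering is assumed from the names, and qat_vfmig_create() is assumed to follow the ERR_PTR convention; neither is verified here):

static int example_save_vf(struct pci_dev *pdev, int vf_id)
{
	struct qat_mig_dev *mdev;
	int ret;

	mdev = qat_vfmig_create(pdev, vf_id);
	if (IS_ERR(mdev))
		return PTR_ERR(mdev);

	ret = qat_vfmig_init(mdev);
	if (ret)
		goto out_destroy;
	ret = qat_vfmig_open(mdev);
	if (ret)
		goto out_cleanup;

	ret = qat_vfmig_suspend(mdev);			/* quiesce the VF */
	if (!ret)
		ret = qat_vfmig_save_state(mdev);	/* fills mdev->state */

	qat_vfmig_close(mdev);
out_cleanup:
	qat_vfmig_cleanup(mdev);
out_destroy:
	qat_vfmig_destroy(mdev);
	return ret;
}
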
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
index 5d868505a94e..6d92b68efbf6 100644
--- a/include/linux/randomize_kstack.h
+++ b/include/linux/randomize_kstack.h
@@ -80,7 +80,7 @@ DECLARE_PER_CPU(u32, kstack_offset);
if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
&randomize_kstack_offset)) { \
u32 offset = raw_cpu_read(kstack_offset); \
- offset ^= (rand); \
+ offset = ror32(offset, 5) ^ (rand); \
raw_cpu_write(kstack_offset, offset); \
} \
} while (0)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 17d7ed5f3ae6..dfd2399f2cde 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -401,15 +401,15 @@ static inline int debug_lockdep_rcu_enabled(void)
} \
} while (0)
-#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
+#ifndef CONFIG_PREEMPT_RCU
static inline void rcu_preempt_sleep_check(void)
{
RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
"Illegal context switch in RCU read-side critical section");
}
-#else /* #ifdef CONFIG_PROVE_RCU */
+#else // #ifndef CONFIG_PREEMPT_RCU
static inline void rcu_preempt_sleep_check(void) { }
-#endif /* #else #ifdef CONFIG_PROVE_RCU */
+#endif // #else // #ifndef CONFIG_PREEMPT_RCU
#define rcu_sleep_check() \
do { \
@@ -809,9 +809,9 @@ static inline void rcu_read_unlock(void)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
+ rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
__release(RCU);
__rcu_read_unlock();
- rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}
/**
@@ -1090,6 +1090,18 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
extern int rcu_expedited;
extern int rcu_normal;
-DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
+DEFINE_LOCK_GUARD_0(rcu,
+ do {
+ rcu_read_lock();
+ /*
+ * sparse doesn't call the cleanup function,
+ * so just release immediately and don't track
+ * the context. We don't need to anyway, since
+ * the whole point of the guard is to not need
+ * the explicit unlock.
+ */
+ __release(RCU);
+ } while (0),
+ rcu_read_unlock())
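
A minimal usage sketch (hypothetical function) of the guard defined above; the read-side critical section ends automatically at scope exit:

static inline int example_rcu_read(int __rcu **slot)
{
	guard(rcu)();	/* rcu_read_lock(); dropped at end of scope */
	return *rcu_dereference(*slot);
}
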
#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index d07f0848802e..303ab9bee155 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -19,18 +19,18 @@ struct rcu_synchronize {
};
void wakeme_after_rcu(struct rcu_head *head);
-void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
+void __wait_rcu_gp(bool checktiny, unsigned int state, int n, call_rcu_func_t *crcu_array,
struct rcu_synchronize *rs_array);
-#define _wait_rcu_gp(checktiny, ...) \
-do { \
- call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
- struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \
- __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \
- __crcu_array, __rs_array); \
+#define _wait_rcu_gp(checktiny, state, ...) \
+do { \
+ call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
+ struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \
+ __wait_rcu_gp(checktiny, state, ARRAY_SIZE(__crcu_array), __crcu_array, __rs_array); \
} while (0)
-#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
+#define wait_rcu_gp(...) _wait_rcu_gp(false, TASK_UNINTERRUPTIBLE, __VA_ARGS__)
+#define wait_rcu_gp_state(state, ...) _wait_rcu_gp(false, state, __VA_ARGS__)
/**
* synchronize_rcu_mult - Wait concurrently for multiple grace periods
@@ -54,7 +54,7 @@ do { \
* grace period.
*/
#define synchronize_rcu_mult(...) \
- _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
+ _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), TASK_UNINTERRUPTIBLE, __VA_ARGS__)
static inline void cond_resched_rcu(void)
{
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index b743241cfb7c..d470303b1bbb 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -1230,6 +1230,7 @@ int regmap_multi_reg_write_bypassed(struct regmap *map,
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
+int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val);
int regmap_raw_read(struct regmap *map, unsigned int reg,
void *val, size_t val_len);
int regmap_noinc_read(struct regmap *map, unsigned int reg,
@@ -1739,6 +1740,13 @@ static inline int regmap_read(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_read_bypassed(struct regmap *map, unsigned int reg,
+ unsigned int *val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_raw_read(struct regmap *map, unsigned int reg,
void *val, size_t val_len)
{
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 4660582a3302..ed180ca419da 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -320,13 +320,13 @@ devm_regulator_get_exclusive(struct device *dev, const char *id)
static inline int devm_regulator_get_enable(struct device *dev, const char *id)
{
- return -ENODEV;
+ return 0;
}
static inline int devm_regulator_get_enable_optional(struct device *dev,
const char *id)
{
- return -ENODEV;
+ return 0;
}
static inline struct regulator *__must_check
diff --git a/include/linux/rwbase_rt.h b/include/linux/rwbase_rt.h
index 29c4e4f243e4..f2394a409c9d 100644
--- a/include/linux/rwbase_rt.h
+++ b/include/linux/rwbase_rt.h
@@ -31,9 +31,9 @@ static __always_inline bool rw_base_is_locked(const struct rwbase_rt *rwb)
return atomic_read(&rwb->readers) != READER_BIAS;
}
-static inline void rw_base_assert_held_write(const struct rwbase_rt *rwb)
+static __always_inline bool rw_base_is_write_locked(const struct rwbase_rt *rwb)
{
- WARN_ON(atomic_read(&rwb->readers) != WRITER_BIAS);
+ return atomic_read(&rwb->readers) == WRITER_BIAS;
}
static __always_inline bool rw_base_is_contended(const struct rwbase_rt *rwb)
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 4f1c18992f76..c8b543d428b0 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -167,14 +167,14 @@ static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
return rw_base_is_locked(&sem->rwbase);
}
-static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
{
WARN_ON(!rwsem_is_locked(sem));
}
-static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+static __always_inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
{
- rw_base_assert_held_write(sem);
+ WARN_ON(!rw_base_is_write_locked(&sem->rwbase));
}
static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index b807141acc14..3a9bb5b9a9e8 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -737,6 +737,89 @@ struct scmi_powercap_proto_ops {
u32 *power_thresh_high);
};
+enum scmi_pinctrl_selector_type {
+ PIN_TYPE = 0,
+ GROUP_TYPE,
+ FUNCTION_TYPE,
+};
+
+enum scmi_pinctrl_conf_type {
+ SCMI_PIN_DEFAULT = 0,
+ SCMI_PIN_BIAS_BUS_HOLD = 1,
+ SCMI_PIN_BIAS_DISABLE = 2,
+ SCMI_PIN_BIAS_HIGH_IMPEDANCE = 3,
+ SCMI_PIN_BIAS_PULL_UP = 4,
+ SCMI_PIN_BIAS_PULL_DEFAULT = 5,
+ SCMI_PIN_BIAS_PULL_DOWN = 6,
+ SCMI_PIN_DRIVE_OPEN_DRAIN = 7,
+ SCMI_PIN_DRIVE_OPEN_SOURCE = 8,
+ SCMI_PIN_DRIVE_PUSH_PULL = 9,
+ SCMI_PIN_DRIVE_STRENGTH = 10,
+ SCMI_PIN_INPUT_DEBOUNCE = 11,
+ SCMI_PIN_INPUT_MODE = 12,
+ SCMI_PIN_PULL_MODE = 13,
+ SCMI_PIN_INPUT_VALUE = 14,
+ SCMI_PIN_INPUT_SCHMITT = 15,
+ SCMI_PIN_LOW_POWER_MODE = 16,
+ SCMI_PIN_OUTPUT_MODE = 17,
+ SCMI_PIN_OUTPUT_VALUE = 18,
+ SCMI_PIN_POWER_SOURCE = 19,
+ SCMI_PIN_SLEW_RATE = 20,
+ SCMI_PIN_OEM_START = 192,
+ SCMI_PIN_OEM_END = 255,
+};
+
+/**
+ * struct scmi_pinctrl_proto_ops - represents the various operations provided
+ * by SCMI Pinctrl Protocol
+ *
+ * @count_get: returns the number of registered elements of the given type
+ * @name_get: returns the name of an element of the given type by index
+ * @group_pins_get: returns the set of pins assigned to the specified group
+ * @function_groups_get: returns the set of groups assigned to the specified
+ *			 function
+ * @mux_set: sets the muxing function for a group of pins
+ * @settings_get_one: returns one configuration parameter for a pin or group,
+ *		      specified by config_type
+ * @settings_get_all: returns all configuration parameters for a pin or group
+ * @settings_conf: sets the configuration parameters for a pin or group
+ * @pin_request: acquires a pin before selecting a mux setting
+ * @pin_free: frees a pin acquired by the pin_request call
+ */
+struct scmi_pinctrl_proto_ops {
+ int (*count_get)(const struct scmi_protocol_handle *ph,
+ enum scmi_pinctrl_selector_type type);
+ int (*name_get)(const struct scmi_protocol_handle *ph, u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ const char **name);
+ int (*group_pins_get)(const struct scmi_protocol_handle *ph,
+ u32 selector, const unsigned int **pins,
+ unsigned int *nr_pins);
+ int (*function_groups_get)(const struct scmi_protocol_handle *ph,
+ u32 selector, unsigned int *nr_groups,
+ const unsigned int **groups);
+ int (*mux_set)(const struct scmi_protocol_handle *ph, u32 selector,
+ u32 group);
+ int (*settings_get_one)(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ enum scmi_pinctrl_conf_type config_type,
+ u32 *config_value);
+ int (*settings_get_all)(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ unsigned int *nr_configs,
+ enum scmi_pinctrl_conf_type *config_types,
+ u32 *config_values);
+ int (*settings_conf)(const struct scmi_protocol_handle *ph,
+ u32 selector, enum scmi_pinctrl_selector_type type,
+ unsigned int nr_configs,
+ enum scmi_pinctrl_conf_type *config_type,
+ u32 *config_value);
+ int (*pin_request)(const struct scmi_protocol_handle *ph, u32 pin);
+ int (*pin_free)(const struct scmi_protocol_handle *ph, u32 pin);
+};
+
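A hedged consumer sketch, assuming the protocol handle has already been acquired through the usual SCMI device machinery (error unwinding abbreviated; the function name is hypothetical):

static int example_mux_group(const struct scmi_pinctrl_proto_ops *ops,
			     const struct scmi_protocol_handle *ph,
			     u32 function, u32 group)
{
	const unsigned int *pins;
	unsigned int i, nr_pins;
	int ret;

	ret = ops->group_pins_get(ph, group, &pins, &nr_pins);
	if (ret)
		return ret;
	for (i = 0; i < nr_pins; i++) {
		ret = ops->pin_request(ph, pins[i]);	/* acquire before muxing */
		if (ret)
			return ret;
	}
	return ops->mux_set(ph, function, group);
}
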
/**
* struct scmi_notify_ops - represents notifications' operations provided by
* SCMI core
@@ -783,8 +866,6 @@ struct scmi_notify_ops {
const u32 *src_id,
struct notifier_block *nb);
int (*devm_event_notifier_unregister)(struct scmi_device *sdev,
- u8 proto_id, u8 evt_id,
- const u32 *src_id,
struct notifier_block *nb);
int (*event_notifier_register)(const struct scmi_handle *handle,
u8 proto_id, u8 evt_id,
@@ -844,6 +925,7 @@ enum scmi_std_protocol {
SCMI_PROTOCOL_RESET = 0x16,
SCMI_PROTOCOL_VOLTAGE = 0x17,
SCMI_PROTOCOL_POWERCAP = 0x18,
+ SCMI_PROTOCOL_PINCTRL = 0x19,
};
enum scmi_system_events {
diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h
index 35f3a4a8ceb1..acf7e1a3f3de 100644
--- a/include/linux/secretmem.h
+++ b/include/linux/secretmem.h
@@ -13,10 +13,10 @@ static inline bool folio_is_secretmem(struct folio *folio)
/*
* Using folio_mapping() is quite slow because of the actual call
* instruction.
- * We know that secretmem pages are not compound and LRU so we can
+ * We know that secretmem pages are not compound, so we can
* save a couple of cycles here.
*/
- if (folio_test_large(folio) || !folio_test_lru(folio))
+ if (folio_test_large(folio))
return false;
mapping = (struct address_space *)
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 234bcdb1fba4..8bd4fda6e027 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -118,7 +118,18 @@ void seq_vprintf(struct seq_file *m, const char *fmt, va_list args);
__printf(2, 3)
void seq_printf(struct seq_file *m, const char *fmt, ...);
void seq_putc(struct seq_file *m, char c);
-void seq_puts(struct seq_file *m, const char *s);
+void __seq_puts(struct seq_file *m, const char *s);
+
+static __always_inline void seq_puts(struct seq_file *m, const char *s)
+{
+ if (!__builtin_constant_p(*s))
+ __seq_puts(m, s);
+ else if (s[0] && !s[1])
+ seq_putc(m, s[0]);
+ else
+ seq_write(m, s, __builtin_strlen(s));
+}
+
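Assuming a compiler that constant-folds string literals, the dispatch above behaves as in this editorial example:

	seq_puts(m, "\n");          /* single-char literal: becomes seq_putc(m, '\n') */
	seq_puts(m, "cpu ");        /* longer literal: becomes seq_write(m, "cpu ", 4) */
	seq_puts(m, runtime_str);   /* non-constant: falls back to __seq_puts() */
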
void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter,
unsigned long long num, unsigned int width);
void seq_put_decimal_ull(struct seq_file *m, const char *delimiter,
diff --git a/include/linux/shm.h b/include/linux/shm.h
index c55bef0538e5..1d3d3ae958fb 100644
--- a/include/linux/shm.h
+++ b/include/linux/shm.h
@@ -16,7 +16,6 @@ struct sysv_shm {
long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr,
unsigned long shmlba);
-bool is_file_shm_hugepages(struct file *file);
void exit_shm(struct task_struct *task);
#define shm_init_task(task) INIT_LIST_HEAD(&(task)->sysvshm.shm_clist)
#else
@@ -30,10 +29,6 @@ static inline long do_shmat(int shmid, char __user *shmaddr,
{
return -ENOSYS;
}
-static inline bool is_file_shm_hugepages(struct file *file)
-{
- return false;
-}
static inline void exit_shm(struct task_struct *task)
{
}
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index a4c15db2f5e5..3fb18f7eb73e 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -110,8 +110,17 @@ extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
struct mm_struct *mm, unsigned long vm_flags);
+#else
+static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+ struct mm_struct *mm, unsigned long vm_flags)
+{
+ return false;
+}
+#endif
+
#ifdef CONFIG_SHMEM
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
#else
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0c7c67b3a87b..1cc72c370190 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -527,6 +527,13 @@ enum {
#define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY | \
SKBFL_DONT_ORPHAN | SKBFL_MANAGED_FRAG_REFS)
+struct ubuf_info_ops {
+ void (*complete)(struct sk_buff *, struct ubuf_info *,
+ bool zerocopy_success);
+ /* has to be compatible with skb_zcopy_set() */
+ int (*link_skb)(struct sk_buff *skb, struct ubuf_info *uarg);
+};
+
/*
* The callback notifies userspace to release buffers when skb DMA is done in
* lower device, the skb last reference should be 0 when calling this.
@@ -536,8 +543,7 @@ enum {
* The desc field is used to track userspace buffer index.
*/
struct ubuf_info {
- void (*callback)(struct sk_buff *, struct ubuf_info *,
- bool zerocopy_success);
+ const struct ubuf_info_ops *ops;
refcount_t refcnt;
u8 flags;
};
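
A hedged sketch of what a zerocopy provider now supplies, an ops table rather than a bare callback (names are hypothetical; msg_zerocopy_ubuf_ops below is the in-tree instance):

static void example_zc_complete(struct sk_buff *skb, struct ubuf_info *uarg,
				bool zerocopy_success)
{
	/* Release pinned user pages, post a completion notification, etc. */
}

static const struct ubuf_info_ops example_zc_ops = {
	.complete	= example_zc_complete,
	/* .link_skb appears optional; left unset here */
};
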
@@ -753,8 +759,6 @@ typedef unsigned char *sk_buff_data_t;
* @list: queue head
* @ll_node: anchor in an llist (eg socket defer_list)
* @sk: Socket we are owned by
- * @ip_defrag_offset: (aka @sk) alternate use of @sk, used in
- * fragmentation management
* @dev: Device we arrived on/are leaving by
* @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
* @cb: Control buffer. Free for use by every layer. Put private vars here
@@ -875,10 +879,7 @@ struct sk_buff {
struct llist_node ll_node;
};
- union {
- struct sock *sk;
- int ip_defrag_offset;
- };
+ struct sock *sk;
union {
ktime_t tstamp;
@@ -1667,14 +1668,13 @@ static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
}
#endif
+extern const struct ubuf_info_ops msg_zerocopy_ubuf_ops;
+
struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
struct ubuf_info *uarg);
void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
-void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
- bool success);
-
int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb, struct iov_iter *from,
size_t length);
@@ -1762,13 +1762,13 @@ static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
static inline void net_zcopy_put(struct ubuf_info *uarg)
{
if (uarg)
- uarg->callback(NULL, uarg, true);
+ uarg->ops->complete(NULL, uarg, true);
}
static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
if (uarg) {
- if (uarg->callback == msg_zerocopy_callback)
+ if (uarg->ops == &msg_zerocopy_ubuf_ops)
msg_zerocopy_put_abort(uarg, have_uref);
else if (have_uref)
net_zcopy_put(uarg);
@@ -1782,7 +1782,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
if (uarg) {
if (!skb_zcopy_is_nouarg(skb))
- uarg->callback(skb, uarg, zerocopy_success);
+ uarg->ops->complete(skb, uarg, zerocopy_success);
skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY;
}
@@ -3036,6 +3036,21 @@ static inline void skb_mac_header_rebuild(struct sk_buff *skb)
}
}
+/* Move the full mac header up to current network_header.
+ * Leaves skb->data pointing at offset skb->mac_len into the mac_header.
+ * The complete mac header length must be provided.
+ */
+static inline void skb_mac_header_rebuild_full(struct sk_buff *skb, u32 full_mac_len)
+{
+ if (skb_mac_header_was_set(skb)) {
+ const unsigned char *old_mac = skb_mac_header(skb);
+
+ skb_set_mac_header(skb, -full_mac_len);
+ memmove(skb_mac_header(skb), old_mac, full_mac_len);
+ __skb_push(skb, full_mac_len - skb->mac_len);
+ }
+}
+
static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
return skb->csum_start - skb_headroom(skb);
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index e65ec3fd2799..a509caf823d6 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -461,10 +461,12 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
+ read_lock_bh(&sk->sk_callback_lock);
if (psock->saved_data_ready)
psock->saved_data_ready(sk);
else
sk->sk_data_ready(sk);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static inline void psock_set_prog(struct bpf_prog **pprog,
diff --git a/include/linux/slab.h b/include/linux/slab.h
index e53cbfa18325..739b21262507 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -266,7 +266,7 @@ void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
-DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
+DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
/**
* ksize - Report actual allocation size of associated object
@@ -792,7 +792,7 @@ static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t fla
extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
__realloc_size(3);
extern void kvfree(const void *addr);
-DEFINE_FREE(kvfree, void *, if (_T) kvfree(_T))
+DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))
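
A hedged sketch of the pattern this change protects (the lookup callback is hypothetical): a scoped pointer may hold an ERR_PTR() on an error path, and the cleanup must skip it rather than pass it to kfree():

static inline int example_use(void *(*lookup)(void))
{
	void *obj __free(kfree) = lookup();	/* may be NULL or ERR_PTR() */

	if (IS_ERR_OR_NULL(obj))
		return obj ? PTR_ERR(obj) : -ENOENT;
	/* ... use obj; it is freed automatically on return ... */
	return 0;
}
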
extern void kvfree_sensitive(const void *addr, size_t len);
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index 649955d2cf5c..d4a8e34505e6 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -14,6 +14,15 @@
#define CMDQ_ADDR_HIGH(addr) ((u32)(((addr) >> 16) & GENMASK(31, 0)))
#define CMDQ_ADDR_LOW(addr) ((u16)(addr) | BIT(1))
+/*
+ * Every cmdq thread has its own SPRs (Specific Purpose Registers),
+ * so there are 4 * N (threads) SPRs in GCE that share the same indexes below.
+ */
+#define CMDQ_THR_SPR_IDX0 (0)
+#define CMDQ_THR_SPR_IDX1 (1)
+#define CMDQ_THR_SPR_IDX2 (2)
+#define CMDQ_THR_SPR_IDX3 (3)
+
struct cmdq_pkt;
struct cmdq_client_reg {
@@ -62,17 +71,19 @@ void cmdq_mbox_destroy(struct cmdq_client *client);
/**
* cmdq_pkt_create() - create a CMDQ packet
* @client: the CMDQ mailbox client
+ * @pkt: the CMDQ packet
* @size: required CMDQ buffer size
*
- * Return: CMDQ packet pointer
+ * Return: 0 for success; else the error code is returned
*/
-struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size);
+int cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, size_t size);
/**
* cmdq_pkt_destroy() - destroy the CMDQ packet
+ * @client: the CMDQ mailbox client
* @pkt: the CMDQ packet
*/
-void cmdq_pkt_destroy(struct cmdq_pkt *pkt);
+void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt);
/**
* cmdq_pkt_write() - append write command to the CMDQ packet
@@ -174,6 +185,18 @@ int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
u16 addr_low, u32 value, u32 mask);
/**
+ * cmdq_pkt_mem_move() - append memory move command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @src_addr: source address
+ * @dst_addr: destination address
+ *
+ * Appends a CMDQ command to copy the value found in `src_addr` to `dst_addr`.
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_mem_move(struct cmdq_pkt *pkt, dma_addr_t src_addr, dma_addr_t dst_addr);
+
+/**
* cmdq_pkt_wfe() - append wait for event command to the CMDQ packet
* @pkt: the CMDQ packet
* @event: the desired event type to wait
@@ -184,6 +207,21 @@ int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear);
/**
+ * cmdq_pkt_acquire_event() - append acquire event command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @event: the desired event to be acquired
+ *
+ * Users can use cmdq_pkt_acquire_event() as `mutex_lock` and cmdq_pkt_clear_event()
+ * as `mutex_unlock` to protect the `critical section` instructions between them.
+ * cmdq_pkt_acquire_event() waits for the event to be cleared.
+ * After the event is cleared by cmdq_pkt_clear_event() in another GCE thread,
+ * cmdq_pkt_acquire_event() sets the event and keeps executing the next instruction.
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_acquire_event(struct cmdq_pkt *pkt, u16 event);
+
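A hedged sketch of the lock/unlock pairing described above (the token event ID and the subsys/offset/value written in the critical section are placeholders):

static int example_locked_update(struct cmdq_pkt *pkt, u16 sw_token_event)
{
	int ret;

	ret = cmdq_pkt_acquire_event(pkt, sw_token_event);	/* "lock" */
	if (ret)
		return ret;
	ret = cmdq_pkt_write(pkt, 0x14, 0x100, 0x1);	/* critical section */
	if (ret)
		return ret;
	return cmdq_pkt_clear_event(pkt, sw_token_event);	/* "unlock" */
}
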
+/**
* cmdq_pkt_clear_event() - append clear event command to the CMDQ packet
* @pkt: the CMDQ packet
* @event: the desired event to be cleared
@@ -248,36 +286,76 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value);
/**
- * cmdq_pkt_jump() - Append jump command to the CMDQ packet, ask GCE
- * to execute an instruction that change current thread PC to
- * a physical address which should contains more instruction.
+ * cmdq_pkt_poll_addr() - Append blocking POLL command to CMDQ packet
+ * @pkt: the CMDQ packet
+ * @addr: the hardware register address
+ * @value: the specified target register value
+ * @mask: the specified target register mask
+ *
+ * Appends a polling (POLL) command to the CMDQ packet and asks the GCE
+ * to execute an instruction that checks for the specified `value` (with
+ * or without `mask`) to appear in the specified hardware register `addr`.
+ * All GCE threads will be blocked by this instruction.
+ *
+ * Return: 0 for success or negative error code
+ */
+int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mask);
+
+/**
+ * cmdq_pkt_jump_abs() - Append jump command to the CMDQ packet, ask GCE
+ *			 to execute an instruction that changes the current
+ *			 thread PC to an absolute physical address which should
+ *			 contain more instructions.
* @pkt: the CMDQ packet
- * @addr: physical address of target instruction buffer
+ * @addr: absolute physical address of target instruction buffer
+ * @shift_pa: shift bits of the physical address in the CMDQ instruction. This
+ *	      value is obtained from cmdq_get_shift_pa().
*
* Return: 0 for success; else the error code is returned
*/
-int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr);
+int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa);
+
+/* This wrapper has to be removed after all users migrated to jump_abs */
+static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa)
+{
+ return cmdq_pkt_jump_abs(pkt, addr, shift_pa);
+}
/**
- * cmdq_pkt_finalize() - Append EOC and jump command to pkt.
+ * cmdq_pkt_jump_rel() - Append jump command to the CMDQ packet, ask GCE
+ *			 to execute an instruction that changes the current
+ *			 thread PC to a physical address with a relative
+ *			 offset. The target address should contain more
+ *			 instructions.
* @pkt: the CMDQ packet
+ * @offset: relative offset of target instruction buffer from current PC.
+ * @shift_pa: shift bits of the physical address in the CMDQ instruction. This
+ *	      value is obtained from cmdq_get_shift_pa().
*
* Return: 0 for success; else the error code is returned
*/
-int cmdq_pkt_finalize(struct cmdq_pkt *pkt);
+int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa);
+
+/**
+ * cmdq_pkt_eoc() - Append EOC and ask GCE to generate an IRQ at end of execution
+ * @pkt: The CMDQ packet
+ *
+ * Appends an End Of Code (EOC) command to the CMDQ packet and asks the GCE
+ * to generate an interrupt at the end of the execution of all commands in
+ * the pipeline.
+ * The EOC command is usually appended to the end of the pipeline to notify
+ * that all commands are done.
+ *
+ * Return: 0 for success or negative error number
+ */
+int cmdq_pkt_eoc(struct cmdq_pkt *pkt);
/**
- * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ
- * packet and call back at the end of done packet
+ * cmdq_pkt_finalize() - Append EOC and jump command to pkt.
* @pkt: the CMDQ packet
*
* Return: 0 for success; else the error code is returned
- *
- * Trigger CMDQ to asynchronously execute the CMDQ packet and call back
- * at the end of done packet. Note that this is an ASYNC function. When the
- * function returned, it may or may not be finished.
*/
-int cmdq_pkt_flush_async(struct cmdq_pkt *pkt);
+int cmdq_pkt_finalize(struct cmdq_pkt *pkt);
#else /* IS_ENABLED(CONFIG_MTK_CMDQ) */
@@ -294,12 +372,12 @@ static inline struct cmdq_client *cmdq_mbox_create(struct device *dev, int index
static inline void cmdq_mbox_destroy(struct cmdq_client *client) { }
-static inline struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
+static inline int cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, size_t size)
{
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
-static inline void cmdq_pkt_destroy(struct cmdq_pkt *pkt) { }
+static inline void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt) { }
static inline int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
@@ -374,17 +452,32 @@ static inline int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
return -EINVAL;
}
-static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
+static inline int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mask)
{
return -EINVAL;
}
-static inline int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
+static inline int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa)
{
return -EINVAL;
}
-static inline int cmdq_pkt_flush_async(struct cmdq_pkt *pkt)
+static inline int cmdq_pkt_eoc(struct cmdq_pkt *pkt)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
return -EINVAL;
}
diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h
index 307961b41541..317200cd3a60 100644
--- a/include/linux/sockptr.h
+++ b/include/linux/sockptr.h
@@ -50,11 +50,36 @@ static inline int copy_from_sockptr_offset(void *dst, sockptr_t src,
return 0;
}
+/* Deprecated.
+ * This is unsafe unless the caller has checked the user-provided optlen.
+ * Prefer copy_safe_from_sockptr() instead.
+ */
static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size)
{
return copy_from_sockptr_offset(dst, src, 0, size);
}
+/**
+ * copy_safe_from_sockptr() - copy a struct from sockptr
+ * @dst: Destination address, in kernel space. This buffer must be @ksize
+ * bytes long.
+ * @ksize: Size of @dst struct.
+ * @optval: Source address. (in user or kernel space)
+ * @optlen: Size of @optval data.
+ *
+ * Returns:
+ * * -EINVAL: @optlen < @ksize
+ * * -EFAULT: access to userspace failed.
+ * * 0 : @ksize bytes were copied
+ */
+static inline int copy_safe_from_sockptr(void *dst, size_t ksize,
+ sockptr_t optval, unsigned int optlen)
+{
+ if (optlen < ksize)
+ return -EINVAL;
+ return copy_from_sockptr(dst, optval, ksize);
+}
+
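A brief sketch of the intended call pattern in a setsockopt() handler (struct and handler names are hypothetical):

struct example_opt {
	u32 a;
	u32 b;
};

static inline int example_setsockopt(sockptr_t optval, unsigned int optlen)
{
	struct example_opt opt;
	int err;

	/* Rejects a short user buffer instead of over-reading it. */
	err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
	if (err)
		return err;
	/* ... apply opt ... */
	return 0;
}
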
static inline int copy_struct_from_sockptr(void *dst, size_t ksize,
sockptr_t src, size_t usize)
{
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index 447133171d95..4d96bbdb45f0 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -64,8 +64,10 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
int idx;
+ preempt_disable(); // Needed for PREEMPT_AUTO
idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
+ preempt_enable();
return idx;
}
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 3c6caa5abc7c..e9ec32fb97d4 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -44,10 +44,9 @@ typedef u32 depot_stack_handle_t;
union handle_parts {
depot_stack_handle_t handle;
struct {
- /* pool_index is offset by 1 */
- u32 pool_index : DEPOT_POOL_INDEX_BITS;
- u32 offset : DEPOT_OFFSET_BITS;
- u32 extra : STACK_DEPOT_EXTRA_BITS;
+ u32 pool_index_plus_1 : DEPOT_POOL_INDEX_BITS;
+ u32 offset : DEPOT_OFFSET_BITS;
+ u32 extra : STACK_DEPOT_EXTRA_BITS;
};
};
diff --git a/include/linux/stat.h b/include/linux/stat.h
index 52150570d37a..bf92441dbad2 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -53,6 +53,7 @@ struct kstat {
u32 dio_mem_align;
u32 dio_offset_align;
u64 change_cookie;
+ u64 subvol;
};
/* These definitions are internal to the kernel for now. Mainly used by nfsd. */
diff --git a/include/linux/string.h b/include/linux/string.h
index 9ba8b4597009..10e5177bb49c 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -14,8 +14,8 @@
#include <uapi/linux/string.h>
extern char *strndup_user(const char __user *, long);
-extern void *memdup_user(const void __user *, size_t);
-extern void *vmemdup_user(const void __user *, size_t);
+extern void *memdup_user(const void __user *, size_t) __realloc_size(2);
+extern void *vmemdup_user(const void __user *, size_t) __realloc_size(2);
extern void *memdup_user_nul(const void __user *, size_t);
/**
@@ -27,7 +27,8 @@ extern void *memdup_user_nul(const void __user *, size_t);
* Return: an ERR_PTR() on failure. Result is physically
* contiguous, to be freed by kfree().
*/
-static inline void *memdup_array_user(const void __user *src, size_t n, size_t size)
+static inline __realloc_size(2, 3)
+void *memdup_array_user(const void __user *src, size_t n, size_t size)
{
size_t nbytes;
@@ -46,7 +47,8 @@ static inline void *memdup_array_user(const void __user *src, size_t n, size_t s
 * Return: an ERR_PTR() on failure. Result may not be
* physically contiguous. Use kvfree() to free.
*/
-static inline void *vmemdup_array_user(const void __user *src, size_t n, size_t size)
+static inline __realloc_size(2, 3)
+void *vmemdup_array_user(const void __user *src, size_t n, size_t size)
{
size_t nbytes;
@@ -285,7 +287,8 @@ extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
extern void *kvmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
-extern void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp);
+extern void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp)
+ __realloc_size(2, 3);
/* lib/argv_split.c */
extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
@@ -423,6 +426,55 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
} while (0)
/**
+ * memtostr - Copy a possibly non-NUL-term string to a NUL-term string
+ * @dest: Pointer to destination NUL-terminated string
+ * @src: Pointer to character array (likely marked as __nonstring)
+ *
+ * This is a replacement for strncpy() uses where the source is not
+ * a NUL-terminated string.
+ *
+ * Note that sizes of @dest and @src must be known at compile-time.
+ */
+#define memtostr(dest, src) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
+ const size_t _src_len = __builtin_object_size(src, 1); \
+ const size_t _src_chars = strnlen(src, _src_len); \
+ const size_t _copy_len = min(_dest_len - 1, _src_chars); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ !__builtin_constant_p(_src_len) || \
+ _dest_len == 0 || _dest_len == (size_t)-1 || \
+ _src_len == 0 || _src_len == (size_t)-1); \
+ memcpy(dest, src, _copy_len); \
+ dest[_copy_len] = '\0'; \
+} while (0)
+
+/**
+ * memtostr_pad - Copy a possibly non-NUL-term string to a NUL-term string
+ * with NUL padding in the destination
+ * @dest: Pointer to destination NUL-terminated string
+ * @src: Pointer to character array (likely marked as __nonstring)
+ *
+ * This is a replacement for strncpy() uses where the source is not
+ * a NUL-terminated string.
+ *
+ * Note that sizes of @dest and @src must be known at compile-time.
+ */
+#define memtostr_pad(dest, src) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
+ const size_t _src_len = __builtin_object_size(src, 1); \
+ const size_t _src_chars = strnlen(src, _src_len); \
+ const size_t _copy_len = min(_dest_len - 1, _src_chars); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ !__builtin_constant_p(_src_len) || \
+ _dest_len == 0 || _dest_len == (size_t)-1 || \
+ _src_len == 0 || _src_len == (size_t)-1); \
+ memcpy(dest, src, _copy_len); \
+ memset(&dest[_copy_len], 0, _dest_len - _copy_len); \
+} while (0)
+
+/**
* memset_after - Set a value after a struct member to the end of a struct
*
* @obj: Address of target struct instance
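
Both memtostr() helpers take fixed-size operands so the compiler can check the bounds at build time. A short usage sketch; the firmware struct is made up for illustration:

        struct fw_info {
                char model[8] __nonstring;      /* raw ID bytes, not NUL-terminated */
        };

        static void show_model(const struct fw_info *fw)
        {
                char buf[16];

                /* copies at most 8 bytes and always NUL-terminates buf */
                memtostr(buf, fw->model);
                pr_info("model: %s\n", buf);
                /* memtostr_pad(buf, fw->model) would also zero the rest of buf */
        }
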
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 24cd199dd6f3..d33bab33099a 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -210,7 +210,6 @@ struct svc_rdma_recv_ctxt {
*/
struct svc_rdma_write_info {
struct svcxprt_rdma *wi_rdma;
- struct list_head wi_list;
const struct svc_rdma_chunk *wi_chunk;
@@ -239,10 +238,7 @@ struct svc_rdma_send_ctxt {
struct ib_cqe sc_cqe;
struct xdr_buf sc_hdrbuf;
struct xdr_stream sc_stream;
-
- struct list_head sc_write_info_list;
struct svc_rdma_write_info sc_reply_info;
-
void *sc_xprt_buf;
int sc_page_count;
int sc_cur_sge_no;
@@ -274,14 +270,11 @@ extern void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
extern void svc_rdma_cc_release(struct svcxprt_rdma *rdma,
struct svc_rdma_chunk_ctxt *cc,
enum dma_data_direction dir);
-extern void svc_rdma_write_chunk_release(struct svcxprt_rdma *rdma,
- struct svc_rdma_send_ctxt *ctxt);
extern void svc_rdma_reply_chunk_release(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *ctxt);
-extern int svc_rdma_prepare_write_list(struct svcxprt_rdma *rdma,
- const struct svc_rdma_pcl *write_pcl,
- struct svc_rdma_send_ctxt *sctxt,
- const struct xdr_buf *xdr);
+extern int svc_rdma_send_write_list(struct svcxprt_rdma *rdma,
+ const struct svc_rdma_recv_ctxt *rctxt,
+ const struct xdr_buf *xdr);
extern int svc_rdma_prepare_reply_chunk(struct svcxprt_rdma *rdma,
const struct svc_rdma_pcl *write_pcl,
const struct svc_rdma_pcl *reply_pcl,
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 48b700ba1d18..a5c560a2f8c2 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -390,6 +390,35 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry)
}
#endif /* CONFIG_MIGRATION */
+#ifdef CONFIG_MEMORY_FAILURE
+
+/*
+ * Support for hardware poisoned pages
+ */
+static inline swp_entry_t make_hwpoison_entry(struct page *page)
+{
+ BUG_ON(!PageLocked(page));
+ return swp_entry(SWP_HWPOISON, page_to_pfn(page));
+}
+
+static inline int is_hwpoison_entry(swp_entry_t entry)
+{
+ return swp_type(entry) == SWP_HWPOISON;
+}
+
+#else
+
+static inline swp_entry_t make_hwpoison_entry(struct page *page)
+{
+ return swp_entry(0, 0);
+}
+
+static inline int is_hwpoison_entry(swp_entry_t swp)
+{
+ return 0;
+}
+#endif
+
typedef unsigned long pte_marker;
#define PTE_MARKER_UFFD_WP BIT(0)
@@ -483,8 +512,9 @@ static inline struct folio *pfn_swap_entry_folio(swp_entry_t entry)
/*
* A pfn swap entry is a special type of swap entry that always has a pfn stored
- * in the swap offset. They are used to represent unaddressable device memory
- * and to restrict access to a page undergoing migration.
+ * in the swap offset. They can either be used to represent unaddressable device
+ * memory, to restrict access to a page undergoing migration or to represent a
+ * pfn which has been hwpoisoned and unmapped.
*/
static inline bool is_pfn_swap_entry(swp_entry_t entry)
{
@@ -492,7 +522,7 @@ static inline bool is_pfn_swap_entry(swp_entry_t entry)
BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
return is_migration_entry(entry) || is_device_private_entry(entry) ||
- is_device_exclusive_entry(entry);
+ is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
}
struct page_vma_mapped_walk;
@@ -561,35 +591,6 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
-#ifdef CONFIG_MEMORY_FAILURE
-
-/*
- * Support for hardware poisoned pages
- */
-static inline swp_entry_t make_hwpoison_entry(struct page *page)
-{
- BUG_ON(!PageLocked(page));
- return swp_entry(SWP_HWPOISON, page_to_pfn(page));
-}
-
-static inline int is_hwpoison_entry(swp_entry_t entry)
-{
- return swp_type(entry) == SWP_HWPOISON;
-}
-
-#else
-
-static inline swp_entry_t make_hwpoison_entry(struct page *page)
-{
- return swp_entry(0, 0);
-}
-
-static inline int is_hwpoison_entry(swp_entry_t swp)
-{
- return 0;
-}
-#endif
-
static inline int non_swap_entry(swp_entry_t entry)
{
return swp_type(entry) >= MAX_SWAPFILES;
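
With is_hwpoison_entry() now declared before is_pfn_swap_entry(), hwpoison entries participate in the pfn-entry check above. A sketch of how a walker might branch on the entry type (hypothetical helper, not from this patch):

        static void classify_swap_pte(pte_t pte)
        {
                swp_entry_t entry = pte_to_swp_entry(pte);

                if (is_hwpoison_entry(entry))
                        pr_debug("hwpoisoned pfn %lx\n", swp_offset_pfn(entry));
                else if (is_migration_entry(entry))
                        pr_debug("page under migration\n");
        }
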
diff --git a/include/linux/tee_core.h b/include/linux/tee_core.h
new file mode 100644
index 000000000000..efd16ed52315
--- /dev/null
+++ b/include/linux/tee_core.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 Linaro Limited
+ */
+
+#ifndef __TEE_CORE_H
+#define __TEE_CORE_H
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/tee.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+
+/*
+ * The file describes the API provided by the generic TEE driver to the
+ * specific TEE driver.
+ */
+
+#define TEE_SHM_DYNAMIC BIT(0) /* Dynamic shared memory registered */
+ /* in secure world */
+#define TEE_SHM_USER_MAPPED BIT(1) /* Memory mapped in user space */
+#define TEE_SHM_POOL BIT(2) /* Memory allocated from pool */
+#define TEE_SHM_PRIV BIT(3) /* Memory private to TEE driver */
+
+#define TEE_DEVICE_FLAG_REGISTERED 0x1
+#define TEE_MAX_DEV_NAME_LEN 32
+
+/**
+ * struct tee_device - TEE Device representation
+ * @name: name of device
+ * @desc: description of device
+ * @id: unique id of device
+ * @flags: represented by TEE_DEVICE_FLAG_REGISTERED above
+ * @dev: embedded basic device structure
+ * @cdev: embedded cdev
+ * @num_users: number of active users of this device
+ * @c_no_users: completion used when unregistering the device
+ * @mutex: mutex protecting @num_users and @idr
+ * @idr: register of user space shared memory objects allocated or
+ * registered on this device
+ * @pool: shared memory pool
+ */
+struct tee_device {
+ char name[TEE_MAX_DEV_NAME_LEN];
+ const struct tee_desc *desc;
+ int id;
+ unsigned int flags;
+
+ struct device dev;
+ struct cdev cdev;
+
+ size_t num_users;
+ struct completion c_no_users;
+ struct mutex mutex; /* protects num_users and idr */
+
+ struct idr idr;
+ struct tee_shm_pool *pool;
+};
+
+/**
+ * struct tee_driver_ops - driver operations vtable
+ * @get_version: returns version of driver
+ * @open: called when the device file is opened
+ * @release: release this open file
+ * @open_session: open a new session
+ * @close_session: close a session
+ * @system_session: declare session as a system session
+ * @invoke_func: invoke a trusted function
+ * @cancel_req: request cancel of an ongoing invoke or open
+ * @supp_recv: called for supplicant to get a command
+ * @supp_send: called for supplicant to send a response
+ * @shm_register: register shared memory buffer in TEE
+ * @shm_unregister: unregister shared memory buffer in TEE
+ */
+struct tee_driver_ops {
+ void (*get_version)(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers);
+ int (*open)(struct tee_context *ctx);
+ void (*release)(struct tee_context *ctx);
+ int (*open_session)(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param);
+ int (*close_session)(struct tee_context *ctx, u32 session);
+ int (*system_session)(struct tee_context *ctx, u32 session);
+ int (*invoke_func)(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param);
+ int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session);
+ int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params,
+ struct tee_param *param);
+ int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params,
+ struct tee_param *param);
+ int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start);
+ int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm);
+};
+
+/**
+ * struct tee_desc - Describes the TEE driver to the subsystem
+ * @name: name of driver
+ * @ops: driver operations vtable
+ * @owner: module providing the driver
+ * @flags: Extra properties of driver, defined by TEE_DESC_* below
+ */
+#define TEE_DESC_PRIVILEGED 0x1
+struct tee_desc {
+ const char *name;
+ const struct tee_driver_ops *ops;
+ struct module *owner;
+ u32 flags;
+};
+
+/**
+ * tee_device_alloc() - Allocate a new struct tee_device instance
+ * @teedesc: Descriptor for this driver
+ * @dev: Parent device for this device
+ * @pool: Shared memory pool, NULL if not used
+ * @driver_data: Private driver data for this device
+ *
+ * Allocates a new struct tee_device instance. The device is
+ * removed by tee_device_unregister().
+ *
+ * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
+ */
+struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
+ struct device *dev,
+ struct tee_shm_pool *pool,
+ void *driver_data);
+
+/**
+ * tee_device_register() - Registers a TEE device
+ * @teedev: Device to register
+ *
+ * tee_device_unregister() needs to be called to remove the @teedev if
+ * this function fails.
+ *
+ * @returns < 0 on failure
+ */
+int tee_device_register(struct tee_device *teedev);
+
+/**
+ * tee_device_unregister() - Removes a TEE device
+ * @teedev: Device to unregister
+ *
+ * This function should be called to remove the @teedev even if
+ * tee_device_register() hasn't been called yet. Does nothing if
+ * @teedev is NULL.
+ */
+void tee_device_unregister(struct tee_device *teedev);
+
+/**
+ * tee_session_calc_client_uuid() - Calculates client UUID for session
+ * @uuid: Resulting UUID
+ * @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*)
+ * @connection_data: Connection data for opening session
+ *
+ * Based on connection method calculates UUIDv5 based client UUID.
+ *
+ * For group based logins verifies that calling process has specified
+ * credentials.
+ *
+ * @return < 0 on failure
+ */
+int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
+ const u8 connection_data[TEE_IOCTL_UUID_LEN]);
+
+/**
+ * struct tee_shm_pool - shared memory pool
+ * @ops: operations
+ * @private_data: private data for the shared memory manager
+ */
+struct tee_shm_pool {
+ const struct tee_shm_pool_ops *ops;
+ void *private_data;
+};
+
+/**
+ * struct tee_shm_pool_ops - shared memory pool operations
+ * @alloc: called when allocating shared memory
+ * @free: called when freeing shared memory
+ * @destroy_pool: called when destroying the pool
+ */
+struct tee_shm_pool_ops {
+ int (*alloc)(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align);
+ void (*free)(struct tee_shm_pool *pool, struct tee_shm *shm);
+ void (*destroy_pool)(struct tee_shm_pool *pool);
+};
+
+/*
+ * tee_shm_pool_alloc_res_mem() - Create a shm manager for reserved memory
+ * @vaddr: Virtual address of start of pool
+ * @paddr: Physical address of start of pool
+ * @size: Size in bytes of the pool
+ *
+ * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
+ */
+struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr,
+ phys_addr_t paddr, size_t size,
+ int min_alloc_order);
+
+/**
+ * tee_shm_pool_free() - Free a shared memory pool
+ * @pool: The shared memory pool to free
+ *
+ * There must be no remaining shared memory allocated from this pool when
+ * this function is called.
+ */
+static inline void tee_shm_pool_free(struct tee_shm_pool *pool)
+{
+ pool->ops->destroy_pool(pool);
+}
+
+/**
+ * tee_get_drvdata() - Return driver_data pointer
+ * @returns the driver_data pointer supplied to tee_device_alloc().
+ */
+void *tee_get_drvdata(struct tee_device *teedev);
+
+/**
+ * tee_shm_alloc_priv_buf() - Allocate shared memory for private use by specific
+ * TEE driver
+ * @ctx: The TEE context for shared memory allocation
+ * @size: Shared memory allocation size
+ * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size);
+
+int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
+ int (*shm_register)(struct tee_context *ctx,
+ struct tee_shm *shm,
+ struct page **pages,
+ size_t num_pages,
+ unsigned long start));
+void tee_dyn_shm_free_helper(struct tee_shm *shm,
+ int (*shm_unregister)(struct tee_context *ctx,
+ struct tee_shm *shm));
+
+/**
+ * tee_shm_is_dynamic() - Check if shared memory object is of the dynamic kind
+ * @shm: Shared memory handle
+ * @returns true if object is dynamic shared memory
+ */
+static inline bool tee_shm_is_dynamic(struct tee_shm *shm)
+{
+ return shm && (shm->flags & TEE_SHM_DYNAMIC);
+}
+
+/**
+ * tee_shm_put() - Decrease reference count on a shared memory handle
+ * @shm: Shared memory handle
+ */
+void tee_shm_put(struct tee_shm *shm);
+
+/**
+ * tee_shm_get_id() - Get id of a shared memory object
+ * @shm: Shared memory handle
+ * @returns id
+ */
+static inline int tee_shm_get_id(struct tee_shm *shm)
+{
+ return shm->id;
+}
+
+/**
+ * tee_shm_get_from_id() - Find shared memory object and increase reference
+ * count
+ * @ctx: Context owning the shared memory
+ * @id: Id of shared memory object
+ * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
+
+static inline bool tee_param_is_memref(struct tee_param *param)
+{
+ switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * teedev_open() - Open a struct tee_device
+ * @teedev: Device to open
+ *
+ * @return a pointer to struct tee_context on success or an ERR_PTR on failure.
+ */
+struct tee_context *teedev_open(struct tee_device *teedev);
+
+/**
+ * teedev_close_context() - closes a struct tee_context
+ * @ctx: The struct tee_context to close
+ */
+void teedev_close_context(struct tee_context *ctx);
+
+#endif /*__TEE_CORE_H*/
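
tee_core.h now carries everything a TEE backend driver needs. A condensed sketch of the alloc/register/unregister lifecycle, with my_tee_ops, my_pool and my_data assumed to be defined elsewhere:

        static const struct tee_desc my_desc = {
                .name   = "my-tee",
                .ops    = &my_tee_ops,          /* struct tee_driver_ops */
                .owner  = THIS_MODULE,
        };

        static int my_probe(struct device *dev)
        {
                struct tee_device *teedev;
                int rc;

                teedev = tee_device_alloc(&my_desc, dev, my_pool, my_data);
                if (IS_ERR(teedev))
                        return PTR_ERR(teedev);

                rc = tee_device_register(teedev);
                if (rc)
                        tee_device_unregister(teedev); /* required even on failed register */
                return rc;
        }
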
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 71632e3c5f18..786b9ae6cf4d 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -1,40 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015-2022 Linaro Limited
+ * Copyright (c) 2015-2024 Linaro Limited
*/
#ifndef __TEE_DRV_H
#define __TEE_DRV_H
#include <linux/device.h>
-#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/tee.h>
#include <linux/types.h>
-#include <linux/uuid.h>
/*
- * The file describes the API provided by the generic TEE driver to the
- * specific TEE driver.
+ * The file describes the API provided by the TEE subsystem to the
+ * TEE client drivers.
*/
-#define TEE_SHM_DYNAMIC BIT(0) /* Dynamic shared memory registered */
- /* in secure world */
-#define TEE_SHM_USER_MAPPED BIT(1) /* Memory mapped in user space */
-#define TEE_SHM_POOL BIT(2) /* Memory allocated from pool */
-#define TEE_SHM_PRIV BIT(3) /* Memory private to TEE driver */
-
-struct device;
struct tee_device;
-struct tee_shm;
-struct tee_shm_pool;
/**
* struct tee_context - driver specific context on file pointer data
* @teedev: pointer to this drivers struct tee_device
- * @list_shm: List of shared memory object owned by this context
* @data: driver specific context data, managed by the driver
* @refcount: reference counter for this structure
* @releasing: flag that indicates if context is being released right now.
@@ -57,134 +45,6 @@ struct tee_context {
bool cap_memref_null;
};
-struct tee_param_memref {
- size_t shm_offs;
- size_t size;
- struct tee_shm *shm;
-};
-
-struct tee_param_value {
- u64 a;
- u64 b;
- u64 c;
-};
-
-struct tee_param {
- u64 attr;
- union {
- struct tee_param_memref memref;
- struct tee_param_value value;
- } u;
-};
-
-/**
- * struct tee_driver_ops - driver operations vtable
- * @get_version: returns version of driver
- * @open: called when the device file is opened
- * @release: release this open file
- * @open_session: open a new session
- * @close_session: close a session
- * @system_session: declare session as a system session
- * @invoke_func: invoke a trusted function
- * @cancel_req: request cancel of an ongoing invoke or open
- * @supp_recv: called for supplicant to get a command
- * @supp_send: called for supplicant to send a response
- * @shm_register: register shared memory buffer in TEE
- * @shm_unregister: unregister shared memory buffer in TEE
- */
-struct tee_driver_ops {
- void (*get_version)(struct tee_device *teedev,
- struct tee_ioctl_version_data *vers);
- int (*open)(struct tee_context *ctx);
- void (*release)(struct tee_context *ctx);
- int (*open_session)(struct tee_context *ctx,
- struct tee_ioctl_open_session_arg *arg,
- struct tee_param *param);
- int (*close_session)(struct tee_context *ctx, u32 session);
- int (*system_session)(struct tee_context *ctx, u32 session);
- int (*invoke_func)(struct tee_context *ctx,
- struct tee_ioctl_invoke_arg *arg,
- struct tee_param *param);
- int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session);
- int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params,
- struct tee_param *param);
- int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params,
- struct tee_param *param);
- int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm,
- struct page **pages, size_t num_pages,
- unsigned long start);
- int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm);
-};
-
-/**
- * struct tee_desc - Describes the TEE driver to the subsystem
- * @name: name of driver
- * @ops: driver operations vtable
- * @owner: module providing the driver
- * @flags: Extra properties of driver, defined by TEE_DESC_* below
- */
-#define TEE_DESC_PRIVILEGED 0x1
-struct tee_desc {
- const char *name;
- const struct tee_driver_ops *ops;
- struct module *owner;
- u32 flags;
-};
-
-/**
- * tee_device_alloc() - Allocate a new struct tee_device instance
- * @teedesc: Descriptor for this driver
- * @dev: Parent device for this device
- * @pool: Shared memory pool, NULL if not used
- * @driver_data: Private driver data for this device
- *
- * Allocates a new struct tee_device instance. The device is
- * removed by tee_device_unregister().
- *
- * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
- */
-struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
- struct device *dev,
- struct tee_shm_pool *pool,
- void *driver_data);
-
-/**
- * tee_device_register() - Registers a TEE device
- * @teedev: Device to register
- *
- * tee_device_unregister() need to be called to remove the @teedev if
- * this function fails.
- *
- * @returns < 0 on failure
- */
-int tee_device_register(struct tee_device *teedev);
-
-/**
- * tee_device_unregister() - Removes a TEE device
- * @teedev: Device to unregister
- *
- * This function should be called to remove the @teedev even if
- * tee_device_register() hasn't been called yet. Does nothing if
- * @teedev is NULL.
- */
-void tee_device_unregister(struct tee_device *teedev);
-
-/**
- * tee_session_calc_client_uuid() - Calculates client UUID for session
- * @uuid: Resulting UUID
- * @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*)
- * @connectuon_data: Connection data for opening session
- *
- * Based on connection method calculates UUIDv5 based client UUID.
- *
- * For group based logins verifies that calling process has specified
- * credentials.
- *
- * @return < 0 on failure
- */
-int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
- const u8 connection_data[TEE_IOCTL_UUID_LEN]);
-
/**
* struct tee_shm - shared memory object
* @ctx: context using the object
@@ -195,15 +55,12 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
* @pages: locked pages from userspace
* @num_pages: number of locked pages
* @refcount: reference counter
- * @flags: defined by TEE_SHM_* in tee_drv.h
+ * @flags: defined by TEE_SHM_* in tee_core.h
* @id: unique id of a shared memory object on this device, shared
* with user space
* @sec_world_id:
* secure world assigned id of this shared memory object, not
* used by all drivers
- *
- * This pool is only supposed to be accessed directly from the TEE
- * subsystem and from drivers that implements their own shm pool manager.
*/
struct tee_shm {
struct tee_context *ctx;
@@ -219,88 +76,53 @@ struct tee_shm {
u64 sec_world_id;
};
-/**
- * struct tee_shm_pool - shared memory pool
- * @ops: operations
- * @private_data: private data for the shared memory manager
- */
-struct tee_shm_pool {
- const struct tee_shm_pool_ops *ops;
- void *private_data;
+struct tee_param_memref {
+ size_t shm_offs;
+ size_t size;
+ struct tee_shm *shm;
};
-/**
- * struct tee_shm_pool_ops - shared memory pool operations
- * @alloc: called when allocating shared memory
- * @free: called when freeing shared memory
- * @destroy_pool: called when destroying the pool
- */
-struct tee_shm_pool_ops {
- int (*alloc)(struct tee_shm_pool *pool, struct tee_shm *shm,
- size_t size, size_t align);
- void (*free)(struct tee_shm_pool *pool, struct tee_shm *shm);
- void (*destroy_pool)(struct tee_shm_pool *pool);
+struct tee_param_value {
+ u64 a;
+ u64 b;
+ u64 c;
};
-/*
- * tee_shm_pool_alloc_res_mem() - Create a shm manager for reserved memory
- * @vaddr: Virtual address of start of pool
- * @paddr: Physical address of start of pool
- * @size: Size in bytes of the pool
- *
- * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
- */
-struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr,
- phys_addr_t paddr, size_t size,
- int min_alloc_order);
+struct tee_param {
+ u64 attr;
+ union {
+ struct tee_param_memref memref;
+ struct tee_param_value value;
+ } u;
+};
/**
- * tee_shm_pool_free() - Free a shared memory pool
- * @pool: The shared memory pool to free
- *
- * The must be no remaining shared memory allocated from this pool when
- * this function is called.
+ * tee_shm_alloc_kernel_buf() - Allocate kernel shared memory for a
+ * particular TEE client driver
+ * @ctx: The TEE context for shared memory allocation
+ * @size: Shared memory allocation size
+ * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
*/
-static inline void tee_shm_pool_free(struct tee_shm_pool *pool)
-{
- pool->ops->destroy_pool(pool);
-}
+struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size);
/**
- * tee_get_drvdata() - Return driver_data pointer
- * @returns the driver_data pointer supplied to tee_register().
+ * tee_shm_register_kernel_buf() - Register kernel shared memory for a
+ * particular TEE client driver
+ * @ctx: The TEE context for shared memory registration
+ * @addr: Kernel buffer address
+ * @length: Kernel buffer length
+ * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
*/
-void *tee_get_drvdata(struct tee_device *teedev);
-
-struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size);
-struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size);
-
struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
void *addr, size_t length);
/**
- * tee_shm_is_dynamic() - Check if shared memory object is of the dynamic kind
- * @shm: Shared memory handle
- * @returns true if object is dynamic shared memory
- */
-static inline bool tee_shm_is_dynamic(struct tee_shm *shm)
-{
- return shm && (shm->flags & TEE_SHM_DYNAMIC);
-}
-
-/**
* tee_shm_free() - Free shared memory
* @shm: Handle to shared memory to free
*/
void tee_shm_free(struct tee_shm *shm);
/**
- * tee_shm_put() - Decrease reference count on a shared memory handle
- * @shm: Shared memory handle
- */
-void tee_shm_put(struct tee_shm *shm);
-
-/**
* tee_shm_get_va() - Get virtual address of a shared memory plus an offset
* @shm: Shared memory handle
* @offs: Offset from start of this shared memory
@@ -353,25 +175,6 @@ static inline size_t tee_shm_get_page_offset(struct tee_shm *shm)
}
/**
- * tee_shm_get_id() - Get id of a shared memory object
- * @shm: Shared memory handle
- * @returns id
- */
-static inline int tee_shm_get_id(struct tee_shm *shm)
-{
- return shm->id;
-}
-
-/**
- * tee_shm_get_from_id() - Find shared memory object and increase reference
- * count
- * @ctx: Context owning the shared memory
- * @id: Id of shared memory object
- * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
- */
-struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
-
-/**
* tee_client_open_context() - Open a TEE context
* @start: if not NULL, continue search after this context
* @match: function to check TEE device
@@ -470,18 +273,6 @@ int tee_client_invoke_func(struct tee_context *ctx,
int tee_client_cancel_req(struct tee_context *ctx,
struct tee_ioctl_cancel_arg *arg);
-static inline bool tee_param_is_memref(struct tee_param *param)
-{
- switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
- return true;
- default:
- return false;
- }
-}
-
extern const struct bus_type tee_bus_type;
/**
@@ -509,18 +300,4 @@ struct tee_client_driver {
#define to_tee_client_driver(d) \
container_of(d, struct tee_client_driver, driver)
-/**
- * teedev_open() - Open a struct tee_device
- * @teedev: Device to open
- *
- * @return a pointer to struct tee_context on success or an ERR_PTR on failure.
- */
-struct tee_context *teedev_open(struct tee_device *teedev);
-
-/**
- * teedev_close_context() - closes a struct tee_context
- * @ctx: The struct tee_context to close
- */
-void teedev_close_context(struct tee_context *ctx);
-
#endif /*__TEE_DRV_H*/
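
After the split, TEE client drivers include only tee_drv.h. A sketch of the remaining client-side shared-memory flow; ctx is assumed to come from tee_client_open_context():

        static int my_share_buf(struct tee_context *ctx, size_t len)
        {
                struct tee_shm *shm;

                shm = tee_shm_alloc_kernel_buf(ctx, len);
                if (IS_ERR(shm))
                        return PTR_ERR(shm);

                /* ... pass shm via a tee_param memref to tee_client_invoke_func() ... */

                tee_shm_free(shm);
                return 0;
        }
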
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h
index c6540ceea143..0982d1d52b24 100644
--- a/include/linux/timecounter.h
+++ b/include/linux/timecounter.h
@@ -22,7 +22,7 @@
*
* @read: returns the current cycle value
* @mask: bitmask for two's complement
- * subtraction of non 64 bit counters,
+ * subtraction of non-64-bit counters,
* see CYCLECOUNTER_MASK() helper macro
* @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
@@ -35,7 +35,7 @@ struct cyclecounter {
};
/**
- * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
+ * struct timecounter - layer above a &struct cyclecounter which counts nanoseconds
* Contains the state needed by timecounter_read() to detect
* cycle counter wrap around. Initialize with
* timecounter_init(). Also used to convert cycle counts into the
@@ -66,6 +66,8 @@ struct timecounter {
* @cycles: Cycles
* @mask: bit mask for maintaining the 'frac' field
* @frac: pointer to storage for the fractional nanoseconds.
+ *
+ * Returns: cycle counter cycles converted to nanoseconds
*/
static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
u64 cycles, u64 mask, u64 *frac)
@@ -79,6 +81,7 @@ static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
/**
* timecounter_adjtime - Shifts the time of the clock.
+ * @tc: The &struct timecounter to adjust
* @delta: Desired change in nanoseconds.
*/
static inline void timecounter_adjtime(struct timecounter *tc, s64 delta)
@@ -107,6 +110,8 @@ extern void timecounter_init(struct timecounter *tc,
*
* In other words, keeps track of time since the same epoch as
* the function which generated the initial time stamp.
+ *
+ * Returns: nanoseconds since the initial time stamp
*/
extern u64 timecounter_read(struct timecounter *tc);
@@ -123,6 +128,8 @@ extern u64 timecounter_read(struct timecounter *tc);
*
* This allows conversion of cycle counter values which were generated
* in the past.
+ *
+ * Returns: cycle counter converted to nanoseconds since the initial time stamp
*/
extern u64 timecounter_cyc2time(const struct timecounter *tc,
u64 cycle_tstamp);
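
A typical cyclecounter/timecounter pairing looks like the sketch below; the MMIO register, 48-bit width and 10 ns/cycle scaling are made up for illustration:

        static u64 my_read(const struct cyclecounter *cc)
        {
                return readq(my_counter_base);  /* hypothetical free-running counter */
        }

        static const struct cyclecounter my_cc = {
                .read   = my_read,
                .mask   = CYCLECOUNTER_MASK(48),
                .mult   = 10,                   /* pretend 10 ns per cycle */
                .shift  = 0,
        };

        /* with a struct timecounter tc held somewhere: */
        timecounter_init(&tc, &my_cc, ktime_get_real_ns());
        /* later: nanoseconds since init, wrap-safe thanks to .mask */
        ns = timecounter_read(&tc);
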
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 7e50cbd97f86..0ea7823b7f31 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -22,14 +22,14 @@ extern int do_sys_settimeofday64(const struct timespec64 *tv,
const struct timezone *tz);
/*
- * ktime_get() family: read the current time in a multitude of ways,
+ * ktime_get() family - read the current time in a multitude of ways.
*
* The default time reference is CLOCK_MONOTONIC, starting at
* boot time but not counting the time spent in suspend.
* For other references, use the functions with "real", "clocktai",
* "boottime" and "raw" suffixes.
*
- * To get the time in a different format, use the ones wit
+ * To get the time in a different format, use the ones with
* "ns", "ts64" and "seconds" suffix.
*
* See Documentation/core-api/timekeeping.rst for more details.
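
All of these accessors already exist; a compressed sketch of picking among the suffixes described above:

        ktime_t t0 = ktime_get();                    /* CLOCK_MONOTONIC, ktime_t */
        u64 wall_ns = ktime_get_real_ns();           /* CLOCK_REALTIME in ns */
        u64 boot_ns = ktime_get_boottime_ns();       /* monotonic + suspend time */
        s64 elapsed = ktime_to_ns(ktime_sub(ktime_get(), t0));
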
@@ -74,6 +74,8 @@ extern u32 ktime_get_resolution_ns(void);
/**
* ktime_get_real - get the real (wall-) time in ktime_t format
+ *
+ * Returns: real (wall) time in ktime_t format
*/
static inline ktime_t ktime_get_real(void)
{
@@ -86,10 +88,12 @@ static inline ktime_t ktime_get_coarse_real(void)
}
/**
- * ktime_get_boottime - Returns monotonic time since boot in ktime_t format
+ * ktime_get_boottime - Get monotonic time since boot in ktime_t format
*
* This is similar to CLOCK_MONTONIC/ktime_get, but also includes the
* time spent in suspend.
+ *
+ * Returns: monotonic time since boot in ktime_t format
*/
static inline ktime_t ktime_get_boottime(void)
{
@@ -102,7 +106,9 @@ static inline ktime_t ktime_get_coarse_boottime(void)
}
/**
- * ktime_get_clocktai - Returns the TAI time of day in ktime_t format
+ * ktime_get_clocktai - Get the TAI time of day in ktime_t format
+ *
+ * Returns: the TAI time of day in ktime_t format
*/
static inline ktime_t ktime_get_clocktai(void)
{
@@ -144,32 +150,60 @@ static inline u64 ktime_get_coarse_clocktai_ns(void)
/**
* ktime_mono_to_real - Convert monotonic time to clock realtime
+ * @mono: monotonic time to convert
+ *
+ * Returns: time converted to realtime clock
*/
static inline ktime_t ktime_mono_to_real(ktime_t mono)
{
return ktime_mono_to_any(mono, TK_OFFS_REAL);
}
+/**
+ * ktime_get_ns - Get the current time in nanoseconds
+ *
+ * Returns: current time converted to nanoseconds
+ */
static inline u64 ktime_get_ns(void)
{
return ktime_to_ns(ktime_get());
}
+/**
+ * ktime_get_real_ns - Get the current real/wall time in nanoseconds
+ *
+ * Returns: current real time converted to nanoseconds
+ */
static inline u64 ktime_get_real_ns(void)
{
return ktime_to_ns(ktime_get_real());
}
+/**
+ * ktime_get_boottime_ns - Get the monotonic time since boot in nanoseconds
+ *
+ * Returns: current boottime converted to nanoseconds
+ */
static inline u64 ktime_get_boottime_ns(void)
{
return ktime_to_ns(ktime_get_boottime());
}
+/**
+ * ktime_get_clocktai_ns - Get the current TAI time of day in nanoseconds
+ *
+ * Returns: current TAI time converted to nanoseconds
+ */
static inline u64 ktime_get_clocktai_ns(void)
{
return ktime_to_ns(ktime_get_clocktai());
}
+/**
+ * ktime_get_raw_ns - Get the raw monotonic time in nanoseconds
+ *
+ * Returns: current raw monotonic time converted to nanoseconds
+ */
static inline u64 ktime_get_raw_ns(void)
{
return ktime_to_ns(ktime_get_raw());
@@ -224,8 +258,8 @@ extern bool timekeeping_rtc_skipresume(void);
extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
-/*
- * struct ktime_timestanps - Simultaneous mono/boot/real timestamps
+/**
+ * struct ktime_timestamps - Simultaneous mono/boot/real timestamps
* @mono: Monotonic timestamp
* @boot: Boottime timestamp
* @real: Realtime timestamp
@@ -242,7 +276,8 @@ struct ktime_timestamps {
* @cycles: Clocksource counter value to produce the system times
* @real: Realtime system time
* @raw: Monotonic raw system time
- * @clock_was_set_seq: The sequence number of clock was set events
+ * @cs_id: Clocksource ID
+ * @clock_was_set_seq: The sequence number of clock-was-set events
* @cs_was_changed_seq: The sequence number of clocksource change events
*/
struct system_time_snapshot {
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 14a633ba61d6..e67ecd1cbc97 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -22,7 +22,7 @@
#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)
#endif
-/**
+/*
* @TIMER_DEFERRABLE: A deferrable timer will work normally when the
* system is busy, but will not cause a CPU to come out of idle just
* to service it; instead, the timer will be serviced when the CPU
@@ -140,7 +140,7 @@ static inline void destroy_timer_on_stack(struct timer_list *timer) { }
* or not. Callers must ensure serialization wrt. other operations done
* to this timer, eg. interrupt contexts, or other CPUs on SMP.
*
- * return value: 1 if the timer is pending, 0 if not.
+ * Returns: 1 if the timer is pending, 0 if not.
*/
static inline int timer_pending(const struct timer_list * timer)
{
@@ -175,6 +175,10 @@ extern int timer_shutdown(struct timer_list *timer);
* See timer_delete_sync() for detailed explanation.
*
* Do not use in new code. Use timer_delete_sync() instead.
+ *
+ * Returns:
+ * * %0 - The timer was not pending
+ * * %1 - The timer was pending and deactivated
*/
static inline int del_timer_sync(struct timer_list *timer)
{
@@ -188,6 +192,10 @@ static inline int del_timer_sync(struct timer_list *timer)
* See timer_delete() for detailed explanation.
*
* Do not use in new code. Use timer_delete() instead.
+ *
+ * Returns:
+ * * %0 - The timer was not pending
+ * * %1 - The timer was pending and deactivated
*/
static inline int del_timer(struct timer_list *timer)
{
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 4ee9d13749ad..c17e4efbb2e5 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -23,6 +23,7 @@
#include <linux/fs.h>
#include <linux/highmem.h>
#include <crypto/hash_info.h>
+#include <crypto/aes.h>
#define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */
#define TPM_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
@@ -30,17 +31,28 @@
struct tpm_chip;
struct trusted_key_payload;
struct trusted_key_options;
+/* opaque structure, holds auth session parameters like the session key */
+struct tpm2_auth;
+
+enum tpm2_session_types {
+ TPM2_SE_HMAC = 0x00,
+ TPM2_SE_POLICY = 0x01,
+ TPM2_SE_TRIAL = 0x02,
+};
/* if you add a new hash to this, increment TPM_MAX_HASHES below */
enum tpm_algorithms {
TPM_ALG_ERROR = 0x0000,
TPM_ALG_SHA1 = 0x0004,
+ TPM_ALG_AES = 0x0006,
TPM_ALG_KEYEDHASH = 0x0008,
TPM_ALG_SHA256 = 0x000B,
TPM_ALG_SHA384 = 0x000C,
TPM_ALG_SHA512 = 0x000D,
TPM_ALG_NULL = 0x0010,
TPM_ALG_SM3_256 = 0x0012,
+ TPM_ALG_ECC = 0x0023,
+ TPM_ALG_CFB = 0x0043,
};
/*
@@ -49,6 +61,11 @@ enum tpm_algorithms {
*/
#define TPM_MAX_HASHES 5
+enum tpm2_curves {
+ TPM2_ECC_NONE = 0x0000,
+ TPM2_ECC_NIST_P256 = 0x0003,
+};
+
struct tpm_digest {
u16 alg_id;
u8 digest[TPM_MAX_DIGEST_SIZE];
@@ -116,6 +133,20 @@ struct tpm_chip_seqops {
const struct seq_operations *seqops;
};
+/* fixed define for the curve we use which is NIST_P256 */
+#define EC_PT_SZ 32
+
+/*
+ * fixed define for the size of a name. This is actually HASHALG size
+ * plus 2, so 34 for SHA256
+ */
+#define TPM2_NAME_SIZE 34
+
+/*
+ * The maximum size for an object context
+ */
+#define TPM2_MAX_CONTEXT_SIZE 4096
+
struct tpm_chip {
struct device dev;
struct device devs;
@@ -170,6 +201,18 @@ struct tpm_chip {
/* active locality */
int locality;
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+ /* details for communication security via sessions */
+
+ /* saved context for NULL seed */
+ u8 null_key_context[TPM2_MAX_CONTEXT_SIZE];
+ /* name of NULL seed */
+ u8 null_key_name[TPM2_NAME_SIZE];
+ u8 null_ec_key_x[EC_PT_SZ];
+ u8 null_ec_key_y[EC_PT_SZ];
+ struct tpm2_auth *auth;
+#endif
};
#define TPM_HEADER_SIZE 10
@@ -194,6 +237,7 @@ enum tpm2_timeouts {
enum tpm2_structures {
TPM2_ST_NO_SESSIONS = 0x8001,
TPM2_ST_SESSIONS = 0x8002,
+ TPM2_ST_CREATION = 0x8021,
};
/* Indicates from what layer of the software stack the error comes from */
@@ -204,6 +248,7 @@ enum tpm2_return_codes {
TPM2_RC_SUCCESS = 0x0000,
TPM2_RC_HASH = 0x0083, /* RC_FMT1 */
TPM2_RC_HANDLE = 0x008B,
+ TPM2_RC_INTEGRITY = 0x009F,
TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */
TPM2_RC_FAILURE = 0x0101,
TPM2_RC_DISABLED = 0x0120,
@@ -231,6 +276,8 @@ enum tpm2_command_codes {
TPM2_CC_CONTEXT_LOAD = 0x0161,
TPM2_CC_CONTEXT_SAVE = 0x0162,
TPM2_CC_FLUSH_CONTEXT = 0x0165,
+ TPM2_CC_READ_PUBLIC = 0x0173,
+ TPM2_CC_START_AUTH_SESS = 0x0176,
TPM2_CC_VERIFY_SIGNATURE = 0x0177,
TPM2_CC_GET_CAPABILITY = 0x017A,
TPM2_CC_GET_RANDOM = 0x017B,
@@ -243,9 +290,25 @@ enum tpm2_command_codes {
};
enum tpm2_permanent_handles {
+ TPM2_RH_NULL = 0x40000007,
TPM2_RS_PW = 0x40000009,
};
+/* Most Significant Octet for key types */
+enum tpm2_mso_type {
+ TPM2_MSO_NVRAM = 0x01,
+ TPM2_MSO_SESSION = 0x02,
+ TPM2_MSO_POLICY = 0x03,
+ TPM2_MSO_PERMANENT = 0x40,
+ TPM2_MSO_VOLATILE = 0x80,
+ TPM2_MSO_PERSISTENT = 0x81,
+};
+
+static inline enum tpm2_mso_type tpm2_handle_mso(u32 handle)
+{
+ return handle >> 24;
+}
+
enum tpm2_capabilities {
TPM2_CAP_HANDLES = 1,
TPM2_CAP_COMMANDS = 2,
@@ -284,6 +347,7 @@ enum tpm_chip_flags {
TPM_CHIP_FLAG_FIRMWARE_UPGRADE = BIT(7),
TPM_CHIP_FLAG_SUSPENDED = BIT(8),
TPM_CHIP_FLAG_HWRNG_DISABLED = BIT(9),
+ TPM_CHIP_FLAG_DISABLE = BIT(10),
};
#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
@@ -297,28 +361,61 @@ struct tpm_header {
};
} __packed;
-/* A string buffer type for constructing TPM commands. This is based on the
- * ideas of string buffer code in security/keys/trusted.h but is heap based
- * in order to keep the stack usage minimal.
- */
-
enum tpm_buf_flags {
+ /* the capacity exceeded: */
TPM_BUF_OVERFLOW = BIT(0),
+ /* TPM2B format: */
+ TPM_BUF_TPM2B = BIT(1),
+ /* read out of boundary: */
+ TPM_BUF_BOUNDARY_ERROR = BIT(2),
};
+/*
+ * A string buffer type for constructing TPM commands.
+ */
struct tpm_buf {
- unsigned int flags;
+ u32 flags;
+ u32 length;
u8 *data;
+ u8 handles;
};
enum tpm2_object_attributes {
TPM2_OA_FIXED_TPM = BIT(1),
+ TPM2_OA_ST_CLEAR = BIT(2),
TPM2_OA_FIXED_PARENT = BIT(4),
+ TPM2_OA_SENSITIVE_DATA_ORIGIN = BIT(5),
TPM2_OA_USER_WITH_AUTH = BIT(6),
+ TPM2_OA_ADMIN_WITH_POLICY = BIT(7),
+ TPM2_OA_NO_DA = BIT(10),
+ TPM2_OA_ENCRYPTED_DUPLICATION = BIT(11),
+ TPM2_OA_RESTRICTED = BIT(16),
+ TPM2_OA_DECRYPT = BIT(17),
+ TPM2_OA_SIGN = BIT(18),
};
+/*
+ * definitions for the canonical template. These are mandated
+ * by the TCG key template documents
+ */
+
+#define AES_KEY_BYTES AES_KEYSIZE_128
+#define AES_KEY_BITS (AES_KEY_BYTES*8)
+#define TPM2_OA_TMPL (TPM2_OA_NO_DA | \
+ TPM2_OA_FIXED_TPM | \
+ TPM2_OA_FIXED_PARENT | \
+ TPM2_OA_SENSITIVE_DATA_ORIGIN | \
+ TPM2_OA_USER_WITH_AUTH | \
+ TPM2_OA_DECRYPT | \
+ TPM2_OA_RESTRICTED)
+
enum tpm2_session_attributes {
TPM2_SA_CONTINUE_SESSION = BIT(0),
+ TPM2_SA_AUDIT_EXCLUSIVE = BIT(1),
+ TPM2_SA_AUDIT_RESET = BIT(3),
+ TPM2_SA_DECRYPT = BIT(5),
+ TPM2_SA_ENCRYPT = BIT(6),
+ TPM2_SA_AUDIT = BIT(7),
};
struct tpm2_hash {
@@ -326,84 +423,21 @@ struct tpm2_hash {
unsigned int tpm_id;
};
-static inline void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- head->tag = cpu_to_be16(tag);
- head->length = cpu_to_be32(sizeof(*head));
- head->ordinal = cpu_to_be32(ordinal);
-}
-
-static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
-{
- buf->data = (u8 *)__get_free_page(GFP_KERNEL);
- if (!buf->data)
- return -ENOMEM;
-
- buf->flags = 0;
- tpm_buf_reset(buf, tag, ordinal);
- return 0;
-}
-
-static inline void tpm_buf_destroy(struct tpm_buf *buf)
-{
- free_page((unsigned long)buf->data);
-}
-
-static inline u32 tpm_buf_length(struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- return be32_to_cpu(head->length);
-}
-
-static inline u16 tpm_buf_tag(struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- return be16_to_cpu(head->tag);
-}
-
-static inline void tpm_buf_append(struct tpm_buf *buf,
- const unsigned char *new_data,
- unsigned int new_len)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
- u32 len = tpm_buf_length(buf);
-
- /* Return silently if overflow has already happened. */
- if (buf->flags & TPM_BUF_OVERFLOW)
- return;
-
- if ((len + new_len) > PAGE_SIZE) {
- WARN(1, "tpm_buf: overflow\n");
- buf->flags |= TPM_BUF_OVERFLOW;
- return;
- }
-
- memcpy(&buf->data[len], new_data, new_len);
- head->length = cpu_to_be32(len + new_len);
-}
-
-static inline void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value)
-{
- tpm_buf_append(buf, &value, 1);
-}
-
-static inline void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value)
-{
- __be16 value2 = cpu_to_be16(value);
-
- tpm_buf_append(buf, (u8 *) &value2, 2);
-}
-
-static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
-{
- __be32 value2 = cpu_to_be32(value);
-
- tpm_buf_append(buf, (u8 *) &value2, 4);
-}
+int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal);
+void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal);
+int tpm_buf_init_sized(struct tpm_buf *buf);
+void tpm_buf_reset_sized(struct tpm_buf *buf);
+void tpm_buf_destroy(struct tpm_buf *buf);
+u32 tpm_buf_length(struct tpm_buf *buf);
+void tpm_buf_append(struct tpm_buf *buf, const u8 *new_data, u16 new_length);
+void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value);
+void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value);
+void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value);
+u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset);
+u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset);
+u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset);
+
+u8 *tpm_buf_parameters(struct tpm_buf *buf);
/*
* Check if TPM device is in the firmware upgrade mode.
@@ -415,7 +449,7 @@ static inline bool tpm_is_firmware_upgrade(struct tpm_chip *chip)
static inline u32 tpm2_rc_value(u32 rc)
{
- return (rc & BIT(7)) ? rc & 0xff : rc;
+ return (rc & BIT(7)) ? rc & 0xbf : rc;
}
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
@@ -429,10 +463,19 @@ extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digest);
extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digests);
-extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen);
extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max);
extern struct tpm_chip *tpm_default_chip(void);
void tpm2_flush_context(struct tpm_chip *chip, u32 handle);
+
+static inline void tpm_buf_append_empty_auth(struct tpm_buf *buf, u32 handle)
+{
+ /* simple authorization for empty auth */
+ tpm_buf_append_u32(buf, 9); /* total length of auth */
+ tpm_buf_append_u32(buf, handle);
+ tpm_buf_append_u16(buf, 0); /* nonce len */
+ tpm_buf_append_u8(buf, 0); /* attributes */
+ tpm_buf_append_u16(buf, 0); /* hmac len */
+}
#else
static inline int tpm_is_tpm2(struct tpm_chip *chip)
{
@@ -450,10 +493,6 @@ static inline int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
return -ENODEV;
}
-static inline int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
-{
- return -ENODEV;
-}
static inline int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max)
{
return -ENODEV;
@@ -463,5 +502,102 @@ static inline struct tpm_chip *tpm_default_chip(void)
{
return NULL;
}
+
+static inline void tpm_buf_append_empty_auth(struct tpm_buf *buf, u32 handle)
+{
+}
#endif
+#ifdef CONFIG_TCG_TPM2_HMAC
+
+int tpm2_start_auth_session(struct tpm_chip *chip);
+void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
+ u32 handle, u8 *name);
+void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
+ u8 attributes, u8 *passphrase,
+ int passphraselen);
+static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip,
+ struct tpm_buf *buf,
+ u8 attributes,
+ u8 *passphrase,
+ int passphraselen)
+{
+ tpm_buf_append_hmac_session(chip, buf, attributes, passphrase,
+ passphraselen);
+}
+void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf);
+int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
+ int rc);
+void tpm2_end_auth_session(struct tpm_chip *chip);
+#else
+#include <asm/unaligned.h>
+
+static inline int tpm2_start_auth_session(struct tpm_chip *chip)
+{
+ return 0;
+}
+static inline void tpm2_end_auth_session(struct tpm_chip *chip)
+{
+}
+static inline void tpm_buf_append_name(struct tpm_chip *chip,
+ struct tpm_buf *buf,
+ u32 handle, u8 *name)
+{
+ tpm_buf_append_u32(buf, handle);
+	/* count the handles appended; used to locate the session area */
+ buf->handles++;
+}
+static inline void tpm_buf_append_hmac_session(struct tpm_chip *chip,
+ struct tpm_buf *buf,
+ u8 attributes, u8 *passphrase,
+ int passphraselen)
+{
+ /* offset tells us where the sessions area begins */
+ int offset = buf->handles * 4 + TPM_HEADER_SIZE;
+ u32 len = 9 + passphraselen;
+
+ if (tpm_buf_length(buf) != offset) {
+ /* not the first session so update the existing length */
+ len += get_unaligned_be32(&buf->data[offset]);
+ put_unaligned_be32(len, &buf->data[offset]);
+ } else {
+ tpm_buf_append_u32(buf, len);
+ }
+ /* auth handle */
+ tpm_buf_append_u32(buf, TPM2_RS_PW);
+ /* nonce */
+ tpm_buf_append_u16(buf, 0);
+ /* attributes */
+ tpm_buf_append_u8(buf, 0);
+ /* passphrase */
+ tpm_buf_append_u16(buf, passphraselen);
+ tpm_buf_append(buf, passphrase, passphraselen);
+}
+static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip,
+ struct tpm_buf *buf,
+ u8 attributes,
+ u8 *passphrase,
+ int passphraselen)
+{
+ int offset = buf->handles * 4 + TPM_HEADER_SIZE;
+ struct tpm_header *head = (struct tpm_header *) buf->data;
+
+ /*
+ * if the only sessions are optional, the command tag
+ * must change to TPM2_ST_NO_SESSIONS
+ */
+ if (tpm_buf_length(buf) == offset)
+ head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+}
+static inline void tpm_buf_fill_hmac_session(struct tpm_chip *chip,
+ struct tpm_buf *buf)
+{
+}
+static inline int tpm_buf_check_hmac_response(struct tpm_chip *chip,
+ struct tpm_buf *buf,
+ int rc)
+{
+ return rc;
+}
+#endif /* CONFIG_TCG_TPM2_HMAC */
+
#endif
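
A sketch of the new out-of-line tpm_buf API combined with the HMAC-session helpers, loosely modelled on how a TPM2_CC_GET_RANDOM command would be issued; chip is assumed valid, tpm_transmit_cmd() is the driver-internal transport, and error paths are trimmed:

        struct tpm_buf buf;
        int rc;

        rc = tpm2_start_auth_session(chip);     /* stub returning 0 without TCG_TPM2_HMAC */
        if (rc)
                return rc;
        rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_GET_RANDOM);
        if (rc)
                return rc;
        /* TPM2_SA_ENCRYPT asks for the random bytes to be encrypted on the bus */
        tpm_buf_append_hmac_session(chip, &buf, TPM2_SA_ENCRYPT, NULL, 0);
        tpm_buf_append_u16(&buf, 16);           /* number of bytes requested */
        tpm_buf_fill_hmac_session(chip, &buf);
        rc = tpm_transmit_cmd(chip, &buf, 0, "getting random bytes");
        rc = tpm_buf_check_hmac_response(chip, &buf, rc);
        tpm_buf_destroy(&buf);
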
diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h
index 7a5fe17b6bf9..d03f74658716 100644
--- a/include/linux/tracefs.h
+++ b/include/linux/tracefs.h
@@ -62,6 +62,8 @@ struct eventfs_file;
typedef int (*eventfs_callback)(const char *name, umode_t *mode, void **data,
const struct file_operations **fops);
+typedef void (*eventfs_release)(const char *name, void *data);
+
/**
* struct eventfs_entry - dynamically created eventfs file call back handler
 * @name: The name of the dynamic file in an eventfs directory
@@ -72,6 +74,7 @@ typedef int (*eventfs_callback)(const char *name, umode_t *mode, void **data,
struct eventfs_entry {
const char *name;
eventfs_callback callback;
+ eventfs_release release;
};
struct eventfs_inode;
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 2b2e6f0a54d6..2372f9357240 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -145,15 +145,12 @@ struct tty_operations;
* @count: count of open processes, reaching zero cancels all the work for
* this tty and drops a @kref too (but does not free this tty)
* @winsize: size of the terminal "window" (cf. @winsize_mutex)
- * @flow: flow settings grouped together, see also @flow.unused
+ * @flow: flow settings grouped together
* @flow.lock: lock for @flow members
* @flow.stopped: tty stopped/started by stop_tty()/start_tty()
* @flow.tco_stopped: tty stopped/started by %TCOOFF/%TCOON ioctls (it has
* precedence over @flow.stopped)
- * @flow.unused: alignment for Alpha, so that no members other than @flow.* are
- * modified by the same 64b word store. The @flow's __aligned is
- * there for the very same reason.
- * @ctrl: control settings grouped together, see also @ctrl.unused
+ * @ctrl: control settings grouped together
* @ctrl.lock: lock for @ctrl members
* @ctrl.pgrp: process group of this tty (setpgrp(2))
* @ctrl.session: session of this tty (setsid(2)). Writes are protected by both
@@ -161,7 +158,6 @@ struct tty_operations;
* them.
* @ctrl.pktstatus: packet mode status (bitwise OR of %TIOCPKT_ constants)
* @ctrl.packet: packet mode enabled
- * @ctrl.unused: alignment for Alpha, see @flow.unused for explanation
* @hw_stopped: not controlled by the tty layer, under @driver's control for CTS
* handling
* @receive_room: bytes permitted to feed to @ldisc without any being lost
@@ -216,8 +212,7 @@ struct tty_struct {
spinlock_t lock;
bool stopped;
bool tco_stopped;
- unsigned long unused[0];
- } __aligned(sizeof(unsigned long)) flow;
+ } flow;
struct {
struct pid *pgrp;
@@ -225,8 +220,7 @@ struct tty_struct {
spinlock_t lock;
unsigned char pktstatus;
bool packet;
- unsigned long unused[0];
- } __aligned(sizeof(unsigned long)) ctrl;
+ } ctrl;
bool hw_stopped;
bool closing;
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index ffe48e69b3f3..457879938fc1 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -135,10 +135,11 @@ static inline void u64_stats_inc(u64_stats_t *p)
p->v++;
}
-static inline void u64_stats_init(struct u64_stats_sync *syncp)
-{
- seqcount_init(&syncp->seq);
-}
+#define u64_stats_init(syncp) \
+ do { \
+ struct u64_stats_sync *__s = (syncp); \
+ seqcount_init(&__s->seq); \
+ } while (0)
static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
{
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 3748e82b627b..e398e1dbd2d3 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -108,7 +108,7 @@ struct udp_sock {
#define udp_assign_bit(nr, sk, val) \
assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)
-#define UDP_MAX_SEGMENTS (1 << 6UL)
+#define UDP_MAX_SEGMENTS (1 << 7UL)
#define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
@@ -150,6 +150,24 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
}
}
+DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
+#if IS_ENABLED(CONFIG_IPV6)
+DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
+#endif
+
+static inline bool udp_encap_needed(void)
+{
+ if (static_branch_unlikely(&udp_encap_needed_key))
+ return true;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (static_branch_unlikely(&udpv6_encap_needed_key))
+ return true;
+#endif
+
+ return false;
+}
+
static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
{
if (!skb_is_gso(skb))
@@ -163,6 +181,16 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
!udp_test_bit(ACCEPT_FRAGLIST, sk))
return true;
+ /* GSO packets lacking the SKB_GSO_UDP_TUNNEL/_CSUM bits might still
+ * land in a tunnel as the socket check in udp_gro_receive cannot be
+ * foolproof.
+ */
+ if (udp_encap_needed() &&
+ READ_ONCE(udp_sk(sk)->encap_rcv) &&
+ !(skb_shinfo(skb)->gso_type &
+ (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)))
+ return true;
+
return false;
}
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 00cebe2b70de..7020adedfa08 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -206,6 +206,16 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
}
static __always_inline __must_check
+bool copy_to_iter_full(const void *addr, size_t bytes, struct iov_iter *i)
+{
+ size_t copied = copy_to_iter(addr, bytes, i);
+ if (likely(copied == bytes))
+ return true;
+ iov_iter_revert(i, copied);
+ return false;
+}
+
+static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
size_t copied = copy_from_iter(addr, bytes, i);
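
copy_to_iter_full() gives writers the same all-or-nothing semantics shown above for reads: on a short copy the iterator is reverted to its prior position. A sketch, with struct my_rec as an illustrative record type:

        static bool emit_record(const struct my_rec *rec, struct iov_iter *to)
        {
                /* either the whole record is written or the iterator is untouched */
                return copy_to_iter_full(rec, sizeof(*rec), to);
        }
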
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index b0201747a263..26c4325aa373 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -170,7 +170,7 @@ size_t virtio_max_dma_size(const struct virtio_device *vdev);
/**
* struct virtio_driver - operations for a virtio I/O driver
- * @driver: underlying device driver (populate name and owner).
+ * @driver: underlying device driver (populate name).
* @id_table: the ids serviced by this driver.
* @feature_table: an array of feature numbers supported by this driver.
* @feature_table_size: number of entries in the feature table array.
@@ -208,7 +208,10 @@ static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
return container_of(drv, struct virtio_driver, driver);
}
-int register_virtio_driver(struct virtio_driver *drv);
+/* use a macro to avoid include chaining to get THIS_MODULE */
+#define register_virtio_driver(drv) \
+ __register_virtio_driver(drv, THIS_MODULE)
+int __register_virtio_driver(struct virtio_driver *drv, struct module *owner);
void unregister_virtio_driver(struct virtio_driver *drv);
/* module_virtio_driver() - Helper macro for drivers that don't do
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 78ebcf782ce5..4f785098c67a 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -207,6 +207,8 @@ int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err
int p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
int *err);
int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err);
+struct netfs_io_subrequest;
+void p9_client_write_subreq(struct netfs_io_subrequest *subreq);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset);
int p9dirent_read(struct p9_client *clnt, char *buf, int len,
struct p9_dirent *dirent);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 9d06eb945509..62a407db1bf5 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -438,6 +438,10 @@ static inline void in6_ifa_hold(struct inet6_ifaddr *ifp)
refcount_inc(&ifp->refcnt);
}
+static inline bool in6_ifa_hold_safe(struct inet6_ifaddr *ifp)
+{
+ return refcount_inc_not_zero(&ifp->refcnt);
+}
/*
* compute link-local solicited-node multicast address
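
in6_ifa_hold_safe() is the usual refcount_inc_not_zero() lookup guard:
under RCU an address can still be found after its last reference was
dropped, and the helper refuses to resurrect it. A sketch of the intended
call pattern (the lookup function is hypothetical):

    rcu_read_lock();
    ifp = my_ifaddr_lookup(idev, addr);     /* hypothetical lookup */
    if (ifp && !in6_ifa_hold_safe(ifp))
            ifp = NULL;                     /* lost race with teardown */
    rcu_read_unlock();
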
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 627ea8e2d915..3dee0b2721aa 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -85,6 +85,9 @@ enum unix_socket_lock_class {
U_LOCK_NORMAL,
U_LOCK_SECOND, /* for double locking, see unix_state_double_lock(). */
U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
+ U_LOCK_GC_LISTENER, /* used for listening socket while determining gc
+ * candidates to close a small race window.
+ */
};
static inline void unix_state_lock_nested(struct sock *sk,
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 9fe95a22abeb..eaec5d6caa29 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -585,6 +585,15 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
return skb;
}
+static inline int bt_copy_from_sockptr(void *dst, size_t dst_size,
+ sockptr_t src, size_t src_size)
+{
+ if (dst_size > src_size)
+ return -EINVAL;
+
+ return copy_from_sockptr(dst, src, dst_size);
+}
+
int bt_to_errno(u16 code);
__u8 bt_status(int err);
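
bt_copy_from_sockptr() folds the recurring setsockopt length check into
one place: it rejects a userspace buffer shorter than the destination
instead of over-reading it. Typical use in a setsockopt handler
(illustrative fragment):

    u32 opt;
    int err;

    err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
    if (err)
            return err;     /* -EINVAL if optlen < sizeof(opt) */
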
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 8701ca5f31ee..5c12761cbc0e 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -176,6 +176,15 @@ enum {
*/
HCI_QUIRK_USE_BDADDR_PROPERTY,
+ /* When this quirk is set, the Bluetooth Device Address provided by
+ * the 'local-bd-address' fwnode property is incorrectly specified in
+ * big-endian order.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BDADDR_PROPERTY_BROKEN,
+
/* When this quirk is set, the duplicate filtering during
* scanning is based on Bluetooth devices addresses. To allow
* RSSI based updates, restart scanning if needed.
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 56fb42df44a3..e8f581f3f3ce 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -738,6 +738,8 @@ struct hci_conn {
__u8 le_per_adv_data[HCI_MAX_PER_AD_TOT_LEN];
__u16 le_per_adv_data_len;
__u16 le_per_adv_data_offset;
+ __u8 le_adv_phy;
+ __u8 le_adv_sec_phy;
__u8 le_tx_phy;
__u8 le_rx_phy;
__s8 rssi;
@@ -1512,7 +1514,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
enum conn_reasons conn_reason);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, bool dst_resolved, u8 sec_level,
- u16 conn_timeout, u8 role);
+ u16 conn_timeout, u8 role, u8 phy, u8 sec_phy);
void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type,
@@ -1905,6 +1907,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define privacy_mode_capable(dev) (use_ll_privacy(dev) && \
(hdev->commands[39] & 0x04))
+#define read_key_size_capable(dev) \
+ ((dev)->commands[20] & 0x10 && \
+ !test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks))
+
/* Use enhanced synchronous connection if command is supported and its quirk
* has not been set.
*/
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 2e2be4fd2bb6..1e09329acc42 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4991,6 +4991,7 @@ struct cfg80211_ops {
* set this flag to update channels on beacon hints.
* @WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY: support connection to non-primary link
* of an NSTR mobile AP MLD.
+ * @WIPHY_FLAG_DISABLE_WEXT: disable wireless extensions for this device
*/
enum wiphy_flags {
WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0),
@@ -5002,6 +5003,7 @@ enum wiphy_flags {
WIPHY_FLAG_4ADDR_STATION = BIT(6),
WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7),
WIPHY_FLAG_IBSS_RSN = BIT(8),
+ WIPHY_FLAG_DISABLE_WEXT = BIT(9),
WIPHY_FLAG_MESH_AUTH = BIT(10),
WIPHY_FLAG_SUPPORTS_EXT_KCK_32 = BIT(11),
WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY = BIT(12),
diff --git a/include/net/gro.h b/include/net/gro.h
index 50f1e403dbbb..c1d4ca0463a1 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -87,6 +87,15 @@ struct napi_gro_cb {
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
+
+ /* L3 offsets */
+ union {
+ struct {
+ u16 network_offset;
+ u16 inner_network_offset;
+ };
+ u16 network_offsets[2];
+ };
};
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
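
The anonymous union lets GRO handlers address the same storage either by
name or indexed by encapsulation level, which keeps tunnel paths generic.
Illustrative use only; inner_off and encap stand in for values a real
handler would compute:

    struct napi_gro_cb *cb = NAPI_GRO_CB(skb);

    cb->network_offset = skb_network_offset(skb);  /* outer L3 header */
    cb->inner_network_offset = inner_off;          /* hypothetical value */

    /* ...or indexed, where 0 selects the outer and 1 the inner offset: */
    u16 off = cb->network_offsets[encap ? 1 : 0];
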
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 9ab4bf704e86..ccf171f7eb60 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -175,6 +175,7 @@ void inet_csk_init_xmit_timers(struct sock *sk,
void (*delack_handler)(struct timer_list *),
void (*keepalive_handler)(struct timer_list *));
void inet_csk_clear_xmit_timers(struct sock *sk);
+void inet_csk_clear_xmit_timers_sync(struct sock *sk);
static inline void inet_csk_schedule_ack(struct sock *sk)
{
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 5cd64bb2104d..c286cc2e766e 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -361,6 +361,39 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
return pskb_network_may_pull(skb, nhlen);
}
+/* Variant of pskb_inet_may_pull() that is also safe for VLAN-tagged
+ * frames: the network header is located past any VLAN headers.
+ */
+static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
+{
+ int nhlen = 0, maclen = ETH_HLEN;
+ __be16 type = skb->protocol;
+
+ /* Essentially this is skb_protocol(skb, true), and as a side
+ * effect we also learn the MAC header length.
+ */
+ if (eth_type_vlan(type))
+ type = __vlan_get_protocol(skb, type, &maclen);
+
+ switch (type) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ nhlen = sizeof(struct ipv6hdr);
+ break;
+#endif
+ case htons(ETH_P_IP):
+ nhlen = sizeof(struct iphdr);
+ break;
+ }
+ /* For ETH_P_IPV6/ETH_P_IP we make sure to pull
+ * a base network header in skb->head.
+ */
+ if (!pskb_may_pull(skb, maclen + nhlen))
+ return false;
+
+ skb_set_network_header(skb, maclen);
+ return true;
+}
+
static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
{
const struct ip_tunnel_encap_ops *ops;
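
skb_vlan_inet_prepare() exists for tunnel transmit paths that may be
handed VLAN-tagged frames; plain pskb_inet_may_pull() would treat the VLAN
tag bytes as the start of the IP header. A hedged sketch of the intended
call site (the inner xmit helper is hypothetical):

    static netdev_tx_t my_tunnel_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
    {
            if (!skb_vlan_inet_prepare(skb)) {
                    kfree_skb(skb);          /* truncated header */
                    return NETDEV_TX_OK;
            }
            /* network header now set just past any VLAN tags */
            return my_tunnel_xmit_inner(skb, dev);   /* hypothetical */
    }
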
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 353488ab94a2..2d7f87bc5324 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -953,6 +953,8 @@ enum mac80211_tx_info_flags {
* of their QoS TID or other priority field values.
* @IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX: first MLO TX, used mostly internally
* for sequence number assignment
+ * @IEEE80211_TX_CTRL_SCAN_TX: Indicates that this frame is transmitted
+ * due to scanning, not in normal operation on the interface.
* @IEEE80211_TX_CTRL_MLO_LINK: If not @IEEE80211_LINK_UNSPECIFIED, this
* frame should be transmitted on the specific link. This really is
* only relevant for frames that do not have data present, and is
@@ -973,6 +975,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_NO_SEQNO = BIT(7),
IEEE80211_TX_CTRL_DONT_REORDER = BIT(8),
IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX = BIT(9),
+ IEEE80211_TX_CTRL_SCAN_TX = BIT(10),
IEEE80211_TX_CTRL_MLO_LINK = 0xf0000000,
};
diff --git a/include/net/macsec.h b/include/net/macsec.h
index dbd22180cc5c..de216cbc6b05 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -321,6 +321,7 @@ struct macsec_context {
* for the TX tag
* @needed_tailroom: number of bytes reserved at the end of the sk_buff for the
* TX tag
+ * @rx_uses_md_dst: whether MACsec device offload supports sk_buff md_dst
*/
struct macsec_ops {
/* Device wide */
@@ -352,6 +353,7 @@ struct macsec_ops {
struct sk_buff *skb);
unsigned int needed_headroom;
unsigned int needed_tailroom;
+ bool rx_uses_md_dst;
};
void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 76147feb0d10..4eeedf14711b 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -39,7 +39,6 @@ enum TRI_STATE {
#define COMP_ENTRY_SIZE 64
#define RX_BUFFERS_PER_QUEUE 512
-#define MANA_RX_DATA_ALIGN 64
#define MAX_SEND_BUFFERS_PER_QUEUE 256
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index a763dd327c6e..9abb7ee40d72 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -336,7 +336,7 @@ int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
int nf_flow_table_offload_init(void);
void nf_flow_table_offload_exit(void);
-static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
+static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb)
{
__be16 proto;
@@ -352,6 +352,16 @@ static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
return 0;
}
+static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto)
+{
+ if (!pskb_may_pull(skb, PPPOE_SES_HLEN))
+ return false;
+
+ *inner_proto = __nf_flow_pppoe_proto(skb);
+
+ return true;
+}
+
#define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count) \
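
The renamed __nf_flow_pppoe_proto() now signals its precondition (the
PPPoE header must already be linear), while the new wrapper pulls the
session header first. A sketch of a caller:

    __be16 inner_proto;

    if (skb->protocol == htons(ETH_P_PPP_SES)) {
            if (!nf_flow_pppoe_proto(skb, &inner_proto))
                    return NF_DROP;    /* truncated PPPoE header */
            /* inner_proto holds the encapsulated protocol, or 0 */
    }
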
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index e27c28b612e4..3f1ed467f951 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -307,9 +307,23 @@ static inline void *nft_elem_priv_cast(const struct nft_elem_priv *priv)
return (void *)priv;
}
+
+/**
+ * enum nft_iter_type - nftables set iterator type
+ *
+ * @NFT_ITER_READ: read-only iteration over set elements
+ * @NFT_ITER_UPDATE: iteration under mutex to update set element state
+ */
+enum nft_iter_type {
+ NFT_ITER_UNSPEC,
+ NFT_ITER_READ,
+ NFT_ITER_UPDATE,
+};
+
struct nft_set;
struct nft_set_iter {
u8 genmask;
+ enum nft_iter_type type:8;
unsigned int count;
unsigned int skip;
int err;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index cefe0c4bdae3..41ca14e81d55 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -117,6 +117,7 @@ struct Qdisc {
struct qdisc_skb_head q;
struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
+ int owner;
unsigned long state;
unsigned long state2; /* must be written under qdisc spinlock */
struct Qdisc *next_sched;
diff --git a/include/net/sock.h b/include/net/sock.h
index b5e00702acc1..b4b553df7870 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1410,32 +1410,34 @@ sk_memory_allocated(const struct sock *sk)
#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
extern int sysctl_mem_pcpu_rsv;
+static inline void proto_memory_pcpu_drain(struct proto *proto)
+{
+ int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);
+
+ if (val)
+ atomic_long_add(val, proto->memory_allocated);
+}
+
static inline void
-sk_memory_allocated_add(struct sock *sk, int amt)
+sk_memory_allocated_add(const struct sock *sk, int val)
{
- int local_reserve;
+ struct proto *proto = sk->sk_prot;
- preempt_disable();
- local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
- if (local_reserve >= READ_ONCE(sysctl_mem_pcpu_rsv)) {
- __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
- atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
- }
- preempt_enable();
+ val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);
+
+ if (unlikely(val >= READ_ONCE(sysctl_mem_pcpu_rsv)))
+ proto_memory_pcpu_drain(proto);
}
static inline void
-sk_memory_allocated_sub(struct sock *sk, int amt)
+sk_memory_allocated_sub(const struct sock *sk, int val)
{
- int local_reserve;
+ struct proto *proto = sk->sk_prot;
- preempt_disable();
- local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
- if (local_reserve <= -READ_ONCE(sysctl_mem_pcpu_rsv)) {
- __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
- atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
- }
- preempt_enable();
+ val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);
+
+ if (unlikely(val <= -READ_ONCE(sysctl_mem_pcpu_rsv)))
+ proto_memory_pcpu_drain(proto);
}
#define SK_ALLOC_PERCPU_COUNTER_BATCH 16
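
The rewrite above keeps the same batching idea (accumulate small deltas
per CPU, spill into the shared atomic only past a threshold) but moves the
spill into proto_memory_pcpu_drain() and relies on this_cpu_*() being
preemption-safe, dropping the explicit preempt_disable() pair. The scheme
in isolation, as an illustrative sketch rather than the socket code
itself (MY_PCPU_RESERVE is a stand-in threshold):

    DEFINE_PER_CPU(int, pcpu_fw_alloc);
    static atomic_long_t memory_allocated;

    static void mem_drain(void)
    {
            int val = this_cpu_xchg(pcpu_fw_alloc, 0);

            if (val)                        /* flush batched delta */
                    atomic_long_add(val, &memory_allocated);
    }

    static void mem_charge(int amt)         /* amt may be negative */
    {
            int val = this_cpu_add_return(pcpu_fw_alloc, amt);

            if (unlikely(abs(val) >= MY_PCPU_RESERVE))
                    mem_drain();
    }
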
@@ -1759,6 +1761,13 @@ static inline void sock_owned_by_me(const struct sock *sk)
#endif
}
+static inline void sock_not_owned_by_me(const struct sock *sk)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(lockdep_sock_is_held(sk) && debug_locks);
+#endif
+}
+
static inline bool sock_owned_by_user(const struct sock *sk)
{
sock_owned_by_me(sk);
diff --git a/include/net/tls.h b/include/net/tls.h
index 340ad43971e4..33f657d3c051 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -111,7 +111,8 @@ struct tls_strparser {
u32 stopped : 1;
u32 copy_mode : 1;
u32 mixed_decrypted : 1;
- u32 msg_ready : 1;
+
+ bool msg_ready;
struct strp_msg stm;
diff --git a/include/net/udp.h b/include/net/udp.h
index 488a6d2babcc..c4e05b14b648 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -379,14 +379,7 @@ static inline bool udp_skb_is_linear(struct sk_buff *skb)
static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
struct iov_iter *to)
{
- int n;
-
- n = copy_to_iter(skb->data + off, len, to);
- if (n == len)
- return 0;
-
- iov_iter_revert(to, n);
- return -EFAULT;
+ return copy_to_iter_full(skb->data + off, len, to) ? 0 : -EFAULT;
}
/*
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 3cb4dc9bd70e..3d54de168a6d 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -188,6 +188,8 @@ static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
{
if (!compl)
return;
+ if (!compl->tx_timestamp)
+ return;
*compl->tx_timestamp = ops->tmo_fill_timestamp(priv);
}
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 57c743b7e4fe..cb4841a9fffd 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1049,6 +1049,9 @@ struct xfrm_offload {
#define CRYPTO_INVALID_PACKET_SYNTAX 64
#define CRYPTO_INVALID_PROTOCOL 128
+ /* Used to keep whole l2 header for transport mode GRO */
+ __u32 orig_mac_len;
+
__u8 proto;
__u8 inner_ipproto;
};
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index 4ce1988b2ba0..f40915d2ecee 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -12,6 +12,7 @@ struct request;
struct scsi_driver {
struct device_driver gendrv;
+ int (*resume)(struct device *);
void (*rescan)(struct device *);
blk_status_t (*init_command)(struct scsi_cmnd *);
void (*uninit_command)(struct scsi_cmnd *);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index b259d42a1e1a..129001f600fc 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -767,6 +767,7 @@ scsi_template_proc_dir(const struct scsi_host_template *sht);
#define scsi_template_proc_dir(sht) NULL
#endif
extern void scsi_scan_host(struct Scsi_Host *);
+extern int scsi_resume_device(struct scsi_device *sdev);
extern int scsi_rescan_device(struct scsi_device *sdev);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
diff --git a/include/soc/fsl/dcp.h b/include/soc/fsl/dcp.h
new file mode 100644
index 000000000000..3ec335d8ca8b
--- /dev/null
+++ b/include/soc/fsl/dcp.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 sigma star gmbh
+ *
+ * Specifies paes key slot handles for NXP's DCP (Data Co-Processor) to be used
+ * with crypto_skcipher_setkey().
+ */
+
+#ifndef MXS_DCP_H
+#define MXS_DCP_H
+
+#define DCP_PAES_KEYSIZE 1
+#define DCP_PAES_KEY_SLOT0 0x00
+#define DCP_PAES_KEY_SLOT1 0x01
+#define DCP_PAES_KEY_SLOT2 0x02
+#define DCP_PAES_KEY_SLOT3 0x03
+#define DCP_PAES_KEY_UNIQUE 0xfe
+#define DCP_PAES_KEY_OTP 0xff
+
+#endif /* MXS_DCP_H */
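
Selecting a hardware key slot: the "key" handed to
crypto_skcipher_setkey() is the one-byte slot handle defined above, not
key material. A heavily hedged sketch; the "ecb(paes)" algorithm name is
an assumption here, so check the mxs-dcp driver for the names it actually
registers:

    u8 key = DCP_PAES_KEY_OTP;    /* use the fused OTP key */
    struct crypto_skcipher *tfm;

    tfm = crypto_alloc_skcipher("ecb(paes)", 0, 0);  /* name assumed */
    if (!IS_ERR(tfm))
            crypto_skcipher_setkey(tfm, &key, DCP_PAES_KEYSIZE);
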
diff --git a/include/soc/qcom/cmd-db.h b/include/soc/qcom/cmd-db.h
index c8bb56e6852a..47a6cab75e63 100644
--- a/include/soc/qcom/cmd-db.h
+++ b/include/soc/qcom/cmd-db.h
@@ -1,5 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
#ifndef __QCOM_COMMAND_DB_H__
#define __QCOM_COMMAND_DB_H__
@@ -21,6 +24,8 @@ u32 cmd_db_read_addr(const char *resource_id);
const void *cmd_db_read_aux_data(const char *resource_id, size_t *len);
+bool cmd_db_match_resource_addr(u32 addr1, u32 addr2);
+
enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id);
int cmd_db_ready(void);
@@ -31,6 +36,9 @@ static inline u32 cmd_db_read_addr(const char *resource_id)
static inline const void *cmd_db_read_aux_data(const char *resource_id, size_t *len)
{ return ERR_PTR(-ENODEV); }
+static inline bool cmd_db_match_resource_addr(u32 addr1, u32 addr2)
+{ return false; }
+
static inline enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id)
{ return -ENODEV; }
diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
index e0629699b563..1a3c6f66f620 100644
--- a/include/sound/cs35l56.h
+++ b/include/sound/cs35l56.h
@@ -267,6 +267,7 @@ struct cs35l56_base {
bool fw_patched;
bool secured;
bool can_hibernate;
+ bool fw_owns_asp1;
bool cal_data_valid;
s8 cal_index;
struct cirrus_amp_cal_data cal_data;
@@ -283,6 +284,7 @@ extern const char * const cs35l56_tx_input_texts[CS35L56_NUM_INPUT_SRC];
extern const unsigned int cs35l56_tx_input_values[CS35L56_NUM_INPUT_SRC];
int cs35l56_set_patch(struct cs35l56_base *cs35l56_base);
+int cs35l56_init_asp1_regs_for_driver_control(struct cs35l56_base *cs35l56_base);
int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base);
int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command);
int cs35l56_firmware_shutdown(struct cs35l56_base *cs35l56_base);
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 1af9e6819392..234b5baea69c 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -1684,8 +1684,8 @@ struct snd_emu1010 {
unsigned int clock_fallback;
unsigned int optical_in; /* 0:SPDIF, 1:ADAT */
unsigned int optical_out; /* 0:SPDIF, 1:ADAT */
- struct work_struct firmware_work;
- struct work_struct clock_work;
+ struct work_struct work;
+ struct mutex lock;
};
struct snd_emu10k1 {
@@ -1834,6 +1834,9 @@ unsigned int snd_emu10k1_ptr20_read(struct snd_emu10k1 * emu, unsigned int reg,
void snd_emu10k1_ptr20_write(struct snd_emu10k1 *emu, unsigned int reg, unsigned int chn, unsigned int data);
int snd_emu10k1_spi_write(struct snd_emu10k1 * emu, unsigned int data);
int snd_emu10k1_i2c_write(struct snd_emu10k1 *emu, u32 reg, u32 value);
+static inline void snd_emu1010_fpga_lock(struct snd_emu10k1 *emu) { mutex_lock(&emu->emu1010.lock); }
+static inline void snd_emu1010_fpga_unlock(struct snd_emu10k1 *emu) { mutex_unlock(&emu->emu1010.lock); }
+void snd_emu1010_fpga_write_lock(struct snd_emu10k1 *emu, u32 reg, u32 value);
void snd_emu1010_fpga_write(struct snd_emu10k1 *emu, u32 reg, u32 value);
void snd_emu1010_fpga_read(struct snd_emu10k1 *emu, u32 reg, u32 *value);
void snd_emu1010_fpga_link_dst_src_write(struct snd_emu10k1 *emu, u32 dst, u32 src);
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index a8bebac1e4b2..957295364a5e 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -56,6 +56,9 @@ struct hdac_ext_stream {
u32 pphcldpl;
u32 pphcldpu;
+ u32 pplcllpl;
+ u32 pplcllpu;
+
bool decoupled:1;
bool link_locked:1;
bool link_prepared;
diff --git a/include/sound/intel-nhlt.h b/include/sound/intel-nhlt.h
index 53470d6a28d6..24dbe16684ae 100644
--- a/include/sound/intel-nhlt.h
+++ b/include/sound/intel-nhlt.h
@@ -143,6 +143,9 @@ intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
u32 bus_id, u8 link_type, u8 vbps, u8 bps,
u8 num_ch, u32 rate, u8 dir, u8 dev_type);
+int intel_nhlt_ssp_device_type(struct device *dev, struct nhlt_acpi_table *nhlt,
+ u8 virtual_bus_id);
+
#else
static inline struct nhlt_acpi_table *intel_nhlt_init(struct device *dev)
@@ -184,6 +187,13 @@ intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
return NULL;
}
+static inline int intel_nhlt_ssp_device_type(struct device *dev,
+ struct nhlt_acpi_table *nhlt,
+ u8 virtual_bus_id)
+{
+ return -EINVAL;
+}
+
#endif
#endif
diff --git a/include/sound/tas2781-tlv.h b/include/sound/tas2781-tlv.h
index 4038dd421150..1dc59005d241 100644
--- a/include/sound/tas2781-tlv.h
+++ b/include/sound/tas2781-tlv.h
@@ -15,7 +15,7 @@
#ifndef __TAS2781_TLV_H__
#define __TAS2781_TLV_H__
-static const DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
+static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
static const DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0);
#endif
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index d801409b33cf..d55e53ac91bd 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -135,6 +135,7 @@ IF_HAVE_PG_ARCH_X(arch_3)
#define DEF_PAGETYPE_NAME(_name) { PG_##_name, __stringify(_name) }
#define __def_pagetype_names \
+ DEF_PAGETYPE_NAME(hugetlb), \
DEF_PAGETYPE_NAME(offline), \
DEF_PAGETYPE_NAME(guard), \
DEF_PAGETYPE_NAME(table), \
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index 447a8c21cf57..da23484268df 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -24,8 +24,8 @@
E_(netfs_read_trace_write_begin, "WRITEBEGN")
#define netfs_write_traces \
+ EM(netfs_write_trace_copy_to_cache, "COPY2CACH") \
EM(netfs_write_trace_dio_write, "DIO-WRITE") \
- EM(netfs_write_trace_launder, "LAUNDER ") \
EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \
EM(netfs_write_trace_writeback, "WRITEBACK") \
E_(netfs_write_trace_writethrough, "WRITETHRU")
@@ -34,9 +34,9 @@
EM(NETFS_READAHEAD, "RA") \
EM(NETFS_READPAGE, "RP") \
EM(NETFS_READ_FOR_WRITE, "RW") \
+ EM(NETFS_COPY_TO_CACHE, "CC") \
EM(NETFS_WRITEBACK, "WB") \
EM(NETFS_WRITETHROUGH, "WT") \
- EM(NETFS_LAUNDER_WRITE, "LW") \
EM(NETFS_UNBUFFERED_WRITE, "UW") \
EM(NETFS_DIO_READ, "DR") \
E_(NETFS_DIO_WRITE, "DW")
@@ -44,14 +44,18 @@
#define netfs_rreq_traces \
EM(netfs_rreq_trace_assess, "ASSESS ") \
EM(netfs_rreq_trace_copy, "COPY ") \
+ EM(netfs_rreq_trace_collect, "COLLECT") \
EM(netfs_rreq_trace_done, "DONE ") \
EM(netfs_rreq_trace_free, "FREE ") \
EM(netfs_rreq_trace_redirty, "REDIRTY") \
EM(netfs_rreq_trace_resubmit, "RESUBMT") \
+ EM(netfs_rreq_trace_set_pause, "PAUSE ") \
EM(netfs_rreq_trace_unlock, "UNLOCK ") \
EM(netfs_rreq_trace_unmark, "UNMARK ") \
EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \
+ EM(netfs_rreq_trace_wait_pause, "WT-PAUS") \
EM(netfs_rreq_trace_wake_ip, "WAKE-IP") \
+ EM(netfs_rreq_trace_unpause, "UNPAUSE") \
E_(netfs_rreq_trace_write_done, "WR-DONE")
#define netfs_sreq_sources \
@@ -64,11 +68,15 @@
E_(NETFS_INVALID_WRITE, "INVL")
#define netfs_sreq_traces \
+ EM(netfs_sreq_trace_discard, "DSCRD") \
EM(netfs_sreq_trace_download_instead, "RDOWN") \
+ EM(netfs_sreq_trace_fail, "FAIL ") \
EM(netfs_sreq_trace_free, "FREE ") \
EM(netfs_sreq_trace_limited, "LIMIT") \
EM(netfs_sreq_trace_prepare, "PREP ") \
+ EM(netfs_sreq_trace_prep_failed, "PRPFL") \
EM(netfs_sreq_trace_resubmit_short, "SHORT") \
+ EM(netfs_sreq_trace_retry, "RETRY") \
EM(netfs_sreq_trace_submit, "SUBMT") \
EM(netfs_sreq_trace_terminated, "TERM ") \
EM(netfs_sreq_trace_write, "WRITE") \
@@ -88,6 +96,7 @@
#define netfs_rreq_ref_traces \
EM(netfs_rreq_trace_get_for_outstanding,"GET OUTSTND") \
EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \
+ EM(netfs_rreq_trace_get_work, "GET WORK ") \
EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \
EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \
EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \
@@ -95,19 +104,25 @@
EM(netfs_rreq_trace_put_return, "PUT RETURN ") \
EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \
EM(netfs_rreq_trace_put_work, "PUT WORK ") \
+ EM(netfs_rreq_trace_put_work_complete, "PUT WORK CP") \
+ EM(netfs_rreq_trace_put_work_nq, "PUT WORK NQ") \
EM(netfs_rreq_trace_see_work, "SEE WORK ") \
E_(netfs_rreq_trace_new, "NEW ")
#define netfs_sreq_ref_traces \
EM(netfs_sreq_trace_get_copy_to_cache, "GET COPY2C ") \
EM(netfs_sreq_trace_get_resubmit, "GET RESUBMIT") \
+ EM(netfs_sreq_trace_get_submit, "GET SUBMIT") \
EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \
EM(netfs_sreq_trace_new, "NEW ") \
+ EM(netfs_sreq_trace_put_cancel, "PUT CANCEL ") \
EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \
EM(netfs_sreq_trace_put_discard, "PUT DISCARD") \
+ EM(netfs_sreq_trace_put_done, "PUT DONE ") \
EM(netfs_sreq_trace_put_failed, "PUT FAILED ") \
EM(netfs_sreq_trace_put_merged, "PUT MERGED ") \
EM(netfs_sreq_trace_put_no_copy, "PUT NO COPY") \
+ EM(netfs_sreq_trace_put_oom, "PUT OOM ") \
EM(netfs_sreq_trace_put_wip, "PUT WIP ") \
EM(netfs_sreq_trace_put_work, "PUT WORK ") \
E_(netfs_sreq_trace_put_terminated, "PUT TERM ")
@@ -124,24 +139,33 @@
EM(netfs_streaming_filled_page, "mod-streamw-f") \
EM(netfs_streaming_cont_filled_page, "mod-streamw-f+") \
/* The rest are for writeback */ \
+ EM(netfs_folio_trace_cancel_copy, "cancel-copy") \
EM(netfs_folio_trace_clear, "clear") \
- EM(netfs_folio_trace_clear_s, "clear-s") \
+ EM(netfs_folio_trace_clear_cc, "clear-cc") \
EM(netfs_folio_trace_clear_g, "clear-g") \
- EM(netfs_folio_trace_copy_to_cache, "copy") \
- EM(netfs_folio_trace_end_copy, "end-copy") \
+ EM(netfs_folio_trace_clear_s, "clear-s") \
+ EM(netfs_folio_trace_copy_to_cache, "mark-copy") \
EM(netfs_folio_trace_filled_gaps, "filled-gaps") \
EM(netfs_folio_trace_kill, "kill") \
- EM(netfs_folio_trace_launder, "launder") \
+ EM(netfs_folio_trace_kill_cc, "kill-cc") \
+ EM(netfs_folio_trace_kill_g, "kill-g") \
+ EM(netfs_folio_trace_kill_s, "kill-s") \
EM(netfs_folio_trace_mkwrite, "mkwrite") \
EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \
+ EM(netfs_folio_trace_not_under_wback, "!wback") \
EM(netfs_folio_trace_read_gaps, "read-gaps") \
- EM(netfs_folio_trace_redirty, "redirty") \
EM(netfs_folio_trace_redirtied, "redirtied") \
EM(netfs_folio_trace_store, "store") \
+ EM(netfs_folio_trace_store_copy, "store-copy") \
EM(netfs_folio_trace_store_plus, "store+") \
EM(netfs_folio_trace_wthru, "wthru") \
E_(netfs_folio_trace_wthru_plus, "wthru+")
+#define netfs_collect_contig_traces \
+ EM(netfs_contig_trace_collect, "Collect") \
+ EM(netfs_contig_trace_jump, "-->JUMP-->") \
+ E_(netfs_contig_trace_unlock, "Unlock")
+
#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
@@ -158,6 +182,7 @@ enum netfs_failure { netfs_failures } __mode(byte);
enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte);
enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
enum netfs_folio_trace { netfs_folio_traces } __mode(byte);
+enum netfs_collect_contig_trace { netfs_collect_contig_traces } __mode(byte);
#endif
@@ -179,6 +204,7 @@ netfs_failures;
netfs_rreq_ref_traces;
netfs_sreq_ref_traces;
netfs_folio_traces;
+netfs_collect_contig_traces;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -279,7 +305,7 @@ TRACE_EVENT(netfs_sreq,
__entry->start = sreq->start;
),
- TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx/%zx e=%d",
+ TP_printk("R=%08x[%x] %s %s f=%02x s=%llx %zx/%zx e=%d",
__entry->rreq, __entry->index,
__print_symbolic(__entry->source, netfs_sreq_sources),
__print_symbolic(__entry->what, netfs_sreq_traces),
@@ -319,7 +345,7 @@ TRACE_EVENT(netfs_failure,
__entry->start = sreq ? sreq->start : 0;
),
- TP_printk("R=%08x[%d] %s f=%02x s=%llx %zx/%zx %s e=%d",
+ TP_printk("R=%08x[%x] %s f=%02x s=%llx %zx/%zx %s e=%d",
__entry->rreq, __entry->index,
__print_symbolic(__entry->source, netfs_sreq_sources),
__entry->flags,
@@ -412,16 +438,18 @@ TRACE_EVENT(netfs_write_iter,
__field(unsigned long long, start )
__field(size_t, len )
__field(unsigned int, flags )
+ __field(unsigned int, ino )
),
TP_fast_assign(
__entry->start = iocb->ki_pos;
__entry->len = iov_iter_count(from);
+ __entry->ino = iocb->ki_filp->f_inode->i_ino;
__entry->flags = iocb->ki_flags;
),
- TP_printk("WRITE-ITER s=%llx l=%zx f=%x",
- __entry->start, __entry->len, __entry->flags)
+ TP_printk("WRITE-ITER i=%x s=%llx l=%zx f=%x",
+ __entry->ino, __entry->start, __entry->len, __entry->flags)
);
TRACE_EVENT(netfs_write,
@@ -433,9 +461,10 @@ TRACE_EVENT(netfs_write,
TP_STRUCT__entry(
__field(unsigned int, wreq )
__field(unsigned int, cookie )
+ __field(unsigned int, ino )
__field(enum netfs_write_trace, what )
__field(unsigned long long, start )
- __field(size_t, len )
+ __field(unsigned long long, len )
),
TP_fast_assign(
@@ -443,18 +472,213 @@ TRACE_EVENT(netfs_write,
struct fscache_cookie *__cookie = netfs_i_cookie(__ctx);
__entry->wreq = wreq->debug_id;
__entry->cookie = __cookie ? __cookie->debug_id : 0;
+ __entry->ino = wreq->inode->i_ino;
__entry->what = what;
__entry->start = wreq->start;
__entry->len = wreq->len;
),
- TP_printk("R=%08x %s c=%08x by=%llx-%llx",
+ TP_printk("R=%08x %s c=%08x i=%x by=%llx-%llx",
__entry->wreq,
__print_symbolic(__entry->what, netfs_write_traces),
__entry->cookie,
+ __entry->ino,
__entry->start, __entry->start + __entry->len - 1)
);
+TRACE_EVENT(netfs_collect,
+ TP_PROTO(const struct netfs_io_request *wreq),
+
+ TP_ARGS(wreq),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq )
+ __field(unsigned int, len )
+ __field(unsigned long long, transferred )
+ __field(unsigned long long, start )
+ ),
+
+ TP_fast_assign(
+ __entry->wreq = wreq->debug_id;
+ __entry->start = wreq->start;
+ __entry->len = wreq->len;
+ __entry->transferred = wreq->transferred;
+ ),
+
+ TP_printk("R=%08x s=%llx-%llx",
+ __entry->wreq,
+ __entry->start + __entry->transferred,
+ __entry->start + __entry->len)
+ );
+
+TRACE_EVENT(netfs_collect_contig,
+ TP_PROTO(const struct netfs_io_request *wreq, unsigned long long to,
+ enum netfs_collect_contig_trace type),
+
+ TP_ARGS(wreq, to, type),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq)
+ __field(enum netfs_collect_contig_trace, type)
+ __field(unsigned long long, contiguity)
+ __field(unsigned long long, to)
+ ),
+
+ TP_fast_assign(
+ __entry->wreq = wreq->debug_id;
+ __entry->type = type;
+ __entry->contiguity = wreq->contiguity;
+ __entry->to = to;
+ ),
+
+ TP_printk("R=%08x %llx -> %llx %s",
+ __entry->wreq,
+ __entry->contiguity,
+ __entry->to,
+ __print_symbolic(__entry->type, netfs_collect_contig_traces))
+ );
+
+TRACE_EVENT(netfs_collect_sreq,
+ TP_PROTO(const struct netfs_io_request *wreq,
+ const struct netfs_io_subrequest *subreq),
+
+ TP_ARGS(wreq, subreq),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq )
+ __field(unsigned int, subreq )
+ __field(unsigned int, stream )
+ __field(unsigned int, len )
+ __field(unsigned int, transferred )
+ __field(unsigned long long, start )
+ ),
+
+ TP_fast_assign(
+ __entry->wreq = wreq->debug_id;
+ __entry->subreq = subreq->debug_index;
+ __entry->stream = subreq->stream_nr;
+ __entry->start = subreq->start;
+ __entry->len = subreq->len;
+ __entry->transferred = subreq->transferred;
+ ),
+
+ TP_printk("R=%08x[%u:%02x] s=%llx t=%x/%x",
+ __entry->wreq, __entry->stream, __entry->subreq,
+ __entry->start, __entry->transferred, __entry->len)
+ );
+
+TRACE_EVENT(netfs_collect_folio,
+ TP_PROTO(const struct netfs_io_request *wreq,
+ const struct folio *folio,
+ unsigned long long fend,
+ unsigned long long collected_to),
+
+ TP_ARGS(wreq, folio, fend, collected_to),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq )
+ __field(unsigned long, index )
+ __field(unsigned long long, fend )
+ __field(unsigned long long, cleaned_to )
+ __field(unsigned long long, collected_to )
+ ),
+
+ TP_fast_assign(
+ __entry->wreq = wreq->debug_id;
+ __entry->index = folio->index;
+ __entry->fend = fend;
+ __entry->cleaned_to = wreq->cleaned_to;
+ __entry->collected_to = collected_to;
+ ),
+
+ TP_printk("R=%08x ix=%05lx r=%llx-%llx t=%llx/%llx",
+ __entry->wreq, __entry->index,
+ (unsigned long long)__entry->index * PAGE_SIZE, __entry->fend,
+ __entry->cleaned_to, __entry->collected_to)
+ );
+
+TRACE_EVENT(netfs_collect_state,
+ TP_PROTO(const struct netfs_io_request *wreq,
+ unsigned long long collected_to,
+ unsigned int notes),
+
+ TP_ARGS(wreq, collected_to, notes),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq )
+ __field(unsigned int, notes )
+ __field(unsigned long long, collected_to )
+ __field(unsigned long long, cleaned_to )
+ __field(unsigned long long, contiguity )
+ ),
+
+ TP_fast_assign(
+ __entry->wreq = wreq->debug_id;
+ __entry->notes = notes;
+ __entry->collected_to = collected_to;
+ __entry->cleaned_to = wreq->cleaned_to;
+ __entry->contiguity = wreq->contiguity;
+ ),
+
+ TP_printk("R=%08x cto=%llx fto=%llx ctg=%llx n=%x",
+ __entry->wreq, __entry->collected_to,
+ __entry->cleaned_to, __entry->contiguity,
+ __entry->notes)
+ );
+
+TRACE_EVENT(netfs_collect_gap,
+ TP_PROTO(const struct netfs_io_request *wreq,
+ const struct netfs_io_stream *stream,
+ unsigned long long jump_to, char type),
+
+ TP_ARGS(wreq, stream, jump_to, type),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq)
+ __field(unsigned char, stream)
+ __field(unsigned char, type)
+ __field(unsigned long long, from)
+ __field(unsigned long long, to)
+ ),
+
+ TP_fast_assign(
+ __entry->wreq = wreq->debug_id;
+ __entry->stream = stream->stream_nr;
+ __entry->from = stream->collected_to;
+ __entry->to = jump_to;
+ __entry->type = type;
+ ),
+
+ TP_printk("R=%08x[%x:] %llx->%llx %c",
+ __entry->wreq, __entry->stream,
+ __entry->from, __entry->to, __entry->type)
+ );
+
+TRACE_EVENT(netfs_collect_stream,
+ TP_PROTO(const struct netfs_io_request *wreq,
+ const struct netfs_io_stream *stream),
+
+ TP_ARGS(wreq, stream),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq)
+ __field(unsigned char, stream)
+ __field(unsigned long long, collected_to)
+ __field(unsigned long long, front)
+ ),
+
+ TP_fast_assign(
+ __entry->wreq = wreq->debug_id;
+ __entry->stream = stream->stream_nr;
+ __entry->collected_to = stream->collected_to;
+ __entry->front = stream->front ? stream->front->start : UINT_MAX;
+ ),
+
+ TP_printk("R=%08x[%x:] cto=%llx frn=%llx",
+ __entry->wreq, __entry->stream,
+ __entry->collected_to, __entry->front)
+ );
+
#undef EM
#undef E_
#endif /* _TRACE_NETFS_H */
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 2ef9c719772a..31b3e0d3e65f 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -708,6 +708,33 @@ TRACE_EVENT_RCU(rcu_invoke_kfree_bulk_callback,
);
/*
+ * Tracepoint for a normal synchronize_rcu() states. The first argument
+ * is the RCU flavor, the second argument is a pointer to rcu_head the
+ * last one is an event.
+ */
+TRACE_EVENT_RCU(rcu_sr_normal,
+
+ TP_PROTO(const char *rcuname, struct rcu_head *rhp, const char *srevent),
+
+ TP_ARGS(rcuname, rhp, srevent),
+
+ TP_STRUCT__entry(
+ __field(const char *, rcuname)
+ __field(void *, rhp)
+ __field(const char *, srevent)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->rhp = rhp;
+ __entry->srevent = srevent;
+ ),
+
+ TP_printk("%s rhp=0x%p event=%s",
+ __entry->rcuname, __entry->rhp, __entry->srevent)
+);
+
+/*
* Tracepoint for exiting rcu_do_batch after RCU callbacks have been
* invoked. The first argument is the name of the RCU flavor,
* the second argument is number of callbacks actually invoked,
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index ba2d96a1bc2f..f50fcafc69de 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -609,7 +609,7 @@ TRACE_EVENT(rpcgss_context,
__field(unsigned int, timeout)
__field(u32, window_size)
__field(int, len)
- __string(acceptor, data)
+ __string_len(acceptor, data, len)
),
TP_fast_assign(
@@ -618,7 +618,7 @@ TRACE_EVENT(rpcgss_context,
__entry->timeout = timeout;
__entry->window_size = window_size;
__entry->len = len;
- strncpy(__get_str(acceptor), data, len);
+ __assign_str(acceptor, data);
),
TP_printk("win_size=%u expiry=%lu now=%lu timeout=%u acceptor=%.*s",
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index d115d64c4011..68973f650c26 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -420,6 +420,41 @@ TRACE_EVENT(sched_process_exec,
__entry->pid, __entry->old_pid)
);
+/**
+ * sched_prepare_exec - called before setting up new exec
+ * @task: pointer to the current task
+ * @bprm: pointer to linux_binprm used for new exec
+ *
+ * Called before flushing the old exec, where @task is still unchanged, but at
+ * the point of no return during switching to the new exec. At the point it is
+ * called the exec will either succeed, or on failure terminate the task. Also
+ * see the "sched_process_exec" tracepoint, which is called right after @task
+ * has successfully switched to the new exec.
+ */
+TRACE_EVENT(sched_prepare_exec,
+
+ TP_PROTO(struct task_struct *task, struct linux_binprm *bprm),
+
+ TP_ARGS(task, bprm),
+
+ TP_STRUCT__entry(
+ __string( interp, bprm->interp )
+ __string( filename, bprm->filename )
+ __field( pid_t, pid )
+ __string( comm, task->comm )
+ ),
+
+ TP_fast_assign(
+ __assign_str(interp, bprm->interp);
+ __assign_str(filename, bprm->filename);
+ __entry->pid = task->pid;
+ __assign_str(comm, task->comm);
+ ),
+
+ TP_printk("interp=%s filename=%s pid=%d comm=%s",
+ __get_str(interp), __get_str(filename),
+ __entry->pid, __get_str(comm))
+);
#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h
index 422c1ad9484d..127300481123 100644
--- a/include/trace/events/scmi.h
+++ b/include/trace/events/scmi.h
@@ -7,6 +7,8 @@
#include <linux/tracepoint.h>
+#define TRACE_SCMI_MAX_TAG_LEN 6
+
TRACE_EVENT(scmi_fc_call,
TP_PROTO(u8 protocol_id, u8 msg_id, u32 res_id, u32 val1, u32 val2),
TP_ARGS(protocol_id, msg_id, res_id, val1, val2),
@@ -150,7 +152,7 @@ TRACE_EVENT(scmi_msg_dump,
__field(u8, channel_id)
__field(u8, protocol_id)
__field(u8, msg_id)
- __array(char, tag, 5)
+ __array(char, tag, TRACE_SCMI_MAX_TAG_LEN)
__field(u16, seq)
__field(int, status)
__field(size_t, len)
@@ -162,7 +164,7 @@ TRACE_EVENT(scmi_msg_dump,
__entry->channel_id = channel_id;
__entry->protocol_id = protocol_id;
__entry->msg_id = msg_id;
- strscpy(__entry->tag, tag, 5);
+ strscpy(__entry->tag, tag, TRACE_SCMI_MAX_TAG_LEN);
__entry->seq = seq;
__entry->status = status;
__entry->len = len;
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index d87410a8443a..af024d90453d 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -77,11 +77,6 @@ struct drm_etnaviv_timespec {
#define ETNAVIV_PARAM_GPU_PRODUCT_ID 0x1c
#define ETNAVIV_PARAM_GPU_CUSTOMER_ID 0x1d
#define ETNAVIV_PARAM_GPU_ECO_ID 0x1e
-#define ETNAVIV_PARAM_GPU_NN_CORE_COUNT 0x1f
-#define ETNAVIV_PARAM_GPU_NN_MAD_PER_CORE 0x20
-#define ETNAVIV_PARAM_GPU_TP_CORE_COUNT 0x21
-#define ETNAVIV_PARAM_GPU_ON_CHIP_SRAM_SIZE 0x22
-#define ETNAVIV_PARAM_GPU_AXI_SRAM_SIZE 0x23
#define ETNA_MAX_PIPES 4
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 5730c67f0617..20a6c0fc149e 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -32,7 +32,7 @@ enum {
CRYPTO_MSG_UPDATEALG,
CRYPTO_MSG_GETALG,
CRYPTO_MSG_DELRNG,
- CRYPTO_MSG_GETSTAT,
+ CRYPTO_MSG_GETSTAT, /* No longer supported, do not use. */
__CRYPTO_MSG_MAX
};
#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1)
@@ -54,16 +54,16 @@ enum crypto_attr_type_t {
CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */
CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */
- CRYPTOCFGA_STAT_LARVAL, /* struct crypto_stat */
- CRYPTOCFGA_STAT_HASH, /* struct crypto_stat */
- CRYPTOCFGA_STAT_BLKCIPHER, /* struct crypto_stat */
- CRYPTOCFGA_STAT_AEAD, /* struct crypto_stat */
- CRYPTOCFGA_STAT_COMPRESS, /* struct crypto_stat */
- CRYPTOCFGA_STAT_RNG, /* struct crypto_stat */
- CRYPTOCFGA_STAT_CIPHER, /* struct crypto_stat */
- CRYPTOCFGA_STAT_AKCIPHER, /* struct crypto_stat */
- CRYPTOCFGA_STAT_KPP, /* struct crypto_stat */
- CRYPTOCFGA_STAT_ACOMP, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_LARVAL, /* No longer supported, do not use. */
+ CRYPTOCFGA_STAT_HASH, /* No longer supported, do not use. */
+ CRYPTOCFGA_STAT_BLKCIPHER, /* No longer supported, do not use. */
+ CRYPTOCFGA_STAT_AEAD, /* No longer supported, do not use. */
+ CRYPTOCFGA_STAT_COMPRESS, /* No longer supported, do not use. */
+ CRYPTOCFGA_STAT_RNG, /* No longer supported, do not use. */
+ CRYPTOCFGA_STAT_CIPHER, /* No longer supported, do not use. */
+ CRYPTOCFGA_STAT_AKCIPHER, /* No longer supported, do not use. */
+ CRYPTOCFGA_STAT_KPP, /* No longer supported, do not use. */
+ CRYPTOCFGA_STAT_ACOMP, /* No longer supported, do not use. */
__CRYPTOCFGA_MAX
#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
@@ -79,6 +79,7 @@ struct crypto_user_alg {
__u32 cru_flags;
};
+/* No longer supported, do not use. */
struct crypto_stat_aead {
char type[CRYPTO_MAX_NAME];
__u64 stat_encrypt_cnt;
@@ -88,6 +89,7 @@ struct crypto_stat_aead {
__u64 stat_err_cnt;
};
+/* No longer supported, do not use. */
struct crypto_stat_akcipher {
char type[CRYPTO_MAX_NAME];
__u64 stat_encrypt_cnt;
@@ -99,6 +101,7 @@ struct crypto_stat_akcipher {
__u64 stat_err_cnt;
};
+/* No longer supported, do not use. */
struct crypto_stat_cipher {
char type[CRYPTO_MAX_NAME];
__u64 stat_encrypt_cnt;
@@ -108,6 +111,7 @@ struct crypto_stat_cipher {
__u64 stat_err_cnt;
};
+/* No longer supported, do not use. */
struct crypto_stat_compress {
char type[CRYPTO_MAX_NAME];
__u64 stat_compress_cnt;
@@ -117,6 +121,7 @@ struct crypto_stat_compress {
__u64 stat_err_cnt;
};
+/* No longer supported, do not use. */
struct crypto_stat_hash {
char type[CRYPTO_MAX_NAME];
__u64 stat_hash_cnt;
@@ -124,6 +129,7 @@ struct crypto_stat_hash {
__u64 stat_err_cnt;
};
+/* No longer supported, do not use. */
struct crypto_stat_kpp {
char type[CRYPTO_MAX_NAME];
__u64 stat_setsecret_cnt;
@@ -132,6 +138,7 @@ struct crypto_stat_kpp {
__u64 stat_err_cnt;
};
+/* No longer supported, do not use. */
struct crypto_stat_rng {
char type[CRYPTO_MAX_NAME];
__u64 stat_generate_cnt;
@@ -140,6 +147,7 @@ struct crypto_stat_rng {
__u64 stat_err_cnt;
};
+/* No longer supported, do not use. */
struct crypto_stat_larval {
char type[CRYPTO_MAX_NAME];
};
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index 282e90aeb163..c0bcc185fa48 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -9,6 +9,14 @@
#define F_GETLEASE (F_LINUX_SPECIFIC_BASE + 1)
/*
+ * Request notifications on a directory.
+ * See below for events that may be notified.
+ */
+#define F_NOTIFY (F_LINUX_SPECIFIC_BASE + 2)
+
+#define F_DUPFD_QUERY (F_LINUX_SPECIFIC_BASE + 3)
+
+/*
* Cancel a blocking posix lock; internal use only until we expose an
* asynchronous lock api to userspace:
*/
@@ -18,12 +26,6 @@
#define F_DUPFD_CLOEXEC (F_LINUX_SPECIFIC_BASE + 6)
/*
- * Request nofications on a directory.
- * See below for events that may be notified.
- */
-#define F_NOTIFY (F_LINUX_SPECIFIC_BASE+2)
-
-/*
* Set and get of pipe page size array
*/
#define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
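
F_DUPFD_QUERY compares two file descriptors: per the patch introducing it,
fcntl(fd1, F_DUPFD_QUERY, fd2) returns 1 when both refer to the same open
file description and 0 when they do not. Userspace usage sketch (requires
headers that already define the new constant):

    #include <fcntl.h>

    /* Returns 1 (same file description), 0 (different), -1 on error. */
    static int same_file_description(int fd1, int fd2)
    {
            return fcntl(fd1, F_DUPFD_QUERY, fd2);
    }
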
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 7bd10201a02b..994bf7af0efe 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -72,6 +72,7 @@ struct io_uring_sqe {
__u32 waitid_flags;
__u32 futex_flags;
__u32 install_fd_flags;
+ __u32 nop_flags;
};
__u64 user_data; /* data to be passed back at completion time */
/* pack this to avoid bogus arm OABI complaints */
@@ -115,7 +116,7 @@ struct io_uring_sqe {
*/
#define IORING_FILE_INDEX_ALLOC (~0U)
-enum {
+enum io_uring_sqe_flags_bit {
IOSQE_FIXED_FILE_BIT,
IOSQE_IO_DRAIN_BIT,
IOSQE_IO_LINK_BIT,
@@ -351,11 +352,20 @@ enum io_uring_op {
* 0 is reported if zerocopy was actually possible.
* IORING_NOTIF_USAGE_ZC_COPIED if data was copied
* (at least partially).
+ *
+ * IORING_RECVSEND_BUNDLE Used with IOSQE_BUFFER_SELECT. If set, send or
+ * recv will grab as many buffers as are available
+ * from the buffer group ID given and send them all.
+ * The completion result will be the number of
+ * buffers sent, with the starting buffer ID in
+ * cqe->flags as per usual for provided buffer
+ * usage. The buffers will be contiguous from the
+ * starting buffer ID.
*/
#define IORING_RECVSEND_POLL_FIRST (1U << 0)
#define IORING_RECV_MULTISHOT (1U << 1)
#define IORING_RECVSEND_FIXED_BUF (1U << 2)
#define IORING_SEND_ZC_REPORT_USAGE (1U << 3)
+#define IORING_RECVSEND_BUNDLE (1U << 4)
/*
* cqe.res for IORING_CQE_F_NOTIF if
@@ -370,11 +380,13 @@ enum io_uring_op {
* accept flags stored in sqe->ioprio
*/
#define IORING_ACCEPT_MULTISHOT (1U << 0)
+#define IORING_ACCEPT_DONTWAIT (1U << 1)
+#define IORING_ACCEPT_POLL_FIRST (1U << 2)
/*
* IORING_OP_MSG_RING command types, stored in sqe->addr
*/
-enum {
+enum io_uring_msg_ring_flags {
IORING_MSG_DATA, /* pass sqe->len as 'res' and off as user_data */
IORING_MSG_SEND_FD, /* send a registered fd to another ring */
};
@@ -397,6 +409,13 @@ enum {
#define IORING_FIXED_FD_NO_CLOEXEC (1U << 0)
/*
+ * IORING_OP_NOP flags (sqe->nop_flags)
+ *
+ * IORING_NOP_INJECT_RESULT Inject result from sqe->result
+ */
+#define IORING_NOP_INJECT_RESULT (1U << 0)
+
+/*
* IO completion data structure (Completion Queue Entry)
*/
struct io_uring_cqe {
@@ -425,9 +444,7 @@ struct io_uring_cqe {
#define IORING_CQE_F_SOCK_NONEMPTY (1U << 2)
#define IORING_CQE_F_NOTIF (1U << 3)
-enum {
- IORING_CQE_BUFFER_SHIFT = 16,
-};
+#define IORING_CQE_BUFFER_SHIFT 16
/*
* Magic offsets for the application to mmap the data it needs
@@ -522,11 +539,12 @@ struct io_uring_params {
#define IORING_FEAT_CQE_SKIP (1U << 11)
#define IORING_FEAT_LINKED_FILE (1U << 12)
#define IORING_FEAT_REG_REG_RING (1U << 13)
+#define IORING_FEAT_RECVSEND_BUNDLE (1U << 14)
/*
* io_uring_register(2) opcodes and arguments
*/
-enum {
+enum io_uring_register_op {
IORING_REGISTER_BUFFERS = 0,
IORING_UNREGISTER_BUFFERS = 1,
IORING_REGISTER_FILES = 2,
@@ -583,7 +601,7 @@ enum {
};
/* io-wq worker categories */
-enum {
+enum io_wq_type {
IO_WQ_BOUND,
IO_WQ_UNBOUND,
};
@@ -688,7 +706,7 @@ struct io_uring_buf_ring {
* IORING_OFF_PBUF_RING | (bgid << IORING_OFF_PBUF_SHIFT)
* to get a virtual mapping for the ring.
*/
-enum {
+enum io_uring_register_pbuf_ring_flags {
IOU_PBUF_RING_MMAP = 1,
};
@@ -719,7 +737,7 @@ struct io_uring_napi {
/*
* io_uring_restriction->opcode values
*/
-enum {
+enum io_uring_register_restriction_op {
/* Allow an io_uring_register(2) opcode */
IORING_RESTRICTION_REGISTER_OP = 0,
@@ -775,7 +793,7 @@ struct io_uring_recvmsg_out {
/*
* Argument for IORING_OP_URING_CMD when file is a socket
*/
-enum {
+enum io_uring_socket_op {
SOCKET_URING_OP_SIOCINQ = 0,
SOCKET_URING_OP_SIOCOUTQ,
SOCKET_URING_OP_GETSOCKOPT,
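
A hedged sketch of driving the new bundle flag from userspace with
liburing: the flag travels in sqe->ioprio next to the other send/recv
flags, IOSQE_BUFFER_SELECT plus a buffer group supply the data, and
IORING_FEAT_RECVSEND_BUNDLE should be checked first. Here ring, sockfd,
and bgid are assumed to be set up already:

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

    io_uring_prep_send(sqe, sockfd, NULL, 0, 0);   /* data from buffers */
    sqe->flags |= IOSQE_BUFFER_SELECT;             /* select from group */
    sqe->buf_group = bgid;                         /* registered group */
    sqe->ioprio |= IORING_RECVSEND_BUNDLE;         /* send them all */
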
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 9ce46edc62a5..2040a470ddb4 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -913,14 +913,25 @@ enum kfd_dbg_trap_exception_code {
KFD_EC_MASK(EC_DEVICE_NEW))
#define KFD_EC_MASK_PROCESS (KFD_EC_MASK(EC_PROCESS_RUNTIME) | \
KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
+#define KFD_EC_MASK_PACKET (KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED))
/* Checks for exception code types for KFD search */
+#define KFD_DBG_EC_IS_VALID(ecode) (ecode > EC_NONE && ecode < EC_MAX)
#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode) \
- (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
+ (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode) \
- (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
+ (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode) \
- (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
+ (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
+#define KFD_DBG_EC_TYPE_IS_PACKET(ecode) \
+ (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PACKET))
/* Runtime enable states */
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 2f2ee82d5517..67626d535316 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -126,8 +126,9 @@ struct statx {
__u64 stx_mnt_id;
__u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
__u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
+ __u64 stx_subvol; /* Subvolume identifier */
/* 0xa0 */
- __u64 __spare3[12]; /* Spare space for future expansion */
+ __u64 __spare3[11]; /* Spare space for future expansion */
/* 0x100 */
};
@@ -155,6 +156,7 @@ struct statx {
#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */
#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
+#define STATX_SUBVOL 0x00008000U /* Want/got stx_subvol */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
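
Userspace must both request STATX_SUBVOL and check it back in stx_mask
before trusting the new field, since older kernels and filesystems without
subvolumes leave it unset. A minimal sketch:

    #include <stdio.h>
    #include <fcntl.h>       /* AT_FDCWD */
    #include <sys/stat.h>    /* glibc >= 2.28 statx() wrapper */

    struct statx stx;

    if (statx(AT_FDCWD, "some/path", 0, STATX_SUBVOL, &stx) == 0 &&
        (stx.stx_mask & STATX_SUBVOL))
            printf("subvol id: %llu\n",
                   (unsigned long long)stx.stx_subvol);
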
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index 2ec6f35cda32..58154117d9b0 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -55,4 +55,12 @@
#define __counted_by(m)
#endif
+#ifndef __counted_by_le
+#define __counted_by_le(m)
+#endif
+
+#ifndef __counted_by_be
+#define __counted_by_be(m)
+#endif
+
#endif /* _UAPI_LINUX_STDDEF_H */
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
index 23e57164693c..d0430bee8292 100644
--- a/include/uapi/linux/tee.h
+++ b/include/uapi/linux/tee.h
@@ -56,6 +56,7 @@
*/
#define TEE_IMPL_ID_OPTEE 1
#define TEE_IMPL_ID_AMDTEE 2
+#define TEE_IMPL_ID_TSTEE 3
/*
* OP-TEE specific capabilities
diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h
index 43c51698195c..842bf1201ac4 100644
--- a/include/uapi/linux/vdpa.h
+++ b/include/uapi/linux/vdpa.h
@@ -57,7 +57,7 @@ enum vdpa_attr {
VDPA_ATTR_DEV_FEATURES, /* u64 */
VDPA_ATTR_DEV_BLK_CFG_CAPACITY, /* u64 */
- VDPA_ATTR_DEV_BLK_CFG_SEG_SIZE, /* u32 */
+ VDPA_ATTR_DEV_BLK_CFG_SIZE_MAX, /* u32 */
VDPA_ATTR_DEV_BLK_CFG_BLK_SIZE, /* u32 */
VDPA_ATTR_DEV_BLK_CFG_SEG_MAX, /* u32 */
VDPA_ATTR_DEV_BLK_CFG_NUM_QUEUES, /* u16 */
@@ -70,8 +70,8 @@ enum vdpa_attr {
VDPA_ATTR_DEV_BLK_CFG_DISCARD_SEC_ALIGN,/* u32 */
VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEC, /* u32 */
VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEG, /* u32 */
- VDPA_ATTR_DEV_BLK_CFG_READ_ONLY, /* u8 */
- VDPA_ATTR_DEV_BLK_CFG_FLUSH, /* u8 */
+ VDPA_ATTR_DEV_BLK_READ_ONLY, /* u8 */
+ VDPA_ATTR_DEV_BLK_FLUSH, /* u8 */
/* new attributes must be added above here */
VDPA_ATTR_MAX,
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index bea697390613..b95dd84eef2d 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -179,12 +179,6 @@
/* Get the config size */
#define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32)
-/* Get the count of all virtqueues */
-#define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32)
-
-/* Get the number of virtqueue groups. */
-#define VHOST_VDPA_GET_GROUP_NUM _IOR(VHOST_VIRTIO, 0x81, __u32)
-
/* Get the number of address spaces. */
#define VHOST_VDPA_GET_AS_NUM _IOR(VHOST_VIRTIO, 0x7A, unsigned int)
@@ -228,10 +222,17 @@
#define VHOST_VDPA_GET_VRING_DESC_GROUP _IOWR(VHOST_VIRTIO, 0x7F, \
struct vhost_vring_state)
+
+/* Get the count of all virtqueues */
+#define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32)
+
+/* Get the number of virtqueue groups. */
+#define VHOST_VDPA_GET_GROUP_NUM _IOR(VHOST_VIRTIO, 0x81, __u32)
+
/* Get the queue size of a specific virtqueue.
* userspace set the vring index in vhost_vring_state.index
* kernel set the queue size in vhost_vring_state.num
*/
-#define VHOST_VDPA_GET_VRING_SIZE _IOWR(VHOST_VIRTIO, 0x80, \
+#define VHOST_VDPA_GET_VRING_SIZE _IOWR(VHOST_VIRTIO, 0x82, \
struct vhost_vring_state)
#endif
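
The renumber matters to userspace: 0x80 collided with
VHOST_VDPA_GET_VQS_COUNT, so binaries built against the old header issued
the wrong request. Querying a vring's size with the fixed ioctl (vdpa_fd
and queue_idx are hypothetical):

    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    struct vhost_vring_state state = { .index = queue_idx };

    if (ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_SIZE, &state) == 0)
            queue_size = state.num;   /* kernel fills in the size */
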
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 6a77328be114..594b66e16395 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -228,7 +228,7 @@ enum {
#define XFRM_NR_MSGTYPES (XFRM_MSG_MAX + 1 - XFRM_MSG_BASE)
/*
- * Generic LSM security context for comunicating to user space
+ * Generic LSM security context for communicating to user space
* NOTE: Same format as sadb_x_sec_ctx
*/
struct xfrm_user_sec_ctx {
diff --git a/include/uapi/scsi/scsi_bsg_mpi3mr.h b/include/uapi/scsi/scsi_bsg_mpi3mr.h
index c72ce387286a..30a5c1a59376 100644
--- a/include/uapi/scsi/scsi_bsg_mpi3mr.h
+++ b/include/uapi/scsi/scsi_bsg_mpi3mr.h
@@ -382,7 +382,7 @@ struct mpi3mr_bsg_in_reply_buf {
__u8 mpi_reply_type;
__u8 rsvd1;
__u16 rsvd2;
- __u8 reply_buf[1];
+ __u8 reply_buf[];
};
/**
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index cb2afcebbdf5..a35e12f8e68b 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -328,6 +328,7 @@ struct ufs_pwr_mode_info {
* @op_runtime_config: called to config Operation and runtime regs Pointers
* @get_outstanding_cqs: called to get outstanding completion queues
* @config_esi: called to config Event Specific Interrupt
+ * @config_scsi_dev: called to configure SCSI device parameters
*/
struct ufs_hba_variant_ops {
const char *name;
diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h
index 5d5c0b8efff2..c71ddb6d4691 100644
--- a/include/vdso/datapage.h
+++ b/include/vdso/datapage.h
@@ -19,12 +19,6 @@
#include <vdso/time32.h>
#include <vdso/time64.h>
-#ifdef CONFIG_ARM64
-#include <asm/page-def.h>
-#else
-#include <asm/page.h>
-#endif
-
#ifdef CONFIG_ARCH_HAS_VDSO_DATA
#include <asm/vdso/data.h>
#else
@@ -132,7 +126,7 @@ extern struct vdso_data _timens_data[CS_BASES] __attribute__((visibility("hidden
*/
union vdso_data_store {
struct vdso_data data[CS_BASES];
- u8 page[PAGE_SIZE];
+ u8 page[1U << CONFIG_PAGE_SHIFT];
};
/*