Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/current.h | 2
-rw-r--r--  include/asm-generic/hugetlb.h | 2
-rw-r--r--  include/asm-generic/hyperv-tlfs.h | 1
-rw-r--r--  include/asm-generic/ide_iops.h | 39
-rw-r--r--  include/asm-generic/mshyperv.h | 17
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 7
-rw-r--r--  include/drm/drm_exec.h | 35
-rw-r--r--  include/drm/drm_kunit_helpers.h | 4
-rw-r--r--  include/dt-bindings/memory/mediatek,mt8188-memory-port.h | 489
-rw-r--r--  include/kvm/arm_pmu.h | 4
-rw-r--r--  include/linux/acpi.h | 9
-rw-r--r--  include/linux/aer.h | 2
-rw-r--r--  include/linux/amd-iommu.h | 1
-rw-r--r--  include/linux/arm-smccc.h | 2
-rw-r--r--  include/linux/atomic/atomic-arch-fallback.h | 10
-rw-r--r--  include/linux/audit.h | 2
-rw-r--r--  include/linux/bpf.h | 4
-rw-r--r--  include/linux/btf.h | 1
-rw-r--r--  include/linux/btf_ids.h | 2
-rw-r--r--  include/linux/buffer_head.h | 5
-rw-r--r--  include/linux/ceph/ceph_fs.h | 44
-rw-r--r--  include/linux/ceph/messenger.h | 40
-rw-r--r--  include/linux/ceph/osd_client.h | 93
-rw-r--r--  include/linux/ceph/rados.h | 4
-rw-r--r--  include/linux/cgroup-defs.h | 14
-rw-r--r--  include/linux/console.h | 3
-rw-r--r--  include/linux/cpufreq.h | 3
-rw-r--r--  include/linux/cpuhotplug.h | 1
-rw-r--r--  include/linux/dmar.h | 2
-rw-r--r--  include/linux/export-internal.h | 2
-rw-r--r--  include/linux/export.h | 4
-rw-r--r--  include/linux/fs.h | 46
-rw-r--r--  include/linux/ftrace.h | 5
-rw-r--r--  include/linux/gameport.h | 2
-rw-r--r--  include/linux/hid.h | 4
-rw-r--r--  include/linux/hugetlb.h | 6
-rw-r--r--  include/linux/hyperv.h | 6
-rw-r--r--  include/linux/i2c.h | 11
-rw-r--r--  include/linux/if_team.h | 2
-rw-r--r--  include/linux/instruction_pointer.h | 5
-rw-r--r--  include/linux/interrupt.h | 6
-rw-r--r--  include/linux/iommu.h | 15
-rw-r--r--  include/linux/ipv6.h | 1
-rw-r--r--  include/linux/kasan.h | 2
-rw-r--r--  include/linux/kvm_host.h | 53
-rw-r--r--  include/linux/leds.h | 3
-rw-r--r--  include/linux/libata.h | 54
-rw-r--r--  include/linux/maple_tree.h | 11
-rw-r--r--  include/linux/memcontrol.h | 4
-rw-r--r--  include/linux/mfd/88pm860x.h | 6
-rw-r--r--  include/linux/mfd/abx500/ab8500.h | 4
-rw-r--r--  include/linux/mfd/dbx500-prcmu.h | 21
-rw-r--r--  include/linux/mfd/hi655x-pmic.h | 1
-rw-r--r--  include/linux/mfd/max77686-private.h | 4
-rw-r--r--  include/linux/mfd/rz-mtu3.h | 66
-rw-r--r--  include/linux/micrel_phy.h | 7
-rw-r--r--  include/linux/misc_cgroup.h | 28
-rw-r--r--  include/linux/mtd/mtd.h | 2
-rw-r--r--  include/linux/mtd/rawnand.h | 1
-rw-r--r--  include/linux/netfilter/nf_conntrack_sctp.h | 1
-rw-r--r--  include/linux/nfs_fs_sb.h | 1
-rw-r--r--  include/linux/nfs_page.h | 4
-rw-r--r--  include/linux/of.h | 4
-rw-r--r--  include/linux/oid_registry.h | 1
-rw-r--r--  include/linux/percpu_counter.h | 41
-rw-r--r--  include/linux/pgtable.h | 10
-rw-r--r--  include/linux/phylink.h | 4
-rw-r--r--  include/linux/platform_data/rtc-ds2404.h | 20
-rw-r--r--  include/linux/pwm.h | 6
-rw-r--r--  include/linux/raid/pq.h | 4
-rw-r--r--  include/linux/remoteproc.h | 4
-rw-r--r--  include/linux/resume_user_mode.h | 2
-rw-r--r--  include/linux/rmap.h | 1
-rw-r--r--  include/linux/rpmsg.h | 15
-rw-r--r--  include/linux/rtc.h | 1
-rw-r--r--  include/linux/sched.h | 14
-rw-r--r--  include/linux/seqlock.h | 2
-rw-r--r--  include/linux/soundwire/sdw.h | 12
-rw-r--r--  include/linux/soundwire/sdw_intel.h | 7
-rw-r--r--  include/linux/sunrpc/xdr.h | 4
-rw-r--r--  include/linux/swiotlb.h | 23
-rw-r--r--  include/linux/tca6416_keypad.h | 1
-rw-r--r--  include/linux/thermal.h | 87
-rw-r--r--  include/linux/trace_events.h | 9
-rw-r--r--  include/linux/tracefs.h | 23
-rw-r--r--  include/linux/vdpa.h | 4
-rw-r--r--  include/linux/virtio.h | 24
-rw-r--r--  include/linux/workqueue.h | 115
-rw-r--r--  include/linux/xarray.h | 18
-rw-r--r--  include/net/bluetooth/hci_core.h | 2
-rw-r--r--  include/net/cfg80211.h | 6
-rw-r--r--  include/net/ip.h | 3
-rw-r--r--  include/net/ip6_fib.h | 5
-rw-r--r--  include/net/ip_fib.h | 6
-rw-r--r--  include/net/ip_tunnels.h | 15
-rw-r--r--  include/net/ipv6.h | 7
-rw-r--r--  include/net/mana/mana.h | 5
-rw-r--r--  include/net/neighbour.h | 2
-rw-r--r--  include/net/netfilter/nf_tables.h | 7
-rw-r--r--  include/net/page_pool/helpers.h | 6
-rw-r--r--  include/net/scm.h | 14
-rw-r--r--  include/net/sock.h | 29
-rw-r--r--  include/net/tcp.h | 6
-rw-r--r--  include/rdma/ib_verbs.h | 2
-rw-r--r--  include/rdma/iw_cm.h | 21
-rw-r--r--  include/rv/da_monitor.h | 2
-rw-r--r--  include/scsi/libsas.h | 32
-rw-r--r--  include/scsi/scsi.h | 3
-rw-r--r--  include/scsi/scsi_device.h | 5
-rw-r--r--  include/scsi/scsi_host.h | 4
-rw-r--r--  include/scsi/scsi_transport_iscsi.h | 1
-rw-r--r--  include/soc/mediatek/smi.h | 1
-rw-r--r--  include/sound/dmaengine_pcm.h | 2
-rw-r--r--  include/sound/soc-component.h | 4
-rw-r--r--  include/sound/soc.h | 2
-rw-r--r--  include/target/iscsi/iscsi_target_core.h | 4
-rw-r--r--  include/trace/events/task.h | 2
-rw-r--r--  include/trace/events/xen.h | 12
-rw-r--r--  include/uapi/drm/nouveau_drm.h | 10
-rw-r--r--  include/uapi/linux/bpf.h | 4
-rw-r--r--  include/uapi/linux/cgroupstats.h | 2
-rw-r--r--  include/uapi/linux/elf.h | 2
-rw-r--r--  include/uapi/linux/fuse.h | 60
-rw-r--r--  include/uapi/linux/netfilter/nf_tables.h | 1
-rw-r--r--  include/uapi/linux/rpmsg.h | 10
-rw-r--r--  include/uapi/linux/stddef.h | 7
-rw-r--r--  include/uapi/linux/vhost_types.h | 4
-rw-r--r--  include/uapi/rdma/bnxt_re-abi.h | 7
-rw-r--r--  include/uapi/rdma/irdma-abi.h | 9
-rw-r--r--  include/uapi/scsi/scsi_bsg_ufs.h | 52
-rw-r--r--  include/ufs/ufs.h | 90
-rw-r--r--  include/ufs/ufs_quirks.h | 6
-rw-r--r--  include/ufs/ufshcd.h | 60
-rw-r--r--  include/ufs/ufshci.h | 53
-rw-r--r--  include/ufs/unipro.h | 6
-rw-r--r--  include/xen/arm/hypervisor.h | 12
-rw-r--r--  include/xen/events.h | 3
137 files changed, 1555 insertions, 739 deletions
diff --git a/include/asm-generic/current.h b/include/asm-generic/current.h
index 3a2e224b9fa0..9c2aeecbd05a 100644
--- a/include/asm-generic/current.h
+++ b/include/asm-generic/current.h
@@ -2,9 +2,11 @@
#ifndef __ASM_GENERIC_CURRENT_H
#define __ASM_GENERIC_CURRENT_H
+#ifndef __ASSEMBLY__
#include <linux/thread_info.h>
#define get_current() (current_thread_info()->task)
#define current get_current()
+#endif
#endif /* __ASM_GENERIC_CURRENT_H */
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 4da02798a00b..6dcf4d576970 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -76,7 +76,7 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+ pte_t *ptep, pte_t pte, unsigned long sz)
{
set_pte_at(mm, addr, ptep, pte);
}
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index f4e4cc4f965f..fdac4a1714ec 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -223,6 +223,7 @@ enum HV_GENERIC_SET_FORMAT {
#define HV_STATUS_INVALID_PORT_ID 17
#define HV_STATUS_INVALID_CONNECTION_ID 18
#define HV_STATUS_INSUFFICIENT_BUFFERS 19
+#define HV_STATUS_TIME_OUT 120
#define HV_STATUS_VTL_ALREADY_ENABLED 134
/*
diff --git a/include/asm-generic/ide_iops.h b/include/asm-generic/ide_iops.h
deleted file mode 100644
index 81dfa3ee5e06..000000000000
--- a/include/asm-generic/ide_iops.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Generic I/O and MEMIO string operations. */
-
-#define __ide_insw insw
-#define __ide_insl insl
-#define __ide_outsw outsw
-#define __ide_outsl outsl
-
-static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u16 *)addr = readw(port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u32 *)addr = readl(port);
- addr += 4;
- }
-}
-
-static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- writew(*(u16 *)addr, port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
-{
- while (count--) {
- writel(*(u32 *)addr, port);
- addr += 4;
- }
-}
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index a8f4b653ef4e..430f0ae0dde2 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -36,15 +36,22 @@ struct ms_hyperv_info {
u32 nested_features;
u32 max_vp_index;
u32 max_lp_index;
- u32 isolation_config_a;
+ u8 vtl;
+ union {
+ u32 isolation_config_a;
+ struct {
+ u32 paravisor_present : 1;
+ u32 reserved_a1 : 31;
+ };
+ };
union {
u32 isolation_config_b;
struct {
u32 cvm_type : 4;
- u32 reserved1 : 1;
+ u32 reserved_b1 : 1;
u32 shared_gpa_boundary_active : 1;
u32 shared_gpa_boundary_bits : 6;
- u32 reserved2 : 20;
+ u32 reserved_b2 : 20;
};
};
u64 shared_gpa_boundary;
@@ -57,7 +64,8 @@ extern void * __percpu *hyperv_pcpu_output_arg;
extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
-extern bool hv_isolation_type_snp(void);
+bool hv_isolation_type_snp(void);
+bool hv_isolation_type_tdx(void);
/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
static inline int hv_result(u64 status)
@@ -274,6 +282,7 @@ enum hv_isolation_type hv_get_isolation_type(void);
bool hv_is_isolation_supported(void);
bool hv_isolation_type_snp(void);
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
+u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
void hyperv_cleanup(void);
bool hv_query_ext_cap(u64 cap_query);
void hv_setup_dma_ops(struct device *dev, bool coherent);
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 9c59409104f6..67d8dd2f1bde 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -138,13 +138,6 @@
* are handled as text/data or they can be discarded (which
* often happens at runtime)
*/
-#ifdef CONFIG_HOTPLUG_CPU
-#define CPU_KEEP(sec) *(.cpu##sec)
-#define CPU_DISCARD(sec)
-#else
-#define CPU_KEEP(sec)
-#define CPU_DISCARD(sec) *(.cpu##sec)
-#endif
#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec) *(.mem##sec)
diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h
index e0462361adf9..b5bf0b6da791 100644
--- a/include/drm/drm_exec.h
+++ b/include/drm/drm_exec.h
@@ -52,6 +52,20 @@ struct drm_exec {
};
/**
+ * drm_exec_obj() - Return the object for a given drm_exec index
+ * @exec: Pointer to the drm_exec context
+ * @index: The index.
+ *
+ * Return: Pointer to the locked object corresponding to @index if
+ * index is within the number of locked objects. NULL otherwise.
+ */
+static inline struct drm_gem_object *
+drm_exec_obj(struct drm_exec *exec, unsigned long index)
+{
+ return index < exec->num_objects ? exec->objects[index] : NULL;
+}
+
+/**
* drm_exec_for_each_locked_object - iterate over all the locked objects
* @exec: drm_exec object
* @index: unsigned long index for the iteration
@@ -59,10 +73,23 @@ struct drm_exec {
*
* Iterate over all the locked GEM objects inside the drm_exec object.
*/
-#define drm_exec_for_each_locked_object(exec, index, obj) \
- for (index = 0, obj = (exec)->objects[0]; \
- index < (exec)->num_objects; \
- ++index, obj = (exec)->objects[index])
+#define drm_exec_for_each_locked_object(exec, index, obj) \
+ for ((index) = 0; ((obj) = drm_exec_obj(exec, index)); ++(index))
+
+/**
+ * drm_exec_for_each_locked_object_reverse - iterate over all the locked
+ * objects in reverse locking order
+ * @exec: drm_exec object
+ * @index: unsigned long index for the iteration
+ * @obj: the current GEM object
+ *
+ * Iterate over all the locked GEM objects inside the drm_exec object in
+ * reverse locking order. Note that @index may go below zero and wrap,
+ * but that will be caught by drm_exec_obj(), returning a NULL object.
+ */
+#define drm_exec_for_each_locked_object_reverse(exec, index, obj) \
+ for ((index) = (exec)->num_objects - 1; \
+ ((obj) = drm_exec_obj(exec, index)); --(index))
/**
* drm_exec_until_all_locked - loop until all GEM objects are locked
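Illustrative sketch, not part of the patch: the new drm_exec_obj() helper and the rewritten iteration macros above can be exercised as below. The function name example_walk_locked() is made up; it only assumes the drm_exec context already holds some locked GEM objects (e.g. prepared via drm_exec_prepare_obj()).

#include <drm/drm_exec.h>
#include <linux/printk.h>

static void example_walk_locked(struct drm_exec *exec)
{
	struct drm_gem_object *obj;
	unsigned long index;

	/* Forward walk; ends when drm_exec_obj() returns NULL past the last entry. */
	drm_exec_for_each_locked_object(exec, index, obj)
		pr_info("locked object %lu: %p\n", index, obj);

	/* Reverse walk; once index wraps past zero, drm_exec_obj() again returns NULL. */
	drm_exec_for_each_locked_object_reverse(exec, index, obj)
		pr_info("unwinding object %lu\n", index);
}
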
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
index 514c8a7a32f0..ba483c87f0e7 100644
--- a/include/drm/drm_kunit_helpers.h
+++ b/include/drm/drm_kunit_helpers.h
@@ -3,6 +3,8 @@
#ifndef DRM_KUNIT_HELPERS_H_
#define DRM_KUNIT_HELPERS_H_
+#include <linux/device.h>
+
#include <kunit/test.h>
struct drm_device;
@@ -51,7 +53,7 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test,
{
struct drm_driver *driver;
- driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL);
+ driver = devm_kzalloc(dev, sizeof(*driver), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, driver);
driver->driver_features = features;
diff --git a/include/dt-bindings/memory/mediatek,mt8188-memory-port.h b/include/dt-bindings/memory/mediatek,mt8188-memory-port.h
new file mode 100644
index 000000000000..337ab11262af
--- /dev/null
+++ b/include/dt-bindings/memory/mediatek,mt8188-memory-port.h
@@ -0,0 +1,489 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chengci Xu <chengci.xu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MEDIATEK_MT8188_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MEDIATEK_MT8188_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * MM IOMMU larbs:
+ * As listed below, some larbs are split into several instances (for example
+ * larb11 has larb11a/larb11b/larb11c), so the hardware larb indexes are not
+ * contiguous. These larbs are therefore re-indexed from the software's view.
+ */
+#define SMI_L0_ID 0
+#define SMI_L1_ID 1
+#define SMI_L2_ID 2
+#define SMI_L3_ID 3
+#define SMI_L4_ID 4
+#define SMI_L5_ID 5
+#define SMI_L6_ID 6
+#define SMI_L7_ID 7
+#define SMI_L9_ID 8
+#define SMI_L10_ID 9
+#define SMI_L11A_ID 10
+#define SMI_L11B_ID 11
+#define SMI_L11C_ID 12
+#define SMI_L12_ID 13
+#define SMI_L13_ID 14
+#define SMI_L14_ID 15
+#define SMI_L15_ID 16
+#define SMI_L16A_ID 17
+#define SMI_L16B_ID 18
+#define SMI_L17A_ID 19
+#define SMI_L17B_ID 20
+#define SMI_L19_ID 21
+#define SMI_L21_ID 22
+#define SMI_L23_ID 23
+#define SMI_L27_ID 24
+#define SMI_L28_ID 25
+
+/*
+ * MM IOMMU supports a 16GB DMA address space. We split it into four ranges:
+ * 0 ~ 4G, 4G ~ 8G, 8G ~ 12G and 12G ~ 16G; each master below may be placed
+ * in any one of these regions. BUT:
+ * a) Make sure all the ports inside a larb are in one range.
+ * b) The iova of any master can NOT cross the 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping in this SoC:
+ *
+ * modules dma-address-region larbs-ports
+ * disp 0 ~ 4G larb0/1/2/3
+ * vcodec 4G ~ 8G larb19(21)[1]/21(22)/23
+ * cam/mdp 8G ~ 12G the other larbs.
+ * N/A 12G ~ 16G
+ * CCU0 0x24000_0000 ~ 0x243ff_ffff larb27(24): port 0/1
+ * CCU1 0x24400_0000 ~ 0x247ff_ffff larb27(24): port 2/3
+ *
+ * This SoC has two MM IOMMU HWs; their larb connections are:
+ * iommu-vdo: larb0/2/5/9/10/11A/11C/13/16B/17B/19/21
+ * iommu-vpp: larb1/3/4/6/7/11B/12/14/15/16A/17A/23/27
+ *
+ * [1]: This is larb19, but the index is 21 from the SW view.
+ */
+
+/* MM IOMMU ports */
+/* LARB 0 -- VDO-0 */
+#define M4U_PORT_L0_DISP_RDMA1 MTK_M4U_ID(SMI_L0_ID, 0)
+#define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_ID(SMI_L0_ID, 1)
+#define M4U_PORT_L0_DISP_OVL0_RDMA0 MTK_M4U_ID(SMI_L0_ID, 2)
+#define M4U_PORT_L0_DISP_OVL0_RDMA1 MTK_M4U_ID(SMI_L0_ID, 3)
+#define M4U_PORT_L0_DISP_OVL0_HDR MTK_M4U_ID(SMI_L0_ID, 4)
+#define M4U_PORT_L0_DISP_POSTMASK0 MTK_M4U_ID(SMI_L0_ID, 5)
+#define M4U_PORT_L0_DISP_FAKE_ENG0 MTK_M4U_ID(SMI_L0_ID, 6)
+
+/* LARB 1 -- VD0-0 */
+#define M4U_PORT_L1_DISP_RDMA0 MTK_M4U_ID(SMI_L1_ID, 0)
+#define M4U_PORT_L1_DISP_WDMA1 MTK_M4U_ID(SMI_L1_ID, 1)
+#define M4U_PORT_L1_DISP_OVL1_RDMA0 MTK_M4U_ID(SMI_L1_ID, 2)
+#define M4U_PORT_L1_DISP_OVL1_RDMA1 MTK_M4U_ID(SMI_L1_ID, 3)
+#define M4U_PORT_L1_DISP_OVL1_HDR MTK_M4U_ID(SMI_L1_ID, 4)
+#define M4U_PORT_L1_DISP_WROT0 MTK_M4U_ID(SMI_L1_ID, 5)
+#define M4U_PORT_L1_DISP_FAKE_ENG1 MTK_M4U_ID(SMI_L1_ID, 6)
+
+/* LARB 2 -- VDO-1 */
+#define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_ID(SMI_L2_ID, 0)
+#define M4U_PORT_L2_MDP_RDMA2 MTK_M4U_ID(SMI_L2_ID, 1)
+#define M4U_PORT_L2_MDP_RDMA4 MTK_M4U_ID(SMI_L2_ID, 2)
+#define M4U_PORT_L2_MDP_RDMA6 MTK_M4U_ID(SMI_L2_ID, 3)
+#define M4U_PORT_L2_DISP_FAKE1 MTK_M4U_ID(SMI_L2_ID, 4)
+
+/* LARB 3 -- VDO-1 */
+#define M4U_PORT_L3_MDP_RDMA1 MTK_M4U_ID(SMI_L3_ID, 0)
+#define M4U_PORT_L3_MDP_RDMA3 MTK_M4U_ID(SMI_L3_ID, 1)
+#define M4U_PORT_L3_MDP_RDMA5 MTK_M4U_ID(SMI_L3_ID, 2)
+#define M4U_PORT_L3_MDP_RDMA7 MTK_M4U_ID(SMI_L3_ID, 3)
+#define M4U_PORT_L3_HDR_DS_SMI MTK_M4U_ID(SMI_L3_ID, 4)
+#define M4U_PORT_L3_HDR_ADL_SMI MTK_M4U_ID(SMI_L3_ID, 5)
+#define M4U_PORT_L3_DISP_FAKE1 MTK_M4U_ID(SMI_L3_ID, 6)
+
+/* LARB 4 -- VPP-0 */
+#define M4U_PORT_L4_MDP_RDMA MTK_M4U_ID(SMI_L4_ID, 0)
+#define M4U_PORT_L4_MDP_FG MTK_M4U_ID(SMI_L4_ID, 1)
+#define M4U_PORT_L4_MDP_OVL MTK_M4U_ID(SMI_L4_ID, 2)
+#define M4U_PORT_L4_MDP_WROT MTK_M4U_ID(SMI_L4_ID, 3)
+#define M4U_PORT_L4_FAKE_ENG MTK_M4U_ID(SMI_L4_ID, 4)
+#define M4U_PORT_L4_DISP_RDMA MTK_M4U_ID(SMI_L4_ID, 5)
+#define M4U_PORT_L4_DISP_WDMA MTK_M4U_ID(SMI_L4_ID, 6)
+
+/* LARB 5 -- VPP-1 */
+#define M4U_PORT_L5_SVPP1_MDP_RDMA MTK_M4U_ID(SMI_L5_ID, 0)
+#define M4U_PORT_L5_SVPP1_MDP_FG MTK_M4U_ID(SMI_L5_ID, 1)
+#define M4U_PORT_L5_SVPP1_MDP_OVL MTK_M4U_ID(SMI_L5_ID, 2)
+#define M4U_PORT_L5_SVPP1_MDP_WROT MTK_M4U_ID(SMI_L5_ID, 3)
+#define M4U_PORT_L5_SVPP2_MDP_RDMA MTK_M4U_ID(SMI_L5_ID, 4)
+#define M4U_PORT_L5_SVPP2_MDP_FG MTK_M4U_ID(SMI_L5_ID, 5)
+#define M4U_PORT_L5_SVPP2_MDP_WROT MTK_M4U_ID(SMI_L5_ID, 6)
+#define M4U_PORT_L5_LARB5_FAKE_ENG MTK_M4U_ID(SMI_L5_ID, 7)
+
+/* LARB 6 -- VPP-1 */
+#define M4U_PORT_L6_SVPP3_MDP_RDMA MTK_M4U_ID(SMI_L6_ID, 0)
+#define M4U_PORT_L6_SVPP3_MDP_FG MTK_M4U_ID(SMI_L6_ID, 1)
+#define M4U_PORT_L6_SVPP3_MDP_WROT MTK_M4U_ID(SMI_L6_ID, 2)
+#define M4U_PORT_L6_LARB6_FAKE_ENG MTK_M4U_ID(SMI_L6_ID, 3)
+
+/* LARB 7 -- WPE */
+#define M4U_PORT_L7_WPE_RDMA_0 MTK_M4U_ID(SMI_L7_ID, 0)
+#define M4U_PORT_L7_WPE_RDMA_1 MTK_M4U_ID(SMI_L7_ID, 1)
+#define M4U_PORT_L7_WPE_WDMA_0 MTK_M4U_ID(SMI_L7_ID, 2)
+
+/* LARB 9 -- IMG-M */
+#define M4U_PORT_L9_IMGI_T1_A MTK_M4U_ID(SMI_L9_ID, 0)
+#define M4U_PORT_L9_UFDI_T1_A MTK_M4U_ID(SMI_L9_ID, 1)
+#define M4U_PORT_L9_IMGBI_T1_A MTK_M4U_ID(SMI_L9_ID, 2)
+#define M4U_PORT_L9_IMGCI_T1_A MTK_M4U_ID(SMI_L9_ID, 3)
+#define M4U_PORT_L9_SMTI_T1_A MTK_M4U_ID(SMI_L9_ID, 4)
+#define M4U_PORT_L9_SMTI_T4_A MTK_M4U_ID(SMI_L9_ID, 5)
+#define M4U_PORT_L9_TNCSTI_T1_A MTK_M4U_ID(SMI_L9_ID, 6)
+#define M4U_PORT_L9_TNCSTI_T4_A MTK_M4U_ID(SMI_L9_ID, 7)
+#define M4U_PORT_L9_YUVO_T1_A MTK_M4U_ID(SMI_L9_ID, 8)
+#define M4U_PORT_L9_YUVBO_T1_A MTK_M4U_ID(SMI_L9_ID, 9)
+#define M4U_PORT_L9_YUVCO_T1_A MTK_M4U_ID(SMI_L9_ID, 10)
+#define M4U_PORT_L9_TIMGO_T1_A MTK_M4U_ID(SMI_L9_ID, 11)
+#define M4U_PORT_L9_YUVO_T2_A MTK_M4U_ID(SMI_L9_ID, 12)
+#define M4U_PORT_L9_YUVO_T5_A MTK_M4U_ID(SMI_L9_ID, 13)
+#define M4U_PORT_L9_IMGI_T1_B MTK_M4U_ID(SMI_L9_ID, 14)
+#define M4U_PORT_L9_IMGBI_T1_B MTK_M4U_ID(SMI_L9_ID, 15)
+#define M4U_PORT_L9_IMGCI_T1_B MTK_M4U_ID(SMI_L9_ID, 16)
+#define M4U_PORT_L9_SMTI_T4_B MTK_M4U_ID(SMI_L9_ID, 17)
+#define M4U_PORT_L9_TNCSO_T1_A MTK_M4U_ID(SMI_L9_ID, 18)
+#define M4U_PORT_L9_SMTO_T1_A MTK_M4U_ID(SMI_L9_ID, 19)
+#define M4U_PORT_L9_SMTO_T4_A MTK_M4U_ID(SMI_L9_ID, 20)
+#define M4U_PORT_L9_TNCSTO_T1_A MTK_M4U_ID(SMI_L9_ID, 21)
+#define M4U_PORT_L9_YUVO_T2_B MTK_M4U_ID(SMI_L9_ID, 22)
+#define M4U_PORT_L9_YUVO_T5_B MTK_M4U_ID(SMI_L9_ID, 23)
+#define M4U_PORT_L9_SMTO_T4_B MTK_M4U_ID(SMI_L9_ID, 24)
+
+/* LARB 10 -- IMG-D */
+#define M4U_PORT_L10_IMGI_D1 MTK_M4U_ID(SMI_L10_ID, 0)
+#define M4U_PORT_L10_IMGBI_D1 MTK_M4U_ID(SMI_L10_ID, 1)
+#define M4U_PORT_L10_IMGCI_D1 MTK_M4U_ID(SMI_L10_ID, 2)
+#define M4U_PORT_L10_IMGDI_D1 MTK_M4U_ID(SMI_L10_ID, 3)
+#define M4U_PORT_L10_DEPI_D1 MTK_M4U_ID(SMI_L10_ID, 4)
+#define M4U_PORT_L10_DMGI_D1 MTK_M4U_ID(SMI_L10_ID, 5)
+#define M4U_PORT_L10_SMTI_D1 MTK_M4U_ID(SMI_L10_ID, 6)
+#define M4U_PORT_L10_RECI_D1 MTK_M4U_ID(SMI_L10_ID, 7)
+#define M4U_PORT_L10_RECI_D1_N MTK_M4U_ID(SMI_L10_ID, 8)
+#define M4U_PORT_L10_TNRWI_D1 MTK_M4U_ID(SMI_L10_ID, 9)
+#define M4U_PORT_L10_TNRCI_D1 MTK_M4U_ID(SMI_L10_ID, 10)
+#define M4U_PORT_L10_TNRCI_D1_N MTK_M4U_ID(SMI_L10_ID, 11)
+#define M4U_PORT_L10_IMG4O_D1 MTK_M4U_ID(SMI_L10_ID, 12)
+#define M4U_PORT_L10_IMG4BO_D1 MTK_M4U_ID(SMI_L10_ID, 13)
+#define M4U_PORT_L10_SMTI_D8 MTK_M4U_ID(SMI_L10_ID, 14)
+#define M4U_PORT_L10_SMTO_D1 MTK_M4U_ID(SMI_L10_ID, 15)
+#define M4U_PORT_L10_TNRMO_D1 MTK_M4U_ID(SMI_L10_ID, 16)
+#define M4U_PORT_L10_TNRMO_D1_N MTK_M4U_ID(SMI_L10_ID, 17)
+#define M4U_PORT_L10_SMTO_D8 MTK_M4U_ID(SMI_L10_ID, 18)
+#define M4U_PORT_L10_DBGO_D1 MTK_M4U_ID(SMI_L10_ID, 19)
+
+/* LARB 11A -- IMG-D */
+#define M4U_PORT_L11A_WPE_RDMA_0 MTK_M4U_ID(SMI_L11A_ID, 0)
+#define M4U_PORT_L11A_WPE_RDMA_1 MTK_M4U_ID(SMI_L11A_ID, 1)
+#define M4U_PORT_L11A_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11A_ID, 2)
+#define M4U_PORT_L11A_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11A_ID, 3)
+#define M4U_PORT_L11A_WPE_CQ0 MTK_M4U_ID(SMI_L11A_ID, 4)
+#define M4U_PORT_L11A_WPE_CQ1 MTK_M4U_ID(SMI_L11A_ID, 5)
+#define M4U_PORT_L11A_PIMGI_P1 MTK_M4U_ID(SMI_L11A_ID, 6)
+#define M4U_PORT_L11A_PIMGBI_P1 MTK_M4U_ID(SMI_L11A_ID, 7)
+#define M4U_PORT_L11A_PIMGCI_P1 MTK_M4U_ID(SMI_L11A_ID, 8)
+#define M4U_PORT_L11A_IMGI_T1_C MTK_M4U_ID(SMI_L11A_ID, 9)
+#define M4U_PORT_L11A_IMGBI_T1_C MTK_M4U_ID(SMI_L11A_ID, 10)
+#define M4U_PORT_L11A_IMGCI_T1_C MTK_M4U_ID(SMI_L11A_ID, 11)
+#define M4U_PORT_L11A_SMTI_T1_C MTK_M4U_ID(SMI_L11A_ID, 12)
+#define M4U_PORT_L11A_SMTI_T4_C MTK_M4U_ID(SMI_L11A_ID, 13)
+#define M4U_PORT_L11A_SMTI_T6_C MTK_M4U_ID(SMI_L11A_ID, 14)
+#define M4U_PORT_L11A_YUVO_T1_C MTK_M4U_ID(SMI_L11A_ID, 15)
+#define M4U_PORT_L11A_YUVBO_T1_C MTK_M4U_ID(SMI_L11A_ID, 16)
+#define M4U_PORT_L11A_YUVCO_T1_C MTK_M4U_ID(SMI_L11A_ID, 17)
+#define M4U_PORT_L11A_WPE_WDMA_0 MTK_M4U_ID(SMI_L11A_ID, 18)
+#define M4U_PORT_L11A_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11A_ID, 19)
+#define M4U_PORT_L11A_WROT_P1 MTK_M4U_ID(SMI_L11A_ID, 20)
+#define M4U_PORT_L11A_TCCSO_P1 MTK_M4U_ID(SMI_L11A_ID, 21)
+#define M4U_PORT_L11A_TCCSI_P1 MTK_M4U_ID(SMI_L11A_ID, 22)
+#define M4U_PORT_L11A_TIMGO_T1_C MTK_M4U_ID(SMI_L11A_ID, 23)
+#define M4U_PORT_L11A_YUVO_T2_C MTK_M4U_ID(SMI_L11A_ID, 24)
+#define M4U_PORT_L11A_YUVO_T5_C MTK_M4U_ID(SMI_L11A_ID, 25)
+#define M4U_PORT_L11A_SMTO_T1_C MTK_M4U_ID(SMI_L11A_ID, 26)
+#define M4U_PORT_L11A_SMTO_T4_C MTK_M4U_ID(SMI_L11A_ID, 27)
+#define M4U_PORT_L11A_SMTO_T6_C MTK_M4U_ID(SMI_L11A_ID, 28)
+#define M4U_PORT_L11A_DBGO_T1_C MTK_M4U_ID(SMI_L11A_ID, 29)
+
+/* LARB 11B -- IMG-D */
+#define M4U_PORT_L11B_WPE_RDMA_0 MTK_M4U_ID(SMI_L11B_ID, 0)
+#define M4U_PORT_L11B_WPE_RDMA_1 MTK_M4U_ID(SMI_L11B_ID, 1)
+#define M4U_PORT_L11B_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11B_ID, 2)
+#define M4U_PORT_L11B_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11B_ID, 3)
+#define M4U_PORT_L11B_WPE_CQ0 MTK_M4U_ID(SMI_L11B_ID, 4)
+#define M4U_PORT_L11B_WPE_CQ1 MTK_M4U_ID(SMI_L11B_ID, 5)
+#define M4U_PORT_L11B_PIMGI_P1 MTK_M4U_ID(SMI_L11B_ID, 6)
+#define M4U_PORT_L11B_PIMGBI_P1 MTK_M4U_ID(SMI_L11B_ID, 7)
+#define M4U_PORT_L11B_PIMGCI_P1 MTK_M4U_ID(SMI_L11B_ID, 8)
+#define M4U_PORT_L11B_IMGI_T1_C MTK_M4U_ID(SMI_L11B_ID, 9)
+#define M4U_PORT_L11B_IMGBI_T1_C MTK_M4U_ID(SMI_L11B_ID, 10)
+#define M4U_PORT_L11B_IMGCI_T1_C MTK_M4U_ID(SMI_L11B_ID, 11)
+#define M4U_PORT_L11B_SMTI_T1_C MTK_M4U_ID(SMI_L11B_ID, 12)
+#define M4U_PORT_L11B_SMTI_T4_C MTK_M4U_ID(SMI_L11B_ID, 13)
+#define M4U_PORT_L11B_SMTI_T6_C MTK_M4U_ID(SMI_L11B_ID, 14)
+#define M4U_PORT_L11B_YUVO_T1_C MTK_M4U_ID(SMI_L11B_ID, 15)
+#define M4U_PORT_L11B_YUVBO_T1_C MTK_M4U_ID(SMI_L11B_ID, 16)
+#define M4U_PORT_L11B_YUVCO_T1_C MTK_M4U_ID(SMI_L11B_ID, 17)
+#define M4U_PORT_L11B_WPE_WDMA_0 MTK_M4U_ID(SMI_L11B_ID, 18)
+#define M4U_PORT_L11B_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11B_ID, 19)
+#define M4U_PORT_L11B_WROT_P1 MTK_M4U_ID(SMI_L11B_ID, 20)
+#define M4U_PORT_L11B_TCCSO_P1 MTK_M4U_ID(SMI_L11B_ID, 21)
+#define M4U_PORT_L11B_TCCSI_P1 MTK_M4U_ID(SMI_L11B_ID, 22)
+#define M4U_PORT_L11B_TIMGO_T1_C MTK_M4U_ID(SMI_L11B_ID, 23)
+#define M4U_PORT_L11B_YUVO_T2_C MTK_M4U_ID(SMI_L11B_ID, 24)
+#define M4U_PORT_L11B_YUVO_T5_C MTK_M4U_ID(SMI_L11B_ID, 25)
+#define M4U_PORT_L11B_SMTO_T1_C MTK_M4U_ID(SMI_L11B_ID, 26)
+#define M4U_PORT_L11B_SMTO_T4_C MTK_M4U_ID(SMI_L11B_ID, 27)
+#define M4U_PORT_L11B_SMTO_T6_C MTK_M4U_ID(SMI_L11B_ID, 28)
+#define M4U_PORT_L11B_DBGO_T1_C MTK_M4U_ID(SMI_L11B_ID, 29)
+
+/* LARB 11C -- IMG-D */
+#define M4U_PORT_L11C_WPE_RDMA_0 MTK_M4U_ID(SMI_L11C_ID, 0)
+#define M4U_PORT_L11C_WPE_RDMA_1 MTK_M4U_ID(SMI_L11C_ID, 1)
+#define M4U_PORT_L11C_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11C_ID, 2)
+#define M4U_PORT_L11C_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11C_ID, 3)
+#define M4U_PORT_L11C_WPE_CQ0 MTK_M4U_ID(SMI_L11C_ID, 4)
+#define M4U_PORT_L11C_WPE_CQ1 MTK_M4U_ID(SMI_L11C_ID, 5)
+#define M4U_PORT_L11C_PIMGI_P1 MTK_M4U_ID(SMI_L11C_ID, 6)
+#define M4U_PORT_L11C_PIMGBI_P1 MTK_M4U_ID(SMI_L11C_ID, 7)
+#define M4U_PORT_L11C_PIMGCI_P1 MTK_M4U_ID(SMI_L11C_ID, 8)
+#define M4U_PORT_L11C_IMGI_T1_C MTK_M4U_ID(SMI_L11C_ID, 9)
+#define M4U_PORT_L11C_IMGBI_T1_C MTK_M4U_ID(SMI_L11C_ID, 10)
+#define M4U_PORT_L11C_IMGCI_T1_C MTK_M4U_ID(SMI_L11C_ID, 11)
+#define M4U_PORT_L11C_SMTI_T1_C MTK_M4U_ID(SMI_L11C_ID, 12)
+#define M4U_PORT_L11C_SMTI_T4_C MTK_M4U_ID(SMI_L11C_ID, 13)
+#define M4U_PORT_L11C_SMTI_T6_C MTK_M4U_ID(SMI_L11C_ID, 14)
+#define M4U_PORT_L11C_YUVO_T1_C MTK_M4U_ID(SMI_L11C_ID, 15)
+#define M4U_PORT_L11C_YUVBO_T1_C MTK_M4U_ID(SMI_L11C_ID, 16)
+#define M4U_PORT_L11C_YUVCO_T1_C MTK_M4U_ID(SMI_L11C_ID, 17)
+#define M4U_PORT_L11C_WPE_WDMA_0 MTK_M4U_ID(SMI_L11C_ID, 18)
+#define M4U_PORT_L11C_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11C_ID, 19)
+#define M4U_PORT_L11C_WROT_P1 MTK_M4U_ID(SMI_L11C_ID, 20)
+#define M4U_PORT_L11C_TCCSO_P1 MTK_M4U_ID(SMI_L11C_ID, 21)
+#define M4U_PORT_L11C_TCCSI_P1 MTK_M4U_ID(SMI_L11C_ID, 22)
+#define M4U_PORT_L11C_TIMGO_T1_C MTK_M4U_ID(SMI_L11C_ID, 23)
+#define M4U_PORT_L11C_YUVO_T2_C MTK_M4U_ID(SMI_L11C_ID, 24)
+#define M4U_PORT_L11C_YUVO_T5_C MTK_M4U_ID(SMI_L11C_ID, 25)
+#define M4U_PORT_L11C_SMTO_T1_C MTK_M4U_ID(SMI_L11C_ID, 26)
+#define M4U_PORT_L11C_SMTO_T4_C MTK_M4U_ID(SMI_L11C_ID, 27)
+#define M4U_PORT_L11C_SMTO_T6_C MTK_M4U_ID(SMI_L11C_ID, 28)
+#define M4U_PORT_L11C_DBGO_T1_C MTK_M4U_ID(SMI_L11C_ID, 29)
+
+/* LARB 12 -- IPE */
+#define M4U_PORT_L12_FDVT_RDA_0 MTK_M4U_ID(SMI_L12_ID, 0)
+#define M4U_PORT_L12_FDVT_RDB_0 MTK_M4U_ID(SMI_L12_ID, 1)
+#define M4U_PORT_L12_FDVT_WRA_0 MTK_M4U_ID(SMI_L12_ID, 2)
+#define M4U_PORT_L12_FDVT_WRB_0 MTK_M4U_ID(SMI_L12_ID, 3)
+#define M4U_PORT_L12_ME_RDMA MTK_M4U_ID(SMI_L12_ID, 4)
+#define M4U_PORT_L12_ME_WDMA MTK_M4U_ID(SMI_L12_ID, 5)
+#define M4U_PORT_L12_DVS_RDMA MTK_M4U_ID(SMI_L12_ID, 6)
+#define M4U_PORT_L12_DVS_WDMA MTK_M4U_ID(SMI_L12_ID, 7)
+#define M4U_PORT_L12_DVP_RDMA MTK_M4U_ID(SMI_L12_ID, 8)
+#define M4U_PORT_L12_DVP_WDMA MTK_M4U_ID(SMI_L12_ID, 9)
+#define M4U_PORT_L12_FDVT_2ND_RDA_0 MTK_M4U_ID(SMI_L12_ID, 10)
+#define M4U_PORT_L12_FDVT_2ND_RDB_0 MTK_M4U_ID(SMI_L12_ID, 11)
+#define M4U_PORT_L12_FDVT_2ND_WRA_0 MTK_M4U_ID(SMI_L12_ID, 12)
+#define M4U_PORT_L12_FDVT_2ND_WRB_0 MTK_M4U_ID(SMI_L12_ID, 13)
+#define M4U_PORT_L12_DHZEI_E1 MTK_M4U_ID(SMI_L12_ID, 14)
+#define M4U_PORT_L12_DHZEO_E1 MTK_M4U_ID(SMI_L12_ID, 15)
+
+/* LARB 13 -- CAM-1 */
+#define M4U_PORT_L13_CAMSV_CQI_E1 MTK_M4U_ID(SMI_L13_ID, 0)
+#define M4U_PORT_L13_CAMSV_CQI_E2 MTK_M4U_ID(SMI_L13_ID, 1)
+#define M4U_PORT_L13_GCAMSV_A_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 2)
+#define M4U_PORT_L13_GCAMSV_C_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 3)
+#define M4U_PORT_L13_GCAMSV_A_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 4)
+#define M4U_PORT_L13_GCAMSV_C_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 5)
+#define M4U_PORT_L13_PDAI_A_0 MTK_M4U_ID(SMI_L13_ID, 6)
+#define M4U_PORT_L13_PDAI_A_1 MTK_M4U_ID(SMI_L13_ID, 7)
+#define M4U_PORT_L13_CAMSV_CQI_B_E1 MTK_M4U_ID(SMI_L13_ID, 8)
+#define M4U_PORT_L13_CAMSV_CQI_B_E2 MTK_M4U_ID(SMI_L13_ID, 9)
+#define M4U_PORT_L13_CAMSV_CQI_C_E1 MTK_M4U_ID(SMI_L13_ID, 10)
+#define M4U_PORT_L13_CAMSV_CQI_C_E2 MTK_M4U_ID(SMI_L13_ID, 11)
+#define M4U_PORT_L13_GCAMSV_E_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 12)
+#define M4U_PORT_L13_GCAMSV_E_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 13)
+#define M4U_PORT_L13_GCAMSV_A_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 14)
+#define M4U_PORT_L13_GCAMSV_C_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 15)
+#define M4U_PORT_L13_GCAMSV_A_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 16)
+#define M4U_PORT_L13_GCAMSV_C_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 17)
+#define M4U_PORT_L13_GCAMSV_E_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 18)
+#define M4U_PORT_L13_GCAMSV_E_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 19)
+#define M4U_PORT_L13_GCAMSV_G_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 20)
+#define M4U_PORT_L13_GCAMSV_G_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 21)
+#define M4U_PORT_L13_PDAO_A MTK_M4U_ID(SMI_L13_ID, 22)
+#define M4U_PORT_L13_PDAO_C MTK_M4U_ID(SMI_L13_ID, 23)
+
+/* LARB 14 -- CAM-1 */
+#define M4U_PORT_L14_GCAMSV_B_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 0)
+#define M4U_PORT_L14_GCAMSV_B_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 1)
+#define M4U_PORT_L14_SCAMSV_A_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 2)
+#define M4U_PORT_L14_SCAMSV_A_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 3)
+#define M4U_PORT_L14_SCAMSV_B_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 4)
+#define M4U_PORT_L14_SCAMSV_B_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 5)
+#define M4U_PORT_L14_PDAI_B_0 MTK_M4U_ID(SMI_L14_ID, 6)
+#define M4U_PORT_L14_PDAI_B_1 MTK_M4U_ID(SMI_L14_ID, 7)
+#define M4U_PORT_L14_GCAMSV_D_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 8)
+#define M4U_PORT_L14_GCAMSV_D_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 9)
+#define M4U_PORT_L14_GCAMSV_F_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 10)
+#define M4U_PORT_L14_GCAMSV_F_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 11)
+#define M4U_PORT_L14_GCAMSV_H_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 12)
+#define M4U_PORT_L14_GCAMSV_H_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 13)
+#define M4U_PORT_L14_GCAMSV_B_UFEO_1 MTK_M4U_ID(SMI_L14_ID, 14)
+#define M4U_PORT_L14_GCAMSV_B_UFEO_2 MTK_M4U_ID(SMI_L14_ID, 15)
+#define M4U_PORT_L14_GCAMSV_D_UFEO_1 MTK_M4U_ID(SMI_L14_ID, 16)
+#define M4U_PORT_L14_GCAMSV_D_UFEO_2 MTK_M4U_ID(SMI_L14_ID, 17)
+#define M4U_PORT_L14_PDAO_B MTK_M4U_ID(SMI_L14_ID, 18)
+#define M4U_PORT_L14_IPUI MTK_M4U_ID(SMI_L14_ID, 19)
+#define M4U_PORT_L14_IPUO MTK_M4U_ID(SMI_L14_ID, 20)
+#define M4U_PORT_L14_IPU3O MTK_M4U_ID(SMI_L14_ID, 21)
+#define M4U_PORT_L14_FAKE MTK_M4U_ID(SMI_L14_ID, 22)
+
+/* LARB 15 -- IMG-D */
+#define M4U_PORT_L15_VIPI_D1 MTK_M4U_ID(SMI_L15_ID, 0)
+#define M4U_PORT_L15_VIPBI_D1 MTK_M4U_ID(SMI_L15_ID, 1)
+#define M4U_PORT_L15_SMTI_D6 MTK_M4U_ID(SMI_L15_ID, 2)
+#define M4U_PORT_L15_TNCSTI_D1 MTK_M4U_ID(SMI_L15_ID, 3)
+#define M4U_PORT_L15_TNCSTI_D4 MTK_M4U_ID(SMI_L15_ID, 4)
+#define M4U_PORT_L15_SMTI_D4 MTK_M4U_ID(SMI_L15_ID, 5)
+#define M4U_PORT_L15_IMG3O_D1 MTK_M4U_ID(SMI_L15_ID, 6)
+#define M4U_PORT_L15_IMG3BO_D1 MTK_M4U_ID(SMI_L15_ID, 7)
+#define M4U_PORT_L15_IMG3CO_D1 MTK_M4U_ID(SMI_L15_ID, 8)
+#define M4U_PORT_L15_IMG2O_D1 MTK_M4U_ID(SMI_L15_ID, 9)
+#define M4U_PORT_L15_SMTI_D9 MTK_M4U_ID(SMI_L15_ID, 10)
+#define M4U_PORT_L15_SMTO_D4 MTK_M4U_ID(SMI_L15_ID, 11)
+#define M4U_PORT_L15_FEO_D1 MTK_M4U_ID(SMI_L15_ID, 12)
+#define M4U_PORT_L15_TNCSO_D1 MTK_M4U_ID(SMI_L15_ID, 13)
+#define M4U_PORT_L15_TNCSTO_D1 MTK_M4U_ID(SMI_L15_ID, 14)
+#define M4U_PORT_L15_SMTO_D6 MTK_M4U_ID(SMI_L15_ID, 15)
+#define M4U_PORT_L15_SMTO_D9 MTK_M4U_ID(SMI_L15_ID, 16)
+#define M4U_PORT_L15_TNCO_D1 MTK_M4U_ID(SMI_L15_ID, 17)
+#define M4U_PORT_L15_TNCO_D1_N MTK_M4U_ID(SMI_L15_ID, 18)
+
+/* LARB 16A -- CAM */
+#define M4U_PORT_L16A_IMGO_R1 MTK_M4U_ID(SMI_L16A_ID, 0)
+#define M4U_PORT_L16A_CQI_R1 MTK_M4U_ID(SMI_L16A_ID, 1)
+#define M4U_PORT_L16A_CQI_R2 MTK_M4U_ID(SMI_L16A_ID, 2)
+#define M4U_PORT_L16A_BPCI_R1 MTK_M4U_ID(SMI_L16A_ID, 3)
+#define M4U_PORT_L16A_LSCI_R1 MTK_M4U_ID(SMI_L16A_ID, 4)
+#define M4U_PORT_L16A_RAWI_R2 MTK_M4U_ID(SMI_L16A_ID, 5)
+#define M4U_PORT_L16A_RAWI_R3 MTK_M4U_ID(SMI_L16A_ID, 6)
+#define M4U_PORT_L16A_UFDI_R2 MTK_M4U_ID(SMI_L16A_ID, 7)
+#define M4U_PORT_L16A_UFDI_R3 MTK_M4U_ID(SMI_L16A_ID, 8)
+#define M4U_PORT_L16A_RAWI_R4 MTK_M4U_ID(SMI_L16A_ID, 9)
+#define M4U_PORT_L16A_RAWI_R5 MTK_M4U_ID(SMI_L16A_ID, 10)
+#define M4U_PORT_L16A_AAI_R1 MTK_M4U_ID(SMI_L16A_ID, 11)
+#define M4U_PORT_L16A_UFDI_R5 MTK_M4U_ID(SMI_L16A_ID, 12)
+#define M4U_PORT_L16A_FHO_R1 MTK_M4U_ID(SMI_L16A_ID, 13)
+#define M4U_PORT_L16A_AAO_R1 MTK_M4U_ID(SMI_L16A_ID, 14)
+#define M4U_PORT_L16A_TSFSO_R1 MTK_M4U_ID(SMI_L16A_ID, 15)
+#define M4U_PORT_L16A_FLKO_R1 MTK_M4U_ID(SMI_L16A_ID, 16)
+
+/* LARB 16B -- CAM */
+#define M4U_PORT_L16B_IMGO_R1 MTK_M4U_ID(SMI_L16B_ID, 0)
+#define M4U_PORT_L16B_CQI_R1 MTK_M4U_ID(SMI_L16B_ID, 1)
+#define M4U_PORT_L16B_CQI_R2 MTK_M4U_ID(SMI_L16B_ID, 2)
+#define M4U_PORT_L16B_BPCI_R1 MTK_M4U_ID(SMI_L16B_ID, 3)
+#define M4U_PORT_L16B_LSCI_R1 MTK_M4U_ID(SMI_L16B_ID, 4)
+#define M4U_PORT_L16B_RAWI_R2 MTK_M4U_ID(SMI_L16B_ID, 5)
+#define M4U_PORT_L16B_RAWI_R3 MTK_M4U_ID(SMI_L16B_ID, 6)
+#define M4U_PORT_L16B_UFDI_R2 MTK_M4U_ID(SMI_L16B_ID, 7)
+#define M4U_PORT_L16B_UFDI_R3 MTK_M4U_ID(SMI_L16B_ID, 8)
+#define M4U_PORT_L16B_RAWI_R4 MTK_M4U_ID(SMI_L16B_ID, 9)
+#define M4U_PORT_L16B_RAWI_R5 MTK_M4U_ID(SMI_L16B_ID, 10)
+#define M4U_PORT_L16B_AAI_R1 MTK_M4U_ID(SMI_L16B_ID, 11)
+#define M4U_PORT_L16B_UFDI_R5 MTK_M4U_ID(SMI_L16B_ID, 12)
+#define M4U_PORT_L16B_FHO_R1 MTK_M4U_ID(SMI_L16B_ID, 13)
+#define M4U_PORT_L16B_AAO_R1 MTK_M4U_ID(SMI_L16B_ID, 14)
+#define M4U_PORT_L16B_TSFSO_R1 MTK_M4U_ID(SMI_L16B_ID, 15)
+#define M4U_PORT_L16B_FLKO_R1 MTK_M4U_ID(SMI_L16B_ID, 16)
+
+/* LARB 17A -- CAM */
+#define M4U_PORT_L17A_YUVO_R1 MTK_M4U_ID(SMI_L17A_ID, 0)
+#define M4U_PORT_L17A_YUVO_R3 MTK_M4U_ID(SMI_L17A_ID, 1)
+#define M4U_PORT_L17A_YUVCO_R1 MTK_M4U_ID(SMI_L17A_ID, 2)
+#define M4U_PORT_L17A_YUVO_R2 MTK_M4U_ID(SMI_L17A_ID, 3)
+#define M4U_PORT_L17A_RZH1N2TO_R1 MTK_M4U_ID(SMI_L17A_ID, 4)
+#define M4U_PORT_L17A_DRZS4NO_R1 MTK_M4U_ID(SMI_L17A_ID, 5)
+#define M4U_PORT_L17A_TNCSO_R1 MTK_M4U_ID(SMI_L17A_ID, 6)
+
+/* LARB 17B -- CAM */
+#define M4U_PORT_L17B_YUVO_R1 MTK_M4U_ID(SMI_L17B_ID, 0)
+#define M4U_PORT_L17B_YUVO_R3 MTK_M4U_ID(SMI_L17B_ID, 1)
+#define M4U_PORT_L17B_YUVCO_R1 MTK_M4U_ID(SMI_L17B_ID, 2)
+#define M4U_PORT_L17B_YUVO_R2 MTK_M4U_ID(SMI_L17B_ID, 3)
+#define M4U_PORT_L17B_RZH1N2TO_R1 MTK_M4U_ID(SMI_L17B_ID, 4)
+#define M4U_PORT_L17B_DRZS4NO_R1 MTK_M4U_ID(SMI_L17B_ID, 5)
+#define M4U_PORT_L17B_TNCSO_R1 MTK_M4U_ID(SMI_L17B_ID, 6)
+
+/* LARB 19 -- VENC */
+#define M4U_PORT_L19_VENC_RCPU MTK_M4U_ID(SMI_L19_ID, 0)
+#define M4U_PORT_L19_VENC_REC MTK_M4U_ID(SMI_L19_ID, 1)
+#define M4U_PORT_L19_VENC_BSDMA MTK_M4U_ID(SMI_L19_ID, 2)
+#define M4U_PORT_L19_VENC_SV_COMV MTK_M4U_ID(SMI_L19_ID, 3)
+#define M4U_PORT_L19_VENC_RD_COMV MTK_M4U_ID(SMI_L19_ID, 4)
+#define M4U_PORT_L19_VENC_NBM_RDMA MTK_M4U_ID(SMI_L19_ID, 5)
+#define M4U_PORT_L19_VENC_NBM_RDMA_LITE MTK_M4U_ID(SMI_L19_ID, 6)
+#define M4U_PORT_L19_JPGENC_Y_RDMA MTK_M4U_ID(SMI_L19_ID, 7)
+#define M4U_PORT_L19_JPGENC_C_RDMA MTK_M4U_ID(SMI_L19_ID, 8)
+#define M4U_PORT_L19_JPGENC_Q_TABLE MTK_M4U_ID(SMI_L19_ID, 9)
+#define M4U_PORT_L19_VENC_SUB_W_LUMA MTK_M4U_ID(SMI_L19_ID, 10)
+#define M4U_PORT_L19_VENC_FCS_NBM_RDMA MTK_M4U_ID(SMI_L19_ID, 11)
+#define M4U_PORT_L19_JPGENC_BSDMA MTK_M4U_ID(SMI_L19_ID, 12)
+#define M4U_PORT_L19_JPGDEC_WDMA_0 MTK_M4U_ID(SMI_L19_ID, 13)
+#define M4U_PORT_L19_JPGDEC_BSDMA_0 MTK_M4U_ID(SMI_L19_ID, 14)
+#define M4U_PORT_L19_VENC_NBM_WDMA MTK_M4U_ID(SMI_L19_ID, 15)
+#define M4U_PORT_L19_VENC_NBM_WDMA_LITE MTK_M4U_ID(SMI_L19_ID, 16)
+#define M4U_PORT_L19_VENC_FCS_NBM_WDMA MTK_M4U_ID(SMI_L19_ID, 17)
+#define M4U_PORT_L19_JPGDEC_WDMA_1 MTK_M4U_ID(SMI_L19_ID, 18)
+#define M4U_PORT_L19_JPGDEC_BSDMA_1 MTK_M4U_ID(SMI_L19_ID, 19)
+#define M4U_PORT_L19_JPGDEC_HUFF_OFFSET_1 MTK_M4U_ID(SMI_L19_ID, 20)
+#define M4U_PORT_L19_JPGDEC_HUFF_OFFSET_0 MTK_M4U_ID(SMI_L19_ID, 21)
+#define M4U_PORT_L19_VENC_CUR_LUMA MTK_M4U_ID(SMI_L19_ID, 22)
+#define M4U_PORT_L19_VENC_CUR_CHROMA MTK_M4U_ID(SMI_L19_ID, 23)
+#define M4U_PORT_L19_VENC_REF_LUMA MTK_M4U_ID(SMI_L19_ID, 24)
+#define M4U_PORT_L19_VENC_REF_CHROMA MTK_M4U_ID(SMI_L19_ID, 25)
+#define M4U_PORT_L19_VENC_SUB_R_LUMA MTK_M4U_ID(SMI_L19_ID, 26)
+
+/* LARB 21 -- VDEC-CORE0 */
+#define M4U_PORT_L21_HW_VDEC_MC_EXT MTK_M4U_ID(SMI_L21_ID, 0)
+#define M4U_PORT_L21_HW_VDEC_UFO_EXT MTK_M4U_ID(SMI_L21_ID, 1)
+#define M4U_PORT_L21_HW_VDEC_PP_EXT MTK_M4U_ID(SMI_L21_ID, 2)
+#define M4U_PORT_L21_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(SMI_L21_ID, 3)
+#define M4U_PORT_L21_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(SMI_L21_ID, 4)
+#define M4U_PORT_L21_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(SMI_L21_ID, 5)
+#define M4U_PORT_L21_HW_VDEC_TILE_EXT MTK_M4U_ID(SMI_L21_ID, 6)
+#define M4U_PORT_L21_HW_VDEC_VLD_EXT MTK_M4U_ID(SMI_L21_ID, 7)
+#define M4U_PORT_L21_HW_VDEC_VLD2_EXT MTK_M4U_ID(SMI_L21_ID, 8)
+#define M4U_PORT_L21_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(SMI_L21_ID, 9)
+#define M4U_PORT_L21_HW_VDEC_UFO_EXT_C MTK_M4U_ID(SMI_L21_ID, 10)
+
+/* LARB 23 -- VDEC-SOC */
+#define M4U_PORT_L23_HW_VDEC_LAT0_VLD_EXT MTK_M4U_ID(SMI_L23_ID, 0)
+#define M4U_PORT_L23_HW_VDEC_LAT0_VLD2_EXT MTK_M4U_ID(SMI_L23_ID, 1)
+#define M4U_PORT_L23_HW_VDEC_LAT0_AVC_MV_EXT MTK_M4U_ID(SMI_L23_ID, 2)
+#define M4U_PORT_L23_HW_VDEC_LAT0_PRED_RD_EXT MTK_M4U_ID(SMI_L23_ID, 3)
+#define M4U_PORT_L23_HW_VDEC_LAT0_TILE_EXT MTK_M4U_ID(SMI_L23_ID, 4)
+#define M4U_PORT_L23_HW_VDEC_LAT0_WDMA_EXT MTK_M4U_ID(SMI_L23_ID, 5)
+#define M4U_PORT_L23_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(SMI_L23_ID, 6)
+#define M4U_PORT_L23_HW_VDEC_UFO_ENC_EXT_C MTK_M4U_ID(SMI_L23_ID, 7)
+#define M4U_PORT_L23_HW_VDEC_MC_EXT_C MTK_M4U_ID(SMI_L23_ID, 8)
+
+/* LARB 27 -- CCU */
+#define M4U_PORT_L27_CCUI MTK_M4U_ID(SMI_L27_ID, 0)
+#define M4U_PORT_L27_CCUO MTK_M4U_ID(SMI_L27_ID, 1)
+#define M4U_PORT_L27_CCUI2 MTK_M4U_ID(SMI_L27_ID, 2)
+#define M4U_PORT_L27_CCUO2 MTK_M4U_ID(SMI_L27_ID, 3)
+
+/* LARB 28 -- AXI-CCU */
+#define M4U_PORT_L28_CCU_AXI_0 MTK_M4U_ID(SMI_L28_ID, 0)
+
+/* infra/peri */
+#define IFR_IOMMU_PORT_PCIE_0 MTK_IFAIOMMU_PERI_ID(0)
+
+#endif
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 847da6fc2713..31029f4f7be8 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -12,7 +12,7 @@
#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
-#ifdef CONFIG_HW_PERF_EVENTS
+#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
u8 idx; /* index into the pmu->pmc array */
@@ -74,6 +74,7 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_resync_el0(void);
#define kvm_vcpu_has_pmu(vcpu) \
(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
@@ -171,6 +172,7 @@ static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
return 0;
}
+static inline void kvm_vcpu_pmu_resync_el0(void) {}
#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index a73246c3c35e..afd94c9b8b8a 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1480,6 +1480,15 @@ static inline int lpit_read_residency_count_address(u64 *address)
}
#endif
+#ifdef CONFIG_ACPI_PROCESSOR_IDLE
+#ifndef arch_get_idle_state_flags
+static inline unsigned int arch_get_idle_state_flags(u32 arch_flags)
+{
+ return 0;
+}
+#endif
+#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
+
#ifdef CONFIG_ACPI_PPTT
int acpi_pptt_cpu_is_thread(unsigned int cpu);
int find_acpi_cpu_topology(unsigned int cpu, int level);
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 2dd175f5debd..29cc10220952 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -42,11 +42,13 @@ struct aer_capability_regs {
#if defined(CONFIG_PCIEAER)
int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
+int pcie_aer_is_native(struct pci_dev *dev);
#else
static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
return -EINVAL;
}
+static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
#endif
void cper_print_aer(struct pci_dev *dev, int aer_severity,
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 953e6f12fa1c..99a5201d9e62 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -32,7 +32,6 @@ struct task_struct;
struct pci_dev;
extern int amd_iommu_detect(void);
-extern int amd_iommu_init_hardware(void);
/**
* amd_iommu_init_device() - Init device for use with IOMMUv2 driver
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 7c67c17321d4..083f85653716 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -67,6 +67,8 @@
#define ARM_SMCCC_VERSION_1_3 0x10003
#define ARM_SMCCC_1_3_SVE_HINT 0x10000
+#define ARM_SMCCC_CALL_HINTS ARM_SMCCC_1_3_SVE_HINT
+
#define ARM_SMCCC_VERSION_FUNC_ID \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index 18f5744dfb5d..b83ef19da13d 100644
--- a/include/linux/atomic/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -459,8 +459,6 @@ raw_atomic_read_acquire(const atomic_t *v)
{
#if defined(arch_atomic_read_acquire)
return arch_atomic_read_acquire(v);
-#elif defined(arch_atomic_read)
- return arch_atomic_read(v);
#else
int ret;
@@ -508,8 +506,6 @@ raw_atomic_set_release(atomic_t *v, int i)
{
#if defined(arch_atomic_set_release)
arch_atomic_set_release(v, i);
-#elif defined(arch_atomic_set)
- arch_atomic_set(v, i);
#else
if (__native_word(atomic_t)) {
smp_store_release(&(v)->counter, i);
@@ -2575,8 +2571,6 @@ raw_atomic64_read_acquire(const atomic64_t *v)
{
#if defined(arch_atomic64_read_acquire)
return arch_atomic64_read_acquire(v);
-#elif defined(arch_atomic64_read)
- return arch_atomic64_read(v);
#else
s64 ret;
@@ -2624,8 +2618,6 @@ raw_atomic64_set_release(atomic64_t *v, s64 i)
{
#if defined(arch_atomic64_set_release)
arch_atomic64_set_release(v, i);
-#elif defined(arch_atomic64_set)
- arch_atomic64_set(v, i);
#else
if (__native_word(atomic64_t)) {
smp_store_release(&(v)->counter, i);
@@ -4657,4 +4649,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
}
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 202b45c7db600ce36198eb1f1fc2c2d5268ace2d
+// 2fdd6702823fa842f9cea57a002e6e4476ae780c
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 6a3a9e122bb5..51b1b7054a23 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -117,6 +117,8 @@ enum audit_nfcfgop {
AUDIT_NFT_OP_OBJ_RESET,
AUDIT_NFT_OP_FLOWTABLE_REGISTER,
AUDIT_NFT_OP_FLOWTABLE_UNREGISTER,
+ AUDIT_NFT_OP_SETELEM_RESET,
+ AUDIT_NFT_OP_RULE_RESET,
AUDIT_NFT_OP_INVALID,
};
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 12596af59c00..49f8b691496c 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -438,7 +438,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
size /= sizeof(long);
while (size--)
- *ldst++ = *lsrc++;
+ data_race(*ldst++ = *lsrc++);
}
/* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
@@ -1307,7 +1307,7 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return NULL;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
diff --git a/include/linux/btf.h b/include/linux/btf.h
index df64cc642074..928113a80a95 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -209,6 +209,7 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec);
bool btf_type_is_void(const struct btf_type *t);
s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
+s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p);
const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
u32 id, u32 *res_id);
const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index a3462a9b8e18..a9cb10b0e2e9 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -49,7 +49,7 @@ word \
____BTF_ID(symbol, word)
#define __ID(prefix) \
- __PASTE(prefix, __COUNTER__)
+ __PASTE(__PASTE(prefix, __COUNTER__), __LINE__)
/*
* The BTF_ID defines unique symbol for each ID pointing
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 4ede47649a81..44e9de51eedf 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -171,7 +171,10 @@ static __always_inline int buffer_uptodate(const struct buffer_head *bh)
return test_bit_acquire(BH_Uptodate, &bh->b_state);
}
-#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
+static inline unsigned long bh_offset(const struct buffer_head *bh)
+{
+ return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1);
+}
/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page) \
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 49586ff26152..f3b3593254b9 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -359,14 +359,19 @@ enum {
extern const char *ceph_mds_op_name(int op);
-
-#define CEPH_SETATTR_MODE 1
-#define CEPH_SETATTR_UID 2
-#define CEPH_SETATTR_GID 4
-#define CEPH_SETATTR_MTIME 8
-#define CEPH_SETATTR_ATIME 16
-#define CEPH_SETATTR_SIZE 32
-#define CEPH_SETATTR_CTIME 64
+#define CEPH_SETATTR_MODE (1 << 0)
+#define CEPH_SETATTR_UID (1 << 1)
+#define CEPH_SETATTR_GID (1 << 2)
+#define CEPH_SETATTR_MTIME (1 << 3)
+#define CEPH_SETATTR_ATIME (1 << 4)
+#define CEPH_SETATTR_SIZE (1 << 5)
+#define CEPH_SETATTR_CTIME (1 << 6)
+#define CEPH_SETATTR_MTIME_NOW (1 << 7)
+#define CEPH_SETATTR_ATIME_NOW (1 << 8)
+#define CEPH_SETATTR_BTIME (1 << 9)
+#define CEPH_SETATTR_KILL_SGUID (1 << 10)
+#define CEPH_SETATTR_FSCRYPT_AUTH (1 << 11)
+#define CEPH_SETATTR_FSCRYPT_FILE (1 << 12)
/*
* Ceph setxattr request flags.
@@ -479,7 +484,7 @@ union ceph_mds_request_args_ext {
#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */
#define CEPH_MDS_FLAG_ASYNC 4 /* request is asynchronous */
-struct ceph_mds_request_head_old {
+struct ceph_mds_request_head_legacy {
__le64 oldest_client_tid;
__le32 mdsmap_epoch; /* on client */
__le32 flags; /* CEPH_MDS_FLAG_* */
@@ -492,9 +497,9 @@ struct ceph_mds_request_head_old {
union ceph_mds_request_args args;
} __attribute__ ((packed));
-#define CEPH_MDS_REQUEST_HEAD_VERSION 1
+#define CEPH_MDS_REQUEST_HEAD_VERSION 2
-struct ceph_mds_request_head {
+struct ceph_mds_request_head_old {
__le16 version; /* struct version */
__le64 oldest_client_tid;
__le32 mdsmap_epoch; /* on client */
@@ -508,6 +513,23 @@ struct ceph_mds_request_head {
union ceph_mds_request_args_ext args;
} __attribute__ ((packed));
+struct ceph_mds_request_head {
+ __le16 version; /* struct version */
+ __le64 oldest_client_tid;
+ __le32 mdsmap_epoch; /* on client */
+ __le32 flags; /* CEPH_MDS_FLAG_* */
+ __u8 num_retry, num_fwd; /* legacy count retry and fwd attempts */
+ __le16 num_releases; /* # include cap/lease release records */
+ __le32 op; /* mds op code */
+ __le32 caller_uid, caller_gid;
+ __le64 ino; /* use this ino for openc, mkdir, mknod,
+ etc. (if replaying) */
+ union ceph_mds_request_args_ext args;
+
+ __le32 ext_num_retry; /* new count retry attempts */
+ __le32 ext_num_fwd; /* new count fwd attempts */
+} __attribute__ ((packed));
+
/* cap/lease release record */
struct ceph_mds_request_release {
__le64 ino, cap_id; /* ino and unique cap id */
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 99c1726be6ee..2eaaabbe98cb 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -17,6 +17,7 @@
struct ceph_msg;
struct ceph_connection;
+struct ceph_msg_data_cursor;
/*
* Ceph defines these callbacks for handling connection events.
@@ -70,6 +71,30 @@ struct ceph_connection_operations {
int used_proto, int result,
const int *allowed_protos, int proto_cnt,
const int *allowed_modes, int mode_cnt);
+
+ /**
+ * sparse_read: read sparse data
+ * @con: connection we're reading from
+ * @cursor: data cursor for reading extents
+ * @buf: optional buffer to read into
+ *
+ * This should be called more than once, each time setting up to
+ * receive an extent into the current cursor position, and zeroing
+ * the holes between them.
+ *
+ * Returns amount of data to be read (in bytes), 0 if reading is
+ * complete, or -errno if there was an error.
+ *
+ * If @buf is set on a >0 return, then the data should be read into
+ * the provided buffer. Otherwise, it should be read into the cursor.
+ *
+ * The sparse read operation is expected to initialize the cursor
+ * with a length covering up to the end of the last extent.
+ */
+ int (*sparse_read)(struct ceph_connection *con,
+ struct ceph_msg_data_cursor *cursor,
+ char **buf);
+
};
/* use format string %s%lld */
@@ -98,6 +123,7 @@ enum ceph_msg_data_type {
CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */
#endif /* CONFIG_BLOCK */
CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */
+ CEPH_MSG_DATA_ITER, /* data source/destination is an iov_iter */
};
#ifdef CONFIG_BLOCK
@@ -199,6 +225,7 @@ struct ceph_msg_data {
bool own_pages;
};
struct ceph_pagelist *pagelist;
+ struct iov_iter iter;
};
};
@@ -207,6 +234,7 @@ struct ceph_msg_data_cursor {
struct ceph_msg_data *data; /* current data item */
size_t resid; /* bytes not yet consumed */
+ int sr_resid; /* residual sparse_read len */
bool need_crc; /* crc update needed */
union {
#ifdef CONFIG_BLOCK
@@ -222,6 +250,10 @@ struct ceph_msg_data_cursor {
struct page *page; /* page from list */
size_t offset; /* bytes from list */
};
+ struct {
+ struct iov_iter iov_iter;
+ unsigned int lastlen;
+ };
};
};
@@ -251,6 +283,7 @@ struct ceph_msg {
struct kref kref;
bool more_to_follow;
bool needs_out_seq;
+ bool sparse_read;
int front_alloc_len;
struct ceph_msgpool *pool;
@@ -309,6 +342,10 @@ struct ceph_connection_v1_info {
int in_base_pos; /* bytes read */
+ /* sparse reads */
+ struct kvec in_sr_kvec; /* current location to receive into */
+ u64 in_sr_len; /* amount of data in this extent */
+
/* message in temps */
u8 in_tag; /* protocol control byte */
struct ceph_msg_header in_hdr;
@@ -395,6 +432,7 @@ struct ceph_connection_v2_info {
void *conn_bufs[16];
int conn_buf_cnt;
+ int data_len_remain;
struct kvec in_sign_kvecs[8];
struct kvec out_sign_kvecs[8];
@@ -573,6 +611,8 @@ void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
#endif /* CONFIG_BLOCK */
void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
struct ceph_bvec_iter *bvec_pos);
+void ceph_msg_data_add_iter(struct ceph_msg *msg,
+ struct iov_iter *iter);
struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
gfp_t flags, bool can_fail);
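As a rough illustration of the new CEPH_MSG_DATA_ITER data type and the ceph_msg_data_add_iter() declaration above, a caller holding a bio_vec array could hand it to the messenger as an ITER_DEST iov_iter. This sketch is not from the patch; example_msg_add_bvec_iter() is a hypothetical helper.

#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/ceph/messenger.h>

static void example_msg_add_bvec_iter(struct ceph_msg *msg,
				      struct bio_vec *bvecs,
				      unsigned int nr_segs, size_t len)
{
	struct iov_iter iter;

	/* Describe the receive buffer; the message keeps its own copy of the iter. */
	iov_iter_bvec(&iter, ITER_DEST, bvecs, nr_segs, len);
	ceph_msg_data_add_iter(msg, &iter);
}
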
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index fb6be72104df..bf9823956758 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -29,14 +29,62 @@ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *);
#define CEPH_HOMELESS_OSD -1
-/* a given osd we're communicating with */
+/*
+ * A single extent in a SPARSE_READ reply.
+ *
+ * Note that these come from the OSD as little-endian values. On BE arches,
+ * we convert them in-place after receipt.
+ */
+struct ceph_sparse_extent {
+ u64 off;
+ u64 len;
+} __packed;
+
+/* Sparse read state machine state values */
+enum ceph_sparse_read_state {
+ CEPH_SPARSE_READ_HDR = 0,
+ CEPH_SPARSE_READ_EXTENTS,
+ CEPH_SPARSE_READ_DATA_LEN,
+ CEPH_SPARSE_READ_DATA,
+};
+
+/*
+ * A SPARSE_READ reply is a 32-bit count of extents, followed by an array of
+ * 64-bit offset/length pairs, and then all of the actual file data
+ * concatenated after it (sans holes).
+ *
+ * Unfortunately, we don't know how long the extent array is until we've
+ * started reading the data section of the reply. The caller should send down
+ * a destination buffer for the array, but we'll alloc one if it's too small
+ * or if the caller doesn't.
+ */
+struct ceph_sparse_read {
+ enum ceph_sparse_read_state sr_state; /* state machine state */
+ u64 sr_req_off; /* orig request offset */
+ u64 sr_req_len; /* orig request length */
+ u64 sr_pos; /* current pos in buffer */
+ int sr_index; /* current extent index */
+ __le32 sr_datalen; /* length of actual data */
+ u32 sr_count; /* extent count in reply */
+ int sr_ext_len; /* length of extent array */
+ struct ceph_sparse_extent *sr_extent; /* extent array */
+};
+
+/*
+ * A given osd we're communicating with.
+ *
+ * Note that the o_requests tree can be searched while holding the "lock" mutex
+ * or the "o_requests_lock" spinlock. Insertion or removal requires both!
+ */
struct ceph_osd {
refcount_t o_ref;
+ int o_sparse_op_idx;
struct ceph_osd_client *o_osdc;
int o_osd;
int o_incarnation;
struct rb_node o_node;
struct ceph_connection o_con;
+ spinlock_t o_requests_lock;
struct rb_root o_requests;
struct rb_root o_linger_requests;
struct rb_root o_backoff_mappings;
@@ -46,6 +94,7 @@ struct ceph_osd {
unsigned long lru_ttl;
struct list_head o_keepalive_item;
struct mutex lock;
+ struct ceph_sparse_read o_sparse_read;
};
#define CEPH_OSD_SLAB_OPS 2
@@ -59,6 +108,7 @@ enum ceph_osd_data_type {
CEPH_OSD_DATA_TYPE_BIO,
#endif /* CONFIG_BLOCK */
CEPH_OSD_DATA_TYPE_BVECS,
+ CEPH_OSD_DATA_TYPE_ITER,
};
struct ceph_osd_data {
@@ -82,6 +132,7 @@ struct ceph_osd_data {
struct ceph_bvec_iter bvec_pos;
u32 num_bvecs;
};
+ struct iov_iter iter;
};
};
@@ -98,6 +149,8 @@ struct ceph_osd_req_op {
u64 offset, length;
u64 truncate_size;
u32 truncate_seq;
+ int sparse_ext_cnt;
+ struct ceph_sparse_extent *sparse_ext;
struct ceph_osd_data osd_data;
} extent;
struct {
@@ -145,6 +198,9 @@ struct ceph_osd_req_op {
u32 src_fadvise_flags;
struct ceph_osd_data osd_data;
} copy_from;
+ struct {
+ u64 ver;
+ } assert_ver;
};
};
@@ -199,6 +255,7 @@ struct ceph_osd_request {
struct ceph_osd_client *r_osdc;
struct kref r_kref;
bool r_mempool;
+ bool r_linger; /* don't resend on failure */
struct completion r_completion; /* private to osd_client.c */
ceph_osdc_callback_t r_callback;
@@ -211,9 +268,9 @@ struct ceph_osd_request {
struct ceph_snap_context *r_snapc; /* for writes */
struct timespec64 r_mtime; /* ditto */
u64 r_data_offset; /* ditto */
- bool r_linger; /* don't resend on failure */
/* internal */
+ u64 r_version; /* data version sent in reply */
unsigned long r_stamp; /* jiffies, send or check time */
unsigned long r_start_stamp; /* jiffies */
ktime_t r_start_latency; /* ktime_t */
@@ -450,6 +507,8 @@ void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
unsigned int which,
struct ceph_bvec_iter *bvec_pos);
+void osd_req_op_extent_osd_iter(struct ceph_osd_request *osd_req,
+ unsigned int which, struct iov_iter *iter);
extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *,
unsigned int which,
@@ -504,6 +563,20 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
u32 truncate_seq, u64 truncate_size,
bool use_mempool);
+int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt);
+
+/*
+ * How big an extent array should we preallocate for a sparse read? This is
+ * just a starting value. If we get more than this back from the OSD, the
+ * receiver will reallocate.
+ */
+#define CEPH_SPARSE_EXT_ARRAY_INITIAL 16
+
+static inline int ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op)
+{
+ return __ceph_alloc_sparse_ext_map(op, CEPH_SPARSE_EXT_ARRAY_INITIAL);
+}
+
extern void ceph_osdc_get_request(struct ceph_osd_request *req);
extern void ceph_osdc_put_request(struct ceph_osd_request *req);
@@ -558,5 +631,19 @@ int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
struct ceph_object_locator *oloc,
struct ceph_watch_item **watchers,
u32 *num_watchers);
-#endif
+/* Find offset into the buffer of the end of the extent map */
+static inline u64 ceph_sparse_ext_map_end(struct ceph_osd_req_op *op)
+{
+ struct ceph_sparse_extent *ext;
+
+ /* No extents? No data */
+ if (op->extent.sparse_ext_cnt == 0)
+ return 0;
+
+ ext = &op->extent.sparse_ext[op->extent.sparse_ext_cnt - 1];
+
+ return ext->off + ext->len - op->extent.offset;
+}
+
+#endif
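A minimal sketch of how the sparse-read helpers above might be used, assuming @req already carries a CEPH_OSD_OP_SPARSE_READ extent op at index 0 (for instance one built by ceph_osdc_new_request()); the example_* names are hypothetical and not part of the patch.

#include <linux/ceph/osd_client.h>

static int example_prepare_sparse_read(struct ceph_osd_request *req)
{
	/* Preallocate CEPH_SPARSE_EXT_ARRAY_INITIAL extents; the receive
	 * path reallocates if the OSD returns more than that. */
	return ceph_alloc_sparse_ext_map(&req->r_ops[0]);
}

static u64 example_sparse_read_end(struct ceph_osd_request *req)
{
	/* After completion: offset, relative to the request start, at which
	 * the last returned extent ends. */
	return ceph_sparse_ext_map_end(&req->r_ops[0]);
}
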
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 43a7a1573b51..73c3efbec36c 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -524,6 +524,10 @@ struct ceph_osd_op {
__le64 cookie;
} __attribute__ ((packed)) notify;
struct {
+ __le64 unused;
+ __le64 ver;
+ } __attribute__ ((packed)) assert_ver;
+ struct {
__le64 offset, length;
__le64 src_offset;
} __attribute__ ((packed)) clonerange;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index ae20dbb885d6..f1b3151ac30b 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -342,6 +342,20 @@ struct cgroup_rstat_cpu {
struct cgroup_base_stat last_bstat;
/*
+ * This field is used to record the cumulative per-cpu time of
+ * the cgroup and its descendants. Currently it can be read via
+ * eBPF/drgn etc, and we are still trying to determine how to
+ * expose it in the cgroupfs interface.
+ */
+ struct cgroup_base_stat subtree_bstat;
+
+ /*
+ * Snapshots at the last reading. These are used to calculate the
+ * deltas to propagate to the per-cpu subtree_bstat.
+ */
+ struct cgroup_base_stat last_subtree_bstat;
+
+ /*
* Child cgroups with stat updates on this cpu since the last read
* are linked on the parent's ->updated_children through
* ->updated_next.
diff --git a/include/linux/console.h b/include/linux/console.h
index d3195664baa5..7de11c763eb3 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -154,6 +154,8 @@ static inline int con_debug_leave(void)
* receiving the printk spam for obvious reasons.
* @CON_EXTENDED: The console supports the extended output format of
* /dev/kmesg which requires a larger output buffer.
+ * @CON_SUSPENDED: Indicates if a console is suspended. If true, the
+ * printing callbacks must not be called.
*/
enum cons_flags {
CON_PRINTBUFFER = BIT(0),
@@ -163,6 +165,7 @@ enum cons_flags {
CON_ANYTIME = BIT(4),
CON_BRL = BIT(5),
CON_EXTENDED = BIT(6),
+ CON_SUSPENDED = BIT(7),
};
/**
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 43b363a99215..71d186d6933a 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -141,6 +141,9 @@ struct cpufreq_policy {
*/
bool dvfs_possible_from_any_cpu;
+ /* Per policy boost enabled flag. */
+ bool boost_enabled;
+
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
unsigned int cached_resolved_idx;
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 06dda85f0424..068f7738be22 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -90,7 +90,6 @@ enum cpuhp_state {
CPUHP_FS_BUFF_DEAD,
CPUHP_PRINTK_DEAD,
CPUHP_MM_MEMCQ_DEAD,
- CPUHP_XFS_DEAD,
CPUHP_PERCPU_CNT_DEAD,
CPUHP_RADIX_DEAD,
CPUHP_PAGE_ALLOC,
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 27dbd4c64860..e34b601b71fd 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -106,8 +106,6 @@ static inline bool dmar_rcu_check(void)
extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
extern void dmar_register_bus_notifier(void);
-extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
- struct dmar_dev_scope **devices, u16 segment);
extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt);
extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
index 1c849db953a5..45fca09b2319 100644
--- a/include/linux/export-internal.h
+++ b/include/linux/export-internal.h
@@ -52,6 +52,8 @@
#ifdef CONFIG_IA64
#define KSYM_FUNC(name) @fptr(name)
+#elif defined(CONFIG_PARISC) && defined(CONFIG_64BIT)
+#define KSYM_FUNC(name) P%name
#else
#define KSYM_FUNC(name) name
#endif
diff --git a/include/linux/export.h b/include/linux/export.h
index beed8387e0a4..9911508a9604 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -50,7 +50,7 @@ extern struct module __this_module;
__EXPORT_SYMBOL_REF(sym) ASM_NL \
.previous
-#if !defined(CONFIG_MODULES) || defined(__DISABLE_EXPORTS)
+#if defined(__DISABLE_EXPORTS)
/*
* Allow symbol exports to be disabled completely so that C code may
@@ -75,7 +75,7 @@ extern struct module __this_module;
__ADDRESSABLE(sym) \
asm(__stringify(___EXPORT_SYMBOL(sym, license, ns)))
-#endif /* CONFIG_MODULES */
+#endif
#ifdef DEFAULT_SYMBOL_NAMESPACE
#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, __stringify(DEFAULT_SYMBOL_NAMESPACE))
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4aeb3fa11927..b528f063e8ff 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1508,47 +1508,18 @@ static inline bool fsuidgid_has_mapping(struct super_block *sb,
kgid_has_mapping(fs_userns, kgid);
}
-struct timespec64 current_mgtime(struct inode *inode);
struct timespec64 current_time(struct inode *inode);
struct timespec64 inode_set_ctime_current(struct inode *inode);
-/*
- * Multigrain timestamps
- *
- * Conditionally use fine-grained ctime and mtime timestamps when there
- * are users actively observing them via getattr. The primary use-case
- * for this is NFS clients that use the ctime to distinguish between
- * different states of the file, and that are often fooled by multiple
- * operations that occur in the same coarse-grained timer tick.
- *
- * The kernel always keeps normalized struct timespec64 values in the ctime,
- * which means that only the first 30 bits of the value are used. Use the
- * 31st bit of the ctime's tv_nsec field as a flag to indicate that the value
- * has been queried since it was last updated.
- */
-#define I_CTIME_QUERIED (1L<<30)
-
/**
* inode_get_ctime - fetch the current ctime from the inode
* @inode: inode from which to fetch ctime
*
- * Grab the current ctime tv_nsec field from the inode, mask off the
- * I_CTIME_QUERIED flag and return it. This is mostly intended for use by
- * internal consumers of the ctime that aren't concerned with ensuring a
- * fine-grained update on the next change (e.g. when preparing to store
- * the value in the backing store for later retrieval).
- *
- * This is safe to call regardless of whether the underlying filesystem
- * is using multigrain timestamps.
+ * Grab the current ctime from the inode and return it.
*/
static inline struct timespec64 inode_get_ctime(const struct inode *inode)
{
- struct timespec64 ctime;
-
- ctime.tv_sec = inode->__i_ctime.tv_sec;
- ctime.tv_nsec = inode->__i_ctime.tv_nsec & ~I_CTIME_QUERIED;
-
- return ctime;
+ return inode->__i_ctime;
}
/**
@@ -2334,7 +2305,6 @@ struct file_system_type {
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
-#define FS_MGTIME 64 /* FS uses multigrain timestamps */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
@@ -2358,17 +2328,6 @@ struct file_system_type {
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
-/**
- * is_mgtime: is this inode using multigrain timestamps
- * @inode: inode to test for multigrain timestamps
- *
- * Return true if the inode uses multigrain timestamps, false otherwise.
- */
-static inline bool is_mgtime(const struct inode *inode)
-{
- return inode->i_sb->s_type->fs_flags & FS_MGTIME;
-}
-
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int));
@@ -3054,7 +3013,6 @@ extern void page_put_link(void *);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
-void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode);
void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *);
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index aad9cf8876b5..e8921871ef9a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -862,13 +862,8 @@ extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);
-
-extern void ftrace_disable_daemon(void);
-extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
-static inline void ftrace_disable_daemon(void) { }
-static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 0a221e768ea4..07e370113b2b 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -63,7 +63,7 @@ struct gameport_driver {
int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode);
void gameport_close(struct gameport *gameport);
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
void __gameport_register_port(struct gameport *gameport, struct module *owner);
/* use a define to avoid include chaining to get THIS_MODULE */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 964ca1f15e3f..5a8387a4a712 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -833,11 +833,11 @@ struct hid_driver {
void (*feature_mapping)(struct hid_device *hdev,
struct hid_field *field,
struct hid_usage *usage);
-#ifdef CONFIG_PM
+
int (*suspend)(struct hid_device *hdev, pm_message_t message);
int (*resume)(struct hid_device *hdev);
int (*reset_resume)(struct hid_device *hdev);
-#endif
+
/* private: */
struct device_driver driver;
};
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 5b2626063f4f..a30686e649f7 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -984,7 +984,9 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t pte)
{
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
#endif
@@ -1173,7 +1175,7 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
}
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+ pte_t *ptep, pte_t pte, unsigned long sz)
{
}
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 3ac3974b3c78..2b00faf98017 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -348,7 +348,7 @@ struct vmtransfer_page_packet_header {
u8 sender_owns_set;
u8 reserved;
u32 range_cnt;
- struct vmtransfer_page_range ranges[1];
+ struct vmtransfer_page_range ranges[];
} __packed;
struct vmgpadl_packet_header {
@@ -665,8 +665,8 @@ struct vmbus_channel_initiate_contact {
u64 interrupt_page;
struct {
u8 msg_sint;
- u8 padding1[3];
- u32 padding2;
+ u8 msg_vtl;
+ u8 reserved[6];
};
};
u64 monitor_page1;
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 3430cc2b05a6..0dae9db27538 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -237,7 +237,6 @@ enum i2c_driver_flags {
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
* @probe: Callback for device binding
- * @probe_new: Transitional callback for device binding - do not use
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
* @alert: Alert callback, for example for the SMBus alert protocol
@@ -272,16 +271,8 @@ enum i2c_driver_flags {
struct i2c_driver {
unsigned int class;
- union {
/* Standard driver model interfaces */
- int (*probe)(struct i2c_client *client);
- /*
- * Legacy callback that was part of a conversion of .probe().
- * Today it has the same semantic as .probe(). Don't use for new
- * code.
- */
- int (*probe_new)(struct i2c_client *client);
- };
+ int (*probe)(struct i2c_client *client);
void (*remove)(struct i2c_client *client);
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 1b9b15a492fa..cdc684e04a2f 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -189,6 +189,8 @@ struct team {
struct net_device *dev; /* associated netdevice */
struct team_pcpu_stats __percpu *pcpu_stats;
+ const struct header_ops *header_ops_cache;
+
struct mutex lock; /* used for overall locking, e.g. port lists write */
/*
diff --git a/include/linux/instruction_pointer.h b/include/linux/instruction_pointer.h
index cda1f706eaeb..aa0b3ffea935 100644
--- a/include/linux/instruction_pointer.h
+++ b/include/linux/instruction_pointer.h
@@ -2,7 +2,12 @@
#ifndef _LINUX_INSTRUCTION_POINTER_H
#define _LINUX_INSTRUCTION_POINTER_H
+#include <asm/linkage.h>
+
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+
+#ifndef _THIS_IP_
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+#endif
#endif /* _LINUX_INSTRUCTION_POINTER_H */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a92bce40b04b..4a1dc88ddbff 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -569,8 +569,12 @@ enum
* 2) rcu_report_dead() reports the final quiescent states.
*
* _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
+ *
+ * _ (HR)TIMER_SOFTIRQ: (hr)timers_dead_cpu() migrates the queue
*/
-#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ))
+#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(TIMER_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ) |\
+ BIT(HRTIMER_SOFTIRQ) | BIT(RCU_SOFTIRQ))
+
/* map softirq index to softirq name. update 'softirq_to_name' in
* kernel/softirq.c when adding a new softirq.
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index bd6a1110b294..c50a769d569a 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -196,6 +196,8 @@ enum iommu_dev_features {
IOMMU_DEV_FEAT_IOPF,
};
+#define IOMMU_NO_PASID (0U) /* Reserved for DMA w/o PASID */
+#define IOMMU_FIRST_GLOBAL_PASID (1U) /* starting range for allocation */
#define IOMMU_PASID_INVALID (-1U)
typedef unsigned int ioasid_t;
@@ -414,6 +416,8 @@ struct iommu_fault_param {
* @priv: IOMMU Driver private data
* @max_pasids: number of PASIDs this device can consume
* @attach_deferred: the dma domain attachment is deferred
+ * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
+ * @require_direct: device requires IOMMU_RESV_DIRECT regions
*
* TODO: migrate other per device data pointers under iommu_dev_data, e.g.
* struct iommu_group *iommu_group;
@@ -427,6 +431,8 @@ struct dev_iommu {
void *priv;
u32 max_pasids;
u32 attach_deferred:1;
+ u32 pci_32bit_workaround:1;
+ u32 require_direct:1;
};
int iommu_device_register(struct iommu_device *iommu,
@@ -721,6 +727,8 @@ void iommu_detach_device_pasid(struct iommu_domain *domain,
struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
unsigned int type);
+ioasid_t iommu_alloc_global_pasid(struct device *dev);
+void iommu_free_global_pasid(ioasid_t pasid);
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
@@ -1082,6 +1090,13 @@ iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
{
return NULL;
}
+
+static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
+{
+ return IOMMU_PASID_INVALID;
+}
+
+static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */
/**
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 5883551b1ee8..af8a771a053c 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -147,6 +147,7 @@ struct inet6_skb_parm {
#define IP6SKB_JUMBOGRAM 128
#define IP6SKB_SEG6 256
#define IP6SKB_FAKEJUMBO 512
+#define IP6SKB_MULTIPATH 1024
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 819b6bc8ac08..842623d708c2 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -54,11 +54,13 @@ extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
+#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+ KASAN_SHADOW_OFFSET;
}
+#endif
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9d3ac7720da9..fb6c6109fdca 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -190,8 +190,6 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
struct kvm_vcpu *except);
-bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
- unsigned long *vcpu_bitmap);
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -256,11 +254,15 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+union kvm_mmu_notifier_arg {
+ pte_t pte;
+};
+
struct kvm_gfn_range {
struct kvm_memory_slot *slot;
gfn_t start;
gfn_t end;
- pte_t pte;
+ union kvm_mmu_notifier_arg arg;
bool may_block;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
@@ -865,6 +867,25 @@ static inline void kvm_vm_bugged(struct kvm *kvm)
unlikely(__ret); \
})
+/*
+ * Note, "data corruption" refers to corruption of host kernel data structures,
+ * not guest data. Guest data corruption, suspected or confirmed, that is tied
+ * and contained to a single VM should *never* BUG() and potentially panic the
+ * host, i.e. use this variant of KVM_BUG() if and only if a KVM data structure
+ * is corrupted and that corruption can have a cascading effect to other parts
+ * of the host and/or to other VMs.
+ */
+#define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) \
+({ \
+ bool __ret = !!(cond); \
+ \
+ if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) \
+ BUG_ON(__ret); \
+ else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
+
static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PROVE_RCU
@@ -1359,6 +1380,9 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
void kvm_flush_remote_tlbs(struct kvm *kvm);
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
+void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot);
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
@@ -1387,10 +1411,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
-#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
- const struct kvm_memory_slot *memslot);
-#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
+#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
int *is_dirty, struct kvm_memory_slot **memslot);
@@ -1479,11 +1500,23 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
}
#endif
-#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
-static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
+static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
return -ENOTSUPP;
}
+#else
+int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
+#endif
+
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
+ gfn_t gfn, u64 nr_pages)
+{
+ return -EOPNOTSUPP;
+}
+#else
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
#endif
#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
@@ -2148,8 +2181,6 @@ struct kvm_device_ops {
int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};
-void kvm_device_get(struct kvm_device *dev);
-void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);
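
A minimal kernel-context sketch of how the new KVM_BUG_ON_DATA_CORRUPTION() macro might be used; example_node, EXAMPLE_MAGIC and the caller are hypothetical and only illustrate the intended pattern, not an actual KVM call site.

struct example_node {			/* hypothetical KVM-internal structure */
	u32 magic;
	struct list_head link;
};

#define EXAMPLE_MAGIC	0x4b564d21	/* hypothetical */

static int example_walk_node(struct kvm *kvm, struct example_node *node)
{
	/*
	 * Host-side structure corruption, never guest data: with
	 * CONFIG_BUG_ON_DATA_CORRUPTION=y this BUG()s, otherwise it warns
	 * once, marks the VM bugged and lets the caller unwind.
	 */
	if (KVM_BUG_ON_DATA_CORRUPTION(node->magic != EXAMPLE_MAGIC, kvm))
		return -EIO;

	/* ... normal processing ... */
	return 0;
}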
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 7d428100b42b..aa16dc2a8230 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -100,6 +100,7 @@ struct led_classdev {
const char *name;
unsigned int brightness;
unsigned int max_brightness;
+ unsigned int color;
int flags;
/* Lower 16 bits reflect status */
@@ -313,6 +314,8 @@ extern struct led_classdev *of_led_get(struct device_node *np, int index);
extern void led_put(struct led_classdev *led_cdev);
struct led_classdev *__must_check devm_of_led_get(struct device *dev,
int index);
+struct led_classdev *__must_check devm_of_led_get_optional(struct device *dev,
+ int index);
/**
* led_blink_set - set blinking with software fallback
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 820f7a3a2749..2a7d2af0ed80 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -192,6 +192,7 @@ enum {
ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */
ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */
+ ATA_PFLAG_RESUMING = (1 << 16), /* port is being resumed */
ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */
@@ -222,6 +223,10 @@ enum {
ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */
ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */
+ ATA_HOST_NO_PART = (1 << 4), /* Host does not support partial */
+ ATA_HOST_NO_SSC = (1 << 5), /* Host does not support slumber */
+ ATA_HOST_NO_DEVSLP = (1 << 6), /* Host does not support devslp */
+
/* bits 24:31 of host->flags are reserved for LLD specific flags */
/* various lengths of time */
@@ -255,7 +260,7 @@ enum {
* advised to wait only for the following duration before
* doing SRST.
*/
- ATA_TMOUT_PMP_SRST_WAIT = 5000,
+ ATA_TMOUT_PMP_SRST_WAIT = 10000,
/* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
* be a spurious PHY event, so ignore the first PHY event that
@@ -314,9 +319,10 @@ enum {
ATA_EH_ENABLE_LINK = (1 << 3),
ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */
ATA_EH_GET_SUCCESS_SENSE = (1 << 6), /* Get sense data for successful cmd */
+ ATA_EH_SET_ACTIVE = (1 << 7), /* Set a device to active power mode */
ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK |
- ATA_EH_GET_SUCCESS_SENSE,
+ ATA_EH_GET_SUCCESS_SENSE | ATA_EH_SET_ACTIVE,
ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET |
ATA_EH_ENABLE_LINK,
@@ -344,7 +350,6 @@ enum {
ATA_LINK_RESUME_TRIES = 5,
/* how hard are we gonna try to probe/recover devices */
- ATA_PROBE_MAX_TRIES = 3,
ATA_EH_DEV_TRIES = 3,
ATA_EH_PMP_TRIES = 5,
ATA_EH_PMP_LINK_TRIES = 3,
@@ -354,7 +359,7 @@ enum {
/* This should match the actual table size of
* ata_eh_cmd_timeout_table in libata-eh.c.
*/
- ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 7,
+ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8,
/* Horkage types. May be set by libata or controller on drives
(some horkage may be drive/controller pair dependent */
@@ -977,12 +982,6 @@ struct ata_port_operations {
ssize_t size);
/*
- * Obsolete
- */
- void (*phy_reset)(struct ata_port *ap);
- void (*eng_timeout)(struct ata_port *ap);
-
- /*
* ->inherits must be the last field and all the preceding
* fields must be pointers.
*/
@@ -1116,7 +1115,7 @@ static inline void ata_sas_port_resume(struct ata_port *ap)
extern int ata_ratelimit(void);
extern void ata_msleep(struct ata_port *ap, unsigned int msecs);
extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask,
- u32 val, unsigned long interval, unsigned long timeout);
+ u32 val, unsigned int interval, unsigned int timeout);
extern int atapi_cmd_type(u8 opcode);
extern unsigned int ata_pack_xfermask(unsigned int pio_mask,
unsigned int mwdma_mask,
@@ -1151,6 +1150,7 @@ extern int ata_std_bios_param(struct scsi_device *sdev,
struct block_device *bdev,
sector_t capacity, int geom[]);
extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
+extern int ata_scsi_slave_alloc(struct scsi_device *sdev);
extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
@@ -1166,11 +1166,11 @@ extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *
* SATA specific code - drivers/ata/libata-sata.c
*/
#ifdef CONFIG_SATA_HOST
-extern const unsigned long sata_deb_timing_normal[];
-extern const unsigned long sata_deb_timing_hotplug[];
-extern const unsigned long sata_deb_timing_long[];
+extern const unsigned int sata_deb_timing_normal[];
+extern const unsigned int sata_deb_timing_hotplug[];
+extern const unsigned int sata_deb_timing_long[];
-static inline const unsigned long *
+static inline const unsigned int *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
@@ -1185,14 +1185,14 @@ extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
extern int sata_set_spd(struct ata_link *link);
extern int sata_link_hardreset(struct ata_link *link,
- const unsigned long *timing, unsigned long deadline,
+ const unsigned int *timing, unsigned long deadline,
bool *online, int (*check_ready)(struct ata_link *));
-extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
+extern int sata_link_resume(struct ata_link *link, const unsigned int *params,
unsigned long deadline);
extern int ata_eh_read_sense_success_ncq_log(struct ata_link *link);
extern void ata_eh_analyze_ncq_error(struct ata_link *link);
#else
-static inline const unsigned long *
+static inline const unsigned int *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
return NULL;
@@ -1212,7 +1212,7 @@ static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
}
static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; }
static inline int sata_link_hardreset(struct ata_link *link,
- const unsigned long *timing,
+ const unsigned int *timing,
unsigned long deadline,
bool *online,
int (*check_ready)(struct ata_link *))
@@ -1222,7 +1222,7 @@ static inline int sata_link_hardreset(struct ata_link *link,
return -EOPNOTSUPP;
}
static inline int sata_link_resume(struct ata_link *link,
- const unsigned long *params,
+ const unsigned int *params,
unsigned long deadline)
{
return -EOPNOTSUPP;
@@ -1234,20 +1234,15 @@ static inline int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { }
#endif
extern int sata_link_debounce(struct ata_link *link,
- const unsigned long *params, unsigned long deadline);
+ const unsigned int *params, unsigned long deadline);
extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
bool spm_wakeup);
extern int ata_slave_link_init(struct ata_port *ap);
-extern void ata_sas_port_destroy(struct ata_port *);
extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
struct ata_port_info *, struct Scsi_Host *);
-extern void ata_sas_async_probe(struct ata_port *ap);
-extern int ata_sas_sync_probe(struct ata_port *ap);
-extern int ata_sas_port_init(struct ata_port *);
-extern int ata_sas_port_start(struct ata_port *ap);
+extern void ata_port_probe(struct ata_port *ap);
extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
extern void ata_sas_tport_delete(struct ata_port *ap);
-extern void ata_sas_port_stop(struct ata_port *ap);
extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
extern void ata_tf_to_fis(const struct ata_taskfile *tf,
@@ -1404,6 +1399,7 @@ extern const struct attribute_group *ata_common_sdev_groups[];
.this_id = ATA_SHT_THIS_ID, \
.emulated = ATA_SHT_EMULATED, \
.proc_name = drv_name, \
+ .slave_alloc = ata_scsi_slave_alloc, \
.slave_destroy = ata_scsi_slave_destroy, \
.bios_param = ata_std_bios_param, \
.unlock_native_capacity = ata_scsi_unlock_native_capacity,\
@@ -1785,7 +1781,7 @@ static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
{
struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
- if (unlikely(!qc) || !ap->ops->error_handler)
+ if (unlikely(!qc))
return qc;
if ((qc->flags & (ATA_QCFLAG_ACTIVE |
@@ -1876,7 +1872,7 @@ static inline int ata_check_ready(u8 status)
}
static inline unsigned long ata_deadline(unsigned long from_jiffies,
- unsigned long timeout_msecs)
+ unsigned int timeout_msecs)
{
return from_jiffies + msecs_to_jiffies(timeout_msecs);
}
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index e41c70ac7744..d01e850b570f 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -428,6 +428,8 @@ struct ma_wr_state {
#define MAS_ROOT ((struct maple_enode *)5UL)
#define MAS_NONE ((struct maple_enode *)9UL)
#define MAS_PAUSE ((struct maple_enode *)17UL)
+#define MAS_OVERFLOW ((struct maple_enode *)33UL)
+#define MAS_UNDERFLOW ((struct maple_enode *)65UL)
#define MA_ERROR(err) \
((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
@@ -511,6 +513,15 @@ static inline bool mas_is_paused(const struct ma_state *mas)
return mas->node == MAS_PAUSE;
}
+/* Check if the mas is pointing to a node or not */
+static inline bool mas_is_active(struct ma_state *mas)
+{
+ if ((unsigned long)mas->node >= MAPLE_RESERVED_RANGE)
+ return true;
+
+ return false;
+}
+
/**
* mas_reset() - Reset a Maple Tree operation state.
* @mas: Maple Tree operation state.
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ab94ad4597d0..e4e24da16d2c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -920,7 +920,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}
-void mem_cgroup_handle_over_high(void);
+void mem_cgroup_handle_over_high(gfp_t gfp_mask);
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
@@ -1458,7 +1458,7 @@ static inline void mem_cgroup_unlock_pages(void)
rcu_read_unlock();
}
-static inline void mem_cgroup_handle_over_high(void)
+static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
}
diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h
index 473545a2c425..6fa21791fc85 100644
--- a/include/linux/mfd/88pm860x.h
+++ b/include/linux/mfd/88pm860x.h
@@ -472,13 +472,7 @@ extern int pm860x_bulk_read(struct i2c_client *, int, int, unsigned char *);
extern int pm860x_bulk_write(struct i2c_client *, int, int, unsigned char *);
extern int pm860x_set_bits(struct i2c_client *, int, unsigned char,
unsigned char);
-extern int pm860x_page_reg_read(struct i2c_client *, int);
extern int pm860x_page_reg_write(struct i2c_client *, int, unsigned char);
extern int pm860x_page_bulk_read(struct i2c_client *, int, int,
unsigned char *);
-extern int pm860x_page_bulk_write(struct i2c_client *, int, int,
- unsigned char *);
-extern int pm860x_page_set_bits(struct i2c_client *, int, unsigned char,
- unsigned char);
-
#endif /* __LINUX_MFD_88PM860X_H */
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index 302a330c5c84..09fb3c56e7d7 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -382,10 +382,6 @@ struct ab8500_platform_data {
struct ab8500_sysctrl_platform_data *sysctrl;
};
-extern int ab8500_init(struct ab8500 *ab8500,
- enum ab8500_version version);
-extern int ab8500_exit(struct ab8500 *ab8500);
-
extern int ab8500_suspend(struct ab8500 *ab8500);
static inline int is_ab8500(struct ab8500 *ab)
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index e7a7e70fdb38..dd0fc891b228 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -556,16 +556,6 @@ static inline void prcmu_clear(unsigned int reg, u32 bits)
#define PRCMU_QOS_ARM_OPP 3
#define PRCMU_QOS_DEFAULT_VALUE -1
-static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void)
-{
- return 0;
-}
-
-static inline int prcmu_qos_requirement(int prcmu_qos_class)
-{
- return 0;
-}
-
static inline int prcmu_qos_add_requirement(int prcmu_qos_class,
char *name, s32 value)
{
@@ -582,15 +572,4 @@ static inline void prcmu_qos_remove_requirement(int prcmu_qos_class, char *name)
{
}
-static inline int prcmu_qos_add_notifier(int prcmu_qos_class,
- struct notifier_block *notifier)
-{
- return 0;
-}
-static inline int prcmu_qos_remove_notifier(int prcmu_qos_class,
- struct notifier_block *notifier)
-{
- return 0;
-}
-
#endif /* __MACH_PRCMU_H */
diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h
index 6a012784dd1b..194556851ccf 100644
--- a/include/linux/mfd/hi655x-pmic.h
+++ b/include/linux/mfd/hi655x-pmic.h
@@ -52,7 +52,6 @@
#define OTMP_D1R_INT_MASK BIT(OTMP_D1R_INT)
struct hi655x_pmic {
- struct resource *res;
struct device *dev;
struct regmap *regmap;
struct gpio_desc *gpio;
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index 3acceeedbaba..ea635d12a741 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -441,8 +441,4 @@ enum max77686_types {
TYPE_MAX77802,
};
-extern int max77686_irq_init(struct max77686_dev *max77686);
-extern void max77686_irq_exit(struct max77686_dev *max77686);
-extern int max77686_irq_resume(struct max77686_dev *max77686);
-
#endif /* __LINUX_MFD_MAX77686_PRIV_H */
diff --git a/include/linux/mfd/rz-mtu3.h b/include/linux/mfd/rz-mtu3.h
index c5173bc06270..8421d49500bf 100644
--- a/include/linux/mfd/rz-mtu3.h
+++ b/include/linux/mfd/rz-mtu3.h
@@ -151,7 +151,6 @@ struct rz_mtu3 {
void *priv_data;
};
-#if IS_ENABLED(CONFIG_RZ_MTU3)
static inline bool rz_mtu3_request_channel(struct rz_mtu3_channel *ch)
{
mutex_lock(&ch->lock);
@@ -188,70 +187,5 @@ void rz_mtu3_32bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u32 val);
void rz_mtu3_shared_reg_write(struct rz_mtu3_channel *ch, u16 off, u16 val);
void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch, u16 off,
u16 pos, u8 val);
-#else
-static inline bool rz_mtu3_request_channel(struct rz_mtu3_channel *ch)
-{
- return false;
-}
-
-static inline void rz_mtu3_release_channel(struct rz_mtu3_channel *ch)
-{
-}
-
-static inline bool rz_mtu3_is_enabled(struct rz_mtu3_channel *ch)
-{
- return false;
-}
-
-static inline void rz_mtu3_disable(struct rz_mtu3_channel *ch)
-{
-}
-
-static inline int rz_mtu3_enable(struct rz_mtu3_channel *ch)
-{
- return 0;
-}
-
-static inline u8 rz_mtu3_8bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
-{
- return 0;
-}
-
-static inline u16 rz_mtu3_16bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
-{
- return 0;
-}
-
-static inline u32 rz_mtu3_32bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
-{
- return 0;
-}
-
-static inline u16 rz_mtu3_shared_reg_read(struct rz_mtu3_channel *ch, u16 off)
-{
- return 0;
-}
-
-static inline void rz_mtu3_8bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u8 val)
-{
-}
-
-static inline void rz_mtu3_16bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u16 val)
-{
-}
-
-static inline void rz_mtu3_32bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u32 val)
-{
-}
-
-static inline void rz_mtu3_shared_reg_write(struct rz_mtu3_channel *ch, u16 off, u16 val)
-{
-}
-
-static inline void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch,
- u16 off, u16 pos, u8 val)
-{
-}
-#endif
#endif /* __MFD_RZ_MTU3_H__ */
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 8bef1ab62bba..4e27ca7c49de 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -41,9 +41,10 @@
#define PHY_ID_KSZ9477 0x00221631
/* struct phy_device dev_flags definitions */
-#define MICREL_PHY_50MHZ_CLK 0x00000001
-#define MICREL_PHY_FXEN 0x00000002
-#define MICREL_KSZ8_P1_ERRATA 0x00000003
+#define MICREL_PHY_50MHZ_CLK BIT(0)
+#define MICREL_PHY_FXEN BIT(1)
+#define MICREL_KSZ8_P1_ERRATA BIT(2)
+#define MICREL_NO_EEE BIT(3)
#define MICREL_KSZ9021_EXTREG_CTRL 0xB
#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h
index c238207d1615..e799b1f8d05b 100644
--- a/include/linux/misc_cgroup.h
+++ b/include/linux/misc_cgroup.h
@@ -31,17 +31,18 @@ struct misc_cg;
* struct misc_res: Per cgroup per misc type resource
* @max: Maximum limit on the resource.
* @usage: Current usage of the resource.
- * @failed: True if charged failed for the resource in a cgroup.
+ * @events: Number of times the resource limit was exceeded.
*/
struct misc_res {
- unsigned long max;
- atomic_long_t usage;
- atomic_long_t events;
+ u64 max;
+ atomic64_t usage;
+ atomic64_t events;
};
/**
* struct misc_cg - Miscellaneous controller's cgroup structure.
* @css: cgroup subsys state object.
+ * @events_file: Handle for the misc resources events file.
* @res: Array of misc resources usage in the cgroup.
*/
struct misc_cg {
@@ -53,12 +54,10 @@ struct misc_cg {
struct misc_res res[MISC_CG_RES_TYPES];
};
-unsigned long misc_cg_res_total_usage(enum misc_res_type type);
-int misc_cg_set_capacity(enum misc_res_type type, unsigned long capacity);
-int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg,
- unsigned long amount);
-void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg,
- unsigned long amount);
+u64 misc_cg_res_total_usage(enum misc_res_type type);
+int misc_cg_set_capacity(enum misc_res_type type, u64 capacity);
+int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
+void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
/**
* css_misc() - Get misc cgroup from the css.
@@ -99,27 +98,26 @@ static inline void put_misc_cg(struct misc_cg *cg)
#else /* !CONFIG_CGROUP_MISC */
-static inline unsigned long misc_cg_res_total_usage(enum misc_res_type type)
+static inline u64 misc_cg_res_total_usage(enum misc_res_type type)
{
return 0;
}
-static inline int misc_cg_set_capacity(enum misc_res_type type,
- unsigned long capacity)
+static inline int misc_cg_set_capacity(enum misc_res_type type, u64 capacity)
{
return 0;
}
static inline int misc_cg_try_charge(enum misc_res_type type,
struct misc_cg *cg,
- unsigned long amount)
+ u64 amount)
{
return 0;
}
static inline void misc_cg_uncharge(enum misc_res_type type,
struct misc_cg *cg,
- unsigned long amount)
+ u64 amount)
{
}
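
A hypothetical sketch of a charge/uncharge pair against the widened u64 interface above; MISC_CG_RES_EXAMPLE stands in for a real enum misc_res_type value and is not part of the kernel.

static int example_use_resource(struct misc_cg *cg, u64 amount)
{
	int ret;

	/* Fails (and bumps the events counter) once the limit is exceeded. */
	ret = misc_cg_try_charge(MISC_CG_RES_EXAMPLE, cg, amount);
	if (ret)
		return ret;

	/* ... consume the resource ... */

	misc_cg_uncharge(MISC_CG_RES_EXAMPLE, cg, amount);
	return 0;
}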
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 7c58c44662b8..914a9f974baa 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -379,7 +379,7 @@ struct mtd_info {
struct module *owner;
struct device dev;
- int usecount;
+ struct kref refcnt;
struct mtd_debug_info dbg;
struct nvmem_device *nvmem;
struct nvmem_device *otp_user_nvmem;
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 5159d692f9ce..90a141ba2a5a 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -1540,6 +1540,7 @@ int nand_reset_op(struct nand_chip *chip);
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
unsigned int len);
int nand_status_op(struct nand_chip *chip, u8 *status);
+int nand_exit_status_op(struct nand_chip *chip);
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf, unsigned int len);
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
index 625f491b95de..fb31312825ae 100644
--- a/include/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -9,6 +9,7 @@ struct ip_ct_sctp {
enum sctp_conntrack state;
__be32 vtag[IP_CT_DIR_MAX];
+ u8 init[IP_CT_DIR_MAX];
u8 last_dir;
u8 flags;
};
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 20eeba8b009d..cd628c4b011e 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -48,6 +48,7 @@ struct nfs_client {
#define NFS_CS_NOPING 6 /* - don't ping on connect */
#define NFS_CS_DS 7 /* - Server is a DS */
#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */
+#define NFS_CS_PNFS 9 /* - Server used for pnfs */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index aa9f4c6ebe26..1c315f854ea8 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -157,7 +157,9 @@ extern void nfs_unlock_request(struct nfs_page *req);
extern void nfs_unlock_and_release_request(struct nfs_page *);
extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
extern int nfs_page_group_lock_subrequests(struct nfs_page *head);
-extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode);
+extern void nfs_join_page_group(struct nfs_page *head,
+ struct nfs_commit_info *cinfo,
+ struct inode *inode);
extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
diff --git a/include/linux/of.h b/include/linux/of.h
index ed679819c279..6a9ddf20e79a 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1676,8 +1676,8 @@ int of_overlay_notifier_unregister(struct notifier_block *nb);
#else
-static inline int of_overlay_fdt_apply(void *overlay_fdt, u32 overlay_fdt_size,
- int *ovcs_id)
+static inline int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
+ int *ovcs_id, struct device_node *target_base)
{
return -ENOTSUPP;
}
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 0f4a8903922a..f86a08ba0207 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -67,6 +67,7 @@ enum OID {
OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */
OID_ntlmssp, /* 1.3.6.1.4.1.311.2.2.10 */
+ OID_negoex, /* 1.3.6.1.4.1.311.2.2.30 */
OID_spnego, /* 1.3.6.1.5.5.2 */
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 75b73c83bc9d..d01351b1526f 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -30,17 +30,28 @@ struct percpu_counter {
extern int percpu_counter_batch;
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
- struct lock_class_key *key);
+int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
+ gfp_t gfp, u32 nr_counters,
+ struct lock_class_key *key);
-#define percpu_counter_init(fbc, value, gfp) \
+#define percpu_counter_init_many(fbc, value, gfp, nr_counters) \
({ \
static struct lock_class_key __key; \
\
- __percpu_counter_init(fbc, value, gfp, &__key); \
+ __percpu_counter_init_many(fbc, value, gfp, nr_counters,\
+ &__key); \
})
-void percpu_counter_destroy(struct percpu_counter *fbc);
+
+#define percpu_counter_init(fbc, value, gfp) \
+ percpu_counter_init_many(fbc, value, gfp, 1)
+
+void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters);
+static inline void percpu_counter_destroy(struct percpu_counter *fbc)
+{
+ percpu_counter_destroy_many(fbc, 1);
+}
+
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
s32 batch);
@@ -116,11 +127,27 @@ struct percpu_counter {
s64 count;
};
+static inline int percpu_counter_init_many(struct percpu_counter *fbc,
+ s64 amount, gfp_t gfp,
+ u32 nr_counters)
+{
+ u32 i;
+
+ for (i = 0; i < nr_counters; i++)
+ fbc[i].count = amount;
+
+ return 0;
+}
+
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
gfp_t gfp)
{
- fbc->count = amount;
- return 0;
+ return percpu_counter_init_many(fbc, amount, gfp, 1);
+}
+
+static inline void percpu_counter_destroy_many(struct percpu_counter *fbc,
+ u32 nr_counters)
+{
}
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
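
A hypothetical sketch of the new batch API: a fixed array of counters initialised with one call and torn down with one call; the array name and size are illustrative only.

#define NR_EXAMPLE_COUNTERS	4

static struct percpu_counter example_counters[NR_EXAMPLE_COUNTERS];

static int __init example_counters_init(void)
{
	/* All four counters start at 0 and share one lock class. */
	return percpu_counter_init_many(example_counters, 0, GFP_KERNEL,
					NR_EXAMPLE_COUNTERS);
}

static void example_counters_exit(void)
{
	percpu_counter_destroy_many(example_counters, NR_EXAMPLE_COUNTERS);
}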
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 1fba072b3dac..af7639c3b0a3 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -206,6 +206,14 @@ static inline int pmd_young(pmd_t pmd)
#endif
#ifndef set_ptes
+
+#ifndef pte_next_pfn
+static inline pte_t pte_next_pfn(pte_t pte)
+{
+ return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+}
+#endif
+
/**
* set_ptes - Map consecutive pages to a contiguous range of addresses.
* @mm: Address space to map the pages into.
@@ -231,7 +239,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
if (--nr == 0)
break;
ptep++;
- pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+ pte = pte_next_pfn(pte);
}
arch_leave_lazy_mmu_mode();
}
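
The pte_next_pfn() hook added above lets an architecture replace the plain add when its PTE encoding needs more than a PFN increment. A hypothetical arch-side override might look like the sketch below; pte_pfn(), pfn_pte() and pte_pgprot() are assumed to be provided by that architecture.

/* Hypothetical <asm/pgtable.h> snippet, defined before the generic code so
 * that set_ptes() picks it up instead of the fallback. */
#define pte_next_pfn pte_next_pfn
static inline pte_t pte_next_pfn(pte_t pte)
{
	/* Rebuild the PTE for the next page, preserving the permissions. */
	return pfn_pte(pte_pfn(pte) + 1, pte_pgprot(pte));
}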
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 7d07f8736431..2b886ea654bb 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -600,7 +600,7 @@ void pcs_get_state(struct phylink_pcs *pcs,
*
* The %neg_mode argument should be tested via the phylink_mode_*() family of
* functions, or for PCS that set pcs->neg_mode true, should be tested
- * against the %PHYLINK_PCS_NEG_* definitions.
+ * against the PHYLINK_PCS_NEG_* definitions.
*/
int pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, const unsigned long *advertising,
@@ -630,7 +630,7 @@ void pcs_an_restart(struct phylink_pcs *pcs);
*
* The %mode argument should be tested via the phylink_mode_*() family of
* functions, or for PCS that set pcs->neg_mode true, should be tested
- * against the %PHYLINK_PCS_NEG_* definitions.
+ * against the PHYLINK_PCS_NEG_* definitions.
*/
void pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex);
diff --git a/include/linux/platform_data/rtc-ds2404.h b/include/linux/platform_data/rtc-ds2404.h
deleted file mode 100644
index 22c53825528f..000000000000
--- a/include/linux/platform_data/rtc-ds2404.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * ds2404.h - platform data structure for the DS2404 RTC.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 Sven Schnelle <svens@stackframe.org>
- */
-
-#ifndef __LINUX_DS2404_H
-#define __LINUX_DS2404_H
-
-struct ds2404_platform_data {
-
- unsigned int gpio_rst;
- unsigned int gpio_clk;
- unsigned int gpio_dq;
-};
-#endif
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 04ae1d9073a7..d2f9f690a9c1 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -298,7 +298,7 @@ struct pwm_chip {
int base;
unsigned int npwm;
- struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
+ struct pwm_device * (*of_xlate)(struct pwm_chip *chip,
const struct of_phandle_args *args);
unsigned int of_pwm_n_cells;
@@ -395,9 +395,9 @@ struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
const char *label);
-struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc,
+struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *chip,
const struct of_phandle_args *args);
-struct pwm_device *of_pwm_single_xlate(struct pwm_chip *pc,
+struct pwm_device *of_pwm_single_xlate(struct pwm_chip *chip,
const struct of_phandle_args *args);
struct pwm_device *pwm_get(struct device *dev, const char *con_id);
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index f29aaaf2eb21..006e18decfad 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -108,6 +108,8 @@ extern const struct raid6_calls raid6_vpermxor1;
extern const struct raid6_calls raid6_vpermxor2;
extern const struct raid6_calls raid6_vpermxor4;
extern const struct raid6_calls raid6_vpermxor8;
+extern const struct raid6_calls raid6_lsx;
+extern const struct raid6_calls raid6_lasx;
struct raid6_recov_calls {
void (*data2)(int, size_t, int, int, void **);
@@ -123,6 +125,8 @@ extern const struct raid6_recov_calls raid6_recov_avx2;
extern const struct raid6_recov_calls raid6_recov_avx512;
extern const struct raid6_recov_calls raid6_recov_s390xc;
extern const struct raid6_recov_calls raid6_recov_neon;
+extern const struct raid6_recov_calls raid6_recov_lsx;
+extern const struct raid6_recov_calls raid6_recov_lasx;
extern const struct raid6_calls raid6_neonx1;
extern const struct raid6_calls raid6_neonx2;
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index fe8978eb69f1..b4795698d8c2 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -690,6 +690,10 @@ int rproc_detach(struct rproc *rproc);
int rproc_set_firmware(struct rproc *rproc, const char *fw_name);
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type);
void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem);
+
+/* from remoteproc_coredump.c */
+void rproc_coredump_cleanup(struct rproc *rproc);
+void rproc_coredump(struct rproc *rproc);
void rproc_coredump_using_sections(struct rproc *rproc);
int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size);
int rproc_coredump_add_custom_segment(struct rproc *rproc,
diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h
index 285189454449..f8f3e958e9cf 100644
--- a/include/linux/resume_user_mode.h
+++ b/include/linux/resume_user_mode.h
@@ -55,7 +55,7 @@ static inline void resume_user_mode_work(struct pt_regs *regs)
}
#endif
- mem_cgroup_handle_over_high();
+ mem_cgroup_handle_over_high(GFP_KERNEL);
blkcg_maybe_throttle_current();
rseq_handle_notify_resume(NULL, regs);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index a3825ce81102..51cc21ebb568 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -479,7 +479,6 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
#define anon_vma_init() do {} while (0)
#define anon_vma_prepare(vma) (0)
-#define anon_vma_link(vma) do {} while (0)
static inline int folio_referenced(struct folio *folio, int is_locked,
struct mem_cgroup *memcg,
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 523c98b96cb4..90d8e4475f80 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -64,12 +64,14 @@ struct rpmsg_device {
};
typedef int (*rpmsg_rx_cb_t)(struct rpmsg_device *, void *, int, void *, u32);
+typedef int (*rpmsg_flowcontrol_cb_t)(struct rpmsg_device *, void *, bool);
/**
* struct rpmsg_endpoint - binds a local rpmsg address to its user
* @rpdev: rpmsg channel device
* @refcount: when this drops to zero, the ept is deallocated
* @cb: rx callback handler
+ * @flow_cb: remote flow control callback handler
* @cb_lock: must be taken before accessing/changing @cb
* @addr: local rpmsg address
* @priv: private data for the driver's use
@@ -92,6 +94,7 @@ struct rpmsg_endpoint {
struct rpmsg_device *rpdev;
struct kref refcount;
rpmsg_rx_cb_t cb;
+ rpmsg_flowcontrol_cb_t flow_cb;
struct mutex cb_lock;
u32 addr;
void *priv;
@@ -106,6 +109,7 @@ struct rpmsg_endpoint {
* @probe: invoked when a matching rpmsg channel (i.e. device) is found
* @remove: invoked when the rpmsg channel is removed
* @callback: invoked when an inbound message is received on the channel
+ * @flowcontrol: invoked when remote side flow control request is received
*/
struct rpmsg_driver {
struct device_driver drv;
@@ -113,6 +117,7 @@ struct rpmsg_driver {
int (*probe)(struct rpmsg_device *dev);
void (*remove)(struct rpmsg_device *dev);
int (*callback)(struct rpmsg_device *, void *, int, void *, u32);
+ int (*flowcontrol)(struct rpmsg_device *, void *, bool);
};
static inline u16 rpmsg16_to_cpu(struct rpmsg_device *rpdev, __rpmsg16 val)
@@ -192,6 +197,8 @@ __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept);
+int rpmsg_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst);
+
#else
static inline int rpmsg_register_device_override(struct rpmsg_device *rpdev,
@@ -316,6 +323,14 @@ static inline ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept)
return -ENXIO;
}
+static inline int rpmsg_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
#endif /* IS_ENABLED(CONFIG_RPMSG) */
/* use a macro to avoid include chaining to get THIS_MODULE */
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 1fd9c6a21ebe..4c0bcbeb1f00 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -146,6 +146,7 @@ struct rtc_device {
time64_t range_min;
timeu64_t range_max;
+ timeu64_t alarm_offset_max;
time64_t start_secs;
time64_t offset_secs;
bool set_start_time;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 177b3f3676ef..77f01ac385f7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1671,7 +1671,7 @@ static inline unsigned int __task_state_index(unsigned int tsk_state,
BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
- if (tsk_state == TASK_IDLE)
+ if ((tsk_state & TASK_IDLE) == TASK_IDLE)
state = TASK_REPORT_IDLE;
/*
@@ -1679,7 +1679,7 @@ static inline unsigned int __task_state_index(unsigned int tsk_state,
* to userspace, we can make this appear as if the task has gone through
* a regular rt_mutex_lock() call.
*/
- if (tsk_state == TASK_RTLOCK_WAIT)
+ if (tsk_state & TASK_RTLOCK_WAIT)
state = TASK_UNINTERRUPTIBLE;
return fls(state);
@@ -1858,7 +1858,17 @@ extern int task_can_attach(struct task_struct *p);
extern int dl_bw_alloc(int cpu, u64 dl_bw);
extern void dl_bw_free(int cpu, u64 dl_bw);
#ifdef CONFIG_SMP
+
+/* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
+
+/**
+ * set_cpus_allowed_ptr - set CPU affinity mask of a task
+ * @p: the task
+ * @new_mask: CPU affinity mask
+ *
+ * Return: zero if successful, or a negative error code
+ */
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
extern void release_user_cpus_ptr(struct task_struct *p);
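
A one-line sketch of the documented affinity API above; the wrapper is hypothetical and simply pins a task to a single CPU.

static int example_pin_task(struct task_struct *p, int cpu)
{
	/* Returns 0 on success or a negative error code. */
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}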
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 987a59d977c5..e9bd2f65d7f4 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -512,8 +512,8 @@ do { \
static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
- do_raw_write_seqcount_begin(s);
seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+ do_raw_write_seqcount_begin(s);
}
/**
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index a388160f4be0..4f3d14bb1538 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -858,6 +858,8 @@ struct sdw_defer {
* @post_bank_switch: Callback for post bank switch
* @read_ping_status: Read status from PING frames, reported with two bits per Device.
* Bits 31:24 are reserved.
+ * @get_device_num: Callback for vendor-specific device_number allocation
+ * @put_device_num: Callback for vendor-specific device_number release
* @new_peripheral_assigned: Callback to handle enumeration of new peripheral.
*/
struct sdw_master_ops {
@@ -873,7 +875,11 @@ struct sdw_master_ops {
int (*pre_bank_switch)(struct sdw_bus *bus);
int (*post_bank_switch)(struct sdw_bus *bus);
u32 (*read_ping_status)(struct sdw_bus *bus);
- void (*new_peripheral_assigned)(struct sdw_bus *bus, int dev_num);
+ int (*get_device_num)(struct sdw_bus *bus, struct sdw_slave *slave);
+ void (*put_device_num)(struct sdw_bus *bus, struct sdw_slave *slave);
+ void (*new_peripheral_assigned)(struct sdw_bus *bus,
+ struct sdw_slave *slave,
+ int dev_num);
};
/**
@@ -908,9 +914,6 @@ struct sdw_master_ops {
* meaningful if multi_link is set. If set to 1, hardware-based
* synchronization will be used even if a stream only uses a single
* SoundWire segment.
- * @dev_num_ida_min: if set, defines the minimum values for the IDA
- * used to allocate system-unique device numbers. This value needs to be
- * identical across all SoundWire bus in the system.
*/
struct sdw_bus {
struct device *dev;
@@ -939,7 +942,6 @@ struct sdw_bus {
u32 bank_switch_timeout;
bool multi_link;
int hw_sync_min_links;
- int dev_num_ida_min;
};
int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index fa67fad4ef51..00bb22d96ae5 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -428,4 +428,11 @@ struct sdw_intel_hw_ops {
extern const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops;
extern const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops;
+/*
+ * IDA min selected to allow for 5 unconstrained devices per link,
+ * and 6 system-unique Device Numbers for wake-capable devices.
+ */
+
+#define SDW_INTEL_DEV_NUM_IDA_MIN 6
+
#endif
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 5b4fb3c791bc..2f8dc47f1eb0 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -779,7 +779,9 @@ xdr_stream_decode_uint32_array(struct xdr_stream *xdr,
if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
return -EBADMSG;
- p = xdr_inline_decode(xdr, size_mul(len, sizeof(*p)));
+ if (U32_MAX >= SIZE_MAX / sizeof(*p) && len > SIZE_MAX / sizeof(*p))
+ return -EBADMSG;
+ p = xdr_inline_decode(xdr, len * sizeof(*p));
if (unlikely(!p))
return -EBADMSG;
if (array == NULL)
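
The change above open-codes the overflow guard: when a u32 element count times the element size could exceed SIZE_MAX, the request is rejected before the multiplication can wrap. A small stand-alone sketch of the same pattern (checked_array_bytes() is a hypothetical name, not kernel API):

#include <stddef.h>
#include <stdint.h>

/* Reject len * elem_size when the product cannot be represented in a size_t. */
static int checked_array_bytes(uint32_t len, size_t elem_size, size_t *out)
{
	if (elem_size && len > SIZE_MAX / elem_size)
		return -1;
	*out = (size_t)len * elem_size;
	return 0;
}
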
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index b4536626f8ff..ecde0312dd52 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -172,14 +172,23 @@ static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
if (!mem)
return false;
- if (IS_ENABLED(CONFIG_SWIOTLB_DYNAMIC)) {
- /* Pairs with smp_wmb() in swiotlb_find_slots() and
- * swiotlb_dyn_alloc(), which modify the RCU lists.
- */
- smp_rmb();
- return swiotlb_find_pool(dev, paddr);
- }
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ /*
+ * All SWIOTLB buffer addresses must have been returned by
+ * swiotlb_tbl_map_single() and passed to a device driver.
+ * If a SWIOTLB address is checked on another CPU, then it was
+ * presumably loaded by the device driver from an unspecified private
+ * data structure. Make sure that this load is ordered before reading
+ * dev->dma_uses_io_tlb here and mem->pools in swiotlb_find_pool().
+ *
+ * This barrier pairs with smp_mb() in swiotlb_find_slots().
+ */
+ smp_rmb();
+ return READ_ONCE(dev->dma_uses_io_tlb) &&
+ swiotlb_find_pool(dev, paddr);
+#else
return paddr >= mem->defpool.start && paddr < mem->defpool.end;
+#endif
}
static inline bool is_swiotlb_force_bounce(struct device *dev)
diff --git a/include/linux/tca6416_keypad.h b/include/linux/tca6416_keypad.h
index b0d36a9934cc..5cf6f6f82aa7 100644
--- a/include/linux/tca6416_keypad.h
+++ b/include/linux/tca6416_keypad.h
@@ -25,7 +25,6 @@ struct tca6416_keys_platform_data {
unsigned int rep:1; /* enable input subsystem auto repeat */
uint16_t pinmask;
uint16_t invert;
- int irq_is_gpio;
int use_polling; /* use polling if Interrupt is not connected*/
};
#endif
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index b449a46766f5..a5ae4af955ff 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -53,6 +53,20 @@ enum thermal_notify_event {
THERMAL_EVENT_KEEP_ALIVE, /* Request for user space handler to respond */
};
+/**
+ * struct thermal_trip - representation of a point in temperature domain
+ * @temperature: temperature value in millidegrees Celsius
+ * @hysteresis: relative hysteresis in millidegrees Celsius
+ * @type: trip point type
+ * @priv: pointer to driver data associated with this trip
+ */
+struct thermal_trip {
+ int temperature;
+ int hysteresis;
+ enum thermal_trip_type type;
+ void *priv;
+};
+
struct thermal_zone_device_ops {
int (*bind) (struct thermal_zone_device *,
struct thermal_cooling_device *);
@@ -62,34 +76,16 @@ struct thermal_zone_device_ops {
int (*set_trips) (struct thermal_zone_device *, int, int);
int (*change_mode) (struct thermal_zone_device *,
enum thermal_device_mode);
- int (*get_trip_type) (struct thermal_zone_device *, int,
- enum thermal_trip_type *);
- int (*get_trip_temp) (struct thermal_zone_device *, int, int *);
int (*set_trip_temp) (struct thermal_zone_device *, int, int);
- int (*get_trip_hyst) (struct thermal_zone_device *, int, int *);
int (*set_trip_hyst) (struct thermal_zone_device *, int, int);
int (*get_crit_temp) (struct thermal_zone_device *, int *);
int (*set_emul_temp) (struct thermal_zone_device *, int);
- int (*get_trend) (struct thermal_zone_device *, int,
- enum thermal_trend *);
+ int (*get_trend) (struct thermal_zone_device *,
+ const struct thermal_trip *, enum thermal_trend *);
void (*hot)(struct thermal_zone_device *);
void (*critical)(struct thermal_zone_device *);
};
-/**
- * struct thermal_trip - representation of a point in temperature domain
- * @temperature: temperature value in miliCelsius
- * @hysteresis: relative hysteresis in miliCelsius
- * @type: trip point type
- * @priv: pointer to driver data associated with this trip
- */
-struct thermal_trip {
- int temperature;
- int hysteresis;
- enum thermal_trip_type type;
- void *priv;
-};
-
struct thermal_cooling_device_ops {
int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
@@ -304,16 +300,22 @@ int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
#endif
#ifdef CONFIG_THERMAL
-struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
- void *, struct thermal_zone_device_ops *,
- const struct thermal_zone_params *, int, int);
-
-void thermal_zone_device_unregister(struct thermal_zone_device *);
-
-struct thermal_zone_device *
-thermal_zone_device_register_with_trips(const char *, struct thermal_trip *, int, int,
- void *, struct thermal_zone_device_ops *,
- const struct thermal_zone_params *, int, int);
+struct thermal_zone_device *thermal_zone_device_register_with_trips(
+ const char *type,
+ struct thermal_trip *trips,
+ int num_trips, int mask,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp,
+ int passive_delay, int polling_delay);
+
+struct thermal_zone_device *thermal_tripless_zone_device_register(
+ const char *type,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp);
+
+void thermal_zone_device_unregister(struct thermal_zone_device *tz);
void *thermal_zone_device_priv(struct thermal_zone_device *tzd);
const char *thermal_zone_device_type(struct thermal_zone_device *tzd);
@@ -354,15 +356,26 @@ int thermal_zone_device_enable(struct thermal_zone_device *tz);
int thermal_zone_device_disable(struct thermal_zone_device *tz);
void thermal_zone_device_critical(struct thermal_zone_device *tz);
#else
-static inline struct thermal_zone_device *thermal_zone_device_register(
- const char *type, int trips, int mask, void *devdata,
- struct thermal_zone_device_ops *ops,
- const struct thermal_zone_params *tzp,
- int passive_delay, int polling_delay)
+static inline struct thermal_zone_device *thermal_zone_device_register_with_trips(
+ const char *type,
+ struct thermal_trip *trips,
+ int num_trips, int mask,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp,
+ int passive_delay, int polling_delay)
{ return ERR_PTR(-ENODEV); }
-static inline void thermal_zone_device_unregister(
- struct thermal_zone_device *tz)
+
+static inline struct thermal_zone_device *thermal_tripless_zone_device_register(
+ const char *type,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp)
+{ return ERR_PTR(-ENODEV); }
+
+static inline void thermal_zone_device_unregister(struct thermal_zone_device *tz)
{ }
+
static inline struct thermal_cooling_device *
thermal_cooling_device_register(const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
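
Taken together, the hunks above move struct thermal_trip ahead of the ops table and make thermal_zone_device_register_with_trips() and thermal_tripless_zone_device_register() the registration entry points. A hedged driver-side sketch with made-up trip values and a hypothetical "my_sensor" zone name:

#include <linux/kernel.h>
#include <linux/thermal.h>

/* Illustrative trip table: one passive and one critical trip point. */
static struct thermal_trip my_trips[] = {
	{ .temperature = 75000, .hysteresis = 2000, .type = THERMAL_TRIP_PASSIVE },
	{ .temperature = 95000, .hysteresis = 0,    .type = THERMAL_TRIP_CRITICAL },
};

static struct thermal_zone_device *my_register_zone(void *devdata,
						    struct thermal_zone_device_ops *ops)
{
	/* mask 0: no writable trips; 0 ms passive delay; poll every 1000 ms. */
	return thermal_zone_device_register_with_trips("my_sensor", my_trips,
						       ARRAY_SIZE(my_trips), 0,
						       devdata, ops, NULL,
						       0, 1000);
}
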
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index c1a0a19d80fb..21ae37e49319 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -62,13 +62,13 @@ void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
/* Used to find the offset and length of dynamic fields in trace events */
struct trace_dynamic_info {
#ifdef CONFIG_CPU_BIG_ENDIAN
- u16 offset;
u16 len;
+ u16 offset;
#else
- u16 len;
u16 offset;
+ u16 len;
#endif
-};
+} __packed;
/*
* The trace entry - the most basic unit of tracing. This is what
@@ -649,7 +649,7 @@ struct trace_event_file {
struct list_head list;
struct trace_event_call *event_call;
struct event_filter __rcu *filter;
- struct dentry *dir;
+ struct eventfs_file *ef;
struct trace_array *tr;
struct trace_subsystem_dir *system;
struct list_head triggers;
@@ -824,6 +824,7 @@ enum {
FILTER_RDYN_STRING,
FILTER_PTR_STRING,
FILTER_TRACE_FN,
+ FILTER_CPUMASK,
FILTER_COMM,
FILTER_CPU,
FILTER_STACKTRACE,
diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h
index 99912445974c..009072792fa3 100644
--- a/include/linux/tracefs.h
+++ b/include/linux/tracefs.h
@@ -21,6 +21,29 @@ struct file_operations;
#ifdef CONFIG_TRACING
+struct eventfs_file;
+
+struct dentry *eventfs_create_events_dir(const char *name,
+ struct dentry *parent);
+
+struct eventfs_file *eventfs_add_subsystem_dir(const char *name,
+ struct dentry *parent);
+
+struct eventfs_file *eventfs_add_dir(const char *name,
+ struct eventfs_file *ef_parent);
+
+int eventfs_add_file(const char *name, umode_t mode,
+ struct eventfs_file *ef_parent, void *data,
+ const struct file_operations *fops);
+
+int eventfs_add_events_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops);
+
+void eventfs_remove(struct eventfs_file *ef);
+
+void eventfs_remove_events_dir(struct dentry *dentry);
+
struct dentry *tracefs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
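
The eventfs interface declared above replaces per-event dentries with lighter eventfs_file handles. A sketch of how a tracing-side caller might build a subsystem directory and one control file; the names are hypothetical and the ERR_PTR()/NULL error handling is an assumption about these helpers, not something the header spells out:

#include <linux/err.h>
#include <linux/tracefs.h>

static int my_create_event_files(struct dentry *events, void *data,
				 const struct file_operations *fops)
{
	struct eventfs_file *sys, *dir;

	sys = eventfs_add_subsystem_dir("my_subsys", events);
	if (IS_ERR_OR_NULL(sys))		/* error convention assumed */
		return sys ? PTR_ERR(sys) : -ENOMEM;

	dir = eventfs_add_dir("my_event", sys);
	if (IS_ERR_OR_NULL(dir))
		return dir ? PTR_ERR(dir) : -ENOMEM;

	return eventfs_add_file("enable", 0644, dir, data, fops);
}
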
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index db1b0eaef4eb..0e652026b776 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -208,6 +208,9 @@ struct vdpa_map_file {
* @vdev: vdpa device
* Returns the virtio features support by the
* device
+ * @get_backend_features: Get parent-specific backend features (optional)
+ * Returns the vdpa features supported by the
+ * device.
* @set_driver_features: Set virtio features supported by the driver
* @vdev: vdpa device
* @features: feature support by the driver
@@ -358,6 +361,7 @@ struct vdpa_config_ops {
u32 (*get_vq_align)(struct vdpa_device *vdev);
u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
u64 (*get_device_features)(struct vdpa_device *vdev);
+ u64 (*get_backend_features)(const struct vdpa_device *vdev);
int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
u64 (*get_driver_features)(struct vdpa_device *vdev);
void (*set_config_cb)(struct vdpa_device *vdev,
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index de6041deee37..4cc614a38376 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
/**
* struct virtqueue - a queue to register buffers for sending or receiving.
@@ -61,6 +62,8 @@ int virtqueue_add_sgs(struct virtqueue *vq,
void *data,
gfp_t gfp);
+struct device *virtqueue_dma_dev(struct virtqueue *vq);
+
bool virtqueue_kick(struct virtqueue *vq);
bool virtqueue_kick_prepare(struct virtqueue *vq);
@@ -78,6 +81,8 @@ bool virtqueue_enable_cb(struct virtqueue *vq);
unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
+int virtqueue_set_dma_premapped(struct virtqueue *_vq);
+
bool virtqueue_poll(struct virtqueue *vq, unsigned);
bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
@@ -95,6 +100,8 @@ dma_addr_t virtqueue_get_used_addr(const struct virtqueue *vq);
int virtqueue_resize(struct virtqueue *vq, u32 num,
void (*recycle)(struct virtqueue *vq, void *buf));
+int virtqueue_reset(struct virtqueue *vq,
+ void (*recycle)(struct virtqueue *vq, void *buf));
/**
* struct virtio_device - representation of a device using virtio
@@ -184,10 +191,8 @@ struct virtio_driver {
void (*scan)(struct virtio_device *dev);
void (*remove)(struct virtio_device *dev);
void (*config_changed)(struct virtio_device *dev);
-#ifdef CONFIG_PM
int (*freeze)(struct virtio_device *dev);
int (*restore)(struct virtio_device *dev);
-#endif
};
static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
@@ -206,4 +211,19 @@ void unregister_virtio_driver(struct virtio_driver *drv);
#define module_virtio_driver(__virtio_driver) \
module_driver(__virtio_driver, register_virtio_driver, \
unregister_virtio_driver)
+
+dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);
+
+bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr);
+void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir);
+void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir);
#endif /* _LINUX_VIRTIO_H */
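
The new declarations above expose the virtqueue's DMA device and a premapped mode. A hedged sketch of a driver mapping a buffer itself, assuming virtqueue_set_dma_premapped() has already been enabled on the queue (my_premap_buf()/my_unmap_buf() are illustrative):

#include <linux/dma-mapping.h>
#include <linux/virtio.h>

/* Map @buf for device reads; the caller keeps the returned DMA address. */
static int my_premap_buf(struct virtqueue *vq, void *buf, size_t len,
			 dma_addr_t *addr)
{
	*addr = virtqueue_dma_map_single_attrs(vq, buf, len,
					       DMA_TO_DEVICE, 0);
	if (virtqueue_dma_mapping_error(vq, *addr))
		return -ENOMEM;

	return 0;
}

static void my_unmap_buf(struct virtqueue *vq, dma_addr_t addr, size_t len)
{
	virtqueue_dma_unmap_single_attrs(vq, addr, len, DMA_TO_DEVICE, 0);
}
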
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 683efe29fa69..1c1d06804d45 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -125,6 +125,17 @@ struct rcu_work {
struct workqueue_struct *wq;
};
+enum wq_affn_scope {
+ WQ_AFFN_DFL, /* use system default */
+ WQ_AFFN_CPU, /* one pod per CPU */
+	WQ_AFFN_SMT,			/* one pod per SMT */
+ WQ_AFFN_CACHE, /* one pod per LLC */
+ WQ_AFFN_NUMA, /* one pod per NUMA node */
+ WQ_AFFN_SYSTEM, /* one pod across the whole system */
+
+ WQ_AFFN_NR_TYPES,
+};
+
/**
* struct workqueue_attrs - A struct for workqueue attributes.
*
@@ -138,17 +149,58 @@ struct workqueue_attrs {
/**
* @cpumask: allowed CPUs
+ *
+ * Work items in this workqueue are affine to these CPUs and not allowed
+ * to execute on other CPUs. A pool serving a workqueue must have the
+ * same @cpumask.
*/
cpumask_var_t cpumask;
/**
- * @no_numa: disable NUMA affinity
+ * @__pod_cpumask: internal attribute used to create per-pod pools
+ *
+ * Internal use only.
+ *
+ * Per-pod unbound worker pools are used to improve locality. Always a
+ * subset of ->cpumask. A workqueue can be associated with multiple
+ * worker pools with disjoint @__pod_cpumask's. Whether the enforcement
+ * of a pool's @__pod_cpumask is strict depends on @affn_strict.
+ */
+ cpumask_var_t __pod_cpumask;
+
+ /**
+ * @affn_strict: affinity scope is strict
+ *
+ * If clear, workqueue will make a best-effort attempt at starting the
+ * worker inside @__pod_cpumask but the scheduler is free to migrate it
+ * outside.
*
- * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
- * only modifies how :c:func:`apply_workqueue_attrs` select pools and thus
- * doesn't participate in pool hash calculations or equality comparisons.
+ * If set, workers are only allowed to run inside @__pod_cpumask.
+ */
+ bool affn_strict;
+
+ /*
+	 * The fields below aren't properties of a worker_pool. They only modify
+	 * how :c:func:`apply_workqueue_attrs` selects pools and thus don't
+	 * participate in pool hash calculations or equality comparisons.
+ */
+
+ /**
+ * @affn_scope: unbound CPU affinity scope
+ *
+ * CPU pods are used to improve execution locality of unbound work
+ * items. There are multiple pod types, one for each wq_affn_scope, and
+ * every CPU in the system belongs to one pod in every pod type. CPUs
+ * that belong to the same pod share the worker pool. For example,
+ * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker
+ * pool for each NUMA node.
+ */
+ enum wq_affn_scope affn_scope;
+
+ /**
+ * @ordered: work items must be executed one by one in queueing order
*/
- bool no_numa;
+ bool ordered;
};
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
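
The attribute rework above replaces no_numa with per-pod affinity scopes. A minimal sketch of an unbound workqueue that keeps the default scope and exposes the new attributes through sysfs; the workqueue name is made up, and the affinity_scope/affinity_strict sysfs files are an assumption based on WQ_SYSFS behaviour:

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static int __init my_wq_init(void)
{
	/* WQ_SYSFS exposes per-workqueue attributes such as the affinity scope. */
	my_wq = alloc_workqueue("my_unbound_wq", WQ_UNBOUND | WQ_SYSFS, 0);
	if (!my_wq)
		return -ENOMEM;

	return 0;
}
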
@@ -343,14 +395,10 @@ enum {
__WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
- WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
+ WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE,
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
};
-/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
-#define WQ_UNBOUND_MAX_ACTIVE \
- max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
-
/*
* System-wide workqueues which are always present.
*
@@ -391,7 +439,7 @@ extern struct workqueue_struct *system_freezable_power_efficient_wq;
* alloc_workqueue - allocate a workqueue
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags
- * @max_active: max in-flight work items, 0 for default
+ * @max_active: max in-flight work items per CPU, 0 for default
* remaining args: args for @fmt
*
* Allocate a workqueue with the specified parameters. For detailed
@@ -569,6 +617,7 @@ static inline bool schedule_work(struct work_struct *work)
/*
* Detect attempt to flush system-wide workqueues at compile time when possible.
+ * Warn about attempts to flush system-wide workqueues at runtime.
*
* See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
* for reasons and steps for converting system-wide workqueues into local workqueues.
@@ -576,52 +625,13 @@ static inline bool schedule_work(struct work_struct *work)
extern void __warn_flushing_systemwide_wq(void)
__compiletime_warning("Please avoid flushing system-wide workqueues.");
-/**
- * flush_scheduled_work - ensure that any scheduled work has run to completion.
- *
- * Forces execution of the kernel-global workqueue and blocks until its
- * completion.
- *
- * It's very easy to get into trouble if you don't take great care.
- * Either of the following situations will lead to deadlock:
- *
- * One of the work items currently on the workqueue needs to acquire
- * a lock held by your code or its caller.
- *
- * Your code is running in the context of a work routine.
- *
- * They will be detected by lockdep when they occur, but the first might not
- * occur very often. It depends on what work items are on the workqueue and
- * what locks they need, which you have no control over.
- *
- * In most situations flushing the entire workqueue is overkill; you merely
- * need to know that a particular work item isn't queued and isn't running.
- * In such cases you should use cancel_delayed_work_sync() or
- * cancel_work_sync() instead.
- *
- * Please stop calling this function! A conversion to stop flushing system-wide
- * workqueues is in progress. This function will be removed after all in-tree
- * users stopped calling this function.
- */
-/*
- * The background of commit 771c035372a036f8 ("deprecate the
- * '__deprecated' attribute warnings entirely and for good") is that,
- * since Linus builds all modules between every single pull he does,
- * the standard kernel build needs to be _clean_ in order to be able to
- * notice when new problems happen. Therefore, don't emit warning while
- * there are in-tree users.
- */
+/* Please stop using this function; it will be removed in the near future. */
#define flush_scheduled_work() \
({ \
- if (0) \
- __warn_flushing_systemwide_wq(); \
+ __warn_flushing_systemwide_wq(); \
__flush_workqueue(system_wq); \
})
-/*
- * Although there is no longer in-tree caller, for now just emit warning
- * in order to give out-of-tree callers time to update.
- */
#define flush_workqueue(wq) \
({ \
struct workqueue_struct *_wq = (wq); \
@@ -714,5 +724,6 @@ int workqueue_offline_cpu(unsigned int cpu);
void __init workqueue_init_early(void);
void __init workqueue_init(void);
+void __init workqueue_init_topology(void);
#endif
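
The remaining flush_scheduled_work() wrapper above now warns unconditionally. A sketch of the conversion the comment asks for, waiting only on the work item a driver owns instead of flushing the whole system workqueue (my_work/my_teardown are illustrative):

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_teardown(void)
{
	/* Was: flush_scheduled_work(); wait only for our own work item instead. */
	cancel_work_sync(&my_work);
}
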
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 741703b45f61..cb571dfcf4b1 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -856,6 +856,9 @@ static inline int __must_check xa_insert_irq(struct xarray *xa,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be used on an xarray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -886,6 +889,9 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be used on an xarray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -916,6 +922,9 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be used on an xarray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -949,6 +958,9 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be used on an xarray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -983,6 +995,9 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be used on an xarray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -1017,6 +1032,9 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be used on an xarray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
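
The notes added to each allocating entry point above state the same precondition: the xa_alloc() family requires XA_FLAGS_ALLOC. A short sketch, assuming a hypothetical my_ids array (DEFINE_XARRAY_ALLOC() is equivalent to xa_init_flags() with XA_FLAGS_ALLOC):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(my_ids);	/* initialized with XA_FLAGS_ALLOC */

static int my_store(void *entry, u32 *id)
{
	/* Valid only because my_ids was created with XA_FLAGS_ALLOC. */
	return xa_alloc(&my_ids, id, entry, xa_limit_32b, GFP_KERNEL);
}
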
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index e6359f7346f1..c33348ba1657 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -350,7 +350,7 @@ struct hci_dev {
struct list_head list;
struct mutex lock;
- char name[8];
+ const char *name;
unsigned long flags;
__u16 id;
__u8 bus;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 3a4b684f89bf..7192346e4a22 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -5941,6 +5941,7 @@ void wiphy_delayed_work_cancel(struct wiphy *wiphy,
* @event_lock: (private) lock for event list
* @owner_nlportid: (private) owner socket port ID
* @nl_owner_dead: (private) owner socket went away
+ * @cqm_rssi_work: (private) CQM RSSI reporting work
* @cqm_config: (private) nl80211 RSSI monitor state
* @pmsr_list: (private) peer measurement requests
* @pmsr_lock: (private) peer measurements requests/results lock
@@ -6013,7 +6014,8 @@ struct wireless_dev {
} wext;
#endif
- struct cfg80211_cqm_config *cqm_config;
+ struct wiphy_work cqm_rssi_work;
+ struct cfg80211_cqm_config __rcu *cqm_config;
struct list_head pmsr_list;
spinlock_t pmsr_lock;
@@ -7231,7 +7233,7 @@ struct cfg80211_rx_assoc_resp {
int uapsd_queues;
const u8 *ap_mld_addr;
struct {
- const u8 *addr;
+ u8 addr[ETH_ALEN] __aligned(2);
struct cfg80211_bss *bss;
u16 status;
} links[IEEE80211_MLD_MAX_NUM_LINKS];
diff --git a/include/net/ip.h b/include/net/ip.h
index 19adacd5ece0..3489a1cca5e7 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -57,6 +57,7 @@ struct inet_skb_parm {
#define IPSKB_FRAG_PMTU BIT(6)
#define IPSKB_L3SLAVE BIT(7)
#define IPSKB_NOPOLICY BIT(8)
+#define IPSKB_MULTIPATH BIT(9)
u16 frag_max_size;
};
@@ -94,7 +95,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
ipcm_init(ipcm);
ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark);
- ipcm->sockc.tsflags = inet->sk.sk_tsflags;
+ ipcm->sockc.tsflags = READ_ONCE(inet->sk.sk_tsflags);
ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
ipcm->addr = inet->inet_saddr;
ipcm->protocol = inet->inet_num;
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index c9ff23cf313e..1ba9f4ddf2f6 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -642,7 +642,10 @@ static inline bool fib6_rules_early_flow_dissect(struct net *net,
if (!net->ipv6.fib6_rules_require_fldissect)
return false;
- skb_flow_dissect_flow_keys(skb, flkeys, flag);
+ memset(flkeys, 0, sizeof(*flkeys));
+ __skb_flow_dissect(net, skb, &flow_keys_dissector,
+ flkeys, NULL, 0, 0, 0, flag);
+
fl6->fl6_sport = flkeys->ports.src;
fl6->fl6_dport = flkeys->ports.dst;
fl6->flowi6_proto = flkeys->basic.ip_proto;
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index a378eff827c7..15de07d36540 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -154,6 +154,7 @@ struct fib_info {
int fib_nhs;
bool fib_nh_is_v6;
bool nh_updated;
+ bool pfsrc_removed;
struct nexthop *nh;
struct rcu_head rcu;
struct fib_nh fib_nh[];
@@ -418,7 +419,10 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net,
if (!net->ipv4.fib_rules_require_fldissect)
return false;
- skb_flow_dissect_flow_keys(skb, flkeys, flag);
+ memset(flkeys, 0, sizeof(*flkeys));
+ __skb_flow_dissect(net, skb, &flow_keys_dissector,
+ flkeys, NULL, 0, 0, 0, flag);
+
fl4->fl4_sport = flkeys->ports.src;
fl4->fl4_dport = flkeys->ports.dst;
fl4->flowi4_proto = flkeys->basic.ip_proto;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index e8750b4ef7e1..f346b4efbc30 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -483,15 +483,14 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
u64_stats_inc(&tstats->tx_packets);
u64_stats_update_end(&tstats->syncp);
put_cpu_ptr(tstats);
+ return;
+ }
+
+ if (pkt_len < 0) {
+ DEV_STATS_INC(dev, tx_errors);
+ DEV_STATS_INC(dev, tx_aborted_errors);
} else {
- struct net_device_stats *err_stats = &dev->stats;
-
- if (pkt_len < 0) {
- err_stats->tx_errors++;
- err_stats->tx_aborted_errors++;
- } else {
- err_stats->tx_dropped++;
- }
+ DEV_STATS_INC(dev, tx_dropped);
}
}
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 0675be0f3fa0..c6932d1a3fa8 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -784,6 +784,11 @@ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
cpu_to_be32(0x0000ffff))) == 0UL;
}
+static inline bool ipv6_addr_v4mapped_any(const struct in6_addr *a)
+{
+ return ipv6_addr_v4mapped(a) && ipv4_is_zeronet(a->s6_addr32[3]);
+}
+
static inline bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a)
{
return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]);
@@ -1360,7 +1365,7 @@ static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val)
return 0;
}
-static inline int ip6_sock_set_addr_preferences(struct sock *sk, bool val)
+static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val)
{
int ret;
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 9f70b4332238..4d43adf18606 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -103,9 +103,10 @@ struct mana_txq {
/* skb data and frags dma mappings */
struct mana_skb_head {
- dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];
+	/* GSO pkts may have 2 SGEs for the linear part */
+ dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];
- u32 size[MAX_SKB_FRAGS + 1];
+ u32 size[MAX_SKB_FRAGS + 2];
};
#define MANA_HEADROOM sizeof(struct mana_skb_head)
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 6da68886fabb..07022bb0d44d 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -539,7 +539,7 @@ static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
READ_ONCE(hh->hh_len))
return neigh_hh_output(hh, skb);
- return n->output(n, skb);
+ return READ_ONCE(n->output)(n, skb);
}
static inline struct neighbour *
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index dd40c75011d2..7c816359d5a9 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1682,7 +1682,7 @@ struct nft_trans_gc {
struct net *net;
struct nft_set *set;
u32 seq;
- u8 count;
+ u16 count;
void *priv[NFT_TRANS_GC_BATCHCOUNT];
struct rcu_head rcu;
};
@@ -1700,8 +1700,9 @@ void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans);
void nft_trans_gc_elem_add(struct nft_trans_gc *gc, void *priv);
-struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
- unsigned int gc_seq);
+struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
+ unsigned int gc_seq);
+struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc);
void nft_setelem_data_deactivate(const struct net *net,
const struct nft_set *set,
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 94231533a369..8e7751464ff5 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -16,13 +16,13 @@
* page_pool_alloc_pages() call. Drivers should use
* page_pool_dev_alloc_pages() replacing dev_alloc_pages().
*
- * API keeps track of in-flight pages, in order to let API user know
+ * The API keeps track of in-flight pages, in order to let API users know
* when it is safe to free a page_pool object. Thus, API users
* must call page_pool_put_page() to free the page, or attach
- * the page to a page_pool-aware objects like skbs marked with
+ * the page to a page_pool-aware object like skbs marked with
* skb_mark_for_recycle().
*
- * API user must call page_pool_put_page() once on a page, as it
+ * API users must call page_pool_put_page() once on a page, as it
* will either recycle the page, or in case of refcnt > 1, it will
* release the DMA mapping and in-flight state accounting.
*/
diff --git a/include/net/scm.h b/include/net/scm.h
index c5bcdf65f55c..e8c76b4be2fe 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -9,6 +9,7 @@
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/sched/signal.h>
+#include <net/compat.h>
/* Well, we should have at least one descriptor open
* to accept passed FDs 8)
@@ -123,14 +124,17 @@ static inline bool scm_has_secdata(struct socket *sock)
static __inline__ void scm_pidfd_recv(struct msghdr *msg, struct scm_cookie *scm)
{
struct file *pidfd_file = NULL;
- int pidfd;
+ int len, pidfd;
- /*
- * put_cmsg() doesn't return an error if CMSG is truncated,
+ /* put_cmsg() doesn't return an error if CMSG is truncated,
* that's why we need to opencode these checks here.
*/
- if ((msg->msg_controllen <= sizeof(struct cmsghdr)) ||
- (msg->msg_controllen - sizeof(struct cmsghdr)) < sizeof(int)) {
+ if (msg->msg_flags & MSG_CMSG_COMPAT)
+ len = sizeof(struct compat_cmsghdr) + sizeof(int);
+ else
+ len = sizeof(struct cmsghdr) + sizeof(int);
+
+ if (msg->msg_controllen < len) {
msg->msg_flags |= MSG_CTRUNC;
return;
}
diff --git a/include/net/sock.h b/include/net/sock.h
index 11d503417591..b770261fbdaf 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1053,6 +1053,12 @@ static inline void sk_wmem_queued_add(struct sock *sk, int val)
WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
}
+static inline void sk_forward_alloc_add(struct sock *sk, int val)
+{
+ /* Paired with lockless reads of sk->sk_forward_alloc */
+ WRITE_ONCE(sk->sk_forward_alloc, sk->sk_forward_alloc + val);
+}
+
void sk_stream_write_space(struct sock *sk);
/* OOB backlog add */
@@ -1377,7 +1383,7 @@ static inline int sk_forward_alloc_get(const struct sock *sk)
if (sk->sk_prot->forward_alloc_get)
return sk->sk_prot->forward_alloc_get(sk);
#endif
- return sk->sk_forward_alloc;
+ return READ_ONCE(sk->sk_forward_alloc);
}
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
@@ -1673,14 +1679,14 @@ static inline void sk_mem_charge(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return;
- sk->sk_forward_alloc -= size;
+ sk_forward_alloc_add(sk, -size);
}
static inline void sk_mem_uncharge(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return;
- sk->sk_forward_alloc += size;
+ sk_forward_alloc_add(sk, size);
sk_mem_reclaim(sk);
}
@@ -1900,7 +1906,9 @@ struct sockcm_cookie {
static inline void sockcm_init(struct sockcm_cookie *sockc,
const struct sock *sk)
{
- *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
+ *sockc = (struct sockcm_cookie) {
+ .tsflags = READ_ONCE(sk->sk_tsflags)
+ };
}
int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
@@ -2695,9 +2703,9 @@ void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
static inline void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
- ktime_t kt = skb->tstamp;
struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
-
+ u32 tsflags = READ_ONCE(sk->sk_tsflags);
+ ktime_t kt = skb->tstamp;
/*
* generate control messages if
* - receive time stamping in software requested
@@ -2705,10 +2713,10 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
* - hardware time stamps available and wanted
*/
if (sock_flag(sk, SOCK_RCVTSTAMP) ||
- (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
- (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
+ (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
+ (kt && tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
(hwtstamps->hwtstamp &&
- (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
+ (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
else
sock_write_timestamp(sk, kt);
@@ -2730,7 +2738,8 @@ static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
#define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \
SOF_TIMESTAMPING_RAW_HARDWARE)
- if (sk->sk_flags & FLAGS_RECV_CMSGS || sk->sk_tsflags & TSFLAGS_ANY)
+ if (sk->sk_flags & FLAGS_RECV_CMSGS ||
+ READ_ONCE(sk->sk_tsflags) & TSFLAGS_ANY)
__sock_recv_cmsgs(msg, sk, skb);
else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
sock_write_timestamp(sk, skb->tstamp);
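
Several hunks above follow the same annotation discipline: fields read without the owning lock get READ_ONCE(), and their writers publish updates with WRITE_ONCE(). A generic, stand-alone sketch of that pairing (struct my_counter is hypothetical and unrelated to struct sock):

#include <linux/compiler.h>

struct my_counter {
	int val;
};

/* Writer side: publish the new value; pairs with lockless READ_ONCE() readers. */
static inline void my_counter_add(struct my_counter *c, int delta)
{
	WRITE_ONCE(c->val, c->val + delta);
}

/* Reader side: may run without the writer's lock held. */
static inline int my_counter_read(const struct my_counter *c)
{
	return READ_ONCE(c->val);
}
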
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 91688d0dadcd..7b1a720691ae 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -348,12 +348,14 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
bool force_schedule);
-static inline void tcp_dec_quickack_mode(struct sock *sk,
- const unsigned int pkts)
+static inline void tcp_dec_quickack_mode(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_ack.quick) {
+ /* How many ACKs S/ACKing new data have we sent? */
+ const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
+
if (pkts >= icsk->icsk_ack.quick) {
icsk->icsk_ack.quick = 0;
/* Leaving quickack mode we deflate ATO. */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 1e7774ac808f..533ab92684d8 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -4440,8 +4440,6 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
const struct sockaddr *addr);
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
unsigned int port);
-struct net_device *ib_device_netdev(struct ib_device *dev, u32 port);
-
struct ib_wq *ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr);
int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 03abd30e6c8c..2b22f153ef63 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -115,27 +115,6 @@ struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
void iw_destroy_cm_id(struct iw_cm_id *cm_id);
/**
- * iw_cm_bind_qp - Unbind the specified IW CM identifier and QP
- *
- * @cm_id: The IW CM idenfier to unbind from the QP.
- * @qp: The QP
- *
- * This is called by the provider when destroying the QP to ensure
- * that any references held by the IWCM are released. It may also
- * be called by the IWCM when destroying a CM_ID to that any
- * references held by the provider are released.
- */
-void iw_cm_unbind_qp(struct iw_cm_id *cm_id, struct ib_qp *qp);
-
-/**
- * iw_cm_get_qp - Return the ib_qp associated with a QPN
- *
- * @ib_device: The IB device
- * @qpn: The queue pair number
- */
-struct ib_qp *iw_cm_get_qp(struct ib_device *device, int qpn);
-
-/**
* iw_cm_listen - Listen for incoming connection requests on the
* specified IW CM id.
*
diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
index 9eb75683e012..9705b2a98e49 100644
--- a/include/rv/da_monitor.h
+++ b/include/rv/da_monitor.h
@@ -262,7 +262,7 @@ static inline void da_monitor_destroy_##name(void) \
/* \
* per-cpu monitor variables \
*/ \
-DEFINE_PER_CPU(struct da_monitor, da_mon_##name); \
+static DEFINE_PER_CPU(struct da_monitor, da_mon_##name); \
\
/* \
* da_get_monitor_##name - return current CPU monitor address \
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 159823e0afbf..8a43534eea5c 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -23,22 +23,12 @@
struct block_device;
-enum sas_class {
- SAS,
- EXPANDER
-};
-
enum sas_phy_role {
PHY_ROLE_NONE = 0,
PHY_ROLE_TARGET = 0x40,
PHY_ROLE_INITIATOR = 0x80,
};
-enum sas_phy_type {
- PHY_TYPE_PHYSICAL,
- PHY_TYPE_VIRTUAL
-};
-
/* The events are mnemonically described in sas_dump.c
* so when updating/adding events here, please also
* update the other file too.
@@ -258,7 +248,6 @@ struct asd_sas_port {
/* public: */
int id;
- enum sas_class class;
u8 sas_addr[SAS_ADDR_SIZE];
u8 attached_sas_addr[SAS_ADDR_SIZE];
enum sas_protocol iproto;
@@ -319,11 +308,9 @@ struct asd_sas_phy {
int enabled; /* must be set */
int id; /* must be set */
- enum sas_class class;
enum sas_protocol iproto;
enum sas_protocol tproto;
- enum sas_phy_type type;
enum sas_phy_role role;
enum sas_oob_mode oob_mode;
enum sas_linkrate linkrate;
@@ -346,11 +333,6 @@ struct asd_sas_phy {
void *lldd_phy; /* not touched by the sas_class_code */
};
-struct scsi_core {
- struct Scsi_Host *shost;
-
-};
-
enum sas_ha_state {
SAS_HA_REGISTERED,
SAS_HA_DRAINING,
@@ -371,12 +353,11 @@ struct sas_ha_struct {
struct mutex disco_mutex;
- struct scsi_core core;
+ struct Scsi_Host *shost;
/* public: */
char *sas_ha_name;
struct device *dev; /* should be set */
- struct module *lldd_module; /* should be set */
struct workqueue_struct *event_q;
struct workqueue_struct *disco_q;
@@ -544,12 +525,9 @@ struct sas_ata_task {
struct host_to_dev_fis fis;
u8 atapi_packet[16]; /* 0 if not ATAPI task */
- u8 retry_count; /* hardware retry, should be > 0 */
-
u8 dma_xfer:1; /* PIO:0 or DMA:1 */
u8 use_ncq:1;
- u8 set_affil_pol:1;
- u8 stp_affil_pol:1;
+ u8 return_fis_on_success:1;
u8 device_control_reg_update:1;
@@ -582,12 +560,8 @@ enum task_attribute {
};
struct sas_ssp_task {
- u8 retry_count; /* hardware retry, should be > 0 */
-
u8 LUN[8];
- u8 enable_first_burst:1;
enum task_attribute task_attr;
- u8 task_prio;
struct scsi_cmnd *cmd;
};
@@ -727,8 +701,6 @@ extern struct device_attribute dev_attr_phy_event_threshold;
int sas_discover_root_expander(struct domain_device *);
-void sas_init_ex_attr(void);
-
int sas_ex_revalidate_domain(struct domain_device *);
void sas_unregister_domain_devices(struct asd_sas_port *port, int gone);
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index ec093594ba53..4498f845b112 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -157,6 +157,9 @@ enum scsi_disposition {
#define SCSI_3 4 /* SPC */
#define SCSI_SPC_2 5
#define SCSI_SPC_3 6
+#define SCSI_SPC_4 7
+#define SCSI_SPC_5 8
+#define SCSI_SPC_6 14
/*
* INQ PERIPHERAL QUALIFIERS
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index b9230b6add04..fd41fdac0a8e 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -161,6 +161,10 @@ struct scsi_device {
* pass settings from slave_alloc to scsi
* core. */
unsigned int eh_timeout; /* Error handling timeout */
+
+ bool manage_system_start_stop; /* Let HLD (sd) manage system start/stop */
+ bool manage_runtime_start_stop; /* Let HLD (sd) manage runtime start/stop */
+
unsigned removable:1;
unsigned changed:1; /* Data invalid due to media change */
unsigned busy:1; /* Used to prevent races */
@@ -193,7 +197,6 @@ struct scsi_device {
unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
unsigned no_start_on_add:1; /* do not issue start on add */
unsigned allow_restart:1; /* issue START_UNIT in error handler */
- unsigned manage_start_stop:1; /* Let HLD (sd) manage start/stop */
unsigned no_start_on_resume:1; /* Do not issue START_STOP_UNIT on resume */
unsigned start_stop_pwr_cond:1; /* Set power cond. in START_STOP_UNIT */
unsigned no_uld_attach:1; /* disable connecting to upper level drivers */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 70b7475dcf56..4c2dc8150c6d 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -764,12 +764,12 @@ scsi_template_proc_dir(const struct scsi_host_template *sht);
#define scsi_template_proc_dir(sht) NULL
#endif
extern void scsi_scan_host(struct Scsi_Host *);
-extern void scsi_rescan_device(struct device *);
+extern int scsi_rescan_device(struct scsi_device *sdev);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
-extern struct Scsi_Host *scsi_host_lookup(unsigned short);
+extern struct Scsi_Host *scsi_host_lookup(unsigned int hostnum);
extern const char *scsi_host_state_name(enum scsi_host_state);
extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
enum scsi_host_status status);
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 34c03707fb6e..fb3399e4cd29 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -472,7 +472,6 @@ extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host *shost,
uint32_t iface_type,
uint32_t iface_num, int dd_size);
extern void iscsi_destroy_iface(struct iscsi_iface *iface);
-extern struct iscsi_iface *iscsi_lookup_iface(int handle);
extern char *iscsi_get_port_speed_name(struct Scsi_Host *shost);
extern char *iscsi_get_port_state_name(struct Scsi_Host *shost);
extern int iscsi_is_session_dev(const struct device *dev);
diff --git a/include/soc/mediatek/smi.h b/include/soc/mediatek/smi.h
index dfd8efca5e60..000eb1cf68b7 100644
--- a/include/soc/mediatek/smi.h
+++ b/include/soc/mediatek/smi.h
@@ -13,6 +13,7 @@
enum iommu_atf_cmd {
IOMMU_ATF_CMD_CONFIG_SMI_LARB, /* For mm master to en/disable iommu */
+ IOMMU_ATF_CMD_CONFIG_INFRA_IOMMU, /* For infra master to enable iommu */
IOMMU_ATF_CMD_MAX,
};
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index c9a8bce9a785..d70c55f17df7 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -142,7 +142,7 @@ struct snd_dmaengine_pcm_config {
struct snd_pcm_substream *substream);
int (*process)(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
- struct iov_iter *buf, unsigned long bytes);
+ unsigned long bytes);
dma_filter_fn compat_filter_fn;
struct device *dma_dev;
const char *chan_names[SNDRV_PCM_STREAM_LAST + 1];
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index 17bea3144551..ceca69b46a82 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -139,7 +139,7 @@ struct snd_soc_component_driver {
struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
int (*copy)(struct snd_soc_component *component,
struct snd_pcm_substream *substream, int channel,
- unsigned long pos, struct iov_iter *buf,
+ unsigned long pos, struct iov_iter *iter,
unsigned long bytes);
struct page *(*page)(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
@@ -511,7 +511,7 @@ int snd_soc_pcm_component_ioctl(struct snd_pcm_substream *substream,
int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream);
int snd_soc_pcm_component_copy(struct snd_pcm_substream *substream,
int channel, unsigned long pos,
- struct iov_iter *buf, unsigned long bytes);
+ struct iov_iter *iter, unsigned long bytes);
struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream,
unsigned long offset);
int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream,
diff --git a/include/sound/soc.h b/include/sound/soc.h
index fa2337a3cf4c..37f9d3fe302a 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1126,6 +1126,8 @@ struct snd_soc_pcm_runtime {
unsigned int pop_wait:1;
unsigned int fe_compr:1; /* for Dynamic PCM */
+ bool initialized;
+
int num_components;
struct snd_soc_component *components[]; /* CPU/Codec/Platform */
};
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 4c15420e8965..60af7c63b34e 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -50,9 +50,6 @@ struct sock;
#define TA_LOGIN_TIMEOUT 15
#define TA_LOGIN_TIMEOUT_MAX 30
#define TA_LOGIN_TIMEOUT_MIN 5
-#define TA_NETIF_TIMEOUT 2
-#define TA_NETIF_TIMEOUT_MAX 15
-#define TA_NETIF_TIMEOUT_MIN 2
#define TA_GENERATE_NODE_ACLS 0
#define TA_DEFAULT_CMDSN_DEPTH 64
#define TA_DEFAULT_CMDSN_DEPTH_MAX 512
@@ -773,7 +770,6 @@ to_iscsi_nacl(struct se_node_acl *se_nacl)
struct iscsi_tpg_attrib {
u32 authentication;
u32 login_timeout;
- u32 netif_timeout;
u32 generate_node_acls;
u32 cache_dynamic_acls;
u32 default_cmdsn_depth;
diff --git a/include/trace/events/task.h b/include/trace/events/task.h
index 64d160930b0d..47b527464d1a 100644
--- a/include/trace/events/task.h
+++ b/include/trace/events/task.h
@@ -47,7 +47,7 @@ TRACE_EVENT(task_rename,
TP_fast_assign(
__entry->pid = task->pid;
memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
- strlcpy(entry->newcomm, comm, TASK_COMM_LEN);
+ strscpy(entry->newcomm, comm, TASK_COMM_LEN);
__entry->oom_score_adj = task->signal->oom_score_adj;
),
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index 44a3f565264d..0577f0cdd231 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -6,26 +6,26 @@
#define _TRACE_XEN_H
#include <linux/tracepoint.h>
-#include <asm/paravirt_types.h>
+#include <asm/xen/hypervisor.h>
#include <asm/xen/trace_types.h>
struct multicall_entry;
/* Multicalls */
DECLARE_EVENT_CLASS(xen_mc__batch,
- TP_PROTO(enum paravirt_lazy_mode mode),
+ TP_PROTO(enum xen_lazy_mode mode),
TP_ARGS(mode),
TP_STRUCT__entry(
- __field(enum paravirt_lazy_mode, mode)
+ __field(enum xen_lazy_mode, mode)
),
TP_fast_assign(__entry->mode = mode),
TP_printk("start batch LAZY_%s",
- (__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" :
- (__entry->mode == PARAVIRT_LAZY_CPU) ? "CPU" : "NONE")
+ (__entry->mode == XEN_LAZY_MMU) ? "MMU" :
+ (__entry->mode == XEN_LAZY_CPU) ? "CPU" : "NONE")
);
#define DEFINE_XEN_MC_BATCH(name) \
DEFINE_EVENT(xen_mc__batch, name, \
- TP_PROTO(enum paravirt_lazy_mode mode), \
+ TP_PROTO(enum xen_lazy_mode mode), \
TP_ARGS(mode))
DEFINE_XEN_MC_BATCH(xen_mc_batch);
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 8d7402c13e56..eaf9f248619f 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -44,6 +44,16 @@ extern "C" {
#define NOUVEAU_GETPARAM_PTIMER_TIME 14
#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
+
+/**
+ * @NOUVEAU_GETPARAM_EXEC_PUSH_MAX
+ *
+ * Query the maximum number of IBs that can be pushed through a single
+ * &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC
+ * ioctl().
+ */
+#define NOUVEAU_GETPARAM_EXEC_PUSH_MAX 17
+
struct drm_nouveau_getparam {
__u64 param;
__u64 value;
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 8790b3962e4b..0448700890f7 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1962,7 +1962,9 @@ union bpf_attr {
* performed again, if the helper is used in combination with
* direct packet access.
* Return
- * 0 on success, or a negative error in case of failure.
+ * 0 on success, or a negative error in case of failure. Positive
+ * error indicates a potential drop or congestion in the target
+ * device. The particular positive error codes are not defined.
*
* u64 bpf_get_current_pid_tgid(void)
* Description
diff --git a/include/uapi/linux/cgroupstats.h b/include/uapi/linux/cgroupstats.h
index aa306e4cd6c1..80b2c8594480 100644
--- a/include/uapi/linux/cgroupstats.h
+++ b/include/uapi/linux/cgroupstats.h
@@ -24,8 +24,6 @@
* basis. This data is shared using taskstats.
*
* Most of these states are derived by looking at the task->state value
- * For the nr_io_wait state, a flag in the delay accounting structure
- * indicates that the task is waiting on IO
*
* Each member is aligned to a 8 byte boundary.
*/
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index ee0bcff14b69..9b731976ce2f 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -445,6 +445,8 @@ typedef struct elf64_shdr {
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */
+#define NT_RISCV_CSR 0x900 /* RISC-V Control and Status Registers */
+#define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */
#define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */
#define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */
#define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index b3fcab13fcd3..db92a7202b34 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -207,6 +207,10 @@
* - add FUSE_EXT_GROUPS
* - add FUSE_CREATE_SUPP_GROUP
* - add FUSE_HAS_EXPIRE_ONLY
+ *
+ * 7.39
+ * - add FUSE_DIRECT_IO_RELAX
+ * - add FUSE_STATX and related structures
*/
#ifndef _LINUX_FUSE_H
@@ -242,7 +246,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 38
+#define FUSE_KERNEL_MINOR_VERSION 39
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -269,6 +273,40 @@ struct fuse_attr {
uint32_t flags;
};
+/*
+ * The following structures are bit-for-bit compatible with the statx(2) ABI in
+ * Linux.
+ */
+struct fuse_sx_time {
+ int64_t tv_sec;
+ uint32_t tv_nsec;
+ int32_t __reserved;
+};
+
+struct fuse_statx {
+ uint32_t mask;
+ uint32_t blksize;
+ uint64_t attributes;
+ uint32_t nlink;
+ uint32_t uid;
+ uint32_t gid;
+ uint16_t mode;
+ uint16_t __spare0[1];
+ uint64_t ino;
+ uint64_t size;
+ uint64_t blocks;
+ uint64_t attributes_mask;
+ struct fuse_sx_time atime;
+ struct fuse_sx_time btime;
+ struct fuse_sx_time ctime;
+ struct fuse_sx_time mtime;
+ uint32_t rdev_major;
+ uint32_t rdev_minor;
+ uint32_t dev_major;
+ uint32_t dev_minor;
+ uint64_t __spare2[14];
+};
+
struct fuse_kstatfs {
uint64_t blocks;
uint64_t bfree;
@@ -371,6 +409,8 @@ struct fuse_file_lock {
* FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir,
* symlink and mknod (single group that matches parent)
* FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation
+ * FUSE_DIRECT_IO_RELAX: relax restrictions in FOPEN_DIRECT_IO mode, for now
+ * allow shared mmap
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -409,6 +449,7 @@ struct fuse_file_lock {
#define FUSE_HAS_INODE_DAX (1ULL << 33)
#define FUSE_CREATE_SUPP_GROUP (1ULL << 34)
#define FUSE_HAS_EXPIRE_ONLY (1ULL << 35)
+#define FUSE_DIRECT_IO_RELAX (1ULL << 36)
/**
* CUSE INIT request/reply flags
@@ -575,6 +616,7 @@ enum fuse_opcode {
FUSE_REMOVEMAPPING = 49,
FUSE_SYNCFS = 50,
FUSE_TMPFILE = 51,
+ FUSE_STATX = 52,
/* CUSE specific operations */
CUSE_INIT = 4096,
@@ -639,6 +681,22 @@ struct fuse_attr_out {
struct fuse_attr attr;
};
+struct fuse_statx_in {
+ uint32_t getattr_flags;
+ uint32_t reserved;
+ uint64_t fh;
+ uint32_t sx_flags;
+ uint32_t sx_mask;
+};
+
+struct fuse_statx_out {
+ uint64_t attr_valid; /* Cache timeout for the attributes */
+ uint32_t attr_valid_nsec;
+ uint32_t flags;
+ uint64_t spare[2];
+ struct fuse_statx stat;
+};
+
#define FUSE_COMPAT_MKNOD_IN_SIZE 8
struct fuse_mknod_in {
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 8466c2a9938f..ca30232b7bc8 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -263,6 +263,7 @@ enum nft_chain_attributes {
* @NFTA_RULE_USERDATA: user data (NLA_BINARY, NFT_USERDATA_MAXLEN)
* @NFTA_RULE_ID: uniquely identifies a rule in a transaction (NLA_U32)
* @NFTA_RULE_POSITION_ID: transaction unique identifier of the previous rule (NLA_U32)
+ * @NFTA_RULE_CHAIN_ID: add the rule to chain by ID, alternative to @NFTA_RULE_CHAIN (NLA_U32)
*/
enum nft_rule_attributes {
NFTA_RULE_UNSPEC,
diff --git a/include/uapi/linux/rpmsg.h b/include/uapi/linux/rpmsg.h
index 1637e68177d9..f0c8da2b185b 100644
--- a/include/uapi/linux/rpmsg.h
+++ b/include/uapi/linux/rpmsg.h
@@ -43,4 +43,14 @@ struct rpmsg_endpoint_info {
*/
#define RPMSG_RELEASE_DEV_IOCTL _IOW(0xb5, 0x4, struct rpmsg_endpoint_info)
+/**
+ * Get the flow control state of the remote rpmsg char device.
+ */
+#define RPMSG_GET_OUTGOING_FLOWCONTROL _IOR(0xb5, 0x5, int)
+
+/**
+ * Set the flow control state of the local rpmsg char device.
+ */
+#define RPMSG_SET_INCOMING_FLOWCONTROL _IOR(0xb5, 0x6, int)
+
#endif
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index 7c3fc3980881..5c6c4269f7ef 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -29,6 +29,11 @@
struct TAG { MEMBERS } ATTRS NAME; \
}
+#ifdef __cplusplus
+/* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */
+#define __DECLARE_FLEX_ARRAY(T, member) \
+ T member[0]
+#else
/**
* __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
*
@@ -49,3 +54,5 @@
#ifndef __counted_by
#define __counted_by(m)
#endif
+
+#endif /* _UAPI_LINUX_STDDEF_H */
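
The C++ branch above keeps __DECLARE_FLEX_ARRAY() usable from C++ translation units, where an empty struct has size 1 rather than 0. A hedged example of the kind of UAPI layout the macro exists for (struct my_msg is made up):

#include <linux/stddef.h>
#include <linux/types.h>

struct my_msg {
	__u32 len;
	union {
		__DECLARE_FLEX_ARRAY(__u8, bytes);
		__DECLARE_FLEX_ARRAY(__le32, words);
	};
};
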
diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
index d3aad12ad1fa..2d827d22cd99 100644
--- a/include/uapi/linux/vhost_types.h
+++ b/include/uapi/linux/vhost_types.h
@@ -181,5 +181,9 @@ struct vhost_vdpa_iova_range {
#define VHOST_BACKEND_F_SUSPEND 0x4
/* Device can be resumed */
#define VHOST_BACKEND_F_RESUME 0x5
+/* Device supports the driver enabling virtqueues both before and after
+ * DRIVER_OK
+ */
+#define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK 0x6
#endif
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index 8a2a1d4f6b29..6e7c67a0cca3 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -53,6 +53,7 @@ enum {
BNXT_RE_UCNTX_CMASK_HAVE_CCTX = 0x1ULL,
BNXT_RE_UCNTX_CMASK_HAVE_MODE = 0x02ULL,
BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED = 0x04ULL,
+ BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED = 0x08ULL,
};
enum bnxt_re_wqe_mode {
@@ -131,10 +132,13 @@ enum bnxt_re_shpg_offt {
enum bnxt_re_objects {
BNXT_RE_OBJECT_ALLOC_PAGE = (1U << UVERBS_ID_NS_SHIFT),
+ BNXT_RE_OBJECT_NOTIFY_DRV,
};
enum bnxt_re_alloc_page_type {
BNXT_RE_ALLOC_WC_PAGE = 0,
+ BNXT_RE_ALLOC_DBR_BAR_PAGE,
+ BNXT_RE_ALLOC_DBR_PAGE,
};
enum bnxt_re_var_alloc_page_attrs {
@@ -154,4 +158,7 @@ enum bnxt_re_alloc_page_methods {
BNXT_RE_METHOD_DESTROY_PAGE,
};
+enum bnxt_re_notify_drv_methods {
+ BNXT_RE_METHOD_NOTIFY_DRV = (1U << UVERBS_ID_NS_SHIFT),
+};
#endif /* __BNXT_RE_UVERBS_ABI_H__*/
diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h
index a7085e092d34..bb18f15489e3 100644
--- a/include/uapi/rdma/irdma-abi.h
+++ b/include/uapi/rdma/irdma-abi.h
@@ -22,10 +22,16 @@ enum irdma_memreg_type {
IRDMA_MEMREG_TYPE_CQ = 2,
};
+enum {
+ IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
+ IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE = 1 << 1,
+};
+
struct irdma_alloc_ucontext_req {
__u32 rsvd32;
__u8 userspace_ver;
__u8 rsvd8[3];
+ __aligned_u64 comp_mask;
};
struct irdma_alloc_ucontext_resp {
@@ -46,6 +52,9 @@ struct irdma_alloc_ucontext_resp {
__u16 max_hw_sq_chunk;
__u8 hw_rev;
__u8 rsvd2;
+ __aligned_u64 comp_mask;
+ __u16 min_hw_wq_size;
+ __u8 rsvd3[6];
};
struct irdma_alloc_pd_resp {
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
index fd3f9e5ee241..03f2beadf201 100644
--- a/include/uapi/scsi/scsi_bsg_ufs.h
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -8,6 +8,7 @@
#ifndef SCSI_BSG_UFS_H
#define SCSI_BSG_UFS_H
+#include <asm/byteorder.h>
#include <linux/types.h>
/*
* This file intended to be included by both kernel and user space
@@ -40,11 +41,56 @@ enum ufs_rpmb_op_type {
* @dword_0: UPIU header DW-0
* @dword_1: UPIU header DW-1
* @dword_2: UPIU header DW-2
+ *
+ * @transaction_code: Type of request or response. See also enum
+ * upiu_request_transaction and enum upiu_response_transaction.
+ * @flags: UPIU flags. The meaning of individual flags depends on the
+ * transaction code.
+ * @lun: Logical unit number.
+ * @task_tag: Task tag.
+ * @iid: Initiator ID.
+ * @command_set_type: 0 for SCSI command set; 1 for UFS specific.
+ * @tm_function: Task management function in case of a task management request
+ * UPIU.
+ * @query_function: Query function in case of a query request UPIU.
+ * @response: 0 for success; 1 for failure.
+ * @status: SCSI status if this is the header of a response to a SCSI command.
+ * @ehs_length: EHS length in units of 32 bytes.
+ * @device_information: Device information field of a response UPIU.
+ * @data_segment_length: data segment length.
*/
struct utp_upiu_header {
- __be32 dword_0;
- __be32 dword_1;
- __be32 dword_2;
+ union {
+ struct {
+ __be32 dword_0;
+ __be32 dword_1;
+ __be32 dword_2;
+ };
+ struct {
+ __u8 transaction_code;
+ __u8 flags;
+ __u8 lun;
+ __u8 task_tag;
+#if defined(__BIG_ENDIAN)
+ __u8 iid: 4;
+ __u8 command_set_type: 4;
+#elif defined(__LITTLE_ENDIAN)
+ __u8 command_set_type: 4;
+ __u8 iid: 4;
+#else
+#error
+#endif
+ union {
+ __u8 tm_function;
+ __u8 query_function;
+ } __attribute__((packed));
+ __u8 response;
+ __u8 status;
+ __u8 ehs_length;
+ __u8 device_information;
+ __be16 data_segment_length;
+ };
+ };
};
/**
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index 198cb391f9db..0cced88f4531 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -11,10 +11,16 @@
#ifndef _UFS_H
#define _UFS_H
-#include <linux/mutex.h>
+#include <linux/bitops.h>
#include <linux/types.h>
#include <uapi/scsi/scsi_bsg_ufs.h>
+/*
+ * Using static_assert() is not allowed in UAPI header files. Hence the check
+ * in this header file of the size of struct utp_upiu_header.
+ */
+static_assert(sizeof(struct utp_upiu_header) == 12);
+
#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
#define QUERY_DESC_MAX_SIZE 255
#define QUERY_DESC_MIN_SIZE 2
@@ -23,9 +29,6 @@
(sizeof(struct utp_upiu_header)))
#define UFS_SENSE_SIZE 18
-#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
- cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
- (byte1 << 8) | (byte0))
/*
* UFS device may have standard LUs and LUN id could be from 0x00 to
* 0x7F. Standard LUs use "Peripheral Device Addressing Format".
@@ -76,7 +79,7 @@ enum {
};
/* UTP UPIU Transaction Codes Initiator to Target */
-enum {
+enum upiu_request_transaction {
UPIU_TRANSACTION_NOP_OUT = 0x00,
UPIU_TRANSACTION_COMMAND = 0x01,
UPIU_TRANSACTION_DATA_OUT = 0x02,
@@ -85,7 +88,7 @@ enum {
};
/* UTP UPIU Transaction Codes Target to Initiator */
-enum {
+enum upiu_response_transaction {
UPIU_TRANSACTION_NOP_IN = 0x20,
UPIU_TRANSACTION_RESPONSE = 0x21,
UPIU_TRANSACTION_DATA_IN = 0x22,
@@ -102,6 +105,12 @@ enum {
UPIU_CMD_FLAGS_READ = 0x40,
};
+/* UPIU response flags */
+enum {
+ UPIU_RSP_FLAG_UNDERFLOW = 0x20,
+ UPIU_RSP_FLAG_OVERFLOW = 0x40,
+};
+
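For illustration, a kernel-side sketch of checking these flags on a completed response UPIU; the helper name is made up.

    #include <linux/types.h>
    #include <ufs/ufs.h>

    /* True if the device reported a residual (under- or overflow) transfer. */
    static bool example_residual_mismatch(const struct utp_upiu_rsp *rsp)
    {
            return rsp->header.flags &
                   (UPIU_RSP_FLAG_UNDERFLOW | UPIU_RSP_FLAG_OVERFLOW);
    }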
/* UPIU Task Attributes */
enum {
UPIU_TASK_ATTR_SIMPLE = 0x00,
@@ -466,21 +475,11 @@ enum {
UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
};
-/* UTP Transfer Request Command Offset */
-#define UPIU_COMMAND_TYPE_OFFSET 28
-
/* Offset of the response code in the UPIU header */
#define UPIU_RSP_CODE_OFFSET 8
enum {
- MASK_SCSI_STATUS = 0xFF,
- MASK_TASK_RESPONSE = 0xFF00,
- MASK_RSP_UPIU_RESULT = 0xFFFF,
- MASK_QUERY_DATA_SEG_LEN = 0xFFFF,
- MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
- MASK_RSP_EXCEPTION_EVENT = 0x10000,
MASK_TM_SERVICE_RESP = 0xFF,
- MASK_TM_FUNC = 0xFF,
};
/* Task management service response */
@@ -516,41 +515,6 @@ struct utp_cmd_rsp {
u8 sense_data[UFS_SENSE_SIZE];
};
-struct ufshpb_active_field {
- __be16 active_rgn;
- __be16 active_srgn;
-};
-#define HPB_ACT_FIELD_SIZE 4
-
-/**
- * struct utp_hpb_rsp - Response UPIU structure
- * @residual_transfer_count: Residual transfer count DW-3
- * @reserved1: Reserved double words DW-4 to DW-7
- * @sense_data_len: Sense data length DW-8 U16
- * @desc_type: Descriptor type of sense data
- * @additional_len: Additional length of sense data
- * @hpb_op: HPB operation type
- * @lun: LUN of response UPIU
- * @active_rgn_cnt: Active region count
- * @inactive_rgn_cnt: Inactive region count
- * @hpb_active_field: Recommended to read HPB region and subregion
- * @hpb_inactive_field: To be inactivated HPB region and subregion
- */
-struct utp_hpb_rsp {
- __be32 residual_transfer_count;
- __be32 reserved1[4];
- __be16 sense_data_len;
- u8 desc_type;
- u8 additional_len;
- u8 hpb_op;
- u8 lun;
- u8 active_rgn_cnt;
- u8 inactive_rgn_cnt;
- struct ufshpb_active_field hpb_active_field[2];
- __be16 hpb_inactive_field[2];
-};
-#define UTP_HPB_RSP_SIZE 40
-
/**
* struct utp_upiu_rsp - general upiu response structure
* @header: UPIU header structure DW-0 to DW-2
@@ -561,31 +525,10 @@ struct utp_upiu_rsp {
struct utp_upiu_header header;
union {
struct utp_cmd_rsp sr;
- struct utp_hpb_rsp hr;
struct utp_upiu_query qr;
};
};
-/**
- * struct ufs_query_req - parameters for building a query request
- * @query_func: UPIU header query function
- * @upiu_req: the query request data
- */
-struct ufs_query_req {
- u8 query_func;
- struct utp_upiu_query upiu_req;
-};
-
-/**
- * struct ufs_query_resp - UPIU QUERY
- * @response: device response code
- * @upiu_res: query response data
- */
-struct ufs_query_res {
- u8 response;
- struct utp_upiu_query upiu_res;
-};
-
/*
* VCCQ & VCCQ2 current requirement when UFS device is in sleep state
* and link is in Hibern8 state.
@@ -621,9 +564,6 @@ struct ufs_dev_info {
/* Stores the depth of queue in UFS device */
u8 bqueuedepth;
- /* UFS HPB related flag */
- bool hpb_enabled;
-
/* UFS WB related flags */
bool wb_enabled;
bool wb_buf_flush_enabled;
diff --git a/include/ufs/ufs_quirks.h b/include/ufs/ufs_quirks.h
index bcb4f004bed5..41ff44dfa1db 100644
--- a/include/ufs/ufs_quirks.h
+++ b/include/ufs/ufs_quirks.h
@@ -107,10 +107,4 @@ struct ufs_dev_quirk {
*/
#define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11)
-/*
- * Some UFS devices require L2P entry should be swapped before being sent to the
- * UFS device for HPB READ command.
- */
-#define UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ (1 << 12)
-
#endif /* UFS_QUIRKS_H_ */
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 6dc11fa0ebb1..7d07b256e906 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -20,6 +20,7 @@
#include <linux/pm_runtime.h>
#include <linux/dma-direction.h>
#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
#include <ufs/unipro.h>
#include <ufs/ufs.h>
#include <ufs/ufs_quirks.h>
@@ -202,6 +203,25 @@ struct ufshcd_lrb {
};
/**
+ * struct ufs_query_req - parameters for building a query request
+ * @query_func: UPIU header query function
+ * @upiu_req: the query request data
+ */
+struct ufs_query_req {
+ u8 query_func;
+ struct utp_upiu_query upiu_req;
+};
+
+/**
+ * struct ufs_query_res - UPIU QUERY response
+ *
+ * @upiu_res: query response data
+ */
+struct ufs_query_res {
+ struct utp_upiu_query upiu_res;
+};
+
+/**
* struct ufs_query - holds relevant data structures for query request
* @request: request upiu and function
* @descriptor: buffer for sending/receiving descriptor
@@ -709,31 +729,6 @@ struct ufs_hba_variant_params {
u32 wb_flush_threshold;
};
-#ifdef CONFIG_SCSI_UFS_HPB
-/**
- * struct ufshpb_dev_info - UFSHPB device related info
- * @num_lu: the number of user logical unit to check whether all lu finished
- * initialization
- * @rgn_size: device reported HPB region size
- * @srgn_size: device reported HPB sub-region size
- * @slave_conf_cnt: counter to check all lu finished initialization
- * @hpb_disabled: flag to check if HPB is disabled
- * @max_hpb_single_cmd: device reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value
- * @is_legacy: flag to check HPB 1.0
- * @control_mode: either host or device
- */
-struct ufshpb_dev_info {
- int num_lu;
- int rgn_size;
- int srgn_size;
- atomic_t slave_conf_cnt;
- bool hpb_disabled;
- u8 max_hpb_single_cmd;
- bool is_legacy;
- u8 control_mode;
-};
-#endif
-
struct ufs_hba_monitor {
unsigned long chunk_size;
@@ -894,7 +889,6 @@ enum ufshcd_mcq_opr {
* @rpm_dev_flush_recheck_work: used to suspend from RPM (runtime power
* management) after the UFS device has finished a WriteBooster buffer
* flush or auto BKOP.
- * @ufshpb_dev: information related to HPB (Host Performance Booster).
* @monitor: statistics about UFS commands
* @crypto_capabilities: Content of crypto capabilities register (0x100)
* @crypto_cap_array: Array of crypto capabilities
@@ -1050,10 +1044,6 @@ struct ufs_hba {
struct request_queue *bsg_queue;
struct delayed_work rpm_dev_flush_recheck_work;
-#ifdef CONFIG_SCSI_UFS_HPB
- struct ufshpb_dev_info ufshpb_dev;
-#endif
-
struct ufs_hba_monitor monitor;
#ifdef CONFIG_SCSI_UFS_CRYPTO
@@ -1254,9 +1244,12 @@ void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
void ufshcd_schedule_eh_work(struct ufs_hba *hba);
+void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
+u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq);
+void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);
@@ -1383,12 +1376,6 @@ int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg);
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
-int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
- struct utp_upiu_req *req_upiu,
- struct utp_upiu_req *rsp_upiu,
- int msgcode,
- u8 *desc_buff, int *buff_len,
- enum query_opcode desc_op);
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu, struct ufs_ehs *ehs_req,
struct ufs_ehs *ehs_rsp, int sg_cnt,
@@ -1398,6 +1385,7 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable);
int ufshcd_suspend_prepare(struct device *dev);
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm);
void ufshcd_resume_complete(struct device *dev);
+bool ufshcd_is_hba_active(struct ufs_hba *hba);
/* Wrapper functions for safely calling variant operations */
static inline int ufshcd_vops_init(struct ufs_hba *hba)
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index 146fbea76d98..d5accacae6bc 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -11,7 +11,8 @@
#ifndef _UFSHCI_H
#define _UFSHCI_H
-#include <scsi/scsi_host.h>
+#include <linux/types.h>
+#include <ufs/ufs.h>
enum {
TASK_REQ_UPIU_SIZE_DWORDS = 8,
@@ -126,7 +127,6 @@ enum {
};
#define SQ_ICU_ERR_CODE_MASK GENMASK(7, 4)
-#define UPIU_COMMAND_TYPE_MASK GENMASK(31, 28)
#define UFS_MASK(mask, offset) ((mask) << (offset))
/* UFS Version 08h */
@@ -438,15 +438,13 @@ enum {
UTP_SCSI_COMMAND = 0x00000000,
UTP_NATIVE_UFS_COMMAND = 0x10000000,
UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
- UTP_REQ_DESC_INT_CMD = 0x01000000,
- UTP_REQ_DESC_CRYPTO_ENABLE_CMD = 0x00800000,
};
/* UTP Transfer Request Data Direction (DD) */
-enum {
- UTP_NO_DATA_TRANSFER = 0x00000000,
- UTP_HOST_TO_DEVICE = 0x02000000,
- UTP_DEVICE_TO_HOST = 0x04000000,
+enum utp_data_direction {
+ UTP_NO_DATA_TRANSFER = 0,
+ UTP_HOST_TO_DEVICE = 1,
+ UTP_DEVICE_TO_HOST = 2,
};
/* Overall command status values */
@@ -505,17 +503,38 @@ struct utp_transfer_cmd_desc {
/**
* struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
- * @dword0: Descriptor Header DW0
- * @dword1: Descriptor Header DW1
- * @dword2: Descriptor Header DW2
- * @dword3: Descriptor Header DW3
*/
struct request_desc_header {
- __le32 dword_0;
- __le32 dword_1;
- __le32 dword_2;
- __le32 dword_3;
-};
+ u8 cci;
+ u8 ehs_length;
+#if defined(__BIG_ENDIAN)
+ u8 enable_crypto:1;
+ u8 reserved2:7;
+
+ u8 command_type:4;
+ u8 reserved1:1;
+ u8 data_direction:2;
+ u8 interrupt:1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved2:7;
+ u8 enable_crypto:1;
+
+ u8 interrupt:1;
+ u8 data_direction:2;
+ u8 reserved1:1;
+ u8 command_type:4;
+#else
+#error
+#endif
+
+ __le32 dunl;
+ u8 ocs;
+ u8 cds;
+ __le16 ldbc;
+ __le32 dunu;
+};
+
+static_assert(sizeof(struct request_desc_header) == 16);
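For illustration, a kernel-side sketch of initializing the reworked header by field for a device-to-host transfer; using 0xf as the initial overall command status mirrors the driver's customary "invalid" value and is an assumption here.

    #include <ufs/ufshci.h>

    static void example_init_utrd_header(struct request_desc_header *h)
    {
            *h = (struct request_desc_header) {
                    .interrupt      = 1,
                    .data_direction = UTP_DEVICE_TO_HOST,
                    .ocs            = 0xf,  /* invalid until the controller completes the request */
            };
    }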
/**
* struct utp_transfer_req_desc - UTP Transfer Request Descriptor (UTRD)
diff --git a/include/ufs/unipro.h b/include/ufs/unipro.h
index dc9dd1d23f0f..256eb3a43f54 100644
--- a/include/ufs/unipro.h
+++ b/include/ufs/unipro.h
@@ -230,6 +230,12 @@ enum ufs_hs_gear_tag {
UFS_HS_G5 /* HS Gear 5 */
};
+enum ufs_lanes {
+ UFS_LANE_DONT_CHANGE, /* Don't change Lane */
+ UFS_LANE_1, /* Lane 1 (default for reset) */
+ UFS_LANE_2, /* Lane 2 */
+};
+
enum ufs_unipro_ver {
UFS_UNIPRO_VER_RESERVED = 0,
UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h
index 43ef24dd030e..9995695204f5 100644
--- a/include/xen/arm/hypervisor.h
+++ b/include/xen/arm/hypervisor.h
@@ -7,18 +7,6 @@
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
- PARAVIRT_LAZY_NONE,
- PARAVIRT_LAZY_MMU,
- PARAVIRT_LAZY_CPU,
-};
-
-static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
-{
- return PARAVIRT_LAZY_NONE;
-}
-
#ifdef CONFIG_XEN
void __init xen_early_init(void);
#else
diff --git a/include/xen/events.h b/include/xen/events.h
index 95d5e28de324..23932b0673dc 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -105,8 +105,7 @@ int irq_from_virq(unsigned int cpu, unsigned int virq);
evtchn_port_t evtchn_from_irq(unsigned irq);
int xen_set_callback_via(uint64_t via);
-void xen_evtchn_do_upcall(struct pt_regs *regs);
-int xen_hvm_evtchn_do_upcall(void);
+int xen_evtchn_do_upcall(void);
/* Bind a pirq for a physical interrupt to an irq. */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,