Diffstat (limited to 'include')
-rw-r--r--  include/drm/drm_atomic_helper.h              |    2
-rw-r--r--  include/drm/drm_exec.h                       |    2
-rw-r--r--  include/drm/drm_gpuvm.h                      |   12
-rw-r--r--  include/drm/drm_mode_object.h                |    2
-rw-r--r--  include/drm/drm_plane.h                      |    7
-rw-r--r--  include/drm/drm_prime.h                      |    7
-rw-r--r--  include/drm/drm_property.h                   |    6
-rw-r--r--  include/drm/i915_pciids.h                    |    3
-rw-r--r--  include/drm/xe_pciids.h                      |  190
-rw-r--r--  include/linux/acpi.h                         |   22
-rw-r--r--  include/linux/acpi_amd_wbrf.h                |   91
-rw-r--r--  include/linux/amd-pstate.h                   |    4
-rw-r--r--  include/linux/arm_ffa.h                      |    2
-rw-r--r--  include/linux/blk_types.h                    |    4
-rw-r--r--  include/linux/bpf.h                          |    3
-rw-r--r--  include/linux/closure.h                      |    9
-rw-r--r--  include/linux/debugfs.h                      |   19
-rw-r--r--  include/linux/dma-fence.h                    |   15
-rw-r--r--  include/linux/fw_table.h                     |    3
-rw-r--r--  include/linux/habanalabs/cpucp_if.h          |    8
-rw-r--r--  include/linux/highmem.h                      |    2
-rw-r--r--  include/linux/hugetlb.h                      |    5
-rw-r--r--  include/linux/ieee80211.h                    |    4
-rw-r--r--  include/linux/io_uring_types.h               |    3
-rw-r--r--  include/linux/iommu.h                        |    1
-rw-r--r--  include/linux/kprobes.h                      |   13
-rw-r--r--  include/linux/platform_data/x86/asus-wmi.h   |    3
-rw-r--r--  include/linux/rethook.h                      |    7
-rw-r--r--  include/linux/skmsg.h                        |    1
-rw-r--r--  include/linux/stmmac.h                       |    1
-rw-r--r--  include/linux/tcp.h                          |    8
-rw-r--r--  include/linux/units.h                        |    1
-rw-r--r--  include/linux/usb/r8152.h                    |    1
-rw-r--r--  include/linux/vfio.h                         |    8
-rw-r--r--  include/net/af_unix.h                        |    1
-rw-r--r--  include/net/cfg80211.h                       |   46
-rw-r--r--  include/net/genetlink.h                      |    2
-rw-r--r--  include/net/neighbour.h                      |    2
-rw-r--r--  include/net/tcp.h                            |    9
-rw-r--r--  include/net/tcp_ao.h                         |    6
-rw-r--r--  include/rdma/ib_umem.h                       |    9
-rw-r--r--  include/rdma/ib_verbs.h                      |    1
-rw-r--r--  include/scsi/scsi_device.h                   |   12
-rw-r--r--  include/sound/cs35l41.h                      |    2
-rw-r--r--  include/uapi/drm/drm_mode.h                  |    8
-rw-r--r--  include/uapi/drm/habanalabs_accel.h          |   28
-rw-r--r--  include/uapi/drm/msm_drm.h                   |    3
-rw-r--r--  include/uapi/drm/xe_drm.h                    | 1347
-rw-r--r--  include/uapi/linux/stddef.h                  |    2
-rw-r--r--  include/uapi/linux/v4l2-subdev.h             |    2
50 files changed, 1889 insertions, 60 deletions
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index fea528aacfe2..9aa0a05aa072 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -96,6 +96,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state);
+void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
#define DRM_PLANE_COMMIT_ACTIVE_ONLY BIT(0)
#define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET BIT(1)
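
The new drm_atomic_helper_unprepare_planes() undoes drm_atomic_helper_prepare_planes() when a commit fails after the planes were prepared. A minimal sketch of the pairing on an error path, assuming a simplified driver commit function (my_atomic_commit is hypothetical):

    /* Sketch: pairing prepare/unprepare on an atomic commit error path. */
    static int my_atomic_commit(struct drm_device *dev,
                                struct drm_atomic_state *state)
    {
            int ret;

            ret = drm_atomic_helper_prepare_planes(dev, state);
            if (ret)
                    return ret;

            ret = drm_atomic_helper_swap_state(state, true);
            if (ret) {
                    /* undo the framebuffer/GEM preparation done above */
                    drm_atomic_helper_unprepare_planes(dev, state);
                    return ret;
            }

            /* ... queue or run the commit tail ... */
            return 0;
    }
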
diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h
index b5bf0b6da791..f1a66c048721 100644
--- a/include/drm/drm_exec.h
+++ b/include/drm/drm_exec.h
@@ -135,7 +135,7 @@ static inline bool drm_exec_is_contended(struct drm_exec *exec)
return !!exec->contended;
}
-void drm_exec_init(struct drm_exec *exec, uint32_t flags);
+void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr);
void drm_exec_fini(struct drm_exec *exec);
bool drm_exec_cleanup(struct drm_exec *exec);
int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
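
drm_exec_init() now takes a third argument: the expected number of objects to lock, used for pre-allocation (0 lets the core pick a default). A minimal sketch of the usual retry loop with the new signature, assuming obj is an already-referenced struct drm_gem_object:

    /* Sketch: locking GEM objects with drm_exec under the new signature. */
    struct drm_exec exec;
    int ret;

    drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
    drm_exec_until_all_locked(&exec) {
            ret = drm_exec_lock_obj(&exec, obj);
            drm_exec_retry_on_contention(&exec);
            if (ret)
                    break;
    }
    drm_exec_fini(&exec);
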
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index f3fdb810989a..9060f9fae6f1 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
#ifndef __DRM_GPUVM_H__
#define __DRM_GPUVM_H__
@@ -1018,6 +1018,16 @@ struct drm_gpuva_ops {
list_for_each_entry_from_reverse(op, &(ops)->list, entry)
/**
+ * drm_gpuva_for_each_op_reverse - iterator to walk over &drm_gpuva_ops in reverse
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations in reverse
+ */
+#define drm_gpuva_for_each_op_reverse(op, ops) \
+ list_for_each_entry_reverse(op, &(ops)->list, entry)
+
+/**
* drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
* @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
*/
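
The new reverse iterator mirrors drm_gpuva_for_each_op(); a typical use is unwinding already-applied operations after a failure partway through the list. A sketch, with my_gpuva_op_undo() as a hypothetical per-op undo helper:

    /* Sketch: unwinding applied GPU VA ops in reverse order on error. */
    struct drm_gpuva_op *op;

    drm_gpuva_for_each_op_reverse(op, ops)
            my_gpuva_op_undo(op);
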
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index 912f1e415685..08d7a7f0188f 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -60,7 +60,7 @@ struct drm_mode_object {
void (*free_cb)(struct kref *kref);
};
-#define DRM_OBJECT_MAX_PROPERTY 24
+#define DRM_OBJECT_MAX_PROPERTY 64
/**
* struct drm_object_properties - property tracking for &drm_mode_object
*/
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index c6565a6f9324..641fe298052d 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -251,6 +251,13 @@ struct drm_plane_state {
/** @state: backpointer to global drm_atomic_state */
struct drm_atomic_state *state;
+
+ /**
+ * @color_mgmt_changed: Color management properties have changed. Used
+ * by the atomic helpers and drivers to steer the atomic commit control
+ * flow.
+ */
+ bool color_mgmt_changed : 1;
};
static inline struct drm_rect
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index a7abf9f3e697..2a1d01e5b56b 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -60,12 +60,19 @@ enum dma_data_direction;
struct drm_device;
struct drm_gem_object;
+struct drm_file;
/* core prime functions */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf_export_info *exp_info);
void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle, uint32_t flags,
+ int *prime_fd);
+
/* helper functions for exporting */
int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach);
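
drm_gem_prime_fd_to_handle() and drm_gem_prime_handle_to_fd() are now declared here so driver code can call the PRIME core directly rather than only through the generic DRM ioctls. A hedged sketch of exporting a handle as a dma-buf fd:

    /* Sketch: exporting a GEM handle as a dma-buf fd from driver code. */
    int fd, ret;

    ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle,
                                     DRM_CLOEXEC | DRM_RDWR, &fd);
    if (ret)
            return ret;
    /* fd now refers to a dma-buf backed by the GEM object */
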
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 65bc9710a470..082f29156b3e 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -279,6 +279,12 @@ struct drm_property_blob *drm_property_create_blob(struct drm_device *dev,
const void *data);
struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
uint32_t id);
+int drm_property_replace_blob_from_id(struct drm_device *dev,
+ struct drm_property_blob **blob,
+ uint64_t blob_id,
+ ssize_t expected_size,
+ ssize_t expected_elem_size,
+ bool *replaced);
int drm_property_replace_global_blob(struct drm_device *dev,
struct drm_property_blob **replace,
size_t length,
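
drm_property_replace_blob_from_id() looks up a blob by the ID userspace passed and swaps it into *blob, optionally validating its size. A sketch of a typical atomic set_property use, assuming a driver state that stores a gamma LUT and that a negative expected size skips that check:

    /* Sketch: validating and storing a LUT blob in a set_property hook. */
    bool replaced = false;
    int ret;

    ret = drm_property_replace_blob_from_id(dev, &state->gamma_lut,
                                            val, /* blob ID from userspace */
                                            -1,  /* any total size */
                                            sizeof(struct drm_color_lut),
                                            &replaced);
    if (ret)
            return ret;
    state->color_mgmt_changed |= replaced;
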
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 1c9ea6ab3eb9..fcf1849aa47c 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -738,7 +738,8 @@
INTEL_DG2_G12_IDS(info)
#define INTEL_ATS_M150_IDS(info) \
- INTEL_VGA_DEVICE(0x56C0, info)
+ INTEL_VGA_DEVICE(0x56C0, info), \
+ INTEL_VGA_DEVICE(0x56C2, info)
#define INTEL_ATS_M75_IDS(info) \
INTEL_VGA_DEVICE(0x56C1, info)
diff --git a/include/drm/xe_pciids.h b/include/drm/xe_pciids.h
new file mode 100644
index 000000000000..de1a344737bc
--- /dev/null
+++ b/include/drm/xe_pciids.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_PCIIDS_H_
+#define _XE_PCIIDS_H_
+
+/*
+ * Lists below can be turned into initializers for a struct pci_device_id
+ * by defining INTEL_VGA_DEVICE:
+ *
+ * #define INTEL_VGA_DEVICE(id, info) { \
+ * 0x8086, id, \
+ * ~0, ~0, \
+ * 0x030000, 0xff0000, \
+ * (unsigned long) info }
+ *
+ * And then calling like:
+ *
+ * XE_TGL_GT1_IDS(INTEL_VGA_DEVICE, ## __VA_ARGS__)
+ *
+ * To turn them into something else, just provide a different macro passed as
+ * first argument.
+ */
+
+/* TGL */
+#define XE_TGL_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x9A60, ## __VA_ARGS__), \
+ MACRO__(0x9A68, ## __VA_ARGS__), \
+ MACRO__(0x9A70, ## __VA_ARGS__)
+
+#define XE_TGL_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x9A40, ## __VA_ARGS__), \
+ MACRO__(0x9A49, ## __VA_ARGS__), \
+ MACRO__(0x9A59, ## __VA_ARGS__), \
+ MACRO__(0x9A78, ## __VA_ARGS__), \
+ MACRO__(0x9AC0, ## __VA_ARGS__), \
+ MACRO__(0x9AC9, ## __VA_ARGS__), \
+ MACRO__(0x9AD9, ## __VA_ARGS__), \
+ MACRO__(0x9AF8, ## __VA_ARGS__)
+
+#define XE_TGL_IDS(MACRO__, ...) \
+ XE_TGL_GT1_IDS(MACRO__, ## __VA_ARGS__),\
+ XE_TGL_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+/* RKL */
+#define XE_RKL_IDS(MACRO__, ...) \
+ MACRO__(0x4C80, ## __VA_ARGS__), \
+ MACRO__(0x4C8A, ## __VA_ARGS__), \
+ MACRO__(0x4C8B, ## __VA_ARGS__), \
+ MACRO__(0x4C8C, ## __VA_ARGS__), \
+ MACRO__(0x4C90, ## __VA_ARGS__), \
+ MACRO__(0x4C9A, ## __VA_ARGS__)
+
+/* DG1 */
+#define XE_DG1_IDS(MACRO__, ...) \
+ MACRO__(0x4905, ## __VA_ARGS__), \
+ MACRO__(0x4906, ## __VA_ARGS__), \
+ MACRO__(0x4907, ## __VA_ARGS__), \
+ MACRO__(0x4908, ## __VA_ARGS__), \
+ MACRO__(0x4909, ## __VA_ARGS__)
+
+/* ADL-S */
+#define XE_ADLS_IDS(MACRO__, ...) \
+ MACRO__(0x4680, ## __VA_ARGS__), \
+ MACRO__(0x4682, ## __VA_ARGS__), \
+ MACRO__(0x4688, ## __VA_ARGS__), \
+ MACRO__(0x468A, ## __VA_ARGS__), \
+ MACRO__(0x468B, ## __VA_ARGS__), \
+ MACRO__(0x4690, ## __VA_ARGS__), \
+ MACRO__(0x4692, ## __VA_ARGS__), \
+ MACRO__(0x4693, ## __VA_ARGS__)
+
+/* ADL-P */
+#define XE_ADLP_IDS(MACRO__, ...) \
+ MACRO__(0x46A0, ## __VA_ARGS__), \
+ MACRO__(0x46A1, ## __VA_ARGS__), \
+ MACRO__(0x46A2, ## __VA_ARGS__), \
+ MACRO__(0x46A3, ## __VA_ARGS__), \
+ MACRO__(0x46A6, ## __VA_ARGS__), \
+ MACRO__(0x46A8, ## __VA_ARGS__), \
+ MACRO__(0x46AA, ## __VA_ARGS__), \
+ MACRO__(0x462A, ## __VA_ARGS__), \
+ MACRO__(0x4626, ## __VA_ARGS__), \
+ MACRO__(0x4628, ## __VA_ARGS__), \
+ MACRO__(0x46B0, ## __VA_ARGS__), \
+ MACRO__(0x46B1, ## __VA_ARGS__), \
+ MACRO__(0x46B2, ## __VA_ARGS__), \
+ MACRO__(0x46B3, ## __VA_ARGS__), \
+ MACRO__(0x46C0, ## __VA_ARGS__), \
+ MACRO__(0x46C1, ## __VA_ARGS__), \
+ MACRO__(0x46C2, ## __VA_ARGS__), \
+ MACRO__(0x46C3, ## __VA_ARGS__)
+
+/* ADL-N */
+#define XE_ADLN_IDS(MACRO__, ...) \
+ MACRO__(0x46D0, ## __VA_ARGS__), \
+ MACRO__(0x46D1, ## __VA_ARGS__), \
+ MACRO__(0x46D2, ## __VA_ARGS__)
+
+/* RPL-S */
+#define XE_RPLS_IDS(MACRO__, ...) \
+ MACRO__(0xA780, ## __VA_ARGS__), \
+ MACRO__(0xA781, ## __VA_ARGS__), \
+ MACRO__(0xA782, ## __VA_ARGS__), \
+ MACRO__(0xA783, ## __VA_ARGS__), \
+ MACRO__(0xA788, ## __VA_ARGS__), \
+ MACRO__(0xA789, ## __VA_ARGS__), \
+ MACRO__(0xA78A, ## __VA_ARGS__), \
+ MACRO__(0xA78B, ## __VA_ARGS__)
+
+/* RPL-U */
+#define XE_RPLU_IDS(MACRO__, ...) \
+ MACRO__(0xA721, ## __VA_ARGS__), \
+ MACRO__(0xA7A1, ## __VA_ARGS__), \
+ MACRO__(0xA7A9, ## __VA_ARGS__), \
+ MACRO__(0xA7AC, ## __VA_ARGS__), \
+ MACRO__(0xA7AD, ## __VA_ARGS__)
+
+/* RPL-P */
+#define XE_RPLP_IDS(MACRO__, ...) \
+ XE_RPLU_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0xA720, ## __VA_ARGS__), \
+ MACRO__(0xA7A0, ## __VA_ARGS__), \
+ MACRO__(0xA7A8, ## __VA_ARGS__), \
+ MACRO__(0xA7AA, ## __VA_ARGS__), \
+ MACRO__(0xA7AB, ## __VA_ARGS__)
+
+/* DG2 */
+#define XE_DG2_G10_IDS(MACRO__, ...) \
+ MACRO__(0x5690, ## __VA_ARGS__), \
+ MACRO__(0x5691, ## __VA_ARGS__), \
+ MACRO__(0x5692, ## __VA_ARGS__), \
+ MACRO__(0x56A0, ## __VA_ARGS__), \
+ MACRO__(0x56A1, ## __VA_ARGS__), \
+ MACRO__(0x56A2, ## __VA_ARGS__)
+
+#define XE_DG2_G11_IDS(MACRO__, ...) \
+ MACRO__(0x5693, ## __VA_ARGS__), \
+ MACRO__(0x5694, ## __VA_ARGS__), \
+ MACRO__(0x5695, ## __VA_ARGS__), \
+ MACRO__(0x56A5, ## __VA_ARGS__), \
+ MACRO__(0x56A6, ## __VA_ARGS__), \
+ MACRO__(0x56B0, ## __VA_ARGS__), \
+ MACRO__(0x56B1, ## __VA_ARGS__), \
+ MACRO__(0x56BA, ## __VA_ARGS__), \
+ MACRO__(0x56BB, ## __VA_ARGS__), \
+ MACRO__(0x56BC, ## __VA_ARGS__), \
+ MACRO__(0x56BD, ## __VA_ARGS__)
+
+#define XE_DG2_G12_IDS(MACRO__, ...) \
+ MACRO__(0x5696, ## __VA_ARGS__), \
+ MACRO__(0x5697, ## __VA_ARGS__), \
+ MACRO__(0x56A3, ## __VA_ARGS__), \
+ MACRO__(0x56A4, ## __VA_ARGS__), \
+ MACRO__(0x56B2, ## __VA_ARGS__), \
+ MACRO__(0x56B3, ## __VA_ARGS__)
+
+#define XE_DG2_IDS(MACRO__, ...) \
+ XE_DG2_G10_IDS(MACRO__, ## __VA_ARGS__),\
+ XE_DG2_G11_IDS(MACRO__, ## __VA_ARGS__),\
+ XE_DG2_G12_IDS(MACRO__, ## __VA_ARGS__)
+
+#define XE_ATS_M150_IDS(MACRO__, ...) \
+ MACRO__(0x56C0, ## __VA_ARGS__), \
+ MACRO__(0x56C2, ## __VA_ARGS__)
+
+#define XE_ATS_M75_IDS(MACRO__, ...) \
+ MACRO__(0x56C1, ## __VA_ARGS__)
+
+#define XE_ATS_M_IDS(MACRO__, ...) \
+ XE_ATS_M150_IDS(MACRO__, ## __VA_ARGS__),\
+ XE_ATS_M75_IDS(MACRO__, ## __VA_ARGS__)
+
+/* MTL / ARL */
+#define XE_MTL_IDS(MACRO__, ...) \
+ MACRO__(0x7D40, ## __VA_ARGS__), \
+ MACRO__(0x7D45, ## __VA_ARGS__), \
+ MACRO__(0x7D55, ## __VA_ARGS__), \
+ MACRO__(0x7D60, ## __VA_ARGS__), \
+ MACRO__(0x7D67, ## __VA_ARGS__), \
+ MACRO__(0x7DD5, ## __VA_ARGS__)
+
+#define XE_LNL_IDS(MACRO__, ...) \
+ MACRO__(0x6420, ## __VA_ARGS__), \
+ MACRO__(0x64A0, ## __VA_ARGS__), \
+ MACRO__(0x64B0, ## __VA_ARGS__)
+
+#endif
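
The header comment above shows the INTEL_VGA_DEVICE shape these lists expect; spelled out, a hedged sketch of a PCI ID table built from them (the driver-data values tgl_desc and dg2_desc are hypothetical):

    /* Sketch: building a struct pci_device_id table from the XE_*_IDS lists. */
    #define INTEL_VGA_DEVICE(id, info) {            \
            0x8086, id,                             \
            ~0, ~0,                                 \
            0x030000, 0xff0000,                     \
            (unsigned long) info }

    static const struct pci_device_id my_xe_ids[] = {
            XE_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
            XE_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
            { }
    };
    MODULE_DEVICE_TABLE(pci, my_xe_ids);
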
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 54189e0e5f41..4db54e928b36 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -15,7 +15,6 @@
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/uuid.h>
-#include <linux/fw_table.h>
struct irq_domain;
struct irq_domain_ops;
@@ -25,22 +24,13 @@ struct irq_domain_ops;
#endif
#include <acpi/acpi.h>
-#ifdef CONFIG_ACPI_TABLE_LIB
-#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
-#define __init_or_acpilib
-#define __initdata_or_acpilib
-#else
-#define EXPORT_SYMBOL_ACPI_LIB(x)
-#define __init_or_acpilib __init
-#define __initdata_or_acpilib __initdata
-#endif
-
#ifdef CONFIG_ACPI
#include <linux/list.h>
#include <linux/dynamic_debug.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/fw_table.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -48,6 +38,16 @@ struct irq_domain_ops;
#include <acpi/acpi_io.h>
#include <asm/acpi.h>
+#ifdef CONFIG_ACPI_TABLE_LIB
+#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
+#define __init_or_acpilib
+#define __initdata_or_acpilib
+#else
+#define EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_acpilib __init
+#define __initdata_or_acpilib __initdata
+#endif
+
static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
{
return adev ? adev->handle : NULL;
diff --git a/include/linux/acpi_amd_wbrf.h b/include/linux/acpi_amd_wbrf.h
new file mode 100644
index 000000000000..898f31d536d4
--- /dev/null
+++ b/include/linux/acpi_amd_wbrf.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Wifi Band Exclusion Interface (AMD ACPI Implementation)
+ * Copyright (C) 2023 Advanced Micro Devices
+ */
+
+#ifndef _ACPI_AMD_WBRF_H
+#define _ACPI_AMD_WBRF_H
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+
+/* The maximum number of frequency band ranges */
+#define MAX_NUM_OF_WBRF_RANGES 11
+
+/* Record actions */
+#define WBRF_RECORD_ADD 0x0
+#define WBRF_RECORD_REMOVE 0x1
+
+/**
+ * struct freq_band_range - Wifi frequency band range definition
+ * @start: start frequency point (in Hz)
+ * @end: end frequency point (in Hz)
+ */
+struct freq_band_range {
+ u64 start;
+ u64 end;
+};
+
+/**
+ * struct wbrf_ranges_in_out - wbrf ranges info
+ * @num_of_ranges: total number of band ranges in this struct
+ * @band_list: array of Wifi band ranges
+ */
+struct wbrf_ranges_in_out {
+ u64 num_of_ranges;
+ struct freq_band_range band_list[MAX_NUM_OF_WBRF_RANGES];
+};
+
+/**
+ * enum wbrf_notifier_actions - wbrf notifier actions index
+ * @WBRF_CHANGED: there was some frequency band updates. The consumers
+ * should retrieve the latest active frequency bands.
+ */
+enum wbrf_notifier_actions {
+ WBRF_CHANGED,
+};
+
+#if IS_ENABLED(CONFIG_AMD_WBRF)
+bool acpi_amd_wbrf_supported_producer(struct device *dev);
+int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in);
+bool acpi_amd_wbrf_supported_consumer(struct device *dev);
+int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out);
+int amd_wbrf_register_notifier(struct notifier_block *nb);
+int amd_wbrf_unregister_notifier(struct notifier_block *nb);
+#else
+static inline
+bool acpi_amd_wbrf_supported_consumer(struct device *dev)
+{
+ return false;
+}
+
+static inline
+int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in)
+{
+ return -ENODEV;
+}
+
+static inline
+bool acpi_amd_wbrf_supported_producer(struct device *dev)
+{
+ return false;
+}
+static inline
+int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out)
+{
+ return -ENODEV;
+}
+static inline
+int amd_wbrf_register_notifier(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+static inline
+int amd_wbrf_unregister_notifier(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_AMD_WBRF */
+
+#endif /* _ACPI_AMD_WBRF_H */
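
On the consumer side, the intended flow is: check support, register for WBRF_CHANGED, and re-read the active bands from the notifier. A minimal sketch (my_dev and the handler name are hypothetical):

    /* Sketch: WBRF consumer registration and band retrieval. */
    static int my_wbrf_notify(struct notifier_block *nb,
                              unsigned long action, void *data)
    {
            struct wbrf_ranges_in_out out = {};

            if (action == WBRF_CHANGED &&
                !amd_wbrf_retrieve_freq_band(my_dev, &out)) {
                    /* react to out.num_of_ranges entries in out.band_list */
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_wbrf_nb = {
            .notifier_call = my_wbrf_notify,
    };

    if (acpi_amd_wbrf_supported_consumer(my_dev))
            amd_wbrf_register_notifier(&my_wbrf_nb);
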
diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
index 446394f84606..6ad02ad9c7b4 100644
--- a/include/linux/amd-pstate.h
+++ b/include/linux/amd-pstate.h
@@ -70,6 +70,10 @@ struct amd_cpudata {
u32 nominal_perf;
u32 lowest_nonlinear_perf;
u32 lowest_perf;
+ u32 min_limit_perf;
+ u32 max_limit_perf;
+ u32 min_limit_freq;
+ u32 max_limit_freq;
u32 max_freq;
u32 min_freq;
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index 1abedb5b2e48..3d0fde57ba90 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -209,6 +209,8 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }
#define module_ffa_driver(__ffa_driver) \
module_driver(__ffa_driver, ffa_register, ffa_unregister)
+extern struct bus_type ffa_bus_type;
+
/* FFA transport related */
struct ffa_partition_info {
u16 id;
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d5c5e59ddbd2..b29ebd53417d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -49,9 +49,10 @@ struct block_device {
bool bd_write_holder;
bool bd_has_submit_bio;
dev_t bd_dev;
+ struct inode *bd_inode; /* will die */
+
atomic_t bd_openers;
spinlock_t bd_size_lock; /* for bd_inode->i_size updates */
- struct inode * bd_inode; /* will die */
void * bd_claiming;
void * bd_holder;
const struct blk_holder_ops *bd_holder_ops;
@@ -69,6 +70,7 @@ struct block_device {
#ifdef CONFIG_FAIL_MAKE_REQUEST
bool bd_make_it_fail;
#endif
+ bool bd_ro_warned;
/*
* keep this out-of-line as it's both big and not needed in the fast
* path
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 6762dac3ef76..cff5bb08820e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -3175,6 +3175,9 @@ enum bpf_text_poke_type {
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *addr1, void *addr2);
+void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
+ struct bpf_prog *new, struct bpf_prog *old);
+
void *bpf_arch_text_copy(void *dst, void *src, size_t len);
int bpf_arch_text_invalidate(void *dst, size_t len);
diff --git a/include/linux/closure.h b/include/linux/closure.h
index de7bb47d8a46..c554c6a08768 100644
--- a/include/linux/closure.h
+++ b/include/linux/closure.h
@@ -104,7 +104,7 @@
struct closure;
struct closure_syncer;
-typedef void (closure_fn) (struct closure *);
+typedef void (closure_fn) (struct work_struct *);
extern struct dentry *bcache_debug;
struct closure_waitlist {
@@ -254,7 +254,7 @@ static inline void closure_queue(struct closure *cl)
INIT_WORK(&cl->work, cl->work.func);
BUG_ON(!queue_work(wq, &cl->work));
} else
- cl->fn(cl);
+ cl->fn(&cl->work);
}
/**
@@ -309,6 +309,11 @@ static inline void closure_wake_up(struct closure_waitlist *list)
__closure_wake_up(list);
}
+#define CLOSURE_CALLBACK(name) void name(struct work_struct *ws)
+#define closure_type(name, type, member) \
+ struct closure *cl = container_of(ws, struct closure, work); \
+ type *name = container_of(cl, type, member)
+
/**
* continue_at - jump to another function with barrier
*
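
With closure_fn now typed on a work_struct, callbacks are declared via the new CLOSURE_CALLBACK()/closure_type() pair, which recover the closure and its container. A sketch, assuming a hypothetical struct my_io embedding its closure at member cl:

    /* Sketch: a closure callback under the new work_struct-based typing. */
    CLOSURE_CALLBACK(my_io_done)
    {
            closure_type(io, struct my_io, cl); /* declares cl and io */

            /* ... complete the I/O described by io ... */
    }
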
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index ea2d919fd9c7..c9c65b132c0f 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -171,6 +171,25 @@ ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos);
+/**
+ * struct debugfs_cancellation - cancellation data
+ * @list: internal, for keeping track
+ * @cancel: callback to call
+ * @cancel_data: extra data for the callback to call
+ */
+struct debugfs_cancellation {
+ struct list_head list;
+ void (*cancel)(struct dentry *, void *);
+ void *cancel_data;
+};
+
+void __acquires(cancellation)
+debugfs_enter_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation);
+void __releases(cancellation)
+debugfs_leave_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation);
+
#else
#include <linux/err.h>
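
The cancellation hooks bracket a blocking section of a debugfs handler so that file removal can interrupt it. A hedged sketch of a read op that blocks on a completion (names hypothetical, completion API assumed included):

    /* Sketch: making a blocking debugfs read cancellable on file removal. */
    static void my_cancel(struct dentry *dentry, void *data)
    {
            complete(data); /* wake the waiter below */
    }

    static ssize_t my_read(struct file *file, char __user *buf,
                           size_t count, loff_t *ppos)
    {
            DECLARE_COMPLETION_ONSTACK(done);
            struct debugfs_cancellation c = {
                    .cancel = my_cancel,
                    .cancel_data = &done,
            };
            int ret;

            debugfs_enter_cancellation(file, &c);
            ret = wait_for_completion_interruptible(&done); /* blocking work */
            debugfs_leave_cancellation(file, &c);
            return ret;
    }
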
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index ebe78bd3d121..b3772edca2e6 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -499,6 +499,21 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
}
/**
+ * dma_fence_is_later_or_same - return true if f1 is later or same as f2
+ * @f1: the first fence from the same context
+ * @f2: the second fence from the same context
+ *
+ * Returns true if f1 is chronologically later than f2 or the same fence. Both
+ * fences must be from the same context, since a seqno is not re-used across
+ * contexts.
+ */
+static inline bool dma_fence_is_later_or_same(struct dma_fence *f1,
+ struct dma_fence *f2)
+{
+ return f1 == f2 || dma_fence_is_later(f1, f2);
+}
+
+/**
* dma_fence_later - return the chronologically later fence
* @f1: the first fence from the same context
* @f2: the second fence from the same context
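
A typical use of dma_fence_is_later_or_same() is deduplicating updates on a single timeline: skip the swap when the fence already tracked is at least as new. A sketch (tracked and new_fence are hypothetical locals on one fence context):

    /* Sketch: only replace a tracked fence if the new one is strictly newer. */
    if (tracked && dma_fence_is_later_or_same(tracked, new_fence))
            return; /* tracked is already the same fence or newer */
    dma_fence_put(tracked);
    tracked = dma_fence_get(new_fence);
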
diff --git a/include/linux/fw_table.h b/include/linux/fw_table.h
index ff8fa58d5818..ca49947f0a77 100644
--- a/include/linux/fw_table.h
+++ b/include/linux/fw_table.h
@@ -25,9 +25,6 @@ struct acpi_subtable_proc {
int count;
};
-#include <linux/acpi.h>
-#include <acpi/acpi.h>
-
union acpi_subtable_headers {
struct acpi_subtable_header common;
struct acpi_hmat_structure hmat;
diff --git a/include/linux/habanalabs/cpucp_if.h b/include/linux/habanalabs/cpucp_if.h
index 86ea7c63a0d2..f316c8d0f3fc 100644
--- a/include/linux/habanalabs/cpucp_if.h
+++ b/include/linux/habanalabs/cpucp_if.h
@@ -659,6 +659,12 @@ enum pq_init_status {
* number (nonce) provided by the host to prevent replay attacks.
* public key and certificate also provided as part of the FW response.
*
+ * CPUCP_PACKET_INFO_SIGNED_GET -
+ * Get the device information signed by the Trusted Platform device.
+ * The device info data is also hashed with a unique number (nonce) provided
+ * by the host to prevent replay attacks. The public key and certificate are
+ * also provided as part of the FW response.
+ *
* CPUCP_PACKET_MONITOR_DUMP_GET -
* Get monitors registers dump from the CpuCP kernel.
* The CPU will put the registers dump in a buffer allocated by the driver
@@ -733,7 +739,7 @@ enum cpucp_packet_id {
CPUCP_PACKET_ENGINE_CORE_ASID_SET, /* internal */
CPUCP_PACKET_RESERVED2, /* not used */
CPUCP_PACKET_SEC_ATTEST_GET, /* internal */
- CPUCP_PACKET_RESERVED3, /* not used */
+ CPUCP_PACKET_INFO_SIGNED_GET, /* internal */
CPUCP_PACKET_RESERVED4, /* not used */
CPUCP_PACKET_MONITOR_DUMP_GET, /* debugfs */
CPUCP_PACKET_RESERVED5, /* not used */
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 4cacc0e43b51..be20cff4ba73 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -454,7 +454,7 @@ static inline void memcpy_from_folio(char *to, struct folio *folio,
memcpy(to, from, chunk);
kunmap_local(from);
- from += chunk;
+ to += chunk;
offset += chunk;
len -= chunk;
} while (len > 0);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d3acecc5db4b..236ec7b63c54 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -1268,10 +1268,7 @@ static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}
-static inline bool __vma_private_lock(struct vm_area_struct *vma)
-{
- return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
-}
+bool __vma_private_lock(struct vm_area_struct *vma);
/*
* Safe version of huge_pte_offset() to check the locks. See comments
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 958771bac9c0..c2ac9e9e7ee9 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2830,12 +2830,14 @@ ieee80211_he_oper_size(const u8 *he_oper_ie)
static inline const struct ieee80211_he_6ghz_oper *
ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper)
{
- const u8 *ret = (const void *)&he_oper->optional;
+ const u8 *ret;
u32 he_oper_params;
if (!he_oper)
return NULL;
+ ret = (const void *)&he_oper->optional;
+
he_oper_params = le32_to_cpu(he_oper->he_oper_params);
if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO))
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index d3009d56af0b..805bb635cdf5 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -340,6 +340,9 @@ struct io_ring_ctx {
struct list_head io_buffers_cache;
+ /* deferred free list, protected by ->uring_lock */
+ struct hlist_head io_buf_list;
+
/* Keep this last, we don't need it for the fast path */
struct wait_queue_head poll_wq;
struct io_restriction restrictions;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index ec289c1016f5..6291aa7b079b 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -845,6 +845,7 @@ static inline void dev_iommu_priv_set(struct device *dev, void *priv)
dev->iommu->priv = priv;
}
+extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index ab1da3142b06..0ff44d6633e3 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -139,7 +139,7 @@ static inline bool kprobe_ftrace(struct kprobe *p)
*
*/
struct kretprobe_holder {
- struct kretprobe *rp;
+ struct kretprobe __rcu *rp;
struct objpool_head pool;
};
@@ -197,10 +197,8 @@ extern int arch_trampoline_kprobe(struct kprobe *p);
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
{
- RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
- "Kretprobe is accessed from instance under preemptive context");
-
- return (struct kretprobe *)READ_ONCE(ri->node.rethook->data);
+ /* rethook::data is a non-changing field, so it can be accessed freely. */
+ return (struct kretprobe *)ri->node.rethook->data;
}
static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
{
@@ -245,10 +243,7 @@ unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
{
- RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
- "Kretprobe is accessed from instance under preemptive context");
-
- return READ_ONCE(ri->rph->rp);
+ return rcu_dereference_check(ri->rph->rp, rcu_read_lock_any_held());
}
static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
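
The kprobes change above turns rph->rp into an __rcu pointer read through rcu_dereference_check(), so the access is legal under any RCU reader instead of warning unconditionally. The general pattern, sketched on a hypothetical structure:

    /* Sketch: the __rcu pointer pattern used above, on hypothetical types. */
    struct holder {
            struct thing __rcu *ptr;
    };

    static struct thing *holder_get_thing(struct holder *h)
    {
            /* valid under rcu_read_lock(), rcu_read_lock_trace(), etc. */
            return rcu_dereference_check(h->ptr, rcu_read_lock_any_held());
    }
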
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 63e630276499..ab1c7deff118 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -114,6 +114,9 @@
/* Charging mode - 1=Barrel, 2=USB */
#define ASUS_WMI_DEVID_CHARGE_MODE 0x0012006C
+/* MCU powersave mode */
+#define ASUS_WMI_DEVID_MCU_POWERSAVE 0x001200E2
+
/* epu is connected? 1 == true */
#define ASUS_WMI_DEVID_EGPU_CONNECTED 0x00090018
/* egpu on/off */
diff --git a/include/linux/rethook.h b/include/linux/rethook.h
index ce69b2b7bc35..ba60962805f6 100644
--- a/include/linux/rethook.h
+++ b/include/linux/rethook.h
@@ -28,7 +28,12 @@ typedef void (*rethook_handler_t) (struct rethook_node *, void *, unsigned long,
*/
struct rethook {
void *data;
- rethook_handler_t handler;
+ /*
+ * To avoid sparse warnings, this uses a raw function pointer with
+ * __rcu, instead of rethook_handler_t. But this must be same as
+ * rethook_handler_t.
+ */
+ void (__rcu *handler) (struct rethook_node *, void *, unsigned long, struct pt_regs *);
struct objpool_head pool;
struct rcu_head rcu;
};
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index c1637515a8a4..c953b8c0d2f4 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -106,6 +106,7 @@ struct sk_psock {
struct mutex work_mutex;
struct sk_psock_work_state work_state;
struct delayed_work work;
+ struct sock *sk_pair;
struct rcu_work rwork;
};
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 0b4658a7eceb..dee5ad6e48c5 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -175,6 +175,7 @@ struct stmmac_fpe_cfg {
bool hs_enable; /* FPE handshake enable */
enum stmmac_fpe_state lp_fpe_state; /* Link Partner FPE state */
enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */
+ u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
};
struct stmmac_safety_feature_cfg {
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 68f3d315d2e1..b646b574b060 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -169,7 +169,7 @@ struct tcp_request_sock {
#ifdef CONFIG_TCP_AO
u8 ao_keyid;
u8 ao_rcv_next;
- u8 maclen;
+ bool used_tcp_ao;
#endif
};
@@ -180,14 +180,10 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
static inline bool tcp_rsk_used_ao(const struct request_sock *req)
{
- /* The real length of MAC is saved in the request socket,
- * signing anything with zero-length makes no sense, so here is
- * a little hack..
- */
#ifndef CONFIG_TCP_AO
return false;
#else
- return tcp_rsk(req)->maclen != 0;
+ return tcp_rsk(req)->used_tcp_ao;
#endif
}
diff --git a/include/linux/units.h b/include/linux/units.h
index ff1bd6b5f5b3..45110daaf8d3 100644
--- a/include/linux/units.h
+++ b/include/linux/units.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_UNITS_H
#define _LINUX_UNITS_H
+#include <linux/bits.h>
#include <linux/math.h>
/* Metric prefixes in accordance with Système international (d'unités) */
diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h
index 287e9d83fb8b..33a4c146dc19 100644
--- a/include/linux/usb/r8152.h
+++ b/include/linux/usb/r8152.h
@@ -30,6 +30,7 @@
#define VENDOR_ID_NVIDIA 0x0955
#define VENDOR_ID_TPLINK 0x2357
#define VENDOR_ID_DLINK 0x2001
+#define VENDOR_ID_ASUS 0x0b05
#if IS_REACHABLE(CONFIG_USB_RTL8152)
extern u8 rtl8152_get_version(struct usb_interface *intf);
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 454e9295970c..a65b2513f8cd 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -289,16 +289,12 @@ void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes,
/*
* External user API
*/
-#if IS_ENABLED(CONFIG_VFIO_GROUP)
struct iommu_group *vfio_file_iommu_group(struct file *file);
+
+#if IS_ENABLED(CONFIG_VFIO_GROUP)
bool vfio_file_is_group(struct file *file);
bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
#else
-static inline struct iommu_group *vfio_file_iommu_group(struct file *file)
-{
- return NULL;
-}
-
static inline bool vfio_file_is_group(struct file *file)
{
return false;
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 824c258143a3..49c4640027d8 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -75,6 +75,7 @@ struct unix_sock {
};
#define unix_sk(ptr) container_of_const(ptr, struct unix_sock, sk)
+#define unix_peer(sk) (unix_sk(sk)->peer)
#define peer_wait peer_wq.wait
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index b137a33a1b68..4ecfb06c413d 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -9299,4 +9299,50 @@ bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap,
*/
void cfg80211_links_removed(struct net_device *dev, u16 link_mask);
+#ifdef CONFIG_CFG80211_DEBUGFS
+/**
+ * wiphy_locked_debugfs_read - do a locked read in debugfs
+ * @wiphy: the wiphy to use
+ * @file: the file being read
+ * @buf: the buffer to fill and then read from
+ * @bufsize: size of the buffer
+ * @userbuf: the user buffer to copy to
+ * @count: read count
+ * @ppos: read position
+ * @handler: the read handler to call (under wiphy lock)
+ * @data: additional data to pass to the read handler
+ */
+ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t bufsize,
+ char __user *userbuf, size_t count,
+ loff_t *ppos,
+ ssize_t (*handler)(struct wiphy *wiphy,
+ struct file *file,
+ char *buf,
+ size_t bufsize,
+ void *data),
+ void *data);
+
+/**
+ * wiphy_locked_debugfs_write - do a locked write in debugfs
+ * @wiphy: the wiphy to use
+ * @file: the file being written to
+ * @buf: the buffer to copy the user data to
+ * @bufsize: size of the buffer
+ * @userbuf: the user buffer to copy from
+ * @count: write count
+ * @handler: the write handler to call (under wiphy lock)
+ * @data: additional data to pass to the write handler
+ */
+ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t bufsize,
+ const char __user *userbuf, size_t count,
+ ssize_t (*handler)(struct wiphy *wiphy,
+ struct file *file,
+ char *buf,
+ size_t count,
+ void *data),
+ void *data);
+#endif
+
#endif /* __NET_CFG80211_H */
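
A driver-side handler for wiphy_locked_debugfs_read() runs under the wiphy lock and fills buf; the wrapper then copies the result to userspace. A hedged sketch (my_wiphy and the handler names are hypothetical):

    /* Sketch: a wiphy-locked debugfs read, per the declaration above. */
    static ssize_t my_read_handler(struct wiphy *wiphy, struct file *file,
                                   char *buf, size_t bufsize, void *data)
    {
            /* wiphy lock held here */
            return scnprintf(buf, bufsize, "state=%d\n", 42);
    }

    static ssize_t my_read(struct file *file, char __user *userbuf,
                           size_t count, loff_t *ppos)
    {
            char buf[64];

            return wiphy_locked_debugfs_read(my_wiphy, file,
                                             buf, sizeof(buf),
                                             userbuf, count, ppos,
                                             my_read_handler, NULL);
    }
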
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index e18a4c0d69ee..c53244f20437 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -12,10 +12,12 @@
* struct genl_multicast_group - generic netlink multicast group
* @name: name of the multicast group, names are per-family
* @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
+ * @cap_sys_admin: whether %CAP_SYS_ADMIN is required for binding
*/
struct genl_multicast_group {
char name[GENL_NAMSIZ];
u8 flags;
+ u8 cap_sys_admin:1;
};
struct genl_split_ops;
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 07022bb0d44d..0d28172193fa 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -162,7 +162,7 @@ struct neighbour {
struct rcu_head rcu;
struct net_device *dev;
netdevice_tracker dev_tracker;
- u8 primary_key[0];
+ u8 primary_key[];
} __randomize_layout;
struct neigh_ops {
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d2f0736b76b8..144ba48bb07b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1514,17 +1514,22 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}
-static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
{
int unused_mem = sk_unused_reserved_mem(sk);
struct tcp_sock *tp = tcp_sk(sk);
- tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+ tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
if (unused_mem)
tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
tcp_win_from_space(sk, unused_mem));
}
+static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+{
+ __tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
+}
+
void tcp_cleanup_rbuf(struct sock *sk, int copied);
void __tcp_cleanup_rbuf(struct sock *sk, int copied);
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
index b56be10838f0..647781080613 100644
--- a/include/net/tcp_ao.h
+++ b/include/net/tcp_ao.h
@@ -62,11 +62,17 @@ static inline int tcp_ao_maclen(const struct tcp_ao_key *key)
return key->maclen;
}
+/* Use tcp_ao_len_aligned() for TCP header calculations */
static inline int tcp_ao_len(const struct tcp_ao_key *key)
{
return tcp_ao_maclen(key) + sizeof(struct tcp_ao_hdr);
}
+static inline int tcp_ao_len_aligned(const struct tcp_ao_key *key)
+{
+ return round_up(tcp_ao_len(key), 4);
+}
+
static inline unsigned int tcp_ao_digest_size(struct tcp_ao_key *key)
{
return key->digest_size;
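
To make the new alignment helper concrete, a worked example (sizeof(struct tcp_ao_hdr) is 4 bytes, and TCP option space is counted in 32-bit words, hence the rounding):

    /* Worked example for tcp_ao_len_aligned():
     *   maclen = 12: tcp_ao_len() = 12 + 4 = 16; round_up(16, 4) = 16
     *   maclen = 13: tcp_ao_len() = 13 + 4 = 17; round_up(17, 4) = 20
     */
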
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 95896472a82b..565a85044541 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -77,6 +77,13 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
{
__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
umem->sgt_append.sgt.nents, pgsz);
+ biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
+ biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
+}
+
+static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
+{
+ return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
}
/**
@@ -92,7 +99,7 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
*/
#define rdma_umem_for_each_dma_block(umem, biter, pgsz) \
for (__rdma_umem_block_iter_start(biter, umem, pgsz); \
- __rdma_block_iter_next(biter);)
+ __rdma_umem_block_iter_next(biter);)
#ifdef CONFIG_INFINIBAND_USER_MEM
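
The umem block iterator walks aligned DMA blocks of the scatterlist and is now bounded by the block count set up in __rdma_umem_block_iter_start(). A sketch of typical use (the page size here is chosen arbitrarily):

    /* Sketch: iterating a umem's aligned DMA blocks with pgsz = SZ_4K. */
    struct ib_block_iter biter;

    rdma_umem_for_each_dma_block(umem, &biter, SZ_4K) {
            dma_addr_t addr = rdma_block_iter_dma_address(&biter);

            /* ... program addr into the device's page tables ... */
    }
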
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fb1a2d6b1969..b7b6b58dd348 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2850,6 +2850,7 @@ struct ib_block_iter {
/* internal states */
struct scatterlist *__sg; /* sg holding the current aligned block */
dma_addr_t __dma_addr; /* unaligned DMA address of this block */
+ size_t __sg_numblocks; /* ib_umem_num_dma_blocks() */
unsigned int __sg_nents; /* number of SG entries */
unsigned int __sg_advance; /* number of bytes to advance in sg in next step */
unsigned int __pg_bit; /* alignment of current block */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 10480eb582b2..5ec1e71a09de 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -167,19 +167,25 @@ struct scsi_device {
* power state for system suspend/resume (suspend to RAM and
* hibernation) operations.
*/
- bool manage_system_start_stop;
+ unsigned manage_system_start_stop:1;
/*
* If true, let the high-level device driver (sd) manage the device
* power state for runtime device suspand and resume operations.
*/
- bool manage_runtime_start_stop;
+ unsigned manage_runtime_start_stop:1;
/*
* If true, let the high-level device driver (sd) manage the device
* power state for system shutdown (power off) operations.
*/
- bool manage_shutdown;
+ unsigned manage_shutdown:1;
+
+ /*
+ * If set and if the device is runtime suspended, ask the high-level
+ * device driver (sd) to force a runtime resume of the device.
+ */
+ unsigned force_runtime_start_on_system_start:1;
unsigned removable:1;
unsigned changed:1; /* Data invalid due to media change */
diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h
index 043f8ac65dbf..68e053fe7340 100644
--- a/include/sound/cs35l41.h
+++ b/include/sound/cs35l41.h
@@ -906,6 +906,6 @@ int cs35l41_init_boost(struct device *dev, struct regmap *regmap,
bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type);
int cs35l41_mdsync_up(struct regmap *regmap);
int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type,
- int enable, bool firmware_running);
+ int enable, struct cs_dsp *dsp);
#endif /* __CS35L41_H */
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index aee3d40c96f0..7040e7ea80c7 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -846,6 +846,14 @@ struct drm_color_ctm {
__u64 matrix[9];
};
+struct drm_color_ctm_3x4 {
+ /*
+ * Conversion matrix with 3x4 dimensions in S31.32 sign-magnitude
+ * (not two's complement!) format.
+ */
+ __u64 matrix[12];
+};
+
struct drm_color_lut {
/*
* Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and
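
Since the S31.32 sign-magnitude encoding used by drm_color_ctm_3x4 above (and by drm_color_ctm) is not two's complement, a small conversion sketch may help; this helper is illustrative only and not part of the uAPI:

    /* Sketch: encoding a double as S31.32 sign-magnitude for CTM matrices. */
    static __u64 ctm_s31_32_from_double(double v)
    {
            __u64 sign = 0;

            if (v < 0) {
                    sign = 1ULL << 63; /* sign bit; magnitude stays positive */
                    v = -v;
            }
            return sign | (__u64)(v * (1ULL << 32)); /* 32 fractional bits */
    }
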
diff --git a/include/uapi/drm/habanalabs_accel.h b/include/uapi/drm/habanalabs_accel.h
index 347c7b62e60e..a512dc4cffd0 100644
--- a/include/uapi/drm/habanalabs_accel.h
+++ b/include/uapi/drm/habanalabs_accel.h
@@ -846,6 +846,7 @@ enum hl_server_type {
#define HL_INFO_HW_ERR_EVENT 36
#define HL_INFO_FW_ERR_EVENT 37
#define HL_INFO_USER_ENGINE_ERR_EVENT 38
+#define HL_INFO_DEV_SIGNED 40
#define HL_INFO_VERSION_MAX_LEN 128
#define HL_INFO_CARD_NAME_MAX_LEN 16
@@ -1256,6 +1257,7 @@ struct hl_info_dev_memalloc_page_sizes {
#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */
#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */
+#define SEC_DEV_INFO_BUF_SZ 5120
/*
* struct hl_info_sec_attest - attestation report of the boot
@@ -1290,6 +1292,32 @@ struct hl_info_sec_attest {
__u8 pad0[2];
};
+/*
+ * struct hl_info_signed - device information signed by a secured device.
+ * @nonce: number only used once. random number provided by host. this also passed to the quote
+ * command as a qualifying data.
+ * @pub_data_len: length of the public data (bytes)
+ * @certificate_len: length of the certificate (bytes)
+ * @info_sig_len: length of the attestation signature (bytes)
+ * @public_data: public key info signed info data (outPublic + name + qualifiedName)
+ * @certificate: certificate for the signing key
+ * @info_sig: signature of the info + nonce data.
+ * @dev_info_len: length of device info (bytes)
+ * @dev_info: device info as byte array.
+ */
+struct hl_info_signed {
+ __u32 nonce;
+ __u16 pub_data_len;
+ __u16 certificate_len;
+ __u8 info_sig_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+ __u8 info_sig[SEC_SIGNATURE_BUF_SZ];
+ __u16 dev_info_len;
+ __u8 dev_info[SEC_DEV_INFO_BUF_SZ];
+ __u8 pad[2];
+};
+
/**
* struct hl_page_fault_info - page fault information.
* @timestamp: timestamp of page fault.
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 6c34272a13fd..d8a6b3472760 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -86,6 +86,7 @@ struct drm_msm_timespec {
#define MSM_PARAM_CMDLINE 0x0d /* WO: override for task cmdline */
#define MSM_PARAM_VA_START 0x0e /* RO: start of valid GPU iova range */
#define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */
+#define MSM_PARAM_HIGHEST_BANK_BIT 0x10 /* RO */
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #
@@ -139,6 +140,8 @@ struct drm_msm_gem_new {
#define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */
#define MSM_INFO_SET_IOVA 0x04 /* set the iova, passed by value */
#define MSM_INFO_GET_FLAGS 0x05 /* get the MSM_BO_x flags */
+#define MSM_INFO_SET_METADATA 0x06 /* set userspace metadata */
+#define MSM_INFO_GET_METADATA 0x07 /* get userspace metadata */
struct drm_msm_gem_info {
__u32 handle; /* in */
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
new file mode 100644
index 000000000000..9fa3ae324731
--- /dev/null
+++ b/include/uapi/drm/xe_drm.h
@@ -0,0 +1,1347 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _UAPI_XE_DRM_H_
+#define _UAPI_XE_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ * Sections in this file are organized as follows:
+ * 1. IOCTL definition
+ * 2. Extension definition and helper structs
+ * 3. IOCTL's Query structs in the order of the Query's entries.
+ * 4. The rest of IOCTL structs in the order of IOCTL declaration.
+ */
+
+/**
+ * DOC: Xe Device Block Diagram
+ *
+ * The diagram below represents a high-level simplification of a discrete
+ * GPU supported by the Xe driver. It shows some device components which
+ * are necessary to understand this API, as well as how they relate to
+ * each other. This diagram does not represent real hardware::
+ *
+ * ┌──────────────────────────────────────────────────────────────────┐
+ * │ ┌──────────────────────────────────────────────────┐ ┌─────────┐ │
+ * │ │ ┌───────────────────────┐ ┌─────┐ │ │ ┌─────┐ │ │
+ * │ │ │ VRAM0 ├───┤ ... │ │ │ │VRAM1│ │ │
+ * │ │ └───────────┬───────────┘ └─GT1─┘ │ │ └──┬──┘ │ │
+ * │ │ ┌──────────────────┴───────────────────────────┐ │ │ ┌──┴──┐ │ │
+ * │ │ │ ┌─────────────────────┐ ┌─────────────────┐ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │RCS0 │ │BCS0 │ │ │ │ │ │ │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VCS0 │ │VCS1 │ │ │ │ │ │ │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VECS0│ │VECS1│ │ │ │ │ │ ... │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │CCS0 │ │CCS1 │ │ │ │ │ │ │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ └─────────DSS─────────┘ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │CCS2 │ │CCS3 │ │ │ │ │ │ │ │ │
+ * │ │ │ ┌─────┐ ┌─────┐ ┌─────┐ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ... │ │ ... │ │ ... │ │ │ │ │ │ │ │ │ │
+ * │ │ │ └─DSS─┘ └─DSS─┘ └─DSS─┘ └─────Engines─────┘ │ │ │ │ │ │ │
+ * │ │ └───────────────────────────GT0────────────────┘ │ │ └─GT2─┘ │ │
+ * │ └────────────────────────────Tile0─────────────────┘ └─ Tile1──┘ │
+ * └─────────────────────────────Device0───────┬──────────────────────┘
+ * │
+ * ───────────────────────┴────────── PCI bus
+ */
+
+/**
+ * DOC: Xe uAPI Overview
+ *
+ * This section aims to describe Xe's IOCTL entries, their structs, and other
+ * Xe-related uAPI such as uevents and PMU (Platform Monitoring Unit) related
+ * entries and usage.
+ *
+ * List of supported IOCTLs:
+ * - &DRM_IOCTL_XE_DEVICE_QUERY
+ * - &DRM_IOCTL_XE_GEM_CREATE
+ * - &DRM_IOCTL_XE_GEM_MMAP_OFFSET
+ * - &DRM_IOCTL_XE_VM_CREATE
+ * - &DRM_IOCTL_XE_VM_DESTROY
+ * - &DRM_IOCTL_XE_VM_BIND
+ * - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
+ * - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
+ * - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
+ * - &DRM_IOCTL_XE_EXEC
+ * - &DRM_IOCTL_XE_WAIT_USER_FENCE
+ */
+
+/*
+ * xe specific ioctls.
+ *
+ * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
+ * [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as offsets
+ * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
+ */
+#define DRM_XE_DEVICE_QUERY 0x00
+#define DRM_XE_GEM_CREATE 0x01
+#define DRM_XE_GEM_MMAP_OFFSET 0x02
+#define DRM_XE_VM_CREATE 0x03
+#define DRM_XE_VM_DESTROY 0x04
+#define DRM_XE_VM_BIND 0x05
+#define DRM_XE_EXEC_QUEUE_CREATE 0x06
+#define DRM_XE_EXEC_QUEUE_DESTROY 0x07
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08
+#define DRM_XE_EXEC 0x09
+#define DRM_XE_WAIT_USER_FENCE 0x0a
+/* Must be kept compact -- no holes */
+
+#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
+#define DRM_IOCTL_XE_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
+#define DRM_IOCTL_XE_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
+#define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
+#define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
+#define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
+#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
+#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
+#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
+#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
+#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
+
+/**
+ * DOC: Xe IOCTL Extensions
+ *
+ * Before detailing the IOCTLs and its structs, it is important to highlight
+ * that every IOCTL in Xe is extensible.
+ *
+ * Many interfaces need to grow over time. In most cases we can simply
+ * extend the struct and have userspace pass in more data. Another option,
+ * as demonstrated by Vulkan's approach to providing extensions for forward
+ * and backward compatibility, is to use a list of optional structs to
+ * provide those extra details.
+ *
+ * The key advantage to using an extension chain is that it allows us to
+ * redefine the interface more easily than an ever growing struct of
+ * increasing complexity, and for large parts of that interface to be
+ * entirely optional. The downside is more pointer chasing; chasing across
+ * the __user boundary with pointers encapsulated inside u64.
+ *
+ * Example chaining:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_user_extension ext3 = {
+ * .next_extension = 0, // end
+ * .name = ...,
+ * };
+ * struct drm_xe_user_extension ext2 = {
+ * .next_extension = (uintptr_t)&ext3,
+ * .name = ...,
+ * };
+ * struct drm_xe_user_extension ext1 = {
+ * .next_extension = (uintptr_t)&ext2,
+ * .name = ...,
+ * };
+ *
+ * Typically the struct drm_xe_user_extension would be embedded in some uAPI
+ * struct, and in this case we would feed it the head of the chain (i.e. ext1),
+ * which would then apply all of the above extensions.
+ */
+
+/**
+ * struct drm_xe_user_extension - Base class for defining a chain of extensions
+ */
+struct drm_xe_user_extension {
+ /**
+ * @next_extension:
+ *
+ * Pointer to the next struct drm_xe_user_extension, or zero if the end.
+ */
+ __u64 next_extension;
+
+ /**
+ * @name: Name of the extension.
+ *
+ * Note that the name here is just some integer.
+ *
+ * Also note that the name space for this is not global for the whole
+ * driver, but rather its scope/meaning is limited to the specific piece
+ * of uAPI which has embedded the struct drm_xe_user_extension.
+ */
+ __u32 name;
+
+ /**
+ * @pad: MBZ
+ *
+ * All undefined bits must be zero.
+ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_xe_ext_set_property - Generic set property extension
+ *
+ * A generic struct that allows any of the Xe's IOCTL to be extended
+ * with a set_property operation.
+ */
+struct drm_xe_ext_set_property {
+ /** @base: base user extension */
+ struct drm_xe_user_extension base;
+
+ /** @property: property to set */
+ __u32 property;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @value: property value */
+ __u64 value;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_engine_class_instance - instance of an engine class
+ *
+ * It is returned as part of the @drm_xe_engine, but it is also used as
+ * the input for engine selection for both @drm_xe_exec_queue_create and
+ * @drm_xe_query_engine_cycles
+ *
+ * The @engine_class can be:
+ * - %DRM_XE_ENGINE_CLASS_RENDER
+ * - %DRM_XE_ENGINE_CLASS_COPY
+ * - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE
+ * - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE
+ * - %DRM_XE_ENGINE_CLASS_COMPUTE
+ * - %DRM_XE_ENGINE_CLASS_VM_BIND - Kernel-only class (not an actual
+ * hardware engine class). Used for creating ordered queues of VM
+ * bind operations.
+ */
+struct drm_xe_engine_class_instance {
+#define DRM_XE_ENGINE_CLASS_RENDER 0
+#define DRM_XE_ENGINE_CLASS_COPY 1
+#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE 2
+#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE 3
+#define DRM_XE_ENGINE_CLASS_COMPUTE 4
+#define DRM_XE_ENGINE_CLASS_VM_BIND 5
+ /** @engine_class: engine class id */
+ __u16 engine_class;
+ /** @engine_instance: engine instance id */
+ __u16 engine_instance;
+ /** @gt_id: Unique ID of this GT within the PCI Device */
+ __u16 gt_id;
+ /** @pad: MBZ */
+ __u16 pad;
+};
+
+/**
+ * struct drm_xe_engine - describe hardware engine
+ */
+struct drm_xe_engine {
+ /** @instance: The @drm_xe_engine_class_instance */
+ struct drm_xe_engine_class_instance instance;
+
+ /** @reserved: Reserved */
+ __u64 reserved[3];
+};
+
+/**
+ * struct drm_xe_query_engines - describe engines
+ *
+ * If a query is made with a struct @drm_xe_device_query where .query
+ * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses an array of
+ * struct @drm_xe_query_engines in .data.
+ */
+struct drm_xe_query_engines {
+ /** @num_engines: number of engines returned in @engines */
+ __u32 num_engines;
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @engines: The returned engines for this device */
+ struct drm_xe_engine engines[];
+};
+
+/**
+ * enum drm_xe_memory_class - Supported memory classes.
+ */
+enum drm_xe_memory_class {
+ /** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
+ DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
+ /**
+ * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
+ * represents the memory that is local to the device, which we
+ * call VRAM. Not valid on integrated platforms.
+ */
+ DRM_XE_MEM_REGION_CLASS_VRAM
+};
+
+/**
+ * struct drm_xe_mem_region - Describes some region as known to
+ * the driver.
+ */
+struct drm_xe_mem_region {
+ /**
+ * @mem_class: The memory class describing this region.
+ *
+ * See enum drm_xe_memory_class for supported values.
+ */
+ __u16 mem_class;
+ /**
+ * @instance: The unique ID for this region, which serves as the
+ * index in the placement bitmask used as argument for
+ * &DRM_IOCTL_XE_GEM_CREATE
+ */
+ __u16 instance;
+ /**
+ * @min_page_size: Min page-size in bytes for this region.
+ *
+ * When the kernel allocates memory for this region, the
+ * underlying pages will be at least @min_page_size in size.
+ * Buffer objects with an allowable placement in this region must be
+ * created with a size aligned to this value.
+ * GPU virtual address mappings of (parts of) buffer objects that
+ * may be placed in this region must also have their GPU virtual
+ * address and range aligned to this value.
+ * Affected IOCTLS will return %-EINVAL if alignment restrictions are
+ * not met.
+ */
+ __u32 min_page_size;
+ /**
+ * @total_size: The usable size in bytes for this region.
+ */
+ __u64 total_size;
+ /**
+ * @used: Estimate of the memory used in bytes for this region.
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
+ * accounting. Without this the value here will always equal
+ * zero.
+ */
+ __u64 used;
+ /**
+ * @cpu_visible_size: How much of this region can be CPU
+ * accessed, in bytes.
+ *
+ * This will always be <= @total_size, and the remainder (if
+ * any) will not be CPU accessible. If the CPU accessible part
+ * is smaller than @total_size then this is referred to as a
+ * small BAR system.
+ *
+ * On systems without small BAR (full BAR), @cpu_visible_size will
+ * always equal the @total_size, since all of it will be CPU
+ * accessible.
+ *
+ * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
+ * regions (for other types the value here will always equal
+ * zero).
+ */
+ __u64 cpu_visible_size;
+ /**
+ * @cpu_visible_used: Estimate of CPU visible memory used, in
+ * bytes.
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
+ * accounting. Without this the value here will always equal
+ * zero. Note this is only currently tracked for
+ * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
+ * here will always be zero).
+ */
+ __u64 cpu_visible_used;
+ /** @reserved: Reserved */
+ __u64 reserved[6];
+};
+
+/**
+ * struct drm_xe_query_mem_regions - describe memory regions
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses
+ * struct drm_xe_query_mem_regions in .data.
+ */
+struct drm_xe_query_mem_regions {
+ /** @num_mem_regions: number of memory regions returned in @mem_regions */
+ __u32 num_mem_regions;
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @mem_regions: The returned memory regions for this device */
+ struct drm_xe_mem_region mem_regions[];
+};
+
+/**
+ * struct drm_xe_query_config - describe the device configuration
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_CONFIG, then the reply uses
+ * struct drm_xe_query_config in .data.
+ *
+ * The index in @info can be:
+ * - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits)
+ * and the device revision (next 8 bits)
+ * - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device
+ * configuration, see list below
+ *
+ * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
+ * has usable VRAM
+ * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
+ * required by this device, typically SZ_4K or SZ_64K
+ * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
+ * - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest
+ * available exec queue priority
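+ *
+ * A minimal sketch of reading some of these values (assuming `fd` is an
+ * open device file descriptor; error handling omitted):
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_query_config *config;
+ * struct drm_xe_device_query query = {
+ * .extensions = 0,
+ * .query = DRM_XE_DEVICE_QUERY_CONFIG,
+ * .size = 0,
+ * .data = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * config = malloc(query.size);
+ * query.data = (uintptr_t)config;
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * printf("device id 0x%llx, va bits %llu\n",
+ * config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff,
+ * config->info[DRM_XE_QUERY_CONFIG_VA_BITS]);
+ * free(config);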
+ */
+struct drm_xe_query_config {
+ /** @num_params: number of parameters returned in @info */
+ __u32 num_params;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
+#define DRM_XE_QUERY_CONFIG_FLAGS 1
+ #define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
+#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
+#define DRM_XE_QUERY_CONFIG_VA_BITS 3
+#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
+ /** @info: array of elements containing the config info */
+ __u64 info[];
+};
+
+/**
+ * struct drm_xe_gt - describe an individual GT.
+ *
+ * To be used with drm_xe_query_gt_list, which will return a list with all the
+ * existing GT individual descriptions.
+ * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for
+ * implementing graphics and/or media operations.
+ *
+ * The index in @type can be:
+ * - %DRM_XE_QUERY_GT_TYPE_MAIN
+ * - %DRM_XE_QUERY_GT_TYPE_MEDIA
+ */
+struct drm_xe_gt {
+#define DRM_XE_QUERY_GT_TYPE_MAIN 0
+#define DRM_XE_QUERY_GT_TYPE_MEDIA 1
+ /** @type: GT type: Main or Media */
+ __u16 type;
+ /** @tile_id: Tile ID where this GT lives (Information only) */
+ __u16 tile_id;
+ /** @gt_id: Unique ID of this GT within the PCI Device */
+ __u16 gt_id;
+ /** @pad: MBZ */
+ __u16 pad[3];
+ /** @reference_clock: A clock frequency for timestamp */
+ __u32 reference_clock;
+ /**
+ * @near_mem_regions: Bit mask of instances from
+ * drm_xe_query_mem_regions that are nearest to the current engines
+ * of this GT.
+ * Each index in this mask refers directly to the struct
+ * drm_xe_query_mem_regions' instance, no assumptions should
+ * be made about order. The type of each region is described
+ * by struct drm_xe_query_mem_regions' mem_class.
+ */
+ __u64 near_mem_regions;
+ /**
+ * @far_mem_regions: Bit mask of instances from
+ * drm_xe_query_mem_regions that are far from the engines of this GT.
+ * In general, they have extra indirections when compared to the
+ * @near_mem_regions. For a discrete device this could mean system
+ * memory and memory living in a different tile.
+ * Each index in this mask refers directly to the struct
+ * drm_xe_query_mem_regions' instance, no assumptions should
+ * be made about order. The type of each region is described
+ * by struct drm_xe_query_mem_regions' mem_class.
+ */
+ __u64 far_mem_regions;
+ /** @reserved: Reserved */
+ __u64 reserved[8];
+};
+
+/**
+ * struct drm_xe_query_gt_list - A list with GT description items.
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_GT_LIST, then the reply uses struct
+ * drm_xe_query_gt_list in .data.
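+ *
+ * A minimal sketch of listing the GTs (assuming `fd` is an open device
+ * file descriptor; error handling omitted):
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_query_gt_list *gt_list;
+ * struct drm_xe_device_query query = {
+ * .extensions = 0,
+ * .query = DRM_XE_DEVICE_QUERY_GT_LIST,
+ * .size = 0,
+ * .data = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * gt_list = malloc(query.size);
+ * query.data = (uintptr_t)gt_list;
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * for (int i = 0; i < gt_list->num_gt; i++)
+ * printf("GT %u: type %u, near_mem_regions 0x%llx\n",
+ * gt_list->gt_list[i].gt_id,
+ * gt_list->gt_list[i].type,
+ * gt_list->gt_list[i].near_mem_regions);
+ * free(gt_list);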
+ */
+struct drm_xe_query_gt_list {
+ /** @num_gt: number of GT items returned in gt_list */
+ __u32 num_gt;
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @gt_list: The GT list returned for this device */
+ struct drm_xe_gt gt_list[];
+};
+
+/**
+ * struct drm_xe_query_topology_mask - describe the topology mask of a GT
+ *
+ * This is the hardware topology which reflects the internal physical
+ * structure of the GPU.
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, then the reply uses
+ * struct drm_xe_query_topology_mask in .data.
+ *
+ * The @type can be:
+ * - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices
+ * (DSS) available for geometry operations. For example a query response
+ * containing the following in mask:
+ * ``DSS_GEOMETRY ff ff ff ff 00 00 00 00``
+ * means 32 DSS are available for geometry.
+ * - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices
+ * (DSS) available for compute operations. For example a query response
+ * containing the following in mask:
+ * ``DSS_COMPUTE ff ff ff ff 00 00 00 00``
+ * means 32 DSS are available for compute.
+ * - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU)
+ * available per Dual Sub Slices (DSS). For example a query response
+ * containing the following in mask:
+ * ``EU_PER_DSS ff ff 00 00 00 00 00 00``
+ * means each DSS has 16 EU.
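+ *
+ * The reply contains a series of these variable-sized records. A sketch
+ * of walking them (assuming the records are packed back to back, each
+ * occupying sizeof(struct drm_xe_query_topology_mask) + @num_bytes;
+ * `fd` is an open device file descriptor, error handling omitted):
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_query_topology_mask *topo;
+ * struct drm_xe_device_query query = {
+ * .extensions = 0,
+ * .query = DRM_XE_DEVICE_QUERY_GT_TOPOLOGY,
+ * .size = 0,
+ * .data = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * topo = malloc(query.size);
+ * query.data = (uintptr_t)topo;
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * char *p = (char *)topo;
+ * while (p < (char *)topo + query.size) {
+ * struct drm_xe_query_topology_mask *mask = (void *)p;
+ * printf("gt %u, type %u, %u mask bytes\n",
+ * mask->gt_id, mask->type, mask->num_bytes);
+ * p += sizeof(*mask) + mask->num_bytes;
+ * }
+ * free(topo);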
+ */
+struct drm_xe_query_topology_mask {
+ /** @gt_id: GT ID the mask is associated with */
+ __u16 gt_id;
+
+#define DRM_XE_TOPO_DSS_GEOMETRY (1 << 0)
+#define DRM_XE_TOPO_DSS_COMPUTE (1 << 1)
+#define DRM_XE_TOPO_EU_PER_DSS (1 << 2)
+ /** @type: type of mask */
+ __u16 type;
+
+ /** @num_bytes: number of bytes in requested mask */
+ __u32 num_bytes;
+
+ /** @mask: little-endian mask of @num_bytes */
+ __u8 mask[];
+};
+
+/**
+ * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
+ *
+ * If a query is made with a struct drm_xe_device_query where .query is equal to
+ * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles
+ * in .data. struct drm_xe_query_engine_cycles is allocated by the user and
+ * .data points to this allocated structure.
+ *
+ * The query returns the engine cycles, which, along with the GT's
+ * @reference_clock, can be used to calculate the engine timestamp. In
+ * addition the query returns a set of CPU timestamps that indicate when
+ * the command streamer cycle count was captured.
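+ *
+ * A minimal sketch (assuming `fd` is an open device file descriptor and
+ * `eci` is a struct drm_xe_engine_class_instance obtained from an
+ * earlier engines query; error handling omitted):
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_query_engine_cycles cycles = {
+ * .eci = eci,
+ * .clockid = CLOCK_MONOTONIC,
+ * };
+ * struct drm_xe_device_query query = {
+ * .extensions = 0,
+ * .query = DRM_XE_DEVICE_QUERY_ENGINE_CYCLES,
+ * .size = sizeof(cycles),
+ * .data = (uintptr_t)&cycles,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * printf("engine cycles %llu at cpu time %llu ns (width %u)\n",
+ * cycles.engine_cycles, cycles.cpu_timestamp, cycles.width);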
+ */
+struct drm_xe_query_engine_cycles {
+ /**
+ * @eci: This is input by the user and is the engine for which the
+ * command streamer cycle count is queried.
+ */
+ struct drm_xe_engine_class_instance eci;
+
+ /**
+ * @clockid: This is input by the user and is the reference clock id for
+ * CPU timestamp. For definition, see clock_gettime(2) and
+ * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC,
+ * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI.
+ */
+ __s32 clockid;
+
+ /** @width: Width of the engine cycle counter in bits. */
+ __u32 width;
+
+ /**
+ * @engine_cycles: Engine cycles as read from its register
+ * at 0x358 offset.
+ */
+ __u64 engine_cycles;
+
+ /**
+ * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
+ * reading the engine_cycles register using the reference clockid set by the
+ * user.
+ */
+ __u64 cpu_timestamp;
+
+ /**
+ * @cpu_delta: Time delta in ns captured around reading the lower dword
+ * of the engine_cycles register.
+ */
+ __u64 cpu_delta;
+};
+
+/**
+ * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
+ * structure to query device information
+ *
+ * The user selects the type of data to query among DRM_XE_DEVICE_QUERY_*
+ * and sets the value in the query member. This determines the type of
+ * the structure provided by the driver in data, among struct drm_xe_query_*.
+ *
+ * The @query can be:
+ * - %DRM_XE_DEVICE_QUERY_ENGINES
+ * - %DRM_XE_DEVICE_QUERY_MEM_REGIONS
+ * - %DRM_XE_DEVICE_QUERY_CONFIG
+ * - %DRM_XE_DEVICE_QUERY_GT_LIST
+ * - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware
+ * configuration of the device such as information on slices, memory,
+ * caches, and so on. It is provided as a table of key / value
+ * attributes.
+ * - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
+ * - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
+ *
+ * If size is set to 0, the driver fills it with the required size for
+ * the requested type of data to query. If size is equal to the required
+ * size, the queried information is copied into data. If size is set to
+ * a value different from 0 and different from the required size, the
+ * IOCTL call returns -EINVAL.
+ *
+ * For example the following code snippet allows retrieving and printing
+ * information about the device engines with DRM_XE_DEVICE_QUERY_ENGINES:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_query_engines *engines;
+ * struct drm_xe_device_query query = {
+ * .extensions = 0,
+ * .query = DRM_XE_DEVICE_QUERY_ENGINES,
+ * .size = 0,
+ * .data = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * engines = malloc(query.size);
+ * query.data = (uintptr_t)engines;
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * for (int i = 0; i < engines->num_engines; i++) {
+ * printf("Engine %d: %s\n", i,
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_RENDER ? "RENDER":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_COPY ? "COPY":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE":
+ * "UNKNOWN");
+ * }
+ * free(engines);
+ */
+struct drm_xe_device_query {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+#define DRM_XE_DEVICE_QUERY_ENGINES 0
+#define DRM_XE_DEVICE_QUERY_MEM_REGIONS 1
+#define DRM_XE_DEVICE_QUERY_CONFIG 2
+#define DRM_XE_DEVICE_QUERY_GT_LIST 3
+#define DRM_XE_DEVICE_QUERY_HWCONFIG 4
+#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY 5
+#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 6
+ /** @query: The type of data to query */
+ __u32 query;
+
+ /** @size: Size of the queried data */
+ __u32 size;
+
+ /** @data: Queried data is placed here */
+ __u64 data;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for
+ * gem creation
+ *
+ * The @flags can be:
+ * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
+ * - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
+ * - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
+ * possible placement, ensure that the corresponding VRAM allocation
+ * will always use the CPU accessible part of VRAM. This is important
+ * for small-bar systems (on full-bar systems this gets turned into a
+ * noop).
+ * Note1: System memory can be used as an extra placement so that the
+ * kernel can spill the allocation to system memory if space can't be
+ * made available in the CPU accessible part of VRAM (giving the same
+ * behaviour as the i915 interface, see
+ * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS).
+ * Note2: For clear-color CCS surfaces the kernel needs to read the
+ * clear-color value stored in the buffer, and on discrete platforms we
+ * need to use VRAM for display surfaces, therefore the kernel requires
+ * setting this flag for such objects, otherwise an error is returned on
+ * small-bar systems.
+ *
+ * @cpu_caching supports the following values:
+ * - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
+ * caching. On iGPU this can't be used for scanout surfaces. Currently
+ * not allowed for objects placed in VRAM.
+ * - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
+ * is uncached. Scanout surfaces should likely use this. All objects
+ * that can be placed in VRAM must use this.
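+ *
+ * A minimal sketch of creating a CPU-visible VRAM object (assuming `fd`
+ * is an open device file descriptor, `vram_instance` is a VRAM instance
+ * from a mem-regions query and `bo_size` is aligned to that region's
+ * min_page_size; error handling omitted):
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_gem_create create = {
+ * .extensions = 0,
+ * .size = bo_size,
+ * .placement = 1 << vram_instance,
+ * .flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM,
+ * .cpu_caching = DRM_XE_GEM_CPU_CACHING_WC,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
+ * __u32 bo_handle = create.handle;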
+ */
+struct drm_xe_gem_create {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /**
+ * @size: Size of the object to be created, must match region
+ * (system or vram) minimum alignment (&min_page_size).
+ */
+ __u64 size;
+
+ /**
+ * @placement: A mask of memory instances of where BO can be placed.
+ * Each index in this mask refers directly to the struct
+ * drm_xe_query_mem_regions' instance, no assumptions should
+ * be made about order. The type of each region is described
+ * by struct drm_xe_query_mem_regions' mem_class.
+ */
+ __u32 placement;
+
+#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0)
+#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1)
+#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2)
+ /**
+ * @flags: Creation flags; see the DRM_XE_GEM_CREATE_FLAG_* defines
+ * above
+ */
+ __u32 flags;
+
+ /**
+ * @vm_id: Attached VM, if any
+ *
+ * If a VM is specified, this BO must:
+ *
+ * 1. Only ever be bound to that VM.
+ * 2. Cannot be exported as a PRIME fd.
+ */
+ __u32 vm_id;
+
+ /**
+ * @handle: Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+
+#define DRM_XE_GEM_CPU_CACHING_WB 1
+#define DRM_XE_GEM_CPU_CACHING_WC 2
+ /**
+ * @cpu_caching: The CPU caching mode to select for this object. If
+ * mmapping the object, the mode selected here will also be used.
+ */
+ __u16 cpu_caching;
+ /** @pad: MBZ */
+ __u16 pad[3];
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
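+ *
+ * A minimal sketch of CPU-mapping an object (assuming `fd` is an open
+ * device file descriptor and `bo_handle`/`bo_size` come from a previous
+ * &DRM_IOCTL_XE_GEM_CREATE; error handling omitted):
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_gem_mmap_offset mmo = {
+ * .extensions = 0,
+ * .handle = bo_handle,
+ * .flags = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
+ * void *map = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
+ * MAP_SHARED, fd, mmo.offset);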
+ */
+struct drm_xe_gem_mmap_offset {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @handle: Handle for the object being mapped. */
+ __u32 handle;
+
+ /** @flags: Must be zero */
+ __u32 flags;
+
+ /** @offset: The fake offset to use for subsequent mmap call */
+ __u64 offset;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
+ *
+ * The @flags can be:
+ * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
+ * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running, VM accepts
+ * exec submissions to its exec_queues that don't have an upper time
+ * limit on the job execution time. But exec submissions to these
+ * don't allow any syncs of type DRM_XE_SYNC_TYPE_SYNCOBJ or
+ * DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ used as out-syncobjs, that is,
+ * together with DRM_XE_SYNC_FLAG_SIGNAL.
+ * LR VMs can be created in recoverable page-fault mode using
+ * DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
+ * If that flag is omitted, the UMD can not rely on the slightly
+ * different per-VM overcommit semantics that are enabled by
+ * DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but KMD may
+ * still enable recoverable pagefaults if supported by the device.
+ * - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also
+ * DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated on
+ * demand when accessed, and also allows per-VM overcommit of memory.
+ * The xe driver internally uses recoverable pagefaults to implement
+ * this.
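+ *
+ * A minimal sketch of creating a VM (assuming `fd` is an open device
+ * file descriptor; error handling omitted):
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_vm_create vm_create = {
+ * .extensions = 0,
+ * .flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &vm_create);
+ * __u32 vm = vm_create.vm_id;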
+ */
+struct drm_xe_vm_create {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0)
+#define DRM_XE_VM_CREATE_FLAG_LR_MODE (1 << 1)
+#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 2)
+ /** @flags: Flags */
+ __u32 flags;
+
+ /** @vm_id: Returned VM ID */
+ __u32 vm_id;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY
+ */
+struct drm_xe_vm_destroy {
+ /** @vm_id: VM ID */
+ __u32 vm_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_bind_op - run bind operations
+ *
+ * The @op can be:
+ * - %DRM_XE_VM_BIND_OP_MAP
+ * - %DRM_XE_VM_BIND_OP_UNMAP
+ * - %DRM_XE_VM_BIND_OP_MAP_USERPTR
+ * - %DRM_XE_VM_BIND_OP_UNMAP_ALL
+ * - %DRM_XE_VM_BIND_OP_PREFETCH
+ *
+ * and the @flags can be:
+ * - %DRM_XE_VM_BIND_FLAG_READONLY
+ * - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - Valid on a faulting VM only, do the
+ * MAP operation immediately rather than deferring the MAP to the page
+ * fault handler.
+ * - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
+ * tables are set up with a special bit which indicates writes are
+ * dropped and all reads return zero. In the future, the NULL flag
+ * will only be valid for DRM_XE_VM_BIND_OP_MAP operations, with the
+ * BO handle MBZ and the BO offset MBZ. This flag is intended to
+ * implement VK sparse bindings.
+ */
+struct drm_xe_vm_bind_op {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /**
+ * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
+ */
+ __u32 obj;
+
+ /**
+ * @pat_index: The platform defined @pat_index to use for this mapping.
+ * The index basically maps to some predefined memory attributes,
+ * including things like caching, coherency, compression etc. The exact
+ * meaning of the pat_index is platform specific and defined in the
+ * Bspec and PRMs. When the KMD sets up the binding the index here is
+ * encoded into the ppGTT PTE.
+ *
+ * For coherency the @pat_index needs to be at least 1-way coherent when
+ * drm_xe_gem_create.cpu_caching is DRM_XE_GEM_CPU_CACHING_WB. The KMD
+ * will extract the coherency mode from the @pat_index and reject if
+ * there is a mismatch (see note below for pre-MTL platforms).
+ *
+ * Note: On pre-MTL platforms there is only a caching mode and no
+ * explicit coherency mode, but on such hardware there is always a
+ * shared-LLC (or the device is a dGPU) so all GT memory accesses are
+ * coherent with CPU caches even with the caching mode set as uncached.
+ * It's only the display engine that is incoherent (on a dGPU the
+ * surface must be in VRAM, which is always mapped as WC on the CPU).
+ * However, to keep the uapi somewhat
+ * consistent with newer platforms the KMD groups the different cache
+ * levels into the following coherency buckets on all pre-MTL platforms:
+ *
+ * ppGTT UC -> COH_NONE
+ * ppGTT WC -> COH_NONE
+ * ppGTT WT -> COH_NONE
+ * ppGTT WB -> COH_AT_LEAST_1WAY
+ *
+ * In practice UC/WC/WT should only ever be used for scanout surfaces on
+ * such platforms (or perhaps in general for dma-buf if shared with
+ * another device) since it is only the display engine that is actually
+ * incoherent. Everything else should typically use WB given that we
+ * have a shared-LLC. On MTL+ this completely changes and the HW
+ * defines the coherency mode as part of the @pat_index, where
+ * incoherent GT access is possible.
+ *
+ * Note: For userptr and externally imported dma-buf the kernel expects
+ * either 1WAY or 2WAY for the @pat_index.
+ *
+ * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
+ * on the @pat_index. For such mappings there is no actual memory being
+ * mapped (the address in the PTE is invalid), so the various PAT memory
+ * attributes likely do not apply. Simply leaving as zero is one
+ * option (still a valid pat_index).
+ */
+ __u16 pat_index;
+
+ /** @pad: MBZ */
+ __u16 pad;
+
+ union {
+ /**
+ * @obj_offset: Offset into the object, ignored for unbind
+ */
+ __u64 obj_offset;
+
+ /** @userptr: user pointer to bind on */
+ __u64 userptr;
+ };
+
+ /**
+ * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL
+ */
+ __u64 range;
+
+ /** @addr: Address to operate on, MBZ for UNMAP_ALL */
+ __u64 addr;
+
+#define DRM_XE_VM_BIND_OP_MAP 0x0
+#define DRM_XE_VM_BIND_OP_UNMAP 0x1
+#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2
+#define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3
+#define DRM_XE_VM_BIND_OP_PREFETCH 0x4
+ /** @op: Bind operation to perform */
+ __u32 op;
+
+#define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0)
+#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1)
+#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
+ /** @flags: Bind flags */
+ __u32 flags;
+
+ /**
+ * @prefetch_mem_region_instance: Memory region to prefetch VMA to.
+ * It is a region instance, not a mask.
+ * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation.
+ */
+ __u32 prefetch_mem_region_instance;
+
+ /** @pad2: MBZ */
+ __u32 pad2;
+
+ /** @reserved: Reserved */
+ __u64 reserved[3];
+};
+
+/**
+ * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND
+ *
+ * Below is a minimal example of using @drm_xe_vm_bind to asynchronously
+ * bind the userptr buffer `data` at address `BIND_ADDRESS`. It can be
+ * synchronized by using the example provided for @drm_xe_sync.
+ *
+ * .. code-block:: C
+ *
+ * data = aligned_alloc(ALIGNMENT, BO_SIZE);
+ * struct drm_xe_vm_bind bind = {
+ * .vm_id = vm,
+ * .num_binds = 1,
+ * .bind.obj = 0,
+ * .bind.obj_offset = to_user_pointer(data),
+ * .bind.range = BO_SIZE,
+ * .bind.addr = BIND_ADDRESS,
+ * .bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR,
+ * .bind.flags = 0,
+ * .num_syncs = 1,
+ * .syncs = &sync,
+ * .exec_queue_id = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
+ *
+ */
+struct drm_xe_vm_bind {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @vm_id: The ID of the VM to bind to */
+ __u32 vm_id;
+
+ /**
+ * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
+ * and the exec queue must have the same vm_id. If zero, the default VM
+ * bind engine is used.
+ */
+ __u32 exec_queue_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @num_binds: number of binds in this IOCTL */
+ __u32 num_binds;
+
+ union {
+ /** @bind: used if num_binds == 1 */
+ struct drm_xe_vm_bind_op bind;
+
+ /**
+ * @vector_of_binds: userptr to array of struct
+ * drm_xe_vm_bind_op if num_binds > 1
+ */
+ __u64 vector_of_binds;
+ };
+
+ /** @pad2: MBZ */
+ __u32 pad2;
+
+ /** @num_syncs: number of syncs to wait on */
+ __u32 num_syncs;
+
+ /** @syncs: pointer to struct drm_xe_sync array */
+ __u64 syncs;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
+ *
+ * The example below shows how to use @drm_xe_exec_queue_create to create
+ * a simple exec_queue (no parallel submission) of class
+ * &DRM_XE_ENGINE_CLASS_RENDER.
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_engine_class_instance instance = {
+ * .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
+ * };
+ * struct drm_xe_exec_queue_create exec_queue_create = {
+ * .extensions = 0,
+ * .vm_id = vm,
+ * .width = 1,
+ * .num_placements = 1,
+ * .instances = to_user_pointer(&instance),
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
+ *
+ */
+struct drm_xe_exec_queue_create {
+#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7
+/* Monitor 128KB contiguous region with 4K sub-granularity */
+#define DRM_XE_ACC_GRANULARITY_128K 0
+/* Monitor 2MB contiguous region with 64KB sub-granularity */
+#define DRM_XE_ACC_GRANULARITY_2M 1
+/* Monitor 16MB contiguous region with 512KB sub-granularity */
+#define DRM_XE_ACC_GRANULARITY_16M 2
+/* Monitor 64MB contiguous region with 2M sub-granularity */
+#define DRM_XE_ACC_GRANULARITY_64M 3
+
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @width: submission width (number of batch buffers per exec) for this exec queue */
+ __u16 width;
+
+ /** @num_placements: number of valid placements for this exec queue */
+ __u16 num_placements;
+
+ /** @vm_id: VM to use for this exec queue */
+ __u32 vm_id;
+
+ /** @flags: MBZ */
+ __u32 flags;
+
+ /** @exec_queue_id: Returned exec queue ID */
+ __u32 exec_queue_id;
+
+ /**
+ * @instances: user pointer to a 2-d array of struct
+ * drm_xe_engine_class_instance
+ *
+ * length = width (i) * num_placements (j)
+ * index = j + i * width
+ */
+ __u64 instances;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
+ */
+struct drm_xe_exec_queue_destroy {
+ /** @exec_queue_id: Exec queue ID */
+ __u32 exec_queue_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
+ *
+ * The @property can be:
+ * - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN
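+ *
+ * A minimal sketch (assuming `fd` is an open device file descriptor and
+ * `exec_queue` is an ID returned by &DRM_IOCTL_XE_EXEC_QUEUE_CREATE;
+ * error handling omitted):
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_exec_queue_get_property prop = {
+ * .extensions = 0,
+ * .exec_queue_id = exec_queue,
+ * .property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY, &prop);
+ * printf("banned: %llu\n", prop.value);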
+ */
+struct drm_xe_exec_queue_get_property {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @exec_queue_id: Exec queue ID */
+ __u32 exec_queue_id;
+
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
+ /** @property: property to get */
+ __u32 property;
+
+ /** @value: property value */
+ __u64 value;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_sync - sync object
+ *
+ * The @type can be:
+ * - %DRM_XE_SYNC_TYPE_SYNCOBJ
+ * - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ
+ * - %DRM_XE_SYNC_TYPE_USER_FENCE
+ *
+ * and the @flags can be:
+ * - %DRM_XE_SYNC_FLAG_SIGNAL
+ *
+ * A minimal use of @drm_xe_sync looks like this:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_sync sync = {
+ * .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ * .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
+ * };
+ * struct drm_syncobj_create syncobj_create = { 0 };
+ * ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &syncobj_create);
+ * sync.handle = syncobj_create.handle;
+ * ...
+ * use of &sync in drm_xe_exec or drm_xe_vm_bind
+ * ...
+ * struct drm_syncobj_wait wait = {
+ * .handles = &sync.handle,
+ * .timeout_nsec = INT64_MAX,
+ * .count_handles = 1,
+ * .flags = 0,
+ * .first_signaled = 0,
+ * .pad = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
+ */
+struct drm_xe_sync {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+#define DRM_XE_SYNC_TYPE_SYNCOBJ 0x0
+#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ 0x1
+#define DRM_XE_SYNC_TYPE_USER_FENCE 0x2
+ /** @type: Type of this sync object */
+ __u32 type;
+
+#define DRM_XE_SYNC_FLAG_SIGNAL (1 << 0)
+ /** @flags: Sync Flags */
+ __u32 flags;
+
+ union {
+ /** @handle: Handle for the object */
+ __u32 handle;
+
+ /**
+ * @addr: Address of user fence. When the sync is passed in via the
+ * exec IOCTL this is a GPU address in the VM. When the sync is
+ * passed in via the VM bind IOCTL this is a user pointer. In
+ * either case, it is the user's responsibility that this address
+ * is present and mapped when the user fence is signalled. Must be
+ * qword aligned.
+ */
+ __u64 addr;
+ };
+
+ /**
+ * @timeline_value: Input for the timeline sync object. Must be
+ * non-zero when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
+ */
+ __u64 timeline_value;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC
+ *
+ * This is an example to use @drm_xe_exec for execution of the object
+ * at BIND_ADDRESS (see example in @drm_xe_vm_bind) by an exec_queue
+ * (see example in @drm_xe_exec_queue_create). It can be synchronized
+ * by using the example provided for @drm_xe_sync.
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_exec exec = {
+ * .exec_queue_id = exec_queue,
+ * .syncs = &sync,
+ * .num_syncs = 1,
+ * .address = BIND_ADDRESS,
+ * .num_batch_buffer = 1,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
+ *
+ */
+struct drm_xe_exec {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @exec_queue_id: Exec queue ID for the batch buffer */
+ __u32 exec_queue_id;
+
+ /** @num_syncs: Number of struct drm_xe_sync entries in the array. */
+ __u32 num_syncs;
+
+ /** @syncs: Pointer to struct drm_xe_sync array. */
+ __u64 syncs;
+
+ /**
+ * @address: address of batch buffer if num_batch_buffer == 1 or an
+ * array of batch buffer addresses
+ */
+ __u64 address;
+
+ /**
+ * @num_batch_buffer: number of batch buffers in this exec, must match
+ * the width of the engine
+ */
+ __u16 num_batch_buffer;
+
+ /** @pad: MBZ */
+ __u16 pad[3];
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE
+ *
+ * Wait on a user fence. Xe will wake up on every HW engine interrupt in the
+ * instances list and check if user fence is complete::
+ *
+ * (*addr & MASK) OP (VALUE & MASK)
+ *
+ * Returns to user on user fence completion or timeout.
+ *
+ * The @op can be:
+ * - %DRM_XE_UFENCE_WAIT_OP_EQ
+ * - %DRM_XE_UFENCE_WAIT_OP_NEQ
+ * - %DRM_XE_UFENCE_WAIT_OP_GT
+ * - %DRM_XE_UFENCE_WAIT_OP_GTE
+ * - %DRM_XE_UFENCE_WAIT_OP_LT
+ * - %DRM_XE_UFENCE_WAIT_OP_LTE
+ *
+ * and the @flags can be:
+ * - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME
+ *
+ * The @mask values can be for example:
+ * - 0xffu for u8
+ * - 0xffffu for u16
+ * - 0xffffffffu for u32
+ * - 0xffffffffffffffffu for u64
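+ *
+ * A minimal sketch of blocking until a user fence qword equals an
+ * expected value, here 1 (assuming `fd` is an open device file
+ * descriptor, `fence` is a qword-aligned __u64 previously attached as a
+ * %DRM_XE_SYNC_TYPE_USER_FENCE out-sync and `exec_queue` is the
+ * relevant exec queue ID; error handling omitted):
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_wait_user_fence wait = {
+ * .addr = (uintptr_t)&fence,
+ * .op = DRM_XE_UFENCE_WAIT_OP_EQ,
+ * .flags = 0,
+ * .value = 1,
+ * .mask = 0xffffffffffffffffu,
+ * .timeout = -1,
+ * .exec_queue_id = exec_queue,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);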
+ */
+struct drm_xe_wait_user_fence {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /**
+ * @addr: user pointer address to wait on, must be qword aligned
+ */
+ __u64 addr;
+
+#define DRM_XE_UFENCE_WAIT_OP_EQ 0x0
+#define DRM_XE_UFENCE_WAIT_OP_NEQ 0x1
+#define DRM_XE_UFENCE_WAIT_OP_GT 0x2
+#define DRM_XE_UFENCE_WAIT_OP_GTE 0x3
+#define DRM_XE_UFENCE_WAIT_OP_LT 0x4
+#define DRM_XE_UFENCE_WAIT_OP_LTE 0x5
+ /** @op: wait operation (type of comparison) */
+ __u16 op;
+
+#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 0)
+ /** @flags: wait flags */
+ __u16 flags;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @value: compare value */
+ __u64 value;
+
+ /** @mask: comparison mask */
+ __u64 mask;
+
+ /**
+ * @timeout: how long to wait before bailing, value in nanoseconds.
+ * Without the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative
+ * timeout) it contains the timeout expressed in nanoseconds to wait
+ * (the fence will expire at now() + timeout).
+ * When the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute
+ * timeout) the wait will end at timeout (uses the system
+ * MONOTONIC_CLOCK).
+ * Passing a negative timeout leads to a never-ending wait.
+ *
+ * On a relative timeout this value is updated with the timeout left
+ * (for restarting the call in case of signal delivery).
+ * On an absolute timeout this value stays intact (a restarted call
+ * still expires at the same point in time).
+ */
+ __s64 timeout;
+
+ /** @exec_queue_id: exec_queue_id returned from &DRM_IOCTL_XE_EXEC_QUEUE_CREATE */
+ __u32 exec_queue_id;
+
+ /** @pad2: MBZ */
+ __u32 pad2;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _UAPI_XE_DRM_H_ */
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index 5c6c4269f7ef..2ec6f35cda32 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -27,7 +27,7 @@
union { \
struct { MEMBERS } ATTRS; \
struct TAG { MEMBERS } ATTRS NAME; \
- }
+ } ATTRS
#ifdef __cplusplus
/* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index 4a195b68f28f..b383c2fe0cf3 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -239,7 +239,7 @@ struct v4l2_subdev_routing {
* set (which is the default), the 'stream' fields will be forced to 0 by the
* kernel.
*/
- #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1U << 0)
+ #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1ULL << 0)
/**
* struct v4l2_subdev_client_capability - Capabilities of the client accessing