author     Jani Nikula <jani.nikula@intel.com>  2023-10-04 18:06:27 +0300
committer  Jani Nikula <jani.nikula@intel.com>  2023-10-04 18:06:27 +0300
commit     7824a88b4286980512de2a46763646100274a5ac (patch)
tree       fc10f5509fa12f85b72ba14c9bdf62641278f303 /include/drm
parent     25591b66d0a4f9277241cebe1a74b4f985bc27a9 (diff)
parent     caacbdc28f545744770fb2caf347b3c4be9a6299 (diff)
Merge drm/drm-next into drm-intel-next
Backmerge to sync up with drm-intel-gt-next and drm-misc-next.

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Diffstat (limited to 'include/drm')
-rw-r--r--  include/drm/bridge/samsung-dsim.h                                      1
-rw-r--r--  include/drm/display/drm_dp_mst_helper.h                               23
-rw-r--r--  include/drm/drm_accel.h                                                9
-rw-r--r--  include/drm/drm_atomic.h                                               2
-rw-r--r--  include/drm/drm_bridge.h                                               6
-rw-r--r--  include/drm/drm_buddy.h                                                6
-rw-r--r--  include/drm/drm_client.h                                               2
-rw-r--r--  include/drm/drm_connector.h                                            3
-rw-r--r--  include/drm/drm_debugfs.h                                             13
-rw-r--r--  include/drm/drm_device.h                                              14
-rw-r--r--  include/drm/drm_drv.h                                                  8
-rw-r--r--  include/drm/drm_exec.h                                                35
-rw-r--r--  include/drm/drm_file.h                                                17
-rw-r--r--  include/drm/drm_gpuvm.h (renamed from include/drm/drm_gpuva_mgr.h)   155
-rw-r--r--  include/drm/drm_kunit_helpers.h                                        4
-rw-r--r--  include/drm/i915_pciids.h                                             12
16 files changed, 181 insertions, 129 deletions
diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h
index 05100e91ecb9..6fc9bb2979e4 100644
--- a/include/drm/bridge/samsung-dsim.h
+++ b/include/drm/bridge/samsung-dsim.h
@@ -53,6 +53,7 @@ struct samsung_dsim_driver_data {
unsigned int plltmr_reg;
unsigned int has_freqband:1;
unsigned int has_clklane_stop:1;
+ unsigned int has_broken_fifoctrl_emptyhdr:1;
unsigned int num_clks;
unsigned int min_freq;
unsigned int max_freq;
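
Note: the new bit flags SoCs whose FIFOCTRL empty-header status is unreliable. A minimal, illustrative sketch of a per-SoC driver-data entry opting into the quirk; all other fields are omitted and nothing here is taken from a real platform:

#include <drm/bridge/samsung-dsim.h>

static const struct samsung_dsim_driver_data example_dsim_driver_data = {
	.has_broken_fifoctrl_emptyhdr	= 1,	/* work around the broken empty-header status */
	/* remaining fields (plltmr_reg, num_clks, min_freq, max_freq, ...) per SoC */
};
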
diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
index ed5c9660563c..4429d3b1745b 100644
--- a/include/drm/display/drm_dp_mst_helper.h
+++ b/include/drm/display/drm_dp_mst_helper.h
@@ -46,6 +46,13 @@ struct drm_dp_mst_topology_ref_history {
};
#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */
+enum drm_dp_mst_payload_allocation {
+ DRM_DP_MST_PAYLOAD_ALLOCATION_NONE,
+ DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL,
+ DRM_DP_MST_PAYLOAD_ALLOCATION_DFP,
+ DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE,
+};
+
struct drm_dp_mst_branch;
/**
@@ -537,7 +544,7 @@ struct drm_dp_mst_atomic_payload {
* drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the
* previous MST states payload start slots have been copied over to the new state. Note
* that a new start slot won't be assigned/removed from this payload until
- * drm_dp_add_payload_part1()/drm_dp_remove_payload() have been called.
+ * drm_dp_add_payload_part1()/drm_dp_remove_payload_part2() have been called.
* * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to
* get committed to hardware by calling drm_crtc_commit_wait() on each of the
* &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps.
@@ -564,6 +571,9 @@ struct drm_dp_mst_atomic_payload {
/** @dsc_enabled: Whether or not this payload has DSC enabled */
bool dsc_enabled : 1;
+ /** @payload_allocation_status: The allocation status of this payload */
+ enum drm_dp_mst_payload_allocation payload_allocation_status;
+
/** @next: The list node for this payload */
struct list_head next;
};
@@ -842,10 +852,13 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
struct drm_atomic_state *state,
struct drm_dp_mst_atomic_payload *payload);
-void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_topology_state *mst_state,
- const struct drm_dp_mst_atomic_payload *old_payload,
- struct drm_dp_mst_atomic_payload *new_payload);
+void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_atomic_payload *payload);
+void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ const struct drm_dp_mst_atomic_payload *old_payload,
+ struct drm_dp_mst_atomic_payload *new_payload);
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
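
Note: drm_dp_remove_payload() is split into part1/part2 so a driver can detach the payload before the stream is disabled and free the time slots afterwards, mirroring the existing add_payload_part1/part2 flow; the new payload_allocation_status records how far allocation got. A hedged sketch of a driver's disable path, assuming the atomic states and payloads have already been looked up:

#include <drm/display/drm_dp_mst_helper.h>

static void example_mst_stream_disable(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_topology_state *new_mst_state,
				       const struct drm_dp_mst_atomic_payload *old_payload,
				       struct drm_dp_mst_atomic_payload *new_payload)
{
	/* Part 1: drop the payload from the topology bookkeeping while the
	 * stream is still running. */
	drm_dp_remove_payload_part1(mgr, new_mst_state, new_payload);

	/* ... driver disables the transcoder/stream here ... */

	/* Part 2: release the time slots once the stream is actually off. */
	drm_dp_remove_payload_part2(mgr, new_mst_state, old_payload, new_payload);
}
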
diff --git a/include/drm/drm_accel.h b/include/drm/drm_accel.h
index d4955062c77e..f4d3784b1dce 100644
--- a/include/drm/drm_accel.h
+++ b/include/drm/drm_accel.h
@@ -58,7 +58,8 @@ int accel_minor_alloc(void);
void accel_minor_replace(struct drm_minor *minor, int index);
void accel_set_device_instance_params(struct device *kdev, int index);
int accel_open(struct inode *inode, struct file *filp);
-void accel_debugfs_init(struct drm_minor *minor, int minor_id);
+void accel_debugfs_init(struct drm_device *dev);
+void accel_debugfs_register(struct drm_device *dev);
#else
@@ -89,7 +90,11 @@ static inline void accel_set_device_instance_params(struct device *kdev, int ind
{
}
-static inline void accel_debugfs_init(struct drm_minor *minor, int minor_id)
+static inline void accel_debugfs_init(struct drm_device *dev)
+{
+}
+
+static inline void accel_debugfs_register(struct drm_device *dev)
{
}
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 9a022caacf93..cf8e1220a4ac 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -1126,7 +1126,7 @@ struct drm_bridge_state {
struct drm_bus_cfg input_bus_cfg;
/**
- * @output_bus_cfg: input bus configuration
+ * @output_bus_cfg: output bus configuration
*/
struct drm_bus_cfg output_bus_cfg;
};
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index c339fc85fd07..cfb7dcdb66c4 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -32,6 +32,8 @@
#include <drm/drm_mode_object.h>
#include <drm/drm_modes.h>
+struct device_node;
+
struct drm_bridge;
struct drm_bridge_timings;
struct drm_connector;
@@ -716,10 +718,8 @@ struct drm_bridge {
struct drm_encoder *encoder;
/** @chain_node: used to form a bridge chain */
struct list_head chain_node;
-#ifdef CONFIG_OF
/** @of_node: device node pointer to the bridge */
struct device_node *of_node;
-#endif
/** @list: to keep track of all added bridges */
struct list_head list;
/**
@@ -950,6 +950,6 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
}
#endif
-void drm_bridge_debugfs_init(struct drm_minor *minor);
+void drm_bridge_debugfs_init(struct drm_device *dev);
#endif
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index 572077ff8ae7..a5b39fc01003 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -22,8 +22,9 @@
start__ >= max__ || size__ > max__ - start__; \
})
-#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0)
-#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1)
+#define DRM_BUDDY_RANGE_ALLOCATION BIT(0)
+#define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1)
+#define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2)
struct drm_buddy_block {
#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
@@ -155,5 +156,4 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p);
void drm_buddy_block_print(struct drm_buddy *mm,
struct drm_buddy_block *block,
struct drm_printer *p);
-
#endif
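
Note: DRM_BUDDY_CONTIGUOUS_ALLOCATION asks the allocator for one physically contiguous range instead of a scattered block list. A hedged sketch of a caller requesting it, assuming the drm_buddy_alloc_blocks() prototype declared earlier in this header:

#include <drm/drm_buddy.h>

static int example_alloc_contiguous(struct drm_buddy *mm, u64 size,
				    struct list_head *blocks)
{
	/* Allow any placement, but require the blocks to be contiguous. */
	return drm_buddy_alloc_blocks(mm, 0, mm->size, size,
				      mm->chunk_size, blocks,
				      DRM_BUDDY_CONTIGUOUS_ALLOCATION);
}
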
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index c0a14b40c039..d47458ecdac4 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -195,6 +195,6 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
drm_for_each_connector_iter(connector, iter) \
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
-void drm_client_debugfs_init(struct drm_minor *minor);
+void drm_client_debugfs_init(struct drm_device *dev);
#endif
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 40a5e7acf2fa..6302caa0bd51 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -498,6 +498,8 @@ enum drm_privacy_screen_status {
* ITU-R BT.601 colorimetry format
* The DP spec does not say whether this is the 525 or the 625
* line version.
+ * @DRM_MODE_COLORIMETRY_COUNT:
+ * Not a valid value; merely used for counting
*/
enum drm_colorspace {
/* For Default case, driver will set the colorspace */
@@ -522,7 +524,6 @@ enum drm_colorspace {
DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED = 13,
DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT = 14,
DRM_MODE_COLORIMETRY_BT601_YCC = 15,
- /* not a valid value; merely used for counting */
DRM_MODE_COLORIMETRY_COUNT
};
diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h
index cb2c1956a214..cf06cee4343f 100644
--- a/include/drm/drm_debugfs.h
+++ b/include/drm/drm_debugfs.h
@@ -35,7 +35,7 @@
#include <linux/types.h>
#include <linux/seq_file.h>
-#include <drm/drm_gpuva_mgr.h>
+#include <drm/drm_gpuvm.h>
/**
* DRM_DEBUGFS_GPUVA_INFO - &drm_info_list entry to dump a GPU VA space
@@ -142,8 +142,8 @@ struct drm_debugfs_entry {
void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
struct drm_minor *minor);
-int drm_debugfs_remove_files(const struct drm_info_list *files,
- int count, struct drm_minor *minor);
+int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
+ struct dentry *root, struct drm_minor *minor);
void drm_debugfs_add_file(struct drm_device *dev, const char *name,
int (*show)(struct seq_file*, void*), void *data);
@@ -152,7 +152,7 @@ void drm_debugfs_add_files(struct drm_device *dev,
const struct drm_debugfs_info *files, int count);
int drm_debugfs_gpuva_info(struct seq_file *m,
- struct drm_gpuva_manager *mgr);
+ struct drm_gpuvm *gpuvm);
#else
static inline void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
@@ -160,7 +160,8 @@ static inline void drm_debugfs_create_files(const struct drm_info_list *files,
{}
static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
- int count, struct drm_minor *minor)
+ int count, struct dentry *root,
+ struct drm_minor *minor)
{
return 0;
}
@@ -176,7 +177,7 @@ static inline void drm_debugfs_add_files(struct drm_device *dev,
{}
static inline int drm_debugfs_gpuva_info(struct seq_file *m,
- struct drm_gpuva_manager *mgr)
+ struct drm_gpuvm *gpuvm)
{
return 0;
}
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 7cf4afae2e79..c490977ee250 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -312,19 +312,11 @@ struct drm_device {
struct drm_fb_helper *fb_helper;
/**
- * @debugfs_mutex:
+ * @debugfs_root:
*
- * Protects &debugfs_list access.
+ * Root directory for debugfs files.
*/
- struct mutex debugfs_mutex;
-
- /**
- * @debugfs_list:
- *
- * List of debugfs files to be created by the DRM device. The files
- * must be added during drm_dev_register().
- */
- struct list_head debugfs_list;
+ struct dentry *debugfs_root;
/* Everything below here is for legacy driver, never use! */
/* private: */
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 9813fa759b75..e2640dc64e08 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -581,4 +581,12 @@ static inline bool drm_firmware_drivers_only(void)
return video_firmware_drivers_only();
}
+#if defined(CONFIG_DEBUG_FS)
+void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root);
+#else
+static inline void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root)
+{
+}
+#endif
+
#endif
diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h
index e0462361adf9..b5bf0b6da791 100644
--- a/include/drm/drm_exec.h
+++ b/include/drm/drm_exec.h
@@ -52,6 +52,20 @@ struct drm_exec {
};
/**
+ * drm_exec_obj() - Return the object for a given drm_exec index
+ * @exec: Pointer to the drm_exec context
+ * @index: The index.
+ *
+ * Return: Pointer to the locked object corresponding to @index if
+ * index is within the number of locked objects. NULL otherwise.
+ */
+static inline struct drm_gem_object *
+drm_exec_obj(struct drm_exec *exec, unsigned long index)
+{
+ return index < exec->num_objects ? exec->objects[index] : NULL;
+}
+
+/**
* drm_exec_for_each_locked_object - iterate over all the locked objects
* @exec: drm_exec object
* @index: unsigned long index for the iteration
@@ -59,10 +73,23 @@ struct drm_exec {
*
* Iterate over all the locked GEM objects inside the drm_exec object.
*/
-#define drm_exec_for_each_locked_object(exec, index, obj) \
- for (index = 0, obj = (exec)->objects[0]; \
- index < (exec)->num_objects; \
- ++index, obj = (exec)->objects[index])
+#define drm_exec_for_each_locked_object(exec, index, obj) \
+ for ((index) = 0; ((obj) = drm_exec_obj(exec, index)); ++(index))
+
+/**
+ * drm_exec_for_each_locked_object_reverse - iterate over all the locked
+ * objects in reverse locking order
+ * @exec: drm_exec object
+ * @index: unsigned long index for the iteration
+ * @obj: the current GEM object
+ *
+ * Iterate over all the locked GEM objects inside the drm_exec object in
+ * reverse locking order. Note that @index may go below zero and wrap,
+ * but that will be caught by drm_exec_obj(), returning a NULL object.
+ */
+#define drm_exec_for_each_locked_object_reverse(exec, index, obj) \
+ for ((index) = (exec)->num_objects - 1; \
+ ((obj) = drm_exec_obj(exec, index)); --(index))
/**
* drm_exec_until_all_locked - loop until all GEM objects are locked
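
Note: drm_exec_obj() bounds-checks the index, so both iteration macros now terminate on its NULL return, and the new reverse variant lets drivers unwind per-object work in the opposite order it was done. A hedged sketch, where example_rollback() is a hypothetical driver helper:

#include <drm/drm_exec.h>

static void example_undo(struct drm_exec *exec)
{
	struct drm_gem_object *obj;
	unsigned long index;

	drm_exec_for_each_locked_object_reverse(exec, index, obj)
		example_rollback(obj);	/* hypothetical per-object rollback */
}
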
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 010239392adf..e1b5b4282f75 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -79,10 +79,8 @@ struct drm_minor {
struct device *kdev; /* Linux device */
struct drm_device *dev;
+ struct dentry *debugfs_symlink;
struct dentry *debugfs_root;
-
- struct list_head debugfs_list;
- struct mutex debugfs_lock; /* Protects debugfs_list. */
};
/**
@@ -256,8 +254,15 @@ struct drm_file {
/** @master_lookup_lock: Serializes @master. */
spinlock_t master_lookup_lock;
- /** @pid: Process that opened this file. */
- struct pid *pid;
+ /**
+ * @pid: Process that is using this file.
+ *
+ * Must only be dereferenced under an rcu_read_lock or equivalent.
+ *
+ * Updates are guarded with dev->filelist_mutex and reference must be
+ * dropped after a RCU grace period to accommodate lockless readers.
+ */
+ struct pid __rcu *pid;
/** @client_id: A unique id for fdinfo */
u64 client_id;
@@ -420,6 +425,8 @@ static inline bool drm_is_accel_client(const struct drm_file *file_priv)
return file_priv->minor->type == DRM_MINOR_ACCEL;
}
+void drm_file_update_pid(struct drm_file *);
+
int drm_open(struct inode *inode, struct file *filp);
int drm_open_helper(struct file *filp, struct drm_minor *minor);
ssize_t drm_read(struct file *filp, char __user *buffer,
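
Note: with @pid now RCU-protected and updated while the file is in use rather than fixed at open time, readers (e.g. fdinfo printers) must dereference it inside an RCU read-side section. A hedged sketch with error handling elided:

#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <drm/drm_file.h>

static pid_t example_client_tgid(struct drm_file *file_priv)
{
	struct pid *pid;
	pid_t tgid = 0;

	rcu_read_lock();
	pid = rcu_dereference(file_priv->pid);
	if (pid)
		tgid = pid_vnr(pid);
	rcu_read_unlock();

	return tgid;
}
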
diff --git a/include/drm/drm_gpuva_mgr.h b/include/drm/drm_gpuvm.h
index ed8d50200cc3..c7ed6bf441d4 100644
--- a/include/drm/drm_gpuva_mgr.h
+++ b/include/drm/drm_gpuvm.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef __DRM_GPUVA_MGR_H__
-#define __DRM_GPUVA_MGR_H__
+#ifndef __DRM_GPUVM_H__
+#define __DRM_GPUVM_H__
/*
* Copyright (c) 2022 Red Hat.
@@ -31,8 +31,8 @@
#include <drm/drm_gem.h>
-struct drm_gpuva_manager;
-struct drm_gpuva_fn_ops;
+struct drm_gpuvm;
+struct drm_gpuvm_ops;
/**
* enum drm_gpuva_flags - flags for struct drm_gpuva
@@ -62,15 +62,15 @@ enum drm_gpuva_flags {
* struct drm_gpuva - structure to track a GPU VA mapping
*
* This structure represents a GPU VA mapping and is associated with a
- * &drm_gpuva_manager.
+ * &drm_gpuvm.
*
* Typically, this structure is embedded in bigger driver structures.
*/
struct drm_gpuva {
/**
- * @mgr: the &drm_gpuva_manager this object is associated with
+ * @vm: the &drm_gpuvm this object is associated with
*/
- struct drm_gpuva_manager *mgr;
+ struct drm_gpuvm *vm;
/**
* @flags: the &drm_gpuva_flags for this mapping
@@ -137,20 +137,18 @@ struct drm_gpuva {
} rb;
};
-int drm_gpuva_insert(struct drm_gpuva_manager *mgr, struct drm_gpuva *va);
+int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
void drm_gpuva_remove(struct drm_gpuva *va);
void drm_gpuva_link(struct drm_gpuva *va);
void drm_gpuva_unlink(struct drm_gpuva *va);
-struct drm_gpuva *drm_gpuva_find(struct drm_gpuva_manager *mgr,
+struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
u64 addr, u64 range);
-struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuva_manager *mgr,
+struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
u64 addr, u64 range);
-struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start);
-struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end);
-
-bool drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
+struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);
static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
struct drm_gem_object *obj, u64 offset)
@@ -186,7 +184,7 @@ static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
}
/**
- * struct drm_gpuva_manager - DRM GPU VA Manager
+ * struct drm_gpuvm - DRM GPU VA Manager
*
* The DRM GPU VA Manager keeps track of a GPU's virtual address space by using
* &maple_tree structures. Typically, this structure is embedded in bigger
@@ -197,7 +195,7 @@ static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
*
* There should be one manager instance per GPU virtual address space.
*/
-struct drm_gpuva_manager {
+struct drm_gpuvm {
/**
* @name: the name of the DRM GPU VA space
*/
@@ -237,100 +235,101 @@ struct drm_gpuva_manager {
struct drm_gpuva kernel_alloc_node;
/**
- * @ops: &drm_gpuva_fn_ops providing the split/merge steps to drivers
+ * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers
*/
- const struct drm_gpuva_fn_ops *ops;
+ const struct drm_gpuvm_ops *ops;
};
-void drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
- const char *name,
- u64 start_offset, u64 range,
- u64 reserve_offset, u64 reserve_range,
- const struct drm_gpuva_fn_ops *ops);
-void drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr);
+void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
+ u64 start_offset, u64 range,
+ u64 reserve_offset, u64 reserve_range,
+ const struct drm_gpuvm_ops *ops);
+void drm_gpuvm_destroy(struct drm_gpuvm *gpuvm);
+
+bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
static inline struct drm_gpuva *
__drm_gpuva_next(struct drm_gpuva *va)
{
- if (va && !list_is_last(&va->rb.entry, &va->mgr->rb.list))
+ if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list))
return list_next_entry(va, rb.entry);
return NULL;
}
/**
- * drm_gpuva_for_each_va_range() - iterate over a range of &drm_gpuvas
+ * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas
* @va__: &drm_gpuva structure to assign to in each iteration step
- * @mgr__: &drm_gpuva_manager to walk over
+ * @gpuvm__: &drm_gpuvm to walk over
* @start__: starting offset, the first gpuva will overlap this
* @end__: ending offset, the last gpuva will start before this (but may
* overlap)
*
- * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
* between @start__ and @end__. It is implemented similarly to list_for_each(),
- * but is using the &drm_gpuva_manager's internal interval tree to accelerate
+ * but is using the &drm_gpuvm's internal interval tree to accelerate
* the search for the starting &drm_gpuva, and hence isn't safe against removal
* of elements. It assumes that @end__ is within (or is the upper limit of) the
- * &drm_gpuva_manager. This iterator does not skip over the &drm_gpuva_manager's
+ * &drm_gpuvm. This iterator does not skip over the &drm_gpuvm's
* @kernel_alloc_node.
*/
-#define drm_gpuva_for_each_va_range(va__, mgr__, start__, end__) \
- for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)); \
+#define drm_gpuvm_for_each_va_range(va__, gpuvm__, start__, end__) \
+ for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \
va__ && (va__->va.addr < (end__)); \
va__ = __drm_gpuva_next(va__))
/**
- * drm_gpuva_for_each_va_range_safe() - safely iterate over a range of
+ * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of
* &drm_gpuvas
* @va__: &drm_gpuva to assign to in each iteration step
* @next__: another &drm_gpuva to use as temporary storage
- * @mgr__: &drm_gpuva_manager to walk over
+ * @gpuvm__: &drm_gpuvm to walk over
* @start__: starting offset, the first gpuva will overlap this
* @end__: ending offset, the last gpuva will start before this (but may
* overlap)
*
- * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
* between @start__ and @end__. It is implemented similarly to
- * list_for_each_safe(), but is using the &drm_gpuva_manager's internal interval
+ * list_for_each_safe(), but is using the &drm_gpuvm's internal interval
* tree to accelerate the search for the starting &drm_gpuva, and hence is safe
* against removal of elements. It assumes that @end__ is within (or is the
- * upper limit of) the &drm_gpuva_manager. This iterator does not skip over the
- * &drm_gpuva_manager's @kernel_alloc_node.
+ * upper limit of) the &drm_gpuvm. This iterator does not skip over the
+ * &drm_gpuvm's @kernel_alloc_node.
*/
-#define drm_gpuva_for_each_va_range_safe(va__, next__, mgr__, start__, end__) \
- for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)), \
+#define drm_gpuvm_for_each_va_range_safe(va__, next__, gpuvm__, start__, end__) \
+ for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \
next__ = __drm_gpuva_next(va__); \
va__ && (va__->va.addr < (end__)); \
va__ = next__, next__ = __drm_gpuva_next(va__))
/**
- * drm_gpuva_for_each_va() - iterate over all &drm_gpuvas
+ * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas
* @va__: &drm_gpuva to assign to in each iteration step
- * @mgr__: &drm_gpuva_manager to walk over
+ * @gpuvm__: &drm_gpuvm to walk over
*
* This iterator walks over all &drm_gpuva structures associated with the given
- * &drm_gpuva_manager.
+ * &drm_gpuvm.
*/
-#define drm_gpuva_for_each_va(va__, mgr__) \
- list_for_each_entry(va__, &(mgr__)->rb.list, rb.entry)
+#define drm_gpuvm_for_each_va(va__, gpuvm__) \
+ list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry)
/**
- * drm_gpuva_for_each_va_safe() - safely iterate over all &drm_gpuvas
+ * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas
* @va__: &drm_gpuva to assign to in each iteration step
* @next__: another &drm_gpuva to use as temporary storage
- * @mgr__: &drm_gpuva_manager to walk over
+ * @gpuvm__: &drm_gpuvm to walk over
*
* This iterator walks over all &drm_gpuva structures associated with the given
- * &drm_gpuva_manager. It is implemented with list_for_each_entry_safe(), and
+ * &drm_gpuvm. It is implemented with list_for_each_entry_safe(), and
* hence safe against the removal of elements.
*/
-#define drm_gpuva_for_each_va_safe(va__, next__, mgr__) \
- list_for_each_entry_safe(va__, next__, &(mgr__)->rb.list, rb.entry)
+#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \
+ list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)
/**
* enum drm_gpuva_op_type - GPU VA operation type
*
- * Operations to alter the GPU VA mappings tracked by the &drm_gpuva_manager.
+ * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm.
*/
enum drm_gpuva_op_type {
/**
@@ -413,7 +412,7 @@ struct drm_gpuva_op_unmap {
*
* Optionally, if &keep is set, drivers may keep the actual page table
* mappings for this &drm_gpuva, adding the missing page table entries
- * only and update the &drm_gpuva_manager accordingly.
+ * only and update the &drm_gpuvm accordingly.
*/
bool keep;
};
@@ -584,22 +583,22 @@ struct drm_gpuva_ops {
#define drm_gpuva_next_op(op) list_next_entry(op, entry)
struct drm_gpuva_ops *
-drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
+drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
u64 addr, u64 range,
struct drm_gem_object *obj, u64 offset);
struct drm_gpuva_ops *
-drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
+drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
u64 addr, u64 range);
struct drm_gpuva_ops *
-drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
+drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
u64 addr, u64 range);
struct drm_gpuva_ops *
-drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
+drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm,
struct drm_gem_object *obj);
-void drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
+void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
struct drm_gpuva_ops *ops);
static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
@@ -610,15 +609,15 @@ static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
}
/**
- * struct drm_gpuva_fn_ops - callbacks for split/merge steps
+ * struct drm_gpuvm_ops - callbacks for split/merge steps
*
- * This structure defines the callbacks used by &drm_gpuva_sm_map and
- * &drm_gpuva_sm_unmap to provide the split/merge steps for map and unmap
+ * This structure defines the callbacks used by &drm_gpuvm_sm_map and
+ * &drm_gpuvm_sm_unmap to provide the split/merge steps for map and unmap
* operations to drivers.
*/
-struct drm_gpuva_fn_ops {
+struct drm_gpuvm_ops {
/**
- * @op_alloc: called when the &drm_gpuva_manager allocates
+ * @op_alloc: called when the &drm_gpuvm allocates
* a struct drm_gpuva_op
*
* Some drivers may want to embed struct drm_gpuva_op into driver
@@ -630,7 +629,7 @@ struct drm_gpuva_fn_ops {
struct drm_gpuva_op *(*op_alloc)(void);
/**
- * @op_free: called when the &drm_gpuva_manager frees a
+ * @op_free: called when the &drm_gpuvm frees a
* struct drm_gpuva_op
*
* Some drivers may want to embed struct drm_gpuva_op into driver
@@ -642,19 +641,19 @@ struct drm_gpuva_fn_ops {
void (*op_free)(struct drm_gpuva_op *op);
/**
- * @sm_step_map: called from &drm_gpuva_sm_map to finally insert the
+ * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the
* mapping once all previous steps were completed
*
* The &priv pointer matches the one the driver passed to
- * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+ * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
*
- * Can be NULL if &drm_gpuva_sm_map is used.
+ * Can be NULL if &drm_gpuvm_sm_map is used.
*/
int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);
/**
- * @sm_step_remap: called from &drm_gpuva_sm_map and
- * &drm_gpuva_sm_unmap to split up an existent mapping
+ * @sm_step_remap: called from &drm_gpuvm_sm_map and
+ * &drm_gpuvm_sm_unmap to split up an existent mapping
*
* This callback is called when existent mapping needs to be split up.
* This is the case when either a newly requested mapping overlaps or
@@ -662,38 +661,38 @@ struct drm_gpuva_fn_ops {
* mapping is requested.
*
* The &priv pointer matches the one the driver passed to
- * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+ * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
*
- * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
+ * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
* used.
*/
int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);
/**
- * @sm_step_unmap: called from &drm_gpuva_sm_map and
- * &drm_gpuva_sm_unmap to unmap an existent mapping
+ * @sm_step_unmap: called from &drm_gpuvm_sm_map and
+ * &drm_gpuvm_sm_unmap to unmap an existent mapping
*
* This callback is called when existent mapping needs to be unmapped.
* This is the case when either a newly requested mapping encloses an
* existent mapping or an unmap of an existent mapping is requested.
*
* The &priv pointer matches the one the driver passed to
- * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+ * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
*
- * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
+ * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
* used.
*/
int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
};
-int drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv,
+int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
u64 addr, u64 range,
struct drm_gem_object *obj, u64 offset);
-int drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv,
+int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
u64 addr, u64 range);
-void drm_gpuva_map(struct drm_gpuva_manager *mgr,
+void drm_gpuva_map(struct drm_gpuvm *gpuvm,
struct drm_gpuva *va,
struct drm_gpuva_op_map *op);
@@ -703,4 +702,4 @@ void drm_gpuva_remap(struct drm_gpuva *prev,
void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
-#endif /* __DRM_GPUVA_MGR_H__ */
+#endif /* __DRM_GPUVM_H__ */
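
Note: the header and its types are renamed wholesale (drm_gpuva_manager becomes drm_gpuvm, drm_gpuva_fn_ops becomes drm_gpuvm_ops) while the embedding pattern stays the same. A hedged migration sketch with illustrative "example_*" names:

#include <drm/drm_gpuvm.h>

struct example_vm {
	struct drm_gpuvm base;		/* was: struct drm_gpuva_manager base */
};

static const struct drm_gpuvm_ops example_gpuvm_ops = {
	/* was: struct drm_gpuva_fn_ops; sm_step_* callbacks go here */
};

static void example_vm_init(struct example_vm *vm)
{
	/* was: drm_gpuva_manager_init(&vm->base, ...) */
	drm_gpuvm_init(&vm->base, "example", 0, 1ULL << 48,
		       0, 0, &example_gpuvm_ops);
}

The iteration macros follow the same pattern, e.g. drm_gpuva_for_each_va(va, mgr) becomes drm_gpuvm_for_each_va(va, gpuvm).
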
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
index 514c8a7a32f0..ba483c87f0e7 100644
--- a/include/drm/drm_kunit_helpers.h
+++ b/include/drm/drm_kunit_helpers.h
@@ -3,6 +3,8 @@
#ifndef DRM_KUNIT_HELPERS_H_
#define DRM_KUNIT_HELPERS_H_
+#include <linux/device.h>
+
#include <kunit/test.h>
struct drm_device;
@@ -51,7 +53,7 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test,
{
struct drm_driver *driver;
- driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL);
+ driver = devm_kzalloc(dev, sizeof(*driver), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, driver);
driver->driver_features = features;
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 21faa73db7ec..1256770d3827 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -742,18 +742,14 @@
#define INTEL_ATS_M_IDS(info) \
INTEL_ATS_M150_IDS(info), \
INTEL_ATS_M75_IDS(info)
+
/* MTL */
-#define INTEL_MTL_M_IDS(info) \
+#define INTEL_MTL_IDS(info) \
INTEL_VGA_DEVICE(0x7D40, info), \
- INTEL_VGA_DEVICE(0x7D60, info)
-
-#define INTEL_MTL_P_IDS(info) \
INTEL_VGA_DEVICE(0x7D45, info), \
INTEL_VGA_DEVICE(0x7D55, info), \
+ INTEL_VGA_DEVICE(0x7D60, info), \
+ INTEL_VGA_DEVICE(0x7D67, info), \
INTEL_VGA_DEVICE(0x7DD5, info)
-#define INTEL_MTL_IDS(info) \
- INTEL_MTL_M_IDS(info), \
- INTEL_MTL_P_IDS(info)
-
#endif /* _I915_PCIIDS_H */
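
Note: the separate MTL-M/MTL-P lists collapse into a single INTEL_MTL_IDS() and device ID 0x7D67 is added. A hedged sketch of a PCI ID table consuming the merged macro; the device-info object is a placeholder, not the real i915 structure:

#include <linux/pci.h>
#include <drm/i915_pciids.h>

/* Placeholder device-info object; real drivers pass their own info struct. */
static const struct example_device_info { int dummy; } example_mtl_info;

static const struct pci_device_id example_pciidlist[] = {
	INTEL_MTL_IDS(&example_mtl_info),	/* was INTEL_MTL_M_IDS + INTEL_MTL_P_IDS */
	{ }
};
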