From 0e26cc72c71cb98e951716a6596060cd04b0ba6b Mon Sep 17 00:00:00 2001 From: André Almeida Date: Wed, 22 Nov 2023 13:19:38 -0300 Subject: drm: Refuse to async flip with atomic prop changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Given that prop changes may lead to modesetting, which would defeat the fast path of the async flip, refuse any atomic prop change for async flips in atomic API. The only exception is the framebuffer ID to flip to. Currently the only plane type supported is the primary one. Signed-off-by: André Almeida Reviewed-by: Simon Ser Signed-off-by: Simon Ser Link: https://patchwork.freedesktop.org/patch/msgid/20231122161941.320564-2-andrealmeid@igalia.com --- drivers/gpu/drm/drm_atomic_uapi.c | 52 ++++++++++++++++++++++++++++++++++--- drivers/gpu/drm/drm_crtc_internal.h | 2 +- drivers/gpu/drm/drm_mode_object.c | 2 +- 3 files changed, 51 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 98d3b10c08ae..52494642b095 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -1006,13 +1006,28 @@ out: return ret; } +static int drm_atomic_check_prop_changes(int ret, uint64_t old_val, uint64_t prop_value, + struct drm_property *prop) +{ + if (ret != 0 || old_val != prop_value) { + drm_dbg_atomic(prop->dev, + "[PROP:%d:%s] No prop can be changed during async flip\n", + prop->base.id, prop->name); + return -EINVAL; + } + + return 0; +} + int drm_atomic_set_property(struct drm_atomic_state *state, struct drm_file *file_priv, struct drm_mode_object *obj, struct drm_property *prop, - uint64_t prop_value) + u64 prop_value, + bool async_flip) { struct drm_mode_object *ref; + u64 old_val; int ret; if (!drm_property_change_valid_get(prop, prop_value, &ref)) @@ -1029,6 +1044,13 @@ int drm_atomic_set_property(struct drm_atomic_state *state, break; } + if (async_flip) { + ret = drm_atomic_connector_get_property(connector, connector_state, + prop, &old_val); + ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop); + break; + } + ret = drm_atomic_connector_set_property(connector, connector_state, file_priv, prop, prop_value); @@ -1044,6 +1066,13 @@ int drm_atomic_set_property(struct drm_atomic_state *state, break; } + if (async_flip) { + ret = drm_atomic_crtc_get_property(crtc, crtc_state, + prop, &old_val); + ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop); + break; + } + ret = drm_atomic_crtc_set_property(crtc, crtc_state, prop, prop_value); break; @@ -1051,6 +1080,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state, case DRM_MODE_OBJECT_PLANE: { struct drm_plane *plane = obj_to_plane(obj); struct drm_plane_state *plane_state; + struct drm_mode_config *config = &plane->dev->mode_config; plane_state = drm_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) { @@ -1058,6 +1088,21 @@ int drm_atomic_set_property(struct drm_atomic_state *state, break; } + if (async_flip && prop != config->prop_fb_id) { + ret = drm_atomic_plane_get_property(plane, plane_state, + prop, &old_val); + ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop); + break; + } + + if (async_flip && plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY) { + drm_dbg_atomic(prop->dev, + "[OBJECT:%d] Only primary planes can be changed during async flip\n", + obj->id); + ret = -EINVAL; + break; + } + ret = drm_atomic_plane_set_property(plane, plane_state, file_priv, prop, prop_value); @@ -1337,6 +1382,7 @@ int 
drm_mode_atomic_ioctl(struct drm_device *dev, struct drm_out_fence_state *fence_state; int ret = 0; unsigned int i, j, num_fences; + bool async_flip = false; /* disallow for drivers not supporting atomic: */ if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) @@ -1450,8 +1496,8 @@ retry: goto out; } - ret = drm_atomic_set_property(state, file_priv, - obj, prop, prop_value); + ret = drm_atomic_set_property(state, file_priv, obj, + prop, prop_value, async_flip); if (ret) { drm_mode_object_put(obj); goto out; diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index 6b646e0783be..a514d5207e41 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -253,7 +253,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state, struct drm_file *file_priv, struct drm_mode_object *obj, struct drm_property *prop, - uint64_t prop_value); + u64 prop_value, bool async_flip); int drm_atomic_get_property(struct drm_mode_object *obj, struct drm_property *property, uint64_t *val); diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index ac0d2ce3f870..0e8355063eee 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c @@ -538,7 +538,7 @@ retry: obj_to_connector(obj), prop_value); } else { - ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value); + ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value, false); if (ret) goto out; ret = drm_atomic_commit(state); -- cgit v1.2.3 From 4b4af74ab9719d17538a97f43137e93296ec7437 Mon Sep 17 00:00:00 2001 From: Simon Ser Date: Wed, 22 Nov 2023 13:19:39 -0300 Subject: drm: allow DRM_MODE_PAGE_FLIP_ASYNC for atomic commits MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If the driver supports it, allow user-space to supply the DRM_MODE_PAGE_FLIP_ASYNC flag to request an async page-flip. Set drm_crtc_state.async_flip accordingly. Document that drivers will reject atomic commits if an async flip isn't possible. This allows user-space to fall back to something else. For instance, Xorg falls back to a blit. Another option is to wait as close to the next vblank as possible before performing the page-flip to reduce latency. 
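As an illustration (not taken from this series), a minimal user-space sketch of the intended usage with libdrm; plane_id and prop_fb_id stand in for object and property IDs looked up beforehand. Only FB_ID is changed, and if the driver cannot perform the flip asynchronously the client simply retries with a vblank-synchronized commit:

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Hypothetical helper: flip plane_id to fb_id, preferring an async flip. */
static int flip_async_or_sync(int fd, uint32_t plane_id,
			      uint32_t prop_fb_id, uint32_t fb_id)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	/* Only FB_ID may change; any other property change is rejected. */
	drmModeAtomicAddProperty(req, plane_id, prop_fb_id, fb_id);

	ret = drmModeAtomicCommit(fd, req,
				  DRM_MODE_ATOMIC_NONBLOCK |
				  DRM_MODE_PAGE_FLIP_ASYNC, NULL);
	if (ret < 0)
		/* Async flip not possible for this update: wait for vblank. */
		ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);

	drmModeAtomicFree(req);
	return ret;
}
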
Signed-off-by: Simon Ser Reviewed-by: Alex Deucher Co-developed-by: André Almeida Signed-off-by: André Almeida Link: https://patchwork.freedesktop.org/patch/msgid/20231122161941.320564-3-andrealmeid@igalia.com --- drivers/gpu/drm/drm_atomic_uapi.c | 25 ++++++++++++++++++++++--- include/uapi/drm/drm_mode.h | 9 +++++++++ 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 52494642b095..116407002269 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -1368,6 +1368,18 @@ static void complete_signaling(struct drm_device *dev, kfree(fence_state); } +static void +set_async_flip(struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int i; + + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + crtc_state->async_flip = true; + } +} + int drm_mode_atomic_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -1409,9 +1421,13 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, } if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) { - drm_dbg_atomic(dev, - "commit failed: invalid flag DRM_MODE_PAGE_FLIP_ASYNC\n"); - return -EINVAL; + if (!dev->mode_config.async_page_flip) { + drm_dbg_atomic(dev, + "commit failed: DRM_MODE_PAGE_FLIP_ASYNC not supported\n"); + return -EINVAL; + } + + async_flip = true; } /* can't test and expect an event at the same time. */ @@ -1514,6 +1530,9 @@ retry: if (ret) goto out; + if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) + set_async_flip(state); + if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) { ret = drm_atomic_check_only(state); } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 09e7a471ee30..95630f170110 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -957,6 +957,15 @@ struct hdr_output_metadata { * Request that the page-flip is performed as soon as possible, ie. with no * delay due to waiting for vblank. This may cause tearing to be visible on * the screen. + * + * When used with atomic uAPI, the driver will return an error if the hardware + * doesn't support performing an asynchronous page-flip for this update. + * User-space should handle this, e.g. by falling back to a regular page-flip. + * + * Note, some hardware might need to perform one last synchronous page-flip + * before being able to switch to asynchronous page-flips. As an exception, + * the driver will return success even though that first page-flip is not + * asynchronous. */ #define DRM_MODE_PAGE_FLIP_ASYNC 0x02 #define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4 -- cgit v1.2.3 From e4d983acffff270ccee417445a69b9ed198658b1 Mon Sep 17 00:00:00 2001 From: Simon Ser Date: Wed, 22 Nov 2023 13:19:40 -0300 Subject: drm: introduce DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This new kernel capability indicates whether async page-flips are supported via the atomic uAPI. DRM clients can use it to check for support before feeding DRM_MODE_PAGE_FLIP_ASYNC to the kernel. Make it clear that DRM_CAP_ASYNC_PAGE_FLIP is for legacy uAPI only. 
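For illustration, a hedged user-space sketch of the corresponding probe; the fallback #define only mirrors the value introduced by this patch, for builds against older headers:

#include <stdbool.h>
#include <stdint.h>
#include <xf86drm.h>

#ifndef DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15	/* value introduced by this patch */
#endif

static bool atomic_async_flip_supported(int fd)
{
	uint64_t value = 0;

	/* DRM_CAP_ASYNC_PAGE_FLIP only answers for the legacy page-flip ioctl. */
	if (drmGetCap(fd, DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP, &value) != 0)
		return false;	/* older kernel: cap unknown, assume unsupported */

	return value == 1;
}
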
Signed-off-by: Simon Ser Reviewed-by: André Almeida Reviewed-by: Alex Deucher Signed-off-by: André Almeida Signed-off-by: Simon Ser Link: https://patchwork.freedesktop.org/patch/msgid/20231122161941.320564-4-andrealmeid@igalia.com --- drivers/gpu/drm/drm_ioctl.c | 4 ++++ include/uapi/drm/drm.h | 10 +++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 44fda68c28ae..f461ed862480 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -301,6 +301,10 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_ case DRM_CAP_CRTC_IN_VBLANK_EVENT: req->value = 1; break; + case DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP: + req->value = drm_core_check_feature(dev, DRIVER_ATOMIC) && + dev->mode_config.async_page_flip; + break; default: return -EINVAL; } diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 8662b5aeea0c..796de831f4a0 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -713,7 +713,8 @@ struct drm_gem_open { /** * DRM_CAP_ASYNC_PAGE_FLIP * - * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC. + * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy + * page-flips. */ #define DRM_CAP_ASYNC_PAGE_FLIP 0x7 /** @@ -773,6 +774,13 @@ struct drm_gem_open { * :ref:`drm_sync_objects`. */ #define DRM_CAP_SYNCOBJ_TIMELINE 0x14 +/** + * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP + * + * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic + * commits. + */ +#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15 /* DRM_IOCTL_GET_CAP ioctl argument type */ struct drm_get_cap { -- cgit v1.2.3 From 4e3b70da64a53784683cfcbac2deda5d6e540407 Mon Sep 17 00:00:00 2001 From: Zack Rusin Date: Mon, 23 Oct 2023 09:46:05 +0200 Subject: drm: Disable the cursor plane on atomic contexts with virtualized drivers Cursor planes on virtualized drivers have special meaning and require that the clients handle them in specific ways, e.g. the cursor plane should react to the mouse movement the way a mouse cursor would be expected to and the client is required to set hotspot properties on it in order for the mouse events to be routed correctly. This breaks the contract as specified by the "universal planes". Fix it by disabling the cursor planes on virtualized drivers while adding a foundation on top of which it's possible to special case mouse cursor planes for clients that want it. Disabling the cursor planes makes some kms compositors which were broken, e.g. Weston, fallback to software cursor which works fine or at least better than currently while having no effect on others, e.g. gnome-shell or kwin, which put virtualized drivers on a deny-list when running in atomic context to make them fallback to legacy kms and avoid this issue. 
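To illustrate the observable effect (sketch only, not part of the patch): an atomic client that has not opted in via the cursor-hotspot client cap added later in this series will simply no longer see cursor planes when enumerating on qxl/vbox/virtio/vmwgfx:

#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void list_planes(int fd)
{
	drmModePlaneRes *res;
	uint32_t i;

	drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
	drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1);

	/* On virtualized drivers this list now omits the cursor plane. */
	res = drmModeGetPlaneResources(fd);
	if (!res)
		return;

	for (i = 0; i < res->count_planes; i++)
		printf("plane %u\n", res->planes[i]);

	drmModeFreePlaneResources(res);
}
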
Signed-off-by: Zack Rusin Fixes: 681e7ec73044 ("drm: Allow userspace to ask for universal plane list (v2)") Cc: # v5.4+ Cc: Maarten Lankhorst Cc: Maxime Ripard Cc: Thomas Zimmermann Cc: David Airlie Cc: Daniel Vetter Cc: Dave Airlie Cc: Gerd Hoffmann Cc: Hans de Goede Cc: Gurchetan Singh Cc: Chia-I Wu Cc: dri-devel@lists.freedesktop.org Cc: virtualization@lists.linux-foundation.org Cc: spice-devel@lists.freedesktop.org Acked-by: Pekka Paalanen Reviewed-by: Javier Martinez Canillas Acked-by: Simon Ser Signed-off-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231023074613.41327-2-aesteve@redhat.com --- drivers/gpu/drm/drm_plane.c | 13 +++++++++++++ drivers/gpu/drm/qxl/qxl_drv.c | 2 +- drivers/gpu/drm/vboxvideo/vbox_drv.c | 2 +- drivers/gpu/drm/virtio/virtgpu_drv.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 2 +- include/drm/drm_drv.h | 9 +++++++++ include/drm/drm_file.h | 12 ++++++++++++ 7 files changed, 38 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 24e7998d1731..c6bbb0c209f4 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -678,6 +678,19 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data, !file_priv->universal_planes) continue; + /* + * If we're running on a virtualized driver then, + * unless userspace advertizes support for the + * virtualized cursor plane, disable cursor planes + * because they'll be broken due to missing cursor + * hotspot info. + */ + if (plane->type == DRM_PLANE_TYPE_CURSOR && + drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT) && + file_priv->atomic && + !file_priv->supports_virtualized_cursor_plane) + continue; + if (drm_lease_held(file_priv, plane->base.id)) { if (count < plane_resp->count_planes && put_user(plane->base.id, plane_ptr + count)) diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 46de4f171970..beee5563031a 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -285,7 +285,7 @@ static const struct drm_ioctl_desc qxl_ioctls[] = { }; static struct drm_driver qxl_driver = { - .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_CURSOR_HOTSPOT, .dumb_create = qxl_mode_dumb_create, .dumb_map_offset = drm_gem_ttm_dumb_map_offset, diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c index 047b95812334..cd9e66a06596 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.c +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -182,7 +182,7 @@ DEFINE_DRM_GEM_FOPS(vbox_fops); static const struct drm_driver driver = { .driver_features = - DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, + DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_CURSOR_HOTSPOT, .fops = &vbox_fops, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 4334c7608408..f8e9abe647b9 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -177,7 +177,7 @@ static const struct drm_driver driver = { * out via drm_device::driver_features: */ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC | - DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE, + DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE | DRIVER_CURSOR_HOTSPOT, .open = virtio_gpu_driver_open, .postclose = virtio_gpu_driver_postclose, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 
8b24ecf60e3e..d3e308fdfd5b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1611,7 +1611,7 @@ static const struct file_operations vmwgfx_driver_fops = { static const struct drm_driver driver = { .driver_features = - DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM, + DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM | DRIVER_CURSOR_HOTSPOT, .ioctls = vmw_ioctls, .num_ioctls = ARRAY_SIZE(vmw_ioctls), .master_set = vmw_master_set, diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index e2640dc64e08..ea36aa79dca2 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -110,6 +110,15 @@ enum drm_driver_feature { * Driver supports user defined GPU VA bindings for GEM objects. */ DRIVER_GEM_GPUVA = BIT(8), + /** + * @DRIVER_CURSOR_HOTSPOT: + * + * Driver supports and requires cursor hotspot information in the + * cursor plane (e.g. cursor plane has to actually track the mouse + * cursor and the clients are required to set hotspot in order for + * the cursor planes to work correctly). + */ + DRIVER_CURSOR_HOTSPOT = BIT(9), /* IMPORTANT: Below are all the legacy flags, add new ones above. */ diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index e1b5b4282f75..8f35dcea82d3 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -226,6 +226,18 @@ struct drm_file { */ bool is_master; + /** + * @supports_virtualized_cursor_plane: + * + * This client is capable of handling the cursor plane with the + * restrictions imposed on it by the virtualized drivers. + * + * This implies that the cursor plane has to behave like a cursor + * i.e. track cursor movement. It also requires setting of the + * hotspot properties by the client on the cursor plane. + */ + bool supports_virtualized_cursor_plane; + /** * @master: * -- cgit v1.2.3 From 8f7179a1027d89bf949b0b80c388a544a5e096f2 Mon Sep 17 00:00:00 2001 From: Zack Rusin Date: Mon, 23 Oct 2023 09:46:06 +0200 Subject: drm/atomic: Add support for mouse hotspots Atomic modesetting code lacked support for specifying mouse cursor hotspots. The legacy kms DRM_IOCTL_MODE_CURSOR2 had support for setting the hotspot but the functionality was not implemented in the new atomic paths. Due to the lack of hotspots in the atomic paths userspace compositors completely disable atomic modesetting for drivers that require it (i.e. all paravirtualized drivers). This change adds hotspot properties to the atomic codepaths throughout the DRM core and will allow enabling atomic modesetting for virtualized drivers in the userspace.
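For illustration (not part of the patch), a user-space sketch of how the new properties are meant to be used as part of a cursor-plane update; the cursor_props struct is hypothetical and holds property IDs resolved by name ("FB_ID", "CRTC_X", "CRTC_Y", "HOTSPOT_X", "HOTSPOT_Y") beforehand, and the CRTC_ID/SRC_* setup is assumed to be done elsewhere:

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Hypothetical: property IDs for the cursor plane, looked up earlier. */
struct cursor_props {
	uint32_t fb_id, crtc_x, crtc_y, hotspot_x, hotspot_y;
};

static int move_cursor(int fd, uint32_t plane_id, const struct cursor_props *p,
		       uint32_t fb_id, int ptr_x, int ptr_y, int hot_x, int hot_y)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	/* The plane is placed so that the hotspot lands on the pointer. */
	drmModeAtomicAddProperty(req, plane_id, p->fb_id, fb_id);
	drmModeAtomicAddProperty(req, plane_id, p->crtc_x, ptr_x - hot_x);
	drmModeAtomicAddProperty(req, plane_id, p->crtc_y, ptr_y - hot_y);
	/* New with this patch: HOTSPOT_X/HOTSPOT_Y, consumed by the driver. */
	drmModeAtomicAddProperty(req, plane_id, p->hotspot_x, hot_x);
	drmModeAtomicAddProperty(req, plane_id, p->hotspot_y, hot_y);

	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
	drmModeAtomicFree(req);
	return ret;
}
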
Signed-off-by: Zack Rusin Cc: Maarten Lankhorst Cc: Maxime Ripard Cc: Thomas Zimmermann Cc: David Airlie Cc: Daniel Vetter Reviewed-by: Javier Martinez Canillas Signed-off-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231023074613.41327-3-aesteve@redhat.com --- drivers/gpu/drm/drm_atomic_state_helper.c | 14 +++++++++ drivers/gpu/drm/drm_atomic_uapi.c | 20 +++++++++++++ drivers/gpu/drm/drm_plane.c | 50 +++++++++++++++++++++++++++++++ include/drm/drm_plane.h | 14 +++++++++ 4 files changed, 98 insertions(+) diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index 784e63d70a42..54975de44a0e 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -275,6 +275,20 @@ void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state, plane_state->normalized_zpos = val; } } + + if (plane->hotspot_x_property) { + if (!drm_object_property_get_default_value(&plane->base, + plane->hotspot_x_property, + &val)) + plane_state->hotspot_x = val; + } + + if (plane->hotspot_y_property) { + if (!drm_object_property_get_default_value(&plane->base, + plane->hotspot_y_property, + &val)) + plane_state->hotspot_y = val; + } } EXPORT_SYMBOL(__drm_atomic_helper_plane_state_reset); diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 116407002269..aee4a65d4959 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -593,6 +593,22 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane, } else if (plane->funcs->atomic_set_property) { return plane->funcs->atomic_set_property(plane, state, property, val); + } else if (property == plane->hotspot_x_property) { + if (plane->type != DRM_PLANE_TYPE_CURSOR) { + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] is not a cursor plane: 0x%llx\n", + plane->base.id, plane->name, val); + return -EINVAL; + } + state->hotspot_x = val; + } else if (property == plane->hotspot_y_property) { + if (plane->type != DRM_PLANE_TYPE_CURSOR) { + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] is not a cursor plane: 0x%llx\n", + plane->base.id, plane->name, val); + return -EINVAL; + } + state->hotspot_y = val; } else { drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n", @@ -653,6 +669,10 @@ drm_atomic_plane_get_property(struct drm_plane *plane, *val = state->scaling_filter; } else if (plane->funcs->atomic_get_property) { return plane->funcs->atomic_get_property(plane, state, property, val); + } else if (property == plane->hotspot_x_property) { + *val = state->hotspot_x; + } else if (property == plane->hotspot_y_property) { + *val = state->hotspot_y; } else { drm_dbg_atomic(dev, "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n", diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index c6bbb0c209f4..eaca367bdc7e 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -230,6 +230,47 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane return 0; } +/** + * drm_plane_create_hotspot_properties - creates the mouse hotspot + * properties and attaches them to the given cursor plane + * + * @plane: drm cursor plane + * + * This function enables the mouse hotspot property on a given + * cursor plane. 
+ * + * RETURNS: + * Zero for success or -errno + */ +static int drm_plane_create_hotspot_properties(struct drm_plane *plane) +{ + struct drm_property *prop_x; + struct drm_property *prop_y; + + drm_WARN_ON(plane->dev, + !drm_core_check_feature(plane->dev, + DRIVER_CURSOR_HOTSPOT)); + + prop_x = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_X", + INT_MIN, INT_MAX); + if (IS_ERR(prop_x)) + return PTR_ERR(prop_x); + + prop_y = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_Y", + INT_MIN, INT_MAX); + if (IS_ERR(prop_y)) { + drm_property_destroy(plane->dev, prop_x); + return PTR_ERR(prop_y); + } + + drm_object_attach_property(&plane->base, prop_x, 0); + drm_object_attach_property(&plane->base, prop_y, 0); + plane->hotspot_x_property = prop_x; + plane->hotspot_y_property = prop_y; + + return 0; +} + __printf(9, 0) static int __drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, @@ -348,6 +389,10 @@ static int __drm_universal_plane_init(struct drm_device *dev, drm_object_attach_property(&plane->base, config->prop_src_w, 0); drm_object_attach_property(&plane->base, config->prop_src_h, 0); } + if (drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT) && + type == DRM_PLANE_TYPE_CURSOR) { + drm_plane_create_hotspot_properties(plane); + } if (format_modifier_count) create_in_format_blob(dev, plane); @@ -1067,6 +1112,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, fb->hot_x = req->hot_x; fb->hot_y = req->hot_y; + + if (plane->hotspot_x_property && plane->state) + plane->state->hotspot_x = req->hot_x; + if (plane->hotspot_y_property && plane->state) + plane->state->hotspot_y = req->hot_y; } else { fb = NULL; } diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 79d62856defb..e2c671585775 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -116,6 +116,10 @@ struct drm_plane_state { /** @src_h: height of visible portion of plane (in 16.16) */ uint32_t src_h, src_w; + /** @hotspot_x: x offset to mouse cursor hotspot */ + /** @hotspot_y: y offset to mouse cursor hotspot */ + int32_t hotspot_x, hotspot_y; + /** * @alpha: * Opacity of the plane with 0 as completely transparent and 0xffff as @@ -748,6 +752,16 @@ struct drm_plane { * scaling. */ struct drm_property *scaling_filter_property; + + /** + * @hotspot_x_property: property to set mouse hotspot x offset. + */ + struct drm_property *hotspot_x_property; + + /** + * @hotspot_y_property: property to set mouse hotspot y offset. + */ + struct drm_property *hotspot_y_property; }; #define obj_to_plane(x) container_of(x, struct drm_plane, base) -- cgit v1.2.3 From cd5499429237b7ba3f5bfd3efb488688886c82fe Mon Sep 17 00:00:00 2001 From: Zack Rusin Date: Mon, 23 Oct 2023 09:46:07 +0200 Subject: drm/vmwgfx: Use the hotspot properties from cursor planes Atomic modesetting got support for mouse hotspots via the hotspot properties. Port the legacy kms hotspot handling to the new properties on cursor planes. 
Signed-off-by: Zack Rusin Cc: Maaz Mombasawala Reviewed-by: Javier Martinez Canillas Reviewed-by: Martin Krastev Signed-off-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231023074613.41327-4-aesteve@redhat.com --- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 818b7f109f53..bea0abc3d418 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -768,13 +768,8 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state); s32 hotspot_x, hotspot_y; - hotspot_x = du->hotspot_x; - hotspot_y = du->hotspot_y; - - if (new_state->fb) { - hotspot_x += new_state->fb->hot_x; - hotspot_y += new_state->fb->hot_y; - } + hotspot_x = du->hotspot_x + new_state->hotspot_x; + hotspot_y = du->hotspot_y + new_state->hotspot_y; du->cursor_surface = vps->surf; du->cursor_bo = vps->bo; -- cgit v1.2.3 From 305b391d8f84a46119b5554a7a7af775266ce382 Mon Sep 17 00:00:00 2001 From: Zack Rusin Date: Mon, 23 Oct 2023 09:46:08 +0200 Subject: drm/qxl: Use the hotspot properties from cursor planes Atomic modesetting got support for mouse hotspots via the hotspot properties. Port the legacy kms hotspot handling to the new properties on cursor planes. Signed-off-by: Zack Rusin Reviewed-by: Gerd Hoffmann Cc: Dave Airlie Cc: Daniel Vetter Cc: virtualization@lists.linux-foundation.org Cc: spice-devel@lists.freedesktop.org Reviewed-by: Javier Martinez Canillas Signed-off-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231023074613.41327-5-aesteve@redhat.com --- drivers/gpu/drm/qxl/qxl_display.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 404b0483bb7c..c6d35c33d5d6 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -485,7 +485,6 @@ static int qxl_primary_atomic_check(struct drm_plane *plane, static int qxl_primary_apply_cursor(struct qxl_device *qdev, struct drm_plane_state *plane_state) { - struct drm_framebuffer *fb = plane_state->fb; struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc); struct qxl_cursor_cmd *cmd; struct qxl_release *release; @@ -510,8 +509,8 @@ static int qxl_primary_apply_cursor(struct qxl_device *qdev, cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_CURSOR_SET; - cmd->u.set.position.x = plane_state->crtc_x + fb->hot_x; - cmd->u.set.position.y = plane_state->crtc_y + fb->hot_y; + cmd->u.set.position.x = plane_state->crtc_x + plane_state->hotspot_x; + cmd->u.set.position.y = plane_state->crtc_y + plane_state->hotspot_y; cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0); @@ -531,7 +530,6 @@ out_free_release: static int qxl_primary_move_cursor(struct qxl_device *qdev, struct drm_plane_state *plane_state) { - struct drm_framebuffer *fb = plane_state->fb; struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc); struct qxl_cursor_cmd *cmd; struct qxl_release *release; @@ -554,8 +552,8 @@ static int qxl_primary_move_cursor(struct qxl_device *qdev, cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_CURSOR_MOVE; - cmd->u.position.x = plane_state->crtc_x + fb->hot_x; - cmd->u.position.y = plane_state->crtc_y + fb->hot_y; + cmd->u.position.x = plane_state->crtc_x + 
plane_state->hotspot_x; + cmd->u.position.y = plane_state->crtc_y + plane_state->hotspot_y; qxl_release_unmap(qdev, release, &cmd->release_info); qxl_release_fence_buffer_objects(release); @@ -851,8 +849,8 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane, struct qxl_bo *old_cursor_bo = qcrtc->cursor_bo; qcrtc->cursor_bo = qxl_create_cursor(qdev, user_bo, - new_state->fb->hot_x, - new_state->fb->hot_y); + new_state->hotspot_x, + new_state->hotspot_y); qxl_free_cursor(old_cursor_bo); } -- cgit v1.2.3 From 44d877a1de912fa24d1af8f76433a914e6816057 Mon Sep 17 00:00:00 2001 From: Zack Rusin Date: Mon, 23 Oct 2023 09:46:09 +0200 Subject: drm/vboxvideo: Use the hotspot properties from cursor planes Atomic modesetting got support for mouse hotspots via the hotspot properties. Port the legacy kms hotspot handling to the new properties on cursor planes. Signed-off-by: Zack Rusin Cc: Hans de Goede Cc: David Airlie Cc: Daniel Vetter Reviewed-by: Javier Martinez Canillas Signed-off-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231023074613.41327-6-aesteve@redhat.com --- drivers/gpu/drm/vboxvideo/vbox_mode.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c index 341edd982cb3..9ff3bade9795 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_mode.c +++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c @@ -429,8 +429,8 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane, flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA; hgsmi_update_pointer_shape(vbox->guest_pool, flags, - min_t(u32, max(fb->hot_x, 0), width), - min_t(u32, max(fb->hot_y, 0), height), + min_t(u32, max(new_state->hotspot_x, 0), width), + min_t(u32, max(new_state->hotspot_y, 0), height), width, height, vbox->cursor_data, data_size); mutex_unlock(&vbox->hw_mutex); -- cgit v1.2.3 From cc6c535967ed07fd75f54a26a70091826daf691e Mon Sep 17 00:00:00 2001 From: Zack Rusin Date: Mon, 23 Oct 2023 09:46:10 +0200 Subject: drm/virtio: Use the hotspot properties from cursor planes Atomic modesetting got support for mouse hotspots via the hotspot properties. Port the legacy kms hotspot handling to the new properties on cursor planes. Signed-off-by: Zack Rusin Reviewed-by: Gerd Hoffmann Cc: David Airlie Cc: Gurchetan Singh Cc: Chia-I Wu Cc: Daniel Vetter Cc: virtualization@lists.linux-foundation.org Reviewed-by: Javier Martinez Canillas Signed-off-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231023074613.41327-7-aesteve@redhat.com --- drivers/gpu/drm/virtio/virtgpu_plane.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index a2e045f3a000..20de599658c1 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -323,16 +323,16 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle, plane->state->crtc_x, plane->state->crtc_y, - plane->state->fb ? plane->state->fb->hot_x : 0, - plane->state->fb ? 
plane->state->fb->hot_y : 0); + plane->state->hotspot_x, + plane->state->hotspot_y); output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR); output->cursor.resource_id = cpu_to_le32(handle); if (plane->state->fb) { output->cursor.hot_x = - cpu_to_le32(plane->state->fb->hot_x); + cpu_to_le32(plane->state->hotspot_x); output->cursor.hot_y = - cpu_to_le32(plane->state->fb->hot_y); + cpu_to_le32(plane->state->hotspot_y); } else { output->cursor.hot_x = cpu_to_le32(0); output->cursor.hot_y = cpu_to_le32(0); -- cgit v1.2.3 From bce3dab7eb6ee596388699e8a052a7d58954c472 Mon Sep 17 00:00:00 2001 From: Zack Rusin Date: Mon, 23 Oct 2023 09:46:11 +0200 Subject: drm: Remove legacy cursor hotspot code Atomic modesetting supports mouse cursor offsets via the hotspot properties that are created on cursor planes. All drivers which support hotspots are atomic and the legacy code has been implemented in terms of the atomic properties as well. Due to the above the legacy cursor hotspot code is no longer used or needed and can be removed. Signed-off-by: Zack Rusin Cc: Maarten Lankhorst Cc: Maxime Ripard Cc: Thomas Zimmermann Cc: David Airlie Cc: Daniel Vetter Reviewed-by: Javier Martinez Canillas Signed-off-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231023074613.41327-8-aesteve@redhat.com --- drivers/gpu/drm/drm_plane.c | 3 --- include/drm/drm_framebuffer.h | 12 ------------ 2 files changed, 15 deletions(-) diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index eaca367bdc7e..1dc00ad4c33c 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -1110,9 +1110,6 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, return PTR_ERR(fb); } - fb->hot_x = req->hot_x; - fb->hot_y = req->hot_y; - if (plane->hotspot_x_property && plane->state) plane->state->hotspot_x = req->hot_x; if (plane->hotspot_y_property && plane->state) plane->state->hotspot_y = req->hot_y; diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index 80ece7b6dd9b..668077009fce 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -188,18 +188,6 @@ struct drm_framebuffer { * DRM_MODE_FB_MODIFIERS. */ int flags; - /** - * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor - * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR - * universal plane. - */ - int hot_x; - /** - * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor - * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR - * universal plane. - */ - int hot_y; /** * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock. */ -- cgit v1.2.3 From 9724ed6c1b1212d138e63f5e80647dc8b6b86696 Mon Sep 17 00:00:00 2001 From: Zack Rusin Date: Mon, 23 Oct 2023 09:46:12 +0200 Subject: drm: Introduce DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT Virtualized drivers place additional restrictions on the cursor plane which breaks the contract of universal planes. To allow atomic modesettings with virtualized drivers the clients need to advertise that they're capable of dealing with those extra restrictions. To do that introduce DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT which lets DRM know that the client is aware of and capable of dealing with the extra restrictions on the virtual cursor plane. Setting this option to true makes DRM expose the cursor plane on virtualized drivers. The userspace is expected to set the hotspots and handle mouse events on that plane.
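For illustration, a hedged sketch of how an atomic client opts in; the fallback #define only mirrors the value introduced by this patch:

#include <xf86drm.h>

#ifndef DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6	/* value introduced by this patch */
#endif

static void opt_in_to_virtual_cursor(int fd)
{
	/* The cap is only accepted for atomic clients. */
	drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1);

	/*
	 * On non-virtualized drivers this returns -EOPNOTSUPP, which is fine:
	 * the cursor plane is exposed unconditionally there. On virtualized
	 * drivers success means cursor planes become visible again, and the
	 * client is then expected to set HOTSPOT_X/HOTSPOT_Y on them.
	 */
	drmSetClientCap(fd, DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT, 1);
}
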
Signed-off-by: Zack Rusin Cc: Maarten Lankhorst Cc: Maxime Ripard Cc: Thomas Zimmermann Cc: David Airlie Cc: Daniel Vetter Cc: dri-devel@lists.freedesktop.org Acked-by: Pekka Paalanen Reviewed-by: Javier Martinez Canillas Signed-off-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231023074613.41327-9-aesteve@redhat.com --- drivers/gpu/drm/drm_ioctl.c | 9 +++++++++ include/uapi/drm/drm.h | 25 +++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index f461ed862480..ea303386e688 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -365,6 +365,15 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv) return -EINVAL; file_priv->writeback_connectors = req->value; break; + case DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT: + if (!drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT)) + return -EOPNOTSUPP; + if (!file_priv->atomic) + return -EINVAL; + if (req->value > 1) + return -EINVAL; + file_priv->supports_virtualized_cursor_plane = req->value; + break; default: return -EINVAL; } diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 796de831f4a0..91943e5ce010 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -850,6 +850,31 @@ struct drm_get_cap { */ #define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5 +/** + * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT + * + * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and + * virtualbox) have additional restrictions for cursor planes (thus + * making cursor planes on those drivers not truly universal,) e.g. + * they need cursor planes to act like one would expect from a mouse + * cursor and have correctly set hotspot properties. + * If this client cap is not set the DRM core will hide cursor plane on + * those virtualized drivers because not setting it implies that the + * client is not capable of dealing with those extra restictions. + * Clients which do set cursor hotspot and treat the cursor plane + * like a mouse cursor should set this property. + * The client must enable &DRM_CLIENT_CAP_ATOMIC first. + * + * Setting this property on drivers which do not special case + * cursor planes (i.e. non-virtualized drivers) will return + * EOPNOTSUPP, which can be used by userspace to gauge + * requirements of the hardware/drivers they're running on. + * + * This capability is always supported for atomic-capable virtualized + * drivers starting from kernel version 6.6. + */ +#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6 + /* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ struct drm_set_client_cap { __u64 capability; -- cgit v1.2.3 From 4653f9d014117f78813cae7b022c15b899c77d7b Mon Sep 17 00:00:00 2001 From: Michael Banack Date: Mon, 23 Oct 2023 09:46:13 +0200 Subject: drm: Introduce documentation for hotspot properties To clarify the intent and reasoning behind the hotspot properties introduce userspace documentation that goes over cursor handling in para-virtualized environments. The documentation is generic enough to not special case for any specific hypervisor and should apply equally to all. 
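The relationship being documented can be summed up in a short sketch of the host-facing computation; this mirrors what the qxl and vmwgfx changes earlier in this series do, and the helper name is purely illustrative:

#include <drm/drm_plane.h>

/*
 * Illustrative only: with the hotspot properties set, the guest's global
 * pointer position can be derived purely from the cursor plane state,
 * which is what lets the host console draw the cursor ahead of time.
 */
static inline void cursor_plane_pointer_position(const struct drm_plane_state *state,
						 int *x, int *y)
{
	*x = state->crtc_x + state->hotspot_x;
	*y = state->crtc_y + state->hotspot_y;
}
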
Signed-off-by: Zack Rusin Acked-by: Pekka Paalanen Reviewed-by: Javier Martinez Canillas Signed-off-by: Michael Banack Signed-off-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231023074613.41327-10-aesteve@redhat.com --- Documentation/gpu/drm-kms.rst | 6 +++++ drivers/gpu/drm/drm_plane.c | 58 ++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 63 insertions(+), 1 deletion(-) diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst index 270d320407c7..4a112b8cc7fb 100644 --- a/Documentation/gpu/drm-kms.rst +++ b/Documentation/gpu/drm-kms.rst @@ -579,6 +579,12 @@ Variable Refresh Properties .. kernel-doc:: drivers/gpu/drm/drm_connector.c :doc: Variable refresh properties +Cursor Hotspot Properties +--------------------------- + +.. kernel-doc:: drivers/gpu/drm/drm_plane.c + :doc: hotspot properties + Existing KMS Properties ----------------------- diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 1dc00ad4c33c..f3f2eae83cca 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -230,6 +230,61 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane return 0; } +/** + * DOC: hotspot properties + * + * HOTSPOT_X: property to set mouse hotspot x offset. + * HOTSPOT_Y: property to set mouse hotspot y offset. + * + * When the plane is being used as a cursor image to display a mouse pointer, + * the "hotspot" is the offset within the cursor image where mouse events + * are expected to go. + * + * Positive values move the hotspot from the top-left corner of the cursor + * plane towards the right and bottom. + * + * Most display drivers do not need this information because the + * hotspot is not actually connected to anything visible on screen. + * However, this is necessary for display drivers like the para-virtualized + * drivers (eg qxl, vbox, virtio, vmwgfx), that are attached to a user console + * with a mouse pointer. Since these consoles are often being remoted over a + * network, they would otherwise have to wait to display the pointer movement to + * the user until a full network round-trip has occurred. New mouse events have + * to be sent from the user's console, over the network to the virtual input + * devices, forwarded to the desktop for processing, and then the cursor plane's + * position can be updated and sent back to the user's console over the network. + * Instead, with the hotspot information, the console can anticipate the new + * location, and draw the mouse cursor there before the confirmation comes in. + * To do that correctly, the user's console must be able predict how the + * desktop will process mouse events, which normally requires the desktop's + * mouse topology information, ie where each CRTC sits in the mouse coordinate + * space. This is typically sent to the para-virtualized drivers using some + * driver-specific method, and the driver then forwards it to the console by + * way of the virtual display device or hypervisor. + * + * The assumption is generally made that there is only one cursor plane being + * used this way at a time, and that the desktop is feeding all mouse devices + * into the same global pointer. Para-virtualized drivers that require this + * should only be exposing a single cursor plane, or find some other way + * to coordinate with a userspace desktop that supports multiple pointers. 
+ * If the hotspot properties are set, the cursor plane is therefore assumed to be + * used only for displaying a mouse cursor image, and the position of the combined + * cursor plane + offset can therefore be used for coordinating with input from a + * mouse device. + * + * The cursor will then be drawn either at the location of the plane in the CRTC + * console, or as a free-floating cursor plane on the user's console + * corresponding to their desktop mouse position. + * + * DRM clients which would like to work correctly on drivers which expose + * hotspot properties should advertise DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT. + * Setting this property on drivers which do not special case + * cursor planes will return EOPNOTSUPP, which can be used by userspace to + * gauge requirements of the hardware/drivers they're running on. Advertising + * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT implies that the userspace client will be + * correctly setting the hotspot properties. + */ + /** * drm_plane_create_hotspot_properties - creates the mouse hotspot * properties and attaches them to the given cursor plane @@ -237,7 +292,8 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane * @plane: drm cursor plane * * This function enables the mouse hotspot property on a given - * cursor plane. + * cursor plane. Look at the documentation for hotspot properties + * to get a better understanding for what they're used for. * * RETURNS: * Zero for success or -errno -- cgit v1.2.3 From 35ed38d58257336c1df26b14fd5110b026e2adde Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Thu, 23 Nov 2023 23:13:00 +0100 Subject: drm: Allow drivers to indicate the damage helpers to ignore damage clips It allows drivers to set a struct drm_plane_state .ignore_damage_clips in their plane's .atomic_check callback, as an indication to damage helpers such as drm_atomic_helper_damage_iter_init() that the damage clips should be ignored. To be used by drivers that do per-buffer (e.g: virtio-gpu) uploads (rather than per-plane uploads), since these type of drivers need to handle buffer damages instead of frame damages. That way, these drivers could force a full plane update if the framebuffer attached to a plane's state has changed since the last update (page-flip). Fixes: 01f05940a9a7 ("drm/virtio: Enable fb damage clips property for the primary plane") Cc: # v6.4+ Reported-by: nerdopolis Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218115 Suggested-by: Thomas Zimmermann Signed-off-by: Javier Martinez Canillas Reviewed-by: Thomas Zimmermann Reviewed-by: Zack Rusin Acked-by: Sima Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20231123221315.3579454-2-javierm@redhat.com --- Documentation/gpu/drm-kms.rst | 2 ++ drivers/gpu/drm/drm_damage_helper.c | 3 ++- include/drm/drm_plane.h | 10 ++++++++++ 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst index 4a112b8cc7fb..13d3627d8bc0 100644 --- a/Documentation/gpu/drm-kms.rst +++ b/Documentation/gpu/drm-kms.rst @@ -548,6 +548,8 @@ Plane Composition Properties .. kernel-doc:: drivers/gpu/drm/drm_blend.c :doc: overview +.. 
_damage_tracking_properties: + + Damage Tracking Properties -------------------------- diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c index d8b2955e88fd..afb02aae707b 100644 --- a/drivers/gpu/drm/drm_damage_helper.c +++ b/drivers/gpu/drm/drm_damage_helper.c @@ -241,7 +241,8 @@ drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter, iter->plane_src.x2 = (src.x2 >> 16) + !!(src.x2 & 0xFFFF); iter->plane_src.y2 = (src.y2 >> 16) + !!(src.y2 & 0xFFFF); - if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) { + if (!iter->clips || state->ignore_damage_clips || + !drm_rect_equals(&state->src, &old_state->src)) { iter->clips = NULL; iter->num_clips = 0; iter->full_update = true; diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index e2c671585775..c6565a6f9324 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -194,6 +194,16 @@ struct drm_plane_state { */ struct drm_property_blob *fb_damage_clips; + /** + * @ignore_damage_clips: + * + * Set by drivers to indicate the drm_atomic_helper_damage_iter_init() + * helper that the @fb_damage_clips blob property should be ignored. + * + * See :ref:`damage_tracking_properties` for more information. + */ + bool ignore_damage_clips; + /** * @src: * -- cgit v1.2.3 From 0240db231dfe5ee5b7a3a03cba96f0844b7a673d Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Thu, 23 Nov 2023 23:13:01 +0100 Subject: drm/virtio: Disable damage clipping if FB changed since last page-flip The driver does per-buffer uploads and needs to force a full plane update if the plane's attached framebuffer has changed since the last page-flip. Fixes: 01f05940a9a7 ("drm/virtio: Enable fb damage clips property for the primary plane") Cc: # v6.4+ Reported-by: nerdopolis Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218115 Suggested-by: Sima Vetter Signed-off-by: Javier Martinez Canillas Reviewed-by: Thomas Zimmermann Reviewed-by: Zack Rusin Acked-by: Sima Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20231123221315.3579454-3-javierm@redhat.com --- drivers/gpu/drm/virtio/virtgpu_plane.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 20de599658c1..a72a2dbda031 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -79,6 +79,8 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane, { struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, + plane); bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR; struct drm_crtc_state *crtc_state; int ret; @@ -86,6 +88,14 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane, if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc)) return 0; + /* + * Ignore damage clips if the framebuffer attached to the plane's state + * has changed since the last plane update (page-flip). In this case, a + * full plane update should happen because uploads are done per-buffer.
+ */ + if (old_plane_state->fb != new_plane_state->fb) + new_plane_state->ignore_damage_clips = true; + crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc); if (IS_ERR(crtc_state)) -- cgit v1.2.3 From b83b2a80d662cc8ba9d78db64fb70fbb5a481d9c Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Thu, 23 Nov 2023 23:13:02 +0100 Subject: drm/vmwgfx: Disable damage clipping if FB changed since last page-flip The driver does per-buffer uploads and needs to force a full plane update if the plane's attached framebuffer has changed since the last page-flip. Suggested-by: Sima Vetter Signed-off-by: Javier Martinez Canillas Reviewed-by: Thomas Zimmermann Reviewed-by: Zack Rusin Acked-by: Sima Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20231123221315.3579454-4-javierm@redhat.com --- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index bea0abc3d418..5fd0ccaa0b41 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -832,10 +832,21 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, { struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct drm_crtc_state *crtc_state = NULL; struct drm_framebuffer *new_fb = new_state->fb; + struct drm_framebuffer *old_fb = old_state->fb; int ret; + /* + * Ignore damage clips if the framebuffer attached to the plane's state + * has changed since the last plane update (page-flip). In this case, a + * full plane update should happen because uploads are done per-buffer. + */ + if (old_fb != new_fb) + new_state->ignore_damage_clips = true; + if (new_state->crtc) crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc); -- cgit v1.2.3 From 017bdf8fa20175b9cccbc746122256432a599845 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Thu, 23 Nov 2023 23:13:03 +0100 Subject: drm/plane: Extend damage tracking kernel-doc The "Damage Tracking Properties" section in the documentation doesn't have info about the two types of damage handling: frame damage vs buffer damage. Add it to the section and mention that helpers only support frame damage, and how drivers handling buffer damage can indicate that the damage clips should be ignored. Also add references to further documentation about the two damage types. Suggested-by: Simon Ser Signed-off-by: Javier Martinez Canillas Reviewed-by: Simon Ser Reviewed-by: Thomas Zimmermann Reviewed-by: Zack Rusin Acked-by: Sima Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20231123221315.3579454-5-javierm@redhat.com --- drivers/gpu/drm/drm_plane.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index f3f2eae83cca..9e8e4c60983d 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -1558,6 +1558,36 @@ out: * Drivers implementing damage can use drm_atomic_helper_damage_iter_init() and * drm_atomic_helper_damage_iter_next() helper iterator function to get damage * rectangles clipped to &drm_plane_state.src. + * + * Note that there are two types of damage handling: frame damage and buffer + * damage, the type of damage handling implemented depends on a driver's upload + * target.
Drivers implementing a per-plane or per-CRTC upload target need to + * handle frame damage, while drivers implementing a per-buffer upload target + * need to handle buffer damage. + * + * The existing damage helpers only support the frame damage type, there is no + * buffer age support or similar damage accumulation algorithm implemented yet. + * + * Only drivers handling frame damage can use the mentioned damage helpers to + * iterate over the damaged regions. Drivers that handle buffer damage, must set + * &drm_plane_state.ignore_damage_clips for drm_atomic_helper_damage_iter_init() + * to know that damage clips should be ignored and return &drm_plane_state.src + * as the damage rectangle, to force a full plane update. + * + * Drivers with a per-buffer upload target could compare the &drm_plane_state.fb + * of the old and new plane states to determine if the framebuffer attached to a + * plane has changed or not since the last plane update. If &drm_plane_state.fb + * has changed, then &drm_plane_state.ignore_damage_clips must be set to true. + * + * That is because drivers with a per-plane upload target, expect the backing + * storage buffer to not change for a given plane. If the upload buffer changes + * between page flips, the new upload buffer has to be updated as a whole. This + * can be improved in the future if support for frame damage is added to the DRM + * damage helpers, similarly to how user-space already handle this case as it is + * explained in the following documents: + * + * https://registry.khronos.org/EGL/extensions/KHR/EGL_KHR_swap_buffers_with_damage.txt + * https://emersion.fr/blog/2019/intro-to-damage-tracking/ */ /** -- cgit v1.2.3 From 6c18005d8fabe8a6835fc0f96102d4269199e05b Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Thu, 23 Nov 2023 23:13:04 +0100 Subject: drm/todo: Add entry about implementing buffer age for damage tracking Currently, only damage tracking for frame damage is supported. If a driver needs to do buffer damage (e.g: the framebuffer attached to plane's state has changed since the last page-flip), the damage helpers just fallback to a full plane update. Add an entry in the TODO about implementing buffer age or any other damage accumulation algorithm for buffer damage handling. Suggested-by: Simon Ser Signed-off-by: Javier Martinez Canillas Reviewed-by: Simon Ser Reviewed-by: Thomas Zimmermann Reviewed-by: Zack Rusin Acked-by: Sima Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20231123221315.3579454-6-javierm@redhat.com --- Documentation/gpu/todo.rst | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index b62c7fa0c2bc..503d57c75215 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -782,6 +782,29 @@ Contact: Hans de Goede Level: Advanced +Buffer age or other damage accumulation algorithm for buffer damage +=================================================================== + +Drivers that do per-buffer uploads, need a buffer damage handling (rather than +frame damage like drivers that do per-plane or per-CRTC uploads), but there is +no support to get the buffer age or any other damage accumulation algorithm. + +For this reason, the damage helpers just fallback to a full plane update if the +framebuffer attached to a plane has changed since the last page-flip.
Drivers +set &drm_plane_state.ignore_damage_clips to true as indication to +drm_atomic_helper_damage_iter_init() and drm_atomic_helper_damage_iter_next() +helpers that the damage clips should be ignored. + +This should be improved to get damage tracking properly working on drivers that +do per-buffer uploads. + +More information about damage tracking and references to learning materials can +be found in :ref:`damage_tracking_properties`. + +Contact: Javier Martinez Canillas + +Level: Advanced + Outside DRM =========== -- cgit v1.2.3 From 014f831abcb82738e57c0b00db66dfef0798ed67 Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Mon, 13 Nov 2023 23:12:00 +0100 Subject: drm/nouveau: use GPUVM common infrastructure GPUVM provides common infrastructure to track external and evicted GEM objects as well as locking and validation helpers. Especially external and evicted object tracking is a huge improvement compared to the current brute force approach of iterating all mappings in order to lock and validate the GPUVM's GEM objects. Hence, make us of it. Signed-off-by: Danilo Krummrich Reviewed-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20231113221202.7203-1-dakr@redhat.com --- drivers/gpu/drm/nouveau/nouveau_bo.c | 4 +- drivers/gpu/drm/nouveau/nouveau_exec.c | 57 ++++---------- drivers/gpu/drm/nouveau/nouveau_exec.h | 4 - drivers/gpu/drm/nouveau/nouveau_sched.c | 9 ++- drivers/gpu/drm/nouveau/nouveau_sched.h | 7 +- drivers/gpu/drm/nouveau/nouveau_uvmm.c | 134 +++++++++++++++++--------------- 6 files changed, 100 insertions(+), 115 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 7afad86da64b..b7dda486a7ea 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1061,17 +1061,18 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_bo *nvbo = nouveau_bo(bo); + struct drm_gem_object *obj = &bo->base; struct ttm_resource *old_reg = bo->resource; struct nouveau_drm_tile *new_tile = NULL; int ret = 0; - if (new_reg->mem_type == TTM_PL_TT) { ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg); if (ret) return ret; } + drm_gpuvm_bo_gem_evict(obj, evict); nouveau_bo_move_ntfy(bo, new_reg); ret = ttm_bo_wait_ctx(bo, ctx); if (ret) @@ -1136,6 +1137,7 @@ out: out_ntfy: if (ret) { nouveau_bo_move_ntfy(bo, bo->resource); + drm_gpuvm_bo_gem_evict(obj, !evict); } return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c index 9a5ef574744b..f8ef7004d133 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.c +++ b/drivers/gpu/drm/nouveau/nouveau_exec.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: MIT -#include - #include "nouveau_drv.h" #include "nouveau_gem.h" #include "nouveau_mem.h" @@ -86,14 +84,12 @@ */ static int -nouveau_exec_job_submit(struct nouveau_job *job) +nouveau_exec_job_submit(struct nouveau_job *job, + struct drm_gpuvm_exec *vme) { struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job); struct nouveau_cli *cli = job->cli; struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli); - struct drm_exec *exec = &job->exec; - struct drm_gem_object *obj; - unsigned long index; int ret; /* Create a new fence, but do not emit yet. 
*/ @@ -102,52 +98,29 @@ nouveau_exec_job_submit(struct nouveau_job *job) return ret; nouveau_uvmm_lock(uvmm); - drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT | - DRM_EXEC_IGNORE_DUPLICATES); - drm_exec_until_all_locked(exec) { - struct drm_gpuva *va; - - drm_gpuvm_for_each_va(va, &uvmm->base) { - if (unlikely(va == &uvmm->base.kernel_alloc_node)) - continue; - - ret = drm_exec_prepare_obj(exec, va->gem.obj, 1); - drm_exec_retry_on_contention(exec); - if (ret) - goto err_uvmm_unlock; - } + ret = drm_gpuvm_exec_lock(vme); + if (ret) { + nouveau_uvmm_unlock(uvmm); + return ret; } nouveau_uvmm_unlock(uvmm); - drm_exec_for_each_locked_object(exec, index, obj) { - struct nouveau_bo *nvbo = nouveau_gem_object(obj); - - ret = nouveau_bo_validate(nvbo, true, false); - if (ret) - goto err_exec_fini; + ret = drm_gpuvm_exec_validate(vme); + if (ret) { + drm_gpuvm_exec_unlock(vme); + return ret; } return 0; - -err_uvmm_unlock: - nouveau_uvmm_unlock(uvmm); -err_exec_fini: - drm_exec_fini(exec); - return ret; - } static void -nouveau_exec_job_armed_submit(struct nouveau_job *job) +nouveau_exec_job_armed_submit(struct nouveau_job *job, + struct drm_gpuvm_exec *vme) { - struct drm_exec *exec = &job->exec; - struct drm_gem_object *obj; - unsigned long index; - - drm_exec_for_each_locked_object(exec, index, obj) - dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage); - - drm_exec_fini(exec); + drm_gpuvm_exec_resv_add_fence(vme, job->done_fence, + job->resv_usage, job->resv_usage); + drm_gpuvm_exec_unlock(vme); } static struct dma_fence * diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.h b/drivers/gpu/drm/nouveau/nouveau_exec.h index 5488d337bcc0..5a8fe1208a44 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.h +++ b/drivers/gpu/drm/nouveau/nouveau_exec.h @@ -3,16 +3,12 @@ #ifndef __NOUVEAU_EXEC_H__ #define __NOUVEAU_EXEC_H__ -#include - #include "nouveau_drv.h" #include "nouveau_sched.h" struct nouveau_exec_job_args { struct drm_file *file_priv; struct nouveau_sched_entity *sched_entity; - - struct drm_exec exec; struct nouveau_channel *chan; struct { diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index e03fd2bc8a11..f69f4cbf251b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -263,6 +263,11 @@ nouveau_job_submit(struct nouveau_job *job) { struct nouveau_sched_entity *entity = to_nouveau_sched_entity(job->base.entity); struct dma_fence *done_fence = NULL; + struct drm_gpuvm_exec vm_exec = { + .vm = &nouveau_cli_uvmm(job->cli)->base, + .flags = DRM_EXEC_IGNORE_DUPLICATES, + .num_fences = 1, + }; int ret; ret = nouveau_job_add_deps(job); @@ -282,7 +287,7 @@ nouveau_job_submit(struct nouveau_job *job) * successfully. 
*/ if (job->ops->submit) { - ret = job->ops->submit(job); + ret = job->ops->submit(job, &vm_exec); if (ret) goto err_cleanup; } @@ -315,7 +320,7 @@ nouveau_job_submit(struct nouveau_job *job) set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &job->done_fence->flags); if (job->ops->armed_submit) - job->ops->armed_submit(job); + job->ops->armed_submit(job, &vm_exec); nouveau_job_fence_attach(job); diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h index 27ac19792597..0f87697dbc9e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.h +++ b/drivers/gpu/drm/nouveau/nouveau_sched.h @@ -5,7 +5,7 @@ #include -#include +#include #include #include "nouveau_drv.h" @@ -54,7 +54,6 @@ struct nouveau_job { struct drm_file *file_priv; struct nouveau_cli *cli; - struct drm_exec exec; enum dma_resv_usage resv_usage; struct dma_fence *done_fence; @@ -76,8 +75,8 @@ struct nouveau_job { /* If .submit() returns without any error, it is guaranteed that * armed_submit() is called. */ - int (*submit)(struct nouveau_job *); - void (*armed_submit)(struct nouveau_job *); + int (*submit)(struct nouveau_job *, struct drm_gpuvm_exec *); + void (*armed_submit)(struct nouveau_job *, struct drm_gpuvm_exec *); struct dma_fence *(*run)(struct nouveau_job *); void (*free)(struct nouveau_job *); enum drm_gpu_sched_stat (*timeout)(struct nouveau_job *); diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index eda7bb8624f1..90e6732ee9b5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -438,8 +438,9 @@ nouveau_uvma_region_complete(struct nouveau_uvma_region *reg) static void op_map_prepare_unwind(struct nouveau_uvma *uvma) { + struct drm_gpuva *va = &uvma->va; nouveau_uvma_gem_put(uvma); - drm_gpuva_remove(&uvma->va); + drm_gpuva_remove(va); nouveau_uvma_free(uvma); } @@ -468,6 +469,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm, break; case DRM_GPUVA_OP_REMAP: { struct drm_gpuva_op_remap *r = &op->remap; + struct drm_gpuva *va = r->unmap->va; if (r->next) op_map_prepare_unwind(new->next); @@ -475,7 +477,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm, if (r->prev) op_map_prepare_unwind(new->prev); - op_unmap_prepare_unwind(r->unmap->va); + op_unmap_prepare_unwind(va); break; } case DRM_GPUVA_OP_UNMAP: @@ -634,6 +636,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm, goto unwind; } } + break; } case DRM_GPUVA_OP_REMAP: { @@ -1135,12 +1138,53 @@ bind_link_gpuvas(struct bind_job_op *bop) } static int -nouveau_uvmm_bind_job_submit(struct nouveau_job *job) +bind_lock_validate(struct nouveau_job *job, struct drm_exec *exec, + unsigned int num_fences) +{ + struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job); + struct bind_job_op *op; + int ret; + + list_for_each_op(op, &bind_job->ops) { + struct drm_gpuva_op *va_op; + + if (!op->ops) + continue; + + drm_gpuva_for_each_op(va_op, op->ops) { + struct drm_gem_object *obj = op_gem_obj(va_op); + + if (unlikely(!obj)) + continue; + + ret = drm_exec_prepare_obj(exec, obj, num_fences); + if (ret) + return ret; + + /* Don't validate GEMs backing mappings we're about to + * unmap, it's not worth the effort. 
+ */ + if (va_op->op == DRM_GPUVA_OP_UNMAP) + continue; + + ret = nouveau_bo_validate(nouveau_gem_object(obj), + true, false); + if (ret) + return ret; + } + } + + return 0; +} + +static int +nouveau_uvmm_bind_job_submit(struct nouveau_job *job, + struct drm_gpuvm_exec *vme) { struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli); struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job); struct nouveau_sched_entity *entity = job->entity; - struct drm_exec *exec = &job->exec; + struct drm_exec *exec = &vme->exec; struct bind_job_op *op; int ret; @@ -1157,6 +1201,8 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) dma_resv_unlock(obj->resv); if (IS_ERR(op->vm_bo)) return PTR_ERR(op->vm_bo); + + drm_gpuvm_bo_extobj_add(op->vm_bo); } ret = bind_validate_op(job, op); @@ -1179,6 +1225,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) * unwind all GPU VA space changes on failure. */ nouveau_uvmm_lock(uvmm); + list_for_each_op(op, &bind_job->ops) { switch (op->op) { case OP_MAP_SPARSE: @@ -1290,55 +1337,13 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) } } - drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT | - DRM_EXEC_IGNORE_DUPLICATES); + drm_exec_init(exec, vme->flags); drm_exec_until_all_locked(exec) { - list_for_each_op(op, &bind_job->ops) { - struct drm_gpuva_op *va_op; - - if (IS_ERR_OR_NULL(op->ops)) - continue; - - drm_gpuva_for_each_op(va_op, op->ops) { - struct drm_gem_object *obj = op_gem_obj(va_op); - - if (unlikely(!obj)) - continue; - - ret = drm_exec_prepare_obj(exec, obj, 1); - drm_exec_retry_on_contention(exec); - if (ret) { - op = list_last_op(&bind_job->ops); - goto unwind; - } - } - } - } - - list_for_each_op(op, &bind_job->ops) { - struct drm_gpuva_op *va_op; - - if (IS_ERR_OR_NULL(op->ops)) - continue; - - drm_gpuva_for_each_op(va_op, op->ops) { - struct drm_gem_object *obj = op_gem_obj(va_op); - - if (unlikely(!obj)) - continue; - - /* Don't validate GEMs backing mappings we're about to - * unmap, it's not worth the effort. 
- */ - if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP)) - continue; - - ret = nouveau_bo_validate(nouveau_gem_object(obj), - true, false); - if (ret) { - op = list_last_op(&bind_job->ops); - goto unwind; - } + ret = bind_lock_validate(job, exec, vme->num_fences); + drm_exec_retry_on_contention(exec); + if (ret) { + op = list_last_op(&bind_job->ops); + goto unwind; } } @@ -1413,21 +1418,17 @@ unwind: } nouveau_uvmm_unlock(uvmm); - drm_exec_fini(exec); + drm_gpuvm_exec_unlock(vme); return ret; } static void -nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job) +nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job, + struct drm_gpuvm_exec *vme) { - struct drm_exec *exec = &job->exec; - struct drm_gem_object *obj; - unsigned long index; - - drm_exec_for_each_locked_object(exec, index, obj) - dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage); - - drm_exec_fini(exec); + drm_gpuvm_exec_resv_add_fence(vme, job->done_fence, + job->resv_usage, job->resv_usage); + drm_gpuvm_exec_unlock(vme); } static struct dma_fence * @@ -1815,8 +1816,17 @@ nouveau_uvmm_free(struct drm_gpuvm *gpuvm) kfree(uvmm); } +static int +nouveau_uvmm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec) +{ + struct nouveau_bo *nvbo = nouveau_gem_object(vm_bo->obj); + + return nouveau_bo_validate(nvbo, true, false); +} + static const struct drm_gpuvm_ops gpuvm_ops = { .vm_free = nouveau_uvmm_free, + .vm_bo_validate = nouveau_uvmm_bo_validate, }; int -- cgit v1.2.3 From 5f03a507b29e44a848f315c7240c19894dd8be4f Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Tue, 14 Nov 2023 01:27:24 +0100 Subject: drm/nouveau: implement 1:1 scheduler - entity relationship Recent patches to the DRM scheduler [1][2] allow for a variable number of run-queues and add support for (shared) workqueues rather than dedicated kthreads per scheduler. This allows us to create a 1:1 relationship between a GPU scheduler and a scheduler entity, in order to properly support firmware schedulers being able to handle an arbitrary amount of dynamically allocated command ring buffers. This perfectly matches Nouveau's needs, hence make use of it. Topology wise we create one scheduler instance per client (handling VM_BIND jobs) and one scheduler instance per channel (handling EXEC jobs). All channel scheduler instances share a workqueue, but every client scheduler instance has a dedicated workqueue. The latter is required to ensure that for VM_BIND job's free_job() work and run_job() work can always run concurrently and hence, free_job() work can never stall run_job() work. For EXEC jobs we don't have this requirement, since EXEC job's free_job() does not require to take any locks which indirectly or directly are held for allocations elsewhere. 
[1] https://lore.kernel.org/all/8f53f7ef-7621-4f0b-bdef-d8d20bc497ff@redhat.com/T/ [2] https://lore.kernel.org/all/20231031032439.1558703-1-matthew.brost@intel.com/T/ Signed-off-by: Danilo Krummrich Reviewed-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20231114002728.3491-1-dakr@redhat.com --- drivers/gpu/drm/nouveau/nouveau_abi16.c | 18 +-- drivers/gpu/drm/nouveau/nouveau_abi16.h | 2 +- drivers/gpu/drm/nouveau/nouveau_drm.c | 31 +++-- drivers/gpu/drm/nouveau/nouveau_drv.h | 9 +- drivers/gpu/drm/nouveau/nouveau_exec.c | 7 +- drivers/gpu/drm/nouveau/nouveau_exec.h | 2 +- drivers/gpu/drm/nouveau/nouveau_sched.c | 197 ++++++++++++++++++-------------- drivers/gpu/drm/nouveau/nouveau_sched.h | 35 +++--- drivers/gpu/drm/nouveau/nouveau_uvmm.c | 69 ++++------- drivers/gpu/drm/nouveau/nouveau_uvmm.h | 4 +- 10 files changed, 179 insertions(+), 195 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 2edd7bb13fae..7542e1787183 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -127,21 +127,14 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16, { struct nouveau_abi16_ntfy *ntfy, *temp; - /* When a client exits without waiting for it's queued up jobs to - * finish it might happen that we fault the channel. This is due to - * drm_file_free() calling drm_gem_release() before the postclose() - * callback. Hence, we can't tear down this scheduler entity before - * uvmm mappings are unmapped. Currently, we can't detect this case. - * - * However, this should be rare and harmless, since the channel isn't - * needed anymore. - */ - nouveau_sched_entity_fini(&chan->sched_entity); + /* Cancel all jobs from the entity's queue. */ + drm_sched_entity_fini(&chan->sched.entity); - /* wait for all activity to stop before cleaning up */ if (chan->chan) nouveau_channel_idle(chan->chan); + nouveau_sched_fini(&chan->sched); + /* cleanup notifier state */ list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) { nouveau_abi16_ntfy_fini(chan, ntfy); @@ -344,8 +337,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) if (ret) goto done; - ret = nouveau_sched_entity_init(&chan->sched_entity, &drm->sched, - drm->sched_wq); + ret = nouveau_sched_init(&chan->sched, drm, drm->sched_wq); if (ret) goto done; diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h index 9f538486c10e..1f5e243c0c75 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.h +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h @@ -26,7 +26,7 @@ struct nouveau_abi16_chan { struct nouveau_bo *ntfy; struct nouveau_vma *ntfy_vma; struct nvkm_mm heap; - struct nouveau_sched_entity sched_entity; + struct nouveau_sched sched; }; struct nouveau_abi16 { diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index f603eaef1560..7e5f19153829 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -201,9 +201,9 @@ nouveau_cli_fini(struct nouveau_cli *cli) WARN_ON(!list_empty(&cli->worker)); usif_client_fini(cli); + nouveau_sched_fini(&cli->sched); if (uvmm) nouveau_uvmm_fini(uvmm); - nouveau_sched_entity_fini(&cli->sched_entity); nouveau_vmm_fini(&cli->svm); nouveau_vmm_fini(&cli->vmm); nvif_mmu_dtor(&cli->mmu); @@ -310,8 +310,17 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, cli->mem = &mems[ret]; - ret = nouveau_sched_entity_init(&cli->sched_entity, &drm->sched, - drm->sched_wq); + /* Don't pass 
in the (shared) sched_wq in order to let + * nouveau_sched_init() create a dedicated one for VM_BIND jobs. + * + * This is required to ensure that for VM_BIND jobs free_job() work and + * run_job() work can always run concurrently and hence, free_job() work + * can never stall run_job() work. For EXEC jobs we don't have this + * requirement, since EXEC job's free_job() does not require to take any + * locks which indirectly or directly are held for allocations + * elsewhere. + */ + ret = nouveau_sched_init(&cli->sched, drm, NULL); if (ret) goto done; @@ -582,13 +591,16 @@ nouveau_drm_device_init(struct drm_device *dev) nvif_parent_ctor(&nouveau_parent, &drm->parent); drm->master.base.object.parent = &drm->parent; - ret = nouveau_sched_init(drm); - if (ret) + drm->sched_wq = alloc_workqueue("nouveau_sched_wq_shared", 0, + WQ_MAX_ACTIVE); + if (!drm->sched_wq) { + ret = -ENOMEM; goto fail_alloc; + } ret = nouveau_cli_init(drm, "DRM-master", &drm->master); if (ret) - goto fail_sched; + goto fail_wq; ret = nouveau_cli_init(drm, "DRM", &drm->client); if (ret) @@ -658,8 +670,8 @@ fail_ttm: nouveau_cli_fini(&drm->client); fail_master: nouveau_cli_fini(&drm->master); -fail_sched: - nouveau_sched_fini(drm); +fail_wq: + destroy_workqueue(drm->sched_wq); fail_alloc: nvif_parent_dtor(&drm->parent); kfree(drm); @@ -711,10 +723,9 @@ nouveau_drm_device_fini(struct drm_device *dev) } mutex_unlock(&drm->clients_lock); - nouveau_sched_fini(drm); - nouveau_cli_fini(&drm->client); nouveau_cli_fini(&drm->master); + destroy_workqueue(drm->sched_wq); nvif_parent_dtor(&drm->parent); mutex_destroy(&drm->clients_lock); kfree(drm); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 7f7051df84a6..8a6d94c8b163 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -98,7 +98,7 @@ struct nouveau_cli { bool disabled; } uvmm; - struct nouveau_sched_entity sched_entity; + struct nouveau_sched sched; const struct nvif_mclass *mem; @@ -258,6 +258,9 @@ struct nouveau_drm { u64 context_base; } *runl; + /* Workqueue used for channel schedulers. 
*/ + struct workqueue_struct *sched_wq; + /* context for accelerated drm-internal operations */ struct nouveau_channel *cechan; struct nouveau_channel *channel; @@ -298,10 +301,6 @@ struct nouveau_drm { struct mutex lock; bool component_registered; } audio; - - struct drm_gpu_scheduler sched; - struct workqueue_struct *sched_wq; - }; static inline struct nouveau_drm * diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c index f8ef7004d133..a179eeb6df09 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.c +++ b/drivers/gpu/drm/nouveau/nouveau_exec.c @@ -165,6 +165,7 @@ nouveau_exec_job_free(struct nouveau_job *job) { struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job); + nouveau_job_done(job); nouveau_job_free(job); kfree(exec_job->fence); @@ -184,8 +185,6 @@ nouveau_exec_job_timeout(struct nouveau_job *job) NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n", chan->chid); - nouveau_sched_entity_fini(job->entity); - return DRM_GPU_SCHED_STAT_NOMINAL; } @@ -234,7 +233,7 @@ nouveau_exec_job_init(struct nouveau_exec_job **pjob, job->chan = __args->chan; - args.sched_entity = __args->sched_entity; + args.sched = __args->sched; args.file_priv = __args->file_priv; args.in_sync.count = __args->in_sync.count; @@ -388,7 +387,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev, if (ret) goto out; - args.sched_entity = &chan16->sched_entity; + args.sched = &chan16->sched; args.file_priv = file_priv; args.chan = chan; diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.h b/drivers/gpu/drm/nouveau/nouveau_exec.h index 5a8fe1208a44..9b3b151facfd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.h +++ b/drivers/gpu/drm/nouveau/nouveau_exec.h @@ -8,7 +8,7 @@ struct nouveau_exec_job_args { struct drm_file *file_priv; - struct nouveau_sched_entity *sched_entity; + struct nouveau_sched *sched; struct nouveau_channel *chan; struct { diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index f69f4cbf251b..0f4583f6fcad 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -12,30 +12,29 @@ #include "nouveau_abi16.h" #include "nouveau_sched.h" -/* FIXME - * - * We want to make sure that jobs currently executing can't be deferred by - * other jobs competing for the hardware. Otherwise we might end up with job - * timeouts just because of too many clients submitting too many jobs. We don't - * want jobs to time out because of system load, but because of the job being - * too bulky. - * - * For now allow for up to 16 concurrent jobs in flight until we know how many - * rings the hardware can process in parallel. - */ -#define NOUVEAU_SCHED_HW_SUBMISSIONS 16 +#define NOUVEAU_SCHED_HW_SUBMISSIONS 1 #define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000 +/* Starts at 0, since the DRM scheduler interprets those parameters as (initial) + * index to the run-queue array. 
+ */ +enum nouveau_sched_priority { + NOUVEAU_SCHED_PRIORITY_SINGLE = DRM_SCHED_PRIORITY_MIN, + NOUVEAU_SCHED_PRIORITY_COUNT, +}; + int nouveau_job_init(struct nouveau_job *job, struct nouveau_job_args *args) { - struct nouveau_sched_entity *entity = args->sched_entity; + struct nouveau_sched *sched = args->sched; int ret; + INIT_LIST_HEAD(&job->entry); + job->file_priv = args->file_priv; job->cli = nouveau_cli(args->file_priv); - job->entity = entity; + job->sched = sched; job->sync = args->sync; job->resv_usage = args->resv_usage; @@ -89,7 +88,7 @@ nouveau_job_init(struct nouveau_job *job, } - ret = drm_sched_job_init(&job->base, &entity->base, 1, NULL); + ret = drm_sched_job_init(&job->base, &sched->entity, 1, NULL); if (ret) goto err_free_chains; @@ -108,6 +107,27 @@ err_free_in_sync: return ret; } +void +nouveau_job_fini(struct nouveau_job *job) +{ + dma_fence_put(job->done_fence); + drm_sched_job_cleanup(&job->base); + + job->ops->free(job); +} + +void +nouveau_job_done(struct nouveau_job *job) +{ + struct nouveau_sched *sched = job->sched; + + spin_lock(&sched->job.list.lock); + list_del(&job->entry); + spin_unlock(&sched->job.list.lock); + + wake_up(&sched->job.wq); +} + void nouveau_job_free(struct nouveau_job *job) { @@ -117,13 +137,6 @@ nouveau_job_free(struct nouveau_job *job) kfree(job->out_sync.chains); } -void nouveau_job_fini(struct nouveau_job *job) -{ - dma_fence_put(job->done_fence); - drm_sched_job_cleanup(&job->base); - job->ops->free(job); -} - static int sync_find_fence(struct nouveau_job *job, struct drm_nouveau_sync *sync, @@ -261,7 +274,7 @@ nouveau_job_fence_attach(struct nouveau_job *job) int nouveau_job_submit(struct nouveau_job *job) { - struct nouveau_sched_entity *entity = to_nouveau_sched_entity(job->base.entity); + struct nouveau_sched *sched = job->sched; struct dma_fence *done_fence = NULL; struct drm_gpuvm_exec vm_exec = { .vm = &nouveau_cli_uvmm(job->cli)->base, @@ -281,7 +294,7 @@ nouveau_job_submit(struct nouveau_job *job) /* Make sure the job appears on the sched_entity's queue in the same * order as it was submitted. */ - mutex_lock(&entity->mutex); + mutex_lock(&sched->mutex); /* Guarantee we won't fail after the submit() callback returned * successfully. @@ -292,33 +305,16 @@ nouveau_job_submit(struct nouveau_job *job) goto err_cleanup; } + /* Submit was successful; add the job to the schedulers job list. */ + spin_lock(&sched->job.list.lock); + list_add(&job->entry, &sched->job.list.head); + spin_unlock(&sched->job.list.lock); + drm_sched_job_arm(&job->base); job->done_fence = dma_fence_get(&job->base.s_fence->finished); if (job->sync) done_fence = dma_fence_get(job->done_fence); - /* If a sched job depends on a dma-fence from a job from the same GPU - * scheduler instance, but a different scheduler entity, the GPU - * scheduler does only wait for the particular job to be scheduled, - * rather than for the job to fully complete. This is due to the GPU - * scheduler assuming that there is a scheduler instance per ring. - * However, the current implementation, in order to avoid arbitrary - * amounts of kthreads, has a single scheduler instance while scheduler - * entities represent rings. - * - * As a workaround, set the DRM_SCHED_FENCE_DONT_PIPELINE for all - * out-fences in order to force the scheduler to wait for full job - * completion for dependent jobs from different entities and same - * scheduler instance. 
- * - * There is some work in progress [1] to address the issues of firmware - * schedulers; once it is in-tree the scheduler topology in Nouveau - * should be re-worked accordingly. - * - * [1] https://lore.kernel.org/dri-devel/20230801205103.627779-1-matthew.brost@intel.com/ - */ - set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &job->done_fence->flags); - if (job->ops->armed_submit) job->ops->armed_submit(job, &vm_exec); @@ -331,7 +327,7 @@ nouveau_job_submit(struct nouveau_job *job) drm_sched_entity_push_job(&job->base); - mutex_unlock(&entity->mutex); + mutex_unlock(&sched->mutex); if (done_fence) { dma_fence_wait(done_fence, true); @@ -341,20 +337,13 @@ nouveau_job_submit(struct nouveau_job *job) return 0; err_cleanup: - mutex_unlock(&entity->mutex); + mutex_unlock(&sched->mutex); nouveau_job_fence_attach_cleanup(job); err: job->state = NOUVEAU_JOB_SUBMIT_FAILED; return ret; } -bool -nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity, - struct work_struct *work) -{ - return queue_work(entity->sched_wq, work); -} - static struct dma_fence * nouveau_job_run(struct nouveau_job *job) { @@ -404,50 +393,82 @@ nouveau_sched_free_job(struct drm_sched_job *sched_job) nouveau_job_fini(job); } -int nouveau_sched_entity_init(struct nouveau_sched_entity *entity, - struct drm_gpu_scheduler *sched, - struct workqueue_struct *sched_wq) -{ - mutex_init(&entity->mutex); - spin_lock_init(&entity->job.list.lock); - INIT_LIST_HEAD(&entity->job.list.head); - init_waitqueue_head(&entity->job.wq); - - entity->sched_wq = sched_wq; - return drm_sched_entity_init(&entity->base, - DRM_SCHED_PRIORITY_NORMAL, - &sched, 1, NULL); -} - -void -nouveau_sched_entity_fini(struct nouveau_sched_entity *entity) -{ - drm_sched_entity_destroy(&entity->base); -} - static const struct drm_sched_backend_ops nouveau_sched_ops = { .run_job = nouveau_sched_run_job, .timedout_job = nouveau_sched_timedout_job, .free_job = nouveau_sched_free_job, }; -int nouveau_sched_init(struct nouveau_drm *drm) +int +nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm, + struct workqueue_struct *wq) { - struct drm_gpu_scheduler *sched = &drm->sched; + struct drm_gpu_scheduler *drm_sched = &sched->base; + struct drm_sched_entity *entity = &sched->entity; long job_hang_limit = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS); + int ret; - drm->sched_wq = create_singlethread_workqueue("nouveau_sched_wq"); - if (!drm->sched_wq) - return -ENOMEM; + if (!wq) { + wq = alloc_workqueue("nouveau_sched_wq_%d", 0, WQ_MAX_ACTIVE, + current->pid); + if (!wq) + return -ENOMEM; + + sched->wq = wq; + } - return drm_sched_init(sched, &nouveau_sched_ops, NULL, - DRM_SCHED_PRIORITY_COUNT, - NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit, - NULL, NULL, "nouveau_sched", drm->dev->dev); + ret = drm_sched_init(drm_sched, &nouveau_sched_ops, wq, + NOUVEAU_SCHED_PRIORITY_COUNT, + NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit, + NULL, NULL, "nouveau_sched", drm->dev->dev); + if (ret) + goto fail_wq; + + /* Using DRM_SCHED_PRIORITY_MIN, since that's what we're required to use + * when we want to have a single run-queue only. + * + * It's not documented, but one will find out when trying to use any + * other priority running into faults, because the scheduler uses the + * priority as array index. + * + * Can't use NOUVEAU_SCHED_PRIORITY_SINGLE either, because it's not + * matching the enum type used in drm_sched_entity_init(). 
+ */ + ret = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_MIN, + &drm_sched, 1, NULL); + if (ret) + goto fail_sched; + + mutex_init(&sched->mutex); + spin_lock_init(&sched->job.list.lock); + INIT_LIST_HEAD(&sched->job.list.head); + init_waitqueue_head(&sched->job.wq); + + return 0; + +fail_sched: + drm_sched_fini(drm_sched); +fail_wq: + if (sched->wq) + destroy_workqueue(sched->wq); + return ret; } -void nouveau_sched_fini(struct nouveau_drm *drm) +void +nouveau_sched_fini(struct nouveau_sched *sched) { - destroy_workqueue(drm->sched_wq); - drm_sched_fini(&drm->sched); + struct drm_gpu_scheduler *drm_sched = &sched->base; + struct drm_sched_entity *entity = &sched->entity; + + rmb(); /* for list_empty to work without lock */ + wait_event(sched->job.wq, list_empty(&sched->job.list.head)); + + drm_sched_entity_fini(entity); + drm_sched_fini(drm_sched); + + /* Destroy workqueue after scheduler tear down, otherwise it might still + * be in use. + */ + if (sched->wq) + destroy_workqueue(sched->wq); } diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h index 0f87697dbc9e..23eff4b0f5b2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.h +++ b/drivers/gpu/drm/nouveau/nouveau_sched.h @@ -26,7 +26,7 @@ enum nouveau_job_state { struct nouveau_job_args { struct drm_file *file_priv; - struct nouveau_sched_entity *sched_entity; + struct nouveau_sched *sched; enum dma_resv_usage resv_usage; bool sync; @@ -49,7 +49,8 @@ struct nouveau_job { enum nouveau_job_state state; - struct nouveau_sched_entity *entity; + struct nouveau_sched *sched; + struct list_head entry; struct drm_file *file_priv; struct nouveau_cli *cli; @@ -89,20 +90,17 @@ int nouveau_job_ucopy_syncs(struct nouveau_job_args *args, int nouveau_job_init(struct nouveau_job *job, struct nouveau_job_args *args); -void nouveau_job_free(struct nouveau_job *job); - -int nouveau_job_submit(struct nouveau_job *job); void nouveau_job_fini(struct nouveau_job *job); +int nouveau_job_submit(struct nouveau_job *job); +void nouveau_job_done(struct nouveau_job *job); +void nouveau_job_free(struct nouveau_job *job); -#define to_nouveau_sched_entity(entity) \ - container_of((entity), struct nouveau_sched_entity, base) - -struct nouveau_sched_entity { - struct drm_sched_entity base; +struct nouveau_sched { + struct drm_gpu_scheduler base; + struct drm_sched_entity entity; + struct workqueue_struct *wq; struct mutex mutex; - struct workqueue_struct *sched_wq; - struct { struct { struct list_head head; @@ -112,15 +110,8 @@ struct nouveau_sched_entity { } job; }; -int nouveau_sched_entity_init(struct nouveau_sched_entity *entity, - struct drm_gpu_scheduler *sched, - struct workqueue_struct *sched_wq); -void nouveau_sched_entity_fini(struct nouveau_sched_entity *entity); - -bool nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity, - struct work_struct *work); - -int nouveau_sched_init(struct nouveau_drm *drm); -void nouveau_sched_fini(struct nouveau_drm *drm); +int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm, + struct workqueue_struct *wq); +void nouveau_sched_fini(struct nouveau_sched *sched); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index 90e6732ee9b5..46493fcb82fd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -963,6 +963,12 @@ nouveau_uvmm_bind_job_free(struct kref *kref) { struct nouveau_uvmm_bind_job *job = container_of(kref, struct nouveau_uvmm_bind_job, kref); + 
struct bind_job_op *op, *next; + + list_for_each_op_safe(op, next, &job->ops) { + list_del(&op->entry); + kfree(op); + } nouveau_job_free(&job->base); kfree(job); @@ -1004,14 +1010,16 @@ bind_validate_op(struct nouveau_job *job, static void bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range) { - struct nouveau_uvmm_bind_job *bind_job; - struct nouveau_sched_entity *entity = job->entity; + struct nouveau_sched *sched = job->sched; + struct nouveau_job *__job; struct bind_job_op *op; u64 end = addr + range; again: - spin_lock(&entity->job.list.lock); - list_for_each_entry(bind_job, &entity->job.list.head, entry) { + spin_lock(&sched->job.list.lock); + list_for_each_entry(__job, &sched->job.list.head, entry) { + struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job); + list_for_each_op(op, &bind_job->ops) { if (op->op == OP_UNMAP) { u64 op_addr = op->va.addr; @@ -1019,7 +1027,7 @@ again: if (!(end <= op_addr || addr >= op_end)) { nouveau_uvmm_bind_job_get(bind_job); - spin_unlock(&entity->job.list.lock); + spin_unlock(&sched->job.list.lock); wait_for_completion(&bind_job->complete); nouveau_uvmm_bind_job_put(bind_job); goto again; @@ -1027,7 +1035,7 @@ again: } } } - spin_unlock(&entity->job.list.lock); + spin_unlock(&sched->job.list.lock); } static int @@ -1183,7 +1191,6 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job, { struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli); struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job); - struct nouveau_sched_entity *entity = job->entity; struct drm_exec *exec = &vme->exec; struct bind_job_op *op; int ret; @@ -1380,10 +1387,6 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job, } nouveau_uvmm_unlock(uvmm); - spin_lock(&entity->job.list.lock); - list_add(&bind_job->entry, &entity->job.list.head); - spin_unlock(&entity->job.list.lock); - return 0; unwind_continue: @@ -1466,14 +1469,11 @@ out: } static void -nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work) +nouveau_uvmm_bind_job_cleanup(struct nouveau_job *job) { - struct nouveau_uvmm_bind_job *bind_job = - container_of(work, struct nouveau_uvmm_bind_job, work); - struct nouveau_job *job = &bind_job->base; + struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job); struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli); - struct nouveau_sched_entity *entity = job->entity; - struct bind_job_op *op, *next; + struct bind_job_op *op; list_for_each_op(op, &bind_job->ops) { struct drm_gem_object *obj = op->gem.obj; @@ -1525,38 +1525,17 @@ nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work) drm_gem_object_put(obj); } - spin_lock(&entity->job.list.lock); - list_del(&bind_job->entry); - spin_unlock(&entity->job.list.lock); - + nouveau_job_done(job); complete_all(&bind_job->complete); - wake_up(&entity->job.wq); - - /* Remove and free ops after removing the bind job from the job list to - * avoid races against bind_validate_map_sparse(). 
- */ - list_for_each_op_safe(op, next, &bind_job->ops) { - list_del(&op->entry); - kfree(op); - } nouveau_uvmm_bind_job_put(bind_job); } -static void -nouveau_uvmm_bind_job_free_qwork(struct nouveau_job *job) -{ - struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job); - struct nouveau_sched_entity *entity = job->entity; - - nouveau_sched_entity_qwork(entity, &bind_job->work); -} - static struct nouveau_job_ops nouveau_bind_job_ops = { .submit = nouveau_uvmm_bind_job_submit, .armed_submit = nouveau_uvmm_bind_job_armed_submit, .run = nouveau_uvmm_bind_job_run, - .free = nouveau_uvmm_bind_job_free_qwork, + .free = nouveau_uvmm_bind_job_cleanup, }; static int @@ -1617,7 +1596,6 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob, return ret; INIT_LIST_HEAD(&job->ops); - INIT_LIST_HEAD(&job->entry); for (i = 0; i < __args->op.count; i++) { ret = bind_job_op_from_uop(&op, &__args->op.s[i]); @@ -1628,9 +1606,8 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob, } init_completion(&job->complete); - INIT_WORK(&job->work, nouveau_uvmm_bind_job_free_work_fn); - args.sched_entity = __args->sched_entity; + args.sched = __args->sched; args.file_priv = __args->file_priv; args.in_sync.count = __args->in_sync.count; @@ -1758,7 +1735,7 @@ nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev, if (ret) return ret; - args.sched_entity = &cli->sched_entity; + args.sched = &cli->sched; args.file_priv = file_priv; ret = nouveau_uvmm_vm_bind(&args); @@ -1910,12 +1887,8 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm) MA_STATE(mas, &uvmm->region_mt, 0, 0); struct nouveau_uvma_region *reg; struct nouveau_cli *cli = uvmm->vmm.cli; - struct nouveau_sched_entity *entity = &cli->sched_entity; struct drm_gpuva *va, *next; - rmb(); /* for list_empty to work without lock */ - wait_event(entity->job.wq, list_empty(&entity->job.list.head)); - nouveau_uvmm_lock(uvmm); drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) { struct nouveau_uvma *uvma = uvma_from_va(va); diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h index f0a6d98ace4f..9d3c348581eb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h @@ -44,8 +44,6 @@ struct nouveau_uvmm_bind_job { struct nouveau_job base; struct kref kref; - struct list_head entry; - struct work_struct work; struct completion complete; /* struct bind_job_op */ @@ -54,7 +52,7 @@ struct nouveau_uvmm_bind_job { struct nouveau_uvmm_bind_job_args { struct drm_file *file_priv; - struct nouveau_sched_entity *sched_entity; + struct nouveau_sched *sched; unsigned int flags; -- cgit v1.2.3 From 46990918f35c1bf6e367cf8e0423e7344fec9fcb Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Tue, 14 Nov 2023 01:27:25 +0100 Subject: drm/nouveau: enable dynamic job-flow control Make use of the scheduler's credit limit and scheduler job's credit count to account for the actual size of a job, such that we fill up the ring efficiently. 
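
For illustration, a condensed sketch of how a job's credit count relates to the scheduler's credit limit after this change. It is not taken from the patch; example_job, push_count and example_job_init() are made-up names standing in for the driver's own structures.

	struct example_job {
		struct drm_sched_job base;
		u32 push_count;	/* ring pushes this job will emit */
	};

	static int example_job_init(struct example_job *job,
				    struct drm_sched_entity *entity)
	{
		/* One credit per push, plus one for the HW fence, mirroring
		 * what nouveau_exec_job_init() does below.
		 */
		u32 credits = job->push_count + 1;

		/* The scheduler only picks the job once 'credits' fit into
		 * the credit_limit passed to drm_sched_init() (the channel's
		 * ib_max for EXEC jobs, 1 for VM_BIND jobs), so the ring is
		 * filled efficiently but never over-committed.
		 */
		return drm_sched_job_init(&job->base, entity, credits, NULL);
	}
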
Signed-off-by: Danilo Krummrich Reviewed-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20231114002728.3491-2-dakr@redhat.com --- drivers/gpu/drm/nouveau/nouveau_abi16.c | 3 ++- drivers/gpu/drm/nouveau/nouveau_drm.c | 2 +- drivers/gpu/drm/nouveau/nouveau_exec.c | 4 +++- drivers/gpu/drm/nouveau/nouveau_sched.c | 9 ++++----- drivers/gpu/drm/nouveau/nouveau_sched.h | 3 ++- drivers/gpu/drm/nouveau/nouveau_uvmm.c | 4 +++- 6 files changed, 15 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 7542e1787183..a04156ca8390 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -337,7 +337,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) if (ret) goto done; - ret = nouveau_sched_init(&chan->sched, drm, drm->sched_wq); + ret = nouveau_sched_init(&chan->sched, drm, drm->sched_wq, + chan->chan->dma.ib_max); if (ret) goto done; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 7e5f19153829..6f6c31a9937b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -320,7 +320,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, * locks which indirectly or directly are held for allocations * elsewhere. */ - ret = nouveau_sched_init(&cli->sched, drm, NULL); + ret = nouveau_sched_init(&cli->sched, drm, NULL, 1); if (ret) goto done; diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c index a179eeb6df09..bc5d71b79ab2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.c +++ b/drivers/gpu/drm/nouveau/nouveau_exec.c @@ -231,10 +231,12 @@ nouveau_exec_job_init(struct nouveau_exec_job **pjob, } } + args.file_priv = __args->file_priv; job->chan = __args->chan; args.sched = __args->sched; - args.file_priv = __args->file_priv; + /* Plus one to account for the HW fence. 
*/ + args.credits = job->push.count + 1; args.in_sync.count = __args->in_sync.count; args.in_sync.s = __args->in_sync.s; diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index 0f4583f6fcad..3393647bd944 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -12,7 +12,6 @@ #include "nouveau_abi16.h" #include "nouveau_sched.h" -#define NOUVEAU_SCHED_HW_SUBMISSIONS 1 #define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000 /* Starts at 0, since the DRM scheduler interprets those parameters as (initial) @@ -85,10 +84,10 @@ nouveau_job_init(struct nouveau_job *job, ret = -ENOMEM; goto err_free_objs; } - } - ret = drm_sched_job_init(&job->base, &sched->entity, 1, NULL); + ret = drm_sched_job_init(&job->base, &sched->entity, + args->credits, NULL); if (ret) goto err_free_chains; @@ -401,7 +400,7 @@ static const struct drm_sched_backend_ops nouveau_sched_ops = { int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm, - struct workqueue_struct *wq) + struct workqueue_struct *wq, u32 credit_limit) { struct drm_gpu_scheduler *drm_sched = &sched->base; struct drm_sched_entity *entity = &sched->entity; @@ -419,7 +418,7 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm, ret = drm_sched_init(drm_sched, &nouveau_sched_ops, wq, NOUVEAU_SCHED_PRIORITY_COUNT, - NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit, + credit_limit, 0, job_hang_limit, NULL, NULL, "nouveau_sched", drm->dev->dev); if (ret) goto fail_wq; diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h index 23eff4b0f5b2..a6528f5981e6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.h +++ b/drivers/gpu/drm/nouveau/nouveau_sched.h @@ -27,6 +27,7 @@ enum nouveau_job_state { struct nouveau_job_args { struct drm_file *file_priv; struct nouveau_sched *sched; + u32 credits; enum dma_resv_usage resv_usage; bool sync; @@ -111,7 +112,7 @@ struct nouveau_sched { }; int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm, - struct workqueue_struct *wq); + struct workqueue_struct *wq, u32 credit_limit); void nouveau_sched_fini(struct nouveau_sched *sched); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index 46493fcb82fd..4b236da79509 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -1607,9 +1607,11 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob, init_completion(&job->complete); - args.sched = __args->sched; args.file_priv = __args->file_priv; + args.sched = __args->sched; + args.credits = 1; + args.in_sync.count = __args->in_sync.count; args.in_sync.s = __args->in_sync.s; -- cgit v1.2.3 From 2bbe6ab2be53858507f11f99f856846d04765ae3 Mon Sep 17 00:00:00 2001 From: Luben Tuikov Date: Wed, 22 Nov 2023 23:58:53 -0500 Subject: drm/sched: Fix bounds limiting when given a malformed entity MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If we're given a malformed entity in drm_sched_entity_init()--shouldn't happen, but we verify--with out-of-bounds priority value, we set it to an allowed value. Fix the expression which sets this limit. 
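
To see the difference, a minimal illustration (not part of the patch) of the old and fixed clamping expressions for a scheduler created with a single run-queue (num_rqs == 1) and an entity requesting an out-of-bounds priority such as 3:

	static u32 clamp_priority_old(u32 num_rqs)
	{
		/* num_rqs == 1: max(1, 0) == 1, still indexes past sched_rq[0] */
		return max_t(u32, num_rqs, DRM_SCHED_PRIORITY_MIN);
	}

	static u32 clamp_priority_fixed(u32 num_rqs)
	{
		/* num_rqs == 1: max(0, 0) == 0, the highest valid index; the
		 * signed math also protects against num_rqs == 0.
		 */
		return max_t(s32, (s32)num_rqs - 1, (s32)DRM_SCHED_PRIORITY_MIN);
	}
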
Signed-off-by: Luben Tuikov Fixes: 56e449603f0ac5 ("drm/sched: Convert the GPU scheduler to variable number of run-queues") Link: https://patchwork.freedesktop.org/patch/msgid/20231123122422.167832-2-ltuikov89@gmail.com Reviewed-by: Christian König Link: https://lore.kernel.org/r/dbb91dbe-ef77-4d79-aaf9-2adb171c1d7a@amd.com --- drivers/gpu/drm/scheduler/sched_entity.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 4d42b1e4daa6..ee645d38e98d 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -81,12 +81,15 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, */ pr_warn("%s: called with uninitialized scheduler\n", __func__); } else if (num_sched_list) { - /* The "priority" of an entity cannot exceed the number - * of run-queues of a scheduler. + /* The "priority" of an entity cannot exceed the number of run-queues of a + * scheduler. Protect against num_rqs being 0, by converting to signed. */ - if (entity->priority >= sched_list[0]->num_rqs) - entity->priority = max_t(u32, sched_list[0]->num_rqs, - DRM_SCHED_PRIORITY_MIN); + if (entity->priority >= sched_list[0]->num_rqs) { + drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n", + entity->priority, sched_list[0]->num_rqs); + entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1, + (s32) DRM_SCHED_PRIORITY_MIN); + } entity->rq = sched_list[0]->sched_rq[entity->priority]; } -- cgit v1.2.3 From fe375c74806dbd30b00ec038a80a5b7bf4653ab7 Mon Sep 17 00:00:00 2001 From: Luben Tuikov Date: Tue, 14 Nov 2023 20:40:58 -0500 Subject: drm/sched: Rename priority MIN to LOW MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename DRM_SCHED_PRIORITY_MIN to DRM_SCHED_PRIORITY_LOW. This mirrors DRM_SCHED_PRIORITY_HIGH, for a list of DRM scheduler priorities in ascending order, DRM_SCHED_PRIORITY_LOW, DRM_SCHED_PRIORITY_NORMAL, DRM_SCHED_PRIORITY_HIGH, DRM_SCHED_PRIORITY_KERNEL. 
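
For reference, a sketch of the resulting enumeration as applied by the diff below, listed in ascending order; existing callers only need to spell DRM_SCHED_PRIORITY_MIN as DRM_SCHED_PRIORITY_LOW, the value itself is unchanged:

	enum drm_sched_priority {
		DRM_SCHED_PRIORITY_LOW,		/* formerly DRM_SCHED_PRIORITY_MIN */
		DRM_SCHED_PRIORITY_NORMAL,
		DRM_SCHED_PRIORITY_HIGH,
		DRM_SCHED_PRIORITY_KERNEL,

		DRM_SCHED_PRIORITY_COUNT
	};
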
Cc: Rob Clark Cc: Abhinav Kumar Cc: Dmitry Baryshkov Cc: Danilo Krummrich Cc: Alex Deucher Cc: Christian König Cc: linux-arm-msm@vger.kernel.org Cc: freedreno@lists.freedesktop.org Cc: dri-devel@lists.freedesktop.org Signed-off-by: Luben Tuikov Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20231124052752.6915-5-ltuikov89@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 +- drivers/gpu/drm/msm/msm_gpu.h | 2 +- drivers/gpu/drm/scheduler/sched_entity.c | 2 +- drivers/gpu/drm/scheduler/sched_main.c | 10 +++++----- include/drm/gpu_scheduler.h | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index e2ae9ba147ba..5cb33ac99f70 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -73,10 +73,10 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio) return DRM_SCHED_PRIORITY_NORMAL; case AMDGPU_CTX_PRIORITY_VERY_LOW: - return DRM_SCHED_PRIORITY_MIN; + return DRM_SCHED_PRIORITY_LOW; case AMDGPU_CTX_PRIORITY_LOW: - return DRM_SCHED_PRIORITY_MIN; + return DRM_SCHED_PRIORITY_LOW; case AMDGPU_CTX_PRIORITY_NORMAL: return DRM_SCHED_PRIORITY_NORMAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 62bb7fc7448a..1a25931607c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -325,7 +325,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched) int i; /* Signal all jobs not yet scheduled */ - for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) { struct drm_sched_rq *rq = sched->sched_rq[i]; spin_lock(&rq->lock); list_for_each_entry(s_entity, &rq->entities, list) { diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 4252e3839fbc..eb0c97433e5f 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -347,7 +347,7 @@ struct msm_gpu_perfcntr { * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some * cases, so we don't use it (no need for kernel generated jobs). 
*/ -#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN) +#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_LOW) /** * struct msm_file_private - per-drm_file context diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index ee645d38e98d..dd2b8f777f51 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -88,7 +88,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n", entity->priority, sched_list[0]->num_rqs); entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1, - (s32) DRM_SCHED_PRIORITY_MIN); + (s32) DRM_SCHED_PRIORITY_LOW); } entity->rq = sched_list[0]->sched_rq[entity->priority]; } diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 044a8c4875ba..b6d7bc49ff6e 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -1052,7 +1052,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched) int i; /* Kernel run queue has higher priority than normal run queue*/ - for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) { entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ? drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) : drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]); @@ -1291,7 +1291,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, if (!sched->sched_rq) goto Out_free; sched->num_rqs = num_rqs; - for (i = DRM_SCHED_PRIORITY_MIN; i < sched->num_rqs; i++) { + for (i = DRM_SCHED_PRIORITY_LOW; i < sched->num_rqs; i++) { sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL); if (!sched->sched_rq[i]) goto Out_unroll; @@ -1312,7 +1312,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, sched->ready = true; return 0; Out_unroll: - for (--i ; i >= DRM_SCHED_PRIORITY_MIN; i--) + for (--i ; i >= DRM_SCHED_PRIORITY_LOW; i--) kfree(sched->sched_rq[i]); Out_free: kfree(sched->sched_rq); @@ -1338,7 +1338,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched) drm_sched_wqueue_stop(sched); - for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) { struct drm_sched_rq *rq = sched->sched_rq[i]; spin_lock(&rq->lock); @@ -1390,7 +1390,7 @@ void drm_sched_increase_karma(struct drm_sched_job *bad) if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) { atomic_inc(&bad->karma); - for (i = DRM_SCHED_PRIORITY_MIN; + for (i = DRM_SCHED_PRIORITY_LOW; i < min_t(typeof(sched->num_rqs), sched->num_rqs, DRM_SCHED_PRIORITY_KERNEL); i++) { struct drm_sched_rq *rq = sched->sched_rq[i]; diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 9a50348bd5c0..d8e2d84d9223 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -63,7 +63,7 @@ struct drm_file; * to an array, and as such should start at 0. 
*/ enum drm_sched_priority { - DRM_SCHED_PRIORITY_MIN, + DRM_SCHED_PRIORITY_LOW, DRM_SCHED_PRIORITY_NORMAL, DRM_SCHED_PRIORITY_HIGH, DRM_SCHED_PRIORITY_KERNEL, -- cgit v1.2.3 From 38f922a563aac3148ac73e73689805917f034cb5 Mon Sep 17 00:00:00 2001 From: Luben Tuikov Date: Wed, 22 Nov 2023 22:08:48 -0500 Subject: drm/sched: Reverse run-queue priority enumeration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverse run-queue priority enumeration such that the higest priority is now 0, and for each consecutive integer the prioirty diminishes. Run-queues correspond to priorities. To an external observer a scheduler created with a single run-queue, and another created with DRM_SCHED_PRIORITY_COUNT number of run-queues, should always schedule sched->sched_rq[0] with the same "priority", as that index run-queue exists in both schedulers, i.e. a scheduler with one run-queue or many. This patch makes it so. In other words, the "priority" of sched->sched_rq[n], n >= 0, is the same for any scheduler created with any allowable number of run-queues (priorities), 0 to DRM_SCHED_PRIORITY_COUNT. Cc: Rob Clark Cc: Abhinav Kumar Cc: Dmitry Baryshkov Cc: Danilo Krummrich Cc: Alex Deucher Cc: Christian König Cc: linux-arm-msm@vger.kernel.org Cc: freedreno@lists.freedesktop.org Cc: dri-devel@lists.freedesktop.org Signed-off-by: Luben Tuikov Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20231124052752.6915-6-ltuikov89@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 +- drivers/gpu/drm/msm/msm_gpu.h | 2 +- drivers/gpu/drm/scheduler/sched_entity.c | 5 +++-- drivers/gpu/drm/scheduler/sched_main.c | 15 +++++++-------- include/drm/gpu_scheduler.h | 6 +++--- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 1a25931607c5..71a5cf37b472 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -325,7 +325,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched) int i; /* Signal all jobs not yet scheduled */ - for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) { + for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) { struct drm_sched_rq *rq = sched->sched_rq[i]; spin_lock(&rq->lock); list_for_each_entry(s_entity, &rq->entities, list) { diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index eb0c97433e5f..2bfcb222e353 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -347,7 +347,7 @@ struct msm_gpu_perfcntr { * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some * cases, so we don't use it (no need for kernel generated jobs). */ -#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_LOW) +#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH) /** * struct msm_file_private - per-drm_file context diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index dd2b8f777f51..3c4f5a392b06 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -82,13 +82,14 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, pr_warn("%s: called with uninitialized scheduler\n", __func__); } else if (num_sched_list) { /* The "priority" of an entity cannot exceed the number of run-queues of a - * scheduler. 
Protect against num_rqs being 0, by converting to signed. + * scheduler. Protect against num_rqs being 0, by converting to signed. Choose + * the lowest priority available. */ if (entity->priority >= sched_list[0]->num_rqs) { drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n", entity->priority, sched_list[0]->num_rqs); entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1, - (s32) DRM_SCHED_PRIORITY_LOW); + (s32) DRM_SCHED_PRIORITY_KERNEL); } entity->rq = sched_list[0]->sched_rq[entity->priority]; } diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index b6d7bc49ff6e..682aebe96db7 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -1051,8 +1051,9 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched) struct drm_sched_entity *entity; int i; - /* Kernel run queue has higher priority than normal run queue*/ - for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) { + /* Start with the highest priority. + */ + for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) { entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ? drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) : drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]); @@ -1291,7 +1292,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, if (!sched->sched_rq) goto Out_free; sched->num_rqs = num_rqs; - for (i = DRM_SCHED_PRIORITY_LOW; i < sched->num_rqs; i++) { + for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) { sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL); if (!sched->sched_rq[i]) goto Out_unroll; @@ -1312,7 +1313,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, sched->ready = true; return 0; Out_unroll: - for (--i ; i >= DRM_SCHED_PRIORITY_LOW; i--) + for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--) kfree(sched->sched_rq[i]); Out_free: kfree(sched->sched_rq); @@ -1338,7 +1339,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched) drm_sched_wqueue_stop(sched); - for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) { + for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) { struct drm_sched_rq *rq = sched->sched_rq[i]; spin_lock(&rq->lock); @@ -1390,9 +1391,7 @@ void drm_sched_increase_karma(struct drm_sched_job *bad) if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) { atomic_inc(&bad->karma); - for (i = DRM_SCHED_PRIORITY_LOW; - i < min_t(typeof(sched->num_rqs), sched->num_rqs, DRM_SCHED_PRIORITY_KERNEL); - i++) { + for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) { struct drm_sched_rq *rq = sched->sched_rq[i]; spin_lock(&rq->lock); diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index d8e2d84d9223..5acc64954a88 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -63,10 +63,10 @@ struct drm_file; * to an array, and as such should start at 0. 
*/ enum drm_sched_priority { - DRM_SCHED_PRIORITY_LOW, - DRM_SCHED_PRIORITY_NORMAL, - DRM_SCHED_PRIORITY_HIGH, DRM_SCHED_PRIORITY_KERNEL, + DRM_SCHED_PRIORITY_HIGH, + DRM_SCHED_PRIORITY_NORMAL, + DRM_SCHED_PRIORITY_LOW, DRM_SCHED_PRIORITY_COUNT }; -- cgit v1.2.3 From b0a7ce53d494c94dfacb5a877fc0668f2a688652 Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Sat, 11 Nov 2023 08:08:56 -0500 Subject: drm/ttm: Schedule delayed_delete worker closer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Try to allocate system memory on the NUMA node the device is closest to and try to run delayed_delete workers on a CPU of this node as well. To optimize the memory clearing operation when a TTM BO gets freed by the delayed_delete worker, scheduling it closer to a NUMA node where the memory was initially allocated helps avoid the cases where the worker gets randomly scheduled on the CPU cores that are across interconnect boundaries such as xGMI, PCIe etc. This change helps USWC GTT allocations on NUMA systems (dGPU) and AMD APU platforms such as GFXIP9.4.3. Signed-off-by: Rajneesh Bhardwaj Acked-by: Felix Kuehling Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20231111130856.1168304-1-rajneesh.bhardwaj@amd.com Signed-off-by: Christian König --- drivers/gpu/drm/ttm/ttm_bo.c | 8 +++++++- drivers/gpu/drm/ttm/ttm_device.c | 6 ++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index e58b7e249816..edf10618fe2b 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -370,7 +370,13 @@ static void ttm_bo_release(struct kref *kref) spin_unlock(&bo->bdev->lru_lock); INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete); - queue_work(bdev->wq, &bo->delayed_delete); + + /* Schedule the worker on the closest NUMA node. This + * improves performance since system memory might be + * cleared on free and that is best done on a CPU core + * close to it. + */ + queue_work_node(bdev->pool.nid, bdev->wq, &bo->delayed_delete); return; } diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index d48b39132b32..f5187b384ae9 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -204,7 +204,8 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func if (ret) return ret; - bdev->wq = alloc_workqueue("ttm", WQ_MEM_RECLAIM | WQ_HIGHPRI, 16); + bdev->wq = alloc_workqueue("ttm", + WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 16); if (!bdev->wq) { ttm_global_release(); return -ENOMEM; @@ -213,7 +214,8 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func bdev->funcs = funcs; ttm_sys_man_init(bdev); - ttm_pool_init(&bdev->pool, dev, NUMA_NO_NODE, use_dma_alloc, use_dma32); + + ttm_pool_init(&bdev->pool, dev, dev_to_node(dev), use_dma_alloc, use_dma32); bdev->vma_manager = vma_manager; spin_lock_init(&bdev->lru_lock); -- cgit v1.2.3 From 19b4c60ce8660a0e3a2cebd3e4dc0691928d015d Mon Sep 17 00:00:00 2001 From: Luben Tuikov Date: Sat, 25 Nov 2023 14:06:08 -0500 Subject: drm/sched: Fix compilation issues with DRM priority rename MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix compilation issues with DRM scheduler priority rename MIN to LOW. 
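
For illustration, the shape of the fix (not the patch itself): after the rename and the run-queue reversal above, index 0 corresponds to DRM_SCHED_PRIORITY_KERNEL, so an entity attached to a scheduler that was created with a single run-queue must now be initialized with that priority. Here 'entity' and 'sched' stand in for the driver's own drm_sched_entity and drm_gpu_scheduler pointer, with the scheduler assumed to have been set up via drm_sched_init() with num_rqs == 1:

	/* sched_rq[0] is the only run-queue and, with the reversed enum, it
	 * belongs to DRM_SCHED_PRIORITY_KERNEL; DRM_SCHED_PRIORITY_LOW would
	 * index a run-queue that does not exist.
	 */
	ret = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_KERNEL,
				    &sched, 1, NULL);
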
Signed-off-by: Luben Tuikov Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202311252109.WgbJsSkG-lkp@intel.com/ Cc: Danilo Krummrich Cc: Frank Binns Cc: Donald Robson Cc: Matt Coster Cc: Direct Rendering Infrastructure - Development Fixes: fe375c74806dbd ("drm/sched: Rename priority MIN to LOW") Fixes: 38f922a563aac3 ("drm/sched: Reverse run-queue priority enumeration") Fixes: 5f03a507b29e44 ("drm/nouveau: implement 1:1 scheduler - entity relationship") Link: https://patchwork.freedesktop.org/patch/msgid/20231125192246.87268-2-ltuikov89@gmail.com Reviewed-by: Christian König Link: https://lore.kernel.org/r/7429262c-6dea-4dcc-bf7e-54d2277dabf1@amd.com --- drivers/gpu/drm/imagination/pvr_queue.c | 2 +- drivers/gpu/drm/nouveau/nouveau_sched.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c index d65c3fbedf5a..5ed9c98fb599 100644 --- a/drivers/gpu/drm/imagination/pvr_queue.c +++ b/drivers/gpu/drm/imagination/pvr_queue.c @@ -1292,7 +1292,7 @@ struct pvr_queue *pvr_queue_create(struct pvr_context *ctx, goto err_release_ufo; err = drm_sched_entity_init(&queue->entity, - DRM_SCHED_PRIORITY_MIN, + DRM_SCHED_PRIORITY_KERNEL, &sched, 1, &ctx->faulty); if (err) goto err_sched_fini; diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index 3393647bd944..dd98f6910f9c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -18,7 +18,7 @@ * index to the run-queue array. */ enum nouveau_sched_priority { - NOUVEAU_SCHED_PRIORITY_SINGLE = DRM_SCHED_PRIORITY_MIN, + NOUVEAU_SCHED_PRIORITY_SINGLE = DRM_SCHED_PRIORITY_KERNEL, NOUVEAU_SCHED_PRIORITY_COUNT, }; @@ -423,7 +423,7 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm, if (ret) goto fail_wq; - /* Using DRM_SCHED_PRIORITY_MIN, since that's what we're required to use + /* Using DRM_SCHED_PRIORITY_KERNEL, since that's what we're required to use * when we want to have a single run-queue only. * * It's not documented, but one will find out when trying to use any @@ -433,7 +433,7 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm, * Can't use NOUVEAU_SCHED_PRIORITY_SINGLE either, because it's not * matching the enum type used in drm_sched_entity_init(). */ - ret = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_MIN, + ret = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_KERNEL, &drm_sched, 1, NULL); if (ret) goto fail_sched; -- cgit v1.2.3 From e17049148678725248a57ecbf9c21df0fde3b434 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 20 Oct 2023 07:52:13 -0500 Subject: drm: Use device_get_match_data() Use preferred device_get_match_data() instead of of_match_device() to get the driver match data in a single step. With this, adjust the includes to explicitly include the correct headers. That also serves as preparation to remove implicit includes within the DT headers (of_device.h in particular). 
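The per-driver conversion below follows a single pattern; a self-contained sketch with a made-up platform driver and variant struct (only device_get_match_data() and the platform/device types are real kernel API):

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/property.h>

/* Hypothetical per-variant data normally referenced from the match table. */
struct foo_variant {
	unsigned int num_clocks;
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_variant *variant;

	/*
	 * Replaces the of_match_device() + match->data pair and resolves
	 * the match data through the generic firmware-node lookup.
	 */
	variant = device_get_match_data(&pdev->dev);
	if (!variant)
		return -EINVAL;

	/* ... use variant->num_clocks ... */
	return 0;
}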
Reviewed-by: Tomi Valkeinen Reviewed-by: Andrew Jeffery Link: https://lore.kernel.org/r/20231020125214.2930329-1-robh@kernel.org Signed-off-by: Rob Herring --- drivers/gpu/drm/armada/armada_crtc.c | 24 +++++++----------------- drivers/gpu/drm/aspeed/aspeed_gfx_drv.c | 10 ++++------ drivers/gpu/drm/exynos/exynos_drm_gsc.c | 9 +++++---- drivers/gpu/drm/imx/ipuv3/imx-ldb.c | 9 ++++----- drivers/gpu/drm/mxsfb/mxsfb_drv.c | 10 +++------- drivers/gpu/drm/omapdrm/dss/dispc.c | 4 ++-- drivers/gpu/drm/omapdrm/dss/dss.c | 5 +++-- 7 files changed, 28 insertions(+), 43 deletions(-) diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 52d2c942d3d2..c78687c755a8 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -7,8 +7,9 @@ #include #include #include -#include +#include #include +#include #include #include @@ -1012,26 +1013,17 @@ armada_lcd_bind(struct device *dev, struct device *master, void *data) int irq = platform_get_irq(pdev, 0); const struct armada_variant *variant; struct device_node *port = NULL; + struct device_node *np, *parent = dev->of_node; if (irq < 0) return irq; - if (!dev->of_node) { - const struct platform_device_id *id; - id = platform_get_device_id(pdev); - if (!id) - return -ENXIO; - - variant = (const struct armada_variant *)id->driver_data; - } else { - const struct of_device_id *match; - struct device_node *np, *parent = dev->of_node; - - match = of_match_device(dev->driver->of_match_table, dev); - if (!match) - return -ENXIO; + variant = device_get_match_data(dev); + if (!variant) + return -ENXIO; + if (parent) { np = of_get_child_by_name(parent, "ports"); if (np) parent = np; @@ -1041,8 +1033,6 @@ armada_lcd_bind(struct device *dev, struct device *master, void *data) dev_err(dev, "no port node found in %pOF\n", parent); return -ENXIO; } - - variant = match->data; } return armada_drm_crtc_create(drm, dev, res, irq, variant, port); diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index 78122b35a0cb..a7a6b70220eb 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -6,10 +6,10 @@ #include #include #include -#include -#include +#include #include #include +#include #include #include @@ -143,7 +143,6 @@ static int aspeed_gfx_load(struct drm_device *drm) struct aspeed_gfx *priv = to_aspeed_gfx(drm); struct device_node *np = pdev->dev.of_node; const struct aspeed_gfx_config *config; - const struct of_device_id *match; struct resource *res; int ret; @@ -152,10 +151,9 @@ static int aspeed_gfx_load(struct drm_device *drm) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); - match = of_match_device(aspeed_gfx_match, &pdev->dev); - if (!match) + config = device_get_match_data(&pdev->dev); + if (!config) return -EINVAL; - config = match->data; priv->dac_reg = config->dac_reg; priv->int_clr_reg = config->int_clear_reg; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 34cdabc30b4f..35771fb4e85d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -11,9 +11,10 @@ #include #include #include -#include +#include #include #include +#include #include #include @@ -103,7 +104,7 @@ struct gsc_context { unsigned int num_formats; void __iomem *regs; - const char **clk_names; + const char *const *clk_names; struct clk *clocks[GSC_MAX_CLOCKS]; int num_clocks; struct gsc_scaler sc; @@ -1217,7 +1218,7 @@ static const unsigned 
int gsc_tiled_formats[] = { static int gsc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct gsc_driverdata *driver_data; + const struct gsc_driverdata *driver_data; struct exynos_drm_ipp_formats *formats; struct gsc_context *ctx; int num_formats, ret, i, j; @@ -1226,7 +1227,7 @@ static int gsc_probe(struct platform_device *pdev) if (!ctx) return -ENOMEM; - driver_data = (struct gsc_driverdata *)of_device_get_match_data(dev); + driver_data = device_get_match_data(dev); ctx->dev = dev; ctx->num_clocks = driver_data->num_clocks; ctx->clk_names = driver_data->clk_names; diff --git a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c index 989eca32d325..53840ab054c7 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c @@ -12,8 +12,10 @@ #include #include #include -#include +#include #include +#include +#include #include #include @@ -617,7 +619,6 @@ static int imx_ldb_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - const struct of_device_id *of_id = of_match_device(imx_ldb_dt_ids, dev); struct device_node *child; struct imx_ldb *imx_ldb; int dual; @@ -638,9 +639,7 @@ static int imx_ldb_probe(struct platform_device *pdev) regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0); imx_ldb->dev = dev; - - if (of_id) - imx_ldb->lvds_mux = of_id->data; + imx_ldb->lvds_mux = device_get_match_data(dev); dual = of_property_read_bool(np, "fsl,dual-channel"); if (dual) diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 625c1bfc4173..b483ef48216a 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -11,9 +11,10 @@ #include #include #include +#include #include -#include #include +#include #include #include @@ -346,18 +347,13 @@ MODULE_DEVICE_TABLE(of, mxsfb_dt_ids); static int mxsfb_probe(struct platform_device *pdev) { struct drm_device *drm; - const struct of_device_id *of_id = - of_match_device(mxsfb_dt_ids, &pdev->dev); int ret; - if (!pdev->dev.of_node) - return -ENODEV; - drm = drm_dev_alloc(&mxsfb_driver, &pdev->dev); if (IS_ERR(drm)) return PTR_ERR(drm); - ret = mxsfb_load(drm, of_id->data); + ret = mxsfb_load(drm, device_get_match_data(&pdev->dev)); if (ret) goto err_free; diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index c26aab4939fa..993691b3cc7e 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -22,11 +22,11 @@ #include #include #include +#include #include #include #include #include -#include #include #include #include @@ -4765,7 +4765,7 @@ static int dispc_bind(struct device *dev, struct device *master, void *data) if (soc) dispc->feat = soc->data; else - dispc->feat = of_match_device(dispc_of_match, &pdev->dev)->data; + dispc->feat = device_get_match_data(&pdev->dev); r = dispc_errata_i734_wa_init(dispc); if (r) diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 02955f976845..988888e164d7 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -22,12 +22,13 @@ #include #include #include +#include #include #include #include #include #include -#include +#include #include #include #include @@ -1445,7 +1446,7 @@ static int dss_probe(struct platform_device *pdev) if (soc) dss->feat = soc->data; else - dss->feat = of_match_device(dss_of_match, &pdev->dev)->data; + dss->feat = device_get_match_data(&pdev->dev); /* Map I/O 
registers, get and setup clocks. */ dss->base = devm_platform_ioremap_resource(pdev, 0); -- cgit v1.2.3 From 737077b873e32254959bc6f8c3e63cc67ba1f44c Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 24 Nov 2023 16:39:17 +0000 Subject: drm/imagination: Fix a couple of spelling mistakes in literal strings There are a couple of spelling mistakes in literal strings in the stid_fmts array. Fix these. Signed-off-by: Colin Ian King Reviewed-by: Frank Binns Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20231124163917.300685-1-colin.i.king@gmail.com --- drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h index 571954182f33..56e11009e123 100644 --- a/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h +++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h @@ -497,7 +497,7 @@ static const struct rogue_km_stid_fmt stid_fmts[] = { { ROGUE_FW_LOG_CREATESFID(213, ROGUE_FW_GROUP_MAIN, 1), "Safety Watchdog threshold period set to 0x%x clock cycles" }, { ROGUE_FW_LOG_CREATESFID(214, ROGUE_FW_GROUP_MAIN, 0), - "MTS Safety Event trigged by the safety watchdog." }, + "MTS Safety Event triggered by the safety watchdog." }, { ROGUE_FW_LOG_CREATESFID(215, ROGUE_FW_GROUP_MAIN, 3), "DM%d USC tasks range limit 0 - %d, stride %d" }, { ROGUE_FW_LOG_CREATESFID(216, ROGUE_FW_GROUP_MAIN, 1), @@ -1114,7 +1114,7 @@ static const struct rogue_km_stid_fmt stid_fmts[] = { { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_SPM, 2), "3DMemFree matches freelist 0x%08x (FL type = %u)" }, { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_SPM, 0), - "Raise the 3DMemFreeDedected flag" }, + "Raise the 3DMemFreeDetected flag" }, { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_SPM, 1), "Wait for pending grow on Freelist 0x%08x" }, { ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_SPM, 1), -- cgit v1.2.3 From 3519d77293fb74786a45811fa6b600db26c1b0be Mon Sep 17 00:00:00 2001 From: Yang Li Date: Mon, 27 Nov 2023 09:04:30 +0800 Subject: drm/imagination: Remove unneeded semicolon ./drivers/gpu/drm/imagination/pvr_free_list.c:258:2-3: Unneeded semicolon Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=7635 Signed-off-by: Yang Li Reviewed-by: Frank Binns Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20231127010430.119666-1-yang.lee@linux.alibaba.com --- drivers/gpu/drm/imagination/pvr_free_list.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/imagination/pvr_free_list.c b/drivers/gpu/drm/imagination/pvr_free_list.c index c61fd417edcb..5e51bc980751 100644 --- a/drivers/gpu/drm/imagination/pvr_free_list.c +++ b/drivers/gpu/drm/imagination/pvr_free_list.c @@ -255,7 +255,7 @@ pvr_free_list_insert_pages_locked(struct pvr_free_list *free_list, if (!num_pages) break; - }; + } /* clang-format on */ /* Make sure our free_list update is flushed. */ -- cgit v1.2.3 From 4aa89e8644d3b8879191911edea0b6a63ea9d6e2 Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Sat, 25 Nov 2023 00:36:36 +0100 Subject: drm/imagination: vm: prevent duplicate drm_gpuvm_bo instances Use drm_gpuvm_bo_obtain() instead of drm_gpuvm_bo_create(). The latter should only be used in conjunction with drm_gpuvm_bo_obtain_prealloc(). 
drm_gpuvm_bo_obtain() re-uses existing instances of a given VM and BO combination, whereas drm_gpuvm_bo_create() would always create a new instance of struct drm_gpuvm_bo and hence leave us with duplicates. Fixes: ff5f643de0bf ("drm/imagination: Add GEM and VM related code") Signed-off-by: Danilo Krummrich Reviewed-by: Donald Robson Tested-by: Frank Binns Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20231124233650.152653-2-dakr@redhat.com --- drivers/gpu/drm/imagination/pvr_vm.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 3ad1366294b9..09d481c575b0 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -224,6 +224,7 @@ pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op, struct pvr_gem_object *pvr_obj, u64 offset, u64 device_addr, u64 size) { + struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj); const bool is_user = vm_ctx == vm_ctx->pvr_dev->kernel_vm_ctx; const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj); struct sg_table *sgt; @@ -245,10 +246,11 @@ pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op, bind_op->type = PVR_VM_BIND_TYPE_MAP; - bind_op->gpuvm_bo = drm_gpuvm_bo_create(&vm_ctx->gpuvm_mgr, - gem_from_pvr_gem(pvr_obj)); - if (!bind_op->gpuvm_bo) - return -ENOMEM; + dma_resv_lock(obj->resv, NULL); + bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj); + dma_resv_unlock(obj->resv); + if (IS_ERR(bind_op->gpuvm_bo)) + return PTR_ERR(bind_op->gpuvm_bo); bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL); bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL); -- cgit v1.2.3 From 4550d66d08b2257a1b2d3ce339d68ca33177f4b9 Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Sat, 25 Nov 2023 00:36:37 +0100 Subject: drm/imagination: vm: check for drm_gpuvm_range_valid() Extend pvr_device_addr_and_size_are_valid() by the corresponding GPUVM sanity checks. This includes a, previously missing, overflow check for the base address and size of the requested mapping. 
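The important addition is the overflow check: without it, a base address near the top of the 64-bit space plus a large size can wrap around and still pass the upper-bound comparison. A standalone sketch of that idea (the VA-space constants are invented and the helper is a simplification, not the GPUVM implementation):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_VA_START 0x0ULL
#define EXAMPLE_VA_RANGE 0x4000000000ULL	/* hypothetical 256 GiB VA space */

static bool example_range_valid(uint64_t addr, uint64_t size)
{
	uint64_t end = addr + size;

	/* Reject empty ranges and addr + size wrap-around. */
	if (size == 0 || end < addr)
		return false;

	return addr >= EXAMPLE_VA_START &&
	       end <= EXAMPLE_VA_START + EXAMPLE_VA_RANGE;
}

int main(void)
{
	printf("%d\n", example_range_valid(0x1000, 0x2000));		/* 1 */
	printf("%d\n", example_range_valid(UINT64_MAX - 4096, 8192));	/* 0 */
	return 0;
}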
Fixes: ff5f643de0bf ("drm/imagination: Add GEM and VM related code") Signed-off-by: Danilo Krummrich Reviewed-by: Donald Robson Tested-by: Frank Binns Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20231124233650.152653-3-dakr@redhat.com --- drivers/gpu/drm/imagination/pvr_vm.c | 9 ++++++--- drivers/gpu/drm/imagination/pvr_vm.h | 3 ++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 09d481c575b0..1e89092c3dcc 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -239,7 +239,7 @@ pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op, return -EINVAL; } - if (!pvr_device_addr_and_size_are_valid(device_addr, size) || + if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size) || offset & ~PAGE_MASK || size & ~PAGE_MASK || offset >= pvr_obj_size || offset_plus_size > pvr_obj_size) return -EINVAL; @@ -295,7 +295,7 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op, { int err; - if (!pvr_device_addr_and_size_are_valid(device_addr, size)) + if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size)) return -EINVAL; bind_op->type = PVR_VM_BIND_TYPE_UNMAP; @@ -505,6 +505,7 @@ pvr_device_addr_is_valid(u64 device_addr) /** * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual * address and associated size are both valid. + * @vm_ctx: Target VM context. * @device_addr: Virtual device address to test. * @size: Size of the range based at @device_addr to test. * @@ -523,9 +524,11 @@ pvr_device_addr_is_valid(u64 device_addr) * * %false otherwise. */ bool -pvr_device_addr_and_size_are_valid(u64 device_addr, u64 size) +pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx, + u64 device_addr, u64 size) { return pvr_device_addr_is_valid(device_addr) && + drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) && size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 && (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE); } diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h index cf8b97553dc8..f2a6463f2b05 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.h +++ b/drivers/gpu/drm/imagination/pvr_vm.h @@ -29,7 +29,8 @@ struct drm_exec; /* Functions defined in pvr_vm.c */ bool pvr_device_addr_is_valid(u64 device_addr); -bool pvr_device_addr_and_size_are_valid(u64 device_addr, u64 size); +bool pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx, + u64 device_addr, u64 size); struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context); -- cgit v1.2.3 From 0d3abd456be45369235dd75793ce26f07900044c Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Sat, 25 Nov 2023 00:36:38 +0100 Subject: drm/imagination: vm: fix drm_gpuvm reference count The driver-specific reference count indicates whether the VM should be torn down, whereas GPUVM's reference count indicates whether the VM structure can finally be freed. Hence, free the VM structure in pvr_gpuvm_free() and drop the last GPUVM reference after tearing down the VM. Generally, this prevents lifetime issues such as the VM being freed as long as drm_gpuvm_bo structures still hold references to the VM.
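Put differently, the embedding structure may only be freed from GPUVM's .vm_free callback, and the driver's own teardown path drops its GPUVM reference as the very last step. A condensed sketch of that shape (example_vm_context and its fields are stand-ins for the driver's context type):

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <drm/drm_gpuvm.h>

struct example_vm_context {
	struct drm_gpuvm gpuvm_mgr;
	struct kref ref_count;
	/* ... driver-private state ... */
};

/* .vm_free callback: runs once the last GPUVM reference is dropped. */
static void example_gpuvm_free(struct drm_gpuvm *gpuvm)
{
	kfree(container_of(gpuvm, struct example_vm_context, gpuvm_mgr));
}

static void example_vm_context_release(struct kref *ref_count)
{
	struct example_vm_context *vm_ctx =
		container_of(ref_count, struct example_vm_context, ref_count);

	/* ... unmap remaining mappings, destroy the MMU context, ... */

	/* Last step; may end up in example_gpuvm_free(). */
	drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
}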
Fixes: ff5f643de0bf ("drm/imagination: Add GEM and VM related code") Signed-off-by: Danilo Krummrich Reviewed-by: Donald Robson Tested-by: Frank Binns Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20231124233650.152653-4-dakr@redhat.com --- drivers/gpu/drm/imagination/pvr_vm.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 1e89092c3dcc..e0d74d9a6190 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -64,6 +64,12 @@ struct pvr_vm_context { struct drm_gem_object dummy_gem; }; +static inline +struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm) +{ + return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr); +} + struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx) { if (vm_ctx) @@ -535,7 +541,7 @@ pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx, void pvr_gpuvm_free(struct drm_gpuvm *gpuvm) { - + kfree(to_pvr_vm_context(gpuvm)); } static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = { @@ -655,12 +661,11 @@ pvr_vm_context_release(struct kref *ref_count) WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start, vm_ctx->gpuvm_mgr.mm_range)); - drm_gpuvm_put(&vm_ctx->gpuvm_mgr); pvr_mmu_context_destroy(vm_ctx->mmu_ctx); drm_gem_private_object_fini(&vm_ctx->dummy_gem); mutex_destroy(&vm_ctx->lock); - kfree(vm_ctx); + drm_gpuvm_put(&vm_ctx->gpuvm_mgr); } /** -- cgit v1.2.3 From c350a08ac7ec933f1dc8a143ebab60164ed4d90b Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Thu, 16 Nov 2023 10:59:20 +0100 Subject: drm/ast: Turn ioregs_lock to modeset_lock The lock for the I/O registers is only relevant during mode-setting operations. It protects the registers from concurrent access from reading EDID information. Reduce lock coverage to mode setting, rename the lock and move it entirely into the mode-setting code. No functional changes, as the I/O lock was never used for anything else than mode setting. 
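After the change the mutex only brackets the two mode-setting paths. A trimmed sketch of both call sites (EDID-property updates and error paths omitted; the DDC adapter is passed in for brevity rather than taken from the connector):

#include <linux/i2c.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>

#include "ast_drv.h"

/* Connector .get_modes path: serialize the EDID read against modesetting. */
static int example_get_modes(struct drm_connector *connector,
			     struct i2c_adapter *ddc)
{
	struct ast_device *ast = to_ast_device(connector->dev);
	struct edid *edid;
	int count;

	mutex_lock(&ast->modeset_lock);
	edid = drm_get_edid(connector, ddc);
	mutex_unlock(&ast->modeset_lock);

	if (!edid)
		return 0;

	count = drm_add_edid_modes(connector, edid);
	kfree(edid);
	return count;
}

/* Commit tail: the only other place that takes the lock. */
static void example_commit_tail(struct drm_atomic_state *state)
{
	struct ast_device *ast = to_ast_device(state->dev);

	mutex_lock(&ast->modeset_lock);
	drm_atomic_helper_commit_tail_rpm(state);
	mutex_unlock(&ast->modeset_lock);
}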
Signed-off-by: Thomas Zimmermann Reviewed-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20231116100240.22975-2-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_drv.h | 3 ++- drivers/gpu/drm/ast/ast_main.c | 4 ---- drivers/gpu/drm/ast/ast_mode.c | 26 +++++++++++++++----------- 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 772f3b049c16..2ec29480939d 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -192,7 +192,6 @@ to_ast_bmc_connector(struct drm_connector *connector) struct ast_device { struct drm_device base; - struct mutex ioregs_lock; /* Protects access to I/O registers in ioregs */ void __iomem *regs; void __iomem *ioregs; void __iomem *dp501_fw_buf; @@ -207,6 +206,8 @@ struct ast_device { unsigned long vram_size; unsigned long vram_fb_available; + struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */ + struct ast_plane primary_plane; struct ast_plane cursor_plane; struct drm_crtc crtc; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index f4ab40e22cea..445cf47871a4 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -440,10 +440,6 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, pci_set_drvdata(pdev, dev); - ret = drmm_mutex_init(dev, &ast->ioregs_lock); - if (ret) - return ERR_PTR(ret); - ast->regs = pcim_iomap(pdev, 1, 0); if (!ast->regs) return ERR_PTR(-EIO); diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index c20534d0ef7c..a718646a66b8 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -1358,13 +1358,13 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector) * Protect access to I/O registers from concurrent modesetting * by acquiring the I/O-register lock. */ - mutex_lock(&ast->ioregs_lock); + mutex_lock(&ast->modeset_lock); edid = drm_get_edid(connector, &ast_vga_connector->i2c->adapter); if (!edid) goto err_mutex_unlock; - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); count = drm_add_edid_modes(connector, edid); kfree(edid); @@ -1372,7 +1372,7 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector) return count; err_mutex_unlock: - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); err_drm_connector_update_edid_property: drm_connector_update_edid_property(connector, NULL); return 0; @@ -1464,13 +1464,13 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector * Protect access to I/O registers from concurrent modesetting * by acquiring the I/O-register lock. 
*/ - mutex_lock(&ast->ioregs_lock); + mutex_lock(&ast->modeset_lock); edid = drm_get_edid(connector, &ast_sil164_connector->i2c->adapter); if (!edid) goto err_mutex_unlock; - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); count = drm_add_edid_modes(connector, edid); kfree(edid); @@ -1478,7 +1478,7 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector return count; err_mutex_unlock: - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); err_drm_connector_update_edid_property: drm_connector_update_edid_property(connector, NULL); return 0; @@ -1670,13 +1670,13 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector) * Protect access to I/O registers from concurrent modesetting * by acquiring the I/O-register lock. */ - mutex_lock(&ast->ioregs_lock); + mutex_lock(&ast->modeset_lock); succ = ast_astdp_read_edid(connector->dev, edid); if (succ < 0) goto err_mutex_unlock; - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); drm_connector_update_edid_property(connector, edid); count = drm_add_edid_modes(connector, edid); @@ -1685,7 +1685,7 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector) return count; err_mutex_unlock: - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); kfree(edid); err_drm_connector_update_edid_property: drm_connector_update_edid_property(connector, NULL); @@ -1870,9 +1870,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s * display modes. Protect access to I/O registers by acquiring * the I/O-register lock. Released in atomic_flush(). */ - mutex_lock(&ast->ioregs_lock); + mutex_lock(&ast->modeset_lock); drm_atomic_helper_commit_tail_rpm(state); - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); } static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs = { @@ -1910,6 +1910,10 @@ int ast_mode_config_init(struct ast_device *ast) struct drm_connector *physical_connector = NULL; int ret; + ret = drmm_mutex_init(dev, &ast->modeset_lock); + if (ret) + return ret; + ret = drmm_mode_config_init(dev); if (ret) return ret; -- cgit v1.2.3 From 0ccaa3dde97bd30ae615c66fc20080e920ec9b4e Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Thu, 16 Nov 2023 10:59:21 +0100 Subject: drm/ast: Rework I/O register setup There are three different ways of retrieving the I/O-memory ranges for AST devices: either from PCI BAR 1, from PCI BAR 2 or from PCI BAR 1 by 'guessing'. Make the respective code more readable by making each case self- contained. Also add error checking against the length of the PCI BARs. v2: * fix I/O range length to 128 bytes * fix length test for PCI BAR 2 Signed-off-by: Thomas Zimmermann Reviewed-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20231116100240.22975-3-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_main.c | 40 ++++++++++++++++++++++++++++++---------- drivers/gpu/drm/ast/ast_reg.h | 1 + 2 files changed, 31 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 445cf47871a4..70e1871dbaf9 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -444,22 +444,42 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, if (!ast->regs) return ERR_PTR(-EIO); - /* - * After AST2500, MMIO is enabled by default, and it should be adopted - * to be compatible with Arm. 
- */ if (pdev->revision >= 0x40) { + /* + * On AST2500 and later models, MMIO is enabled by + * default. Adopt it to be compatible with ARM. + */ + resource_size_t len = pci_resource_len(pdev, 1); + + if (len < AST_IO_MM_OFFSET) + return ERR_PTR(-EIO); + if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH) + return ERR_PTR(-EIO); ast->ioregs = ast->regs + AST_IO_MM_OFFSET; - } else if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) { - drm_info(dev, "platform has no IO space, trying MMIO\n"); - ast->ioregs = ast->regs + AST_IO_MM_OFFSET; - } + } else if (pci_resource_flags(pdev, 2) & IORESOURCE_IO) { + /* + * Map I/O registers if we have a PCI BAR for I/O. + */ + resource_size_t len = pci_resource_len(pdev, 2); - /* "map" IO regs if the above hasn't done so already */ - if (!ast->ioregs) { + if (len < AST_IO_MM_LENGTH) + return -EIO; ast->ioregs = pcim_iomap(pdev, 2, 0); if (!ast->ioregs) return ERR_PTR(-EIO); + } else { + /* + * Anything else is best effort. + */ + resource_size_t len = pci_resource_len(pdev, 1); + + if (len < AST_IO_MM_OFFSET) + return ERR_PTR(-EIO); + if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH) + return ERR_PTR(-EIO); + ast->ioregs = ast->regs + AST_IO_MM_OFFSET; + + drm_info(dev, "Platform has no I/O space, using MMIO\n"); } if (!ast_is_vga_enabled(dev)) { diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h index 555286ecf520..05bab94a9a90 100644 --- a/drivers/gpu/drm/ast/ast_reg.h +++ b/drivers/gpu/drm/ast/ast_reg.h @@ -10,6 +10,7 @@ */ #define AST_IO_MM_OFFSET (0x380) +#define AST_IO_MM_LENGTH (128) #define AST_IO_VGAARI_W (0x40) #define AST_IO_VGAMR_W (0x42) -- cgit v1.2.3 From b45efcfc94e8043d08344094a305bb4b8030c7df Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Thu, 16 Nov 2023 10:59:22 +0100 Subject: drm/ast: Retrieve I/O-memory ranges without ast device Read the I/O-memory ranges into local variables before setting them in the ast device instanace. We'll later need this to split detecting the device type from the creation of the ast device instance. Signed-off-by: Thomas Zimmermann Reviewed-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20231116100240.22975-4-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_main.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 70e1871dbaf9..026ef893dd50 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -432,6 +432,8 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, struct ast_device *ast; bool need_post = false; int ret = 0; + void __iomem *regs; + void __iomem *ioregs; ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); if (IS_ERR(ast)) @@ -440,8 +442,8 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, pci_set_drvdata(pdev, dev); - ast->regs = pcim_iomap(pdev, 1, 0); - if (!ast->regs) + regs = pcim_iomap(pdev, 1, 0); + if (!regs) return ERR_PTR(-EIO); if (pdev->revision >= 0x40) { @@ -455,7 +457,7 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, return ERR_PTR(-EIO); if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH) return ERR_PTR(-EIO); - ast->ioregs = ast->regs + AST_IO_MM_OFFSET; + ioregs = regs + AST_IO_MM_OFFSET; } else if (pci_resource_flags(pdev, 2) & IORESOURCE_IO) { /* * Map I/O registers if we have a PCI BAR for I/O. 
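The resulting shape is: everything that can fail operates on local pointers, and the ast_device fields are written only once the mappings are known-good. A condensed sketch of that pattern, covering only the MMIO-via-BAR1 case (not a drop-in replacement for the full three-way BAR handling in the hunk below):

/* Hypothetical condensation: map and validate locally, publish late. */
static int example_init_ioregs(struct ast_device *ast, struct pci_dev *pdev)
{
	void __iomem *regs;

	regs = pcim_iomap(pdev, 1, 0);
	if (!regs)
		return -EIO;

	if (pci_resource_len(pdev, 1) < AST_IO_MM_OFFSET + AST_IO_MM_LENGTH)
		return -EIO;

	/* Nothing below can fail: only now touch the device instance. */
	ast->regs = regs;
	ast->ioregs = regs + AST_IO_MM_OFFSET;

	return 0;
}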
@@ -464,8 +466,8 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, if (len < AST_IO_MM_LENGTH) return -EIO; - ast->ioregs = pcim_iomap(pdev, 2, 0); - if (!ast->ioregs) + ioregs = pcim_iomap(pdev, 2, 0); + if (!ioregs) return ERR_PTR(-EIO); } else { /* @@ -477,11 +479,14 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, return ERR_PTR(-EIO); if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH) return ERR_PTR(-EIO); - ast->ioregs = ast->regs + AST_IO_MM_OFFSET; + ioregs = regs + AST_IO_MM_OFFSET; drm_info(dev, "Platform has no I/O space, using MMIO\n"); } + ast->regs = regs; + ast->ioregs = ioregs; + if (!ast_is_vga_enabled(dev)) { drm_info(dev, "VGA not enabled on entry, requesting chip POST\n"); need_post = true; -- cgit v1.2.3 From cdac0cd459cf282ccdc4f28f838a2375e5cf61f7 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Thu, 16 Nov 2023 10:59:23 +0100 Subject: drm/ast: Add I/O helpers without ast device Implement I/O access in helpers that do not use an ast device instance, but the raw pointer to the I/O memory. We'll later need these helpers to detect the device type before allocating the ast device instance. v3: * fix typo in commit message (Sui) Signed-off-by: Thomas Zimmermann Reviewed-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20231116100240.22975-5-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_drv.h | 73 +++++++++++++++++++++++++++++++++---------- 1 file changed, 56 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 2ec29480939d..71f0e23e08cd 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -273,55 +273,94 @@ static inline bool __ast_gen_is_eq(struct ast_device *ast, unsigned long gen) #define IS_AST_GEN6(__ast) __ast_gen_is_eq(__ast, 6) #define IS_AST_GEN7(__ast) __ast_gen_is_eq(__ast, 7) +static inline u8 __ast_read8(const void __iomem *addr, u32 reg) +{ + return ioread8(addr + reg); +} + +static inline u32 __ast_read32(const void __iomem *addr, u32 reg) +{ + return ioread32(addr + reg); +} + +static inline void __ast_write8(void __iomem *addr, u32 reg, u8 val) +{ + iowrite8(val, addr + reg); +} + +static inline void __ast_write32(void __iomem *addr, u32 reg, u32 val) +{ + iowrite32(val, addr + reg); +} + +static inline u8 __ast_read8_i(void __iomem *addr, u32 reg, u8 index) +{ + __ast_write8(addr, reg, index); + return __ast_read8(addr, reg + 1); +} + +static inline u8 __ast_read8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask) +{ + u8 val = __ast_read8_i(addr, reg, index); + + return val & read_mask; +} + +static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val) +{ + __ast_write8(addr, reg, index); + __ast_write8(addr, reg + 1, val); +} + +static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask, + u8 val) +{ + u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask); + + tmp |= val; + __ast_write8_i(addr, reg, index, tmp); +} + static inline u32 ast_read32(struct ast_device *ast, u32 reg) { - return ioread32(ast->regs + reg); + return __ast_read32(ast->regs, reg); } static inline void ast_write32(struct ast_device *ast, u32 reg, u32 val) { - iowrite32(val, ast->regs + reg); + __ast_write32(ast->regs, reg, val); } static inline u8 ast_io_read8(struct ast_device *ast, u32 reg) { - return ioread8(ast->ioregs + reg); + return __ast_read8(ast->ioregs, reg); } static inline void ast_io_write8(struct ast_device *ast, u32 reg, u8 val) { - 
iowrite8(val, ast->ioregs + reg); + __ast_write8(ast->ioregs, reg, val); } static inline u8 ast_get_index_reg(struct ast_device *ast, u32 base, u8 index) { - ast_io_write8(ast, base, index); - ++base; - return ast_io_read8(ast, base); + return __ast_read8_i(ast->ioregs, base, index); } static inline u8 ast_get_index_reg_mask(struct ast_device *ast, u32 base, u8 index, u8 preserve_mask) { - u8 val = ast_get_index_reg(ast, base, index); - - return val & preserve_mask; + return __ast_read8_i_masked(ast->ioregs, base, index, preserve_mask); } static inline void ast_set_index_reg(struct ast_device *ast, u32 base, u8 index, u8 val) { - ast_io_write8(ast, base, index); - ++base; - ast_io_write8(ast, base, val); + __ast_write8_i(ast->ioregs, base, index, val); } static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 index, u8 preserve_mask, u8 val) { - u8 tmp = ast_get_index_reg_mask(ast, base, index, preserve_mask); - - tmp |= val; - ast_set_index_reg(ast, base, index, tmp); + __ast_write8_i_masked(ast->ioregs, base, index, preserve_mask, val); } #define AST_VIDMEM_SIZE_8M 0x00800000 -- cgit v1.2.3 From 73b05bb4c0539d89111ed2f9c5a2eac1b577f83d Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Thu, 16 Nov 2023 10:59:24 +0100 Subject: drm/ast: Enable VGA without ast device instance We'll have to enable the VGA functionality for detecting the ast device type. Make this work without an instance of the ast device. Signed-off-by: Thomas Zimmermann Reviewed-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20231116100240.22975-6-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_main.c | 29 ++++++++++++----------------- drivers/gpu/drm/ast/ast_reg.h | 9 +++++++-- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 026ef893dd50..82fcee967d98 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -35,22 +35,17 @@ #include "ast_drv.h" -static bool ast_is_vga_enabled(struct drm_device *dev) +static bool ast_is_vga_enabled(void __iomem *ioregs) { - struct ast_device *ast = to_ast_device(dev); - u8 ch; - - ch = ast_io_read8(ast, AST_IO_VGAER); + u8 vgaer = __ast_read8(ioregs, AST_IO_VGAER); - return !!(ch & 0x01); + return vgaer & AST_IO_VGAER_VGA_ENABLE; } -static void ast_enable_vga(struct drm_device *dev) +static void ast_enable_vga(void __iomem *ioregs) { - struct ast_device *ast = to_ast_device(dev); - - ast_io_write8(ast, AST_IO_VGAER, 0x01); - ast_io_write8(ast, AST_IO_VGAMR_W, 0x01); + __ast_write8(ioregs, AST_IO_VGAER, AST_IO_VGAER_VGA_ENABLE); + __ast_write8(ioregs, AST_IO_VGAMR_W, AST_IO_VGAMR_IOSEL); } /* @@ -74,9 +69,9 @@ static int ast_enable_mmio(struct ast_device *ast) return devm_add_action_or_reset(dev->dev, ast_enable_mmio_release, ast); } -static void ast_open_key(struct ast_device *ast) +static void ast_open_key(void __iomem *ioregs) { - ast_set_index_reg(ast, AST_IO_VGACRI, 0x80, 0xA8); + __ast_write8_i(ioregs, AST_IO_VGACRI, 0x80, AST_IO_VGACR80_PASSWORD); } static int ast_device_config_init(struct ast_device *ast) @@ -487,7 +482,7 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, ast->regs = regs; ast->ioregs = ioregs; - if (!ast_is_vga_enabled(dev)) { + if (!ast_is_vga_enabled(ioregs)) { drm_info(dev, "VGA not enabled on entry, requesting chip POST\n"); need_post = true; } @@ -497,10 +492,10 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, * access to the scratch registers will fail. 
*/ if (need_post) - ast_enable_vga(dev); - + ast_enable_vga(ioregs); /* Enable extended register access */ - ast_open_key(ast); + ast_open_key(ioregs); + ret = ast_enable_mmio(ast); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h index 05bab94a9a90..5258a84ef3a6 100644 --- a/drivers/gpu/drm/ast/ast_reg.h +++ b/drivers/gpu/drm/ast/ast_reg.h @@ -13,8 +13,14 @@ #define AST_IO_MM_LENGTH (128) #define AST_IO_VGAARI_W (0x40) + #define AST_IO_VGAMR_W (0x42) +#define AST_IO_VGAMR_R (0x4c) +#define AST_IO_VGAMR_IOSEL BIT(0) + #define AST_IO_VGAER (0x43) +#define AST_IO_VGAER_VGA_ENABLE BIT(0) + #define AST_IO_VGASRI (0x44) #define AST_IO_VGADRR (0x47) #define AST_IO_VGADWR (0x48) @@ -22,14 +28,13 @@ #define AST_IO_VGAGRI (0x4E) #define AST_IO_VGACRI (0x54) +#define AST_IO_VGACR80_PASSWORD (0xa8) #define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */ #define AST_IO_VGACRCB_HWC_ENABLED BIT(1) #define AST_IO_VGAIR1_R (0x5A) #define AST_IO_VGAIR1_VREFRESH BIT(3) -#define AST_IO_VGAMR_R (0x4C) - /* * Display Transmitter Type */ -- cgit v1.2.3 From 66f843d6703513b9ee8d3d10694a21931feb32c7 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Thu, 16 Nov 2023 10:59:25 +0100 Subject: drm/ast: Enable MMIO without ast device instance We'll have to enable the MMIO access for detecting the ast device type. Make this work without an instance of the ast device. Signed-off-by: Thomas Zimmermann Reviewed-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20231116100240.22975-7-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_main.c | 16 +++++++++------- drivers/gpu/drm/ast/ast_reg.h | 2 ++ 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 82fcee967d98..0173cb44f17d 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -54,19 +54,21 @@ static void ast_enable_vga(void __iomem *ioregs) */ static void ast_enable_mmio_release(void *data) { - struct ast_device *ast = data; + void __iomem *ioregs = (void __force __iomem *)data; /* enable standard VGA decode */ - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x04); + __ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1, AST_IO_VGACRA1_MMIO_ENABLED); } -static int ast_enable_mmio(struct ast_device *ast) +static int ast_enable_mmio(struct device *dev, void __iomem *ioregs) { - struct drm_device *dev = &ast->base; + void *data = (void __force *)ioregs; - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x06); + __ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1, + AST_IO_VGACRA1_MMIO_ENABLED | + AST_IO_VGACRA1_VGAIO_DISABLED); - return devm_add_action_or_reset(dev->dev, ast_enable_mmio_release, ast); + return devm_add_action_or_reset(dev, ast_enable_mmio_release, data); } static void ast_open_key(void __iomem *ioregs) @@ -496,7 +498,7 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, /* Enable extended register access */ ast_open_key(ioregs); - ret = ast_enable_mmio(ast); + ret = ast_enable_mmio(&pdev->dev, ioregs); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h index 5258a84ef3a6..62dddbf3fe56 100644 --- a/drivers/gpu/drm/ast/ast_reg.h +++ b/drivers/gpu/drm/ast/ast_reg.h @@ -29,6 +29,8 @@ #define AST_IO_VGACRI (0x54) #define AST_IO_VGACR80_PASSWORD (0xa8) +#define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1) +#define AST_IO_VGACRA1_MMIO_ENABLED BIT(2) #define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, 
cleared: 2bpp palette */ #define AST_IO_VGACRCB_HWC_ENABLED BIT(1) -- cgit v1.2.3 From 83ab91faf20c1aed982ca5949ce5d83b34b7f546 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Thu, 16 Nov 2023 10:59:26 +0100 Subject: drm/ast: Partially implement POST without ast device instance We'll have to do some of the GPU POSTing for detecting the ast device type. Make this work without an instance of the ast device. Signed-off-by: Thomas Zimmermann Reviewed-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20231116100240.22975-8-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_drv.h | 2 +- drivers/gpu/drm/ast/ast_main.c | 2 +- drivers/gpu/drm/ast/ast_post.c | 73 ++++++++++++++++++++++++++---------------- 3 files changed, 47 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 71f0e23e08cd..390fbec6bc2b 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -482,7 +482,7 @@ int ast_mm_init(struct ast_device *ast); void ast_post_gpu(struct drm_device *dev); u32 ast_mindwm(struct ast_device *ast, u32 r); void ast_moutdwm(struct ast_device *ast, u32 r, u32 v); -void ast_patch_ahb_2500(struct ast_device *ast); +void ast_patch_ahb_2500(void __iomem *regs); /* ast dp501 */ void ast_set_dp501_video_output(struct drm_device *dev, u8 mode); bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 0173cb44f17d..19c8894e391a 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -113,7 +113,7 @@ static int ast_device_config_init(struct ast_device *ast) /* Patch AST2500/AST2510 */ if ((pdev->revision & 0xf0) == 0x40) { if (!(jregd0 & AST_VRAM_INIT_STATUS_MASK)) - ast_patch_ahb_2500(ast); + ast_patch_ahb_2500(ast->regs); } /* Double check that it's actually working */ diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 7a993a384314..22f548805dfb 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -77,28 +77,42 @@ ast_set_def_ext_reg(struct drm_device *dev) ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg); } -u32 ast_mindwm(struct ast_device *ast, u32 r) +static u32 __ast_mindwm(void __iomem *regs, u32 r) { - uint32_t data; + u32 data; - ast_write32(ast, 0xf004, r & 0xffff0000); - ast_write32(ast, 0xf000, 0x1); + __ast_write32(regs, 0xf004, r & 0xffff0000); + __ast_write32(regs, 0xf000, 0x1); do { - data = ast_read32(ast, 0xf004) & 0xffff0000; + data = __ast_read32(regs, 0xf004) & 0xffff0000; } while (data != (r & 0xffff0000)); - return ast_read32(ast, 0x10000 + (r & 0x0000ffff)); + + return __ast_read32(regs, 0x10000 + (r & 0x0000ffff)); } -void ast_moutdwm(struct ast_device *ast, u32 r, u32 v) +static void __ast_moutdwm(void __iomem *regs, u32 r, u32 v) { - uint32_t data; - ast_write32(ast, 0xf004, r & 0xffff0000); - ast_write32(ast, 0xf000, 0x1); + u32 data; + + __ast_write32(regs, 0xf004, r & 0xffff0000); + __ast_write32(regs, 0xf000, 0x1); + do { - data = ast_read32(ast, 0xf004) & 0xffff0000; + data = __ast_read32(regs, 0xf004) & 0xffff0000; } while (data != (r & 0xffff0000)); - ast_write32(ast, 0x10000 + (r & 0x0000ffff), v); + + __ast_write32(regs, 0x10000 + (r & 0x0000ffff), v); +} + +u32 ast_mindwm(struct ast_device *ast, u32 r) +{ + return __ast_mindwm(ast->regs, r); +} + +void ast_moutdwm(struct ast_device *ast, u32 r, u32 v) +{ + __ast_moutdwm(ast->regs, r, v); } /* @@ -1987,17 +2001,18 @@ static bool 
ast_dram_init_2500(struct ast_device *ast) return true; } -void ast_patch_ahb_2500(struct ast_device *ast) +void ast_patch_ahb_2500(void __iomem *regs) { - u32 data; + u32 data; /* Clear bus lock condition */ - ast_moutdwm(ast, 0x1e600000, 0xAEED1A03); - ast_moutdwm(ast, 0x1e600084, 0x00010000); - ast_moutdwm(ast, 0x1e600088, 0x00000000); - ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); - data = ast_mindwm(ast, 0x1e6e2070); - if (data & 0x08000000) { /* check fast reset */ + __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03); + __ast_moutdwm(regs, 0x1e600084, 0x00010000); + __ast_moutdwm(regs, 0x1e600088, 0x00000000); + __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8); + + data = __ast_mindwm(regs, 0x1e6e2070); + if (data & 0x08000000) { /* check fast reset */ /* * If "Fast restet" is enabled for ARM-ICE debugger, * then WDT needs to enable, that @@ -2009,16 +2024,18 @@ void ast_patch_ahb_2500(struct ast_device *ast) * [1]:= 1:WDT will be cleeared and disabled after timeout occurs * [0]:= 1:WDT enable */ - ast_moutdwm(ast, 0x1E785004, 0x00000010); - ast_moutdwm(ast, 0x1E785008, 0x00004755); - ast_moutdwm(ast, 0x1E78500c, 0x00000033); + __ast_moutdwm(regs, 0x1E785004, 0x00000010); + __ast_moutdwm(regs, 0x1E785008, 0x00004755); + __ast_moutdwm(regs, 0x1E78500c, 0x00000033); udelay(1000); } + do { - ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); - data = ast_mindwm(ast, 0x1e6e2000); - } while (data != 1); - ast_moutdwm(ast, 0x1e6e207c, 0x08000000); /* clear fast reset */ + __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8); + data = __ast_mindwm(regs, 0x1e6e2000); + } while (data != 1); + + __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */ } void ast_post_chip_2500(struct drm_device *dev) @@ -2030,7 +2047,7 @@ void ast_post_chip_2500(struct drm_device *dev) reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */ /* Clear bus lock condition */ - ast_patch_ahb_2500(ast); + ast_patch_ahb_2500(ast->regs); /* Disable watchdog */ ast_moutdwm(ast, 0x1E78502C, 0x00000000); -- cgit v1.2.3 From 9f3ebec843b0f48ea2c22b7e85c34040aa7c9ee8 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Thu, 16 Nov 2023 10:59:27 +0100 Subject: drm/ast: Add enum ast_config_mode The config mode used to be a field in struct ast_device. Turn it into a named type. We'll need this for device detection. 
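With a named type, the detection code added by the follow-up patches can compute the mode locally and hand it back through an out-parameter instead of writing into struct ast_device directly. A brief sketch of that direction (simplified, assuming the usual ast_drv.h and linux/device.h includes; the real P2A probing is in the next patch):

static int example_detect_config_mode(struct device *dev,
				      enum ast_config_mode *config_mode_out)
{
	enum ast_config_mode config_mode = ast_use_defaults;

	if (dev->of_node)
		config_mode = ast_use_dt;
	/* else: probe the P2A bridge and use ast_use_p2a if it responds */

	*config_mode_out = config_mode;
	return 0;
}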
Signed-off-by: Thomas Zimmermann Reviewed-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20231116100240.22975-9-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_drv.h | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 390fbec6bc2b..4c6ee163c6d9 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -98,6 +98,12 @@ enum ast_tx_chip { #define AST_TX_DP501_BIT BIT(AST_TX_DP501) #define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP) +enum ast_config_mode { + ast_use_p2a, + ast_use_dt, + ast_use_defaults +}; + #define AST_DRAM_512Mx16 0 #define AST_DRAM_1Gx16 1 #define AST_DRAM_512Mx32 2 @@ -196,7 +202,9 @@ struct ast_device { void __iomem *ioregs; void __iomem *dp501_fw_buf; + enum ast_config_mode config_mode; enum ast_chip chip; + uint32_t dram_bus_width; uint32_t dram_type; uint32_t mclk; @@ -235,11 +243,6 @@ struct ast_device { } output; bool support_wide_screen; - enum { - ast_use_p2a, - ast_use_dt, - ast_use_defaults - } config_mode; unsigned long tx_chip_types; /* bitfield of enum ast_chip_type */ u8 *dp501_fw_addr; -- cgit v1.2.3 From 51412f869337682d0e9e640c5b424ffb8295d353 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Thu, 16 Nov 2023 10:59:28 +0100 Subject: drm/ast: Detect ast device type and config mode without ast device Return the ast chip and config in the detection function's parameters instead of storing them directly in the ast device instance. v2: * add break statements to switch default branches (Jocelyn) Signed-off-by: Thomas Zimmermann Reviewed-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20231116100240.22975-10-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_main.c | 106 +++++++++++++++++++++++------------------ 1 file changed, 59 insertions(+), 47 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 19c8894e391a..ad5b758153de 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -76,25 +76,27 @@ static void ast_open_key(void __iomem *ioregs) __ast_write8_i(ioregs, AST_IO_VGACRI, 0x80, AST_IO_VGACR80_PASSWORD); } -static int ast_device_config_init(struct ast_device *ast) +static int ast_detect_chip(struct pci_dev *pdev, + void __iomem *regs, void __iomem *ioregs, + enum ast_chip *chip_out, + enum ast_config_mode *config_mode_out) { - struct drm_device *dev = &ast->base; - struct pci_dev *pdev = to_pci_dev(dev->dev); - struct device_node *np = dev->dev->of_node; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + enum ast_config_mode config_mode = ast_use_defaults; uint32_t scu_rev = 0xffffffff; + enum ast_chip chip; u32 data; - u8 jregd0, jregd1; + u8 vgacrd0, vgacrd1; /* * Find configuration mode and read SCU revision */ - ast->config_mode = ast_use_defaults; - /* Check if we have device-tree properties */ if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) { /* We do, disable P2A access */ - ast->config_mode = ast_use_dt; + config_mode = ast_use_dt; scu_rev = data; } else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge /* @@ -102,9 +104,9 @@ static int ast_device_config_init(struct ast_device *ast) * is disabled. 
We force using P2A if VGA only mode bit * is set D[7] */ - jregd0 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); - jregd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff); - if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) { + vgacrd0 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd0); + vgacrd1 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd1); + if (!(vgacrd0 & 0x80) || !(vgacrd1 & 0x10)) { /* * We have a P2A bridge and it is enabled. @@ -112,32 +114,32 @@ static int ast_device_config_init(struct ast_device *ast) /* Patch AST2500/AST2510 */ if ((pdev->revision & 0xf0) == 0x40) { - if (!(jregd0 & AST_VRAM_INIT_STATUS_MASK)) - ast_patch_ahb_2500(ast->regs); + if (!(vgacrd0 & AST_VRAM_INIT_STATUS_MASK)) + ast_patch_ahb_2500(regs); } /* Double check that it's actually working */ - data = ast_read32(ast, 0xf004); + data = __ast_read32(regs, 0xf004); if ((data != 0xffffffff) && (data != 0x00)) { - ast->config_mode = ast_use_p2a; + config_mode = ast_use_p2a; /* Read SCU7c (silicon revision register) */ - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - scu_rev = ast_read32(ast, 0x1207c); + __ast_write32(regs, 0xf004, 0x1e6e0000); + __ast_write32(regs, 0xf000, 0x1); + scu_rev = __ast_read32(regs, 0x1207c); } } } - switch (ast->config_mode) { + switch (config_mode) { case ast_use_defaults: - drm_info(dev, "Using default configuration\n"); + dev_info(dev, "Using default configuration\n"); break; case ast_use_dt: - drm_info(dev, "Using device-tree for configuration\n"); + dev_info(dev, "Using device-tree for configuration\n"); break; case ast_use_p2a: - drm_info(dev, "Using P2A bridge for configuration\n"); + dev_info(dev, "Using P2A bridge for configuration\n"); break; } @@ -146,63 +148,68 @@ static int ast_device_config_init(struct ast_device *ast) */ if (pdev->revision >= 0x50) { - ast->chip = AST2600; - drm_info(dev, "AST 2600 detected\n"); + chip = AST2600; + dev_info(dev, "AST 2600 detected\n"); } else if (pdev->revision >= 0x40) { switch (scu_rev & 0x300) { case 0x0100: - ast->chip = AST2510; - drm_info(dev, "AST 2510 detected\n"); + chip = AST2510; + dev_info(dev, "AST 2510 detected\n"); break; default: - ast->chip = AST2500; - drm_info(dev, "AST 2500 detected\n"); + chip = AST2500; + dev_info(dev, "AST 2500 detected\n"); + break; } } else if (pdev->revision >= 0x30) { switch (scu_rev & 0x300) { case 0x0100: - ast->chip = AST1400; - drm_info(dev, "AST 1400 detected\n"); + chip = AST1400; + dev_info(dev, "AST 1400 detected\n"); break; default: - ast->chip = AST2400; - drm_info(dev, "AST 2400 detected\n"); + chip = AST2400; + dev_info(dev, "AST 2400 detected\n"); + break; } } else if (pdev->revision >= 0x20) { switch (scu_rev & 0x300) { case 0x0000: - ast->chip = AST1300; - drm_info(dev, "AST 1300 detected\n"); + chip = AST1300; + dev_info(dev, "AST 1300 detected\n"); break; default: - ast->chip = AST2300; - drm_info(dev, "AST 2300 detected\n"); + chip = AST2300; + dev_info(dev, "AST 2300 detected\n"); break; } } else if (pdev->revision >= 0x10) { switch (scu_rev & 0x0300) { case 0x0200: - ast->chip = AST1100; - drm_info(dev, "AST 1100 detected\n"); + chip = AST1100; + dev_info(dev, "AST 1100 detected\n"); break; case 0x0100: - ast->chip = AST2200; - drm_info(dev, "AST 2200 detected\n"); + chip = AST2200; + dev_info(dev, "AST 2200 detected\n"); break; case 0x0000: - ast->chip = AST2150; - drm_info(dev, "AST 2150 detected\n"); + chip = AST2150; + dev_info(dev, "AST 2150 detected\n"); break; default: - ast->chip = AST2100; - drm_info(dev, "AST 2100 detected\n"); + chip 
= AST2100; + dev_info(dev, "AST 2100 detected\n"); break; } } else { - ast->chip = AST2000; - drm_info(dev, "AST 2000 detected\n"); + chip = AST2000; + dev_info(dev, "AST 2000 detected\n"); } + *chip_out = chip; + *config_mode_out = config_mode; + return 0; } @@ -431,6 +438,8 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, int ret = 0; void __iomem *regs; void __iomem *ioregs; + enum ast_config_mode config_mode; + enum ast_chip chip; ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); if (IS_ERR(ast)) @@ -502,10 +511,13 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, if (ret) return ERR_PTR(ret); - ret = ast_device_config_init(ast); + ret = ast_detect_chip(pdev, regs, ioregs, &chip, &config_mode); if (ret) return ERR_PTR(ret); + ast->chip = chip; + ast->config_mode = config_mode; + ast_detect_widescreen(ast); ast_detect_tx_chip(ast, need_post); -- cgit v1.2.3 From 83dc1029dcf50b5b849b26679a1b3f860b85d79c Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Thu, 16 Nov 2023 10:59:29 +0100 Subject: drm/ast: Move detection code into PCI probe helper Detect device type and config mode in the PCI probe helper, but leave DRM device initialization where it is. Structures the driver probe and setup code into a detection and an initialization phase. A later patch can add branching to the device-initialization code. Each chip type can have it own initializer function, if necessary. Signed-off-by: Thomas Zimmermann Reviewed-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20231116100240.22975-11-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_drv.c | 263 +++++++++++++++++++++++++++++++++++++-- drivers/gpu/drm/ast/ast_drv.h | 10 +- drivers/gpu/drm/ast/ast_main.c | 270 ++--------------------------------------- 3 files changed, 274 insertions(+), 269 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index cf5b754f044c..90bcb1eb9cd9 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -89,11 +89,194 @@ static const struct pci_device_id ast_pciidlist[] = { MODULE_DEVICE_TABLE(pci, ast_pciidlist); +static bool ast_is_vga_enabled(void __iomem *ioregs) +{ + u8 vgaer = __ast_read8(ioregs, AST_IO_VGAER); + + return vgaer & AST_IO_VGAER_VGA_ENABLE; +} + +static void ast_enable_vga(void __iomem *ioregs) +{ + __ast_write8(ioregs, AST_IO_VGAER, AST_IO_VGAER_VGA_ENABLE); + __ast_write8(ioregs, AST_IO_VGAMR_W, AST_IO_VGAMR_IOSEL); +} + +/* + * Run this function as part of the HW device cleanup; not + * when the DRM device gets released. 
+ */ +static void ast_enable_mmio_release(void *data) +{ + void __iomem *ioregs = (void __force __iomem *)data; + + /* enable standard VGA decode */ + __ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1, AST_IO_VGACRA1_MMIO_ENABLED); +} + +static int ast_enable_mmio(struct device *dev, void __iomem *ioregs) +{ + void *data = (void __force *)ioregs; + + __ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1, + AST_IO_VGACRA1_MMIO_ENABLED | + AST_IO_VGACRA1_VGAIO_DISABLED); + + return devm_add_action_or_reset(dev, ast_enable_mmio_release, data); +} + +static void ast_open_key(void __iomem *ioregs) +{ + __ast_write8_i(ioregs, AST_IO_VGACRI, 0x80, AST_IO_VGACR80_PASSWORD); +} + +static int ast_detect_chip(struct pci_dev *pdev, + void __iomem *regs, void __iomem *ioregs, + enum ast_chip *chip_out, + enum ast_config_mode *config_mode_out) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + enum ast_config_mode config_mode = ast_use_defaults; + uint32_t scu_rev = 0xffffffff; + enum ast_chip chip; + u32 data; + u8 vgacrd0, vgacrd1; + + /* + * Find configuration mode and read SCU revision + */ + + /* Check if we have device-tree properties */ + if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) { + /* We do, disable P2A access */ + config_mode = ast_use_dt; + scu_rev = data; + } else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge + /* + * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge + * is disabled. We force using P2A if VGA only mode bit + * is set D[7] + */ + vgacrd0 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd0); + vgacrd1 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd1); + if (!(vgacrd0 & 0x80) || !(vgacrd1 & 0x10)) { + + /* + * We have a P2A bridge and it is enabled. + */ + + /* Patch AST2500/AST2510 */ + if ((pdev->revision & 0xf0) == 0x40) { + if (!(vgacrd0 & AST_VRAM_INIT_STATUS_MASK)) + ast_patch_ahb_2500(regs); + } + + /* Double check that it's actually working */ + data = __ast_read32(regs, 0xf004); + if ((data != 0xffffffff) && (data != 0x00)) { + config_mode = ast_use_p2a; + + /* Read SCU7c (silicon revision register) */ + __ast_write32(regs, 0xf004, 0x1e6e0000); + __ast_write32(regs, 0xf000, 0x1); + scu_rev = __ast_read32(regs, 0x1207c); + } + } + } + + switch (config_mode) { + case ast_use_defaults: + dev_info(dev, "Using default configuration\n"); + break; + case ast_use_dt: + dev_info(dev, "Using device-tree for configuration\n"); + break; + case ast_use_p2a: + dev_info(dev, "Using P2A bridge for configuration\n"); + break; + } + + /* + * Identify chipset + */ + + if (pdev->revision >= 0x50) { + chip = AST2600; + dev_info(dev, "AST 2600 detected\n"); + } else if (pdev->revision >= 0x40) { + switch (scu_rev & 0x300) { + case 0x0100: + chip = AST2510; + dev_info(dev, "AST 2510 detected\n"); + break; + default: + chip = AST2500; + dev_info(dev, "AST 2500 detected\n"); + break; + } + } else if (pdev->revision >= 0x30) { + switch (scu_rev & 0x300) { + case 0x0100: + chip = AST1400; + dev_info(dev, "AST 1400 detected\n"); + break; + default: + chip = AST2400; + dev_info(dev, "AST 2400 detected\n"); + break; + } + } else if (pdev->revision >= 0x20) { + switch (scu_rev & 0x300) { + case 0x0000: + chip = AST1300; + dev_info(dev, "AST 1300 detected\n"); + break; + default: + chip = AST2300; + dev_info(dev, "AST 2300 detected\n"); + break; + } + } else if (pdev->revision >= 0x10) { + switch (scu_rev & 0x0300) { + case 0x0200: + chip = AST1100; + dev_info(dev, "AST 1100 detected\n"); + break; + case 0x0100: + chip = AST2200; + 
dev_info(dev, "AST 2200 detected\n"); + break; + case 0x0000: + chip = AST2150; + dev_info(dev, "AST 2150 detected\n"); + break; + default: + chip = AST2100; + dev_info(dev, "AST 2100 detected\n"); + break; + } + } else { + chip = AST2000; + dev_info(dev, "AST 2000 detected\n"); + } + + *chip_out = chip; + *config_mode_out = config_mode; + + return 0; +} + static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct ast_device *ast; - struct drm_device *dev; + struct device *dev = &pdev->dev; int ret; + void __iomem *regs; + void __iomem *ioregs; + enum ast_config_mode config_mode; + enum ast_chip chip; + struct drm_device *drm; + bool need_post = false; ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &ast_driver); if (ret) @@ -103,16 +286,80 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; - ast = ast_device_create(&ast_driver, pdev, ent->driver_data); - if (IS_ERR(ast)) - return PTR_ERR(ast); - dev = &ast->base; + regs = pcim_iomap(pdev, 1, 0); + if (!regs) + return -EIO; + + if (pdev->revision >= 0x40) { + /* + * On AST2500 and later models, MMIO is enabled by + * default. Adopt it to be compatible with ARM. + */ + resource_size_t len = pci_resource_len(pdev, 1); + + if (len < AST_IO_MM_OFFSET) + return -EIO; + if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH) + return -EIO; + ioregs = regs + AST_IO_MM_OFFSET; + } else if (pci_resource_flags(pdev, 2) & IORESOURCE_IO) { + /* + * Map I/O registers if we have a PCI BAR for I/O. + */ + resource_size_t len = pci_resource_len(pdev, 2); + + if (len < AST_IO_MM_LENGTH) + return -EIO; + ioregs = pcim_iomap(pdev, 2, 0); + if (!ioregs) + return -EIO; + } else { + /* + * Anything else is best effort. + */ + resource_size_t len = pci_resource_len(pdev, 1); + + if (len < AST_IO_MM_OFFSET) + return -EIO; + if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH) + return -EIO; + ioregs = regs + AST_IO_MM_OFFSET; + + dev_info(dev, "Platform has no I/O space, using MMIO\n"); + } + + if (!ast_is_vga_enabled(ioregs)) { + dev_info(dev, "VGA not enabled on entry, requesting chip POST\n"); + need_post = true; + } + + /* + * If VGA isn't enabled, we need to enable now or subsequent + * access to the scratch registers will fail. 
+ */ + if (need_post) + ast_enable_vga(ioregs); + /* Enable extended register access */ + ast_open_key(ioregs); + + ret = ast_enable_mmio(dev, ioregs); + if (ret) + return ret; + + ret = ast_detect_chip(pdev, regs, ioregs, &chip, &config_mode); + if (ret) + return ret; + + drm = ast_device_create(pdev, &ast_driver, chip, config_mode, regs, ioregs, need_post); + if (IS_ERR(drm)) + return PTR_ERR(drm); + pci_set_drvdata(pdev, drm); - ret = drm_dev_register(dev, ent->driver_data); + ret = drm_dev_register(drm, ent->driver_data); if (ret) return ret; - drm_fbdev_generic_setup(dev, 32); + drm_fbdev_generic_setup(drm, 32); return 0; } diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 4c6ee163c6d9..3be5ccf1f5f4 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -254,9 +254,13 @@ static inline struct ast_device *to_ast_device(struct drm_device *dev) return container_of(dev, struct ast_device, base); } -struct ast_device *ast_device_create(const struct drm_driver *drv, - struct pci_dev *pdev, - unsigned long flags); +struct drm_device *ast_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); static inline unsigned long __ast_gen(struct ast_device *ast) { diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index ad5b758153de..2f3ad5f949fc 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -35,184 +35,6 @@ #include "ast_drv.h" -static bool ast_is_vga_enabled(void __iomem *ioregs) -{ - u8 vgaer = __ast_read8(ioregs, AST_IO_VGAER); - - return vgaer & AST_IO_VGAER_VGA_ENABLE; -} - -static void ast_enable_vga(void __iomem *ioregs) -{ - __ast_write8(ioregs, AST_IO_VGAER, AST_IO_VGAER_VGA_ENABLE); - __ast_write8(ioregs, AST_IO_VGAMR_W, AST_IO_VGAMR_IOSEL); -} - -/* - * Run this function as part of the HW device cleanup; not - * when the DRM device gets released. - */ -static void ast_enable_mmio_release(void *data) -{ - void __iomem *ioregs = (void __force __iomem *)data; - - /* enable standard VGA decode */ - __ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1, AST_IO_VGACRA1_MMIO_ENABLED); -} - -static int ast_enable_mmio(struct device *dev, void __iomem *ioregs) -{ - void *data = (void __force *)ioregs; - - __ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1, - AST_IO_VGACRA1_MMIO_ENABLED | - AST_IO_VGACRA1_VGAIO_DISABLED); - - return devm_add_action_or_reset(dev, ast_enable_mmio_release, data); -} - -static void ast_open_key(void __iomem *ioregs) -{ - __ast_write8_i(ioregs, AST_IO_VGACRI, 0x80, AST_IO_VGACR80_PASSWORD); -} - -static int ast_detect_chip(struct pci_dev *pdev, - void __iomem *regs, void __iomem *ioregs, - enum ast_chip *chip_out, - enum ast_config_mode *config_mode_out) -{ - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; - enum ast_config_mode config_mode = ast_use_defaults; - uint32_t scu_rev = 0xffffffff; - enum ast_chip chip; - u32 data; - u8 vgacrd0, vgacrd1; - - /* - * Find configuration mode and read SCU revision - */ - - /* Check if we have device-tree properties */ - if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) { - /* We do, disable P2A access */ - config_mode = ast_use_dt; - scu_rev = data; - } else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge - /* - * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge - * is disabled. 
We force using P2A if VGA only mode bit - * is set D[7] - */ - vgacrd0 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd0); - vgacrd1 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd1); - if (!(vgacrd0 & 0x80) || !(vgacrd1 & 0x10)) { - - /* - * We have a P2A bridge and it is enabled. - */ - - /* Patch AST2500/AST2510 */ - if ((pdev->revision & 0xf0) == 0x40) { - if (!(vgacrd0 & AST_VRAM_INIT_STATUS_MASK)) - ast_patch_ahb_2500(regs); - } - - /* Double check that it's actually working */ - data = __ast_read32(regs, 0xf004); - if ((data != 0xffffffff) && (data != 0x00)) { - config_mode = ast_use_p2a; - - /* Read SCU7c (silicon revision register) */ - __ast_write32(regs, 0xf004, 0x1e6e0000); - __ast_write32(regs, 0xf000, 0x1); - scu_rev = __ast_read32(regs, 0x1207c); - } - } - } - - switch (config_mode) { - case ast_use_defaults: - dev_info(dev, "Using default configuration\n"); - break; - case ast_use_dt: - dev_info(dev, "Using device-tree for configuration\n"); - break; - case ast_use_p2a: - dev_info(dev, "Using P2A bridge for configuration\n"); - break; - } - - /* - * Identify chipset - */ - - if (pdev->revision >= 0x50) { - chip = AST2600; - dev_info(dev, "AST 2600 detected\n"); - } else if (pdev->revision >= 0x40) { - switch (scu_rev & 0x300) { - case 0x0100: - chip = AST2510; - dev_info(dev, "AST 2510 detected\n"); - break; - default: - chip = AST2500; - dev_info(dev, "AST 2500 detected\n"); - break; - } - } else if (pdev->revision >= 0x30) { - switch (scu_rev & 0x300) { - case 0x0100: - chip = AST1400; - dev_info(dev, "AST 1400 detected\n"); - break; - default: - chip = AST2400; - dev_info(dev, "AST 2400 detected\n"); - break; - } - } else if (pdev->revision >= 0x20) { - switch (scu_rev & 0x300) { - case 0x0000: - chip = AST1300; - dev_info(dev, "AST 1300 detected\n"); - break; - default: - chip = AST2300; - dev_info(dev, "AST 2300 detected\n"); - break; - } - } else if (pdev->revision >= 0x10) { - switch (scu_rev & 0x0300) { - case 0x0200: - chip = AST1100; - dev_info(dev, "AST 1100 detected\n"); - break; - case 0x0100: - chip = AST2200; - dev_info(dev, "AST 2200 detected\n"); - break; - case 0x0000: - chip = AST2150; - dev_info(dev, "AST 2150 detected\n"); - break; - default: - chip = AST2100; - dev_info(dev, "AST 2100 detected\n"); - break; - } - } else { - chip = AST2000; - dev_info(dev, "AST 2000 detected\n"); - } - - *chip_out = chip; - *config_mode_out = config_mode; - - return 0; -} - static void ast_detect_widescreen(struct ast_device *ast) { u8 jreg; @@ -428,95 +250,27 @@ static int ast_get_dram_info(struct drm_device *dev) return 0; } -struct ast_device *ast_device_create(const struct drm_driver *drv, - struct pci_dev *pdev, - unsigned long flags) +struct drm_device *ast_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) { struct drm_device *dev; struct ast_device *ast; - bool need_post = false; - int ret = 0; - void __iomem *regs; - void __iomem *ioregs; - enum ast_config_mode config_mode; - enum ast_chip chip; + int ret; ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); if (IS_ERR(ast)) - return ast; + return ERR_CAST(ast); dev = &ast->base; - pci_set_drvdata(pdev, dev); - - regs = pcim_iomap(pdev, 1, 0); - if (!regs) - return ERR_PTR(-EIO); - - if (pdev->revision >= 0x40) { - /* - * On AST2500 and later models, MMIO is enabled by - * default. Adopt it to be compatible with ARM. 
- */ - resource_size_t len = pci_resource_len(pdev, 1); - - if (len < AST_IO_MM_OFFSET) - return ERR_PTR(-EIO); - if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH) - return ERR_PTR(-EIO); - ioregs = regs + AST_IO_MM_OFFSET; - } else if (pci_resource_flags(pdev, 2) & IORESOURCE_IO) { - /* - * Map I/O registers if we have a PCI BAR for I/O. - */ - resource_size_t len = pci_resource_len(pdev, 2); - - if (len < AST_IO_MM_LENGTH) - return -EIO; - ioregs = pcim_iomap(pdev, 2, 0); - if (!ioregs) - return ERR_PTR(-EIO); - } else { - /* - * Anything else is best effort. - */ - resource_size_t len = pci_resource_len(pdev, 1); - - if (len < AST_IO_MM_OFFSET) - return ERR_PTR(-EIO); - if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH) - return ERR_PTR(-EIO); - ioregs = regs + AST_IO_MM_OFFSET; - - drm_info(dev, "Platform has no I/O space, using MMIO\n"); - } - - ast->regs = regs; - ast->ioregs = ioregs; - - if (!ast_is_vga_enabled(ioregs)) { - drm_info(dev, "VGA not enabled on entry, requesting chip POST\n"); - need_post = true; - } - - /* - * If VGA isn't enabled, we need to enable now or subsequent - * access to the scratch registers will fail. - */ - if (need_post) - ast_enable_vga(ioregs); - /* Enable extended register access */ - ast_open_key(ioregs); - - ret = ast_enable_mmio(&pdev->dev, ioregs); - if (ret) - return ERR_PTR(ret); - - ret = ast_detect_chip(pdev, regs, ioregs, &chip, &config_mode); - if (ret) - return ERR_PTR(ret); - ast->chip = chip; ast->config_mode = config_mode; + ast->regs = regs; + ast->ioregs = ioregs; ast_detect_widescreen(ast); ast_detect_tx_chip(ast, need_post); @@ -547,5 +301,5 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, if (ret) return ERR_PTR(ret); - return ast; + return dev; } -- cgit v1.2.3 From 288b039db225676e0c520c981a1b5a2562d893a3 Mon Sep 17 00:00:00 2001 From: Dario Binacchi Date: Fri, 24 Nov 2023 10:42:30 +0100 Subject: drm/bridge: Fix typo in post_disable() description s/singals/signals/ Fixes: 199e4e967af4 ("drm: Extract drm_bridge.h") Signed-off-by: Dario Binacchi Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20231124094253.658064-1-dario.binacchi@amarulasolutions.com --- include/drm/drm_bridge.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index cfb7dcdb66c4..9ef461aa9b9e 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -194,7 +194,7 @@ struct drm_bridge_funcs { * or &drm_encoder_helper_funcs.dpms hook. * * The bridge must assume that the display pipe (i.e. clocks and timing - * singals) feeding it is no longer running when this callback is + * signals) feeding it is no longer running when this callback is * called. * * The @post_disable callback is optional. -- cgit v1.2.3 From 325b71e820b67569048c621227266783442b75ed Mon Sep 17 00:00:00 2001 From: Liu Ying Date: Thu, 23 Nov 2023 13:18:07 +0800 Subject: drm/bridge: imx93-mipi-dsi: Fix a couple of building warnings Fix a couple of building warnings on used uninitialized 'best_m' and 'best_n' local variables by initializing 'best_m' to zero and 'best_n' to UINT_MAX. This makes compiler happy only. No functional change. 
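The warning comes from the usual best-candidate search pattern, in which the trackers are only assigned inside a conditional in the loop body, so the compiler cannot prove they are ever written. A minimal, hypothetical sketch of that pattern (not the driver's actual PLL code) shows why pre-initializing the trackers is a warning-only change:

    #include <limits.h>

    /*
     * Hypothetical divider search; mirrors the shape that triggers
     * -Wmaybe-uninitialized when best_n is only assigned inside the branch.
     */
    static unsigned int pick_divider(unsigned long target)
    {
            unsigned long delta, best_delta = ULONG_MAX;
            unsigned int n, best_n = UINT_MAX;  /* initialized up front, as in the fix */

            for (n = 1; n <= 16; n++) {
                    delta = (target > n) ? target - n : n - target;
                    if (delta < best_delta) {   /* compiler cannot prove this is ever taken */
                            best_delta = delta;
                            best_n = n;
                    }
            }

            return best_n;
    }
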
Fixes: ce62f8ea7e3f ("drm/bridge: imx: Add i.MX93 MIPI DSI support") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202311151746.f7u7dzbZ-lkp@intel.com/ Signed-off-by: Liu Ying Reviewed-by: Robert Foss Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20231123051807.3818342-1-victor.liu@nxp.com --- drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c index 3ff30ce80c5b..2347f8dd632f 100644 --- a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c @@ -226,8 +226,8 @@ dphy_pll_get_configure_from_opts(struct imx93_dsi *dsi, unsigned long fout; unsigned long best_fout = 0; unsigned int fvco_div; - unsigned int min_n, max_n, n, best_n; - unsigned long m, best_m; + unsigned int min_n, max_n, n, best_n = UINT_MAX; + unsigned long m, best_m = 0; unsigned long min_delta = ULONG_MAX; unsigned long delta; u64 tmp; -- cgit v1.2.3 From 3cc808e3239cf566b3d3b15cf2beee066b60f241 Mon Sep 17 00:00:00 2001 From: Donald Robson Date: Tue, 28 Nov 2023 17:35:07 +0000 Subject: drm/imagination: Numerous documentation fixes. Some reported by Stephen Rothwell. The rest were found by running the kernel-doc build script. Some indentation fixes. Reported-by: Stephen Rothwell Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202311241526.Y2WZeUau-lkp@intel.com/ Signed-off-by: Donald Robson Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20231128173507.95119-1-donald.robson@imgtec.com --- Documentation/gpu/imagination/index.rst | 2 +- Documentation/gpu/imagination/uapi.rst | 5 +---- drivers/gpu/drm/imagination/pvr_cccb.h | 1 + drivers/gpu/drm/imagination/pvr_device.h | 25 ++++++++++++++++++------- drivers/gpu/drm/imagination/pvr_fw.h | 3 ++- drivers/gpu/drm/imagination/pvr_fw_info.h | 8 ++++---- drivers/gpu/drm/imagination/pvr_hwrt.h | 1 + drivers/gpu/drm/imagination/pvr_job.c | 4 +--- drivers/gpu/drm/imagination/pvr_mmu.c | 3 ++- drivers/gpu/drm/imagination/pvr_queue.h | 4 ++-- drivers/gpu/drm/imagination/pvr_vm.c | 2 +- include/uapi/drm/pvr_drm.h | 10 ++++------ 12 files changed, 38 insertions(+), 30 deletions(-) diff --git a/Documentation/gpu/imagination/index.rst b/Documentation/gpu/imagination/index.rst index dc9579e758c3..0c1e247cea41 100644 --- a/Documentation/gpu/imagination/index.rst +++ b/Documentation/gpu/imagination/index.rst @@ -3,7 +3,7 @@ drm/imagination PowerVR Graphics Driver ======================================= .. kernel-doc:: drivers/gpu/drm/imagination/pvr_drv.c - :doc: PowerVR Graphics Driver + :doc: PowerVR (Series 6 and later) and IMG Graphics Driver Contents ======== diff --git a/Documentation/gpu/imagination/uapi.rst b/Documentation/gpu/imagination/uapi.rst index 2227ea7e6222..7502413d0a93 100644 --- a/Documentation/gpu/imagination/uapi.rst +++ b/Documentation/gpu/imagination/uapi.rst @@ -45,9 +45,6 @@ DEV_QUERY drm_pvr_heap drm_pvr_dev_query_heap_info -.. kernel-doc:: include/uapi/drm/pvr_drm.h - :doc: Flags for DRM_PVR_DEV_QUERY_HEAP_INFO_GET. - .. 
kernel-doc:: include/uapi/drm/pvr_drm.h :identifiers: drm_pvr_static_data_area_usage drm_pvr_static_data_area @@ -121,7 +118,7 @@ CREATE_FREE_LIST and DESTROY_FREE_LIST :identifiers: drm_pvr_ioctl_destroy_free_list_args CREATE_HWRT_DATASET and DESTROY_HWRT_DATASET --------------------------------------- +-------------------------------------------- .. kernel-doc:: include/uapi/drm/pvr_drm.h :doc: PowerVR IOCTL CREATE_HWRT_DATASET and DESTROY_HWRT_DATASET interfaces diff --git a/drivers/gpu/drm/imagination/pvr_cccb.h b/drivers/gpu/drm/imagination/pvr_cccb.h index f35b3d4c9575..943fe8f2c963 100644 --- a/drivers/gpu/drm/imagination/pvr_cccb.h +++ b/drivers/gpu/drm/imagination/pvr_cccb.h @@ -86,6 +86,7 @@ pvr_cccb_get_size_of_cmd_with_hdr(u32 cmd_size) /** * pvr_cccb_cmdseq_can_fit() - Check if a command sequence can fit in the CCCB. + * @pvr_cccb: Target Client CCB. * @size: Command sequence size. * * Returns: diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h index e07655fc65e8..2ca7e535799f 100644 --- a/drivers/gpu/drm/imagination/pvr_device.h +++ b/drivers/gpu/drm/imagination/pvr_device.h @@ -203,17 +203,29 @@ struct pvr_device { struct mutex lock; } queues; + /** + * @watchdog: Watchdog for communications with firmware. + */ struct { /** @work: Work item for watchdog callback. */ struct delayed_work work; - /** @old_kccb_cmds_executed: KCCB command execution count at last watchdog poll. */ + /** + * @old_kccb_cmds_executed: KCCB command execution count at last + * watchdog poll. + */ u32 old_kccb_cmds_executed; - /** @kccb_stall_count: Number of watchdog polls KCCB has been stalled for. */ + /** + * @kccb_stall_count: Number of watchdog polls KCCB has been + * stalled for. + */ u32 kccb_stall_count; } watchdog; + /** + * @kccb: Circular buffer for communications with firmware. + */ struct { /** @ccb: Kernel CCB. */ struct pvr_ccb ccb; @@ -225,8 +237,8 @@ struct pvr_device { struct pvr_fw_object *rtn_obj; /** - * @rtn: Pointer to CPU mapping of KCCB return slots. Must be accessed by - * READ_ONCE()/WRITE_ONCE(). + * @rtn: Pointer to CPU mapping of KCCB return slots. Must be + * accessed by READ_ONCE()/WRITE_ONCE(). */ u32 *rtn; @@ -293,14 +305,13 @@ struct pvr_file { /** * @pvr_dev: A reference to the powervr-specific wrapper for the - * associated device. Saves on repeated calls to - * to_pvr_device(). + * associated device. Saves on repeated calls to to_pvr_device(). */ struct pvr_device *pvr_dev; /** * @ctx_handles: Array of contexts belonging to this file. Array members - * are of type "struct pvr_context *". + * are of type "struct pvr_context *". * * This array is used to allocate handles returned to userspace. */ diff --git a/drivers/gpu/drm/imagination/pvr_fw.h b/drivers/gpu/drm/imagination/pvr_fw.h index 5cd3ef08d82b..b7966bd574a9 100644 --- a/drivers/gpu/drm/imagination/pvr_fw.h +++ b/drivers/gpu/drm/imagination/pvr_fw.h @@ -481,7 +481,8 @@ pvr_fw_object_unmap_and_destroy(struct pvr_fw_object *fw_obj) } /** - * pvr_fw_get_dma_addr() - Get DMA address for given offset in firmware object + * pvr_fw_object_get_dma_addr() - Get DMA address for given offset in firmware + * object. * @fw_obj: Pointer to object to lookup address in. * @offset: Offset within object to lookup address at. * @dma_addr_out: Pointer to location to store DMA address. 
diff --git a/drivers/gpu/drm/imagination/pvr_fw_info.h b/drivers/gpu/drm/imagination/pvr_fw_info.h index ad5d44a3067a..c3639440610e 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_info.h +++ b/drivers/gpu/drm/imagination/pvr_fw_info.h @@ -122,13 +122,13 @@ struct pvr_fw_layout_entry { * struct pvr_fw_device_info_header - Device information header. */ struct pvr_fw_device_info_header { - /* BRN Mask size (in u64s). */ + /** @brn_mask_size: BRN mask size (in u64s). */ u64 brn_mask_size; - /* ERN Mask size (in u64s). */ + /** @ern_mask_size: ERN mask size (in u64s). */ u64 ern_mask_size; - /* Feature Mask size (in u64s). */ + /** @feature_mask_size: Feature mask size (in u64s). */ u64 feature_mask_size; - /* Feature Parameter size (in u64s). */ + /** @feature_param_size: Feature parameter size (in u64s). */ u64 feature_param_size; }; diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.h b/drivers/gpu/drm/imagination/pvr_hwrt.h index 76992948d047..676070b20c3b 100644 --- a/drivers/gpu/drm/imagination/pvr_hwrt.h +++ b/drivers/gpu/drm/imagination/pvr_hwrt.h @@ -64,6 +64,7 @@ struct pvr_hwrt_dataset { /** @common_fw_obj: FW object representing common FW-side structure. */ struct pvr_fw_object *common_fw_obj; + /** @common: Common HWRT data. */ struct rogue_fwif_hwrtdata_common common; /** @data: HWRT data structures belonging to this set. */ diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c index 9d0812710295..04139da6c04d 100644 --- a/drivers/gpu/drm/imagination/pvr_job.c +++ b/drivers/gpu/drm/imagination/pvr_job.c @@ -378,7 +378,7 @@ prepare_job_syncs(struct pvr_file *pvr_file, /** * prepare_job_syncs_for_each() - Prepare all sync objects for an array of jobs. - * @file: PowerVR file. + * @pvr_file: PowerVR file. * @job_data: Array of precreated jobs and their sync_ops. * @job_count: Number of jobs. * @signal_array: xarray to receive signal sync objects. @@ -696,8 +696,6 @@ pvr_jobs_link_geom_frag(struct pvr_job_data *job_data, u32 *job_count) * @pvr_dev: Target PowerVR device. * @pvr_file: Pointer to PowerVR file structure. * @args: Ioctl args. - * @job_count: Number of jobs in @jobs_args. On error this will be updated - * with the index into @jobs_args where the error occurred. * * This initial implementation is entirely synchronous; on return the GPU will * be idle. This will not be the case for future implementations. diff --git a/drivers/gpu/drm/imagination/pvr_mmu.c b/drivers/gpu/drm/imagination/pvr_mmu.c index b71d30e5f380..c8562bfc0dcd 100644 --- a/drivers/gpu/drm/imagination/pvr_mmu.c +++ b/drivers/gpu/drm/imagination/pvr_mmu.c @@ -335,8 +335,9 @@ pvr_mmu_backing_page_fini(struct pvr_mmu_backing_page *page) /** * pvr_mmu_backing_page_sync() - Flush a MMU backing page from the CPU to the - * device. + * device. * @page: Target backing page. + * @flags: MMU flush flags. Must be one of %PVR_MMU_SYNC_LEVEL_*_FLAGS. * * .. caution:: * diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h index b5ce2c742150..e06ced69302f 100644 --- a/drivers/gpu/drm/imagination/pvr_queue.h +++ b/drivers/gpu/drm/imagination/pvr_queue.h @@ -50,7 +50,7 @@ struct pvr_queue_cccb_fence_ctx { */ struct pvr_job *job; - /** @lock: Lock protecting access to the job object. */ + /** @job_lock: Lock protecting access to the job object. 
*/ struct mutex job_lock; }; @@ -114,7 +114,7 @@ struct pvr_queue { } timeline_ufo; /** - * last_queued_job_scheduled_fence: The scheduled fence of the last + * @last_queued_job_scheduled_fence: The scheduled fence of the last * job queued to this queue. * * We use it to insert frag -> geom dependencies when issuing combined diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index e0d74d9a6190..2aab53594a77 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -42,7 +42,7 @@ struct pvr_vm_context { /** @mmu_ctx: The context for binding to physical memory. */ struct pvr_mmu_context *mmu_ctx; - /** @gpuva_mgr: GPUVA manager object associated with this context. */ + /** @gpuvm_mgr: GPUVM object associated with this context. */ struct drm_gpuvm gpuvm_mgr; /** @lock: Global lock on this VM. */ diff --git a/include/uapi/drm/pvr_drm.h b/include/uapi/drm/pvr_drm.h index 1834375390c4..ccf6c2112468 100644 --- a/include/uapi/drm/pvr_drm.h +++ b/include/uapi/drm/pvr_drm.h @@ -741,20 +741,18 @@ enum drm_pvr_ctx_priority { */ enum drm_pvr_ctx_type { /** - * @DRM_PVR_CTX_TYPE_RENDER: Render context. Use &struct - * drm_pvr_ioctl_create_render_context_args for context creation arguments. + * @DRM_PVR_CTX_TYPE_RENDER: Render context. */ DRM_PVR_CTX_TYPE_RENDER = 0, /** - * @DRM_PVR_CTX_TYPE_COMPUTE: Compute context. Use &struct - * drm_pvr_ioctl_create_compute_context_args for context creation arguments. + * @DRM_PVR_CTX_TYPE_COMPUTE: Compute context. */ DRM_PVR_CTX_TYPE_COMPUTE, /** - * @DRM_PVR_CTX_TYPE_TRANSFER_FRAG: Transfer context for fragment data masters. Use - * &struct drm_pvr_ioctl_create_transfer_context_args for context creation arguments. + * @DRM_PVR_CTX_TYPE_TRANSFER_FRAG: Transfer context for fragment data + * master. */ DRM_PVR_CTX_TYPE_TRANSFER_FRAG, }; -- cgit v1.2.3 From f92a39ae47076ea123c7980fb85e6e33313f372e Mon Sep 17 00:00:00 2001 From: Bert Karwatzki Date: Mon, 27 Nov 2023 17:09:55 +0100 Subject: drm/sched: Partial revert of "Qualify drm_sched_wakeup() by drm_sched_entity_is_ready()" Commit f3123c2590005c, in combination with the use of work queues by the GPU scheduler, leads to random lock-ups of the GUI. This is a partial revert of of commit f3123c2590005c since drm_sched_wakeup() still needs its entity argument to pass it to drm_sched_can_queue(). 
Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2994 Link: https://lists.freedesktop.org/archives/dri-devel/2023-November/431606.html Signed-off-by: Bert Karwatzki Link: https://patchwork.freedesktop.org/patch/msgid/20231127160955.87879-1-spasswolf@web.de Link: https://lore.kernel.org/r/36bece178ff5dc705065e53d1e5e41f6db6d87e4.camel@web.de Fixes: f3123c2590005c ("drm/sched: Qualify drm_sched_wakeup() by drm_sched_entity_is_ready()") Reviewed-by: Luben Tuikov Signed-off-by: Luben Tuikov --- drivers/gpu/drm/scheduler/sched_main.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 682aebe96db7..550492a7a031 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -1029,9 +1029,8 @@ EXPORT_SYMBOL(drm_sched_job_cleanup); void drm_sched_wakeup(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity) { - if (drm_sched_entity_is_ready(entity)) - if (drm_sched_can_queue(sched, entity)) - drm_sched_run_job_queue(sched); + if (drm_sched_can_queue(sched, entity)) + drm_sched_run_job_queue(sched); } /** -- cgit v1.2.3 From 012e3208ab8d1456379d047d88d69ab7e074a520 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Thu, 2 Nov 2023 17:56:57 +0100 Subject: drm/tilcdc: Convert to platform remove callback returning void MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The .remove() callback for a platform driver returns an int which makes many driver authors wrongly assume it's possible to do error handling by returning an error code. However the value returned is (mostly) ignored and this typically results in resource leaks. To improve here there is a quest to make the remove callback return void. In the first step of this quest all drivers are converted to .remove_new() which already returns void. There is one error path in tilcdc_pdev_remove() that potentially could yield a non-zero return code. In this case an error message describing the failure is emitted now instead of remove callback returned a non-zero value. This will be ignored. before. Otherwise there is no difference. Also note that currently tilcdc_get_external_components() doesn't return negative values. 
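The conversion follows the same shape in every platform driver; a minimal sketch for a made-up driver (hypothetical names, only the .remove_new plumbing reflects the real API) looks like this:

    #include <linux/err.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            return 0;
    }

    /* Hypothetical cleanup step that may fail. */
    static int foo_teardown(struct platform_device *pdev)
    {
            return 0;
    }

    /* New-style callback: returns void, so failures can only be reported. */
    static void foo_remove(struct platform_device *pdev)
    {
            int ret = foo_teardown(pdev);

            if (ret)
                    dev_err(&pdev->dev, "teardown failed (%pe)\n", ERR_PTR(ret));
    }

    static struct platform_driver foo_driver = {
            .probe = foo_probe,
            .remove_new = foo_remove,
            .driver = {
                    .name = "foo-example",
            },
    };
    module_platform_driver(foo_driver);
    MODULE_LICENSE("GPL");

Runtime behaviour does not change, since the driver core already ignored the value returned by the old callback.
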
Signed-off-by: Uwe Kleine-König Signed-off-by: Jyri Sarha Link: https://patchwork.freedesktop.org/patch/msgid/20231102165640.3307820-34-u.kleine-koenig@pengutronix.de Reviewed-by: Tomi Valkeinen Tested-by: Jyri Sarha --- drivers/gpu/drm/tilcdc/tilcdc_drv.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 2f6eaac7f659..23bf16f596f6 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -570,19 +570,18 @@ static int tilcdc_pdev_probe(struct platform_device *pdev) match); } -static int tilcdc_pdev_remove(struct platform_device *pdev) +static void tilcdc_pdev_remove(struct platform_device *pdev) { int ret; ret = tilcdc_get_external_components(&pdev->dev, NULL); if (ret < 0) - return ret; + dev_err(&pdev->dev, "tilcdc_get_external_components() failed (%pe)\n", + ERR_PTR(ret)); else if (ret == 0) tilcdc_fini(platform_get_drvdata(pdev)); else component_master_del(&pdev->dev, &tilcdc_comp_ops); - - return 0; } static void tilcdc_pdev_shutdown(struct platform_device *pdev) @@ -599,7 +598,7 @@ MODULE_DEVICE_TABLE(of, tilcdc_of_match); static struct platform_driver tilcdc_platform_driver = { .probe = tilcdc_pdev_probe, - .remove = tilcdc_pdev_remove, + .remove_new = tilcdc_pdev_remove, .shutdown = tilcdc_pdev_shutdown, .driver = { .name = "tilcdc", -- cgit v1.2.3 From 9f7843b515811aea6c56527eb195b622e9c01f12 Mon Sep 17 00:00:00 2001 From: Hsin-Yi Wang Date: Fri, 17 Nov 2023 13:46:32 -0800 Subject: drm/panel-edp: Add override_edid_mode quirk for generic edp Generic edp gets mode from edid. However, some panels report incorrect mode in this way, resulting in glitches on panel. Introduce a new quirk additional_mode to the generic edid to pick a correct hardcoded mode. Signed-off-by: Hsin-Yi Wang Reviewed-by: Douglas Anderson Signed-off-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20231117215056.1883314-2-hsinyi@chromium.org --- drivers/gpu/drm/panel/panel-edp.c | 48 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 45 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index f22677373171..33535f6de343 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -203,6 +203,9 @@ struct edp_panel_entry { /** @name: Name of this panel (for printing to logs). */ const char *name; + + /** @override_edid_mode: Override the mode obtained by edid. 
*/ + const struct drm_display_mode *override_edid_mode; }; struct panel_edp { @@ -301,6 +304,24 @@ static unsigned int panel_edp_get_display_modes(struct panel_edp *panel, return num; } +static int panel_edp_override_edid_mode(struct panel_edp *panel, + struct drm_connector *connector, + const struct drm_display_mode *override_mode) +{ + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, override_mode); + if (!mode) { + dev_err(panel->base.dev, "failed to add additional mode\n"); + return 0; + } + + mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_set_name(mode); + drm_mode_probed_add(connector, mode); + return 1; +} + static int panel_edp_get_non_edid_modes(struct panel_edp *panel, struct drm_connector *connector) { @@ -568,6 +589,9 @@ static int panel_edp_get_modes(struct drm_panel *panel, { struct panel_edp *p = to_panel_edp(panel); int num = 0; + bool has_override_edid_mode = p->detected_panel && + p->detected_panel != ERR_PTR(-EINVAL) && + p->detected_panel->override_edid_mode; /* probe EDID if a DDC bus is available */ if (p->ddc) { @@ -575,9 +599,18 @@ static int panel_edp_get_modes(struct drm_panel *panel, if (!p->edid) p->edid = drm_get_edid(connector, p->ddc); - - if (p->edid) - num += drm_add_edid_modes(connector, p->edid); + if (p->edid) { + if (has_override_edid_mode) { + /* + * override_edid_mode is specified. Use + * override_edid_mode instead of from edid. + */ + num += panel_edp_override_edid_mode(p, connector, + p->detected_panel->override_edid_mode); + } else { + num += drm_add_edid_modes(connector, p->edid); + } + } pm_runtime_mark_last_busy(panel->dev); pm_runtime_put_autosuspend(panel->dev); @@ -1849,6 +1882,15 @@ static const struct panel_delay delay_200_150_e200 = { .delay = _delay \ } +#define EDP_PANEL_ENTRY2(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name, _mode) \ +{ \ + .name = _name, \ + .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \ + product_id), \ + .delay = _delay, \ + .override_edid_mode = _mode \ +} + /* * This table is used to figure out power sequencing delays for panels that * are detected by EDID. Entries here may point to entries in the -- cgit v1.2.3 From 70e0d5550f5cec301ad116703b840a539fe985dc Mon Sep 17 00:00:00 2001 From: Hsin-Yi Wang Date: Fri, 17 Nov 2023 13:46:33 -0800 Subject: drm/panel-edp: Add auo_b116xa3_mode Add auo_b116xa3_mode to override the original modes parsed from edid of the panels 0x405c B116XAK01.0 and 0x615c B116XAN06.1 which result in glitches on panel. 
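As a sanity check on the hard-coded timings (the arithmetic below is illustrative and not part of the patch), the override mode corresponds to 1366x768 at roughly 60 Hz:

    /*
     * Rough refresh-rate check for the override mode (hypothetical helper);
     * the pixel clock is in kHz, so refresh ≈ clock * 1000 / (htotal * vtotal).
     */
    static unsigned int auo_b116xa3_refresh_hz(void)
    {
            unsigned int htotal = 1366 + 40 + 40 + 32;      /* 1478 */
            unsigned int vtotal = 768 + 10 + 12 + 6;        /* 796 */
            unsigned int pixels = htotal * vtotal;          /* 1176488 */

            return (70589u * 1000u + pixels / 2) / pixels;  /* rounds to 60 */
    }
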
Signed-off-by: Hsin-Yi Wang Reviewed-by: Douglas Anderson Signed-off-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20231117215056.1883314-3-hsinyi@chromium.org --- drivers/gpu/drm/panel/panel-edp.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 33535f6de343..3e144145a6bd 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -983,6 +983,19 @@ static const struct panel_desc auo_b101ean01 = { }, }; +static const struct drm_display_mode auo_b116xa3_mode = { + .clock = 70589, + .hdisplay = 1366, + .hsync_start = 1366 + 40, + .hsync_end = 1366 + 40 + 40, + .htotal = 1366 + 40 + 40 + 32, + .vdisplay = 768, + .vsync_start = 768 + 10, + .vsync_end = 768 + 10 + 12, + .vtotal = 768 + 10 + 12 + 6, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + static const struct drm_display_mode auo_b116xak01_mode = { .clock = 69300, .hdisplay = 1366, @@ -1908,9 +1921,11 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"), - EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"), + EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0", + &auo_b116xa3_mode), EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"), - EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"), + EDP_PANEL_ENTRY2('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1", + &auo_b116xa3_mode), EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"), -- cgit v1.2.3 From fb3f43d50d9b22946702085d1fa2139c8741283d Mon Sep 17 00:00:00 2001 From: Hsin-Yi Wang Date: Fri, 17 Nov 2023 13:46:34 -0800 Subject: drm/panel-edp: Avoid adding multiple preferred modes If a non generic edp-panel is under aux-bus, the mode read from edid would still be selected as preferred and results in multiple preferred modes, which is ambiguous. If both hard-coded mode and edid exists, only add mode from hard-coded. Signed-off-by: Hsin-Yi Wang Reviewed-by: Douglas Anderson Signed-off-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20231117215056.1883314-4-hsinyi@chromium.org --- drivers/gpu/drm/panel/panel-edp.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 3e144145a6bd..825fa2a0d8a5 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -589,6 +589,7 @@ static int panel_edp_get_modes(struct drm_panel *panel, { struct panel_edp *p = to_panel_edp(panel); int num = 0; + bool has_hard_coded_modes = p->desc->num_timings || p->desc->num_modes; bool has_override_edid_mode = p->detected_panel && p->detected_panel != ERR_PTR(-EINVAL) && p->detected_panel->override_edid_mode; @@ -599,7 +600,11 @@ static int panel_edp_get_modes(struct drm_panel *panel, if (!p->edid) p->edid = drm_get_edid(connector, p->ddc); - if (p->edid) { + /* + * If both edid and hard-coded modes exists, skip edid modes to + * avoid multiple preferred modes. 
+ */ + if (p->edid && !has_hard_coded_modes) { if (has_override_edid_mode) { /* * override_edid_mode is specified. Use @@ -616,12 +621,7 @@ static int panel_edp_get_modes(struct drm_panel *panel, pm_runtime_put_autosuspend(panel->dev); } - /* - * Add hard-coded panel modes. Don't call this if there are no timings - * and no modes (the generic edp-panel case) because it will clobber - * the display_info that was already set by drm_add_edid_modes(). - */ - if (p->desc->num_timings || p->desc->num_modes) + if (has_hard_coded_modes) num += panel_edp_get_non_edid_modes(p, connector); else if (!num) dev_warn(p->base.dev, "No display modes\n"); -- cgit v1.2.3 From c9d99c73940e47692fa982cf7508581f5c55e363 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Thu, 23 Nov 2023 18:54:27 +0100 Subject: drm/bridge: ti-sn65dsi86: Simplify using pm_runtime_resume_and_get() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit pm_runtime_resume_and_get() already drops the runtime PM usage counter in the error case. So a call to pm_runtime_put_sync() can be dropped. Signed-off-by: Uwe Kleine-König Reviewed-by: Laurent Pinchart Reviewed-by: Neil Armstrong Reviewed-by: Douglas Anderson Signed-off-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20231123175425.496956-2-u.kleine-koenig@pengutronix.de --- drivers/gpu/drm/bridge/ti-sn65dsi86.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index c45c07840f64..5b8e1dfc458d 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -1413,11 +1413,9 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, int ret; if (!pdata->pwm_enabled) { - ret = pm_runtime_get_sync(pdata->dev); - if (ret < 0) { - pm_runtime_put_sync(pdata->dev); + ret = pm_runtime_resume_and_get(pdata->dev); + if (ret < 0) return ret; - } } if (state->enabled) { -- cgit v1.2.3 From 2d2cffdbbc21586b213e5e371680f9d934d3813b Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Mon, 13 Nov 2023 12:55:08 +0100 Subject: drm/loongson: Add platform dependency Only offer the Loongson DRM driver as an option on platforms where it makes sense. Signed-off-by: Jean Delvare Cc: Sui Jingfeng Cc: David Airlie Cc: Daniel Vetter Reviewed-by: Sui Jingfeng Signed-off-by: Sui Jingfeng Link: https://patchwork.freedesktop.org/patch/msgid/20231113125508.4dc755e8@endymion.delvare --- drivers/gpu/drm/loongson/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/loongson/Kconfig b/drivers/gpu/drm/loongson/Kconfig index df6946d505fa..8e59753e532d 100644 --- a/drivers/gpu/drm/loongson/Kconfig +++ b/drivers/gpu/drm/loongson/Kconfig @@ -3,6 +3,7 @@ config DRM_LOONGSON tristate "DRM support for Loongson Graphics" depends on DRM && PCI && MMU + depends on LOONGARCH || MIPS || COMPILE_TEST select DRM_KMS_HELPER select DRM_TTM select I2C -- cgit v1.2.3 From af3145aa142c92409d3b123ff87ff0b5fd0bf849 Mon Sep 17 00:00:00 2001 From: Xin Ji Date: Mon, 20 Nov 2023 17:10:36 +0800 Subject: Revert "drm/bridge: Add 200ms delay to wait FW HPD status stable" This reverts commit 330140d7319fcc4ec68bd924ea212e476bf12275 200ms delay will cause panel display image later than backlight turn on, revert this patch. 
Fixes: 330140d7319fcc ("drm/bridge: Add 200ms delay to wait FW HPD status stable") Signed-off-by: Xin Ji Reviewed-by: Robert Foss Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20231120091038.284825-1-xji@analogixsemi.com --- drivers/gpu/drm/bridge/analogix/anx7625.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 8f740154707d..51abe42c639e 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -1464,9 +1464,6 @@ static int _anx7625_hpd_polling(struct anx7625_data *ctx, if (ctx->pdata.intp_irq) return 0; - /* Delay 200ms for FW HPD de-bounce */ - msleep(200); - ret = readx_poll_timeout(anx7625_read_hpd_status_p0, ctx, val, ((val & HPD_STATUS) || (val < 0)), -- cgit v1.2.3 From e3af7053de3f685c96158373bc234b2feca1f160 Mon Sep 17 00:00:00 2001 From: Xin Ji Date: Mon, 20 Nov 2023 17:10:37 +0800 Subject: drm/bridge: anx7625: Fix Set HPD irq detect window to 2ms Polling firmware HPD GPIO status, set HPD irq detect window to 2ms after firmware HPD GPIO initial done Signed-off-by: Xin Ji Reviewed-by: Robert Foss Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20231120091038.284825-2-xji@analogixsemi.com --- drivers/gpu/drm/bridge/analogix/anx7625.c | 51 ++++++++++++++++++++++--------- drivers/gpu/drm/bridge/analogix/anx7625.h | 4 +++ 2 files changed, 40 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 51abe42c639e..ef31033439bc 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -1298,10 +1298,32 @@ static void anx7625_config(struct anx7625_data *ctx) XTAL_FRQ_SEL, XTAL_FRQ_27M); } +static int anx7625_hpd_timer_config(struct anx7625_data *ctx) +{ + int ret; + + /* Set irq detect window to 2ms */ + ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, + HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF); + ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, + HPD_DET_TIMER_BIT8_15, + (HPD_TIME >> 8) & 0xFF); + ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, + HPD_DET_TIMER_BIT16_23, + (HPD_TIME >> 16) & 0xFF); + + return ret; +} + +static int anx7625_read_hpd_gpio_config_status(struct anx7625_data *ctx) +{ + return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, GPIO_CTRL_2); +} + static void anx7625_disable_pd_protocol(struct anx7625_data *ctx) { struct device *dev = ctx->dev; - int ret; + int ret, val; /* Reset main ocm */ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x40); @@ -1315,6 +1337,19 @@ static void anx7625_disable_pd_protocol(struct anx7625_data *ctx) DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature fail.\n"); else DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature succeeded.\n"); + + /* + * Make sure the HPD GPIO already be configured after OCM release before + * setting HPD detect window register. 
Here we poll the status register + * at maximum 40ms, then config HPD irq detect window register + */ + readx_poll_timeout(anx7625_read_hpd_gpio_config_status, + ctx, val, + ((val & HPD_SOURCE) || (val < 0)), + 2000, 2000 * 20); + + /* Set HPD irq detect window to 2ms */ + anx7625_hpd_timer_config(ctx); } static int anx7625_ocm_loading_check(struct anx7625_data *ctx) @@ -1437,20 +1472,6 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx) static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx) { - int ret; - - /* Set irq detect window to 2ms */ - ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, - HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF); - ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, - HPD_DET_TIMER_BIT8_15, - (HPD_TIME >> 8) & 0xFF); - ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, - HPD_DET_TIMER_BIT16_23, - (HPD_TIME >> 16) & 0xFF); - if (ret < 0) - return ret; - return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS); } diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h index 5af819611ebc..66ebee7f3d83 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.h +++ b/drivers/gpu/drm/bridge/analogix/anx7625.h @@ -259,6 +259,10 @@ #define AP_MIPI_RX_EN BIT(5) /* 1: MIPI RX input in 0: no RX in */ #define AP_DISABLE_PD BIT(6) #define AP_DISABLE_DISPLAY BIT(7) + +#define GPIO_CTRL_2 0x49 +#define HPD_SOURCE BIT(6) + /***************************************************************/ /* Register definition of device address 0x84 */ #define MIPI_PHY_CONTROL_3 0x03 -- cgit v1.2.3 From b48807788e7a2bd93044fe84cfe8ff64b85ec15e Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:30 +0100 Subject: fbdev/acornfb: Fix name of fb_ops initializer macro Fix build by using the correct name for the initializer macro for struct fb_ops. Signed-off-by: Thomas Zimmermann Fixes: 9037afde8b9d ("fbdev/acornfb: Use fbdev I/O helpers") Cc: Thomas Zimmermann Cc: Sam Ravnborg Cc: Helge Deller Cc: Javier Martinez Canillas Cc: Arnd Bergmann Cc: # v6.6+ Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-2-tzimmermann@suse.de --- drivers/video/fbdev/acornfb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c index 163d2c9f951c..f0600f6ca254 100644 --- a/drivers/video/fbdev/acornfb.c +++ b/drivers/video/fbdev/acornfb.c @@ -605,7 +605,7 @@ acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) static const struct fb_ops acornfb_ops = { .owner = THIS_MODULE, - FB_IOMEM_DEFAULT_OPS, + FB_DEFAULT_IOMEM_OPS, .fb_check_var = acornfb_check_var, .fb_set_par = acornfb_set_par, .fb_setcolreg = acornfb_setcolreg, -- cgit v1.2.3 From 12d55c013a09a2d490f004a324c20800e4ff35ec Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:31 +0100 Subject: fbdev/sm712fb: Use correct initializer macros for struct fb_ops Only initialize mmap and draw helpers with macros; leave read/write callbacks to driver implementations. 
Fixes the following warnings: CC [M] drivers/video/fbdev/sm712fb.o sm712fb.c:1355:25: warning: initialized field overwritten [-Woverride-init] 1355 | .fb_fillrect = cfb_fillrect, | ^~~~~~~~~~~~ sm712fb.c:1355:25: note: (near initialization for 'smtcfb_ops.fb_fillrect') sm712fb.c:1356:25: warning: initialized field overwritten [-Woverride-init] 1356 | .fb_imageblit = cfb_imageblit, | ^~~~~~~~~~~~~ sm712fb.c:1356:25: note: (near initialization for 'smtcfb_ops.fb_imageblit') sm712fb.c:1357:25: warning: initialized field overwritten [-Woverride-init] 1357 | .fb_copyarea = cfb_copyarea, | ^~~~~~~~~~~~ sm712fb.c:1357:25: note: (near initialization for 'smtcfb_ops.fb_copyarea') sm712fb.c:1358:25: warning: initialized field overwritten [-Woverride-init] 1358 | .fb_read = smtcfb_read, | ^~~~~~~~~~~ sm712fb.c:1358:25: note: (near initialization for 'smtcfb_ops.fb_read') sm712fb.c:1359:25: warning: initialized field overwritten [-Woverride-init] 1359 | .fb_write = smtcfb_write, | ^~~~~~~~~~~~ sm712fb.c:1359:25: note: (near initialization for 'smtcfb_ops.fb_write') Signed-off-by: Thomas Zimmermann Fixes: 586132cf1d38 ("fbdev/sm712fb: Initialize fb_ops to fbdev I/O-memory helpers") Cc: Thomas Zimmermann Cc: Javier Martinez Canillas Cc: Sudip Mukherjee Cc: Teddy Wang Cc: Sam Ravnborg Cc: Helge Deller Cc: Arnd Bergmann Cc: linux-fbdev@vger.kernel.org Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-3-tzimmermann@suse.de --- drivers/video/fbdev/sm712fb.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c index 3f8ef50e3209..104f122e0f27 100644 --- a/drivers/video/fbdev/sm712fb.c +++ b/drivers/video/fbdev/sm712fb.c @@ -1347,16 +1347,14 @@ static int smtc_set_par(struct fb_info *info) static const struct fb_ops smtcfb_ops = { .owner = THIS_MODULE, - FB_DEFAULT_IOMEM_OPS, .fb_check_var = smtc_check_var, .fb_set_par = smtc_set_par, .fb_setcolreg = smtc_setcolreg, .fb_blank = smtc_blank, - .fb_fillrect = cfb_fillrect, - .fb_imageblit = cfb_imageblit, - .fb_copyarea = cfb_copyarea, + __FB_DEFAULT_IOMEM_OPS_DRAW, .fb_read = smtcfb_read, .fb_write = smtcfb_write, + __FB_DEFAULT_IOMEM_OPS_MMAP, }; /* -- cgit v1.2.3 From 63994d486c9fb5e780dabb0571c607c25d485421 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:32 +0100 Subject: fbdev/vfb: Set FBINFO_VIRTFB flag The vfb driver operates on system memory. Mark the framebuffer accordingly. Helpers operating on the framebuffer memory will test for the presence of this flag. Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-4-tzimmermann@suse.de --- drivers/video/fbdev/vfb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c index 1b7c338f9956..c748b9311fb1 100644 --- a/drivers/video/fbdev/vfb.c +++ b/drivers/video/fbdev/vfb.c @@ -440,6 +440,7 @@ static int vfb_probe(struct platform_device *dev) if (!info) goto err; + info->flags |= FBINFO_VIRTFB; info->screen_buffer = videomemory; info->fbops = &vfb_ops; -- cgit v1.2.3 From 853767b6b94630ac6388aa8e7893d3df26be53be Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:33 +0100 Subject: fbdev/vfb: Initialize fb_ops with fbdev macros Initialize the instance of struct fb_ops with fbdev initializer macros for framebuffers in virtual address space. 
Set the read/write, draw and mmap callbacks to the correct implementation and avoid implicit defaults. Also select the necessary helpers in Kconfig. Fbdev drivers sometimes rely on the callbacks being NULL for a default I/O-memory-based implementation to be invoked; hence requiring the I/O helpers to be built in any case. Setting all callbacks in all drivers explicitly will allow to make the I/O helpers optional. This benefits systems that do not use these functions. Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-5-tzimmermann@suse.de --- drivers/video/fbdev/Kconfig | 5 +---- drivers/video/fbdev/vfb.c | 7 ++----- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 53693c826ebd..63956382ffb6 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1800,10 +1800,7 @@ config FB_DA8XX config FB_VIRTUAL tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)" depends on FB - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT - select FB_SYS_FOPS + select FB_SYSMEM_HELPERS help This is a `virtual' frame buffer device. It operates on a chunk of unswappable kernel memory instead of on the memory of a graphics diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c index c748b9311fb1..f6140f247e4b 100644 --- a/drivers/video/fbdev/vfb.c +++ b/drivers/video/fbdev/vfb.c @@ -80,15 +80,12 @@ static int vfb_mmap(struct fb_info *info, static const struct fb_ops vfb_ops = { .owner = THIS_MODULE, - .fb_read = fb_sys_read, - .fb_write = fb_sys_write, + __FB_DEFAULT_SYSMEM_OPS_RDWR, .fb_check_var = vfb_check_var, .fb_set_par = vfb_set_par, .fb_setcolreg = vfb_setcolreg, .fb_pan_display = vfb_pan_display, - .fb_fillrect = sys_fillrect, - .fb_copyarea = sys_copyarea, - .fb_imageblit = sys_imageblit, + __FB_DEFAULT_SYSMEM_OPS_DRAW, .fb_mmap = vfb_mmap, }; -- cgit v1.2.3 From 30b72c0bde93ae3001736b596cf94bfb47c5e159 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:34 +0100 Subject: fbdev/arcfb: Set FBINFO_VIRTFB flag The arcfb driver operates on system memory. Mark the framebuffer accordingly. Helpers operating on the framebuffer memory will test for the presence of this flag. Signed-off-by: Thomas Zimmermann Cc: Jaya Kumar Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-6-tzimmermann@suse.de --- drivers/video/fbdev/arcfb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c index cff11cb04a55..7344e825543a 100644 --- a/drivers/video/fbdev/arcfb.c +++ b/drivers/video/fbdev/arcfb.c @@ -529,6 +529,7 @@ static int arcfb_probe(struct platform_device *dev) if (!info) goto err_fb_alloc; + info->flags |= FBINFO_VIRTFB; info->screen_buffer = videomemory; info->fbops = &arcfb_ops; -- cgit v1.2.3 From 28f57d03f5a73ad7c5b07fd1414d816f15f7fb1d Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:35 +0100 Subject: fbdev/arcfb: Use generator macros for deferred I/O Implement the driver's fops with the generator macros for deferred I/O. Only requires per-driver code for the on-scren scanout buffer. The generated helpers implement reading, writing and drawing on top of that. Also update the selected Kconfig tokens accordingly. Actual support for deferred I/O is missing from the driver. 
So writing to memory-mapped pages does not automatically update the scanout buffer. Signed-off-by: Thomas Zimmermann Cc: Jaya Kumar Acked-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-7-tzimmermann@suse.de --- drivers/video/fbdev/Kconfig | 5 +- drivers/video/fbdev/arcfb.c | 113 ++++++++++---------------------------------- 2 files changed, 27 insertions(+), 91 deletions(-) diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 63956382ffb6..44bf622fc8d7 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -272,10 +272,7 @@ config FB_FM2 config FB_ARC tristate "Arc Monochrome LCD board support" depends on FB && (X86 || COMPILE_TEST) - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT - select FB_SYS_FOPS + select FB_SYSMEM_HELPERS_DEFERRED help This enables support for the Arc Monochrome LCD board. The board is based on the KS-108 lcd controller and is typically a matrix diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c index 7344e825543a..b2408543277c 100644 --- a/drivers/video/fbdev/arcfb.c +++ b/drivers/video/fbdev/arcfb.c @@ -363,39 +363,6 @@ static void arcfb_lcd_update(struct arcfb_par *par, unsigned int dx, } } -static void arcfb_fillrect(struct fb_info *info, - const struct fb_fillrect *rect) -{ - struct arcfb_par *par = info->par; - - sys_fillrect(info, rect); - - /* update the physical lcd */ - arcfb_lcd_update(par, rect->dx, rect->dy, rect->width, rect->height); -} - -static void arcfb_copyarea(struct fb_info *info, - const struct fb_copyarea *area) -{ - struct arcfb_par *par = info->par; - - sys_copyarea(info, area); - - /* update the physical lcd */ - arcfb_lcd_update(par, area->dx, area->dy, area->width, area->height); -} - -static void arcfb_imageblit(struct fb_info *info, const struct fb_image *image) -{ - struct arcfb_par *par = info->par; - - sys_imageblit(info, image); - - /* update the physical lcd */ - arcfb_lcd_update(par, image->dx, image->dy, image->width, - image->height); -} - static int arcfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { @@ -436,76 +403,48 @@ static int arcfb_ioctl(struct fb_info *info, } } -/* - * this is the access path from userspace. they can seek and write to - * the fb. it's inefficient for them to do anything less than 64*8 - * writes since we update the lcd in each write() anyway. 
- */ -static ssize_t arcfb_write(struct fb_info *info, const char __user *buf, - size_t count, loff_t *ppos) +static void arcfb_damage_range(struct fb_info *info, off_t off, size_t len) { - /* modded from epson 1355 */ - - unsigned long p; - int err; - unsigned int fbmemlength,x,y,w,h, bitppos, startpos, endpos, bitcount; - struct arcfb_par *par; - unsigned int xres; - - if (!info->screen_buffer) - return -ENODEV; - - p = *ppos; - par = info->par; - xres = info->var.xres; - fbmemlength = (xres * info->var.yres)/8; - - if (p > fbmemlength) - return -ENOSPC; - - err = 0; - if ((count + p) > fbmemlength) { - count = fbmemlength - p; - err = -ENOSPC; - } - - if (count) { - char *base_addr; - - base_addr = info->screen_buffer; - count -= copy_from_user(base_addr + p, buf, count); - *ppos += count; - err = -EFAULT; - } - + struct arcfb_par *par = info->par; + unsigned int xres = info->var.xres; + unsigned int bitppos, startpos, endpos, bitcount; + unsigned int x, y, width, height; - bitppos = p*8; + bitppos = off * 8; startpos = floorXres(bitppos, xres); - endpos = ceilXres((bitppos + (count*8)), xres); + endpos = ceilXres((bitppos + (len * 8)), xres); bitcount = endpos - startpos; x = startpos % xres; y = startpos / xres; - w = xres; - h = bitcount / xres; - arcfb_lcd_update(par, x, y, w, h); + width = xres; + height = bitcount / xres; + + arcfb_lcd_update(par, x, y, width, height); +} - if (count) - return count; - return err; +static void arcfb_damage_area(struct fb_info *info, u32 x, u32 y, + u32 width, u32 height) +{ + struct arcfb_par *par = info->par; + + /* update the physical lcd */ + arcfb_lcd_update(par, x, y, width, height); } +FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(arcfb, + arcfb_damage_range, + arcfb_damage_area) + static const struct fb_ops arcfb_ops = { .owner = THIS_MODULE, .fb_open = arcfb_open, - .fb_read = fb_sys_read, - .fb_write = arcfb_write, + __FB_DEFAULT_DEFERRED_OPS_RDWR(arcfb), .fb_release = arcfb_release, .fb_pan_display = arcfb_pan_display, - .fb_fillrect = arcfb_fillrect, - .fb_copyarea = arcfb_copyarea, - .fb_imageblit = arcfb_imageblit, + __FB_DEFAULT_DEFERRED_OPS_DRAW(arcfb), .fb_ioctl = arcfb_ioctl, + // .fb_mmap reqires deferred I/O }; static int arcfb_probe(struct platform_device *dev) -- cgit v1.2.3 From eba1418968264cb3f83ff1ce78270ccc9a729c22 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:36 +0100 Subject: auxdisplay/cfag12864bfb: Set FBINFO_VIRTFB flag The cfag12864bfb driver operates on system memory. Mark the framebuffer accordingly. Helpers operating on the framebuffer memory will test for the presence of this flag. 
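The flag lets common code pick the right way to touch the screen memory; a minimal, hypothetical illustration of the distinction (not code from these patches):

    #include <linux/fb.h>
    #include <linux/io.h>
    #include <linux/string.h>

    /* Hypothetical helper showing how code can branch on FBINFO_VIRTFB. */
    static void example_clear_screen(struct fb_info *info)
    {
            if (info->flags & FBINFO_VIRTFB)
                    /* system memory: plain CPU access through screen_buffer */
                    memset(info->screen_buffer, 0, info->screen_size);
            else
                    /* I/O memory: must go through the MMIO accessors */
                    memset_io(info->screen_base, 0, info->screen_size);
    }
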
Signed-off-by: Thomas Zimmermann Cc: Miguel Ojeda Reviewed-by: Javier Martinez Canillas Acked-by: Miguel Ojeda Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-8-tzimmermann@suse.de --- drivers/auxdisplay/cfag12864bfb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c index 729845bcc803..c0ba693845aa 100644 --- a/drivers/auxdisplay/cfag12864bfb.c +++ b/drivers/auxdisplay/cfag12864bfb.c @@ -72,6 +72,7 @@ static int cfag12864bfb_probe(struct platform_device *device) if (!info) goto none; + info->flags = FBINFO_VIRTFB; info->screen_buffer = cfag12864b_buffer; info->screen_size = CFAG12864B_SIZE; info->fbops = &cfag12864bfb_ops; -- cgit v1.2.3 From 36e6cacdb095e35f2389ba8829db12603133d61d Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:37 +0100 Subject: auxdisplay/cfag12864bfb: Initialize fb_ops with fbdev macros Initialize the instance of struct fb_ops with fbdev initializer macros for framebuffers in virtual address space. Set the read/write, draw and mmap callbacks to the correct implementation and avoid implicit defaults. Also select the necessary helpers in Kconfig. Fbdev drivers sometimes rely on the callbacks being NULL for a default I/O-memory-based implementation to be invoked; hence requiring the I/O helpers to be built in any case. Setting all callbacks in all drivers explicitly will allow to make the I/O helpers optional. This benefits systems that do not use these functions. Signed-off-by: Thomas Zimmermann Cc: Miguel Ojeda Reviewed-by: Javier Martinez Canillas Acked-by: Miguel Ojeda Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-9-tzimmermann@suse.de --- drivers/auxdisplay/Kconfig | 5 +---- drivers/auxdisplay/cfag12864bfb.c | 7 ++----- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig index 64012cda4d12..4377e53f8f57 100644 --- a/drivers/auxdisplay/Kconfig +++ b/drivers/auxdisplay/Kconfig @@ -112,10 +112,7 @@ config CFAG12864B depends on X86 depends on FB depends on KS0108 - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT - select FB_SYS_FOPS + select FB_SYSMEM_HELPERS default n help If you have a Crystalfontz 128x64 2-color LCD, cfag12864b Series, diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c index c0ba693845aa..ede0f9a51311 100644 --- a/drivers/auxdisplay/cfag12864bfb.c +++ b/drivers/auxdisplay/cfag12864bfb.c @@ -56,11 +56,8 @@ static int cfag12864bfb_mmap(struct fb_info *info, struct vm_area_struct *vma) static const struct fb_ops cfag12864bfb_ops = { .owner = THIS_MODULE, - .fb_read = fb_sys_read, - .fb_write = fb_sys_write, - .fb_fillrect = sys_fillrect, - .fb_copyarea = sys_copyarea, - .fb_imageblit = sys_imageblit, + __FB_DEFAULT_SYSMEM_OPS_RDWR, + __FB_DEFAULT_SYSMEM_OPS_DRAW, .fb_mmap = cfag12864bfb_mmap, }; -- cgit v1.2.3 From 1d6796547a44fb44252c83b36609d0ae6624cd7c Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:38 +0100 Subject: auxdisplay/ht16k33: Set FBINFO_VIRTFB flag The ht16k33 driver operates on system memory. Mark the framebuffer accordingly. Helpers operating on the framebuffer memory will test for the presence of this flag. 
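The cfag12864bfb conversion above is the non-deferred system-memory case; stripped to its essentials it looks roughly like the sketch below, with placeholder names and a single-page mmap modelled on the auxdisplay drivers:

#include <linux/fb.h>
#include <linux/mm.h>
#include <linux/module.h>

/* The macros set fb_sys_read/fb_sys_write and the sys_fillrect/
 * sys_copyarea/sys_imageblit drawing helpers explicitly, so no callback
 * is left implicitly NULL.
 */
static int example_sysmem_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	/* assumes a buffer that came from get_zeroed_page(), not vmalloc() */
	struct page *page = virt_to_page(info->screen_buffer);

	return vm_map_pages_zero(vma, &page, 1);
}

static const struct fb_ops example_sysmem_ops = {
	.owner = THIS_MODULE,
	__FB_DEFAULT_SYSMEM_OPS_RDWR,
	__FB_DEFAULT_SYSMEM_OPS_DRAW,
	.fb_mmap = example_sysmem_mmap,
};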
Signed-off-by: Thomas Zimmermann Cc: Miguel Ojeda Cc: Robin van der Gracht Reviewed-by: Javier Martinez Canillas Acked-by: Miguel Ojeda Acked-by: Robin van der Gracht Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-10-tzimmermann@suse.de --- drivers/auxdisplay/ht16k33.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index 3a2d88387224..f1716e3ce6a9 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c @@ -640,6 +640,7 @@ static int ht16k33_fbdev_probe(struct device *dev, struct ht16k33_priv *priv, INIT_DELAYED_WORK(&priv->work, ht16k33_fb_update); fbdev->info->fbops = &ht16k33_fb_ops; + fbdev->info->flags |= FBINFO_VIRTFB; fbdev->info->screen_buffer = fbdev->buffer; fbdev->info->screen_size = HT16K33_FB_SIZE; fbdev->info->fix = ht16k33_fb_fix; -- cgit v1.2.3 From df558d53139f8adc7058b3dc17f01ae03c18a440 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:39 +0100 Subject: auxdisplay/ht16k33: Initialize fb_ops with fbdev macros Initialize the instance of struct fb_ops with fbdev initializer macros for framebuffers in virtual address space. Set the read/write, draw and mmap callbacks to the correct implementation and avoid implicit defaults. Also select the necessary helpers in Kconfig. Fbdev drivers sometimes rely on the callbacks being NULL for a default I/O-memory-based implementation to be invoked; hence requiring the I/O helpers to be built in any case. Setting all callbacks in all drivers explicitly will allow to make the I/O helpers optional. This benefits systems that do not use these functions. Signed-off-by: Thomas Zimmermann Cc: Miguel Ojeda Cc: Robin van der Gracht Reviewed-by: Javier Martinez Canillas Acked-by: Miguel Ojeda Reviewed-by: Robin van der Gracht Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-11-tzimmermann@suse.de --- drivers/auxdisplay/Kconfig | 5 +---- drivers/auxdisplay/ht16k33.c | 7 ++----- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig index 4377e53f8f57..d944d5298eca 100644 --- a/drivers/auxdisplay/Kconfig +++ b/drivers/auxdisplay/Kconfig @@ -167,10 +167,7 @@ config IMG_ASCII_LCD config HT16K33 tristate "Holtek Ht16K33 LED controller with keyscan" depends on FB && I2C && INPUT - select FB_SYS_FOPS - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT + select FB_SYSMEM_HELPERS select INPUT_MATRIXKMAP select FB_BACKLIGHT select NEW_LEDS diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index f1716e3ce6a9..2f1dc6b4e276 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c @@ -356,12 +356,9 @@ static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma) static const struct fb_ops ht16k33_fb_ops = { .owner = THIS_MODULE, - .fb_read = fb_sys_read, - .fb_write = fb_sys_write, + __FB_DEFAULT_SYSMEM_OPS_RDWR, .fb_blank = ht16k33_blank, - .fb_fillrect = sys_fillrect, - .fb_copyarea = sys_copyarea, - .fb_imageblit = sys_imageblit, + __FB_DEFAULT_SYSMEM_OPS_DRAW, .fb_mmap = ht16k33_mmap, }; -- cgit v1.2.3 From bc4e90771c886086572c6bfabd57f7a6b492f52e Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:40 +0100 Subject: hid/picolcd_fb: Set FBINFO_VIRTFB flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The picolcd_fb driver operates on system memory. Mark the framebuffer accordingly. 
Helpers operating on the framebuffer memory will test for the presence of this flag. Signed-off-by: Thomas Zimmermann Cc: "Bruno Prémont" Cc: Jiri Kosina Cc: Benjamin Tissoires Cc: linux-input@vger.kernel.org Reviewed-by: Javier Martinez Canillas Acked-by: Bruno Prémont Acked-by: Jiri Kosina Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-12-tzimmermann@suse.de --- drivers/hid/hid-picolcd_fb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c index a4dccdcda26f..d7dddd99d325 100644 --- a/drivers/hid/hid-picolcd_fb.c +++ b/drivers/hid/hid-picolcd_fb.c @@ -505,6 +505,7 @@ int picolcd_init_framebuffer(struct picolcd_data *data) dev_err(dev, "can't get a free page for framebuffer\n"); goto err_nomem; } + info->flags |= FBINFO_VIRTFB; info->screen_buffer = fbdata->bitmap; info->fix.smem_start = (unsigned long)fbdata->bitmap; memset(fbdata->vbitmap, 0xff, PICOLCDFB_SIZE); -- cgit v1.2.3 From 46b655ceeed09363b810712525dc63b533e5cd0b Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:41 +0100 Subject: fbdev/sh_mobile_lcdcfb: Set FBINFO_VIRTFB flag The sh_mobile_lcdcfb driver operates on DMA-able system memory. Mark the framebuffer accordingly. Helpers operating on the framebuffer memory will test for the presence of this flag. Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-13-tzimmermann@suse.de --- drivers/video/fbdev/sh_mobile_lcdcfb.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c index 1364dafaadb1..5c99fc8a409f 100644 --- a/drivers/video/fbdev/sh_mobile_lcdcfb.c +++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c @@ -1567,6 +1567,7 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl) info->fbops = &sh_mobile_lcdc_overlay_ops; info->device = priv->dev; + info->flags |= FBINFO_VIRTFB; info->screen_buffer = ovl->fb_mem; info->par = ovl; @@ -2053,6 +2054,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch, info->fbops = &sh_mobile_lcdc_ops; info->device = priv->dev; + info->flags |= FBINFO_VIRTFB; info->screen_buffer = ch->fb_mem; info->pseudo_palette = &ch->pseudo_palette; info->par = ch; -- cgit v1.2.3 From 01f4fbb3bd265167cdd20ed5ff8250199a0d189e Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:42 +0100 Subject: fbdev/sh_mobile_lcdcfb: Initialize fb_ops with fbdev macros Initialize the instance of struct fb_ops with fbdev initializer macros for framebuffers in DMA-able virtual address space. Set the read/write, draw and mmap callbacks to the correct implementation and avoid implicit defaults. Also select the necessary helpers in Kconfig. The driver uses a mixture of DMA helpers and deferred I/O. That probably needs fixing by a driver maintainer. Fbdev drivers sometimes rely on the callbacks being NULL for a default I/O-memory-based implementation to be invoked; hence requiring the I/O helpers to be built in any case. Setting all callbacks in all drivers explicitly will allow to make the I/O helpers optional. This benefits systems that do not use these functions. 
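This patch and the au1200fb one further down convert drivers whose framebuffer comes from the DMA API. Reduced to its core the pattern looks roughly like the following sketch; the names are placeholders, and the mmap mirrors what these drivers already do with dma_mmap_coherent():

#include <linux/dma-mapping.h>
#include <linux/fb.h>
#include <linux/module.h>

/* DMA-memory variant: read/write and drawing go through the
 * system-memory helpers, while mmap must reuse the DMA mapping that
 * allocated the buffer.  The par structure stands in for whatever
 * per-device state the real drivers keep.
 */
struct example_par {
	struct device *dev;
	void *fb_mem;		/* from dma_alloc_coherent() */
	dma_addr_t fb_dma;
	size_t fb_len;
};

static int example_dma_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct example_par *par = info->par;

	return dma_mmap_coherent(par->dev, vma, par->fb_mem,
				 par->fb_dma, par->fb_len);
}

static const struct fb_ops example_dmamem_ops = {
	.owner = THIS_MODULE,
	__FB_DEFAULT_DMAMEM_OPS_RDWR,
	__FB_DEFAULT_DMAMEM_OPS_DRAW,
	.fb_mmap = example_dma_mmap,
};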
Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-14-tzimmermann@suse.de --- drivers/video/fbdev/Kconfig | 7 ++----- drivers/video/fbdev/sh_mobile_lcdcfb.c | 10 +++------- 2 files changed, 5 insertions(+), 12 deletions(-) diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 44bf622fc8d7..1eab89a07bbc 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1634,12 +1634,9 @@ config FB_SH_MOBILE_LCDC depends on FB && HAVE_CLK && HAS_IOMEM depends on SUPERH || ARCH_RENESAS || COMPILE_TEST depends on FB_DEVICE - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT - select FB_SYS_FOPS - select FB_DEFERRED_IO select FB_BACKLIGHT + select FB_DEFERRED_IO + select FB_DMAMEM_HELPERS help Frame buffer driver for the on-chip SH-Mobile LCD controller. diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c index 5c99fc8a409f..d84628de5189 100644 --- a/drivers/video/fbdev/sh_mobile_lcdcfb.c +++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c @@ -1488,13 +1488,10 @@ sh_mobile_lcdc_overlay_mmap(struct fb_info *info, struct vm_area_struct *vma) static const struct fb_ops sh_mobile_lcdc_overlay_ops = { .owner = THIS_MODULE, - .fb_read = fb_sys_read, - .fb_write = fb_sys_write, - .fb_fillrect = sys_fillrect, - .fb_copyarea = sys_copyarea, - .fb_imageblit = sys_imageblit, + __FB_DEFAULT_DMAMEM_OPS_RDWR, .fb_blank = sh_mobile_lcdc_overlay_blank, .fb_pan_display = sh_mobile_lcdc_overlay_pan, + __FB_DEFAULT_DMAMEM_OPS_DRAW, .fb_ioctl = sh_mobile_lcdc_overlay_ioctl, .fb_check_var = sh_mobile_lcdc_overlay_check_var, .fb_set_par = sh_mobile_lcdc_overlay_set_par, @@ -1966,8 +1963,7 @@ sh_mobile_lcdc_mmap(struct fb_info *info, struct vm_area_struct *vma) static const struct fb_ops sh_mobile_lcdc_ops = { .owner = THIS_MODULE, .fb_setcolreg = sh_mobile_lcdc_setcolreg, - .fb_read = fb_sys_read, - .fb_write = fb_sys_write, + __FB_DEFAULT_DMAMEM_OPS_RDWR, .fb_fillrect = sh_mobile_lcdc_fillrect, .fb_copyarea = sh_mobile_lcdc_copyarea, .fb_imageblit = sh_mobile_lcdc_imageblit, -- cgit v1.2.3 From 133a2ca22e11041f891e6fbbf398d0dfd8b07c0c Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:43 +0100 Subject: fbdev/smscufx: Select correct helpers The driver uses deferred I/O. Select the correct helpers via FB_SYSMEM_HELPERS_DEFERRED in the Kconfig file. Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-15-tzimmermann@suse.de --- drivers/video/fbdev/Kconfig | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 1eab89a07bbc..d110d89b21d4 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1683,11 +1683,7 @@ config FB_SMSCUFX tristate "SMSC UFX6000/7000 USB Framebuffer support" depends on FB && USB select FB_MODE_HELPERS - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT - select FB_SYS_FOPS - select FB_DEFERRED_IO + select FB_SYSMEM_HELPERS_DEFERRED help This is a kernel framebuffer driver for SMSC UFX USB devices. 
Supports fbdev clients like xf86-video-fbdev, kdrive, fbi, and -- cgit v1.2.3 From c9496954c138b96964a2fa24bccbe5a4c3ad796f Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:44 +0100 Subject: fbdev/udlfb: Select correct helpers The driver uses deferred I/O. Select the correct helpers via FB_SYSMEM_HELPERS_DEFERRED in the Kconfig file. Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-16-tzimmermann@suse.de --- drivers/video/fbdev/Kconfig | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index d110d89b21d4..de57bf59e017 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1696,11 +1696,7 @@ config FB_UDL depends on FB && USB depends on FB_DEVICE select FB_MODE_HELPERS - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT - select FB_SYS_FOPS - select FB_DEFERRED_IO + select FB_SYSMEM_HELPERS_DEFERRED help This is a kernel framebuffer driver for DisplayLink USB devices. Supports fbdev clients like xf86-video-fbdev, kdrive, fbi, and -- cgit v1.2.3 From cb99b486a5bcc9b9a0c869774137fe40c48cf2f4 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:45 +0100 Subject: fbdev/au1200fb: Set FBINFO_VIRTFB flag The au1200fb driver operates on DMA-able system memory. Mark the framebuffer accordingly. Helpers operating on the framebuffer memory will test for the presence of this flag. Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-17-tzimmermann@suse.de --- drivers/video/fbdev/au1200fb.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c index 98afd385c49c..817c1ebb625b 100644 --- a/drivers/video/fbdev/au1200fb.c +++ b/drivers/video/fbdev/au1200fb.c @@ -1568,6 +1568,8 @@ static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev) fbi->fix.mmio_len = 0; fbi->fix.accel = FB_ACCEL_NONE; + fbi->flags |= FBINFO_VIRTFB; + fbi->screen_buffer = fbdev->fb_mem; au1200fb_update_fbinfo(fbi); -- cgit v1.2.3 From dfc3052256e024057ea8a6fbfa2691bb87fea343 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:46 +0100 Subject: fbdev/au1200fb: Initialize fb_ops with fbdev macros Initialize the instance of struct fb_ops with fbdev initializer macros for framebuffers in DMA-able virtual address space. Set the read/write, draw and mmap callbacks to the correct implementation and avoid implicit defaults. Also select the necessary helpers in Kconfig. Fbdev drivers sometimes rely on the callbacks being NULL for a default I/O-memory-based implementation to be invoked; hence requiring the I/O helpers to be built in any case. Setting all callbacks in all drivers explicitly will allow to make the I/O helpers optional. This benefits systems that do not use these functions. 
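The smscufx and udlfb changes above only switch Kconfig to FB_SYSMEM_HELPERS_DEFERRED because those drivers already use deferred I/O; the arcfb patch at the start of this series shows the full driver-side pattern, which reduces to roughly the following sketch (all names are placeholders):

#include <linux/fb.h>
#include <linux/module.h>

/* Deferred-I/O variant: the driver provides two damage handlers, and
 * the macro generates read/write and drawing ops that access the
 * system-memory buffer and then report the written range or drawn
 * rectangle to these handlers.
 */
static void example_damage_range(struct fb_info *info, off_t off, size_t len)
{
	/* translate the dirty byte range into a device update */
}

static void example_damage_area(struct fb_info *info, u32 x, u32 y,
				u32 width, u32 height)
{
	/* flush the dirty rectangle to the device */
}

FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(example,
				   example_damage_range,
				   example_damage_area)

static const struct fb_ops example_deferred_ops = {
	.owner = THIS_MODULE,
	__FB_DEFAULT_DEFERRED_OPS_RDWR(example),
	__FB_DEFAULT_DEFERRED_OPS_DRAW(example),
	/* .fb_mmap requires deferred I/O; see fb_deferred_io_mmap() */
};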
Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-18-tzimmermann@suse.de --- drivers/video/fbdev/Kconfig | 5 +---- drivers/video/fbdev/au1200fb.c | 7 ++----- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index de57bf59e017..4c82890887ba 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1457,10 +1457,7 @@ config FB_AU1100 config FB_AU1200 bool "Au1200/Au1300 LCD Driver" depends on (FB = y) && MIPS_ALCHEMY - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT - select FB_SYS_FOPS + select FB_DMAMEM_HELPERS help This is the framebuffer driver for the Au1200/Au1300 SOCs. It can drive various panels and CRTs by passing in kernel cmd line diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c index 817c1ebb625b..16ebbab50097 100644 --- a/drivers/video/fbdev/au1200fb.c +++ b/drivers/video/fbdev/au1200fb.c @@ -1488,15 +1488,12 @@ static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd, static const struct fb_ops au1200fb_fb_ops = { .owner = THIS_MODULE, + __FB_DEFAULT_DMAMEM_OPS_RDWR, .fb_check_var = au1200fb_fb_check_var, .fb_set_par = au1200fb_fb_set_par, .fb_setcolreg = au1200fb_fb_setcolreg, .fb_blank = au1200fb_fb_blank, - .fb_fillrect = sys_fillrect, - .fb_copyarea = sys_copyarea, - .fb_imageblit = sys_imageblit, - .fb_read = fb_sys_read, - .fb_write = fb_sys_write, + __FB_DEFAULT_DMAMEM_OPS_DRAW, .fb_sync = NULL, .fb_ioctl = au1200fb_ioctl, .fb_mmap = au1200fb_fb_mmap, -- cgit v1.2.3 From cccc934a74487dcf1407918a0739d0923a3c4cc0 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:47 +0100 Subject: fbdev/ps3fb: Set FBINFO_VIRTFB flag The ps3fb driver operates on system memory. Mark the framebuffer accordingly. Helpers operating on the framebuffer memory will test for the presence of this flag. Signed-off-by: Thomas Zimmermann Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Christophe Leroy Cc: linuxppc-dev@lists.ozlabs.org Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-19-tzimmermann@suse.de --- drivers/video/fbdev/ps3fb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c index 64d291d6b153..de81ad3a5d1e 100644 --- a/drivers/video/fbdev/ps3fb.c +++ b/drivers/video/fbdev/ps3fb.c @@ -1145,7 +1145,7 @@ static int ps3fb_probe(struct ps3_system_bus_device *dev) info->fix.smem_len = ps3fb_videomemory.size - GPU_FB_START; info->pseudo_palette = par->pseudo_palette; - info->flags = FBINFO_READS_FAST | + info->flags = FBINFO_VIRTFB | FBINFO_READS_FAST | FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN; retval = fb_alloc_cmap(&info->cmap, 256, 0); -- cgit v1.2.3 From 741effeab963073c257ad1bcfac6c355d0eca966 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:48 +0100 Subject: fbdev/ps3fb: Initialize fb_ops with fbdev macros Initialize the instance of struct fb_ops with fbdev initializer macros for framebuffers in virtual address space. Set the read/write, draw and mmap callbacks to the correct implementation and avoid implicit defaults. Also select the necessary helpers in Kconfig. Fbdev drivers sometimes rely on the callbacks being NULL for a default I/O-memory-based implementation to be invoked; hence requiring the I/O helpers to be built in any case. 
Setting all callbacks in all drivers explicitly will allow to make the I/O helpers optional. This benefits systems that do not use these functions. Signed-off-by: Thomas Zimmermann Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Christophe Leroy Cc: linuxppc-dev@lists.ozlabs.org Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-20-tzimmermann@suse.de --- drivers/video/fbdev/Kconfig | 5 +---- drivers/video/fbdev/ps3fb.c | 7 ++----- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 4c82890887ba..f9dab4c90033 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1715,10 +1715,7 @@ config FB_IBM_GXT4500 config FB_PS3 tristate "PS3 GPU framebuffer driver" depends on FB && PS3_PS3AV - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT - select FB_SYS_FOPS + select FB_SYSMEM_HELPERS help Include support for the virtual frame buffer in the PS3 platform. diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c index de81ad3a5d1e..de8d78bf070a 100644 --- a/drivers/video/fbdev/ps3fb.c +++ b/drivers/video/fbdev/ps3fb.c @@ -939,15 +939,12 @@ static const struct fb_ops ps3fb_ops = { .owner = THIS_MODULE, .fb_open = ps3fb_open, .fb_release = ps3fb_release, - .fb_read = fb_sys_read, - .fb_write = fb_sys_write, + __FB_DEFAULT_SYSMEM_OPS_RDWR, .fb_check_var = ps3fb_check_var, .fb_set_par = ps3fb_set_par, .fb_setcolreg = ps3fb_setcolreg, .fb_pan_display = ps3fb_pan_display, - .fb_fillrect = sys_fillrect, - .fb_copyarea = sys_copyarea, - .fb_imageblit = sys_imageblit, + __FB_DEFAULT_SYSMEM_OPS_DRAW, .fb_mmap = ps3fb_mmap, .fb_blank = ps3fb_blank, .fb_ioctl = ps3fb_ioctl, -- cgit v1.2.3 From bff13b8f2c5a38f7216ea9ce40aae88694a2b196 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:49 +0100 Subject: media/ivtvfb: Initialize fb_ops to fbdev I/O-memory helpers Initialize the instance of struct fb_ops with fbdev initializer macros for framebuffers in I/O address space. This explictily sets the read/write, draw and mmap callbacks to the correct default implementation. Fbdev drivers sometimes rely on the callbacks being NULL for a default implementation to be invoked; hence requireing the I/O helpers to be built in any case. Setting all callbacks in all drivers explicitly will allow to make the I/O helpers optional. This benefits systems that do not use these functions. Set the callbacks via macros. No functional changes. Signed-off-by: Thomas Zimmermann Cc: Andy Walls Cc: Mauro Carvalho Chehab Cc: linux-media@vger.kernel.org Reviewed-by: Javier Martinez Canillas Reviewed-by: Hans Verkuil Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-21-tzimmermann@suse.de --- drivers/media/pci/ivtv/Kconfig | 4 +--- drivers/media/pci/ivtv/ivtvfb.c | 6 +++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/media/pci/ivtv/Kconfig b/drivers/media/pci/ivtv/Kconfig index 9be52101bc4f..2498f9079b75 100644 --- a/drivers/media/pci/ivtv/Kconfig +++ b/drivers/media/pci/ivtv/Kconfig @@ -48,9 +48,7 @@ config VIDEO_IVTV_ALSA config VIDEO_FB_IVTV tristate "Conexant cx23415 framebuffer support" depends on VIDEO_IVTV && FB - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT + select FB_IOMEM_HELPERS help This is a framebuffer driver for the Conexant cx23415 MPEG encoder/decoder. 
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c index 23c8c094e791..410477e3e621 100644 --- a/drivers/media/pci/ivtv/ivtvfb.c +++ b/drivers/media/pci/ivtv/ivtvfb.c @@ -927,17 +927,17 @@ static int ivtvfb_blank(int blank_mode, struct fb_info *info) static const struct fb_ops ivtvfb_ops = { .owner = THIS_MODULE, + .fb_read = fb_io_read, .fb_write = ivtvfb_write, .fb_check_var = ivtvfb_check_var, .fb_set_par = ivtvfb_set_par, .fb_setcolreg = ivtvfb_setcolreg, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, + __FB_DEFAULT_IOMEM_OPS_DRAW, .fb_cursor = NULL, .fb_ioctl = ivtvfb_ioctl, .fb_pan_display = ivtvfb_pan_display, .fb_blank = ivtvfb_blank, + __FB_DEFAULT_IOMEM_OPS_MMAP, }; /* Restore hardware after firmware restart */ -- cgit v1.2.3 From dec2d60923dbda7c26a6194a0ed9fff9dc20cd69 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:50 +0100 Subject: fbdev/clps711x-fb: Initialize fb_ops with fbdev macros Initialize the instance of struct fb_ops with fbdev initializer macros for framebuffers in I/O address space. Set the read/write, draw and mmap callbacks to the correct implementation and avoid implicit defaults. Also select the necessary helpers in Kconfig. The driver previously selected drawing ops for system memory although it operates on I/O memory. Fixed now. Fbdev drivers sometimes rely on the callbacks being NULL for a default I/O-memory-based implementation to be invoked; hence requiring the I/O helpers to be built in any case. Setting all callbacks in all drivers explicitly will allow to make the I/O helpers optional. This benefits systems that do not use these functions. Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-22-tzimmermann@suse.de --- drivers/video/fbdev/Kconfig | 4 +--- drivers/video/fbdev/clps711x-fb.c | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index f9dab4c90033..6fc43b40acac 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -146,10 +146,8 @@ config FB_ACORN config FB_CLPS711X tristate "CLPS711X LCD support" depends on FB && (ARCH_CLPS711X || COMPILE_TEST) + select FB_IOMEM_HELPERS select FB_MODE_HELPERS - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT select LCD_CLASS_DEVICE select VIDEOMODE_HELPERS help diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c index e956c90efcdc..dcfd1fbbc7e1 100644 --- a/drivers/video/fbdev/clps711x-fb.c +++ b/drivers/video/fbdev/clps711x-fb.c @@ -155,13 +155,11 @@ static int clps711x_fb_blank(int blank, struct fb_info *info) static const struct fb_ops clps711x_fb_ops = { .owner = THIS_MODULE, + FB_DEFAULT_IOMEM_OPS, .fb_setcolreg = clps711x_fb_setcolreg, .fb_check_var = clps711x_fb_check_var, .fb_set_par = clps711x_fb_set_par, .fb_blank = clps711x_fb_blank, - .fb_fillrect = sys_fillrect, - .fb_copyarea = sys_copyarea, - .fb_imageblit = sys_imageblit, }; static int clps711x_lcd_check_fb(struct lcd_device *lcddev, struct fb_info *fi) -- cgit v1.2.3 From 63a11adaceb8b77d70bcce0890197fa9462ce160 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:51 +0100 Subject: fbdev/vt8500lcdfb: Initialize fb_ops with fbdev macros Initialize the instance of struct fb_ops with fbdev initializer macros for framebuffers in DMA-able virtual address space. 
Set the read/write, draw and mmap callbacks to the correct implementation and avoid implicit defaults. Also select the necessary helpers in Kconfig. Fbdev drivers sometimes rely on the callbacks being NULL for a default I/O-memory-based implementation to be invoked; hence requiring the I/O helpers to be built in any case. Setting all callbacks in all drivers explicitly will allow to make the I/O helpers optional. This benefits systems that do not use these functions. Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-23-tzimmermann@suse.de --- drivers/video/fbdev/Kconfig | 1 + drivers/video/fbdev/vt8500lcdfb.c | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 6fc43b40acac..4e06e403d021 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1467,6 +1467,7 @@ config FB_VT8500 select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS) select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS) select FB_SYS_IMAGEBLIT + select FB_SYS_FOPS select FB_MODE_HELPERS select VIDEOMODE_HELPERS help diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c index 42d39a9d5130..42c25dc85197 100644 --- a/drivers/video/fbdev/vt8500lcdfb.c +++ b/drivers/video/fbdev/vt8500lcdfb.c @@ -241,6 +241,7 @@ static int vt8500lcd_blank(int blank, struct fb_info *info) static const struct fb_ops vt8500lcd_ops = { .owner = THIS_MODULE, + __FB_DEFAULT_DMAMEM_OPS_RDWR, .fb_set_par = vt8500lcd_set_par, .fb_setcolreg = vt8500lcd_setcolreg, .fb_fillrect = wmt_ge_fillrect, @@ -250,6 +251,7 @@ static const struct fb_ops vt8500lcd_ops = { .fb_ioctl = vt8500lcd_ioctl, .fb_pan_display = vt8500lcd_pan_display, .fb_blank = vt8500lcd_blank, + // .fb_mmap needs DMA mmap }; static irqreturn_t vt8500lcd_handle_irq(int irq, void *dev_id) @@ -357,7 +359,7 @@ static int vt8500lcd_probe(struct platform_device *pdev) fbi->fb.fix.smem_start = fb_mem_phys; fbi->fb.fix.smem_len = fb_mem_len; - fbi->fb.screen_base = fb_mem_virt; + fbi->fb.screen_buffer = fb_mem_virt; fbi->palette_size = PAGE_ALIGN(512); fbi->palette_cpu = dma_alloc_coherent(&pdev->dev, -- cgit v1.2.3 From 11754a5046080a02ecf8a78bd80644e0dae56362 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:52 +0100 Subject: fbdev/wm8505fb: Initialize fb_ops to fbdev I/O-memory helpers Initialize the instance of struct fb_ops with fbdev initializer macros for framebuffers in DMA-able address space. This explictily sets the read/write, draw and mmap callbacks to the correct default implementation. Also select the necessary helpers in Kconfig. Fbdev drivers sometimes rely on the callbacks being NULL for a default implementation to be invoked; hence requireing the I/O helpers to be built in any case. Setting all callbacks in all drivers explicitly will allow to make the I/O helpers optional. This benefits systems that do not use these functions. Set the callbacks via macros. No functional changes. 
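Note that the vt8500lcdfb hunk above also switches from screen_base to screen_buffer. As far as these patches are concerned the convention is simply the following (sketch with placeholder names):

#include <linux/fb.h>

/* Which screen pointer to publish, depending on where the pixels live. */
static void example_set_screen_iomem(struct fb_info *info, void __iomem *base)
{
	info->screen_base = base;	/* ioremap()'ed I/O memory, cfb_/fb_io_ helpers */
}

static void example_set_screen_sysmem(struct fb_info *info, void *buffer)
{
	info->flags |= FBINFO_VIRTFB;
	info->screen_buffer = buffer;	/* system or DMA memory, sys_/fb_sys_ helpers */
}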
Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-24-tzimmermann@suse.de --- drivers/video/fbdev/Kconfig | 1 + drivers/video/fbdev/wm8505fb.c | 2 ++ 2 files changed, 3 insertions(+) diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 4e06e403d021..004a63cdea5a 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1480,6 +1480,7 @@ config FB_WM8505 select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS) select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS) select FB_SYS_IMAGEBLIT + select FB_SYS_FOPS select FB_MODE_HELPERS select VIDEOMODE_HELPERS help diff --git a/drivers/video/fbdev/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c index 5833147aa43d..00952e9c8802 100644 --- a/drivers/video/fbdev/wm8505fb.c +++ b/drivers/video/fbdev/wm8505fb.c @@ -248,6 +248,7 @@ static int wm8505fb_blank(int blank, struct fb_info *info) static const struct fb_ops wm8505fb_ops = { .owner = THIS_MODULE, + __FB_DEFAULT_DMAMEM_OPS_RDWR, .fb_set_par = wm8505fb_set_par, .fb_setcolreg = wm8505fb_setcolreg, .fb_fillrect = wmt_ge_fillrect, @@ -256,6 +257,7 @@ static const struct fb_ops wm8505fb_ops = { .fb_sync = wmt_ge_sync, .fb_pan_display = wm8505fb_pan_display, .fb_blank = wm8505fb_blank, + __FB_DEFAULT_IOMEM_OPS_MMAP, }; static int wm8505fb_probe(struct platform_device *pdev) -- cgit v1.2.3 From e0f05e643eb1ff9588d38c46ad12ff74efb09959 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:53 +0100 Subject: fbdev/cyber2000fb: Initialize fb_ops with fbdev macros Initialize the instance of struct fb_ops with fbdev initializer macros for framebuffers in I/O address space. Set the read/write, draw and mmap callbacks to the correct implementation and avoid implicit defaults. Also select the necessary helpers in Kconfig. Fbdev drivers sometimes rely on the callbacks being NULL for a default I/O-memory-based implementation to be invoked; hence requiring the I/O helpers to be built in any case. Setting all callbacks in all drivers explicitly will allow to make the I/O helpers optional. This benefits systems that do not use these functions. 
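For completeness, the I/O-memory counterpart used by the ivtvfb, clps711x-fb and cyber2000fb conversions reduces to the following sketch:

#include <linux/fb.h>
#include <linux/module.h>

/* I/O-memory case: FB_DEFAULT_IOMEM_OPS fills in fb_io_read/fb_io_write,
 * the cfb_* drawing helpers and the default mmap, so no callback is left
 * implicitly NULL.
 */
static const struct fb_ops example_iomem_ops = {
	.owner = THIS_MODULE,
	FB_DEFAULT_IOMEM_OPS,
};

/* Drivers with accelerated drawing, such as cyber2000fb here, keep their
 * own fillrect/copyarea/imageblit callbacks and take only the partial
 * defaults __FB_DEFAULT_IOMEM_OPS_RDWR and __FB_DEFAULT_IOMEM_OPS_MMAP
 * around them.
 */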
Signed-off-by: Thomas Zimmermann Cc: Russell King Cc: linux-arm-kernel@lists.infradead.org Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-25-tzimmermann@suse.de --- drivers/video/fbdev/Kconfig | 5 +---- drivers/video/fbdev/cyber2000fb.c | 9 +-------- 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 004a63cdea5a..37c8188752ba 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -178,10 +178,7 @@ config FB_IMX config FB_CYBER2000 tristate "CyberPro 2000/2010/5000 support" depends on FB && PCI && (BROKEN || !SPARC64) - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT - select FB_IOMEM_FOPS + select FB_IOMEM_HELPERS select VIDEO_NOMODESET help This enables support for the Integraphics CyberPro 20x0 and 5000 diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c index 52105dc1a72f..abb87d3576db 100644 --- a/drivers/video/fbdev/cyber2000fb.c +++ b/drivers/video/fbdev/cyber2000fb.c @@ -227,13 +227,6 @@ cyber2000fb_copyarea(struct fb_info *info, const struct fb_copyarea *region) CO_REG_CMD_H, cfb); } -static void -cyber2000fb_imageblit(struct fb_info *info, const struct fb_image *image) -{ - cfb_imageblit(info, image); - return; -} - static int cyber2000fb_sync(struct fb_info *info) { struct cfb_info *cfb = container_of(info, struct cfb_info, fb); @@ -1069,7 +1062,7 @@ static const struct fb_ops cyber2000fb_ops = { .fb_pan_display = cyber2000fb_pan_display, .fb_fillrect = cyber2000fb_fillrect, .fb_copyarea = cyber2000fb_copyarea, - .fb_imageblit = cyber2000fb_imageblit, + .fb_imageblit = cfb_imageblit, .fb_sync = cyber2000fb_sync, __FB_DEFAULT_IOMEM_OPS_MMAP, }; -- cgit v1.2.3 From f7c8a046577e09d8f88213c985d102604e73ed4c Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:54 +0100 Subject: staging/sm750fb: Declare fb_ops as constant Split up lynxfb_ops and declare each as constant. The fb_ops instance used to be modified while initializing the driver. It is now constant and the driver picks the correct instance, depending on the settings for acceleration and cursor support. 
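Reduced to its core, the change is the following pattern: one constant fb_ops table per configuration, selected once at init time, instead of patching a single mutable table. A sketch with placeholder names (the real driver distinguishes acceleration as well as cursor support):

#include <linux/fb.h>
#include <linux/module.h>

static int example_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
	return 0;	/* placeholder hardware-cursor implementation */
}

static const struct fb_ops example_ops = {
	.owner = THIS_MODULE,
	FB_DEFAULT_IOMEM_OPS,
};

static const struct fb_ops example_ops_with_cursor = {
	.owner = THIS_MODULE,
	FB_DEFAULT_IOMEM_OPS,
	.fb_cursor = example_cursor,
};

static void example_pick_fbops(struct fb_info *info, bool hwcursor)
{
	/* pick the matching constant table once, at probe time */
	info->fbops = hwcursor ? &example_ops_with_cursor : &example_ops;
}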
Signed-off-by: Thomas Zimmermann Cc: Sudip Mukherjee Cc: Teddy Wang Cc: Greg Kroah-Hartman Cc: linux-staging@lists.linux.dev Acked-by: Javier Martinez Canillas Acked-by: Greg Kroah-Hartman Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-26-tzimmermann@suse.de --- drivers/staging/sm750fb/sm750.c | 59 ++++++++++++++++++++++++++++++++++------- 1 file changed, 49 insertions(+), 10 deletions(-) diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c index 79bcd5bd4938..35098516f53c 100644 --- a/drivers/staging/sm750fb/sm750.c +++ b/drivers/staging/sm750fb/sm750.c @@ -663,16 +663,53 @@ static int sm750fb_set_drv(struct lynxfb_par *par) return ret; } -static struct fb_ops lynxfb_ops = { +static const struct fb_ops lynxfb_ops = { .owner = THIS_MODULE, .fb_check_var = lynxfb_ops_check_var, .fb_set_par = lynxfb_ops_set_par, .fb_setcolreg = lynxfb_ops_setcolreg, .fb_blank = lynxfb_ops_blank, + .fb_pan_display = lynxfb_ops_pan_display, .fb_fillrect = cfb_fillrect, .fb_imageblit = cfb_imageblit, .fb_copyarea = cfb_copyarea, - /* cursor */ +}; + +static const struct fb_ops lynxfb_ops_with_cursor = { + .owner = THIS_MODULE, + .fb_check_var = lynxfb_ops_check_var, + .fb_set_par = lynxfb_ops_set_par, + .fb_setcolreg = lynxfb_ops_setcolreg, + .fb_blank = lynxfb_ops_blank, + .fb_pan_display = lynxfb_ops_pan_display, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_cursor = lynxfb_ops_cursor, +}; + +static const struct fb_ops lynxfb_ops_accel = { + .owner = THIS_MODULE, + .fb_check_var = lynxfb_ops_check_var, + .fb_set_par = lynxfb_ops_set_par, + .fb_setcolreg = lynxfb_ops_setcolreg, + .fb_blank = lynxfb_ops_blank, + .fb_pan_display = lynxfb_ops_pan_display, + .fb_fillrect = lynxfb_ops_fillrect, + .fb_copyarea = lynxfb_ops_copyarea, + .fb_imageblit = lynxfb_ops_imageblit, +}; + +static const struct fb_ops lynxfb_ops_accel_with_cursor = { + .owner = THIS_MODULE, + .fb_check_var = lynxfb_ops_check_var, + .fb_set_par = lynxfb_ops_set_par, + .fb_setcolreg = lynxfb_ops_setcolreg, + .fb_blank = lynxfb_ops_blank, + .fb_pan_display = lynxfb_ops_pan_display, + .fb_fillrect = lynxfb_ops_fillrect, + .fb_copyarea = lynxfb_ops_copyarea, + .fb_imageblit = lynxfb_ops_imageblit, .fb_cursor = lynxfb_ops_cursor, }; @@ -714,7 +751,6 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index) par->index = index; output->channel = &crtc->channel; sm750fb_set_drv(par); - lynxfb_ops.fb_pan_display = lynxfb_ops_pan_display; /* * set current cursor variable and proc pointer, @@ -731,19 +767,22 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index) crtc->cursor.vstart = sm750_dev->pvMem + crtc->cursor.offset; memset_io(crtc->cursor.vstart, 0, crtc->cursor.size); - if (!g_hwcursor) { - lynxfb_ops.fb_cursor = NULL; + if (!g_hwcursor) sm750_hw_cursor_disable(&crtc->cursor); - } /* set info->fbops, must be set before fb_find_mode */ if (!sm750_dev->accel_off) { /* use 2d acceleration */ - lynxfb_ops.fb_fillrect = lynxfb_ops_fillrect; - lynxfb_ops.fb_copyarea = lynxfb_ops_copyarea; - lynxfb_ops.fb_imageblit = lynxfb_ops_imageblit; + if (!g_hwcursor) + info->fbops = &lynxfb_ops_accel; + else + info->fbops = &lynxfb_ops_accel_with_cursor; + } else { + if (!g_hwcursor) + info->fbops = &lynxfb_ops; + else + info->fbops = &lynxfb_ops_with_cursor; } - info->fbops = &lynxfb_ops; if (!g_fbmode[index]) { g_fbmode[index] = g_def_fbmode; -- cgit v1.2.3 From dc0ad215e5d8524bd72180206e042d904fcc1d4f Mon Sep 17 00:00:00 2001 From: Thomas 
Zimmermann Date: Mon, 27 Nov 2023 14:15:55 +0100 Subject: staging/sm750fb: Initialize fb_ops with fbdev macros Initialize all instances of struct fb_ops with fbdev initializer macros for framebuffers in I/O address space. Set the read/write, draw and mmap callbacks to the correct implementation and avoid implicit defaults. Also select the necessary helpers in Kconfig. Fbdev drivers sometimes rely on the callbacks being NULL for a default I/O-memory-based implementation to be invoked; hence requiring the I/O helpers to be built in any case. Setting all callbacks in all drivers explicitly will allow to make the I/O helpers optional. This benefits systems that do not use these functions. Signed-off-by: Thomas Zimmermann Cc: Sudip Mukherjee Cc: Teddy Wang Cc: Greg Kroah-Hartman Cc: linux-staging@lists.linux.dev Reviewed-by: Javier Martinez Canillas Acked-by: Greg Kroah-Hartman Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-27-tzimmermann@suse.de --- drivers/staging/sm750fb/sm750.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c index 35098516f53c..04c1b32a22c5 100644 --- a/drivers/staging/sm750fb/sm750.c +++ b/drivers/staging/sm750fb/sm750.c @@ -665,31 +665,28 @@ static int sm750fb_set_drv(struct lynxfb_par *par) static const struct fb_ops lynxfb_ops = { .owner = THIS_MODULE, + FB_DEFAULT_IOMEM_OPS, .fb_check_var = lynxfb_ops_check_var, .fb_set_par = lynxfb_ops_set_par, .fb_setcolreg = lynxfb_ops_setcolreg, .fb_blank = lynxfb_ops_blank, .fb_pan_display = lynxfb_ops_pan_display, - .fb_fillrect = cfb_fillrect, - .fb_imageblit = cfb_imageblit, - .fb_copyarea = cfb_copyarea, }; static const struct fb_ops lynxfb_ops_with_cursor = { .owner = THIS_MODULE, + FB_DEFAULT_IOMEM_OPS, .fb_check_var = lynxfb_ops_check_var, .fb_set_par = lynxfb_ops_set_par, .fb_setcolreg = lynxfb_ops_setcolreg, .fb_blank = lynxfb_ops_blank, .fb_pan_display = lynxfb_ops_pan_display, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, .fb_cursor = lynxfb_ops_cursor, }; static const struct fb_ops lynxfb_ops_accel = { .owner = THIS_MODULE, + __FB_DEFAULT_IOMEM_OPS_RDWR, .fb_check_var = lynxfb_ops_check_var, .fb_set_par = lynxfb_ops_set_par, .fb_setcolreg = lynxfb_ops_setcolreg, @@ -698,10 +695,12 @@ static const struct fb_ops lynxfb_ops_accel = { .fb_fillrect = lynxfb_ops_fillrect, .fb_copyarea = lynxfb_ops_copyarea, .fb_imageblit = lynxfb_ops_imageblit, + __FB_DEFAULT_IOMEM_OPS_MMAP, }; static const struct fb_ops lynxfb_ops_accel_with_cursor = { .owner = THIS_MODULE, + __FB_DEFAULT_IOMEM_OPS_RDWR, .fb_check_var = lynxfb_ops_check_var, .fb_set_par = lynxfb_ops_set_par, .fb_setcolreg = lynxfb_ops_setcolreg, @@ -711,6 +710,7 @@ static const struct fb_ops lynxfb_ops_accel_with_cursor = { .fb_copyarea = lynxfb_ops_copyarea, .fb_imageblit = lynxfb_ops_imageblit, .fb_cursor = lynxfb_ops_cursor, + __FB_DEFAULT_IOMEM_OPS_MMAP, }; static int lynxfb_set_fbinfo(struct fb_info *info, int index) -- cgit v1.2.3 From 27ad64eac10fcb25fcbfb813921f4d30b3458e13 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:56 +0100 Subject: fbdev: Rename FB_SYS_FOPS token to FB_SYSMEM_FOPS Rename the token to harmonize naming among various helpers. For example, I/O-memory helpers use FB_IOMEM_FOPS. 
Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-28-tzimmermann@suse.de --- drivers/video/fbdev/Kconfig | 4 ++-- drivers/video/fbdev/core/Kconfig | 6 +++--- drivers/video/fbdev/core/Makefile | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 37c8188752ba..d5909a9206ff 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1464,7 +1464,7 @@ config FB_VT8500 select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS) select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS) select FB_SYS_IMAGEBLIT - select FB_SYS_FOPS + select FB_SYSMEM_FOPS select FB_MODE_HELPERS select VIDEOMODE_HELPERS help @@ -1477,7 +1477,7 @@ config FB_WM8505 select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS) select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS) select FB_SYS_IMAGEBLIT - select FB_SYS_FOPS + select FB_SYSMEM_FOPS select FB_MODE_HELPERS select VIDEOMODE_HELPERS help diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig index 7a3ed13bed70..faab5d50cac3 100644 --- a/drivers/video/fbdev/core/Kconfig +++ b/drivers/video/fbdev/core/Kconfig @@ -129,7 +129,7 @@ config FB_LITTLE_ENDIAN endchoice -config FB_SYS_FOPS +config FB_SYSMEM_FOPS tristate depends on FB_CORE @@ -142,8 +142,8 @@ config FB_DMAMEM_HELPERS depends on FB_CORE select FB_SYS_COPYAREA select FB_SYS_FILLRECT - select FB_SYS_FOPS select FB_SYS_IMAGEBLIT + select FB_SYSMEM_FOPS config FB_IOMEM_FOPS tristate @@ -168,8 +168,8 @@ config FB_SYSMEM_HELPERS depends on FB_CORE select FB_SYS_COPYAREA select FB_SYS_FILLRECT - select FB_SYS_FOPS select FB_SYS_IMAGEBLIT + select FB_SYSMEM_FOPS config FB_SYSMEM_HELPERS_DEFERRED bool diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile index c1d657601b2b..d15974759086 100644 --- a/drivers/video/fbdev/core/Makefile +++ b/drivers/video/fbdev/core/Makefile @@ -32,6 +32,6 @@ obj-$(CONFIG_FB_IOMEM_FOPS) += fb_io_fops.o obj-$(CONFIG_FB_SYS_FILLRECT) += sysfillrect.o obj-$(CONFIG_FB_SYS_COPYAREA) += syscopyarea.o obj-$(CONFIG_FB_SYS_IMAGEBLIT) += sysimgblt.o -obj-$(CONFIG_FB_SYS_FOPS) += fb_sys_fops.o +obj-$(CONFIG_FB_SYSMEM_FOPS) += fb_sys_fops.o obj-$(CONFIG_FB_SVGALIB) += svgalib.o obj-$(CONFIG_FB_DDC) += fb_ddc.o -- cgit v1.2.3 From 23dad7b95fea68426311405a0627b90c06c3fc34 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:57 +0100 Subject: fbdev: Remove trailing whitespaces Fix coding style. No functional changes. 
Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-29-tzimmermann@suse.de --- drivers/video/fbdev/sbuslib.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c index 21e9fd8e69e2..4d524db5c4f2 100644 --- a/drivers/video/fbdev/sbuslib.c +++ b/drivers/video/fbdev/sbuslib.c @@ -48,7 +48,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map, unsigned long map_offset = 0; unsigned long off; int i; - + if (!(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) return -EINVAL; @@ -72,7 +72,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map, #define POFF_MASK (PAGE_MASK|0x1UL) #else #define POFF_MASK (PAGE_MASK) -#endif +#endif map_offset = (physbase + map[i].poff) & POFF_MASK; break; } -- cgit v1.2.3 From 76f92201b821dd2f442ebe37ec9b52b525855bac Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:58 +0100 Subject: fbdev: Push pgprot_decrypted() into mmap implementations If a driver sets struct fb_ops.fb_mmap, the fbdev core automatically calls pgprot_decrypted(). But the default fb_mmap code doesn't handle pgprot_decrypted(). Move the call to pgprot_decrypted() into each drivers' fb_mmap function. This only concerns fb_mmap functions for system and DMA memory. For I/O memory, which is the default case, nothing changes. The fb_mmap for I/O-memory can later be moved into a helper as well. DRM's fbdev emulation handles pgprot_decrypted() internally via the Prime helpers. Fbdev doesn't have to do anything in this case. In cases where DRM uses deferred I/O, this patch updates fb_mmap correctly. Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-30-tzimmermann@suse.de --- drivers/auxdisplay/cfag12864bfb.c | 2 ++ drivers/auxdisplay/ht16k33.c | 2 ++ drivers/video/fbdev/amba-clcd.c | 2 ++ drivers/video/fbdev/au1100fb.c | 2 ++ drivers/video/fbdev/au1200fb.c | 2 ++ drivers/video/fbdev/core/fb_chrdev.c | 5 ----- drivers/video/fbdev/core/fb_defio.c | 2 ++ drivers/video/fbdev/ep93xx-fb.c | 2 ++ drivers/video/fbdev/gbefb.c | 2 ++ drivers/video/fbdev/omap/omapfb_main.c | 2 ++ drivers/video/fbdev/omap2/omapfb/omapfb-main.c | 2 ++ drivers/video/fbdev/ps3fb.c | 2 ++ drivers/video/fbdev/sa1100fb.c | 2 ++ drivers/video/fbdev/sbuslib.c | 1 + drivers/video/fbdev/sh_mobile_lcdcfb.c | 4 ++++ drivers/video/fbdev/smscufx.c | 2 ++ drivers/video/fbdev/udlfb.c | 2 ++ drivers/video/fbdev/vermilion/vermilion.c | 2 ++ drivers/video/fbdev/vfb.c | 2 ++ 19 files changed, 37 insertions(+), 5 deletions(-) diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c index ede0f9a51311..5ba19c339f08 100644 --- a/drivers/auxdisplay/cfag12864bfb.c +++ b/drivers/auxdisplay/cfag12864bfb.c @@ -51,6 +51,8 @@ static int cfag12864bfb_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct page *pages = virt_to_page(cfag12864b_buffer); + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + return vm_map_pages_zero(vma, &pages, 1); } diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index 2f1dc6b4e276..a90430b7d07b 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c @@ -351,6 +351,8 @@ static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma) struct ht16k33_priv *priv = info->par; struct page *pages = virt_to_page(priv->fbdev.buffer); + vma->vm_page_prot = 
pgprot_decrypted(vma->vm_page_prot); + return vm_map_pages_zero(vma, &pages, 1); } diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index 0399db369e70..47d373f04f3f 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c @@ -829,6 +829,8 @@ static int clcdfb_of_dma_setup(struct clcd_fb *fb) static int clcdfb_of_dma_mmap(struct clcd_fb *fb, struct vm_area_struct *vma) { + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base, fb->fb.fix.smem_start, fb->fb.fix.smem_len); } diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c index a9c8d33a6ef7..08109ce535cd 100644 --- a/drivers/video/fbdev/au1100fb.c +++ b/drivers/video/fbdev/au1100fb.c @@ -342,6 +342,8 @@ int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma) { struct au1100fb_device *fbdev = to_au1100fb_device(fbi); + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6 return dma_mmap_coherent(fbdev->dev, vma, fbdev->fb_mem, fbdev->fb_phys, diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c index 16ebbab50097..6f20efc663d7 100644 --- a/drivers/video/fbdev/au1200fb.c +++ b/drivers/video/fbdev/au1200fb.c @@ -1236,6 +1236,8 @@ static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct au1200fb_device *fbdev = info->par; + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + return dma_mmap_coherent(fbdev->dev, vma, fbdev->fb_mem, fbdev->fb_phys, fbdev->fb_len); } diff --git a/drivers/video/fbdev/core/fb_chrdev.c b/drivers/video/fbdev/core/fb_chrdev.c index 32a7315b4b6d..b73a122950a9 100644 --- a/drivers/video/fbdev/core/fb_chrdev.c +++ b/drivers/video/fbdev/core/fb_chrdev.c @@ -325,11 +325,6 @@ static int fb_mmap(struct file *file, struct vm_area_struct *vma) if (info->fbops->fb_mmap) { int res; - /* - * The framebuffer needs to be accessed decrypted, be sure - * SME protection is removed ahead of the call - */ - vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); res = info->fbops->fb_mmap(info, vma); mutex_unlock(&info->mm_lock); return res; diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c index 274f5d0fa247..1b0b85e59e5e 100644 --- a/drivers/video/fbdev/core/fb_defio.c +++ b/drivers/video/fbdev/core/fb_defio.c @@ -227,6 +227,8 @@ static const struct address_space_operations fb_deferred_io_aops = { int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma) { + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + vma->vm_ops = &fb_deferred_io_vm_ops; vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); if (!(info->flags & FBINFO_VIRTFB)) diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c index cae00deee001..3e378874ccc7 100644 --- a/drivers/video/fbdev/ep93xx-fb.c +++ b/drivers/video/fbdev/ep93xx-fb.c @@ -311,6 +311,8 @@ static int ep93xxfb_mmap(struct fb_info *info, struct vm_area_struct *vma) { unsigned int offset = vma->vm_pgoff << PAGE_SHIFT; + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + if (offset < info->fix.smem_len) { return dma_mmap_wc(info->device, vma, info->screen_base, info->fix.smem_start, info->fix.smem_len); diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c index e89e5579258e..8463de833d1e 100644 --- a/drivers/video/fbdev/gbefb.c +++ b/drivers/video/fbdev/gbefb.c @@ -1000,6 +1000,8 @@ static int gbefb_mmap(struct fb_info *info, 
unsigned long phys_addr, phys_size; u16 *tile; + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + /* check range */ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) return -EINVAL; diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c index 694cf6318782..aa31c0d26e92 100644 --- a/drivers/video/fbdev/omap/omapfb_main.c +++ b/drivers/video/fbdev/omap/omapfb_main.c @@ -1203,6 +1203,8 @@ static int omapfb_mmap(struct fb_info *info, struct vm_area_struct *vma) struct omapfb_device *fbdev = plane->fbdev; int r; + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + omapfb_rqueue_lock(fbdev); r = fbdev->ctrl->mmap(info, vma); omapfb_rqueue_unlock(fbdev); diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c index c9fd0ad352d7..0db9c55fce5a 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c @@ -1095,6 +1095,8 @@ static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma) u32 len; int r; + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + rg = omapfb_get_mem_region(ofbi->region); start = omapfb_get_region_paddr(ofbi); diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c index de8d78bf070a..dbcda307f6a6 100644 --- a/drivers/video/fbdev/ps3fb.c +++ b/drivers/video/fbdev/ps3fb.c @@ -708,6 +708,8 @@ static int ps3fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { int r; + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + r = vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len); dev_dbg(info->device, "ps3fb: mmap framebuffer P(%lx)->V(%lx)\n", diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c index befd3fe2f659..0d362d2bf0e3 100644 --- a/drivers/video/fbdev/sa1100fb.c +++ b/drivers/video/fbdev/sa1100fb.c @@ -562,6 +562,8 @@ static int sa1100fb_mmap(struct fb_info *info, container_of(info, struct sa1100fb_info, fb); unsigned long off = vma->vm_pgoff << PAGE_SHIFT; + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + if (off < info->fix.smem_len) { vma->vm_pgoff += 1; /* skip over the palette */ return dma_mmap_wc(fbi->dev, vma, fbi->map_cpu, fbi->map_dma, diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c index 4d524db5c4f2..634e3d159452 100644 --- a/drivers/video/fbdev/sbuslib.c +++ b/drivers/video/fbdev/sbuslib.c @@ -60,6 +60,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map, /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */ + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); /* Each page, see which map applies */ diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c index d84628de5189..eb2297b37504 100644 --- a/drivers/video/fbdev/sh_mobile_lcdcfb.c +++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c @@ -1482,6 +1482,8 @@ sh_mobile_lcdc_overlay_mmap(struct fb_info *info, struct vm_area_struct *vma) if (info->fbdefio) return fb_deferred_io_mmap(info, vma); + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + return dma_mmap_coherent(ovl->channel->lcdc->dev, vma, ovl->fb_mem, ovl->dma_handle, ovl->fb_size); } @@ -1956,6 +1958,8 @@ sh_mobile_lcdc_mmap(struct fb_info *info, struct vm_area_struct *vma) if (info->fbdefio) return fb_deferred_io_mmap(info, vma); + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + return dma_mmap_coherent(ch->lcdc->dev, vma, ch->fb_mem, ch->dma_handle, 
ch->fb_size); } diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c index 90a77d19b236..35d682b110c4 100644 --- a/drivers/video/fbdev/smscufx.c +++ b/drivers/video/fbdev/smscufx.c @@ -783,6 +783,8 @@ static int ufx_ops_mmap(struct fb_info *info, struct vm_area_struct *vma) if (info->fbdefio) return fb_deferred_io_mmap(info, vma); + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) return -EINVAL; if (size > info->fix.smem_len) diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index 2460ff4ac86b..1514ddac4caf 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -331,6 +331,8 @@ static int dlfb_ops_mmap(struct fb_info *info, struct vm_area_struct *vma) if (info->fbdefio) return fb_deferred_io_mmap(info, vma); + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) return -EINVAL; if (size > info->fix.smem_len) diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c index 840ead69654b..a087b42ca652 100644 --- a/drivers/video/fbdev/vermilion/vermilion.c +++ b/drivers/video/fbdev/vermilion/vermilion.c @@ -998,6 +998,8 @@ static int vmlfb_mmap(struct fb_info *info, struct vm_area_struct *vma) int ret; unsigned long prot; + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + ret = vmlfb_vram_offset(vinfo, offset); if (ret) return -EINVAL; diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c index f6140f247e4b..f86149ba3835 100644 --- a/drivers/video/fbdev/vfb.c +++ b/drivers/video/fbdev/vfb.c @@ -382,6 +382,8 @@ static int vfb_pan_display(struct fb_var_screeninfo *var, static int vfb_mmap(struct fb_info *info, struct vm_area_struct *vma) { + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + return remap_vmalloc_range(vma, (void *)info->fix.smem_start, vma->vm_pgoff); } -- cgit v1.2.3 From 33253d9e01d40542f07aaf718a655893cd2949e5 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:15:59 +0100 Subject: fbdev: Move default fb_mmap code into helper function Move the default fb_mmap code for I/O address spaces into the helper function fb_io_mmap(). The helper can either be called via struct fb_ops.fb_mmap or as the default if no fb_mmap has been set. Also set the new helper in __FB_DEFAULT_IOMEM_OPS_MMAP. In the mid-term, fb_io_mmap() is supposed to become optional. Fbdev drivers will initialize their struct fb_ops.fb_mmap to the helper and select a corresponding Kconfig token. The helper can then be made optional at compile time. 
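A sketch of what that mid-term plan means for an individual I/O-memory driver (placeholder name):

#include <linux/fb.h>
#include <linux/module.h>

/* With fb_io_mmap() exported, a driver can name the default mapping
 * helper explicitly instead of leaving .fb_mmap NULL.
 */
static const struct fb_ops example_explicit_mmap_ops = {
	.owner = THIS_MODULE,
	__FB_DEFAULT_IOMEM_OPS_RDWR,
	__FB_DEFAULT_IOMEM_OPS_DRAW,
	.fb_mmap = fb_io_mmap,	/* what __FB_DEFAULT_IOMEM_OPS_MMAP now expands to */
};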
Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-31-tzimmermann@suse.de --- drivers/video/fbdev/core/fb_chrdev.c | 36 ++++++----------------------------- drivers/video/fbdev/core/fb_io_fops.c | 27 ++++++++++++++++++++++++++ include/linux/fb.h | 3 ++- 3 files changed, 35 insertions(+), 31 deletions(-) diff --git a/drivers/video/fbdev/core/fb_chrdev.c b/drivers/video/fbdev/core/fb_chrdev.c index b73a122950a9..089441c9d810 100644 --- a/drivers/video/fbdev/core/fb_chrdev.c +++ b/drivers/video/fbdev/core/fb_chrdev.c @@ -314,20 +314,16 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd, static int fb_mmap(struct file *file, struct vm_area_struct *vma) { struct fb_info *info = file_fb_info(file); - unsigned long mmio_pgoff; - unsigned long start; - u32 len; + int res; if (!info) return -ENODEV; + mutex_lock(&info->mm_lock); if (info->fbops->fb_mmap) { - int res; res = info->fbops->fb_mmap(info, vma); - mutex_unlock(&info->mm_lock); - return res; #if IS_ENABLED(CONFIG_FB_DEFERRED_IO) } else if (info->fbdefio) { /* @@ -335,35 +331,15 @@ static int fb_mmap(struct file *file, struct vm_area_struct *vma) * minimum, point struct fb_ops.fb_mmap to fb_deferred_io_mmap(). */ dev_warn_once(info->dev, "fbdev mmap not set up for deferred I/O.\n"); - mutex_unlock(&info->mm_lock); - return -ENODEV; + res = -ENODEV; #endif + } else { + res = fb_io_mmap(info, vma); } - /* - * Ugh. This can be either the frame buffer mapping, or - * if pgoff points past it, the mmio mapping. - */ - start = info->fix.smem_start; - len = info->fix.smem_len; - mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT; - if (vma->vm_pgoff >= mmio_pgoff) { - if (info->var.accel_flags) { - mutex_unlock(&info->mm_lock); - return -EINVAL; - } - - vma->vm_pgoff -= mmio_pgoff; - start = info->fix.mmio_start; - len = info->fix.mmio_len; - } mutex_unlock(&info->mm_lock); - vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - vma->vm_page_prot = pgprot_framebuffer(vma->vm_page_prot, vma->vm_start, - vma->vm_end, start); - - return vm_iomap_memory(vma, start, len); + return res; } static int fb_open(struct inode *inode, struct file *file) diff --git a/drivers/video/fbdev/core/fb_io_fops.c b/drivers/video/fbdev/core/fb_io_fops.c index 871b829521af..60805e43914e 100644 --- a/drivers/video/fbdev/core/fb_io_fops.c +++ b/drivers/video/fbdev/core/fb_io_fops.c @@ -132,5 +132,32 @@ ssize_t fb_io_write(struct fb_info *info, const char __user *buf, size_t count, } EXPORT_SYMBOL(fb_io_write); +int fb_io_mmap(struct fb_info *info, struct vm_area_struct *vma) +{ + unsigned long start = info->fix.smem_start; + u32 len = info->fix.smem_len; + unsigned long mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT; + + /* + * This can be either the framebuffer mapping, or if pgoff points + * past it, the mmio mapping. 
+ */ + if (vma->vm_pgoff >= mmio_pgoff) { + if (info->var.accel_flags) + return -EINVAL; + + vma->vm_pgoff -= mmio_pgoff; + start = info->fix.mmio_start; + len = info->fix.mmio_len; + } + + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + vma->vm_page_prot = pgprot_framebuffer(vma->vm_page_prot, vma->vm_start, + vma->vm_end, start); + + return vm_iomap_memory(vma, start, len); +} +EXPORT_SYMBOL(fb_io_mmap); + MODULE_DESCRIPTION("Fbdev helpers for framebuffers in I/O memory"); MODULE_LICENSE("GPL"); diff --git a/include/linux/fb.h b/include/linux/fb.h index 94e2c44c6569..a36d05b576b0 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -536,6 +536,7 @@ extern ssize_t fb_io_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos); extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf, size_t count, loff_t *ppos); +int fb_io_mmap(struct fb_info *info, struct vm_area_struct *vma); #define __FB_DEFAULT_IOMEM_OPS_RDWR \ .fb_read = fb_io_read, \ @@ -547,7 +548,7 @@ extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf, .fb_imageblit = cfb_imageblit #define __FB_DEFAULT_IOMEM_OPS_MMAP \ - .fb_mmap = NULL /* default implementation */ + .fb_mmap = fb_io_mmap #define FB_DEFAULT_IOMEM_OPS \ __FB_DEFAULT_IOMEM_OPS_RDWR, \ -- cgit v1.2.3 From b3e8813773c568fd2d65e9752abfda27442e502e Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:16:00 +0100 Subject: fbdev: Warn on incorrect framebuffer access Test in framebuffer read, write and drawing helpers if FBINFO_VIRTFB has been set correctly. Framebuffers in I/O memory should only be accessed with the architecture's respective helpers. Framebuffers in system memory should be accessed with the regular load and store operations. Presumably not all drivers get this right, so we now warn about it. Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-32-tzimmermann@suse.de --- drivers/video/fbdev/core/cfbcopyarea.c | 3 +++ drivers/video/fbdev/core/cfbfillrect.c | 3 +++ drivers/video/fbdev/core/cfbimgblt.c | 3 +++ drivers/video/fbdev/core/fb_io_fops.c | 9 +++++++++ drivers/video/fbdev/core/fb_sys_fops.c | 6 ++++++ drivers/video/fbdev/core/syscopyarea.c | 3 +++ drivers/video/fbdev/core/sysfillrect.c | 3 +++ drivers/video/fbdev/core/sysimgblt.c | 3 +++ include/linux/fb.h | 8 +++++++- 9 files changed, 40 insertions(+), 1 deletion(-) diff --git a/drivers/video/fbdev/core/cfbcopyarea.c b/drivers/video/fbdev/core/cfbcopyarea.c index 5b80bf3dae50..a271f57d9c6c 100644 --- a/drivers/video/fbdev/core/cfbcopyarea.c +++ b/drivers/video/fbdev/core/cfbcopyarea.c @@ -391,6 +391,9 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area) if (p->state != FBINFO_STATE_RUNNING) return; + if (p->flags & FBINFO_VIRTFB) + fb_warn_once(p, "Framebuffer is not in I/O address space."); + /* if the beginning of the target area might overlap with the end of the source area, be have to copy the area reverse. 
*/ if ((dy == sy && dx > sx) || (dy > sy)) { diff --git a/drivers/video/fbdev/core/cfbfillrect.c b/drivers/video/fbdev/core/cfbfillrect.c index ba9f58b2a5e8..cbaa4c9e2355 100644 --- a/drivers/video/fbdev/core/cfbfillrect.c +++ b/drivers/video/fbdev/core/cfbfillrect.c @@ -287,6 +287,9 @@ void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect) if (p->state != FBINFO_STATE_RUNNING) return; + if (p->flags & FBINFO_VIRTFB) + fb_warn_once(p, "Framebuffer is not in I/O address space."); + if (p->fix.visual == FB_VISUAL_TRUECOLOR || p->fix.visual == FB_VISUAL_DIRECTCOLOR ) fg = ((u32 *) (p->pseudo_palette))[rect->color]; diff --git a/drivers/video/fbdev/core/cfbimgblt.c b/drivers/video/fbdev/core/cfbimgblt.c index 9ebda4e0dc7a..7d1d2f1a627d 100644 --- a/drivers/video/fbdev/core/cfbimgblt.c +++ b/drivers/video/fbdev/core/cfbimgblt.c @@ -326,6 +326,9 @@ void cfb_imageblit(struct fb_info *p, const struct fb_image *image) if (p->state != FBINFO_STATE_RUNNING) return; + if (p->flags & FBINFO_VIRTFB) + fb_warn_once(p, "Framebuffer is not in I/O address space."); + bitstart = (dy * p->fix.line_length * 8) + (dx * bpp); start_index = bitstart & (32 - 1); pitch_index = (p->fix.line_length & (bpl - 1)) * 8; diff --git a/drivers/video/fbdev/core/fb_io_fops.c b/drivers/video/fbdev/core/fb_io_fops.c index 60805e43914e..3408ff1b2b7a 100644 --- a/drivers/video/fbdev/core/fb_io_fops.c +++ b/drivers/video/fbdev/core/fb_io_fops.c @@ -12,6 +12,9 @@ ssize_t fb_io_read(struct fb_info *info, char __user *buf, size_t count, loff_t int c, cnt = 0, err = 0; unsigned long total_size, trailing; + if (info->flags & FBINFO_VIRTFB) + fb_warn_once(info, "Framebuffer is not in I/O address space."); + if (!info->screen_base) return -ENODEV; @@ -73,6 +76,9 @@ ssize_t fb_io_write(struct fb_info *info, const char __user *buf, size_t count, int c, cnt = 0, err = 0; unsigned long total_size, trailing; + if (info->flags & FBINFO_VIRTFB) + fb_warn_once(info, "Framebuffer is not in I/O address space."); + if (!info->screen_base) return -ENODEV; @@ -138,6 +144,9 @@ int fb_io_mmap(struct fb_info *info, struct vm_area_struct *vma) u32 len = info->fix.smem_len; unsigned long mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT; + if (info->flags & FBINFO_VIRTFB) + fb_warn_once(info, "Framebuffer is not in I/O address space."); + /* * This can be either the framebuffer mapping, or if pgoff points * past it, the mmio mapping. 
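As a rough illustration of what the helpers now expect (the "foofb"
names and FOOFB_SIZE are made up, error handling is trimmed), a driver
whose framebuffer lives in vmalloc'ed system memory marks it with
FBINFO_VIRTFB and uses the fb_sys_*/sys_* helpers, while drivers for
framebuffers in I/O memory keep the fb_io_*/cfb_* helpers and leave the
flag clear:

  static const struct fb_ops foofb_ops = {
          .owner        = THIS_MODULE,
          .fb_read      = fb_sys_read,
          .fb_write     = fb_sys_write,
          .fb_fillrect  = sys_fillrect,
          .fb_copyarea  = sys_copyarea,
          .fb_imageblit = sys_imageblit,
  };

  static int foofb_probe(struct platform_device *pdev)
  {
          struct fb_info *info = framebuffer_alloc(0, &pdev->dev);

          if (!info)
                  return -ENOMEM;

          info->screen_buffer = vzalloc(FOOFB_SIZE);  /* system memory */
          info->flags |= FBINFO_VIRTFB;               /* not I/O memory */
          info->fbops = &foofb_ops;
          /* ... fb_fix/fb_var setup ... */
          return register_framebuffer(info);
  }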
diff --git a/drivers/video/fbdev/core/fb_sys_fops.c b/drivers/video/fbdev/core/fb_sys_fops.c index 0cb0989abda6..a9aa6519a5b3 100644 --- a/drivers/video/fbdev/core/fb_sys_fops.c +++ b/drivers/video/fbdev/core/fb_sys_fops.c @@ -22,6 +22,9 @@ ssize_t fb_sys_read(struct fb_info *info, char __user *buf, size_t count, unsigned long total_size, c; ssize_t ret; + if (!(info->flags & FBINFO_VIRTFB)) + fb_warn_once(info, "Framebuffer is not in virtual address space."); + if (!info->screen_buffer) return -ENODEV; @@ -64,6 +67,9 @@ ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, unsigned long total_size, c; size_t ret; + if (!(info->flags & FBINFO_VIRTFB)) + fb_warn_once(info, "Framebuffer is not in virtual address space."); + if (!info->screen_buffer) return -ENODEV; diff --git a/drivers/video/fbdev/core/syscopyarea.c b/drivers/video/fbdev/core/syscopyarea.c index 7b8bd3a2bedc..75e7001e8450 100644 --- a/drivers/video/fbdev/core/syscopyarea.c +++ b/drivers/video/fbdev/core/syscopyarea.c @@ -324,6 +324,9 @@ void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area) if (p->state != FBINFO_STATE_RUNNING) return; + if (!(p->flags & FBINFO_VIRTFB)) + fb_warn_once(p, "Framebuffer is not in virtual address space."); + /* if the beginning of the target area might overlap with the end of the source area, be have to copy the area reverse. */ if ((dy == sy && dx > sx) || (dy > sy)) { diff --git a/drivers/video/fbdev/core/sysfillrect.c b/drivers/video/fbdev/core/sysfillrect.c index bcdcaeae6538..e49221a88ccc 100644 --- a/drivers/video/fbdev/core/sysfillrect.c +++ b/drivers/video/fbdev/core/sysfillrect.c @@ -242,6 +242,9 @@ void sys_fillrect(struct fb_info *p, const struct fb_fillrect *rect) if (p->state != FBINFO_STATE_RUNNING) return; + if (!(p->flags & FBINFO_VIRTFB)) + fb_warn_once(p, "Framebuffer is not in virtual address space."); + if (p->fix.visual == FB_VISUAL_TRUECOLOR || p->fix.visual == FB_VISUAL_DIRECTCOLOR ) fg = ((u32 *) (p->pseudo_palette))[rect->color]; diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c index 665ef7a0a249..6949bbd51d92 100644 --- a/drivers/video/fbdev/core/sysimgblt.c +++ b/drivers/video/fbdev/core/sysimgblt.c @@ -296,6 +296,9 @@ void sys_imageblit(struct fb_info *p, const struct fb_image *image) if (p->state != FBINFO_STATE_RUNNING) return; + if (!(p->flags & FBINFO_VIRTFB)) + fb_warn_once(p, "Framebuffer is not in virtual address space."); + bitstart = (dy * p->fix.line_length * 8) + (dx * bpp); start_index = bitstart & (32 - 1); pitch_index = (p->fix.line_length & (bpl - 1)) * 8; diff --git a/include/linux/fb.h b/include/linux/fb.h index a36d05b576b0..24f0ec366235 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -849,7 +849,10 @@ static inline bool fb_modesetting_disabled(const char *drvname) } #endif -/* Convenience logging macros */ +/* + * Convenience logging macros + */ + #define fb_err(fb_info, fmt, ...) \ pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) #define fb_notice(info, fmt, ...) \ @@ -861,4 +864,7 @@ static inline bool fb_modesetting_disabled(const char *drvname) #define fb_dbg(fb_info, fmt, ...) \ pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) +#define fb_warn_once(fb_info, fmt, ...) 
\ + pr_warn_once("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) + #endif /* _LINUX_FB_H */ -- cgit v1.2.3 From 8813e86f6d82a7931446c3cbc5d596f77d0f1ba6 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 27 Nov 2023 14:16:01 +0100 Subject: fbdev: Remove default file-I/O implementations Drop the default implementations for file read, write and mmap operations. Each fbdev driver must now provide an implementation and select any necessary helpers. If no implementation has been set, fbdev returns an errno code to user space. The code is the same as if the operation had not been set in the file_operations struct. This change makes the fbdev helpers for I/O memory optional. Most systems only use system-memory framebuffers via DRM's fbdev emulation. v2: * warn once if I/O callbacks are missing (Javier) Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-33-tzimmermann@suse.de --- drivers/video/fbdev/core/Kconfig | 1 - drivers/video/fbdev/core/fb_chrdev.c | 37 ++++++++++++------------------------ include/linux/fb.h | 5 +++++ 3 files changed, 17 insertions(+), 26 deletions(-) diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig index faab5d50cac3..21053bf00dc5 100644 --- a/drivers/video/fbdev/core/Kconfig +++ b/drivers/video/fbdev/core/Kconfig @@ -4,7 +4,6 @@ # config FB_CORE - select FB_IOMEM_FOPS select VIDEO_CMDLINE tristate diff --git a/drivers/video/fbdev/core/fb_chrdev.c b/drivers/video/fbdev/core/fb_chrdev.c index 089441c9d810..4ebd16b7e3b8 100644 --- a/drivers/video/fbdev/core/fb_chrdev.c +++ b/drivers/video/fbdev/core/fb_chrdev.c @@ -34,13 +34,13 @@ static ssize_t fb_read(struct file *file, char __user *buf, size_t count, loff_t if (!info) return -ENODEV; + if (fb_WARN_ON_ONCE(info, !info->fbops->fb_read)) + return -EINVAL; + if (info->state != FBINFO_STATE_RUNNING) return -EPERM; - if (info->fbops->fb_read) - return info->fbops->fb_read(info, buf, count, ppos); - - return fb_io_read(info, buf, count, ppos); + return info->fbops->fb_read(info, buf, count, ppos); } static ssize_t fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) @@ -50,13 +50,13 @@ static ssize_t fb_write(struct file *file, const char __user *buf, size_t count, if (!info) return -ENODEV; + if (fb_WARN_ON_ONCE(info, !info->fbops->fb_write)) + return -EINVAL; + if (info->state != FBINFO_STATE_RUNNING) return -EPERM; - if (info->fbops->fb_write) - return info->fbops->fb_write(info, buf, count, ppos); - - return fb_io_write(info, buf, count, ppos); + return info->fbops->fb_write(info, buf, count, ppos); } static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, @@ -319,24 +319,11 @@ static int fb_mmap(struct file *file, struct vm_area_struct *vma) if (!info) return -ENODEV; - mutex_lock(&info->mm_lock); - - if (info->fbops->fb_mmap) { - - res = info->fbops->fb_mmap(info, vma); -#if IS_ENABLED(CONFIG_FB_DEFERRED_IO) - } else if (info->fbdefio) { - /* - * FB deferred I/O wants you to handle mmap in your drivers. At a - * minimum, point struct fb_ops.fb_mmap to fb_deferred_io_mmap(). 
- */ - dev_warn_once(info->dev, "fbdev mmap not set up for deferred I/O.\n"); - res = -ENODEV; -#endif - } else { - res = fb_io_mmap(info, vma); - } + if (fb_WARN_ON_ONCE(info, !info->fbops->fb_mmap)) + return -ENODEV; + mutex_lock(&info->mm_lock); + res = info->fbops->fb_mmap(info, vma); mutex_unlock(&info->mm_lock); return res; diff --git a/include/linux/fb.h b/include/linux/fb.h index 24f0ec366235..05dc9624897d 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -867,4 +867,9 @@ static inline bool fb_modesetting_disabled(const char *drvname) #define fb_warn_once(fb_info, fmt, ...) \ pr_warn_once("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) +#define fb_WARN_ONCE(fb_info, condition, fmt, ...) \ + WARN_ONCE(condition, "fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) +#define fb_WARN_ON_ONCE(fb_info, x) \ + fb_WARN_ONCE(fb_info, (x), "%s", "fb_WARN_ON_ONCE(" __stringify(x) ")") + #endif /* _LINUX_FB_H */ -- cgit v1.2.3 From dad19630c476f4c3d88f222c60f254f13e6245ed Mon Sep 17 00:00:00 2001 From: Thomas Hellström Date: Wed, 29 Nov 2023 10:06:37 +0100 Subject: Documentation/gpu: VM_BIND locking document MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add the first version of the VM_BIND locking document which is intended to be part of the xe driver upstreaming agreement. The document describes and discuss the locking used during exec- functions, evicton and for userptr gpu-vmas. Intention is to be using the same nomenclature as the drm-vm-bind-async.rst. v2: - s/gvm/gpu_vm/g (Rodrigo Vivi) - Clarify the userptr seqlock with a pointer to mm/mmu_notifier.c (Rodrigo Vivi) - Adjust commit message accordingly. - Add SPDX license header. v3: - Large update to align with the drm_gpuvm manager locking - Add "Efficient userptr gpu_vma exec function iteration" section - Add "Locking at bind- and unbind time" section. v4: - Fix tabs vs space errors by untabifying (Rodrigo Vivi) - Minor style fixes and typos (Rodrigo Vivi) - Clarify situations where stale GPU mappings are occurring and how access through these mappings are blocked. (Rodrigo Vivi) - Insert into the toctree in implementation_guidelines.rst v5: - Add a section about recoverable page-faults. - Use local references to other documentation where possible (Bagas Sanjaya) - General documentation fixes and typos (Danilo Krummrich and Boris Brezillon) - Improve the documentation around locks that need to be grabbed from the dm-fence critical section (Boris Brezillon) - Add more references to the DRM GPUVM helpers (Danilo Krummrich and Boriz Brezillon) - Update the rfc/xe.rst document. 
v6: - Rework wording to improve readability (Boris Brezillon, Rodrigo Vivi, Bagas Sanjaya) - Various minor fixes across the document (Boris Brezillon) Cc: Rodrigo Vivi Signed-off-by: Thomas Hellström Reviewed-by: Boris Brezillon Reviewed-by: Rodrigo Vivi Reviewed-by: Danilo Krummrich Acked-by: John Hubbard # Documentation/core-api/pin_user_pages.rst changes Link: https://patchwork.freedesktop.org/patch/msgid/20231129090637.2629-1-thomas.hellstrom@linux.intel.com --- Documentation/core-api/pin_user_pages.rst | 2 + Documentation/gpu/drm-mm.rst | 4 + Documentation/gpu/drm-vm-bind-locking.rst | 582 ++++++++++++++++++++++++ Documentation/gpu/implementation_guidelines.rst | 1 + Documentation/gpu/rfc/xe.rst | 5 + 5 files changed, 594 insertions(+) create mode 100644 Documentation/gpu/drm-vm-bind-locking.rst diff --git a/Documentation/core-api/pin_user_pages.rst b/Documentation/core-api/pin_user_pages.rst index d3c1f6d8c0e0..6b5f7e6e7155 100644 --- a/Documentation/core-api/pin_user_pages.rst +++ b/Documentation/core-api/pin_user_pages.rst @@ -153,6 +153,8 @@ NOTE: Some pages, such as DAX pages, cannot be pinned with longterm pins. That's because DAX pages do not have a separate page cache, and so "pinning" implies locking down file system blocks, which is not (yet) supported in that way. +.. _mmu-notifier-registration-case: + CASE 3: MMU notifier registration, with or without page faulting hardware ------------------------------------------------------------------------- Device drivers can pin pages via get_user_pages*(), and register for mmu diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst index acc5901ac840..d55751cad67c 100644 --- a/Documentation/gpu/drm-mm.rst +++ b/Documentation/gpu/drm-mm.rst @@ -466,6 +466,8 @@ DRM MM Range Allocator Function References .. kernel-doc:: drivers/gpu/drm/drm_mm.c :export: +.. _drm_gpuvm: + DRM GPUVM ========= @@ -481,6 +483,8 @@ Split and Merge .. kernel-doc:: drivers/gpu/drm/drm_gpuvm.c :doc: Split and Merge +.. _drm_gpuvm_locking: + Locking ------- diff --git a/Documentation/gpu/drm-vm-bind-locking.rst b/Documentation/gpu/drm-vm-bind-locking.rst new file mode 100644 index 000000000000..a345aa513d12 --- /dev/null +++ b/Documentation/gpu/drm-vm-bind-locking.rst @@ -0,0 +1,582 @@ +.. SPDX-License-Identifier: (GPL-2.0+ OR MIT) + +=============== +VM_BIND locking +=============== + +This document attempts to describe what's needed to get VM_BIND locking right, +including the userptr mmu_notifier locking. It also discusses some +optimizations to get rid of the looping through of all userptr mappings and +external / shared object mappings that is needed in the simplest +implementation. In addition, there is a section describing the VM_BIND locking +required for implementing recoverable pagefaults. + +The DRM GPUVM set of helpers +============================ + +There is a set of helpers for drivers implementing VM_BIND, and this +set of helpers implements much, but not all of the locking described +in this document. In particular, it is currently lacking a userptr +implementation. This document does not intend to describe the DRM GPUVM +implementation in detail, but it is covered in :ref:`its own +documentation `. It is highly recommended for any driver +implementing VM_BIND to use the DRM GPUVM helpers and to extend it if +common functionality is missing. + +Nomenclature +============ + +* ``gpu_vm``: Abstraction of a virtual GPU address space with + meta-data. Typically one per client (DRM file-private), or one per + execution context. 
+* ``gpu_vma``: Abstraction of a GPU address range within a gpu_vm with + associated meta-data. The backing storage of a gpu_vma can either be + a GEM object or anonymous or page-cache pages mapped also into the CPU + address space for the process. +* ``gpu_vm_bo``: Abstracts the association of a GEM object and + a VM. The GEM object maintains a list of gpu_vm_bos, where each gpu_vm_bo + maintains a list of gpu_vmas. +* ``userptr gpu_vma or just userptr``: A gpu_vma, whose backing store + is anonymous or page-cache pages as described above. +* ``revalidating``: Revalidating a gpu_vma means making the latest version + of the backing store resident and making sure the gpu_vma's + page-table entries point to that backing store. +* ``dma_fence``: A struct dma_fence that is similar to a struct completion + and which tracks GPU activity. When the GPU activity is finished, + the dma_fence signals. Please refer to the ``DMA Fences`` section of + the :doc:`dma-buf doc `. +* ``dma_resv``: A struct dma_resv (a.k.a reservation object) that is used + to track GPU activity in the form of multiple dma_fences on a + gpu_vm or a GEM object. The dma_resv contains an array / list + of dma_fences and a lock that needs to be held when adding + additional dma_fences to the dma_resv. The lock is of a type that + allows deadlock-safe locking of multiple dma_resvs in arbitrary + order. Please refer to the ``Reservation Objects`` section of the + :doc:`dma-buf doc `. +* ``exec function``: An exec function is a function that revalidates all + affected gpu_vmas, submits a GPU command batch and registers the + dma_fence representing the GPU command's activity with all affected + dma_resvs. For completeness, although not covered by this document, + it's worth mentioning that an exec function may also be the + revalidation worker that is used by some drivers in compute / + long-running mode. +* ``local object``: A GEM object which is only mapped within a + single VM. Local GEM objects share the gpu_vm's dma_resv. +* ``external object``: a.k.a shared object: A GEM object which may be shared + by multiple gpu_vms and whose backing storage may be shared with + other drivers. + +Locks and locking order +======================= + +One of the benefits of VM_BIND is that local GEM objects share the gpu_vm's +dma_resv object and hence the dma_resv lock. So, even with a huge +number of local GEM objects, only one lock is needed to make the exec +sequence atomic. + +The following locks and locking orders are used: + +* The ``gpu_vm->lock`` (optionally an rwsem). Protects the gpu_vm's + data structure keeping track of gpu_vmas. It can also protect the + gpu_vm's list of userptr gpu_vmas. With a CPU mm analogy this would + correspond to the mmap_lock. An rwsem allows several readers to walk + the VM tree concurrently, but the benefit of that concurrency most + likely varies from driver to driver. +* The ``userptr_seqlock``. This lock is taken in read mode for each + userptr gpu_vma on the gpu_vm's userptr list, and in write mode during mmu + notifier invalidation. This is not a real seqlock but described in + ``mm/mmu_notifier.c`` as a "Collision-retry read-side/write-side + 'lock' a lot like a seqcount. However this allows multiple + write-sides to hold it at once...". The read side critical section + is enclosed by ``mmu_interval_read_begin() / + mmu_interval_read_retry()`` with ``mmu_interval_read_begin()`` + sleeping if the write side is held. 
+ The write side is held by the core mm while calling mmu interval + invalidation notifiers. +* The ``gpu_vm->resv`` lock. Protects the gpu_vm's list of gpu_vmas needing + rebinding, as well as the residency state of all the gpu_vm's local + GEM objects. + Furthermore, it typically protects the gpu_vm's list of evicted and + external GEM objects. +* The ``gpu_vm->userptr_notifier_lock``. This is an rwsem that is + taken in read mode during exec and write mode during a mmu notifier + invalidation. The userptr notifier lock is per gpu_vm. +* The ``gem_object->gpuva_lock`` This lock protects the GEM object's + list of gpu_vm_bos. This is usually the same lock as the GEM + object's dma_resv, but some drivers protects this list differently, + see below. +* The ``gpu_vm list spinlocks``. With some implementations they are needed + to be able to update the gpu_vm evicted- and external object + list. For those implementations, the spinlocks are grabbed when the + lists are manipulated. However, to avoid locking order violations + with the dma_resv locks, a special scheme is needed when iterating + over the lists. + +.. _gpu_vma lifetime: + +Protection and lifetime of gpu_vm_bos and gpu_vmas +================================================== + +The GEM object's list of gpu_vm_bos, and the gpu_vm_bo's list of gpu_vmas +is protected by the ``gem_object->gpuva_lock``, which is typically the +same as the GEM object's dma_resv, but if the driver +needs to access these lists from within a dma_fence signalling +critical section, it can instead choose to protect it with a +separate lock, which can be locked from within the dma_fence signalling +critical section. Such drivers then need to pay additional attention +to what locks need to be taken from within the loop when iterating +over the gpu_vm_bo and gpu_vma lists to avoid locking-order violations. + +The DRM GPUVM set of helpers provide lockdep asserts that this lock is +held in relevant situations and also provides a means of making itself +aware of which lock is actually used: :c:func:`drm_gem_gpuva_set_lock`. + +Each gpu_vm_bo holds a reference counted pointer to the underlying GEM +object, and each gpu_vma holds a reference counted pointer to the +gpu_vm_bo. When iterating over the GEM object's list of gpu_vm_bos and +over the gpu_vm_bo's list of gpu_vmas, the ``gem_object->gpuva_lock`` must +not be dropped, otherwise, gpu_vmas attached to a gpu_vm_bo may +disappear without notice since those are not reference-counted. A +driver may implement its own scheme to allow this at the expense of +additional complexity, but this is outside the scope of this document. + +In the DRM GPUVM implementation, each gpu_vm_bo and each gpu_vma +holds a reference count on the gpu_vm itself. Due to this, and to avoid circular +reference counting, cleanup of the gpu_vm's gpu_vmas must not be done from the +gpu_vm's destructor. Drivers typically implements a gpu_vm close +function for this cleanup. The gpu_vm close function will abort gpu +execution using this VM, unmap all gpu_vmas and release page-table memory. + +Revalidation and eviction of local objects +========================================== + +Note that in all the code examples given below we use simplified +pseudo-code. In particular, the dma_resv deadlock avoidance algorithm +as well as reserving memory for dma_resv fences is left out. 
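+
+For orientation only, the deadlock avoidance that is left out of the
+examples is the usual ww_mutex backoff pattern around
+``dma_resv_lock()``, roughly as sketched below. The ``gpu_object``
+type and the object iteration / unlock helpers are illustrative
+pseudo-code, not existing functions; real drivers would typically use
+the DRM ``drm_exec`` helper component, which wraps this loop.
+
+.. code-block:: C
+
+        struct ww_acquire_ctx ctx;
+        struct gpu_object *obj, *contended = NULL;
+        int err;
+
+        ww_acquire_init(&ctx, &reservation_ww_class);
+ retry:
+        if (contended) {
+                dma_resv_lock_slow(contended->resv, &ctx);
+                contended = NULL;
+        }
+
+        for_each_obj_to_lock(gpu_vm, &obj) {
+                err = dma_resv_lock(obj->resv, &ctx);
+                if (err == -EALREADY)
+                        continue;       // the pre-locked contended object
+                if (err == -EDEADLK) {
+                        // Back off: drop all locks taken so far and
+                        // retry, locking the contended object first.
+                        unlock_all_locked_objs(gpu_vm);
+                        contended = obj;
+                        goto retry;
+                }
+        }
+        ww_acquire_done(&ctx);
+
+        // ... validation, job submission and fence bookkeeping ...
+
+        unlock_all_locked_objs(gpu_vm);
+        ww_acquire_fini(&ctx);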
+ +Revalidation +____________ +With VM_BIND, all local objects need to be resident when the gpu is +executing using the gpu_vm, and the objects need to have valid +gpu_vmas set up pointing to them. Typically, each gpu command buffer +submission is therefore preceded with a re-validation section: + +.. code-block:: C + + dma_resv_lock(gpu_vm->resv); + + // Validation section starts here. + for_each_gpu_vm_bo_on_evict_list(&gpu_vm->evict_list, &gpu_vm_bo) { + validate_gem_bo(&gpu_vm_bo->gem_bo); + + // The following list iteration needs the Gem object's + // dma_resv to be held (it protects the gpu_vm_bo's list of + // gpu_vmas, but since local gem objects share the gpu_vm's + // dma_resv, it is already held at this point. + for_each_gpu_vma_of_gpu_vm_bo(&gpu_vm_bo, &gpu_vma) + move_gpu_vma_to_rebind_list(&gpu_vma, &gpu_vm->rebind_list); + } + + for_each_gpu_vma_on_rebind_list(&gpu vm->rebind_list, &gpu_vma) { + rebind_gpu_vma(&gpu_vma); + remove_gpu_vma_from_rebind_list(&gpu_vma); + } + // Validation section ends here, and job submission starts. + + add_dependencies(&gpu_job, &gpu_vm->resv); + job_dma_fence = gpu_submit(&gpu_job)); + + add_dma_fence(job_dma_fence, &gpu_vm->resv); + dma_resv_unlock(gpu_vm->resv); + +The reason for having a separate gpu_vm rebind list is that there +might be userptr gpu_vmas that are not mapping a buffer object that +also need rebinding. + +Eviction +________ + +Eviction of one of these local objects will then look similar to the +following: + +.. code-block:: C + + obj = get_object_from_lru(); + + dma_resv_lock(obj->resv); + for_each_gpu_vm_bo_of_obj(obj, &gpu_vm_bo); + add_gpu_vm_bo_to_evict_list(&gpu_vm_bo, &gpu_vm->evict_list); + + add_dependencies(&eviction_job, &obj->resv); + job_dma_fence = gpu_submit(&eviction_job); + add_dma_fence(&obj->resv, job_dma_fence); + + dma_resv_unlock(&obj->resv); + put_object(obj); + +Note that since the object is local to the gpu_vm, it will share the gpu_vm's +dma_resv lock such that ``obj->resv == gpu_vm->resv``. +The gpu_vm_bos marked for eviction are put on the gpu_vm's evict list, +which is protected by ``gpu_vm->resv``. During eviction all local +objects have their dma_resv locked and, due to the above equality, also +the gpu_vm's dma_resv protecting the gpu_vm's evict list is locked. + +With VM_BIND, gpu_vmas don't need to be unbound before eviction, +since the driver must ensure that the eviction blit or copy will wait +for GPU idle or depend on all previous GPU activity. Furthermore, any +subsequent attempt by the GPU to access freed memory through the +gpu_vma will be preceded by a new exec function, with a revalidation +section which will make sure all gpu_vmas are rebound. The eviction +code holding the object's dma_resv while revalidating will ensure a +new exec function may not race with the eviction. + +A driver can be implemented in such a way that, on each exec function, +only a subset of vmas are selected for rebind. In this case, all vmas that are +*not* selected for rebind must be unbound before the exec +function workload is submitted. + +Locking with external buffer objects +==================================== + +Since external buffer objects may be shared by multiple gpu_vm's they +can't share their reservation object with a single gpu_vm. Instead +they need to have a reservation object of their own. The external +objects bound to a gpu_vm using one or many gpu_vmas are therefore put on a +per-gpu_vm list which is protected by the gpu_vm's dma_resv lock or +one of the :ref:`gpu_vm list spinlocks `. 
Once +the gpu_vm's reservation object is locked, it is safe to traverse the +external object list and lock the dma_resvs of all external +objects. However, if instead a list spinlock is used, a more elaborate +iteration scheme needs to be used. + +At eviction time, the gpu_vm_bos of *all* the gpu_vms an external +object is bound to need to be put on their gpu_vm's evict list. +However, when evicting an external object, the dma_resvs of the +gpu_vms the object is bound to are typically not held. Only +the object's private dma_resv can be guaranteed to be held. If there +is a ww_acquire context at hand at eviction time we could grab those +dma_resvs but that could cause expensive ww_mutex rollbacks. A simple +option is to just mark the gpu_vm_bos of the evicted gem object with +an ``evicted`` bool that is inspected before the next time the +corresponding gpu_vm evicted list needs to be traversed. For example, when +traversing the list of external objects and locking them. At that time, +both the gpu_vm's dma_resv and the object's dma_resv is held, and the +gpu_vm_bo marked evicted, can then be added to the gpu_vm's list of +evicted gpu_vm_bos. The ``evicted`` bool is formally protected by the +object's dma_resv. + +The exec function becomes + +.. code-block:: C + + dma_resv_lock(gpu_vm->resv); + + // External object list is protected by the gpu_vm->resv lock. + for_each_gpu_vm_bo_on_extobj_list(gpu_vm, &gpu_vm_bo) { + dma_resv_lock(gpu_vm_bo.gem_obj->resv); + if (gpu_vm_bo_marked_evicted(&gpu_vm_bo)) + add_gpu_vm_bo_to_evict_list(&gpu_vm_bo, &gpu_vm->evict_list); + } + + for_each_gpu_vm_bo_on_evict_list(&gpu_vm->evict_list, &gpu_vm_bo) { + validate_gem_bo(&gpu_vm_bo->gem_bo); + + for_each_gpu_vma_of_gpu_vm_bo(&gpu_vm_bo, &gpu_vma) + move_gpu_vma_to_rebind_list(&gpu_vma, &gpu_vm->rebind_list); + } + + for_each_gpu_vma_on_rebind_list(&gpu vm->rebind_list, &gpu_vma) { + rebind_gpu_vma(&gpu_vma); + remove_gpu_vma_from_rebind_list(&gpu_vma); + } + + add_dependencies(&gpu_job, &gpu_vm->resv); + job_dma_fence = gpu_submit(&gpu_job)); + + add_dma_fence(job_dma_fence, &gpu_vm->resv); + for_each_external_obj(gpu_vm, &obj) + add_dma_fence(job_dma_fence, &obj->resv); + dma_resv_unlock_all_resv_locks(); + +And the corresponding shared-object aware eviction would look like: + +.. code-block:: C + + obj = get_object_from_lru(); + + dma_resv_lock(obj->resv); + for_each_gpu_vm_bo_of_obj(obj, &gpu_vm_bo) + if (object_is_vm_local(obj)) + add_gpu_vm_bo_to_evict_list(&gpu_vm_bo, &gpu_vm->evict_list); + else + mark_gpu_vm_bo_evicted(&gpu_vm_bo); + + add_dependencies(&eviction_job, &obj->resv); + job_dma_fence = gpu_submit(&eviction_job); + add_dma_fence(&obj->resv, job_dma_fence); + + dma_resv_unlock(&obj->resv); + put_object(obj); + +.. _Spinlock iteration: + +Accessing the gpu_vm's lists without the dma_resv lock held +=========================================================== + +Some drivers will hold the gpu_vm's dma_resv lock when accessing the +gpu_vm's evict list and external objects lists. However, there are +drivers that need to access these lists without the dma_resv lock +held, for example due to asynchronous state updates from within the +dma_fence signalling critical path. In such cases, a spinlock can be +used to protect manipulation of the lists. However, since higher level +sleeping locks need to be taken for each list item while iterating +over the lists, the items already iterated over need to be +temporarily moved to a private list and the spinlock released +while processing each item: + +.. 
code block:: C + + struct list_head still_in_list; + + INIT_LIST_HEAD(&still_in_list); + + spin_lock(&gpu_vm->list_lock); + do { + struct list_head *entry = list_first_entry_or_null(&gpu_vm->list, head); + + if (!entry) + break; + + list_move_tail(&entry->head, &still_in_list); + list_entry_get_unless_zero(entry); + spin_unlock(&gpu_vm->list_lock); + + process(entry); + + spin_lock(&gpu_vm->list_lock); + list_entry_put(entry); + } while (true); + + list_splice_tail(&still_in_list, &gpu_vm->list); + spin_unlock(&gpu_vm->list_lock); + +Due to the additional locking and atomic operations, drivers that *can* +avoid accessing the gpu_vm's list outside of the dma_resv lock +might want to avoid also this iteration scheme. Particularly, if the +driver anticipates a large number of list items. For lists where the +anticipated number of list items is small, where list iteration doesn't +happen very often or if there is a significant additional cost +associated with each iteration, the atomic operation overhead +associated with this type of iteration is, most likely, negligible. Note that +if this scheme is used, it is necessary to make sure this list +iteration is protected by an outer level lock or semaphore, since list +items are temporarily pulled off the list while iterating, and it is +also worth mentioning that the local list ``still_in_list`` should +also be considered protected by the ``gpu_vm->list_lock``, and it is +thus possible that items can be removed also from the local list +concurrently with list iteration. + +Please refer to the :ref:`DRM GPUVM locking section +` and its internal +:c:func:`get_next_vm_bo_from_list` function. + + +userptr gpu_vmas +================ + +A userptr gpu_vma is a gpu_vma that, instead of mapping a buffer object to a +GPU virtual address range, directly maps a CPU mm range of anonymous- +or file page-cache pages. +A very simple approach would be to just pin the pages using +pin_user_pages() at bind time and unpin them at unbind time, but this +creates a Denial-Of-Service vector since a single user-space process +would be able to pin down all of system memory, which is not +desirable. (For special use-cases and assuming proper accounting pinning might +still be a desirable feature, though). What we need to do in the +general case is to obtain a reference to the desired pages, make sure +we are notified using a MMU notifier just before the CPU mm unmaps the +pages, dirty them if they are not mapped read-only to the GPU, and +then drop the reference. +When we are notified by the MMU notifier that CPU mm is about to drop the +pages, we need to stop GPU access to the pages by waiting for VM idle +in the MMU notifier and make sure that before the next time the GPU +tries to access whatever is now present in the CPU mm range, we unmap +the old pages from the GPU page tables and repeat the process of +obtaining new page references. (See the :ref:`notifier example +` below). Note that when the core mm decides to +laundry pages, we get such an unmap MMU notification and can mark the +pages dirty again before the next GPU access. We also get similar MMU +notifications for NUMA accounting which the GPU driver doesn't really +need to care about, but so far it has proven difficult to exclude +certain notifications. + +Using a MMU notifier for device DMA (and other methods) is described in +:ref:`the pin_user_pages() documentation `. 
+ +Now, the method of obtaining struct page references using +get_user_pages() unfortunately can't be used under a dma_resv lock +since that would violate the locking order of the dma_resv lock vs the +mmap_lock that is grabbed when resolving a CPU pagefault. This means +the gpu_vm's list of userptr gpu_vmas needs to be protected by an +outer lock, which in our example below is the ``gpu_vm->lock``. + +The MMU interval seqlock for a userptr gpu_vma is used in the following +way: + +.. code-block:: C + + // Exclusive locking mode here is strictly needed only if there are + // invalidated userptr gpu_vmas present, to avoid concurrent userptr + // revalidations of the same userptr gpu_vma. + down_write(&gpu_vm->lock); + retry: + + // Note: mmu_interval_read_begin() blocks until there is no + // invalidation notifier running anymore. + seq = mmu_interval_read_begin(&gpu_vma->userptr_interval); + if (seq != gpu_vma->saved_seq) { + obtain_new_page_pointers(&gpu_vma); + dma_resv_lock(&gpu_vm->resv); + add_gpu_vma_to_revalidate_list(&gpu_vma, &gpu_vm); + dma_resv_unlock(&gpu_vm->resv); + gpu_vma->saved_seq = seq; + } + + // The usual revalidation goes here. + + // Final userptr sequence validation may not happen before the + // submission dma_fence is added to the gpu_vm's resv, from the POW + // of the MMU invalidation notifier. Hence the + // userptr_notifier_lock that will make them appear atomic. + + add_dependencies(&gpu_job, &gpu_vm->resv); + down_read(&gpu_vm->userptr_notifier_lock); + if (mmu_interval_read_retry(&gpu_vma->userptr_interval, gpu_vma->saved_seq)) { + up_read(&gpu_vm->userptr_notifier_lock); + goto retry; + } + + job_dma_fence = gpu_submit(&gpu_job)); + + add_dma_fence(job_dma_fence, &gpu_vm->resv); + + for_each_external_obj(gpu_vm, &obj) + add_dma_fence(job_dma_fence, &obj->resv); + + dma_resv_unlock_all_resv_locks(); + up_read(&gpu_vm->userptr_notifier_lock); + up_write(&gpu_vm->lock); + +The code between ``mmu_interval_read_begin()`` and the +``mmu_interval_read_retry()`` marks the read side critical section of +what we call the ``userptr_seqlock``. In reality, the gpu_vm's userptr +gpu_vma list is looped through, and the check is done for *all* of its +userptr gpu_vmas, although we only show a single one here. + +The userptr gpu_vma MMU invalidation notifier might be called from +reclaim context and, again, to avoid locking order violations, we can't +take any dma_resv lock nor the gpu_vm->lock from within it. + +.. _Invalidation example: +.. code-block:: C + + bool gpu_vma_userptr_invalidate(userptr_interval, cur_seq) + { + // Make sure the exec function either sees the new sequence + // and backs off or we wait for the dma-fence: + + down_write(&gpu_vm->userptr_notifier_lock); + mmu_interval_set_seq(userptr_interval, cur_seq); + up_write(&gpu_vm->userptr_notifier_lock); + + // At this point, the exec function can't succeed in + // submitting a new job, because cur_seq is an invalid + // sequence number and will always cause a retry. When all + // invalidation callbacks, the mmu notifier core will flip + // the sequence number to a valid one. However we need to + // stop gpu access to the old pages here. + + dma_resv_wait_timeout(&gpu_vm->resv, DMA_RESV_USAGE_BOOKKEEP, + false, MAX_SCHEDULE_TIMEOUT); + return true; + } + +When this invalidation notifier returns, the GPU can no longer be +accessing the old pages of the userptr gpu_vma and needs to redo the +page-binding before a new GPU submission can succeed. 
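+
+For completeness, a sketch of how the invalidation notifier above could
+be registered. The wrapper function and the ``gpu_vma`` fields are
+illustrative; ``mmu_interval_notifier_insert()``,
+``mmu_interval_notifier_remove()`` and struct
+``mmu_interval_notifier_ops`` are the real core-mm interface.
+
+.. code-block:: C
+
+        static const struct mmu_interval_notifier_ops gpu_vma_userptr_notifier_ops = {
+                .invalidate = gpu_vma_userptr_invalidate,
+        };
+
+        int gpu_vma_userptr_register(struct gpu_vma *gpu_vma,
+                                     unsigned long start, unsigned long length)
+        {
+                // Register the invalidation callback for the CPU address
+                // range backing the userptr gpu_vma. Teardown at unbind
+                // time uses mmu_interval_notifier_remove().
+                return mmu_interval_notifier_insert(&gpu_vma->userptr_interval,
+                                                    current->mm, start, length,
+                                                    &gpu_vma_userptr_notifier_ops);
+        }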
+ +Efficient userptr gpu_vma exec_function iteration +_________________________________________________ + +If the gpu_vm's list of userptr gpu_vmas becomes large, it's +inefficient to iterate through the complete lists of userptrs on each +exec function to check whether each userptr gpu_vma's saved +sequence number is stale. A solution to this is to put all +*invalidated* userptr gpu_vmas on a separate gpu_vm list and +only check the gpu_vmas present on this list on each exec +function. This list will then lend itself very-well to the spinlock +locking scheme that is +:ref:`described in the spinlock iteration section `, since +in the mmu notifier, where we add the invalidated gpu_vmas to the +list, it's not possible to take any outer locks like the +``gpu_vm->lock`` or the ``gpu_vm->resv`` lock. Note that the +``gpu_vm->lock`` still needs to be taken while iterating to ensure the list is +complete, as also mentioned in that section. + +If using an invalidated userptr list like this, the retry check in the +exec function trivially becomes a check for invalidated list empty. + +Locking at bind and unbind time +=============================== + +At bind time, assuming a GEM object backed gpu_vma, each +gpu_vma needs to be associated with a gpu_vm_bo and that +gpu_vm_bo in turn needs to be added to the GEM object's +gpu_vm_bo list, and possibly to the gpu_vm's external object +list. This is referred to as *linking* the gpu_vma, and typically +requires that the ``gpu_vm->lock`` and the ``gem_object->gpuva_lock`` +are held. When unlinking a gpu_vma the same locks should be held, +and that ensures that when iterating over ``gpu_vmas`, either under +the ``gpu_vm->resv`` or the GEM object's dma_resv, that the gpu_vmas +stay alive as long as the lock under which we iterate is not released. For +userptr gpu_vmas it's similarly required that during vma destroy, the +outer ``gpu_vm->lock`` is held, since otherwise when iterating over +the invalidated userptr list as described in the previous section, +there is nothing keeping those userptr gpu_vmas alive. + +Locking for recoverable page-fault page-table updates +===================================================== + +There are two important things we need to ensure with locking for +recoverable page-faults: + +* At the time we return pages back to the system / allocator for + reuse, there should be no remaining GPU mappings and any GPU TLB + must have been flushed. +* The unmapping and mapping of a gpu_vma must not race. + +Since the unmapping (or zapping) of GPU ptes is typically taking place +where it is hard or even impossible to take any outer level locks we +must either introduce a new lock that is held at both mapping and +unmapping time, or look at the locks we do hold at unmapping time and +make sure that they are held also at mapping time. For userptr +gpu_vmas, the ``userptr_seqlock`` is held in write mode in the mmu +invalidation notifier where zapping happens. Hence, if the +``userptr_seqlock`` as well as the ``gpu_vm->userptr_notifier_lock`` +is held in read mode during mapping, it will not race with the +zapping. For GEM object backed gpu_vmas, zapping will take place under +the GEM object's dma_resv and ensuring that the dma_resv is held also +when populating the page-tables for any gpu_vma pointing to the GEM +object, will similarly ensure we are race-free. 
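+
+Expressed in the same simplified pseudo-code as the previous examples,
+a synchronous page-fault mapping path that honours these rules could
+look like the following sketch (the helper names are illustrative):
+
+.. code-block:: C
+
+        static int gpu_vm_fault_map(struct gpu_vm *gpu_vm, struct gpu_vma *gpu_vma)
+        {
+                if (gpu_vma_is_userptr(gpu_vma)) {
+                        // The notifier lock read side excludes concurrent
+                        // zapping from the mmu invalidation notifier.
+                        down_read(&gpu_vm->userptr_notifier_lock);
+                        if (mmu_interval_read_retry(&gpu_vma->userptr_interval,
+                                                    gpu_vma->saved_seq)) {
+                                up_read(&gpu_vm->userptr_notifier_lock);
+                                // Stale pages: caller revalidates and retries.
+                                return -EAGAIN;
+                        }
+                        map_gpu_vma_pages(gpu_vma);
+                        up_read(&gpu_vm->userptr_notifier_lock);
+                } else {
+                        // Zapping of GEM-backed gpu_vmas happens under the
+                        // GEM object's dma_resv, so holding it while
+                        // populating the page-tables closes the race.
+                        dma_resv_lock(gpu_vma->gem_obj->resv);
+                        map_gpu_vma_pages(gpu_vma);
+                        dma_resv_unlock(gpu_vma->gem_obj->resv);
+                }
+
+                return 0;
+        }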
+ +If any part of the mapping is performed asynchronously +under a dma-fence with these locks released, the zapping will need to +wait for that dma-fence to signal under the relevant lock before +starting to modify the page-table. + +Since modifying the +page-table structure in a way that frees up page-table memory +might also require outer level locks, the zapping of GPU ptes +typically focuses only on zeroing page-table or page-directory entries +and flushing TLB, whereas freeing of page-table memory is deferred to +unbind or rebind time. diff --git a/Documentation/gpu/implementation_guidelines.rst b/Documentation/gpu/implementation_guidelines.rst index 138e637dcc6b..dbccfa72f1c9 100644 --- a/Documentation/gpu/implementation_guidelines.rst +++ b/Documentation/gpu/implementation_guidelines.rst @@ -7,3 +7,4 @@ Misc DRM driver uAPI- and feature implementation guidelines .. toctree:: drm-vm-bind-async + drm-vm-bind-locking diff --git a/Documentation/gpu/rfc/xe.rst b/Documentation/gpu/rfc/xe.rst index c29113a0ac30..ceb21219d52e 100644 --- a/Documentation/gpu/rfc/xe.rst +++ b/Documentation/gpu/rfc/xe.rst @@ -123,10 +123,15 @@ Documentation should include: * O(1) complexity under VM_BIND. +The document is now included in the drm documentation :doc:`here `. + Some parts of userptr like mmu_notifiers should become GPUVA or DRM helpers when the second driver supporting VM_BIND+userptr appears. Details to be defined when the time comes. +The DRM GPUVM helpers do not yet include the userptr parts, but discussions +about implementing them are ongoing. + Long running compute: minimal data structure/scaffolding -------------------------------------------------------- The generic scheduler code needs to include the handling of endless compute -- cgit v1.2.3 From b101d08451de6eaebd1a840e4885ce7ce73656ad Mon Sep 17 00:00:00 2001 From: Yuran Pereira Date: Fri, 17 Nov 2023 02:22:00 +0530 Subject: drm/nouveau: Removes unnecessary args check in nouveau_uvmm_sm_prepare Checking `args` after calling `op_map_prepare` is unnecessary since if `op_map_prepare` was to be called with NULL args, it would lead to a NULL pointer dereference, thus never hitting that check. Hence remove the check and add a note to remind users of this function to ensure that args != NULL when calling this function for a map operation as it was suggested by Danilo [1]. [1] https://lore.kernel.org/lkml/6a1ebcef-bade-45a0-9bd9-c05f0226eb88@redhat.com Suggested-by: Danilo Krummrich Signed-off-by: Yuran Pereira Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/GV1PR10MB65637F4BAABFE2D8E261E1DCE8B0A@GV1PR10MB6563.EURPRD10.PROD.OUTLOOK.COM --- drivers/gpu/drm/nouveau/nouveau_uvmm.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index 4b236da79509..dae3baf707a0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -608,6 +608,9 @@ op_unmap_prepare(struct drm_gpuva_op_unmap *u) drm_gpuva_unmap(u); } +/* + * Note: @args should not be NULL when calling for a map operation. 
+ */ static int nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm, struct nouveau_uvma_prealloc *new, @@ -628,7 +631,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm, if (ret) goto unwind; - if (args && vmm_get_range) { + if (vmm_get_range) { ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start, vmm_get_range); if (ret) { -- cgit v1.2.3 From 03219a3aa6c89f1cbb6624907f32d6939a1ffeb0 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 30 Nov 2023 10:26:29 +0300 Subject: drm/imagination: Fix error codes in pvr_device_clk_init() There is a cut and paste error so this code returns the wrong variable. Fixes: 1f88f017e649 ("drm/imagination: Get GPU resources") Signed-off-by: Dan Carpenter Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/1649c66b-3eea-40d2-9687-592124f968cf@moroto.mountain --- drivers/gpu/drm/imagination/pvr_device.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c index 8499becf4fbb..e1dcc4e42087 100644 --- a/drivers/gpu/drm/imagination/pvr_device.c +++ b/drivers/gpu/drm/imagination/pvr_device.c @@ -105,12 +105,12 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev) sys_clk = devm_clk_get_optional(drm_dev->dev, "sys"); if (IS_ERR(sys_clk)) - return dev_err_probe(drm_dev->dev, PTR_ERR(core_clk), + return dev_err_probe(drm_dev->dev, PTR_ERR(sys_clk), "failed to get sys clock\n"); mem_clk = devm_clk_get_optional(drm_dev->dev, "mem"); if (IS_ERR(mem_clk)) - return dev_err_probe(drm_dev->dev, PTR_ERR(core_clk), + return dev_err_probe(drm_dev->dev, PTR_ERR(mem_clk), "failed to get mem clock\n"); pvr_dev->core_clk = core_clk; -- cgit v1.2.3 From 9ee33dc47772724ff583b060bb37c62b92b2d9c4 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 30 Nov 2023 10:27:01 +0300 Subject: drm/imagination: Fix IS_ERR() vs NULL bug in pvr_request_firmware() The pvr_build_firmware_filename() function returns NULL on error. It doesn't return error pointers. Fixes: f99f5f3ea7ef ("drm/imagination: Add GPU ID parsing and firmware loading") Signed-off-by: Dan Carpenter Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/384288de-a779-46c7-869d-b3c63462e12b@moroto.mountain --- drivers/gpu/drm/imagination/pvr_device.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c index e1dcc4e42087..5389aea7ff21 100644 --- a/drivers/gpu/drm/imagination/pvr_device.c +++ b/drivers/gpu/drm/imagination/pvr_device.c @@ -286,8 +286,8 @@ pvr_request_firmware(struct pvr_device *pvr_dev) filename = pvr_build_firmware_filename(pvr_dev, "powervr/rogue", PVR_FW_VERSION_MAJOR); - if (IS_ERR(filename)) - return PTR_ERR(filename); + if (!filename) + return -ENOMEM; /* * This function takes a copy of &filename, meaning we can free our -- cgit v1.2.3 From 55b0f4a7c37680428d640aeada96d62888366c56 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 30 Nov 2023 10:27:15 +0300 Subject: drm/imagination: fix off by one in pvr_vm_mips_init() error handling If the call to vmap() fails the "page_nr" is one element beyond the end of the mips_data->pt_dma_addr[] and mips_data->pt_pages[] arrays. The way that this is traditionally written is that we clean up the partial loop iteration before the goto and then we can say while (--i >= 0). At that point we know that all the elements thus far are initialized so we don't need to have NULL checks. 
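The general shape of that pattern, with placeholder
setup_item()/teardown_item() helpers, is roughly:

  for (i = 0; i < n; i++) {
          err = setup_item(&items[i]);
          if (err) {
                  /* undo any partial work of iteration i here */
                  goto err_unwind;
          }
  }
  return 0;

  err_unwind:
  /* every element before i is known to be fully initialized */
  while (--i >= 0)
          teardown_item(&items[i]);
  return err;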
Fixes: 927f3e0253c1 ("drm/imagination: Implement MIPS firmware processor and MMU support") Signed-off-by: Dan Carpenter Reviewed-by: Frank Binns Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/a2d3210b-290f-4397-9c3e-efdcca94d8ac@moroto.mountain --- drivers/gpu/drm/imagination/pvr_vm_mips.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c index 7268cf6e630b..2bc7181a4c3e 100644 --- a/drivers/gpu/drm/imagination/pvr_vm_mips.c +++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c @@ -57,6 +57,7 @@ pvr_vm_mips_init(struct pvr_device *pvr_dev) PAGE_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(dev, mips_data->pt_dma_addr[page_nr])) { err = -ENOMEM; + __free_page(mips_data->pt_pages[page_nr]); goto err_free_pages; } } @@ -79,13 +80,11 @@ pvr_vm_mips_init(struct pvr_device *pvr_dev) return 0; err_free_pages: - for (; page_nr >= 0; page_nr--) { - if (mips_data->pt_dma_addr[page_nr]) - dma_unmap_page(from_pvr_device(pvr_dev)->dev, - mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE); + while (--page_nr >= 0) { + dma_unmap_page(from_pvr_device(pvr_dev)->dev, + mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE); - if (mips_data->pt_pages[page_nr]) - __free_page(mips_data->pt_pages[page_nr]); + __free_page(mips_data->pt_pages[page_nr]); } return err; -- cgit v1.2.3 From 3d1ff9dfdc168722f570144aba0ce29d28d7f483 Mon Sep 17 00:00:00 2001 From: Ramesh Errabolu Date: Wed, 22 Nov 2023 10:05:56 -0600 Subject: dma-buf: Correct the documentation of name and exp_name symbols MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the documentation of struct dma_buf members name and exp_name as to how these members are to be used and accessed. Signed-off-by: Ramesh Errabolu Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20231122160556.24948-1-Ramesh.Errabolu@amd.com Signed-off-by: Christian König --- include/linux/dma-buf.h | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 3f31baa3293f..8ff4add71f88 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -343,16 +343,19 @@ struct dma_buf { /** * @exp_name: * - * Name of the exporter; useful for debugging. See the - * DMA_BUF_SET_NAME IOCTL. + * Name of the exporter; useful for debugging. Must not be NULL */ const char *exp_name; /** * @name: * - * Userspace-provided name; useful for accounting and debugging, - * protected by dma_resv_lock() on @resv and @name_lock for read access. + * Userspace-provided name. Default value is NULL. If not NULL, + * length cannot be longer than DMA_BUF_NAME_LEN, including NIL + * char. Useful for accounting and debugging. Read/Write accesses + * are protected by @name_lock + * + * See the IOCTLs DMA_BUF_SET_NAME or DMA_BUF_SET_NAME_A/B */ const char *name; -- cgit v1.2.3 From f8cc37c59731c88c8c2c62222482dddf071a0600 Mon Sep 17 00:00:00 2001 From: Andrew Davis Date: Mon, 13 Nov 2023 14:55:01 -0600 Subject: drm/omapdrm: Improve check for contiguous buffers While a scatter-gather table having only 1 entry does imply it is contiguous, it is a logic error to assume the inverse. Tables can have more than 1 entry and still be contiguous. Use a proper check here. 
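For illustration, the check amounts to walking the DMA-mapped entries
and verifying that each one starts where the previous one ended. A
simplified sketch of the idea behind drm_prime_get_contiguous_size(),
not the actual helper:

  static bool sgt_is_contiguous(struct sg_table *sgt, size_t size)
  {
          struct scatterlist *sg;
          dma_addr_t expected = 0;
          size_t len = 0;
          unsigned int i;

          for_each_sgtable_dma_sg(sgt, sg, i) {
                  if (len && sg_dma_address(sg) != expected)
                          break;          /* hole found, not contiguous */
                  expected = sg_dma_address(sg) + sg_dma_len(sg);
                  len += sg_dma_len(sg);
          }

          return len >= size;
  }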
Signed-off-by: Andrew Davis Signed-off-by: Tomi Valkeinen Link: https://patchwork.freedesktop.org/patch/msgid/20231113205501.616927-1-afd@ti.com --- drivers/gpu/drm/omapdrm/omap_gem.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index c48fa531ca32..3421e8389222 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -48,7 +48,7 @@ struct omap_gem_object { * OMAP_BO_MEM_DMA_API flag set) * * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set) - * if they are physically contiguous (when sgt->orig_nents == 1) + * if they are physically contiguous * * - buffers mapped through the TILER when pin_cnt is not zero, in which * case the DMA address points to the TILER aperture @@ -148,12 +148,18 @@ u64 omap_gem_mmap_offset(struct drm_gem_object *obj) return drm_vma_node_offset_addr(&obj->vma_node); } +static bool omap_gem_sgt_is_contiguous(struct sg_table *sgt, size_t size) +{ + return !(drm_prime_get_contiguous_size(sgt) < size); +} + static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj) { if (omap_obj->flags & OMAP_BO_MEM_DMA_API) return true; - if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1) + if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && + omap_gem_sgt_is_contiguous(omap_obj->sgt, omap_obj->base.size)) return true; return false; @@ -1385,7 +1391,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, union omap_gem_size gsize; /* Without a DMM only physically contiguous buffers can be supported. */ - if (sgt->orig_nents != 1 && !priv->has_dmm) + if (!omap_gem_sgt_is_contiguous(sgt, size) && !priv->has_dmm) return ERR_PTR(-EINVAL); gsize.bytes = PAGE_ALIGN(size); @@ -1399,7 +1405,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, omap_obj->sgt = sgt; - if (sgt->orig_nents == 1) { + if (omap_gem_sgt_is_contiguous(sgt, size)) { omap_obj->dma_addr = sg_dma_address(sgt->sgl); } else { /* Create pages list from sgt */ -- cgit v1.2.3 From 7959ceb767e4b6c8ced7cb8aaa1c6439cfca890c Mon Sep 17 00:00:00 2001 From: Aradhya Bhatia Date: Wed, 8 Nov 2023 22:46:18 +0530 Subject: dt-bindings: display: ti: Add support for am62a7 dss The DSS controller on TI's AM62A7 SoC is an update from that on TI's AM625 SoC. Like the DSS in AM625, the DSS in this SoC has 2 video pipelines, but unlike the former, the latter only has one output port on VP2 to service DPI display sinks. Add the new controller's compatible. Signed-off-by: Aradhya Bhatia Reviewed-by: Krzysztof Kozlowski Reviewed-by: Tomi Valkeinen Link: https://lore.kernel.org/r/20231108171619.978438-2-a-bhatia1@ti.com Signed-off-by: Tomi Valkeinen --- .../devicetree/bindings/display/ti/ti,am65x-dss.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml index ae09cd3cbce1..b6767ef0d24d 100644 --- a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml +++ b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml @@ -23,6 +23,7 @@ properties: compatible: enum: - ti,am625-dss + - ti,am62a7,dss - ti,am65x-dss reg: @@ -87,6 +88,7 @@ properties: For AM65x DSS, the OLDI output port node from video port 1. For AM625 DSS, the internal DPI output port node from video port 1. + For AM62A7 DSS, the port is tied off inside the SoC. 
port@1: $ref: /schemas/graph.yaml#/properties/port @@ -108,6 +110,18 @@ properties: Input memory (from main memory to dispc) bandwidth limit in bytes per second +allOf: + - if: + properties: + compatible: + contains: + const: ti,am62a7-dss + then: + properties: + ports: + properties: + port@0: false + required: - compatible - reg -- cgit v1.2.3 From 5cc5ea7b6d7b0649b74df1e0f2d1875e683704d9 Mon Sep 17 00:00:00 2001 From: Aradhya Bhatia Date: Wed, 8 Nov 2023 22:46:19 +0530 Subject: drm/tidss: Add support for AM62A7 DSS Add support for the DSS controller on TI's AM62A7 SoC in the tidss driver. This controller has 2 video pipelines that can render 2 video planes on over a screen, using the overlay managers. The output of the DSS comes from video port 2 (VP2) in the form of RGB88 DPI signals, while the VP1 is tied off inside the SoC. Also add and use a new type of VP, DISPC_VP_TIED_OFF, for the tied-off VP1 of AM62A DSS. Signed-off-by: Aradhya Bhatia Reviewed-by: Tomi Valkeinen Link: https://lore.kernel.org/r/20231108171619.978438-3-a-bhatia1@ti.com Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/tidss/tidss_dispc.c | 59 +++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/tidss/tidss_dispc.h | 3 ++ drivers/gpu/drm/tidss/tidss_drv.c | 1 + 3 files changed, 63 insertions(+) diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index 9d9dee7abaef..7af416457c57 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -322,6 +322,60 @@ const struct dispc_features dispc_am625_feats = { .vid_order = { 1, 0 }, }; +const struct dispc_features dispc_am62a7_feats = { + /* + * if the code reaches dispc_mode_valid with VP1, + * it should return MODE_BAD. + */ + .max_pclk_khz = { + [DISPC_VP_TIED_OFF] = 0, + [DISPC_VP_DPI] = 165000, + }, + + .scaling = { + .in_width_max_5tap_rgb = 1280, + .in_width_max_3tap_rgb = 2560, + .in_width_max_5tap_yuv = 2560, + .in_width_max_3tap_yuv = 4096, + .upscale_limit = 16, + .downscale_limit_5tap = 4, + .downscale_limit_3tap = 2, + /* + * The max supported pixel inc value is 255. The value + * of pixel inc is calculated like this: 1+(xinc-1)*bpp. + * The maximum bpp of all formats supported by the HW + * is 8. So the maximum supported xinc value is 32, + * because 1+(32-1)*8 < 255 < 1+(33-1)*4. 
+ */ + .xinc_max = 32, + }, + + .subrev = DISPC_AM62A7, + + .common = "common", + .common_regs = tidss_am65x_common_regs, + + .num_vps = 2, + .vp_name = { "vp1", "vp2" }, + .ovr_name = { "ovr1", "ovr2" }, + .vpclk_name = { "vp1", "vp2" }, + /* VP1 of the DSS in AM62A7 SoC is tied off internally */ + .vp_bus_type = { DISPC_VP_TIED_OFF, DISPC_VP_DPI }, + + .vp_feat = { .color = { + .has_ctm = true, + .gamma_size = 256, + .gamma_type = TIDSS_GAMMA_8BIT, + }, + }, + + .num_planes = 2, + /* note: vid is plane_id 0 and vidl1 is plane_id 1 */ + .vid_name = { "vid", "vidl1" }, + .vid_lite = { false, true, }, + .vid_order = { 1, 0 }, +}; + static const u16 *dispc_common_regmap; struct dss_vp_data { @@ -824,6 +878,7 @@ dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc) case DISPC_K2G: return dispc_k2g_read_and_clear_irqstatus(dispc); case DISPC_AM625: + case DISPC_AM62A7: case DISPC_AM65X: case DISPC_J721E: return dispc_k3_read_and_clear_irqstatus(dispc); @@ -840,6 +895,7 @@ void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask) dispc_k2g_set_irqenable(dispc, mask); break; case DISPC_AM625: + case DISPC_AM62A7: case DISPC_AM65X: case DISPC_J721E: dispc_k3_set_irqenable(dispc, mask); @@ -1331,6 +1387,7 @@ void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane, x, y, layer); break; case DISPC_AM625: + case DISPC_AM62A7: case DISPC_AM65X: dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport, x, y, layer); @@ -2250,6 +2307,7 @@ static void dispc_plane_init(struct dispc_device *dispc) dispc_k2g_plane_init(dispc); break; case DISPC_AM625: + case DISPC_AM62A7: case DISPC_AM65X: case DISPC_J721E: dispc_k3_plane_init(dispc); @@ -2357,6 +2415,7 @@ static void dispc_vp_write_gamma_table(struct dispc_device *dispc, dispc_k2g_vp_write_gamma_table(dispc, hw_videoport); break; case DISPC_AM625: + case DISPC_AM62A7: case DISPC_AM65X: dispc_am65x_vp_write_gamma_table(dispc, hw_videoport); break; diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h index 33ac5ad7a423..086327d51a90 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.h +++ b/drivers/gpu/drm/tidss/tidss_dispc.h @@ -54,12 +54,14 @@ enum dispc_vp_bus_type { DISPC_VP_DPI, /* DPI output */ DISPC_VP_OLDI, /* OLDI (LVDS) output */ DISPC_VP_INTERNAL, /* SoC internal routing */ + DISPC_VP_TIED_OFF, /* Tied off / Unavailable */ DISPC_VP_MAX_BUS_TYPE, }; enum dispc_dss_subrevision { DISPC_K2G, DISPC_AM625, + DISPC_AM62A7, DISPC_AM65X, DISPC_J721E, }; @@ -88,6 +90,7 @@ struct dispc_features { extern const struct dispc_features dispc_k2g_feats; extern const struct dispc_features dispc_am625_feats; +extern const struct dispc_features dispc_am62a7_feats; extern const struct dispc_features dispc_am65x_feats; extern const struct dispc_features dispc_j721e_feats; diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index 4d063eb9cd0b..edf69d020544 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -231,6 +231,7 @@ static void tidss_shutdown(struct platform_device *pdev) static const struct of_device_id tidss_of_table[] = { { .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, }, { .compatible = "ti,am625-dss", .data = &dispc_am625_feats, }, + { .compatible = "ti,am62a7-dss", .data = &dispc_am62a7_feats, }, { .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, }, { .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, }, { } -- cgit v1.2.3 From a0a9e7b4690b1d78f146fbc8c78a630856ed3015 Mon Sep 17 00:00:00 2001 
From: Tomi Valkeinen Date: Thu, 9 Nov 2023 09:37:54 +0200 Subject: drm/tidss: Use pm_runtime_resume_and_get() Use pm_runtime_resume_and_get() instead of pm_runtime_get_sync(), which will handle error situations better. Also fix the return, as there should be no reason for the current complex return. Reviewed-by: Laurent Pinchart Reviewed-by: Aradhya Bhatia Link: https://lore.kernel.org/r/20231109-tidss-probe-v2-1-ac91b5ea35c0@ideasonboard.com Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/tidss/tidss_drv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index edf69d020544..bb1297820856 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -32,9 +32,9 @@ int tidss_runtime_get(struct tidss_device *tidss) dev_dbg(tidss->dev, "%s\n", __func__); - r = pm_runtime_get_sync(tidss->dev); + r = pm_runtime_resume_and_get(tidss->dev); WARN_ON(r < 0); - return r < 0 ? r : 0; + return r; } void tidss_runtime_put(struct tidss_device *tidss) -- cgit v1.2.3 From 4b0bdf9383a999498b88df4c1a1e3f0910b86d53 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Thu, 9 Nov 2023 09:37:55 +0200 Subject: drm/tidss: Use PM autosuspend Use runtime PM autosuspend feature, with 1s timeout, to avoid unnecessary suspend-resume cycles when, e.g. the userspace temporarily turns off the crtcs when configuring the outputs. Reviewed-by: Aradhya Bhatia Link: https://lore.kernel.org/r/20231109-tidss-probe-v2-2-ac91b5ea35c0@ideasonboard.com Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/tidss/tidss_drv.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index bb1297820856..09e30de2dd48 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -43,7 +43,9 @@ void tidss_runtime_put(struct tidss_device *tidss) dev_dbg(tidss->dev, "%s\n", __func__); - r = pm_runtime_put_sync(tidss->dev); + pm_runtime_mark_last_busy(tidss->dev); + + r = pm_runtime_put_autosuspend(tidss->dev); WARN_ON(r < 0); } @@ -144,6 +146,9 @@ static int tidss_probe(struct platform_device *pdev) pm_runtime_enable(dev); + pm_runtime_set_autosuspend_delay(dev, 1000); + pm_runtime_use_autosuspend(dev); + #ifndef CONFIG_PM /* If we don't have PM, we need to call resume manually */ dispc_runtime_resume(tidss->dispc); @@ -192,6 +197,7 @@ err_runtime_suspend: #ifndef CONFIG_PM dispc_runtime_suspend(tidss->dispc); #endif + pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); return ret; @@ -215,6 +221,7 @@ static void tidss_remove(struct platform_device *pdev) /* If we don't have PM, we need to call suspend manually */ dispc_runtime_suspend(tidss->dispc); #endif + pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); /* devm allocated dispc goes away with the dev so mark it NULL */ -- cgit v1.2.3 From c2746e4d278be8f71ef0dbcd00fcc8ba46c678ef Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Thu, 9 Nov 2023 09:37:56 +0200 Subject: drm/tidss: Drop useless variable init No need to initialize the ret to 0 in dispc_softreset(). 
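
Taken together, the two runtime-PM patches above leave the driver with the usual get/put pairing. A condensed sketch of that pairing is shown below (hypothetical function names, generic device pointer, error handling reduced to a WARN as in the driver; not a verbatim copy of the tidss code):

    #include <linux/pm_runtime.h>

    static int example_runtime_get(struct device *dev)
    {
            int r = pm_runtime_resume_and_get(dev); /* resume now; no usage-count leak on error */

            WARN_ON(r < 0);
            return r;                               /* 0 on success, negative errno otherwise */
    }

    static void example_runtime_put(struct device *dev)
    {
            pm_runtime_mark_last_busy(dev);                 /* restart the 1 s autosuspend timer */
            WARN_ON(pm_runtime_put_autosuspend(dev) < 0);   /* suspend happens after the delay */
    }
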
Reviewed-by: Laurent Pinchart Reviewed-by: Aradhya Bhatia Link: https://lore.kernel.org/r/20231109-tidss-probe-v2-3-ac91b5ea35c0@ideasonboard.com Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/tidss/tidss_dispc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index 7af416457c57..1dfd95a54e64 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -2764,7 +2764,7 @@ static void dispc_init_errata(struct dispc_device *dispc) static void dispc_softreset(struct dispc_device *dispc) { u32 val; - int ret = 0; + int ret; /* Soft reset */ REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1); -- cgit v1.2.3 From 36d1e0852680aa038e2428d450673390111b165c Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Thu, 9 Nov 2023 09:37:57 +0200 Subject: drm/tidss: Move reset to the end of dispc_init() We do a DSS reset in the middle of the dispc_init(). While that happens to work now, we should really make sure that e..g the fclk, which is acquired only later in the function, is enabled when doing a reset. This will be handled in a later patch, but for now, let's move the dispc_softreset() call to the end of dispc_init(), which is a sensible place for it anyway. Reviewed-by: Laurent Pinchart Reviewed-by: Aradhya Bhatia Link: https://lore.kernel.org/r/20231109-tidss-probe-v2-4-ac91b5ea35c0@ideasonboard.com Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/tidss/tidss_dispc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index 1dfd95a54e64..bce5432d26f3 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -2836,10 +2836,6 @@ int dispc_init(struct tidss_device *tidss) return r; } - /* K2G display controller does not support soft reset */ - if (feat->subrev != DISPC_K2G) - dispc_softreset(dispc); - for (i = 0; i < dispc->feat->num_vps; i++) { u32 gamma_size = dispc->feat->vp_feat.color.gamma_size; u32 *gamma_table; @@ -2888,6 +2884,10 @@ int dispc_init(struct tidss_device *tidss) of_property_read_u32(dispc->dev->of_node, "max-memory-bandwidth", &dispc->memory_bandwidth_limit); + /* K2G display controller does not support soft reset */ + if (feat->subrev != DISPC_K2G) + dispc_softreset(dispc); + tidss->dispc = dispc; return 0; -- cgit v1.2.3 From aceafbb5035c4bfc75a321863ed1e393d644d2d2 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Thu, 9 Nov 2023 09:37:58 +0200 Subject: drm/tidss: Return error value from from softreset Return an error value from dispc_softreset() so that the caller can handle the errors. 
Reviewed-by: Aradhya Bhatia Link: https://lore.kernel.org/r/20231109-tidss-probe-v2-5-ac91b5ea35c0@ideasonboard.com Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/tidss/tidss_dispc.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index bce5432d26f3..b72f1e7d4b72 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -2761,7 +2761,7 @@ static void dispc_init_errata(struct dispc_device *dispc) } } -static void dispc_softreset(struct dispc_device *dispc) +static int dispc_softreset(struct dispc_device *dispc) { u32 val; int ret; @@ -2771,8 +2771,12 @@ static void dispc_softreset(struct dispc_device *dispc) /* Wait for reset to complete */ ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS, val, val & 1, 100, 5000); - if (ret) - dev_warn(dispc->dev, "failed to reset dispc\n"); + if (ret) { + dev_err(dispc->dev, "failed to reset dispc\n"); + return ret; + } + + return 0; } int dispc_init(struct tidss_device *tidss) @@ -2885,8 +2889,11 @@ int dispc_init(struct tidss_device *tidss) &dispc->memory_bandwidth_limit); /* K2G display controller does not support soft reset */ - if (feat->subrev != DISPC_K2G) - dispc_softreset(dispc); + if (feat->subrev != DISPC_K2G) { + r = dispc_softreset(dispc); + if (r) + return r; + } tidss->dispc = dispc; -- cgit v1.2.3 From 151825150cf9c2e9fb90763d35b9dff3783628ac Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Thu, 9 Nov 2023 09:37:59 +0200 Subject: drm/tidss: Check for K2G in in dispc_softreset() K2G doesn't have softreset feature. Instead of having every caller of dispc_softreset() check for K2G, move the check into dispc_softreset(), and make dispc_softreset() return 0 in case of K2G. Reviewed-by: Laurent Pinchart Reviewed-by: Aradhya Bhatia Link: https://lore.kernel.org/r/20231109-tidss-probe-v2-6-ac91b5ea35c0@ideasonboard.com Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/tidss/tidss_dispc.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index b72f1e7d4b72..f9de26cd241a 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -2766,6 +2766,10 @@ static int dispc_softreset(struct dispc_device *dispc) u32 val; int ret; + /* K2G display controller does not support soft reset */ + if (dispc->feat->subrev == DISPC_K2G) + return 0; + /* Soft reset */ REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1); /* Wait for reset to complete */ @@ -2888,12 +2892,9 @@ int dispc_init(struct tidss_device *tidss) of_property_read_u32(dispc->dev->of_node, "max-memory-bandwidth", &dispc->memory_bandwidth_limit); - /* K2G display controller does not support soft reset */ - if (feat->subrev != DISPC_K2G) { - r = dispc_softreset(dispc); - if (r) - return r; - } + r = dispc_softreset(dispc); + if (r) + return r; tidss->dispc = dispc; -- cgit v1.2.3 From 576d96c5c896221b5bc8feae473739469a92e144 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Thu, 9 Nov 2023 09:38:00 +0200 Subject: drm/tidss: Add simple K2G manual reset K2G display controller does not support soft reset, but we can do the most important steps manually: mask the IRQs and disable the VPs. 
Reviewed-by: Aradhya Bhatia Link: https://lore.kernel.org/r/20231109-tidss-probe-v2-7-ac91b5ea35c0@ideasonboard.com Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/tidss/tidss_dispc.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index f9de26cd241a..c3741feaafff 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -2761,14 +2761,28 @@ static void dispc_init_errata(struct dispc_device *dispc) } } +/* + * K2G display controller does not support soft reset, so we do a basic manual + * reset here: make sure the IRQs are masked and VPs are disabled. + */ +static void dispc_softreset_k2g(struct dispc_device *dispc) +{ + dispc_set_irqenable(dispc, 0); + dispc_read_and_clear_irqstatus(dispc); + + for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx) + VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0); +} + static int dispc_softreset(struct dispc_device *dispc) { u32 val; int ret; - /* K2G display controller does not support soft reset */ - if (dispc->feat->subrev == DISPC_K2G) + if (dispc->feat->subrev == DISPC_K2G) { + dispc_softreset_k2g(dispc); return 0; + } /* Soft reset */ REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1); -- cgit v1.2.3 From bc288a927815efcf9d7f4a54d4d89c5df478c635 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Thu, 9 Nov 2023 09:38:01 +0200 Subject: drm/tidss: Fix dss reset The probe function calls dispc_softreset() before runtime PM is enabled and without enabling any of the DSS clocks. This happens to work by luck, and we need to make sure the DSS HW is active and the fclk is enabled. To fix the above, add a new function, dispc_init_hw(), which does: - pm_runtime_set_active() - clk_prepare_enable(fclk) - dispc_softreset(). This ensures that the reset can be successfully accomplished. Note that we use pm_runtime_set_active(), not the normal pm_runtime_get(). The reason for this is that at this point we haven't enabled the runtime PM yet and also we don't want the normal resume callback to be called: the dispc resume callback does some initial HW setup, and it expects that the HW was off (no video ports are streaming). If the bootloader has enabled the DSS and has set up a boot time splash-screen, the DSS would be enabled and streaming which might lead to issues with the normal resume callback. 
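
The shape of that init-time sequence, as a simplified sketch with hypothetical names (the real dispc_init_hw() in the diff below also reports the clock and PM errors individually):

    #include <linux/clk.h>
    #include <linux/pm_runtime.h>

    /*
     * Sketch of the pattern: a one-off HW access done before runtime PM
     * is enabled, bracketed so the PM core's view of the device matches
     * the actual hardware state.
     */
    static int example_init_hw(struct device *dev, struct clk *fclk,
                               int (*hw_reset)(struct device *dev))
    {
            int ret;

            ret = pm_runtime_set_active(dev);       /* tell the PM core the HW is powered */
            if (ret)
                    return ret;

            ret = clk_prepare_enable(fclk);         /* the reset needs the functional clock */
            if (ret)
                    goto out_suspended;

            ret = hw_reset(dev);                    /* e.g. a soft reset of the controller */

            clk_disable_unprepare(fclk);
    out_suspended:
            pm_runtime_set_suspended(dev);          /* hand a clean, suspended device back to probe */
            return ret;
    }
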
Fixes: c9b2d923befd ("drm/tidss: Soft Reset DISPC on startup") Reviewed-by: Aradhya Bhatia Link: https://lore.kernel.org/r/20231109-tidss-probe-v2-8-ac91b5ea35c0@ideasonboard.com Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/tidss/tidss_dispc.c | 45 ++++++++++++++++++++++++++++++++++++- 1 file changed, 44 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index c3741feaafff..1ad711f8d2a8 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -2797,6 +2797,49 @@ static int dispc_softreset(struct dispc_device *dispc) return 0; } +static int dispc_init_hw(struct dispc_device *dispc) +{ + struct device *dev = dispc->dev; + int ret; + + ret = pm_runtime_set_active(dev); + if (ret) { + dev_err(dev, "Failed to set DSS PM to active\n"); + return ret; + } + + ret = clk_prepare_enable(dispc->fclk); + if (ret) { + dev_err(dev, "Failed to enable DSS fclk\n"); + goto err_runtime_suspend; + } + + ret = dispc_softreset(dispc); + if (ret) + goto err_clk_disable; + + clk_disable_unprepare(dispc->fclk); + ret = pm_runtime_set_suspended(dev); + if (ret) { + dev_err(dev, "Failed to set DSS PM to suspended\n"); + return ret; + } + + return 0; + +err_clk_disable: + clk_disable_unprepare(dispc->fclk); + +err_runtime_suspend: + ret = pm_runtime_set_suspended(dev); + if (ret) { + dev_err(dev, "Failed to set DSS PM to suspended\n"); + return ret; + } + + return ret; +} + int dispc_init(struct tidss_device *tidss) { struct device *dev = tidss->dev; @@ -2906,7 +2949,7 @@ int dispc_init(struct tidss_device *tidss) of_property_read_u32(dispc->dev->of_node, "max-memory-bandwidth", &dispc->memory_bandwidth_limit); - r = dispc_softreset(dispc); + r = dispc_init_hw(dispc); if (r) return r; -- cgit v1.2.3 From d4652187367bc55983139401d0ee41d6e565b13d Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Thu, 9 Nov 2023 09:38:02 +0200 Subject: drm/tidss: IRQ code cleanup The IRQ setup code is overly complex. All we really need to do is initialize the related fields in struct tidss_device, and request the IRQ. We can drop all the HW accesses, as they are pointless: the driver will set the IRQs correctly when it needs any of the IRQs, and at probe time we have done a reset, so we know that all the IRQs are masked by default in the hardware. Thus we can combine the tidss_irq_preinstall() and tidss_irq_postinstall() into the tidss_irq_install() function, drop the HW accesses, and drop the use of spinlock, as this is done at init time and there can be no races. We can also drop the HW access from the tidss_irq_uninstall(), as the driver will anyway disable and suspend the hardware at remove time. 
Reviewed-by: Aradhya Bhatia Link: https://lore.kernel.org/r/20231109-tidss-probe-v2-9-ac91b5ea35c0@ideasonboard.com Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/tidss/tidss_drv.c | 2 ++ drivers/gpu/drm/tidss/tidss_irq.c | 54 ++++++--------------------------------- 2 files changed, 10 insertions(+), 46 deletions(-) diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index 09e30de2dd48..d15f836dca95 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -138,6 +138,8 @@ static int tidss_probe(struct platform_device *pdev) platform_set_drvdata(pdev, tidss); + spin_lock_init(&tidss->wait_lock); + ret = dispc_init(tidss); if (ret) { dev_err(dev, "failed to initialize dispc: %d\n", ret); diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c index 0c681c7600bc..604334ef526a 100644 --- a/drivers/gpu/drm/tidss/tidss_irq.c +++ b/drivers/gpu/drm/tidss/tidss_irq.c @@ -93,33 +93,21 @@ void tidss_irq_resume(struct tidss_device *tidss) spin_unlock_irqrestore(&tidss->wait_lock, flags); } -static void tidss_irq_preinstall(struct drm_device *ddev) -{ - struct tidss_device *tidss = to_tidss(ddev); - - spin_lock_init(&tidss->wait_lock); - - tidss_runtime_get(tidss); - - dispc_set_irqenable(tidss->dispc, 0); - dispc_read_and_clear_irqstatus(tidss->dispc); - - tidss_runtime_put(tidss); -} - -static void tidss_irq_postinstall(struct drm_device *ddev) +int tidss_irq_install(struct drm_device *ddev, unsigned int irq) { struct tidss_device *tidss = to_tidss(ddev); - unsigned long flags; - unsigned int i; + int ret; - tidss_runtime_get(tidss); + if (irq == IRQ_NOTCONNECTED) + return -ENOTCONN; - spin_lock_irqsave(&tidss->wait_lock, flags); + ret = request_irq(irq, tidss_irq_handler, 0, ddev->driver->name, ddev); + if (ret) + return ret; tidss->irq_mask = DSS_IRQ_DEVICE_OCP_ERR; - for (i = 0; i < tidss->num_crtcs; ++i) { + for (unsigned int i = 0; i < tidss->num_crtcs; ++i) { struct tidss_crtc *tcrtc = to_tidss_crtc(tidss->crtcs[i]); tidss->irq_mask |= DSS_IRQ_VP_SYNC_LOST(tcrtc->hw_videoport); @@ -127,28 +115,6 @@ static void tidss_irq_postinstall(struct drm_device *ddev) tidss->irq_mask |= DSS_IRQ_VP_FRAME_DONE(tcrtc->hw_videoport); } - tidss_irq_update(tidss); - - spin_unlock_irqrestore(&tidss->wait_lock, flags); - - tidss_runtime_put(tidss); -} - -int tidss_irq_install(struct drm_device *ddev, unsigned int irq) -{ - int ret; - - if (irq == IRQ_NOTCONNECTED) - return -ENOTCONN; - - tidss_irq_preinstall(ddev); - - ret = request_irq(irq, tidss_irq_handler, 0, ddev->driver->name, ddev); - if (ret) - return ret; - - tidss_irq_postinstall(ddev); - return 0; } @@ -156,9 +122,5 @@ void tidss_irq_uninstall(struct drm_device *ddev) { struct tidss_device *tidss = to_tidss(ddev); - tidss_runtime_get(tidss); - dispc_set_irqenable(tidss->dispc, 0); - tidss_runtime_put(tidss); - free_irq(tidss->irq, ddev); } -- cgit v1.2.3 From 95d4b471953411854f9c80b568da7fcf753f3801 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Thu, 9 Nov 2023 09:38:03 +0200 Subject: drm/tidss: Fix atomic_flush check tidss_crtc_atomic_flush() checks if the crtc is enabled, and if not, returns immediately as there's no reason to do any register changes. However, the code checks for 'crtc->state->enable', which does not reflect the actual HW state. We should instead look at the 'crtc->state->active' flag. 
This causes the tidss_crtc_atomic_flush() to proceed with the flush even if the active state is false, which then causes us to hit the WARN_ON(!crtc->state->event) check. Fix this by checking the active flag, and while at it, fix the related debug print which had "active" and "needs modeset" wrong way. Cc: Fixes: 32a1795f57ee ("drm/tidss: New driver for TI Keystone platform Display SubSystem") Reviewed-by: Aradhya Bhatia Link: https://lore.kernel.org/r/20231109-tidss-probe-v2-10-ac91b5ea35c0@ideasonboard.com Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/tidss/tidss_crtc.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c index 5e5e466f35d1..7c78c074e3a2 100644 --- a/drivers/gpu/drm/tidss/tidss_crtc.c +++ b/drivers/gpu/drm/tidss/tidss_crtc.c @@ -169,13 +169,13 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc, struct tidss_device *tidss = to_tidss(ddev); unsigned long flags; - dev_dbg(ddev->dev, - "%s: %s enabled %d, needs modeset %d, event %p\n", __func__, - crtc->name, drm_atomic_crtc_needs_modeset(crtc->state), - crtc->state->enable, crtc->state->event); + dev_dbg(ddev->dev, "%s: %s is %sactive, %s modeset, event %p\n", + __func__, crtc->name, crtc->state->active ? "" : "not ", + drm_atomic_crtc_needs_modeset(crtc->state) ? "needs" : "doesn't need", + crtc->state->event); /* There is nothing to do if CRTC is not going to be enabled. */ - if (!crtc->state->enable) + if (!crtc->state->active) return; /* -- cgit v1.2.3 From ca89b69734f92021e25899dff3e433b1904f5832 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Thu, 9 Nov 2023 09:38:04 +0200 Subject: drm/tidss: Use DRM_PLANE_COMMIT_ACTIVE_ONLY At the moment the driver does not use DRM_PLANE_COMMIT_ACTIVE_ONLY, but still checks for crtc->state->active in tidss_crtc_atomic_flush(), and skips the flush if the crtc is not active. The exact reason why DRM_PLANE_COMMIT_ACTIVE_ONLY is not used has been lost in history. DRM_PLANE_COMMIT_ACTIVE_ONLY does also affect the plane updates, and I think the issue was related to multi-display systems and moving planes between the displays. However, it is possible the issue was only present on the older DSS hardware, handled by the omapdrm driver (on which the tidss driver is loosely based). Reviewing the code related to DRM_PLANE_COMMIT_ACTIVE_ONLY does not show any issues, and testing on J7 EVM with two displays works fine. Change the driver to use DRM_PLANE_COMMIT_ACTIVE_ONLY. Reviewed-by: Aradhya Bhatia Link: https://lore.kernel.org/r/20231109-tidss-probe-v2-11-ac91b5ea35c0@ideasonboard.com Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/tidss/tidss_crtc.c | 4 ---- drivers/gpu/drm/tidss/tidss_kms.c | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c index 7c78c074e3a2..5f838980c7a1 100644 --- a/drivers/gpu/drm/tidss/tidss_crtc.c +++ b/drivers/gpu/drm/tidss/tidss_crtc.c @@ -174,10 +174,6 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc, drm_atomic_crtc_needs_modeset(crtc->state) ? "needs" : "doesn't need", crtc->state->event); - /* There is nothing to do if CRTC is not going to be enabled. */ - if (!crtc->state->active) - return; - /* * Flush CRTC changes with go bit only if new modeset is not * coming, so CRTC is enabled trough out the commit. 
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c index d096d8d2bc8f..a0e494c806a9 100644 --- a/drivers/gpu/drm/tidss/tidss_kms.c +++ b/drivers/gpu/drm/tidss/tidss_kms.c @@ -29,7 +29,7 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state) tidss_runtime_get(tidss); drm_atomic_helper_commit_modeset_disables(ddev, old_state); - drm_atomic_helper_commit_planes(ddev, old_state, 0); + drm_atomic_helper_commit_planes(ddev, old_state, DRM_PLANE_COMMIT_ACTIVE_ONLY); drm_atomic_helper_commit_modeset_enables(ddev, old_state); drm_atomic_helper_commit_hw_done(old_state); -- cgit v1.2.3 From 780b9463ce66a9efb18e3b5d35cd011fb918d741 Mon Sep 17 00:00:00 2001 From: Melissa Wen Date: Thu, 30 Nov 2023 13:40:24 -0300 Subject: drm/v3d: Remove unused function header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v3d_mmu_get_offset header was added but the function was never defined. Just remove it. Signed-off-by: Melissa Wen Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20231130164420.932823-3-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_drv.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 4c59fefaa0b4..d8b392494eba 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -420,8 +420,6 @@ void v3d_irq_disable(struct v3d_dev *v3d); void v3d_irq_reset(struct v3d_dev *v3d); /* v3d_mmu.c */ -int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo, - u32 *offset); int v3d_mmu_set_page_table(struct v3d_dev *v3d); void v3d_mmu_insert_ptes(struct v3d_bo *bo); void v3d_mmu_remove_ptes(struct v3d_bo *bo); -- cgit v1.2.3 From a8ad9d63a160f6b93f6958a5a0ded1a6abb15815 Mon Sep 17 00:00:00 2001 From: Melissa Wen Date: Thu, 30 Nov 2023 13:40:25 -0300 Subject: drm/v3d: Move wait BO ioctl to the v3d_bo file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit IOCTLs related to BO operations reside on the file v3d_bo.c. The wait BO ioctl is the only IOCTL regarding BOs that is placed in a different file. So, move it to the v3d_bo.c file. Signed-off-by: Melissa Wen Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20231130164420.932823-4-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_bo.c | 33 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/v3d/v3d_drv.h | 4 ++-- drivers/gpu/drm/v3d/v3d_gem.c | 33 --------------------------------- 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index 8b3229a37c6d..357a0da7e16a 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -233,3 +233,36 @@ int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, drm_gem_object_put(gem_obj); return 0; } + +int +v3d_wait_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret; + struct drm_v3d_wait_bo *args = data; + ktime_t start = ktime_get(); + u64 delta_ns; + unsigned long timeout_jiffies = + nsecs_to_jiffies_timeout(args->timeout_ns); + + if (args->pad != 0) + return -EINVAL; + + ret = drm_gem_dma_resv_wait(file_priv, args->handle, + true, timeout_jiffies); + + /* Decrement the user's timeout, in case we got interrupted + * such that the ioctl will be restarted. 
+ */ + delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start)); + if (delta_ns < args->timeout_ns) + args->timeout_ns -= delta_ns; + else + args->timeout_ns = 0; + + /* Asked to wait beyond the jiffie/scheduler precision? */ + if (ret == -ETIME && args->timeout_ns) + ret = -EAGAIN; + + return ret; +} diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index d8b392494eba..fc04372d5cbd 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -385,6 +385,8 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int v3d_wait_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); @@ -405,8 +407,6 @@ int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_submit_csd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -int v3d_wait_bo_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); void v3d_job_cleanup(struct v3d_job *job); void v3d_job_put(struct v3d_job *job); void v3d_reset(struct v3d_dev *v3d); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 9d2ac23c29e3..26f62a48d226 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -363,39 +363,6 @@ void v3d_job_put(struct v3d_job *job) kref_put(&job->refcount, job->free); } -int -v3d_wait_bo_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - int ret; - struct drm_v3d_wait_bo *args = data; - ktime_t start = ktime_get(); - u64 delta_ns; - unsigned long timeout_jiffies = - nsecs_to_jiffies_timeout(args->timeout_ns); - - if (args->pad != 0) - return -EINVAL; - - ret = drm_gem_dma_resv_wait(file_priv, args->handle, - true, timeout_jiffies); - - /* Decrement the user's timeout, in case we got interrupted - * such that the ioctl will be restarted. - */ - delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start)); - if (delta_ns < args->timeout_ns) - args->timeout_ns -= delta_ns; - else - args->timeout_ns = 0; - - /* Asked to wait beyond the jiffie/scheduler precision? */ - if (ret == -ETIME && args->timeout_ns) - ret = -EAGAIN; - - return ret; -} - static int v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, void **container, size_t size, void (*free)(struct kref *ref), -- cgit v1.2.3 From 9032d5f633ed7b5c726971dc7e2372045bf27f40 Mon Sep 17 00:00:00 2001 From: Melissa Wen Date: Thu, 30 Nov 2023 13:40:26 -0300 Subject: drm/v3d: Detach job submissions IOCTLs to a new specific file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We will include a new job submission type, the CPU job submission. For readability and maintability, separate the job submission IOCTLs and related operations from v3d_gem.c. Minor fix in the CSD submission kernel doc: CSD (texture formatting) -> CSD (compute shader). 
Signed-off-by: Melissa Wen Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20231130164420.932823-5-mcanal@igalia.com --- drivers/gpu/drm/v3d/Makefile | 3 +- drivers/gpu/drm/v3d/v3d_drv.h | 12 +- drivers/gpu/drm/v3d/v3d_gem.c | 735 -------------------------------------- drivers/gpu/drm/v3d/v3d_submit.c | 744 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 753 insertions(+), 741 deletions(-) create mode 100644 drivers/gpu/drm/v3d/v3d_submit.c diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile index 4b21b20e4998..b7d673f1153b 100644 --- a/drivers/gpu/drm/v3d/Makefile +++ b/drivers/gpu/drm/v3d/Makefile @@ -12,7 +12,8 @@ v3d-y := \ v3d_perfmon.o \ v3d_trace_points.o \ v3d_sched.o \ - v3d_sysfs.o + v3d_sysfs.o \ + v3d_submit.o v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index fc04372d5cbd..4db9ace66024 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -401,17 +401,19 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue); /* v3d_gem.c */ int v3d_gem_init(struct drm_device *dev); void v3d_gem_destroy(struct drm_device *dev); +void v3d_reset(struct v3d_dev *v3d); +void v3d_invalidate_caches(struct v3d_dev *v3d); +void v3d_clean_caches(struct v3d_dev *v3d); + +/* v3d_submit.c */ +void v3d_job_cleanup(struct v3d_job *job); +void v3d_job_put(struct v3d_job *job); int v3d_submit_cl_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_submit_csd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -void v3d_job_cleanup(struct v3d_job *job); -void v3d_job_put(struct v3d_job *job); -void v3d_reset(struct v3d_dev *v3d); -void v3d_invalidate_caches(struct v3d_dev *v3d); -void v3d_clean_caches(struct v3d_dev *v3d); /* v3d_irq.c */ int v3d_irq_init(struct v3d_dev *v3d); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 26f62a48d226..afc565078c78 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -11,8 +11,6 @@ #include #include -#include -#include #include "v3d_drv.h" #include "v3d_regs.h" @@ -241,739 +239,6 @@ v3d_invalidate_caches(struct v3d_dev *v3d) v3d_invalidate_slices(v3d, 0); } -/* Takes the reservation lock on all the BOs being referenced, so that - * at queue submit time we can update the reservations. - * - * We don't lock the RCL the tile alloc/state BOs, or overflow memory - * (all of which are on exec->unref_list). They're entirely private - * to v3d, so we don't attach dma-buf fences to them. - */ -static int -v3d_lock_bo_reservations(struct v3d_job *job, - struct ww_acquire_ctx *acquire_ctx) -{ - int i, ret; - - ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx); - if (ret) - return ret; - - for (i = 0; i < job->bo_count; i++) { - ret = dma_resv_reserve_fences(job->bo[i]->resv, 1); - if (ret) - goto fail; - - ret = drm_sched_job_add_implicit_dependencies(&job->base, - job->bo[i], true); - if (ret) - goto fail; - } - - return 0; - -fail: - drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); - return ret; -} - -/** - * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects - * referenced by the job. 
- * @dev: DRM device - * @file_priv: DRM file for this fd - * @job: V3D job being set up - * @bo_handles: GEM handles - * @bo_count: Number of GEM handles passed in - * - * The command validator needs to reference BOs by their index within - * the submitted job's BO list. This does the validation of the job's - * BO list and reference counting for the lifetime of the job. - * - * Note that this function doesn't need to unreference the BOs on - * failure, because that will happen at v3d_exec_cleanup() time. - */ -static int -v3d_lookup_bos(struct drm_device *dev, - struct drm_file *file_priv, - struct v3d_job *job, - u64 bo_handles, - u32 bo_count) -{ - job->bo_count = bo_count; - - if (!job->bo_count) { - /* See comment on bo_index for why we have to check - * this. - */ - DRM_DEBUG("Rendering requires BOs\n"); - return -EINVAL; - } - - return drm_gem_objects_lookup(file_priv, - (void __user *)(uintptr_t)bo_handles, - job->bo_count, &job->bo); -} - -static void -v3d_job_free(struct kref *ref) -{ - struct v3d_job *job = container_of(ref, struct v3d_job, refcount); - int i; - - if (job->bo) { - for (i = 0; i < job->bo_count; i++) - drm_gem_object_put(job->bo[i]); - kvfree(job->bo); - } - - dma_fence_put(job->irq_fence); - dma_fence_put(job->done_fence); - - if (job->perfmon) - v3d_perfmon_put(job->perfmon); - - kfree(job); -} - -static void -v3d_render_job_free(struct kref *ref) -{ - struct v3d_render_job *job = container_of(ref, struct v3d_render_job, - base.refcount); - struct v3d_bo *bo, *save; - - list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) { - drm_gem_object_put(&bo->base.base); - } - - v3d_job_free(ref); -} - -void v3d_job_cleanup(struct v3d_job *job) -{ - if (!job) - return; - - drm_sched_job_cleanup(&job->base); - v3d_job_put(job); -} - -void v3d_job_put(struct v3d_job *job) -{ - kref_put(&job->refcount, job->free); -} - -static int -v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, - void **container, size_t size, void (*free)(struct kref *ref), - u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue) -{ - struct v3d_file_priv *v3d_priv = file_priv->driver_priv; - struct v3d_job *job; - bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC); - int ret, i; - - *container = kcalloc(1, size, GFP_KERNEL); - if (!*container) { - DRM_ERROR("Cannot allocate memory for v3d job."); - return -ENOMEM; - } - - job = *container; - job->v3d = v3d; - job->free = free; - job->file = file_priv; - - ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue], - 1, v3d_priv); - if (ret) - goto fail; - - if (has_multisync) { - if (se->in_sync_count && se->wait_stage == queue) { - struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs); - - for (i = 0; i < se->in_sync_count; i++) { - struct drm_v3d_sem in; - - if (copy_from_user(&in, handle++, sizeof(in))) { - ret = -EFAULT; - DRM_DEBUG("Failed to copy wait dep handle.\n"); - goto fail_deps; - } - ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0); - - // TODO: Investigate why this was filtered out for the IOCTL. - if (ret && ret != -ENOENT) - goto fail_deps; - } - } - } else { - ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0); - - // TODO: Investigate why this was filtered out for the IOCTL. 
- if (ret && ret != -ENOENT) - goto fail_deps; - } - - kref_init(&job->refcount); - - return 0; - -fail_deps: - drm_sched_job_cleanup(&job->base); -fail: - kfree(*container); - *container = NULL; - - return ret; -} - -static void -v3d_push_job(struct v3d_job *job) -{ - drm_sched_job_arm(&job->base); - - job->done_fence = dma_fence_get(&job->base.s_fence->finished); - - /* put by scheduler job completion */ - kref_get(&job->refcount); - - drm_sched_entity_push_job(&job->base); -} - -static void -v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv, - struct v3d_job *job, - struct ww_acquire_ctx *acquire_ctx, - u32 out_sync, - struct v3d_submit_ext *se, - struct dma_fence *done_fence) -{ - struct drm_syncobj *sync_out; - bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC); - int i; - - for (i = 0; i < job->bo_count; i++) { - /* XXX: Use shared fences for read-only objects. */ - dma_resv_add_fence(job->bo[i]->resv, job->done_fence, - DMA_RESV_USAGE_WRITE); - } - - drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); - - /* Update the return sync object for the job */ - /* If it only supports a single signal semaphore*/ - if (!has_multisync) { - sync_out = drm_syncobj_find(file_priv, out_sync); - if (sync_out) { - drm_syncobj_replace_fence(sync_out, done_fence); - drm_syncobj_put(sync_out); - } - return; - } - - /* If multiple semaphores extension is supported */ - if (se->out_sync_count) { - for (i = 0; i < se->out_sync_count; i++) { - drm_syncobj_replace_fence(se->out_syncs[i].syncobj, - done_fence); - drm_syncobj_put(se->out_syncs[i].syncobj); - } - kvfree(se->out_syncs); - } -} - -static void -v3d_put_multisync_post_deps(struct v3d_submit_ext *se) -{ - unsigned int i; - - if (!(se && se->out_sync_count)) - return; - - for (i = 0; i < se->out_sync_count; i++) - drm_syncobj_put(se->out_syncs[i].syncobj); - kvfree(se->out_syncs); -} - -static int -v3d_get_multisync_post_deps(struct drm_file *file_priv, - struct v3d_submit_ext *se, - u32 count, u64 handles) -{ - struct drm_v3d_sem __user *post_deps; - int i, ret; - - if (!count) - return 0; - - se->out_syncs = (struct v3d_submit_outsync *) - kvmalloc_array(count, - sizeof(struct v3d_submit_outsync), - GFP_KERNEL); - if (!se->out_syncs) - return -ENOMEM; - - post_deps = u64_to_user_ptr(handles); - - for (i = 0; i < count; i++) { - struct drm_v3d_sem out; - - if (copy_from_user(&out, post_deps++, sizeof(out))) { - ret = -EFAULT; - DRM_DEBUG("Failed to copy post dep handles\n"); - goto fail; - } - - se->out_syncs[i].syncobj = drm_syncobj_find(file_priv, - out.handle); - if (!se->out_syncs[i].syncobj) { - ret = -EINVAL; - goto fail; - } - } - se->out_sync_count = count; - - return 0; - -fail: - for (i--; i >= 0; i--) - drm_syncobj_put(se->out_syncs[i].syncobj); - kvfree(se->out_syncs); - - return ret; -} - -/* Get data for multiple binary semaphores synchronization. Parse syncobj - * to be signaled when job completes (out_sync). 
- */ -static int -v3d_get_multisync_submit_deps(struct drm_file *file_priv, - struct drm_v3d_extension __user *ext, - void *data) -{ - struct drm_v3d_multi_sync multisync; - struct v3d_submit_ext *se = data; - int ret; - - if (copy_from_user(&multisync, ext, sizeof(multisync))) - return -EFAULT; - - if (multisync.pad) - return -EINVAL; - - ret = v3d_get_multisync_post_deps(file_priv, data, multisync.out_sync_count, - multisync.out_syncs); - if (ret) - return ret; - - se->in_sync_count = multisync.in_sync_count; - se->in_syncs = multisync.in_syncs; - se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC; - se->wait_stage = multisync.wait_stage; - - return 0; -} - -/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data - * according to the extension id (name). - */ -static int -v3d_get_extensions(struct drm_file *file_priv, - u64 ext_handles, - void *data) -{ - struct drm_v3d_extension __user *user_ext; - int ret; - - user_ext = u64_to_user_ptr(ext_handles); - while (user_ext) { - struct drm_v3d_extension ext; - - if (copy_from_user(&ext, user_ext, sizeof(ext))) { - DRM_DEBUG("Failed to copy submit extension\n"); - return -EFAULT; - } - - switch (ext.id) { - case DRM_V3D_EXT_ID_MULTI_SYNC: - ret = v3d_get_multisync_submit_deps(file_priv, user_ext, data); - if (ret) - return ret; - break; - default: - DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id); - return -EINVAL; - } - - user_ext = u64_to_user_ptr(ext.next); - } - - return 0; -} - -/** - * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D. - * @dev: DRM device - * @data: ioctl argument - * @file_priv: DRM file for this fd - * - * This is the main entrypoint for userspace to submit a 3D frame to - * the GPU. Userspace provides the binner command list (if - * applicable), and the kernel sets up the render command list to draw - * to the framebuffer described in the ioctl, using the command lists - * that the 3D engine's binner will produce. 
- */ -int -v3d_submit_cl_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct v3d_dev *v3d = to_v3d_dev(dev); - struct v3d_file_priv *v3d_priv = file_priv->driver_priv; - struct drm_v3d_submit_cl *args = data; - struct v3d_submit_ext se = {0}; - struct v3d_bin_job *bin = NULL; - struct v3d_render_job *render = NULL; - struct v3d_job *clean_job = NULL; - struct v3d_job *last_job; - struct ww_acquire_ctx acquire_ctx; - int ret = 0; - - trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end); - - if (args->pad) - return -EINVAL; - - if (args->flags && - args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE | - DRM_V3D_SUBMIT_EXTENSION)) { - DRM_INFO("invalid flags: %d\n", args->flags); - return -EINVAL; - } - - if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { - ret = v3d_get_extensions(file_priv, args->extensions, &se); - if (ret) { - DRM_DEBUG("Failed to get extensions.\n"); - return ret; - } - } - - ret = v3d_job_init(v3d, file_priv, (void *)&render, sizeof(*render), - v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER); - if (ret) - goto fail; - - render->start = args->rcl_start; - render->end = args->rcl_end; - INIT_LIST_HEAD(&render->unref_list); - - if (args->bcl_start != args->bcl_end) { - ret = v3d_job_init(v3d, file_priv, (void *)&bin, sizeof(*bin), - v3d_job_free, args->in_sync_bcl, &se, V3D_BIN); - if (ret) - goto fail; - - bin->start = args->bcl_start; - bin->end = args->bcl_end; - bin->qma = args->qma; - bin->qms = args->qms; - bin->qts = args->qts; - bin->render = render; - } - - if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) { - ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job), - v3d_job_free, 0, NULL, V3D_CACHE_CLEAN); - if (ret) - goto fail; - - last_job = clean_job; - } else { - last_job = &render->base; - } - - ret = v3d_lookup_bos(dev, file_priv, last_job, - args->bo_handles, args->bo_handle_count); - if (ret) - goto fail; - - ret = v3d_lock_bo_reservations(last_job, &acquire_ctx); - if (ret) - goto fail; - - if (args->perfmon_id) { - render->base.perfmon = v3d_perfmon_find(v3d_priv, - args->perfmon_id); - - if (!render->base.perfmon) { - ret = -ENOENT; - goto fail_perfmon; - } - } - - mutex_lock(&v3d->sched_lock); - if (bin) { - bin->base.perfmon = render->base.perfmon; - v3d_perfmon_get(bin->base.perfmon); - v3d_push_job(&bin->base); - - ret = drm_sched_job_add_dependency(&render->base.base, - dma_fence_get(bin->base.done_fence)); - if (ret) - goto fail_unreserve; - } - - v3d_push_job(&render->base); - - if (clean_job) { - struct dma_fence *render_fence = - dma_fence_get(render->base.done_fence); - ret = drm_sched_job_add_dependency(&clean_job->base, - render_fence); - if (ret) - goto fail_unreserve; - clean_job->perfmon = render->base.perfmon; - v3d_perfmon_get(clean_job->perfmon); - v3d_push_job(clean_job); - } - - mutex_unlock(&v3d->sched_lock); - - v3d_attach_fences_and_unlock_reservation(file_priv, - last_job, - &acquire_ctx, - args->out_sync, - &se, - last_job->done_fence); - - if (bin) - v3d_job_put(&bin->base); - v3d_job_put(&render->base); - if (clean_job) - v3d_job_put(clean_job); - - return 0; - -fail_unreserve: - mutex_unlock(&v3d->sched_lock); -fail_perfmon: - drm_gem_unlock_reservations(last_job->bo, - last_job->bo_count, &acquire_ctx); -fail: - v3d_job_cleanup((void *)bin); - v3d_job_cleanup((void *)render); - v3d_job_cleanup(clean_job); - v3d_put_multisync_post_deps(&se); - - return ret; -} - -/** - * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D. 
- * @dev: DRM device - * @data: ioctl argument - * @file_priv: DRM file for this fd - * - * Userspace provides the register setup for the TFU, which we don't - * need to validate since the TFU is behind the MMU. - */ -int -v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct v3d_dev *v3d = to_v3d_dev(dev); - struct drm_v3d_submit_tfu *args = data; - struct v3d_submit_ext se = {0}; - struct v3d_tfu_job *job = NULL; - struct ww_acquire_ctx acquire_ctx; - int ret = 0; - - trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia); - - if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) { - DRM_DEBUG("invalid flags: %d\n", args->flags); - return -EINVAL; - } - - if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { - ret = v3d_get_extensions(file_priv, args->extensions, &se); - if (ret) { - DRM_DEBUG("Failed to get extensions.\n"); - return ret; - } - } - - ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job), - v3d_job_free, args->in_sync, &se, V3D_TFU); - if (ret) - goto fail; - - job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles), - sizeof(*job->base.bo), GFP_KERNEL); - if (!job->base.bo) { - ret = -ENOMEM; - goto fail; - } - - job->args = *args; - - for (job->base.bo_count = 0; - job->base.bo_count < ARRAY_SIZE(args->bo_handles); - job->base.bo_count++) { - struct drm_gem_object *bo; - - if (!args->bo_handles[job->base.bo_count]) - break; - - bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]); - if (!bo) { - DRM_DEBUG("Failed to look up GEM BO %d: %d\n", - job->base.bo_count, - args->bo_handles[job->base.bo_count]); - ret = -ENOENT; - goto fail; - } - job->base.bo[job->base.bo_count] = bo; - } - - ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx); - if (ret) - goto fail; - - mutex_lock(&v3d->sched_lock); - v3d_push_job(&job->base); - mutex_unlock(&v3d->sched_lock); - - v3d_attach_fences_and_unlock_reservation(file_priv, - &job->base, &acquire_ctx, - args->out_sync, - &se, - job->base.done_fence); - - v3d_job_put(&job->base); - - return 0; - -fail: - v3d_job_cleanup((void *)job); - v3d_put_multisync_post_deps(&se); - - return ret; -} - -/** - * v3d_submit_csd_ioctl() - Submits a CSD (texture formatting) job to the V3D. - * @dev: DRM device - * @data: ioctl argument - * @file_priv: DRM file for this fd - * - * Userspace provides the register setup for the CSD, which we don't - * need to validate since the CSD is behind the MMU. 
- */ -int -v3d_submit_csd_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct v3d_dev *v3d = to_v3d_dev(dev); - struct v3d_file_priv *v3d_priv = file_priv->driver_priv; - struct drm_v3d_submit_csd *args = data; - struct v3d_submit_ext se = {0}; - struct v3d_csd_job *job = NULL; - struct v3d_job *clean_job = NULL; - struct ww_acquire_ctx acquire_ctx; - int ret; - - trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]); - - if (args->pad) - return -EINVAL; - - if (!v3d_has_csd(v3d)) { - DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n"); - return -EINVAL; - } - - if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) { - DRM_INFO("invalid flags: %d\n", args->flags); - return -EINVAL; - } - - if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { - ret = v3d_get_extensions(file_priv, args->extensions, &se); - if (ret) { - DRM_DEBUG("Failed to get extensions.\n"); - return ret; - } - } - - ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job), - v3d_job_free, args->in_sync, &se, V3D_CSD); - if (ret) - goto fail; - - ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job), - v3d_job_free, 0, NULL, V3D_CACHE_CLEAN); - if (ret) - goto fail; - - job->args = *args; - - ret = v3d_lookup_bos(dev, file_priv, clean_job, - args->bo_handles, args->bo_handle_count); - if (ret) - goto fail; - - ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx); - if (ret) - goto fail; - - if (args->perfmon_id) { - job->base.perfmon = v3d_perfmon_find(v3d_priv, - args->perfmon_id); - if (!job->base.perfmon) { - ret = -ENOENT; - goto fail_perfmon; - } - } - - mutex_lock(&v3d->sched_lock); - v3d_push_job(&job->base); - - ret = drm_sched_job_add_dependency(&clean_job->base, - dma_fence_get(job->base.done_fence)); - if (ret) - goto fail_unreserve; - - v3d_push_job(clean_job); - mutex_unlock(&v3d->sched_lock); - - v3d_attach_fences_and_unlock_reservation(file_priv, - clean_job, - &acquire_ctx, - args->out_sync, - &se, - clean_job->done_fence); - - v3d_job_put(&job->base); - v3d_job_put(clean_job); - - return 0; - -fail_unreserve: - mutex_unlock(&v3d->sched_lock); -fail_perfmon: - drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count, - &acquire_ctx); -fail: - v3d_job_cleanup((void *)job); - v3d_job_cleanup(clean_job); - v3d_put_multisync_post_deps(&se); - - return ret; -} - int v3d_gem_init(struct drm_device *dev) { diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c new file mode 100644 index 000000000000..f36214002f37 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -0,0 +1,744 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2014-2018 Broadcom + * Copyright (C) 2023 Raspberry Pi + */ + +#include + +#include "v3d_drv.h" +#include "v3d_regs.h" +#include "v3d_trace.h" + +/* Takes the reservation lock on all the BOs being referenced, so that + * at queue submit time we can update the reservations. + * + * We don't lock the RCL the tile alloc/state BOs, or overflow memory + * (all of which are on exec->unref_list). They're entirely private + * to v3d, so we don't attach dma-buf fences to them. 
+ */ +static int +v3d_lock_bo_reservations(struct v3d_job *job, + struct ww_acquire_ctx *acquire_ctx) +{ + int i, ret; + + ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx); + if (ret) + return ret; + + for (i = 0; i < job->bo_count; i++) { + ret = dma_resv_reserve_fences(job->bo[i]->resv, 1); + if (ret) + goto fail; + + ret = drm_sched_job_add_implicit_dependencies(&job->base, + job->bo[i], true); + if (ret) + goto fail; + } + + return 0; + +fail: + drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); + return ret; +} + +/** + * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects + * referenced by the job. + * @dev: DRM device + * @file_priv: DRM file for this fd + * @job: V3D job being set up + * @bo_handles: GEM handles + * @bo_count: Number of GEM handles passed in + * + * The command validator needs to reference BOs by their index within + * the submitted job's BO list. This does the validation of the job's + * BO list and reference counting for the lifetime of the job. + * + * Note that this function doesn't need to unreference the BOs on + * failure, because that will happen at v3d_exec_cleanup() time. + */ +static int +v3d_lookup_bos(struct drm_device *dev, + struct drm_file *file_priv, + struct v3d_job *job, + u64 bo_handles, + u32 bo_count) +{ + job->bo_count = bo_count; + + if (!job->bo_count) { + /* See comment on bo_index for why we have to check + * this. + */ + DRM_DEBUG("Rendering requires BOs\n"); + return -EINVAL; + } + + return drm_gem_objects_lookup(file_priv, + (void __user *)(uintptr_t)bo_handles, + job->bo_count, &job->bo); +} + +static void +v3d_job_free(struct kref *ref) +{ + struct v3d_job *job = container_of(ref, struct v3d_job, refcount); + int i; + + if (job->bo) { + for (i = 0; i < job->bo_count; i++) + drm_gem_object_put(job->bo[i]); + kvfree(job->bo); + } + + dma_fence_put(job->irq_fence); + dma_fence_put(job->done_fence); + + if (job->perfmon) + v3d_perfmon_put(job->perfmon); + + kfree(job); +} + +static void +v3d_render_job_free(struct kref *ref) +{ + struct v3d_render_job *job = container_of(ref, struct v3d_render_job, + base.refcount); + struct v3d_bo *bo, *save; + + list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) { + drm_gem_object_put(&bo->base.base); + } + + v3d_job_free(ref); +} + +void v3d_job_cleanup(struct v3d_job *job) +{ + if (!job) + return; + + drm_sched_job_cleanup(&job->base); + v3d_job_put(job); +} + +void v3d_job_put(struct v3d_job *job) +{ + kref_put(&job->refcount, job->free); +} + +static int +v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, + void **container, size_t size, void (*free)(struct kref *ref), + u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue) +{ + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct v3d_job *job; + bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC); + int ret, i; + + *container = kcalloc(1, size, GFP_KERNEL); + if (!*container) { + DRM_ERROR("Cannot allocate memory for v3d job."); + return -ENOMEM; + } + + job = *container; + job->v3d = v3d; + job->free = free; + job->file = file_priv; + + ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue], + 1, v3d_priv); + if (ret) + goto fail; + + if (has_multisync) { + if (se->in_sync_count && se->wait_stage == queue) { + struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs); + + for (i = 0; i < se->in_sync_count; i++) { + struct drm_v3d_sem in; + + if (copy_from_user(&in, handle++, sizeof(in))) { + ret = -EFAULT; + DRM_DEBUG("Failed 
to copy wait dep handle.\n"); + goto fail_deps; + } + ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0); + + // TODO: Investigate why this was filtered out for the IOCTL. + if (ret && ret != -ENOENT) + goto fail_deps; + } + } + } else { + ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0); + + // TODO: Investigate why this was filtered out for the IOCTL. + if (ret && ret != -ENOENT) + goto fail_deps; + } + + kref_init(&job->refcount); + + return 0; + +fail_deps: + drm_sched_job_cleanup(&job->base); +fail: + kfree(*container); + *container = NULL; + + return ret; +} + +static void +v3d_push_job(struct v3d_job *job) +{ + drm_sched_job_arm(&job->base); + + job->done_fence = dma_fence_get(&job->base.s_fence->finished); + + /* put by scheduler job completion */ + kref_get(&job->refcount); + + drm_sched_entity_push_job(&job->base); +} + +static void +v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv, + struct v3d_job *job, + struct ww_acquire_ctx *acquire_ctx, + u32 out_sync, + struct v3d_submit_ext *se, + struct dma_fence *done_fence) +{ + struct drm_syncobj *sync_out; + bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC); + int i; + + for (i = 0; i < job->bo_count; i++) { + /* XXX: Use shared fences for read-only objects. */ + dma_resv_add_fence(job->bo[i]->resv, job->done_fence, + DMA_RESV_USAGE_WRITE); + } + + drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); + + /* Update the return sync object for the job */ + /* If it only supports a single signal semaphore*/ + if (!has_multisync) { + sync_out = drm_syncobj_find(file_priv, out_sync); + if (sync_out) { + drm_syncobj_replace_fence(sync_out, done_fence); + drm_syncobj_put(sync_out); + } + return; + } + + /* If multiple semaphores extension is supported */ + if (se->out_sync_count) { + for (i = 0; i < se->out_sync_count; i++) { + drm_syncobj_replace_fence(se->out_syncs[i].syncobj, + done_fence); + drm_syncobj_put(se->out_syncs[i].syncobj); + } + kvfree(se->out_syncs); + } +} + +static void +v3d_put_multisync_post_deps(struct v3d_submit_ext *se) +{ + unsigned int i; + + if (!(se && se->out_sync_count)) + return; + + for (i = 0; i < se->out_sync_count; i++) + drm_syncobj_put(se->out_syncs[i].syncobj); + kvfree(se->out_syncs); +} + +static int +v3d_get_multisync_post_deps(struct drm_file *file_priv, + struct v3d_submit_ext *se, + u32 count, u64 handles) +{ + struct drm_v3d_sem __user *post_deps; + int i, ret; + + if (!count) + return 0; + + se->out_syncs = (struct v3d_submit_outsync *) + kvmalloc_array(count, + sizeof(struct v3d_submit_outsync), + GFP_KERNEL); + if (!se->out_syncs) + return -ENOMEM; + + post_deps = u64_to_user_ptr(handles); + + for (i = 0; i < count; i++) { + struct drm_v3d_sem out; + + if (copy_from_user(&out, post_deps++, sizeof(out))) { + ret = -EFAULT; + DRM_DEBUG("Failed to copy post dep handles\n"); + goto fail; + } + + se->out_syncs[i].syncobj = drm_syncobj_find(file_priv, + out.handle); + if (!se->out_syncs[i].syncobj) { + ret = -EINVAL; + goto fail; + } + } + se->out_sync_count = count; + + return 0; + +fail: + for (i--; i >= 0; i--) + drm_syncobj_put(se->out_syncs[i].syncobj); + kvfree(se->out_syncs); + + return ret; +} + +/* Get data for multiple binary semaphores synchronization. Parse syncobj + * to be signaled when job completes (out_sync). 
+ */ +static int +v3d_get_multisync_submit_deps(struct drm_file *file_priv, + struct drm_v3d_extension __user *ext, + void *data) +{ + struct drm_v3d_multi_sync multisync; + struct v3d_submit_ext *se = data; + int ret; + + if (copy_from_user(&multisync, ext, sizeof(multisync))) + return -EFAULT; + + if (multisync.pad) + return -EINVAL; + + ret = v3d_get_multisync_post_deps(file_priv, data, multisync.out_sync_count, + multisync.out_syncs); + if (ret) + return ret; + + se->in_sync_count = multisync.in_sync_count; + se->in_syncs = multisync.in_syncs; + se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC; + se->wait_stage = multisync.wait_stage; + + return 0; +} + +/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data + * according to the extension id (name). + */ +static int +v3d_get_extensions(struct drm_file *file_priv, + u64 ext_handles, + void *data) +{ + struct drm_v3d_extension __user *user_ext; + int ret; + + user_ext = u64_to_user_ptr(ext_handles); + while (user_ext) { + struct drm_v3d_extension ext; + + if (copy_from_user(&ext, user_ext, sizeof(ext))) { + DRM_DEBUG("Failed to copy submit extension\n"); + return -EFAULT; + } + + switch (ext.id) { + case DRM_V3D_EXT_ID_MULTI_SYNC: + ret = v3d_get_multisync_submit_deps(file_priv, user_ext, data); + if (ret) + return ret; + break; + default: + DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id); + return -EINVAL; + } + + user_ext = u64_to_user_ptr(ext.next); + } + + return 0; +} + +/** + * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D. + * @dev: DRM device + * @data: ioctl argument + * @file_priv: DRM file for this fd + * + * This is the main entrypoint for userspace to submit a 3D frame to + * the GPU. Userspace provides the binner command list (if + * applicable), and the kernel sets up the render command list to draw + * to the framebuffer described in the ioctl, using the command lists + * that the 3D engine's binner will produce. 
+ */ +int +v3d_submit_cl_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct drm_v3d_submit_cl *args = data; + struct v3d_submit_ext se = {0}; + struct v3d_bin_job *bin = NULL; + struct v3d_render_job *render = NULL; + struct v3d_job *clean_job = NULL; + struct v3d_job *last_job; + struct ww_acquire_ctx acquire_ctx; + int ret = 0; + + trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end); + + if (args->pad) + return -EINVAL; + + if (args->flags && + args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE | + DRM_V3D_SUBMIT_EXTENSION)) { + DRM_INFO("invalid flags: %d\n", args->flags); + return -EINVAL; + } + + if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { + ret = v3d_get_extensions(file_priv, args->extensions, &se); + if (ret) { + DRM_DEBUG("Failed to get extensions.\n"); + return ret; + } + } + + ret = v3d_job_init(v3d, file_priv, (void *)&render, sizeof(*render), + v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER); + if (ret) + goto fail; + + render->start = args->rcl_start; + render->end = args->rcl_end; + INIT_LIST_HEAD(&render->unref_list); + + if (args->bcl_start != args->bcl_end) { + ret = v3d_job_init(v3d, file_priv, (void *)&bin, sizeof(*bin), + v3d_job_free, args->in_sync_bcl, &se, V3D_BIN); + if (ret) + goto fail; + + bin->start = args->bcl_start; + bin->end = args->bcl_end; + bin->qma = args->qma; + bin->qms = args->qms; + bin->qts = args->qts; + bin->render = render; + } + + if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) { + ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job), + v3d_job_free, 0, NULL, V3D_CACHE_CLEAN); + if (ret) + goto fail; + + last_job = clean_job; + } else { + last_job = &render->base; + } + + ret = v3d_lookup_bos(dev, file_priv, last_job, + args->bo_handles, args->bo_handle_count); + if (ret) + goto fail; + + ret = v3d_lock_bo_reservations(last_job, &acquire_ctx); + if (ret) + goto fail; + + if (args->perfmon_id) { + render->base.perfmon = v3d_perfmon_find(v3d_priv, + args->perfmon_id); + + if (!render->base.perfmon) { + ret = -ENOENT; + goto fail_perfmon; + } + } + + mutex_lock(&v3d->sched_lock); + if (bin) { + bin->base.perfmon = render->base.perfmon; + v3d_perfmon_get(bin->base.perfmon); + v3d_push_job(&bin->base); + + ret = drm_sched_job_add_dependency(&render->base.base, + dma_fence_get(bin->base.done_fence)); + if (ret) + goto fail_unreserve; + } + + v3d_push_job(&render->base); + + if (clean_job) { + struct dma_fence *render_fence = + dma_fence_get(render->base.done_fence); + ret = drm_sched_job_add_dependency(&clean_job->base, + render_fence); + if (ret) + goto fail_unreserve; + clean_job->perfmon = render->base.perfmon; + v3d_perfmon_get(clean_job->perfmon); + v3d_push_job(clean_job); + } + + mutex_unlock(&v3d->sched_lock); + + v3d_attach_fences_and_unlock_reservation(file_priv, + last_job, + &acquire_ctx, + args->out_sync, + &se, + last_job->done_fence); + + if (bin) + v3d_job_put(&bin->base); + v3d_job_put(&render->base); + if (clean_job) + v3d_job_put(clean_job); + + return 0; + +fail_unreserve: + mutex_unlock(&v3d->sched_lock); +fail_perfmon: + drm_gem_unlock_reservations(last_job->bo, + last_job->bo_count, &acquire_ctx); +fail: + v3d_job_cleanup((void *)bin); + v3d_job_cleanup((void *)render); + v3d_job_cleanup(clean_job); + v3d_put_multisync_post_deps(&se); + + return ret; +} + +/** + * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D. 
+ * @dev: DRM device + * @data: ioctl argument + * @file_priv: DRM file for this fd + * + * Userspace provides the register setup for the TFU, which we don't + * need to validate since the TFU is behind the MMU. + */ +int +v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct drm_v3d_submit_tfu *args = data; + struct v3d_submit_ext se = {0}; + struct v3d_tfu_job *job = NULL; + struct ww_acquire_ctx acquire_ctx; + int ret = 0; + + trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia); + + if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) { + DRM_DEBUG("invalid flags: %d\n", args->flags); + return -EINVAL; + } + + if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { + ret = v3d_get_extensions(file_priv, args->extensions, &se); + if (ret) { + DRM_DEBUG("Failed to get extensions.\n"); + return ret; + } + } + + ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job), + v3d_job_free, args->in_sync, &se, V3D_TFU); + if (ret) + goto fail; + + job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles), + sizeof(*job->base.bo), GFP_KERNEL); + if (!job->base.bo) { + ret = -ENOMEM; + goto fail; + } + + job->args = *args; + + for (job->base.bo_count = 0; + job->base.bo_count < ARRAY_SIZE(args->bo_handles); + job->base.bo_count++) { + struct drm_gem_object *bo; + + if (!args->bo_handles[job->base.bo_count]) + break; + + bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]); + if (!bo) { + DRM_DEBUG("Failed to look up GEM BO %d: %d\n", + job->base.bo_count, + args->bo_handles[job->base.bo_count]); + ret = -ENOENT; + goto fail; + } + job->base.bo[job->base.bo_count] = bo; + } + + ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx); + if (ret) + goto fail; + + mutex_lock(&v3d->sched_lock); + v3d_push_job(&job->base); + mutex_unlock(&v3d->sched_lock); + + v3d_attach_fences_and_unlock_reservation(file_priv, + &job->base, &acquire_ctx, + args->out_sync, + &se, + job->base.done_fence); + + v3d_job_put(&job->base); + + return 0; + +fail: + v3d_job_cleanup((void *)job); + v3d_put_multisync_post_deps(&se); + + return ret; +} + +/** + * v3d_submit_csd_ioctl() - Submits a CSD (compute shader) job to the V3D. + * @dev: DRM device + * @data: ioctl argument + * @file_priv: DRM file for this fd + * + * Userspace provides the register setup for the CSD, which we don't + * need to validate since the CSD is behind the MMU. 
+ */ +int +v3d_submit_csd_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct drm_v3d_submit_csd *args = data; + struct v3d_submit_ext se = {0}; + struct v3d_csd_job *job = NULL; + struct v3d_job *clean_job = NULL; + struct ww_acquire_ctx acquire_ctx; + int ret; + + trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]); + + if (args->pad) + return -EINVAL; + + if (!v3d_has_csd(v3d)) { + DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n"); + return -EINVAL; + } + + if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) { + DRM_INFO("invalid flags: %d\n", args->flags); + return -EINVAL; + } + + if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { + ret = v3d_get_extensions(file_priv, args->extensions, &se); + if (ret) { + DRM_DEBUG("Failed to get extensions.\n"); + return ret; + } + } + + ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job), + v3d_job_free, args->in_sync, &se, V3D_CSD); + if (ret) + goto fail; + + ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job), + v3d_job_free, 0, NULL, V3D_CACHE_CLEAN); + if (ret) + goto fail; + + job->args = *args; + + ret = v3d_lookup_bos(dev, file_priv, clean_job, + args->bo_handles, args->bo_handle_count); + if (ret) + goto fail; + + ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx); + if (ret) + goto fail; + + if (args->perfmon_id) { + job->base.perfmon = v3d_perfmon_find(v3d_priv, + args->perfmon_id); + if (!job->base.perfmon) { + ret = -ENOENT; + goto fail_perfmon; + } + } + + mutex_lock(&v3d->sched_lock); + v3d_push_job(&job->base); + + ret = drm_sched_job_add_dependency(&clean_job->base, + dma_fence_get(job->base.done_fence)); + if (ret) + goto fail_unreserve; + + v3d_push_job(clean_job); + mutex_unlock(&v3d->sched_lock); + + v3d_attach_fences_and_unlock_reservation(file_priv, + clean_job, + &acquire_ctx, + args->out_sync, + &se, + clean_job->done_fence); + + v3d_job_put(&job->base); + v3d_job_put(clean_job); + + return 0; + +fail_unreserve: + mutex_unlock(&v3d->sched_lock); +fail_perfmon: + drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count, + &acquire_ctx); +fail: + v3d_job_cleanup((void *)job); + v3d_job_cleanup(clean_job); + v3d_put_multisync_post_deps(&se); + + return ret; +} -- cgit v1.2.3 From 8288faaa8b3817c2fcdbacc720527bb8df2b57b1 Mon Sep 17 00:00:00 2001 From: Melissa Wen Date: Thu, 30 Nov 2023 13:40:27 -0300 Subject: drm/v3d: Simplify job refcount handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of checking if the job is NULL every time we call the function, check it inside the function. 
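For reference, the resulting helper is tiny; this is only an illustrative restatement of the hunk below, not new code:

    void v3d_job_put(struct v3d_job *job)
    {
            /* Tolerate NULL so callers can put jobs unconditionally. */
            if (!job)
                    return;

            kref_put(&job->refcount, job->free);
    }

With the NULL check inside v3d_job_put(), the call sites in v3d_submit_cl_ioctl() no longer need their own "if (bin)" / "if (clean_job)" guards, as the second hunk shows.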
Signed-off-by: Melissa Wen Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20231130164420.932823-6-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_submit.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c index f36214002f37..a0caf9c499bb 100644 --- a/drivers/gpu/drm/v3d/v3d_submit.c +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -129,6 +129,9 @@ void v3d_job_cleanup(struct v3d_job *job) void v3d_job_put(struct v3d_job *job) { + if (!job) + return; + kref_put(&job->refcount, job->free); } @@ -517,11 +520,9 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, &se, last_job->done_fence); - if (bin) - v3d_job_put(&bin->base); + v3d_job_put(&bin->base); v3d_job_put(&render->base); - if (clean_job) - v3d_job_put(clean_job); + v3d_job_put(clean_job); return 0; -- cgit v1.2.3 From 6893deb881ab7da1691bd05045ffcc0c806319b9 Mon Sep 17 00:00:00 2001 From: Maíra Canal Date: Thu, 30 Nov 2023 13:40:28 -0300 Subject: drm/v3d: Don't allow two multisync extensions in the same job MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, two multisync extensions can be added to the same job and only the last multisync extension will be used. To avoid this vulnerability, don't allow two multisync extensions in the same job. Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20231130164420.932823-7-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_submit.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c index a0caf9c499bb..10141dc2820a 100644 --- a/drivers/gpu/drm/v3d/v3d_submit.c +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -329,6 +329,11 @@ v3d_get_multisync_submit_deps(struct drm_file *file_priv, struct v3d_submit_ext *se = data; int ret; + if (se->in_sync_count || se->out_sync_count) { + DRM_DEBUG("Two multisync extensions were added to the same job."); + return -EINVAL; + } + if (copy_from_user(&multisync, ext, sizeof(multisync))) return -EFAULT; -- cgit v1.2.3 From 464c61e76de851a216e667c91332172c68ffed54 Mon Sep 17 00:00:00 2001 From: Maíra Canal Date: Thu, 30 Nov 2023 13:40:29 -0300 Subject: drm/v3d: Decouple job allocation from job initiation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We want to allow the IOCTLs to allocate the job without initiating it. This will be useful for the CPU job submission IOCTL, as the CPU job has the need to use information from the user extensions. Currently, the user extensions are parsed before the job allocation, making it impossible to fill the CPU job when parsing the user extensions. Therefore, decouple the job allocation from the job initiation. 
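As a rough sketch of the new shape (condensed from the render-job hunks below; no identifiers beyond those in the patch), allocation and initialization become two separate steps, which leaves room for a caller to fill the job in between:

    /* Step 1: bare allocation, no scheduler state touched yet. */
    ret = v3d_job_allocate((void *)&render, sizeof(*render));
    if (ret)
            return ret;

    /* ...a CPU-job path could parse its user extensions here and store the
     * results in the freshly allocated job before initializing it... */

    /* Step 2: scheduler entity init, dependencies and refcount setup. */
    ret = v3d_job_init(v3d, file_priv, &render->base,
                       v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
    if (ret)
            goto fail;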
Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20231130164420.932823-8-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_submit.c | 64 +++++++++++++++++++++++++++------------- 1 file changed, 44 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c index 10141dc2820a..148900283c2a 100644 --- a/drivers/gpu/drm/v3d/v3d_submit.c +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -135,23 +135,27 @@ void v3d_job_put(struct v3d_job *job) kref_put(&job->refcount, job->free); } +static int +v3d_job_allocate(void **container, size_t size) +{ + *container = kcalloc(1, size, GFP_KERNEL); + if (!*container) { + DRM_ERROR("Cannot allocate memory for V3D job.\n"); + return -ENOMEM; + } + + return 0; +} + static int v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, - void **container, size_t size, void (*free)(struct kref *ref), + struct v3d_job *job, void (*free)(struct kref *ref), u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue) { struct v3d_file_priv *v3d_priv = file_priv->driver_priv; - struct v3d_job *job; bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC); int ret, i; - *container = kcalloc(1, size, GFP_KERNEL); - if (!*container) { - DRM_ERROR("Cannot allocate memory for v3d job."); - return -ENOMEM; - } - - job = *container; job->v3d = v3d; job->free = free; job->file = file_priv; @@ -159,7 +163,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue], 1, v3d_priv); if (ret) - goto fail; + return ret; if (has_multisync) { if (se->in_sync_count && se->wait_stage == queue) { @@ -194,10 +198,6 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, fail_deps: drm_sched_job_cleanup(&job->base); -fail: - kfree(*container); - *container = NULL; - return ret; } @@ -437,7 +437,11 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, } } - ret = v3d_job_init(v3d, file_priv, (void *)&render, sizeof(*render), + ret = v3d_job_allocate((void *)&render, sizeof(*render)); + if (ret) + return ret; + + ret = v3d_job_init(v3d, file_priv, &render->base, v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER); if (ret) goto fail; @@ -447,7 +451,11 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, INIT_LIST_HEAD(&render->unref_list); if (args->bcl_start != args->bcl_end) { - ret = v3d_job_init(v3d, file_priv, (void *)&bin, sizeof(*bin), + ret = v3d_job_allocate((void *)&bin, sizeof(*bin)); + if (ret) + goto fail; + + ret = v3d_job_init(v3d, file_priv, &bin->base, v3d_job_free, args->in_sync_bcl, &se, V3D_BIN); if (ret) goto fail; @@ -461,7 +469,11 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, } if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) { - ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job), + ret = v3d_job_allocate((void *)&clean_job, sizeof(*clean_job)); + if (ret) + goto fail; + + ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, NULL, V3D_CACHE_CLEAN); if (ret) goto fail; @@ -580,7 +592,11 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, } } - ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job), + ret = v3d_job_allocate((void *)&job, sizeof(*job)); + if (ret) + return ret; + + ret = v3d_job_init(v3d, file_priv, &job->base, v3d_job_free, args->in_sync, &se, V3D_TFU); if (ret) goto fail; @@ -683,12 +699,20 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, } } - ret = 
v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job), + ret = v3d_job_allocate((void *)&job, sizeof(*job)); + if (ret) + return ret; + + ret = v3d_job_init(v3d, file_priv, &job->base, v3d_job_free, args->in_sync, &se, V3D_CSD); if (ret) goto fail; - ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job), + ret = v3d_job_allocate((void *)&clean_job, sizeof(*clean_job)); + if (ret) + goto fail; + + ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, NULL, V3D_CACHE_CLEAN); if (ret) goto fail; -- cgit v1.2.3 From aafc1a2bea67460c41a289e8bb1e4dc6d016fe11 Mon Sep 17 00:00:00 2001 From: Melissa Wen Date: Thu, 30 Nov 2023 13:40:30 -0300 Subject: drm/v3d: Add a CPU job submission MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Create a new type of job, a CPU job. A CPU job is a type of job that performs operations that requires CPU intervention. The overall idea is to use user extensions to enable different types of CPU job, allowing the CPU job to perform different operations according to the type of user extension. The user extension ID identify the type of CPU job that must be dealt. Having a CPU job is interesting for synchronization purposes as a CPU job has a queue like any other V3D job and can be synchoronized by the multisync extension. Signed-off-by: Melissa Wen Co-developed-by: Maíra Canal Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20231130164420.932823-9-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_drv.c | 4 ++ drivers/gpu/drm/v3d/v3d_drv.h | 16 +++++++- drivers/gpu/drm/v3d/v3d_sched.c | 57 ++++++++++++++++++++++++++ drivers/gpu/drm/v3d/v3d_submit.c | 86 ++++++++++++++++++++++++++++++++++++++++ include/uapi/drm/v3d_drm.h | 17 ++++++++ 5 files changed, 179 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 44a1ca57d6a4..3debf37e7d9b 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -91,6 +91,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data, case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT: args->value = 1; return 0; + case DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE: + args->value = 1; + return 0; default: DRM_DEBUG("Unknown parameter %d\n", args->param); return -EINVAL; @@ -189,6 +192,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(V3D_PERFMON_CREATE, v3d_perfmon_create_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(V3D_PERFMON_DESTROY, v3d_perfmon_destroy_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_VALUES, v3d_perfmon_get_values_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CPU, v3d_submit_cpu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), }; static const struct drm_driver v3d_drm_driver = { diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 4db9ace66024..2246a0e29955 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -19,7 +19,7 @@ struct reset_control; #define GMP_GRANULARITY (128 * 1024) -#define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1) +#define V3D_MAX_QUEUES (V3D_CPU + 1) static inline char *v3d_queue_to_string(enum v3d_queue queue) { @@ -29,6 +29,7 @@ static inline char *v3d_queue_to_string(enum v3d_queue queue) case V3D_TFU: return "tfu"; case V3D_CSD: return "csd"; case V3D_CACHE_CLEAN: return "cache_clean"; + case V3D_CPU: return "cpu"; } return "UNKNOWN"; } @@ -122,6 +123,7 @@ struct v3d_dev { struct v3d_render_job *render_job; struct 
v3d_tfu_job *tfu_job; struct v3d_csd_job *csd_job; + struct v3d_cpu_job *cpu_job; struct v3d_queue_state queue[V3D_MAX_QUEUES]; @@ -312,6 +314,16 @@ struct v3d_csd_job { struct drm_v3d_submit_csd args; }; +enum v3d_cpu_job_type {}; + +struct v3d_cpu_job { + struct v3d_job base; + + enum v3d_cpu_job_type job_type; +}; + +typedef void (*v3d_cpu_job_fn)(struct v3d_cpu_job *); + struct v3d_submit_outsync { struct drm_syncobj *syncobj; }; @@ -414,6 +426,8 @@ int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_submit_csd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int v3d_submit_cpu_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* v3d_irq.c */ int v3d_irq_init(struct v3d_dev *v3d); diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index fccbea2a5f2e..c89e92fc614c 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -55,6 +55,12 @@ to_csd_job(struct drm_sched_job *sched_job) return container_of(sched_job, struct v3d_csd_job, base.base); } +static struct v3d_cpu_job * +to_cpu_job(struct drm_sched_job *sched_job) +{ + return container_of(sched_job, struct v3d_cpu_job, base.base); +} + static void v3d_sched_job_free(struct drm_sched_job *sched_job) { @@ -262,6 +268,42 @@ v3d_csd_job_run(struct drm_sched_job *sched_job) return fence; } +static const v3d_cpu_job_fn cpu_job_function[] = { }; + +static struct dma_fence * +v3d_cpu_job_run(struct drm_sched_job *sched_job) +{ + struct v3d_cpu_job *job = to_cpu_job(sched_job); + struct v3d_dev *v3d = job->base.v3d; + struct v3d_file_priv *file = job->base.file->driver_priv; + u64 runtime; + + v3d->cpu_job = job; + + if (job->job_type >= ARRAY_SIZE(cpu_job_function)) { + DRM_DEBUG_DRIVER("Unknown CPU job: %d\n", job->job_type); + return NULL; + } + + file->start_ns[V3D_CPU] = local_clock(); + v3d->queue[V3D_CPU].start_ns = file->start_ns[V3D_CPU]; + + cpu_job_function[job->job_type](job); + + runtime = local_clock() - file->start_ns[V3D_CPU]; + + file->enabled_ns[V3D_CPU] += runtime; + v3d->queue[V3D_CPU].enabled_ns += runtime; + + file->jobs_sent[V3D_CPU]++; + v3d->queue[V3D_CPU].jobs_sent++; + + file->start_ns[V3D_CPU] = 0; + v3d->queue[V3D_CPU].start_ns = 0; + + return NULL; +} + static struct dma_fence * v3d_cache_clean_job_run(struct drm_sched_job *sched_job) { @@ -416,6 +458,12 @@ static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = { .free_job = v3d_sched_job_free }; +static const struct drm_sched_backend_ops v3d_cpu_sched_ops = { + .run_job = v3d_cpu_job_run, + .timedout_job = v3d_generic_job_timedout, + .free_job = v3d_sched_job_free +}; + int v3d_sched_init(struct v3d_dev *v3d) { @@ -471,6 +519,15 @@ v3d_sched_init(struct v3d_dev *v3d) goto fail; } + ret = drm_sched_init(&v3d->queue[V3D_CPU].sched, + &v3d_cpu_sched_ops, NULL, + DRM_SCHED_PRIORITY_COUNT, + 1, job_hang_limit, + msecs_to_jiffies(hang_limit_ms), NULL, + NULL, "v3d_cpu", v3d->drm.dev); + if (ret) + goto fail; + return 0; fail: diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c index 148900283c2a..918e28b1d00a 100644 --- a/drivers/gpu/drm/v3d/v3d_submit.c +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -772,3 +772,89 @@ fail: return ret; } + +static const unsigned int cpu_job_bo_handle_count[] = { }; + +/** + * v3d_submit_cpu_ioctl() - Submits a CPU job to the V3D. 
+ * @dev: DRM device + * @data: ioctl argument + * @file_priv: DRM file for this fd + * + * Userspace specifies the CPU job type and data required to perform its + * operations through the drm_v3d_extension struct. + */ +int +v3d_submit_cpu_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct drm_v3d_submit_cpu *args = data; + struct v3d_submit_ext se = {0}; + struct v3d_cpu_job *cpu_job = NULL; + struct ww_acquire_ctx acquire_ctx; + int ret; + + if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) { + DRM_INFO("Invalid flags: %d\n", args->flags); + return -EINVAL; + } + + ret = v3d_job_allocate((void *)&cpu_job, sizeof(*cpu_job)); + if (ret) + return ret; + + if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { + ret = v3d_get_extensions(file_priv, args->extensions, &se); + if (ret) { + DRM_DEBUG("Failed to get extensions.\n"); + goto fail; + } + } + + /* Every CPU job must have a CPU job user extension */ + if (!cpu_job->job_type) { + DRM_DEBUG("CPU job must have a CPU job user extension.\n"); + goto fail; + } + + if (args->bo_handle_count != cpu_job_bo_handle_count[cpu_job->job_type]) { + DRM_DEBUG("This CPU job was not submitted with the proper number of BOs.\n"); + goto fail; + } + + ret = v3d_job_init(v3d, file_priv, &cpu_job->base, + v3d_job_free, 0, &se, V3D_CPU); + if (ret) + goto fail; + + if (args->bo_handle_count) { + ret = v3d_lookup_bos(dev, file_priv, &cpu_job->base, + args->bo_handles, args->bo_handle_count); + if (ret) + goto fail; + + ret = v3d_lock_bo_reservations(&cpu_job->base, &acquire_ctx); + if (ret) + goto fail; + } + + mutex_lock(&v3d->sched_lock); + v3d_push_job(&cpu_job->base); + mutex_unlock(&v3d->sched_lock); + + v3d_attach_fences_and_unlock_reservation(file_priv, + &cpu_job->base, + &acquire_ctx, 0, + NULL, cpu_job->base.done_fence); + + v3d_job_put(&cpu_job->base); + + return 0; + +fail: + v3d_job_cleanup((void *)cpu_job); + v3d_put_multisync_post_deps(&se); + + return ret; +} diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index 1a7d7a689de3..00abef9d0db7 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -41,6 +41,7 @@ extern "C" { #define DRM_V3D_PERFMON_CREATE 0x08 #define DRM_V3D_PERFMON_DESTROY 0x09 #define DRM_V3D_PERFMON_GET_VALUES 0x0a +#define DRM_V3D_SUBMIT_CPU 0x0b #define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl) #define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo) @@ -56,6 +57,7 @@ extern "C" { struct drm_v3d_perfmon_destroy) #define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \ struct drm_v3d_perfmon_get_values) +#define DRM_IOCTL_V3D_SUBMIT_CPU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CPU, struct drm_v3d_submit_cpu) #define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01 #define DRM_V3D_SUBMIT_EXTENSION 0x02 @@ -93,6 +95,7 @@ enum v3d_queue { V3D_TFU, V3D_CSD, V3D_CACHE_CLEAN, + V3D_CPU, }; /** @@ -276,6 +279,7 @@ enum drm_v3d_param { DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH, DRM_V3D_PARAM_SUPPORTS_PERFMON, DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT, + DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE, }; struct drm_v3d_get_param { @@ -361,6 +365,19 @@ struct drm_v3d_submit_csd { __u32 pad; }; +struct drm_v3d_submit_cpu { + /* Pointer to a u32 array of the BOs that are referenced by the job. */ + __u64 bo_handles; + + /* Number of BO handles passed in (size is that times 4). 
*/ + __u32 bo_handle_count; + + __u32 flags; + + /* Pointer to an array of ioctl extensions*/ + __u64 extensions; +}; + enum { V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS, V3D_PERFCNT_FEP_VALID_PRIMS, -- cgit v1.2.3 From c5195d001f4c122032a9ce90c6b88d772673fa35 Mon Sep 17 00:00:00 2001 From: Maíra Canal Date: Thu, 30 Nov 2023 13:40:31 -0300 Subject: drm/v3d: Use v3d_get_extensions() to parse CPU job data MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, v3d_get_extensions() only parses multisync data and assigns it to the `struct v3d_submit_ext`. But, to implement the CPU job with user extensions, we want v3d_get_extensions() to be able to parse CPU job data and assign it to the `struct v3d_cpu_job`. Therefore, allow the function v3d_get_extensions() to use `struct v3d_cpu_job *` as a parameter. If the `struct v3d_cpu_job *` is assigned to NULL, it means that the job is a GPU job and CPU job extensions should be rejected. Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20231130164420.932823-10-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_submit.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c index 918e28b1d00a..124935547f17 100644 --- a/drivers/gpu/drm/v3d/v3d_submit.c +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -323,10 +323,9 @@ fail: static int v3d_get_multisync_submit_deps(struct drm_file *file_priv, struct drm_v3d_extension __user *ext, - void *data) + struct v3d_submit_ext *se) { struct drm_v3d_multi_sync multisync; - struct v3d_submit_ext *se = data; int ret; if (se->in_sync_count || se->out_sync_count) { @@ -340,7 +339,7 @@ v3d_get_multisync_submit_deps(struct drm_file *file_priv, if (multisync.pad) return -EINVAL; - ret = v3d_get_multisync_post_deps(file_priv, data, multisync.out_sync_count, + ret = v3d_get_multisync_post_deps(file_priv, se, multisync.out_sync_count, multisync.out_syncs); if (ret) return ret; @@ -359,7 +358,8 @@ v3d_get_multisync_submit_deps(struct drm_file *file_priv, static int v3d_get_extensions(struct drm_file *file_priv, u64 ext_handles, - void *data) + struct v3d_submit_ext *se, + struct v3d_cpu_job *job) { struct drm_v3d_extension __user *user_ext; int ret; @@ -375,15 +375,16 @@ v3d_get_extensions(struct drm_file *file_priv, switch (ext.id) { case DRM_V3D_EXT_ID_MULTI_SYNC: - ret = v3d_get_multisync_submit_deps(file_priv, user_ext, data); - if (ret) - return ret; + ret = v3d_get_multisync_submit_deps(file_priv, user_ext, se); break; default: DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id); return -EINVAL; } + if (ret) + return ret; + user_ext = u64_to_user_ptr(ext.next); } @@ -430,7 +431,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, } if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { - ret = v3d_get_extensions(file_priv, args->extensions, &se); + ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL); if (ret) { DRM_DEBUG("Failed to get extensions.\n"); return ret; @@ -585,7 +586,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, } if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { - ret = v3d_get_extensions(file_priv, args->extensions, &se); + ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL); if (ret) { DRM_DEBUG("Failed to get extensions.\n"); return ret; @@ -692,7 +693,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, } if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { - ret = 
v3d_get_extensions(file_priv, args->extensions, &se); + ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL); if (ret) { DRM_DEBUG("Failed to get extensions.\n"); return ret; @@ -805,7 +806,7 @@ v3d_submit_cpu_ioctl(struct drm_device *dev, void *data, return ret; if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { - ret = v3d_get_extensions(file_priv, args->extensions, &se); + ret = v3d_get_extensions(file_priv, args->extensions, &se, cpu_job); if (ret) { DRM_DEBUG("Failed to get extensions.\n"); goto fail; -- cgit v1.2.3 From 1fe0879efc8f623816c7a825d853d2140c88cb2d Mon Sep 17 00:00:00 2001 From: Maíra Canal Date: Thu, 30 Nov 2023 13:40:32 -0300 Subject: drm/v3d: Create tracepoints to track the CPU job MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Create tracepoints to track the three major events of a CPU job lifetime: 1. Submission of a `v3d_submit_cpu` IOCTL 2. Beginning of the execution of a CPU job 3. Ending of the execution of a CPU job Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20231130164420.932823-11-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_sched.c | 4 +++ drivers/gpu/drm/v3d/v3d_submit.c | 2 ++ drivers/gpu/drm/v3d/v3d_trace.h | 57 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 63 insertions(+) diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index c89e92fc614c..ebbd00840a73 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -288,8 +288,12 @@ v3d_cpu_job_run(struct drm_sched_job *sched_job) file->start_ns[V3D_CPU] = local_clock(); v3d->queue[V3D_CPU].start_ns = file->start_ns[V3D_CPU]; + trace_v3d_cpu_job_begin(&v3d->drm, job->job_type); + cpu_job_function[job->job_type](job); + trace_v3d_cpu_job_end(&v3d->drm, job->job_type); + runtime = local_clock() - file->start_ns[V3D_CPU]; file->enabled_ns[V3D_CPU] += runtime; diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c index 124935547f17..c134b113b181 100644 --- a/drivers/gpu/drm/v3d/v3d_submit.c +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -824,6 +824,8 @@ v3d_submit_cpu_ioctl(struct drm_device *dev, void *data, goto fail; } + trace_v3d_submit_cpu_ioctl(&v3d->drm, cpu_job->job_type); + ret = v3d_job_init(v3d, file_priv, &cpu_job->base, v3d_job_free, 0, &se, V3D_CPU); if (ret) diff --git a/drivers/gpu/drm/v3d/v3d_trace.h b/drivers/gpu/drm/v3d/v3d_trace.h index 7aa8dc356e54..5917b94148f5 100644 --- a/drivers/gpu/drm/v3d/v3d_trace.h +++ b/drivers/gpu/drm/v3d/v3d_trace.h @@ -225,6 +225,63 @@ TRACE_EVENT(v3d_submit_csd, __entry->seqno) ); +TRACE_EVENT(v3d_submit_cpu_ioctl, + TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type), + TP_ARGS(dev, job_type), + + TP_STRUCT__entry( + __field(u32, dev) + __field(enum v3d_cpu_job_type, job_type) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->job_type = job_type; + ), + + TP_printk("dev=%u, job_type=%d", + __entry->dev, + __entry->job_type) +); + +TRACE_EVENT(v3d_cpu_job_begin, + TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type), + TP_ARGS(dev, job_type), + + TP_STRUCT__entry( + __field(u32, dev) + __field(enum v3d_cpu_job_type, job_type) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->job_type = job_type; + ), + + TP_printk("dev=%u, job_type=%d", + __entry->dev, + __entry->job_type) +); + +TRACE_EVENT(v3d_cpu_job_end, + TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type), + 
TP_ARGS(dev, job_type), + + TP_STRUCT__entry( + __field(u32, dev) + __field(enum v3d_cpu_job_type, job_type) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->job_type = job_type; + ), + + TP_printk("dev=%u, job_type=%d", + __entry->dev, + __entry->job_type) +); + TRACE_EVENT(v3d_cache_clean_begin, TP_PROTO(struct drm_device *dev), TP_ARGS(dev), -- cgit v1.2.3 From 369b05961731925e4a43608ea1e7884df200f0bd Mon Sep 17 00:00:00 2001 From: Melissa Wen Date: Thu, 30 Nov 2023 13:40:33 -0300 Subject: drm/v3d: Detach the CSD job BO setup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Detach CSD job setup from CSD submission ioctl to reuse it in CPU submission ioctl for indirect CSD job. Signed-off-by: Melissa Wen Co-developed-by: Maíra Canal Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20231130164420.932823-12-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_submit.c | 68 +++++++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c index c134b113b181..eb26fe1e27e3 100644 --- a/drivers/gpu/drm/v3d/v3d_submit.c +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -256,6 +256,45 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv, } } +static int +v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv, + struct v3d_dev *v3d, + struct drm_v3d_submit_csd *args, + struct v3d_csd_job **job, + struct v3d_job **clean_job, + struct v3d_submit_ext *se, + struct ww_acquire_ctx *acquire_ctx) +{ + int ret; + + ret = v3d_job_allocate((void *)job, sizeof(**job)); + if (ret) + return ret; + + ret = v3d_job_init(v3d, file_priv, &(*job)->base, + v3d_job_free, args->in_sync, se, V3D_CSD); + if (ret) + return ret; + + ret = v3d_job_allocate((void *)clean_job, sizeof(**clean_job)); + if (ret) + return ret; + + ret = v3d_job_init(v3d, file_priv, *clean_job, + v3d_job_free, 0, NULL, V3D_CACHE_CLEAN); + if (ret) + return ret; + + (*job)->args = *args; + + ret = v3d_lookup_bos(&v3d->drm, file_priv, *clean_job, + args->bo_handles, args->bo_handle_count); + if (ret) + return ret; + + return v3d_lock_bo_reservations(*clean_job, acquire_ctx); +} + static void v3d_put_multisync_post_deps(struct v3d_submit_ext *se) { @@ -700,32 +739,9 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, } } - ret = v3d_job_allocate((void *)&job, sizeof(*job)); - if (ret) - return ret; - - ret = v3d_job_init(v3d, file_priv, &job->base, - v3d_job_free, args->in_sync, &se, V3D_CSD); - if (ret) - goto fail; - - ret = v3d_job_allocate((void *)&clean_job, sizeof(*clean_job)); - if (ret) - goto fail; - - ret = v3d_job_init(v3d, file_priv, clean_job, - v3d_job_free, 0, NULL, V3D_CACHE_CLEAN); - if (ret) - goto fail; - - job->args = *args; - - ret = v3d_lookup_bos(dev, file_priv, clean_job, - args->bo_handles, args->bo_handle_count); - if (ret) - goto fail; - - ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx); + ret = v3d_setup_csd_jobs_and_bos(file_priv, v3d, args, + &job, &clean_job, &se, + &acquire_ctx); if (ret) goto fail; -- cgit v1.2.3 From 7c13132c4073628b5fe23b5188ac583a2882a6b0 Mon Sep 17 00:00:00 2001 From: Maíra Canal Date: Thu, 30 Nov 2023 13:40:34 -0300 Subject: drm/v3d: Enable BO mapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For the indirect CSD CPU job, we will need to access the internal contents of the BO with the dispatch 
parameters. Therefore, create methods to allow the mapping and unmapping of the BO. Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20231130164420.932823-13-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_bo.c | 18 ++++++++++++++++++ drivers/gpu/drm/v3d/v3d_drv.h | 4 ++++ 2 files changed, 22 insertions(+) diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index 357a0da7e16a..1bdfac8beafd 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -33,6 +33,9 @@ void v3d_free_object(struct drm_gem_object *obj) struct v3d_dev *v3d = to_v3d_dev(obj->dev); struct v3d_bo *bo = to_v3d_bo(obj); + if (bo->vaddr) + v3d_put_bo_vaddr(bo); + v3d_mmu_remove_ptes(bo); mutex_lock(&v3d->bo_lock); @@ -134,6 +137,7 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, if (IS_ERR(shmem_obj)) return ERR_CAST(shmem_obj); bo = to_v3d_bo(&shmem_obj->base); + bo->vaddr = NULL; ret = v3d_bo_create_finish(&shmem_obj->base); if (ret) @@ -167,6 +171,20 @@ v3d_prime_import_sg_table(struct drm_device *dev, return obj; } +void v3d_get_bo_vaddr(struct v3d_bo *bo) +{ + struct drm_gem_shmem_object *obj = &bo->base; + + bo->vaddr = vmap(obj->pages, obj->base.size >> PAGE_SHIFT, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); +} + +void v3d_put_bo_vaddr(struct v3d_bo *bo) +{ + vunmap(bo->vaddr); + bo->vaddr = NULL; +} + int v3d_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 2246a0e29955..39d62915cdd6 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -202,6 +202,8 @@ struct v3d_bo { * v3d_render_job->unref_list */ struct list_head unref_head; + + void *vaddr; }; static inline struct v3d_bo * @@ -391,6 +393,8 @@ struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size); void v3d_free_object(struct drm_gem_object *gem_obj); struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, size_t size); +void v3d_get_bo_vaddr(struct v3d_bo *bo); +void v3d_put_bo_vaddr(struct v3d_bo *bo); int v3d_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, -- cgit v1.2.3 From 18b8413b25b7070fa2e55858a2c808e6909581d0 Mon Sep 17 00:00:00 2001 From: Maíra Canal Date: Thu, 30 Nov 2023 13:40:35 -0300 Subject: drm/v3d: Create a CPU job extension for a indirect CSD job MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A CPU job is a type of job that performs operations that requires CPU intervention. An indirect CSD job is a job that, when executed in the queue, will map the indirect buffer, read the dispatch parameters, and submit a regular dispatch. Therefore, it is a job that needs CPU intervention. So, create a user extension for the CPU job that enables the creation of an indirect CSD. This user extension will allow the creation of a CSD job linked to a CPU job. The CPU job will wait for the indirect CSD job dependencies and, once they are signaled, it will update the CSD job parameters. 
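In terms of scheduling, the three jobs end up chained through their done fences. The following is condensed from the v3d_submit_cpu_ioctl() hunk further down, showing only the V3D_CPU_JOB_TYPE_INDIRECT_CSD case of the switch, with comments added for illustration:

    mutex_lock(&v3d->sched_lock);
    v3d_push_job(&cpu_job->base);           /* rewrites the CSD config */

    /* The CSD job may only run once the CPU job has patched its config. */
    ret = drm_sched_job_add_dependency(&csd_job->base.base,
                                       dma_fence_get(cpu_job->base.done_fence));
    if (ret)
            goto fail_unreserve;

    v3d_push_job(&csd_job->base);           /* the actual compute dispatch */

    /* The cache-clean job runs last, after the CSD job completes. */
    ret = drm_sched_job_add_dependency(&clean_job->base,
                                       dma_fence_get(csd_job->base.done_fence));
    if (ret)
            goto fail_unreserve;

    v3d_push_job(clean_job);
    mutex_unlock(&v3d->sched_lock);

Userspace sees this as a single DRM_IOCTL_V3D_SUBMIT_CPU call whose extension chain carries a struct drm_v3d_indirect_csd, which embeds the struct drm_v3d_submit_csd for the linked dispatch (see the uapi changes below).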
Co-developed-by: Melissa Wen Signed-off-by: Melissa Wen Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20231130164420.932823-14-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_drv.h | 31 +++++++++++- drivers/gpu/drm/v3d/v3d_sched.c | 41 ++++++++++++++- drivers/gpu/drm/v3d/v3d_submit.c | 104 ++++++++++++++++++++++++++++++++++++++- include/uapi/drm/v3d_drm.h | 43 +++++++++++++++- 4 files changed, 213 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 39d62915cdd6..202c0d4b04a5 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -316,12 +316,41 @@ struct v3d_csd_job { struct drm_v3d_submit_csd args; }; -enum v3d_cpu_job_type {}; +enum v3d_cpu_job_type { + V3D_CPU_JOB_TYPE_INDIRECT_CSD = 1, +}; + +struct v3d_indirect_csd_info { + /* Indirect CSD */ + struct v3d_csd_job *job; + + /* Clean cache job associated to the Indirect CSD job */ + struct v3d_job *clean_job; + + /* Offset within the BO where the workgroup counts are stored */ + u32 offset; + + /* Workgroups size */ + u32 wg_size; + + /* Indices of the uniforms with the workgroup dispatch counts + * in the uniform stream. + */ + u32 wg_uniform_offsets[3]; + + /* Indirect BO */ + struct drm_gem_object *indirect; + + /* Context of the Indirect CSD job */ + struct ww_acquire_ctx acquire_ctx; +}; struct v3d_cpu_job { struct v3d_job base; enum v3d_cpu_job_type job_type; + + struct v3d_indirect_csd_info indirect_csd; }; typedef void (*v3d_cpu_job_fn)(struct v3d_cpu_job *); diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index ebbd00840a73..307257ab0e4a 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -25,6 +25,8 @@ #include "v3d_regs.h" #include "v3d_trace.h" +#define V3D_CSD_CFG012_WG_COUNT_SHIFT 16 + static struct v3d_job * to_v3d_job(struct drm_sched_job *sched_job) { @@ -268,7 +270,44 @@ v3d_csd_job_run(struct drm_sched_job *sched_job) return fence; } -static const v3d_cpu_job_fn cpu_job_function[] = { }; +static void +v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job) +{ + struct v3d_indirect_csd_info *indirect_csd = &job->indirect_csd; + struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]); + struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect); + struct drm_v3d_submit_csd *args = &indirect_csd->job->args; + u32 *wg_counts; + + v3d_get_bo_vaddr(bo); + v3d_get_bo_vaddr(indirect); + + wg_counts = (uint32_t *)(bo->vaddr + indirect_csd->offset); + + if (wg_counts[0] == 0 || wg_counts[1] == 0 || wg_counts[2] == 0) + return; + + args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT; + args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT; + args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT; + args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) * + (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1; + + for (int i = 0; i < 3; i++) { + /* 0xffffffff indicates that the uniform rewrite is not needed */ + if (indirect_csd->wg_uniform_offsets[i] != 0xffffffff) { + u32 uniform_idx = indirect_csd->wg_uniform_offsets[i]; + ((uint32_t *)indirect->vaddr)[uniform_idx] = wg_counts[i]; + } + } + + v3d_put_bo_vaddr(indirect); + v3d_put_bo_vaddr(bo); +} + +static const v3d_cpu_job_fn cpu_job_function[] = { + [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = v3d_rewrite_csd_job_wg_counts_from_indirect, +}; static struct dma_fence * v3d_cpu_job_run(struct drm_sched_job *sched_job) diff --git 
a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c index eb26fe1e27e3..0320695b941b 100644 --- a/drivers/gpu/drm/v3d/v3d_submit.c +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -391,6 +391,48 @@ v3d_get_multisync_submit_deps(struct drm_file *file_priv, return 0; } +/* Get data for the indirect CSD job submission. */ +static int +v3d_get_cpu_indirect_csd_params(struct drm_file *file_priv, + struct drm_v3d_extension __user *ext, + struct v3d_cpu_job *job) +{ + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct v3d_dev *v3d = v3d_priv->v3d; + struct drm_v3d_indirect_csd indirect_csd; + struct v3d_indirect_csd_info *info = &job->indirect_csd; + + if (!job) { + DRM_DEBUG("CPU job extension was attached to a GPU job.\n"); + return -EINVAL; + } + + if (job->job_type) { + DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n"); + return -EINVAL; + } + + if (copy_from_user(&indirect_csd, ext, sizeof(indirect_csd))) + return -EFAULT; + + if (!v3d_has_csd(v3d)) { + DRM_DEBUG("Attempting CSD submit on non-CSD hardware.\n"); + return -EINVAL; + } + + job->job_type = V3D_CPU_JOB_TYPE_INDIRECT_CSD; + info->offset = indirect_csd.offset; + info->wg_size = indirect_csd.wg_size; + memcpy(&info->wg_uniform_offsets, &indirect_csd.wg_uniform_offsets, + sizeof(indirect_csd.wg_uniform_offsets)); + + info->indirect = drm_gem_object_lookup(file_priv, indirect_csd.indirect); + + return v3d_setup_csd_jobs_and_bos(file_priv, v3d, &indirect_csd.submit, + &info->job, &info->clean_job, + NULL, &info->acquire_ctx); +} + /* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data * according to the extension id (name). */ @@ -416,6 +458,9 @@ v3d_get_extensions(struct drm_file *file_priv, case DRM_V3D_EXT_ID_MULTI_SYNC: ret = v3d_get_multisync_submit_deps(file_priv, user_ext, se); break; + case DRM_V3D_EXT_ID_CPU_INDIRECT_CSD: + ret = v3d_get_cpu_indirect_csd_params(file_priv, user_ext, job); + break; default: DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id); return -EINVAL; @@ -790,7 +835,9 @@ fail: return ret; } -static const unsigned int cpu_job_bo_handle_count[] = { }; +static const unsigned int cpu_job_bo_handle_count[] = { + [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = 1, +}; /** * v3d_submit_cpu_ioctl() - Submits a CPU job to the V3D. 
@@ -808,7 +855,10 @@ v3d_submit_cpu_ioctl(struct drm_device *dev, void *data, struct v3d_dev *v3d = to_v3d_dev(dev); struct drm_v3d_submit_cpu *args = data; struct v3d_submit_ext se = {0}; + struct v3d_submit_ext *out_se = NULL; struct v3d_cpu_job *cpu_job = NULL; + struct v3d_csd_job *csd_job = NULL; + struct v3d_job *clean_job = NULL; struct ww_acquire_ctx acquire_ctx; int ret; @@ -847,6 +897,9 @@ v3d_submit_cpu_ioctl(struct drm_device *dev, void *data, if (ret) goto fail; + clean_job = cpu_job->indirect_csd.clean_job; + csd_job = cpu_job->indirect_csd.job; + if (args->bo_handle_count) { ret = v3d_lookup_bos(dev, file_priv, &cpu_job->base, args->bo_handles, args->bo_handle_count); @@ -860,19 +913,66 @@ v3d_submit_cpu_ioctl(struct drm_device *dev, void *data, mutex_lock(&v3d->sched_lock); v3d_push_job(&cpu_job->base); + + switch (cpu_job->job_type) { + case V3D_CPU_JOB_TYPE_INDIRECT_CSD: + ret = drm_sched_job_add_dependency(&csd_job->base.base, + dma_fence_get(cpu_job->base.done_fence)); + if (ret) + goto fail_unreserve; + + v3d_push_job(&csd_job->base); + + ret = drm_sched_job_add_dependency(&clean_job->base, + dma_fence_get(csd_job->base.done_fence)); + if (ret) + goto fail_unreserve; + + v3d_push_job(clean_job); + + break; + default: + break; + } mutex_unlock(&v3d->sched_lock); + out_se = (cpu_job->job_type == V3D_CPU_JOB_TYPE_INDIRECT_CSD) ? NULL : &se; + v3d_attach_fences_and_unlock_reservation(file_priv, &cpu_job->base, &acquire_ctx, 0, - NULL, cpu_job->base.done_fence); + out_se, cpu_job->base.done_fence); + + switch (cpu_job->job_type) { + case V3D_CPU_JOB_TYPE_INDIRECT_CSD: + v3d_attach_fences_and_unlock_reservation(file_priv, + clean_job, + &cpu_job->indirect_csd.acquire_ctx, + 0, &se, clean_job->done_fence); + break; + default: + break; + } v3d_job_put(&cpu_job->base); + v3d_job_put(&csd_job->base); + v3d_job_put(clean_job); return 0; +fail_unreserve: + mutex_unlock(&v3d->sched_lock); + + drm_gem_unlock_reservations(cpu_job->base.bo, cpu_job->base.bo_count, + &acquire_ctx); + + drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count, + &cpu_job->indirect_csd.acquire_ctx); + fail: v3d_job_cleanup((void *)cpu_job); + v3d_job_cleanup((void *)csd_job); + v3d_job_cleanup(clean_job); v3d_put_multisync_post_deps(&se); return ret; diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index 00abef9d0db7..0c0f47782528 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -71,7 +71,8 @@ extern "C" { struct drm_v3d_extension { __u64 next; __u32 id; -#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01 +#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01 +#define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD 0x02 __u32 flags; /* mbz */ }; @@ -365,8 +366,46 @@ struct drm_v3d_submit_csd { __u32 pad; }; +/** + * struct drm_v3d_indirect_csd - ioctl extension for the CPU job to create an + * indirect CSD + * + * When an extension of DRM_V3D_EXT_ID_CPU_INDIRECT_CSD id is defined, it + * points to this extension to define a indirect CSD submission. It creates a + * CPU job linked to a CSD job. The CPU job waits for the indirect CSD + * dependencies and, once they are signaled, it updates the CSD job config + * before allowing the CSD job execution. + */ +struct drm_v3d_indirect_csd { + struct drm_v3d_extension base; + + /* Indirect CSD */ + struct drm_v3d_submit_csd submit; + + /* Handle of the indirect BO, that should be also attached to the + * indirect CSD. 
+ */ + __u32 indirect; + + /* Offset within the BO where the workgroup counts are stored */ + __u32 offset; + + /* Workgroups size */ + __u32 wg_size; + + /* Indices of the uniforms with the workgroup dispatch counts + * in the uniform stream. If the uniform rewrite is not needed, + * the offset must be 0xffffffff. + */ + __u32 wg_uniform_offsets[3]; +}; + struct drm_v3d_submit_cpu { - /* Pointer to a u32 array of the BOs that are referenced by the job. */ + /* Pointer to a u32 array of the BOs that are referenced by the job. + * + * For DRM_V3D_EXT_ID_CPU_INDIRECT_CSD, it must contain only one BO, + * that contains the workgroup counts. + */ __u64 bo_handles; /* Number of BO handles passed in (size is that times 4). */ -- cgit v1.2.3 From 9ba0ff3e083f6a4a0b6698f06bfff74805fefa5f Mon Sep 17 00:00:00 2001 From: Maíra Canal Date: Thu, 30 Nov 2023 13:40:36 -0300 Subject: drm/v3d: Create a CPU job extension for the timestamp query job MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A CPU job is a type of job that performs operations that requires CPU intervention. A timestamp query job is a job that calculates the query timestamp and updates the query availability by signaling a syncobj. As V3D doesn't provide any mechanism to obtain a timestamp from the GPU, it is a job that needs CPU intervention. So, create a user extension for the CPU job that enables the creation of a timestamp query job. This user extension will allow the creation of a CPU job that performs the timestamp query calculation and updates the timestamp BO with the proper value. Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20231130164420.932823-15-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_drv.h | 17 +++++++++++ drivers/gpu/drm/v3d/v3d_sched.c | 40 ++++++++++++++++++++++++- drivers/gpu/drm/v3d/v3d_submit.c | 63 ++++++++++++++++++++++++++++++++++++++++ include/uapi/drm/v3d_drm.h | 30 +++++++++++++++++++ 4 files changed, 149 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 202c0d4b04a5..dd86e745c260 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -318,6 +318,15 @@ struct v3d_csd_job { enum v3d_cpu_job_type { V3D_CPU_JOB_TYPE_INDIRECT_CSD = 1, + V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY, +}; + +struct v3d_timestamp_query { + /* Offset of this query in the timestamp BO for its value. 
*/ + u32 offset; + + /* Syncobj that indicates the timestamp availability */ + struct drm_syncobj *syncobj; }; struct v3d_indirect_csd_info { @@ -345,12 +354,20 @@ struct v3d_indirect_csd_info { struct ww_acquire_ctx acquire_ctx; }; +struct v3d_timestamp_query_info { + struct v3d_timestamp_query *queries; + + u32 count; +}; + struct v3d_cpu_job { struct v3d_job base; enum v3d_cpu_job_type job_type; struct v3d_indirect_csd_info indirect_csd; + + struct v3d_timestamp_query_info timestamp_query; }; typedef void (*v3d_cpu_job_fn)(struct v3d_cpu_job *); diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 307257ab0e4a..5e692f087105 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -21,6 +21,8 @@ #include #include +#include + #include "v3d_drv.h" #include "v3d_regs.h" #include "v3d_trace.h" @@ -71,6 +73,21 @@ v3d_sched_job_free(struct drm_sched_job *sched_job) v3d_job_cleanup(job); } +static void +v3d_cpu_job_free(struct drm_sched_job *sched_job) +{ + struct v3d_cpu_job *job = to_cpu_job(sched_job); + struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query; + + if (timestamp_query->queries) { + for (int i = 0; i < timestamp_query->count; i++) + drm_syncobj_put(timestamp_query->queries[i].syncobj); + kvfree(timestamp_query->queries); + } + + v3d_job_cleanup(&job->base); +} + static void v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job) { @@ -305,8 +322,29 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job) v3d_put_bo_vaddr(bo); } +static void +v3d_timestamp_query(struct v3d_cpu_job *job) +{ + struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query; + struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]); + u8 *value_addr; + + v3d_get_bo_vaddr(bo); + + for (int i = 0; i < timestamp_query->count; i++) { + value_addr = ((u8 *)bo->vaddr) + timestamp_query->queries[i].offset; + *((u64 *)value_addr) = i == 0 ? ktime_get_ns() : 0ull; + + drm_syncobj_replace_fence(timestamp_query->queries[i].syncobj, + job->base.done_fence); + } + + v3d_put_bo_vaddr(bo); +} + static const v3d_cpu_job_fn cpu_job_function[] = { [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = v3d_rewrite_csd_job_wg_counts_from_indirect, + [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = v3d_timestamp_query, }; static struct dma_fence * @@ -504,7 +542,7 @@ static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = { static const struct drm_sched_backend_ops v3d_cpu_sched_ops = { .run_job = v3d_cpu_job_run, .timedout_job = v3d_generic_job_timedout, - .free_job = v3d_sched_job_free + .free_job = v3d_cpu_job_free }; int diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c index 0320695b941b..83e029e786ea 100644 --- a/drivers/gpu/drm/v3d/v3d_submit.c +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -433,6 +433,64 @@ v3d_get_cpu_indirect_csd_params(struct drm_file *file_priv, NULL, &info->acquire_ctx); } +/* Get data for the query timestamp job submission. 
*/ +static int +v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv, + struct drm_v3d_extension __user *ext, + struct v3d_cpu_job *job) +{ + u32 __user *offsets, *syncs; + struct drm_v3d_timestamp_query timestamp; + + if (!job) { + DRM_DEBUG("CPU job extension was attached to a GPU job.\n"); + return -EINVAL; + } + + if (job->job_type) { + DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n"); + return -EINVAL; + } + + if (copy_from_user(×tamp, ext, sizeof(timestamp))) + return -EFAULT; + + if (timestamp.pad) + return -EINVAL; + + job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY; + + job->timestamp_query.queries = kvmalloc_array(timestamp.count, + sizeof(struct v3d_timestamp_query), + GFP_KERNEL); + if (!job->timestamp_query.queries) + return -ENOMEM; + + offsets = u64_to_user_ptr(timestamp.offsets); + syncs = u64_to_user_ptr(timestamp.syncs); + + for (int i = 0; i < timestamp.count; i++) { + u32 offset, sync; + + if (copy_from_user(&offset, offsets++, sizeof(offset))) { + kvfree(job->timestamp_query.queries); + return -EFAULT; + } + + job->timestamp_query.queries[i].offset = offset; + + if (copy_from_user(&sync, syncs++, sizeof(sync))) { + kvfree(job->timestamp_query.queries); + return -EFAULT; + } + + job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync); + } + job->timestamp_query.count = timestamp.count; + + return 0; +} + /* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data * according to the extension id (name). */ @@ -461,6 +519,9 @@ v3d_get_extensions(struct drm_file *file_priv, case DRM_V3D_EXT_ID_CPU_INDIRECT_CSD: ret = v3d_get_cpu_indirect_csd_params(file_priv, user_ext, job); break; + case DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY: + ret = v3d_get_cpu_timestamp_query_params(file_priv, user_ext, job); + break; default: DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id); return -EINVAL; @@ -837,6 +898,7 @@ fail: static const unsigned int cpu_job_bo_handle_count[] = { [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = 1, + [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = 1, }; /** @@ -974,6 +1036,7 @@ fail: v3d_job_cleanup((void *)csd_job); v3d_job_cleanup(clean_job); v3d_put_multisync_post_deps(&se); + kvfree(cpu_job->timestamp_query.queries); return ret; } diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index 0c0f47782528..239801b5e117 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -73,6 +73,7 @@ struct drm_v3d_extension { __u32 id; #define DRM_V3D_EXT_ID_MULTI_SYNC 0x01 #define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD 0x02 +#define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY 0x03 __u32 flags; /* mbz */ }; @@ -400,11 +401,40 @@ struct drm_v3d_indirect_csd { __u32 wg_uniform_offsets[3]; }; +/** + * struct drm_v3d_timestamp_query - ioctl extension for the CPU job to calculate + * a timestamp query + * + * When an extension DRM_V3D_EXT_ID_TIMESTAMP_QUERY is defined, it points to + * this extension to define a timestamp query submission. This CPU job will + * calculate the timestamp query and update the query value within the + * timestamp BO. Moreover, it will signal the timestamp syncobj to indicate + * query availability. 
+ */ +struct drm_v3d_timestamp_query { + struct drm_v3d_extension base; + + /* Array of queries' offsets within the timestamp BO for their value */ + __u64 offsets; + + /* Array of timestamp's syncobjs to indicate its availability */ + __u64 syncs; + + /* Number of queries */ + __u32 count; + + /* mbz */ + __u32 pad; +}; + struct drm_v3d_submit_cpu { /* Pointer to a u32 array of the BOs that are referenced by the job. * * For DRM_V3D_EXT_ID_CPU_INDIRECT_CSD, it must contain only one BO, * that contains the workgroup counts. + * + * For DRM_V3D_EXT_ID_TIMESTAMP_QUERY, it must contain only one BO, + * that will contain the timestamp. */ __u64 bo_handles; -- cgit v1.2.3 From 34a101e64296c736b14ce27e647fcebd70cb7bf8 Mon Sep 17 00:00:00 2001 From: Maíra Canal Date: Thu, 30 Nov 2023 13:40:37 -0300 Subject: drm/v3d: Create a CPU job extension for the reset timestamp job MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A CPU job is a type of job that performs operations that requires CPU intervention. A reset timestamp job is a job that resets the timestamp queries based on the value offset of the first query. As V3D doesn't provide any mechanism to obtain a timestamp from the GPU, it is a job that needs CPU intervention. So, create a user extension for the CPU job that enables the creation of a reset timestamp job. This user extension will allow the creation of a CPU job that resets the timestamp value in the timestamp BO and resets the availability syncobj. Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20231130164420.932823-16-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_drv.h | 1 + drivers/gpu/drm/v3d/v3d_sched.c | 21 ++++++++++++++++ drivers/gpu/drm/v3d/v3d_submit.c | 52 ++++++++++++++++++++++++++++++++++++++++ include/uapi/drm/v3d_drm.h | 27 +++++++++++++++++++++ 4 files changed, 101 insertions(+) diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index dd86e745c260..3988407635ed 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -319,6 +319,7 @@ struct v3d_csd_job { enum v3d_cpu_job_type { V3D_CPU_JOB_TYPE_INDIRECT_CSD = 1, V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY, + V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY, }; struct v3d_timestamp_query { diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 5e692f087105..e287f42d3621 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -342,9 +342,30 @@ v3d_timestamp_query(struct v3d_cpu_job *job) v3d_put_bo_vaddr(bo); } +static void +v3d_reset_timestamp_queries(struct v3d_cpu_job *job) +{ + struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query; + struct v3d_timestamp_query *queries = timestamp_query->queries; + struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]); + u8 *value_addr; + + v3d_get_bo_vaddr(bo); + + for (int i = 0; i < timestamp_query->count; i++) { + value_addr = ((u8 *)bo->vaddr) + queries[i].offset; + *((u64 *)value_addr) = 0; + + drm_syncobj_replace_fence(queries[i].syncobj, NULL); + } + + v3d_put_bo_vaddr(bo); +} + static const v3d_cpu_job_fn cpu_job_function[] = { [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = v3d_rewrite_csd_job_wg_counts_from_indirect, [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = v3d_timestamp_query, + [V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = v3d_reset_timestamp_queries, }; static struct dma_fence * diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c index 83e029e786ea..1c719416e26a 
100644 --- a/drivers/gpu/drm/v3d/v3d_submit.c +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -491,6 +491,54 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv, return 0; } +static int +v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv, + struct drm_v3d_extension __user *ext, + struct v3d_cpu_job *job) +{ + u32 __user *syncs; + struct drm_v3d_reset_timestamp_query reset; + + if (!job) { + DRM_DEBUG("CPU job extension was attached to a GPU job.\n"); + return -EINVAL; + } + + if (job->job_type) { + DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n"); + return -EINVAL; + } + + if (copy_from_user(&reset, ext, sizeof(reset))) + return -EFAULT; + + job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY; + + job->timestamp_query.queries = kvmalloc_array(reset.count, + sizeof(struct v3d_timestamp_query), + GFP_KERNEL); + if (!job->timestamp_query.queries) + return -ENOMEM; + + syncs = u64_to_user_ptr(reset.syncs); + + for (int i = 0; i < reset.count; i++) { + u32 sync; + + job->timestamp_query.queries[i].offset = reset.offset + 8 * i; + + if (copy_from_user(&sync, syncs++, sizeof(sync))) { + kvfree(job->timestamp_query.queries); + return -EFAULT; + } + + job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync); + } + job->timestamp_query.count = reset.count; + + return 0; +} + /* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data * according to the extension id (name). */ @@ -522,6 +570,9 @@ v3d_get_extensions(struct drm_file *file_priv, case DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY: ret = v3d_get_cpu_timestamp_query_params(file_priv, user_ext, job); break; + case DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY: + ret = v3d_get_cpu_reset_timestamp_params(file_priv, user_ext, job); + break; default: DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id); return -EINVAL; @@ -899,6 +950,7 @@ fail: static const unsigned int cpu_job_bo_handle_count[] = { [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = 1, [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = 1, + [V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = 1, }; /** diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index 239801b5e117..930f8d07f088 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -74,6 +74,7 @@ struct drm_v3d_extension { #define DRM_V3D_EXT_ID_MULTI_SYNC 0x01 #define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD 0x02 #define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY 0x03 +#define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY 0x04 __u32 flags; /* mbz */ }; @@ -427,6 +428,29 @@ struct drm_v3d_timestamp_query { __u32 pad; }; +/** + * struct drm_v3d_reset_timestamp_query - ioctl extension for the CPU job to + * reset timestamp queries + * + * When an extension DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY is defined, it + * points to this extension to define a reset timestamp submission. This CPU + * job will reset the timestamp queries based on value offset of the first + * query. Moreover, it will reset the timestamp syncobj to reset query + * availability. + */ +struct drm_v3d_reset_timestamp_query { + struct drm_v3d_extension base; + + /* Array of timestamp's syncobjs to indicate its availability */ + __u64 syncs; + + /* Offset of the first query within the timestamp BO for its value */ + __u32 offset; + + /* Number of queries */ + __u32 count; +}; + struct drm_v3d_submit_cpu { /* Pointer to a u32 array of the BOs that are referenced by the job. 
* @@ -435,6 +459,9 @@ struct drm_v3d_submit_cpu { * * For DRM_V3D_EXT_ID_TIMESTAMP_QUERY, it must contain only one BO, * that will contain the timestamp. + * + * For DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY, it must contain only + * one BO, that contains the timestamp. */ __u64 bo_handles; -- cgit v1.2.3 From 6745f3e44a20ac18e7e5a40a3c7f62225983d544 Mon Sep 17 00:00:00 2001 From: Maíra Canal Date: Thu, 30 Nov 2023 13:40:38 -0300 Subject: drm/v3d: Create a CPU job extension to copy timestamp query to a buffer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A CPU job is a type of job that performs operations that requires CPU intervention. A copy timestamp query job is a job that copy the complete or partial result of a query to a buffer. As V3D doesn't provide any mechanism to obtain a timestamp from the GPU, it is a job that needs CPU intervention. So, create a user extension for the CPU job that enables the creation of a copy timestamp query job. This user extension will allow the creation of a CPU job that copy the results of a timestamp query to a BO with the possibility to indicate the timestamp availability with a availability bit. Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20231130164420.932823-17-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_drv.h | 20 ++++++++++++ drivers/gpu/drm/v3d/v3d_sched.c | 56 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/v3d/v3d_submit.c | 69 ++++++++++++++++++++++++++++++++++++++++ include/uapi/drm/v3d_drm.h | 45 ++++++++++++++++++++++++++ 4 files changed, 190 insertions(+) diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 3988407635ed..5058a354fffd 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -320,6 +320,7 @@ enum v3d_cpu_job_type { V3D_CPU_JOB_TYPE_INDIRECT_CSD = 1, V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY, V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY, + V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY, }; struct v3d_timestamp_query { @@ -361,6 +362,23 @@ struct v3d_timestamp_query_info { u32 count; }; +struct v3d_copy_query_results_info { + /* Define if should write to buffer using 64 or 32 bits */ + bool do_64bit; + + /* Define if it can write to buffer even if the query is not available */ + bool do_partial; + + /* Define if it should write availability bit to buffer */ + bool availability_bit; + + /* Offset of the copy buffer in the BO */ + u32 offset; + + /* Stride of the copy buffer in the BO */ + u32 stride; +}; + struct v3d_cpu_job { struct v3d_job base; @@ -369,6 +387,8 @@ struct v3d_cpu_job { struct v3d_indirect_csd_info indirect_csd; struct v3d_timestamp_query_info timestamp_query; + + struct v3d_copy_query_results_info copy; }; typedef void (*v3d_cpu_job_fn)(struct v3d_cpu_job *); diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index e287f42d3621..b1662d32a929 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -362,10 +362,66 @@ v3d_reset_timestamp_queries(struct v3d_cpu_job *job) v3d_put_bo_vaddr(bo); } +static void +write_to_buffer(void *dst, u32 idx, bool do_64bit, u64 value) +{ + if (do_64bit) { + u64 *dst64 = (u64 *)dst; + + dst64[idx] = value; + } else { + u32 *dst32 = (u32 *)dst; + + dst32[idx] = (u32)value; + } +} + +static void +v3d_copy_query_results(struct v3d_cpu_job *job) +{ + struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query; + struct v3d_timestamp_query *queries = 
timestamp_query->queries; + struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]); + struct v3d_bo *timestamp = to_v3d_bo(job->base.bo[1]); + struct v3d_copy_query_results_info *copy = &job->copy; + struct dma_fence *fence; + u8 *query_addr; + bool available, write_result; + u8 *data; + int i; + + v3d_get_bo_vaddr(bo); + v3d_get_bo_vaddr(timestamp); + + data = ((u8 *)bo->vaddr) + copy->offset; + + for (i = 0; i < timestamp_query->count; i++) { + fence = drm_syncobj_fence_get(queries[i].syncobj); + available = fence ? dma_fence_is_signaled(fence) : false; + + write_result = available || copy->do_partial; + if (write_result) { + query_addr = ((u8 *)timestamp->vaddr) + queries[i].offset; + write_to_buffer(data, 0, copy->do_64bit, *((u64 *)query_addr)); + } + + if (copy->availability_bit) + write_to_buffer(data, 1, copy->do_64bit, available ? 1u : 0u); + + data += copy->stride; + + dma_fence_put(fence); + } + + v3d_put_bo_vaddr(timestamp); + v3d_put_bo_vaddr(bo); +} + static const v3d_cpu_job_fn cpu_job_function[] = { [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = v3d_rewrite_csd_job_wg_counts_from_indirect, [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = v3d_timestamp_query, [V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = v3d_reset_timestamp_queries, + [V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = v3d_copy_query_results, }; static struct dma_fence * diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c index 1c719416e26a..bafd49c6440c 100644 --- a/drivers/gpu/drm/v3d/v3d_submit.c +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -539,6 +539,71 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv, return 0; } +/* Get data for the copy timestamp query results job submission. */ +static int +v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv, + struct drm_v3d_extension __user *ext, + struct v3d_cpu_job *job) +{ + u32 __user *offsets, *syncs; + struct drm_v3d_copy_timestamp_query copy; + int i; + + if (!job) { + DRM_DEBUG("CPU job extension was attached to a GPU job.\n"); + return -EINVAL; + } + + if (job->job_type) { + DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n"); + return -EINVAL; + } + + if (copy_from_user(©, ext, sizeof(copy))) + return -EFAULT; + + if (copy.pad) + return -EINVAL; + + job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY; + + job->timestamp_query.queries = kvmalloc_array(copy.count, + sizeof(struct v3d_timestamp_query), + GFP_KERNEL); + if (!job->timestamp_query.queries) + return -ENOMEM; + + offsets = u64_to_user_ptr(copy.offsets); + syncs = u64_to_user_ptr(copy.syncs); + + for (i = 0; i < copy.count; i++) { + u32 offset, sync; + + if (copy_from_user(&offset, offsets++, sizeof(offset))) { + kvfree(job->timestamp_query.queries); + return -EFAULT; + } + + job->timestamp_query.queries[i].offset = offset; + + if (copy_from_user(&sync, syncs++, sizeof(sync))) { + kvfree(job->timestamp_query.queries); + return -EFAULT; + } + + job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync); + } + job->timestamp_query.count = copy.count; + + job->copy.do_64bit = copy.do_64bit; + job->copy.do_partial = copy.do_partial; + job->copy.availability_bit = copy.availability_bit; + job->copy.offset = copy.offset; + job->copy.stride = copy.stride; + + return 0; +} + /* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data * according to the extension id (name). 
*/ @@ -573,6 +638,9 @@ v3d_get_extensions(struct drm_file *file_priv, case DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY: ret = v3d_get_cpu_reset_timestamp_params(file_priv, user_ext, job); break; + case DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY: + ret = v3d_get_cpu_copy_query_results_params(file_priv, user_ext, job); + break; default: DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id); return -EINVAL; @@ -951,6 +1019,7 @@ static const unsigned int cpu_job_bo_handle_count[] = { [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = 1, [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = 1, [V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = 1, + [V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = 2, }; /** diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index 930f8d07f088..f7defbf3d043 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -75,6 +75,7 @@ struct drm_v3d_extension { #define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD 0x02 #define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY 0x03 #define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY 0x04 +#define DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY 0x05 __u32 flags; /* mbz */ }; @@ -451,6 +452,46 @@ struct drm_v3d_reset_timestamp_query { __u32 count; }; +/** + * struct drm_v3d_copy_timestamp_query - ioctl extension for the CPU job to copy + * query results to a buffer + * + * When an extension DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY is defined, it + * points to this extension to define a copy timestamp query submission. This + * CPU job will copy the timestamp queries results to a BO with the offset + * and stride defined in the extension. + */ +struct drm_v3d_copy_timestamp_query { + struct drm_v3d_extension base; + + /* Define if should write to buffer using 64 or 32 bits */ + __u8 do_64bit; + + /* Define if it can write to buffer even if the query is not available */ + __u8 do_partial; + + /* Define if it should write availability bit to buffer */ + __u8 availability_bit; + + /* mbz */ + __u8 pad; + + /* Offset of the buffer in the BO */ + __u32 offset; + + /* Stride of the buffer in the BO */ + __u32 stride; + + /* Number of queries */ + __u32 count; + + /* Array of queries' offsets within the timestamp BO for their value */ + __u64 offsets; + + /* Array of timestamp's syncobjs to indicate its availability */ + __u64 syncs; +}; + struct drm_v3d_submit_cpu { /* Pointer to a u32 array of the BOs that are referenced by the job. * @@ -462,6 +503,10 @@ struct drm_v3d_submit_cpu { * * For DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY, it must contain only * one BO, that contains the timestamp. + * + * For DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY, it must contain two + * BOs. The first is the BO where the timestamp queries will be written + * to. The second is the BO that contains the timestamp. */ __u64 bo_handles; -- cgit v1.2.3 From bae7cb5d68001a8d4ceec5964dda74bb9aab7220 Mon Sep 17 00:00:00 2001 From: Maíra Canal Date: Thu, 30 Nov 2023 13:40:39 -0300 Subject: drm/v3d: Create a CPU job extension for the reset performance query job MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A CPU job is a type of job that performs operations that requires CPU intervention. A reset performance query job is a job that resets the performance queries by resetting the values of the perfmons. Moreover, we also reset the syncobjs related to the availability of the query. So, create a user extension for the CPU job that enables the creation of a reset performance job. 
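For illustration only (this sketch is not part of the patch), user space might fill the new extension roughly as below; the struct layout is the one added to include/uapi/drm/v3d_drm.h further down, and every handle, ID and count is a placeholder:

    /* Hedged user-space sketch: reset two performance queries, each backed
     * by `nperfmons` kernel perfmons. `sync0`, `sync1`, `q0_ids`, `q1_ids`
     * and `nperfmons` are hypothetical names standing in for real handles.
     */
    __u32 avail_syncobjs[2] = { sync0, sync1 };       /* one syncobj per query */
    __u64 kperfmon_ptrs[2] = {
            (__u64)(uintptr_t)q0_ids,                 /* u32[nperfmons] */
            (__u64)(uintptr_t)q1_ids,                 /* u32[nperfmons] */
    };
    struct drm_v3d_reset_performance_query reset = {
            .base.id      = DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY,
            .syncs        = (__u64)(uintptr_t)avail_syncobjs,
            .count        = 2,
            .nperfmons    = nperfmons,
            .kperfmon_ids = (__u64)(uintptr_t)kperfmon_ptrs,
    };
    /* .base.next stays 0 (end of the extension chain); the extension is then
     * referenced from a DRM_IOCTL_V3D_SUBMIT_CPU submission, which for this
     * job type carries no BO handles.
     */
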
This user extension will allow the creation of a CPU job that resets the perfmons values and resets the availability syncobj. Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20231130164420.932823-18-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_drv.h | 28 +++++++++++++++ drivers/gpu/drm/v3d/v3d_sched.c | 36 ++++++++++++++++++++ drivers/gpu/drm/v3d/v3d_submit.c | 73 ++++++++++++++++++++++++++++++++++++++++ include/uapi/drm/v3d_drm.h | 30 +++++++++++++++++ 4 files changed, 167 insertions(+) diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 5058a354fffd..0f7f80ad8d88 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -321,6 +321,7 @@ enum v3d_cpu_job_type { V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY, V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY, V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY, + V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY, }; struct v3d_timestamp_query { @@ -331,6 +332,18 @@ struct v3d_timestamp_query { struct drm_syncobj *syncobj; }; +/* Number of perfmons required to handle all supported performance counters */ +#define V3D_MAX_PERFMONS DIV_ROUND_UP(V3D_PERFCNT_NUM, \ + DRM_V3D_MAX_PERF_COUNTERS) + +struct v3d_performance_query { + /* Performance monitor IDs for this query */ + u32 kperfmon_ids[V3D_MAX_PERFMONS]; + + /* Syncobj that indicates the query availability */ + struct drm_syncobj *syncobj; +}; + struct v3d_indirect_csd_info { /* Indirect CSD */ struct v3d_csd_job *job; @@ -362,6 +375,19 @@ struct v3d_timestamp_query_info { u32 count; }; +struct v3d_performance_query_info { + struct v3d_performance_query *queries; + + /* Number of performance queries */ + u32 count; + + /* Number of performance monitors related to that query pool */ + u32 nperfmons; + + /* Number of performance counters related to that query pool */ + u32 ncounters; +}; + struct v3d_copy_query_results_info { /* Define if should write to buffer using 64 or 32 bits */ bool do_64bit; @@ -389,6 +415,8 @@ struct v3d_cpu_job { struct v3d_timestamp_query_info timestamp_query; struct v3d_copy_query_results_info copy; + + struct v3d_performance_query_info performance_query; }; typedef void (*v3d_cpu_job_fn)(struct v3d_cpu_job *); diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index b1662d32a929..e83d2907bc83 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -78,6 +78,7 @@ v3d_cpu_job_free(struct drm_sched_job *sched_job) { struct v3d_cpu_job *job = to_cpu_job(sched_job); struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query; + struct v3d_performance_query_info *performance_query = &job->performance_query; if (timestamp_query->queries) { for (int i = 0; i < timestamp_query->count; i++) @@ -85,6 +86,12 @@ v3d_cpu_job_free(struct drm_sched_job *sched_job) kvfree(timestamp_query->queries); } + if (performance_query->queries) { + for (int i = 0; i < performance_query->count; i++) + drm_syncobj_put(performance_query->queries[i].syncobj); + kvfree(performance_query->queries); + } + v3d_job_cleanup(&job->base); } @@ -417,11 +424,40 @@ v3d_copy_query_results(struct v3d_cpu_job *job) v3d_put_bo_vaddr(bo); } +static void +v3d_reset_performance_queries(struct v3d_cpu_job *job) +{ + struct v3d_performance_query_info *performance_query = &job->performance_query; + struct v3d_file_priv *v3d_priv = job->base.file->driver_priv; + struct v3d_dev *v3d = job->base.v3d; + struct v3d_perfmon *perfmon; + + for (int i = 0; i < performance_query->count; 
i++) { + for (int j = 0; j < performance_query->nperfmons; j++) { + perfmon = v3d_perfmon_find(v3d_priv, + performance_query->queries[i].kperfmon_ids[j]); + if (!perfmon) { + DRM_DEBUG("Failed to find perfmon."); + continue; + } + + v3d_perfmon_stop(v3d, perfmon, false); + + memset(perfmon->values, 0, perfmon->ncounters * sizeof(u64)); + + v3d_perfmon_put(perfmon); + } + + drm_syncobj_replace_fence(performance_query->queries[i].syncobj, NULL); + } +} + static const v3d_cpu_job_fn cpu_job_function[] = { [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = v3d_rewrite_csd_job_wg_counts_from_indirect, [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = v3d_timestamp_query, [V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = v3d_reset_timestamp_queries, [V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = v3d_copy_query_results, + [V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = v3d_reset_performance_queries, }; static struct dma_fence * diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c index bafd49c6440c..20af8ae14831 100644 --- a/drivers/gpu/drm/v3d/v3d_submit.c +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -604,6 +604,74 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv, return 0; } +static int +v3d_get_cpu_reset_performance_params(struct drm_file *file_priv, + struct drm_v3d_extension __user *ext, + struct v3d_cpu_job *job) +{ + u32 __user *syncs; + u64 __user *kperfmon_ids; + struct drm_v3d_reset_performance_query reset; + + if (!job) { + DRM_DEBUG("CPU job extension was attached to a GPU job.\n"); + return -EINVAL; + } + + if (job->job_type) { + DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n"); + return -EINVAL; + } + + if (copy_from_user(&reset, ext, sizeof(reset))) + return -EFAULT; + + job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY; + + job->performance_query.queries = kvmalloc_array(reset.count, + sizeof(struct v3d_performance_query), + GFP_KERNEL); + if (!job->performance_query.queries) + return -ENOMEM; + + syncs = u64_to_user_ptr(reset.syncs); + kperfmon_ids = u64_to_user_ptr(reset.kperfmon_ids); + + for (int i = 0; i < reset.count; i++) { + u32 sync; + u64 ids; + u32 __user *ids_pointer; + u32 id; + + if (copy_from_user(&sync, syncs++, sizeof(sync))) { + kvfree(job->performance_query.queries); + return -EFAULT; + } + + job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync); + + if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) { + kvfree(job->performance_query.queries); + return -EFAULT; + } + + ids_pointer = u64_to_user_ptr(ids); + + for (int j = 0; j < reset.nperfmons; j++) { + if (copy_from_user(&id, ids_pointer++, sizeof(id))) { + kvfree(job->performance_query.queries); + return -EFAULT; + } + + job->performance_query.queries[i].kperfmon_ids[j] = id; + } + } + job->performance_query.count = reset.count; + job->performance_query.nperfmons = reset.nperfmons; + + return 0; +} + /* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data * according to the extension id (name). 
*/ @@ -641,6 +709,9 @@ v3d_get_extensions(struct drm_file *file_priv, case DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY: ret = v3d_get_cpu_copy_query_results_params(file_priv, user_ext, job); break; + case DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY: + ret = v3d_get_cpu_reset_performance_params(file_priv, user_ext, job); + break; default: DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id); return -EINVAL; @@ -1020,6 +1091,7 @@ static const unsigned int cpu_job_bo_handle_count[] = { [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = 1, [V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = 1, [V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = 2, + [V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = 0, }; /** @@ -1158,6 +1230,7 @@ fail: v3d_job_cleanup(clean_job); v3d_put_multisync_post_deps(&se); kvfree(cpu_job->timestamp_query.queries); + kvfree(cpu_job->performance_query.queries); return ret; } diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index f7defbf3d043..d609bdf29fd6 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -76,6 +76,7 @@ struct drm_v3d_extension { #define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY 0x03 #define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY 0x04 #define DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY 0x05 +#define DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY 0x06 __u32 flags; /* mbz */ }; @@ -492,6 +493,32 @@ struct drm_v3d_copy_timestamp_query { __u64 syncs; }; +/** + * struct drm_v3d_reset_performance_query - ioctl extension for the CPU job to + * reset performance queries + * + * When an extension DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY is defined, it + * points to this extension to define a reset performance submission. This CPU + * job will reset the performance queries by resetting the values of the + * performance monitors. Moreover, it will reset the syncobj to reset query + * availability. + */ +struct drm_v3d_reset_performance_query { + struct drm_v3d_extension base; + + /* Array of performance queries's syncobjs to indicate its availability */ + __u64 syncs; + + /* Number of queries */ + __u32 count; + + /* Number of performance monitors */ + __u32 nperfmons; + + /* Array of u64 user-pointers that point to an array of kperfmon_ids */ + __u64 kperfmon_ids; +}; + struct drm_v3d_submit_cpu { /* Pointer to a u32 array of the BOs that are referenced by the job. * @@ -507,6 +534,9 @@ struct drm_v3d_submit_cpu { * For DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY, it must contain two * BOs. The first is the BO where the timestamp queries will be written * to. The second is the BO that contains the timestamp. + * + * For DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY, it must contain no + * BOs. */ __u64 bo_handles; -- cgit v1.2.3 From 209e8d2695ee7a67a5b0487bbd1aa75e290d0f41 Mon Sep 17 00:00:00 2001 From: Maíra Canal Date: Thu, 30 Nov 2023 13:40:40 -0300 Subject: drm/v3d: Create a CPU job extension for the copy performance query job MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A CPU job is a type of job that performs operations that requires CPU intervention. A copy performance query job is a job that copy the complete or partial result of a query to a buffer. In order to copy the result of a performance query to a buffer, we need to get the values from the performance monitors. So, create a user extension for the CPU job that enables the creation of a copy performance query job. 
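As an illustration (again, not part of the patch itself), a submission for this extension could be filled roughly like the sketch below; the layout matches the struct added to include/uapi/drm/v3d_drm.h further down, and the handles, counts and offsets are placeholders:

    /* Hedged user-space sketch: copy `count` performance query results as
     * 64-bit values, followed by an availability word, into a results BO.
     * `count`, `nperfmons`, `ncounters`, `avail_syncobjs` and `kperfmon_ptrs`
     * are hypothetical names for data the application already tracks.
     */
    struct drm_v3d_copy_performance_query copy = {
            .base.id          = DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY,
            .do_64bit         = 1,
            .do_partial       = 0,   /* skip queries that are not available */
            .availability_bit = 1,   /* write availability after the counters */
            .offset           = 0,   /* start of the results area in the BO */
            .stride           = (ncounters + 1) * sizeof(__u64),
            .nperfmons        = nperfmons,
            .ncounters        = ncounters,
            .count            = count,
            .syncs            = (__u64)(uintptr_t)avail_syncobjs, /* u32[count] */
            .kperfmon_ids     = (__u64)(uintptr_t)kperfmon_ptrs,  /* u64[count] */
    };
    /* The single BO passed in bo_handles is the buffer the results land in. */
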
This user extension will allow the creation of a CPU job that copy the results of a performance query to a BO with the possibility to indicate the availability with a availability bit. Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20231130164420.932823-19-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_drv.h | 1 + drivers/gpu/drm/v3d/v3d_sched.c | 65 +++++++++++++++++++++++++++++++ drivers/gpu/drm/v3d/v3d_submit.c | 82 ++++++++++++++++++++++++++++++++++++++++ include/uapi/drm/v3d_drm.h | 50 ++++++++++++++++++++++++ 4 files changed, 198 insertions(+) diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 0f7f80ad8d88..3c7d58866570 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -322,6 +322,7 @@ enum v3d_cpu_job_type { V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY, V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY, V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY, + V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY, }; struct v3d_timestamp_query { diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index e83d2907bc83..54015ad765c7 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -452,12 +452,77 @@ v3d_reset_performance_queries(struct v3d_cpu_job *job) } } +static void +v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 query) +{ + struct v3d_performance_query_info *performance_query = &job->performance_query; + struct v3d_copy_query_results_info *copy = &job->copy; + struct v3d_file_priv *v3d_priv = job->base.file->driver_priv; + struct v3d_dev *v3d = job->base.v3d; + struct v3d_perfmon *perfmon; + u64 counter_values[V3D_PERFCNT_NUM]; + + for (int i = 0; i < performance_query->nperfmons; i++) { + perfmon = v3d_perfmon_find(v3d_priv, + performance_query->queries[query].kperfmon_ids[i]); + if (!perfmon) { + DRM_DEBUG("Failed to find perfmon."); + continue; + } + + v3d_perfmon_stop(v3d, perfmon, true); + + memcpy(&counter_values[i * DRM_V3D_MAX_PERF_COUNTERS], perfmon->values, + perfmon->ncounters * sizeof(u64)); + + v3d_perfmon_put(perfmon); + } + + for (int i = 0; i < performance_query->ncounters; i++) + write_to_buffer(data, i, copy->do_64bit, counter_values[i]); +} + +static void +v3d_copy_performance_query(struct v3d_cpu_job *job) +{ + struct v3d_performance_query_info *performance_query = &job->performance_query; + struct v3d_copy_query_results_info *copy = &job->copy; + struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]); + struct dma_fence *fence; + bool available, write_result; + u8 *data; + + v3d_get_bo_vaddr(bo); + + data = ((u8 *)bo->vaddr) + copy->offset; + + for (int i = 0; i < performance_query->count; i++) { + fence = drm_syncobj_fence_get(performance_query->queries[i].syncobj); + available = fence ? dma_fence_is_signaled(fence) : false; + + write_result = available || copy->do_partial; + if (write_result) + v3d_write_performance_query_result(job, data, i); + + if (copy->availability_bit) + write_to_buffer(data, performance_query->ncounters, + copy->do_64bit, available ? 
1u : 0u); + + data += copy->stride; + + dma_fence_put(fence); + } + + v3d_put_bo_vaddr(bo); +} + static const v3d_cpu_job_fn cpu_job_function[] = { [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = v3d_rewrite_csd_job_wg_counts_from_indirect, [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = v3d_timestamp_query, [V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = v3d_reset_timestamp_queries, [V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = v3d_copy_query_results, [V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = v3d_reset_performance_queries, + [V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = v3d_copy_performance_query, }; static struct dma_fence * diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c index 20af8ae14831..d7a9da2484fd 100644 --- a/drivers/gpu/drm/v3d/v3d_submit.c +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -672,6 +672,84 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv, return 0; } +static int +v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv, + struct drm_v3d_extension __user *ext, + struct v3d_cpu_job *job) +{ + u32 __user *syncs; + u64 __user *kperfmon_ids; + struct drm_v3d_copy_performance_query copy; + + if (!job) { + DRM_DEBUG("CPU job extension was attached to a GPU job.\n"); + return -EINVAL; + } + + if (job->job_type) { + DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n"); + return -EINVAL; + } + + if (copy_from_user(©, ext, sizeof(copy))) + return -EFAULT; + + if (copy.pad) + return -EINVAL; + + job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY; + + job->performance_query.queries = kvmalloc_array(copy.count, + sizeof(struct v3d_performance_query), + GFP_KERNEL); + if (!job->performance_query.queries) + return -ENOMEM; + + syncs = u64_to_user_ptr(copy.syncs); + kperfmon_ids = u64_to_user_ptr(copy.kperfmon_ids); + + for (int i = 0; i < copy.count; i++) { + u32 sync; + u64 ids; + u32 __user *ids_pointer; + u32 id; + + if (copy_from_user(&sync, syncs++, sizeof(sync))) { + kvfree(job->performance_query.queries); + return -EFAULT; + } + + job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync); + + if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) { + kvfree(job->performance_query.queries); + return -EFAULT; + } + + ids_pointer = u64_to_user_ptr(ids); + + for (int j = 0; j < copy.nperfmons; j++) { + if (copy_from_user(&id, ids_pointer++, sizeof(id))) { + kvfree(job->performance_query.queries); + return -EFAULT; + } + + job->performance_query.queries[i].kperfmon_ids[j] = id; + } + } + job->performance_query.count = copy.count; + job->performance_query.nperfmons = copy.nperfmons; + job->performance_query.ncounters = copy.ncounters; + + job->copy.do_64bit = copy.do_64bit; + job->copy.do_partial = copy.do_partial; + job->copy.availability_bit = copy.availability_bit; + job->copy.offset = copy.offset; + job->copy.stride = copy.stride; + + return 0; +} + /* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data * according to the extension id (name). 
*/ @@ -712,6 +790,9 @@ v3d_get_extensions(struct drm_file *file_priv, case DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY: ret = v3d_get_cpu_reset_performance_params(file_priv, user_ext, job); break; + case DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY: + ret = v3d_get_cpu_copy_performance_query_params(file_priv, user_ext, job); + break; default: DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id); return -EINVAL; @@ -1092,6 +1173,7 @@ static const unsigned int cpu_job_bo_handle_count[] = { [V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = 1, [V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = 2, [V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = 0, + [V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = 1, }; /** diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index d609bdf29fd6..dce1835eced4 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -77,6 +77,7 @@ struct drm_v3d_extension { #define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY 0x04 #define DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY 0x05 #define DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY 0x06 +#define DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY 0x07 __u32 flags; /* mbz */ }; @@ -519,6 +520,52 @@ struct drm_v3d_reset_performance_query { __u64 kperfmon_ids; }; +/** + * struct drm_v3d_copy_performance_query - ioctl extension for the CPU job to copy + * performance query results to a buffer + * + * When an extension DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY is defined, it + * points to this extension to define a copy performance query submission. This + * CPU job will copy the performance queries results to a BO with the offset + * and stride defined in the extension. + */ +struct drm_v3d_copy_performance_query { + struct drm_v3d_extension base; + + /* Define if should write to buffer using 64 or 32 bits */ + __u8 do_64bit; + + /* Define if it can write to buffer even if the query is not available */ + __u8 do_partial; + + /* Define if it should write availability bit to buffer */ + __u8 availability_bit; + + /* mbz */ + __u8 pad; + + /* Offset of the buffer in the BO */ + __u32 offset; + + /* Stride of the buffer in the BO */ + __u32 stride; + + /* Number of performance monitors */ + __u32 nperfmons; + + /* Number of performance counters related to this query pool */ + __u32 ncounters; + + /* Number of queries */ + __u32 count; + + /* Array of performance queries's syncobjs to indicate its availability */ + __u64 syncs; + + /* Array of u64 user-pointers that point to an array of kperfmon_ids */ + __u64 kperfmon_ids; +}; + struct drm_v3d_submit_cpu { /* Pointer to a u32 array of the BOs that are referenced by the job. * @@ -537,6 +584,9 @@ struct drm_v3d_submit_cpu { * * For DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY, it must contain no * BOs. + * + * For DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY, it must contain one + * BO, where the performance queries will be written. */ __u64 bo_handles; -- cgit v1.2.3 From ff3670877e7c73d06c2a835d9abb62efcae0145c Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Thu, 6 Jul 2023 11:27:31 +0200 Subject: drm/imx/lcdc: Fix double-free of driver data MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The struct imx_lcdc driver data is allocated using devm_drm_dev_alloc() so it must not be explicitly kfree()d. Also drm_kms_helper_poll_fini() should not be called as there is no matching drm_kms_helper_poll_init(). So drop the release function completely. 
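For context, a minimal sketch of the devres-managed allocation pattern that makes the manual kfree() wrong (simplified, not the exact imx-lcdc probe code):

    /* Allocation in probe: the driver struct is embedded in the DRM device
     * and freed automatically when the last reference to it is dropped.
     */
    lcdc = devm_drm_dev_alloc(&pdev->dev, &imx_lcdc_drm_driver,
                              struct imx_lcdc, drm);
    if (IS_ERR(lcdc))
            return PTR_ERR(lcdc);
    /* A .release callback doing kfree(lcdc) would therefore free the same
     * memory a second time.
     */
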
Fixes: c87e859cdeb5 ("drm/imx/lcdc: Implement DRM driver for imx25") Signed-off-by: Uwe Kleine-König Reviewed-by: Philipp Zabel Signed-off-by: Philipp Zabel Link: https://patchwork.freedesktop.org/patch/msgid/20230706092731.2630232-1-u.kleine-koenig@pengutronix.de --- drivers/gpu/drm/imx/lcdc/imx-lcdc.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c index 0902983374d0..43ddf3a9810b 100644 --- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c +++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c @@ -342,21 +342,12 @@ static const struct drm_mode_config_helper_funcs imx_lcdc_mode_config_helpers = .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, }; -static void imx_lcdc_release(struct drm_device *drm) -{ - struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(drm); - - drm_kms_helper_poll_fini(drm); - kfree(lcdc); -} - DEFINE_DRM_GEM_DMA_FOPS(imx_lcdc_drm_fops); static struct drm_driver imx_lcdc_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &imx_lcdc_drm_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, - .release = imx_lcdc_release, .name = "imx-lcdc", .desc = "i.MX LCDC driver", .date = "20200716", -- cgit v1.2.3 From 44df9a2a13211955ae98f8650e4dc04065e8ec0b Mon Sep 17 00:00:00 2001 From: Carl Vanderlip Date: Fri, 17 Nov 2023 10:43:36 -0700 Subject: accel/qaic: Increase number of in_reset states 'in_reset' holds the state of the device. As part of bringup, the device needs to be queried to check if it's in a valid state. Add a new state that indicates that the device is coming up, but not ready for users yet. Rename to 'dev_state' to better describe the variable. Signed-off-by: Carl Vanderlip Reviewed-by: Pranjal Ramajor Asha Kanojiya Reviewed-by: Jeffrey Hugo Signed-off-by: Jeffrey Hugo Reviewed-by: Jacek Lawrynowicz Link: https://patchwork.freedesktop.org/patch/msgid/20231117174337.20174-2-quic_jhugo@quicinc.com --- drivers/accel/qaic/qaic.h | 13 +++++++++++-- drivers/accel/qaic/qaic_control.c | 5 +++-- drivers/accel/qaic/qaic_data.c | 16 ++++++++-------- drivers/accel/qaic/qaic_drv.c | 12 ++++++------ 4 files changed, 28 insertions(+), 18 deletions(-) diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h index bc40d52dc010..cb04f7f9cdb3 100644 --- a/drivers/accel/qaic/qaic.h +++ b/drivers/accel/qaic/qaic.h @@ -31,6 +31,15 @@ #define to_drm(qddev) (&(qddev)->drm) #define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */ +enum __packed dev_states { + /* Device is offline or will be very soon */ + QAIC_OFFLINE, + /* Device is booting, not clear if it's in a usable state */ + QAIC_BOOT, + /* Device is fully operational */ + QAIC_ONLINE, +}; + extern bool datapath_polling; struct qaic_user { @@ -121,8 +130,8 @@ struct qaic_device { struct workqueue_struct *cntl_wq; /* Synchronizes all the users of device during cleanup */ struct srcu_struct dev_lock; - /* true: Device under reset; false: Device not under reset */ - bool in_reset; + /* Track the state of the device during resets */ + enum dev_states dev_state; /* true: single MSI is used to operate device */ bool single_msi; /* diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c index 84915824be54..9e8a8cbadf6b 100644 --- a/drivers/accel/qaic/qaic_control.c +++ b/drivers/accel/qaic/qaic_control.c @@ -1022,7 +1022,8 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u int xfer_count = 0; int retry_count; - if (qdev->in_reset) { + /* Allow 
QAIC_BOOT state since we need to check control protocol version */ + if (qdev->dev_state == QAIC_OFFLINE) { mutex_unlock(&qdev->cntl_mutex); return ERR_PTR(-ENODEV); } @@ -1306,7 +1307,7 @@ int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ qdev = usr->qddev->qdev; qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); - if (qdev->in_reset) { + if (qdev->dev_state != QAIC_ONLINE) { srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); srcu_read_unlock(&usr->qddev_lock, usr_rcu_id); return -ENODEV; diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c index 8998c28e566e..cf2898eda7ae 100644 --- a/drivers/accel/qaic/qaic_data.c +++ b/drivers/accel/qaic/qaic_data.c @@ -690,7 +690,7 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi qdev = usr->qddev->qdev; qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); - if (qdev->in_reset) { + if (qdev->dev_state != QAIC_ONLINE) { ret = -ENODEV; goto unlock_dev_srcu; } @@ -749,7 +749,7 @@ int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file qdev = usr->qddev->qdev; qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); - if (qdev->in_reset) { + if (qdev->dev_state != QAIC_ONLINE) { ret = -ENODEV; goto unlock_dev_srcu; } @@ -970,7 +970,7 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi qdev = usr->qddev->qdev; qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); - if (qdev->in_reset) { + if (qdev->dev_state != QAIC_ONLINE) { ret = -ENODEV; goto unlock_dev_srcu; } @@ -1341,7 +1341,7 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr qdev = usr->qddev->qdev; qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); - if (qdev->in_reset) { + if (qdev->dev_state != QAIC_ONLINE) { ret = -ENODEV; goto unlock_dev_srcu; } @@ -1497,7 +1497,7 @@ void irq_polling_work(struct work_struct *work) rcu_id = srcu_read_lock(&dbc->ch_lock); while (1) { - if (dbc->qdev->in_reset) { + if (dbc->qdev->dev_state != QAIC_ONLINE) { srcu_read_unlock(&dbc->ch_lock, rcu_id); return; } @@ -1687,7 +1687,7 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file qdev = usr->qddev->qdev; qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); - if (qdev->in_reset) { + if (qdev->dev_state != QAIC_ONLINE) { ret = -ENODEV; goto unlock_dev_srcu; } @@ -1756,7 +1756,7 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file qdev = usr->qddev->qdev; qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); - if (qdev->in_reset) { + if (qdev->dev_state != QAIC_ONLINE) { ret = -ENODEV; goto unlock_dev_srcu; } @@ -1847,7 +1847,7 @@ int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi qdev = usr->qddev->qdev; qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); - if (qdev->in_reset) { + if (qdev->dev_state != QAIC_ONLINE) { ret = -ENODEV; goto unlock_dev_srcu; } diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index b12226385003..24b8680b2dbd 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -64,7 +64,7 @@ static int qaic_open(struct drm_device *dev, struct drm_file *file) int ret; rcu_id = srcu_read_lock(&qdev->dev_lock); - if (qdev->in_reset) { + if (qdev->dev_state != QAIC_ONLINE) { ret = -ENODEV; goto dev_unlock; } @@ -121,7 +121,7 @@ static void qaic_postclose(struct drm_device *dev, struct drm_file *file) if (qddev) { qdev = qddev->qdev; qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); - if (!qdev->in_reset) { + if (qdev->dev_state == 
QAIC_ONLINE) { qaic_release_usr(qdev, usr); for (i = 0; i < qdev->num_dbc; ++i) if (qdev->dbc[i].usr && qdev->dbc[i].usr->handle == usr->handle) @@ -254,7 +254,7 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev)); - qdev->in_reset = false; + qdev->dev_state = QAIC_ONLINE; dev_set_drvdata(&mhi_dev->dev, qdev); qdev->cntl_ch = mhi_dev; @@ -291,7 +291,7 @@ static void qaic_notify_reset(struct qaic_device *qdev) { int i; - qdev->in_reset = true; + qdev->dev_state = QAIC_OFFLINE; /* wake up any waiters to avoid waiting for timeouts at sync */ wake_all_cntl(qdev); for (i = 0; i < qdev->num_dbc; ++i) @@ -313,7 +313,7 @@ void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset) release_dbc(qdev, i); if (exit_reset) - qdev->in_reset = false; + qdev->dev_state = QAIC_ONLINE; } static void cleanup_qdev(struct qaic_device *qdev) @@ -550,7 +550,7 @@ static void qaic_pci_reset_done(struct pci_dev *pdev) { struct qaic_device *qdev = pci_get_drvdata(pdev); - qdev->in_reset = false; + qdev->dev_state = QAIC_ONLINE; qaic_mhi_reset_done(qdev->mhi_cntrl); } -- cgit v1.2.3 From 5f0a0ebca2b98eeda10715433c7d9418ef2e6d01 Mon Sep 17 00:00:00 2001 From: Carl Vanderlip Date: Fri, 17 Nov 2023 10:43:37 -0700 Subject: accel/qaic: Expand DRM device lifecycle Currently the QAIC DRM device registers itself when the MHI QAIC_CONTROL channel becomes available. This is when the device is able to process workloads. However, the DRM driver also provides the debugfs interface bootlog for the device. If the device fails to boot to the QSM (which brings up the MHI QAIC_CONTROL channel), the bootlog won't be available for debugging why it failed to boot. Change when the DRM device registers itself from when QAIC_CONTROL is available to when the card is first probed on the PCI bus. Additionally, make the DRM driver persist through reset/error cases so the driver doesn't have to be reloaded to access the card again. Send KOBJ_ONLINE/OFFLINE uevents so userspace can know when DRM device is ready to handle requests. Signed-off-by: Carl Vanderlip Reviewed-by: Pranjal Ramajor Asha Kanojiya Reviewed-by: Jeffrey Hugo Signed-off-by: Jeffrey Hugo Reviewed-by: Jacek Lawrynowicz Link: https://patchwork.freedesktop.org/patch/msgid/20231117174337.20174-3-quic_jhugo@quicinc.com --- Documentation/accel/qaic/qaic.rst | 9 +++++++- drivers/accel/qaic/mhi_controller.c | 2 +- drivers/accel/qaic/qaic.h | 2 +- drivers/accel/qaic/qaic_drv.c | 44 ++++++++++++++----------------------- 4 files changed, 27 insertions(+), 30 deletions(-) diff --git a/Documentation/accel/qaic/qaic.rst b/Documentation/accel/qaic/qaic.rst index f81020736ebf..efb7771273bb 100644 --- a/Documentation/accel/qaic/qaic.rst +++ b/Documentation/accel/qaic/qaic.rst @@ -93,8 +93,15 @@ commands (does not impact QAIC). uAPI ==== +QAIC creates an accel device per phsyical PCIe device. This accel device exists +for as long as the PCIe device is known to Linux. + +The PCIe device may not be in the state to accept requests from userspace at +all times. QAIC will trigger KOBJ_ONLINE/OFFLINE uevents to advertise when the +device can accept requests (ONLINE) and when the device is no longer accepting +requests (OFFLINE) because of a reset or other state transition. + QAIC defines a number of driver specific IOCTLs as part of the userspace API. -This section describes those APIs. DRM_IOCTL_QAIC_MANAGE This IOCTL allows userspace to send a NNC request to the QSM. 
The call will diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c index 5d3cc30009cc..832464f2833a 100644 --- a/drivers/accel/qaic/mhi_controller.c +++ b/drivers/accel/qaic/mhi_controller.c @@ -469,7 +469,7 @@ static void mhi_status_cb(struct mhi_controller *mhi_cntrl, enum mhi_callback re pci_err(qdev->pdev, "Fatal error received from device. Attempting to recover\n"); /* this event occurs in non-atomic context */ if (reason == MHI_CB_SYS_ERROR) - qaic_dev_reset_clean_local_state(qdev, true); + qaic_dev_reset_clean_local_state(qdev); } static int mhi_reset_and_async_power_up(struct mhi_controller *mhi_cntrl) diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h index cb04f7f9cdb3..582836f9538f 100644 --- a/drivers/accel/qaic/qaic.h +++ b/drivers/accel/qaic/qaic.h @@ -283,7 +283,7 @@ void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id); void release_dbc(struct qaic_device *qdev, u32 dbc_id); void wake_all_cntl(struct qaic_device *qdev); -void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset); +void qaic_dev_reset_clean_local_state(struct qaic_device *qdev); struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index 24b8680b2dbd..2a313eb69b12 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -43,9 +44,6 @@ MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode"); static bool link_up; static DEFINE_IDA(qaic_usrs); -static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id); -static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id); - static void free_usr(struct kref *kref) { struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count); @@ -183,13 +181,6 @@ static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id) qddev->partition_id = partition_id; - /* - * drm_dev_unregister() sets the driver data to NULL and - * drm_dev_register() does not update the driver data. During a SOC - * reset drm dev is unregistered and registered again leaving the - * driver data to NULL. 
- */ - dev_set_drvdata(to_accel_kdev(qddev), drm->accel); ret = drm_dev_register(drm, 0); if (ret) pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret); @@ -203,7 +194,6 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id) struct drm_device *drm = to_drm(qddev); struct qaic_user *usr; - drm_dev_get(drm); drm_dev_unregister(drm); qddev->partition_id = 0; /* @@ -232,7 +222,6 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id) mutex_lock(&qddev->users_mutex); } mutex_unlock(&qddev->users_mutex); - drm_dev_put(drm); } static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) @@ -254,8 +243,6 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev)); - qdev->dev_state = QAIC_ONLINE; - dev_set_drvdata(&mhi_dev->dev, qdev); qdev->cntl_ch = mhi_dev; @@ -265,6 +252,7 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id return ret; } + qdev->dev_state = QAIC_BOOT; ret = get_cntl_version(qdev, NULL, &major, &minor); if (ret || major != CNTL_MAJOR || minor > CNTL_MINOR) { pci_err(qdev->pdev, "%s: Control protocol version (%d.%d) not supported. Supported version is (%d.%d). Ret: %d\n", @@ -272,8 +260,8 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id ret = -EINVAL; goto close_control; } - - ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION); + qdev->dev_state = QAIC_ONLINE; + kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_ONLINE); return ret; @@ -291,6 +279,7 @@ static void qaic_notify_reset(struct qaic_device *qdev) { int i; + kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_OFFLINE); qdev->dev_state = QAIC_OFFLINE; /* wake up any waiters to avoid waiting for timeouts at sync */ wake_all_cntl(qdev); @@ -299,21 +288,15 @@ static void qaic_notify_reset(struct qaic_device *qdev) synchronize_srcu(&qdev->dev_lock); } -void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset) +void qaic_dev_reset_clean_local_state(struct qaic_device *qdev) { int i; qaic_notify_reset(qdev); - /* remove drmdevs to prevent new users from coming in */ - qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION); - /* start tearing things down */ for (i = 0; i < qdev->num_dbc; ++i) release_dbc(qdev, i); - - if (exit_reset) - qdev->dev_state = QAIC_ONLINE; } static void cleanup_qdev(struct qaic_device *qdev) @@ -338,6 +321,7 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de if (!qdev) return NULL; + qdev->dev_state = QAIC_OFFLINE; if (id->device == PCI_DEV_AIC100) { qdev->num_dbc = 16; qdev->dbc = devm_kcalloc(&pdev->dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL); @@ -499,15 +483,21 @@ static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto cleanup_qdev; } + ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION); + if (ret) + goto cleanup_qdev; + qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq, qdev->single_msi); if (IS_ERR(qdev->mhi_cntrl)) { ret = PTR_ERR(qdev->mhi_cntrl); - goto cleanup_qdev; + goto cleanup_drm_dev; } return 0; +cleanup_drm_dev: + qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION); cleanup_qdev: cleanup_qdev(qdev); return ret; @@ -520,7 +510,8 @@ static void qaic_pci_remove(struct pci_dev *pdev) if (!qdev) return; - qaic_dev_reset_clean_local_state(qdev, false); + qaic_dev_reset_clean_local_state(qdev); + qaic_destroy_drm_device(qdev, 
QAIC_NO_PARTITION); qaic_mhi_free_controller(qdev->mhi_cntrl, link_up); cleanup_qdev(qdev); } @@ -543,14 +534,13 @@ static void qaic_pci_reset_prepare(struct pci_dev *pdev) qaic_notify_reset(qdev); qaic_mhi_start_reset(qdev->mhi_cntrl); - qaic_dev_reset_clean_local_state(qdev, false); + qaic_dev_reset_clean_local_state(qdev); } static void qaic_pci_reset_done(struct pci_dev *pdev) { struct qaic_device *qdev = pci_get_drvdata(pdev); - qdev->dev_state = QAIC_ONLINE; qaic_mhi_reset_done(qdev->mhi_cntrl); } -- cgit v1.2.3 From 8570c27932e132d2663e8120311891deb2a853de Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 23 Aug 2023 14:54:54 -0700 Subject: drm/syncobj: Add deadline support for syncobj waits Add a new flag to let userspace provide a deadline as a hint for syncobj and timeline waits. This gives a hint to the driver signaling the backing fences about how soon userspace needs it to compete work, so it can adjust GPU frequency accordingly. An immediate deadline can be given to provide something equivalent to i915 "wait boost". v2: Use absolute u64 ns value for deadline hint, drop cap and driver feature flag in favor of allowing count_handles==0 as a way for userspace to probe kernel for support of new flag v3: More verbose comments about UAPI v4: Fix negative zero, s/deadline_ns/deadline_nsec/ for consistency with existing ioctl struct fields v5: Comment/description typo fixes Signed-off-by: Rob Clark Reviewed-by: Tvrtko Ursulin [DB: fixed checkpatch warnings] Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20230823215458.203366-2-robdclark@gmail.com --- drivers/gpu/drm/drm_syncobj.c | 64 ++++++++++++++++++++++++++++++++++--------- include/uapi/drm/drm.h | 17 ++++++++++++ 2 files changed, 68 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 01da6789d044..cbb65b7ba425 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -126,6 +126,11 @@ * synchronize between the two. * This requirement is inherited from the Vulkan fence API. * + * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE is set, the ioctl will also set + * a fence deadline hint on the backing fences before waiting, to provide the + * fence signaler with an appropriate sense of urgency. The deadline is + * specified as an absolute &CLOCK_MONOTONIC value in units of ns. + * * Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj * handles as well as an array of u64 points and does a host-side wait on all * of syncobj fences at the given points simultaneously. 
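As an aside (not part of the diff), a minimal user-space sketch of the new flag might look like this, assuming libdrm's drmIoctl(), the usual stdint/time headers, and placeholder handles:

    struct timespec ts;
    uint64_t now_ns;
    uint32_t handle = syncobj_handle;            /* placeholder handle */

    clock_gettime(CLOCK_MONOTONIC, &ts);
    now_ns = (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;

    struct drm_syncobj_wait wait = {
            .handles       = (uint64_t)(uintptr_t)&handle,
            .count_handles = 1,
            .timeout_nsec  = now_ns + 100000000,  /* absolute, ~100 ms */
            .flags         = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE,
            .deadline_nsec = now_ns + 16666667,   /* absolute CLOCK_MONOTONIC */
    };
    drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);

Because count_handles == 0 now returns 0 instead of -EINVAL, user space can probe for DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE support with an empty wait before relying on it.
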
@@ -1027,7 +1032,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, uint32_t count, uint32_t flags, signed long timeout, - uint32_t *idx) + uint32_t *idx, + ktime_t *deadline) { struct syncobj_wait_entry *entries; struct dma_fence *fence; @@ -1108,6 +1114,15 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]); } + if (deadline) { + for (i = 0; i < count; ++i) { + fence = entries[i].fence; + if (!fence) + continue; + dma_fence_set_deadline(fence, *deadline); + } + } + do { set_current_state(TASK_INTERRUPTIBLE); @@ -1206,7 +1221,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev, struct drm_file *file_private, struct drm_syncobj_wait *wait, struct drm_syncobj_timeline_wait *timeline_wait, - struct drm_syncobj **syncobjs, bool timeline) + struct drm_syncobj **syncobjs, bool timeline, + ktime_t *deadline) { signed long timeout = 0; uint32_t first = ~0; @@ -1217,7 +1233,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev, NULL, wait->count_handles, wait->flags, - timeout, &first); + timeout, &first, + deadline); if (timeout < 0) return timeout; wait->first_signaled = first; @@ -1227,7 +1244,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev, u64_to_user_ptr(timeline_wait->points), timeline_wait->count_handles, timeline_wait->flags, - timeout, &first); + timeout, &first, + deadline); if (timeout < 0) return timeout; timeline_wait->first_signaled = first; @@ -1298,17 +1316,22 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data, { struct drm_syncobj_wait *args = data; struct drm_syncobj **syncobjs; + unsigned int possible_flags; + ktime_t t, *tp = NULL; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) return -EOPNOTSUPP; - if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL | - DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)) + possible_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE; + + if (args->flags & ~possible_flags) return -EINVAL; if (args->count_handles == 0) - return -EINVAL; + return 0; ret = drm_syncobj_array_find(file_private, u64_to_user_ptr(args->handles), @@ -1317,8 +1340,13 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data, if (ret < 0) return ret; + if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE) { + t = ns_to_ktime(args->deadline_nsec); + tp = &t; + } + ret = drm_syncobj_array_wait(dev, file_private, - args, NULL, syncobjs, false); + args, NULL, syncobjs, false, tp); drm_syncobj_array_free(syncobjs, args->count_handles); @@ -1331,18 +1359,23 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data, { struct drm_syncobj_timeline_wait *args = data; struct drm_syncobj **syncobjs; + unsigned int possible_flags; + ktime_t t, *tp = NULL; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) return -EOPNOTSUPP; - if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL | - DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | - DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) + possible_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE; + + if (args->flags & ~possible_flags) return -EINVAL; if (args->count_handles == 0) - return -EINVAL; + return 0; ret = drm_syncobj_array_find(file_private, u64_to_user_ptr(args->handles), @@ -1351,8 +1384,13 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void 
*data, if (ret < 0) return ret; + if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE) { + t = ns_to_ktime(args->deadline_nsec); + tp = &t; + } + ret = drm_syncobj_array_wait(dev, file_private, - NULL, args, syncobjs, true); + NULL, args, syncobjs, true, tp); drm_syncobj_array_free(syncobjs, args->count_handles); diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 91943e5ce010..16122819edfe 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -926,6 +926,7 @@ struct drm_syncobj_transfer { #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0) #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1) #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */ +#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */ struct drm_syncobj_wait { __u64 handles; /* absolute timeout */ @@ -934,6 +935,14 @@ struct drm_syncobj_wait { __u32 flags; __u32 first_signaled; /* only valid when not waiting all */ __u32 pad; + /** + * @deadline_nsec - fence deadline hint + * + * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing + * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is + * set. + */ + __u64 deadline_nsec; }; struct drm_syncobj_timeline_wait { @@ -946,6 +955,14 @@ struct drm_syncobj_timeline_wait { __u32 flags; __u32 first_signaled; /* only valid when not waiting all */ __u32 pad; + /** + * @deadline_nsec - fence deadline hint + * + * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing + * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is + * set. + */ + __u64 deadline_nsec; }; /** -- cgit v1.2.3 From 63ee44540205d993854f143a5ab1d7d9e63ffcf1 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 23 Aug 2023 14:54:55 -0700 Subject: dma-buf/sync_file: Add SET_DEADLINE ioctl The initial purpose is for igt tests, but this would also be useful for compositors that wait until close to vblank deadline to make decisions about which frame to show. The igt tests can be found at: https://gitlab.freedesktop.org/robclark/igt-gpu-tools/-/commits/fence-deadline v2: Clarify the timebase, add link to igt tests v3: Use u64 value in ns to express deadline. v4: More doc Signed-off-by: Rob Clark Acked-by: Pekka Paalanen Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20230823215458.203366-3-robdclark@gmail.com --- drivers/dma-buf/dma-fence.c | 3 ++- drivers/dma-buf/sync_file.c | 19 +++++++++++++++++++ include/uapi/linux/sync_file.h | 22 ++++++++++++++++++++++ 3 files changed, 43 insertions(+), 1 deletion(-) diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 8aa8f8cb7071..e0fd99e61a2d 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -934,7 +934,8 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout); * the GPU's devfreq to reduce frequency, when in fact the opposite is what is * needed. * - * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline. + * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline + * (or indirectly via userspace facing ioctls like &sync_set_deadline). * The deadline hint provides a way for the waiting driver, or userspace, to * convey an appropriate sense of urgency to the signaling driver. 
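Putting the syncobj UAPI additions above together, a waiter that wants the backing fences signalled before its next frame can fill in the new deadline_nsec field with an absolute CLOCK_MONOTONIC value, as documented. A sketch (assumes libdrm's drmIoctl() and patched uapi headers; error handling trimmed):

    #include <stdint.h>
    #include <string.h>
    #include <time.h>
    #include <xf86drm.h>
    #include <drm.h>

    static int syncobj_wait_with_deadline(int drm_fd, const uint32_t *handles,
                                          uint32_t count, int64_t abs_timeout_ns,
                                          uint64_t ns_until_deadline)
    {
            struct drm_syncobj_wait wait;
            struct timespec t;

            clock_gettime(CLOCK_MONOTONIC, &t);

            memset(&wait, 0, sizeof(wait));
            wait.handles = (uintptr_t)handles;
            wait.count_handles = count;
            wait.timeout_nsec = abs_timeout_ns;      /* absolute timeout, unchanged semantics */
            wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
                         DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
            /* Deadline hint: absolute CLOCK_MONOTONIC, in nanoseconds. */
            wait.deadline_nsec = (uint64_t)t.tv_sec * 1000000000ull +
                                 t.tv_nsec + ns_until_deadline;

            return drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
    }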
* diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 2e9a316c596a..d9b1c1b2a72b 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -347,6 +347,22 @@ out: return ret; } +static int sync_file_ioctl_set_deadline(struct sync_file *sync_file, + unsigned long arg) +{ + struct sync_set_deadline ts; + + if (copy_from_user(&ts, (void __user *)arg, sizeof(ts))) + return -EFAULT; + + if (ts.pad) + return -EINVAL; + + dma_fence_set_deadline(sync_file->fence, ns_to_ktime(ts.deadline_ns)); + + return 0; +} + static long sync_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -359,6 +375,9 @@ static long sync_file_ioctl(struct file *file, unsigned int cmd, case SYNC_IOC_FILE_INFO: return sync_file_ioctl_fence_info(sync_file, arg); + case SYNC_IOC_SET_DEADLINE: + return sync_file_ioctl_set_deadline(sync_file, arg); + default: return -ENOTTY; } diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h index ff0a931833e2..ff1f38889dcf 100644 --- a/include/uapi/linux/sync_file.h +++ b/include/uapi/linux/sync_file.h @@ -76,6 +76,27 @@ struct sync_file_info { __u64 sync_fence_info; }; +/** + * struct sync_set_deadline - SYNC_IOC_SET_DEADLINE - set a deadline hint on a fence + * @deadline_ns: absolute time of the deadline + * @pad: must be zero + * + * Allows userspace to set a deadline on a fence, see &dma_fence_set_deadline + * + * The timebase for the deadline is CLOCK_MONOTONIC (same as vblank). For + * example + * + * clock_gettime(CLOCK_MONOTONIC, &t); + * deadline_ns = (t.tv_sec * 1000000000L) + t.tv_nsec + ns_until_deadline + */ +struct sync_set_deadline { + __u64 deadline_ns; + /* Not strictly needed for alignment but gives some possibility + * for future extension: + */ + __u64 pad; +}; + #define SYNC_IOC_MAGIC '>' /* @@ -87,5 +108,6 @@ struct sync_file_info { #define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data) #define SYNC_IOC_FILE_INFO _IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info) +#define SYNC_IOC_SET_DEADLINE _IOW(SYNC_IOC_MAGIC, 5, struct sync_set_deadline) #endif /* _UAPI_LINUX_SYNC_H */ -- cgit v1.2.3 From 70e67aaec2f4706df0006423eebca813b00f5840 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 23 Aug 2023 14:54:56 -0700 Subject: dma-buf/sw_sync: Add fence deadline support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This consists of simply storing the most recent deadline, and adding an ioctl to retrieve the deadline. This can be used in conjunction with the SET_DEADLINE ioctl on a fence fd for testing. Ie. create various sw_sync fences, merge them into a fence-array, set deadline on the fence-array and confirm that it is propagated properly to each fence. v2: Switch UABI to express deadline as u64 v3: More verbose UAPI docs, show how to convert from timespec v4: Better comments, track the soonest deadline, as a normal fence implementation would, return an error if no deadline set. 
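For the sync_file side added above, usage from user-space is analogous: fill struct sync_set_deadline with an absolute CLOCK_MONOTONIC value and zeroed padding, then issue SYNC_IOC_SET_DEADLINE on the fence fd. A minimal sketch (assumes <linux/sync_file.h> from patched headers):

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <time.h>
    #include <linux/sync_file.h>

    static int sync_file_set_deadline(int fence_fd, uint64_t ns_until_deadline)
    {
            struct sync_set_deadline ts;
            struct timespec t;

            clock_gettime(CLOCK_MONOTONIC, &t);

            memset(&ts, 0, sizeof(ts));  /* pad must be zero or the kernel returns -EINVAL */
            ts.deadline_ns = (uint64_t)t.tv_sec * 1000000000ull +
                             t.tv_nsec + ns_until_deadline;

            return ioctl(fence_fd, SYNC_IOC_SET_DEADLINE, &ts);
    }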
Signed-off-by: Rob Clark Reviewed-by: Christian König Acked-by: Pekka Paalanen Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20230823215458.203366-4-robdclark@gmail.com --- drivers/dma-buf/sw_sync.c | 82 ++++++++++++++++++++++++++++++++++++++++++++ drivers/dma-buf/sync_debug.h | 2 ++ 2 files changed, 84 insertions(+) diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index f0a35277fd84..c353029789cf 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -52,12 +52,33 @@ struct sw_sync_create_fence_data { __s32 fence; /* fd of new fence */ }; +/** + * struct sw_sync_get_deadline - get the deadline hint of a sw_sync fence + * @deadline_ns: absolute time of the deadline + * @pad: must be zero + * @fence_fd: the sw_sync fence fd (in) + * + * Return the earliest deadline set on the fence. The timebase for the + * deadline is CLOCK_MONOTONIC (same as vblank). If there is no deadline + * set on the fence, this ioctl will return -ENOENT. + */ +struct sw_sync_get_deadline { + __u64 deadline_ns; + __u32 pad; + __s32 fence_fd; +}; + #define SW_SYNC_IOC_MAGIC 'W' #define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\ struct sw_sync_create_fence_data) #define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) +#define SW_SYNC_GET_DEADLINE _IOWR(SW_SYNC_IOC_MAGIC, 2, \ + struct sw_sync_get_deadline) + + +#define SW_SYNC_HAS_DEADLINE_BIT DMA_FENCE_FLAG_USER_BITS static const struct dma_fence_ops timeline_fence_ops; @@ -171,6 +192,22 @@ static void timeline_fence_timeline_value_str(struct dma_fence *fence, snprintf(str, size, "%d", parent->value); } +static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline) +{ + struct sync_pt *pt = dma_fence_to_sync_pt(fence); + unsigned long flags; + + spin_lock_irqsave(fence->lock, flags); + if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) { + if (ktime_before(deadline, pt->deadline)) + pt->deadline = deadline; + } else { + pt->deadline = deadline; + __set_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags); + } + spin_unlock_irqrestore(fence->lock, flags); +} + static const struct dma_fence_ops timeline_fence_ops = { .get_driver_name = timeline_fence_get_driver_name, .get_timeline_name = timeline_fence_get_timeline_name, @@ -179,6 +216,7 @@ static const struct dma_fence_ops timeline_fence_ops = { .release = timeline_fence_release, .fence_value_str = timeline_fence_value_str, .timeline_value_str = timeline_fence_timeline_value_str, + .set_deadline = timeline_fence_set_deadline, }; /** @@ -387,6 +425,47 @@ static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg) return 0; } +static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long arg) +{ + struct sw_sync_get_deadline data; + struct dma_fence *fence; + unsigned long flags; + struct sync_pt *pt; + int ret = 0; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + if (data.deadline_ns || data.pad) + return -EINVAL; + + fence = sync_file_get_fence(data.fence_fd); + if (!fence) + return -EINVAL; + + pt = dma_fence_to_sync_pt(fence); + if (!pt) + return -EINVAL; + + spin_lock_irqsave(fence->lock, flags); + if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) { + data.deadline_ns = ktime_to_ns(pt->deadline); + } else { + ret = -ENOENT; + } + spin_unlock_irqrestore(fence->lock, flags); + + dma_fence_put(fence); + + if (ret) + return ret; + + if (copy_to_user((void __user *)arg, &data, sizeof(data))) + return -EFAULT; + + return 0; +} + static long 
sw_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -399,6 +478,9 @@ static long sw_sync_ioctl(struct file *file, unsigned int cmd, case SW_SYNC_IOC_INC: return sw_sync_ioctl_inc(obj, arg); + case SW_SYNC_GET_DEADLINE: + return sw_sync_ioctl_get_deadline(obj, arg); + default: return -ENOTTY; } diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h index 6176e52ba2d7..a1bdd62efccd 100644 --- a/drivers/dma-buf/sync_debug.h +++ b/drivers/dma-buf/sync_debug.h @@ -55,11 +55,13 @@ static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence) * @base: base fence object * @link: link on the sync timeline's list * @node: node in the sync timeline's tree + * @deadline: the earliest fence deadline hint */ struct sync_pt { struct dma_fence base; struct list_head link; struct rb_node node; + ktime_t deadline; }; extern const struct file_operations sw_sync_debugfs_fops; -- cgit v1.2.3 From e50e5fed41c7eed2db4119645bf3480ec43fec11 Mon Sep 17 00:00:00 2001 From: Jessica Zhang Date: Fri, 27 Oct 2023 15:32:51 -0700 Subject: drm: Introduce pixel_source DRM plane property Add support for pixel_source property to drm_plane and related documentation. In addition, force pixel_source to DRM_PLANE_PIXEL_SOURCE_FB in DRM_IOCTL_MODE_SETPLANE as to not break legacy userspace. This enum property will allow user to specify a pixel source for the plane. Possible pixel sources will be defined in the drm_plane_pixel_source enum. Currently, the only pixel sources are DRM_PLANE_PIXEL_SOURCE_FB (the default value) and DRM_PLANE_PIXEL_SOURCE_NONE. Acked-by: Dmitry Baryshkov Acked-by: Pekka Paalanen Acked-by: Harry Wentland Acked-by: Sebastian Wick Acked-by: Simon Ser Signed-off-by: Jessica Zhang Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231027-solid-fill-v7-1-780188bfa7b2@quicinc.com --- drivers/gpu/drm/drm_atomic_state_helper.c | 1 + drivers/gpu/drm/drm_atomic_uapi.c | 4 ++ drivers/gpu/drm/drm_blend.c | 94 +++++++++++++++++++++++++++++++ drivers/gpu/drm/drm_plane.c | 19 +++++-- include/drm/drm_blend.h | 2 + include/drm/drm_plane.h | 21 +++++++ 6 files changed, 137 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index 54975de44a0e..311b04edf742 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -252,6 +252,7 @@ void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state, plane_state->alpha = DRM_BLEND_ALPHA_OPAQUE; plane_state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI; + plane_state->pixel_source = DRM_PLANE_PIXEL_SOURCE_FB; if (plane->color_encoding_property) { if (!drm_object_property_get_default_value(&plane->base, diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index aee4a65d4959..bd7140531948 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -562,6 +562,8 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane, state->src_w = val; } else if (property == config->prop_src_h) { state->src_h = val; + } else if (property == plane->pixel_source_property) { + state->pixel_source = val; } else if (property == plane->alpha_property) { state->alpha = val; } else if (property == plane->blend_mode_property) { @@ -650,6 +652,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane, *val = state->src_w; } else if (property == config->prop_src_h) { *val = state->src_h; + } else if (property == 
plane->pixel_source_property) { + *val = state->pixel_source; } else if (property == plane->alpha_property) { *val = state->alpha; } else if (property == plane->blend_mode_property) { diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index 6e74de833466..fce734cdb85b 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -185,6 +185,25 @@ * plane does not expose the "alpha" property, then this is * assumed to be 1.0 * + * pixel_source: + * pixel_source is set up with drm_plane_create_pixel_source_property(). + * It is used to toggle the active source of pixel data for the plane. + * The plane will only display data from the set pixel_source -- any + * data from other sources will be ignored. + * + * For non-framebuffer sources, if pixel_source is set to a non-framebuffer + * and non-NONE source, and the corresponding source property is NULL, then + * the atomic commit should return an error. + * + * Possible values: + * + * "NONE": + * No active pixel source. + * Committing with a NONE pixel source will disable the plane. + * + * "FB": + * Framebuffer source set by the "FB_ID" property. + * * Note that all the property extensions described here apply either to the * plane or the CRTC (e.g. for the background color, which currently is not * exposed and assumed to be black). @@ -615,3 +634,78 @@ int drm_plane_create_blend_mode_property(struct drm_plane *plane, return 0; } EXPORT_SYMBOL(drm_plane_create_blend_mode_property); + +static const struct drm_prop_enum_list drm_pixel_source_enum_list[] = { + { DRM_PLANE_PIXEL_SOURCE_NONE, "NONE" }, + { DRM_PLANE_PIXEL_SOURCE_FB, "FB" }, +}; + +/** + * drm_plane_create_pixel_source_property - create a new pixel source property + * @plane: DRM plane + * @extra_sources: Bitmask of additional supported pixel_sources for the driver. + * DRM_PLANE_PIXEL_SOURCE_FB and DRM_PLANE_PIXEL_SOURCE_NONE will + * always be enabled as supported sources. + * + * This creates a new property describing the current source of pixel data for the + * plane. The pixel_source will be initialized as DRM_PLANE_PIXEL_SOURCE_FB by default. + * + * Drivers can set a custom default source by overriding the pixel_source value in + * drm_plane_funcs.reset() + * + * The property is exposed to userspace as an enumeration property called + * "pixel_source" and has the following enumeration values: + * + * "NONE": + * No active pixel source + * + * "FB": + * Framebuffer pixel source + * + * Returns: + * Zero on success, negative errno on failure. 
+ */ +int drm_plane_create_pixel_source_property(struct drm_plane *plane, + unsigned long extra_sources) +{ + struct drm_device *dev = plane->dev; + struct drm_property *prop; + static const unsigned int valid_source_mask = BIT(DRM_PLANE_PIXEL_SOURCE_FB) | + BIT(DRM_PLANE_PIXEL_SOURCE_NONE); + int i; + + /* FB is supported by default */ + unsigned long supported_sources = extra_sources | + BIT(DRM_PLANE_PIXEL_SOURCE_FB) | + BIT(DRM_PLANE_PIXEL_SOURCE_NONE); + + if (WARN_ON(supported_sources & ~valid_source_mask)) + return -EINVAL; + + prop = drm_property_create(dev, DRM_MODE_PROP_ENUM | DRM_MODE_PROP_ATOMIC, "pixel_source", + hweight32(supported_sources)); + + if (!prop) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(drm_pixel_source_enum_list); i++) { + int ret; + + if (!test_bit(drm_pixel_source_enum_list[i].type, &supported_sources)) + continue; + + ret = drm_property_add_enum(prop, drm_pixel_source_enum_list[i].type, + drm_pixel_source_enum_list[i].name); + if (ret) { + drm_property_destroy(dev, prop); + + return ret; + } + } + + drm_object_attach_property(&plane->base, prop, DRM_PLANE_PIXEL_SOURCE_FB); + plane->pixel_source_property = prop; + + return 0; +} +EXPORT_SYMBOL(drm_plane_create_pixel_source_property); diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 9e8e4c60983d..c8dbe5ae60cc 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -953,6 +953,14 @@ bool drm_any_plane_has_format(struct drm_device *dev, } EXPORT_SYMBOL(drm_any_plane_has_format); +static bool drm_plane_needs_disable(struct drm_plane_state *state, struct drm_framebuffer *fb) +{ + if (state->pixel_source == DRM_PLANE_PIXEL_SOURCE_NONE) + return true; + + return state->pixel_source == DRM_PLANE_PIXEL_SOURCE_FB && fb == NULL; +} + /* * __setplane_internal - setplane handler for internal callers * @@ -975,8 +983,8 @@ static int __setplane_internal(struct drm_plane *plane, WARN_ON(drm_drv_uses_atomic_modeset(plane->dev)); - /* No fb means shut it down */ - if (!fb) { + /* No visible data means shut it down */ + if (drm_plane_needs_disable(plane->state, fb)) { plane->old_fb = plane->fb; ret = plane->funcs->disable_plane(plane, ctx); if (!ret) { @@ -1027,8 +1035,8 @@ static int __setplane_atomic(struct drm_plane *plane, WARN_ON(!drm_drv_uses_atomic_modeset(plane->dev)); - /* No fb means shut it down */ - if (!fb) + /* No visible data means shut it down */ + if (drm_plane_needs_disable(plane->state, fb)) return plane->funcs->disable_plane(plane, ctx); /* @@ -1101,6 +1109,9 @@ int drm_mode_setplane(struct drm_device *dev, void *data, return -ENOENT; } + if (plane->state) + plane->state->pixel_source = DRM_PLANE_PIXEL_SOURCE_FB; + if (plane_req->fb_id) { fb = drm_framebuffer_lookup(dev, file_priv, plane_req->fb_id); if (!fb) { diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h index 88bdfec3bd88..122bbfbaae33 100644 --- a/include/drm/drm_blend.h +++ b/include/drm/drm_blend.h @@ -58,4 +58,6 @@ int drm_atomic_normalize_zpos(struct drm_device *dev, struct drm_atomic_state *state); int drm_plane_create_blend_mode_property(struct drm_plane *plane, unsigned int supported_modes); +int drm_plane_create_pixel_source_property(struct drm_plane *plane, + unsigned long extra_sources); #endif diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index c6565a6f9324..bc0176ba25be 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -40,6 +40,12 @@ enum drm_scaling_filter { DRM_SCALING_FILTER_NEAREST_NEIGHBOR, }; +enum drm_plane_pixel_source { + 
DRM_PLANE_PIXEL_SOURCE_NONE, + DRM_PLANE_PIXEL_SOURCE_FB, + DRM_PLANE_PIXEL_SOURCE_MAX +}; + /** * struct drm_plane_state - mutable plane state * @@ -120,6 +126,14 @@ struct drm_plane_state { /** @hotspot_y: y offset to mouse cursor hotspot */ int32_t hotspot_x, hotspot_y; + /** + * @pixel_source: + * + * Source of pixel information for the plane. See + * drm_plane_create_pixel_source_property() for more details. + */ + enum drm_plane_pixel_source pixel_source; + /** * @alpha: * Opacity of the plane with 0 as completely transparent and 0xffff as @@ -713,6 +727,13 @@ struct drm_plane { */ struct drm_plane_state *state; + /* + * @pixel_source_property: + * Optional pixel_source property for this plane. See + * drm_plane_create_pixel_source_property(). + */ + struct drm_property *pixel_source_property; + /** * @alpha_property: * Optional alpha property for this plane. See -- cgit v1.2.3 From 85863a4e16e77079ee14865905ddc3ef9483a640 Mon Sep 17 00:00:00 2001 From: Jessica Zhang Date: Fri, 27 Oct 2023 15:32:52 -0700 Subject: drm: Introduce solid fill DRM plane property Document and add support for solid_fill property to drm_plane. In addition, add support for setting and getting the values for solid_fill. To enable solid fill planes, userspace must assign a property blob to the "solid_fill" plane property containing the following information: struct drm_mode_solid_fill { u32 r, g, b, pad; }; Acked-by: Harry Wentland Acked-by: Sebastian Wick Signed-off-by: Jessica Zhang Reviewed-by: Dmitry Baryshkov Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231027-solid-fill-v7-2-780188bfa7b2@quicinc.com --- drivers/gpu/drm/drm_atomic_state_helper.c | 9 ++++++++ drivers/gpu/drm/drm_atomic_uapi.c | 26 ++++++++++++++++++++++ drivers/gpu/drm/drm_blend.c | 30 ++++++++++++++++++++++++++ include/drm/drm_blend.h | 1 + include/drm/drm_plane.h | 36 +++++++++++++++++++++++++++++++ include/uapi/drm/drm_mode.h | 24 +++++++++++++++++++++ 6 files changed, 126 insertions(+) diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index 311b04edf742..f6c76cea8be4 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -254,6 +254,11 @@ void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state, plane_state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI; plane_state->pixel_source = DRM_PLANE_PIXEL_SOURCE_FB; + if (plane_state->solid_fill_blob) { + drm_property_blob_put(plane_state->solid_fill_blob); + plane_state->solid_fill_blob = NULL; + } + if (plane->color_encoding_property) { if (!drm_object_property_get_default_value(&plane->base, plane->color_encoding_property, @@ -350,6 +355,9 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane, if (state->fb) drm_framebuffer_get(state->fb); + if (state->solid_fill_blob) + drm_property_blob_get(state->solid_fill_blob); + state->fence = NULL; state->commit = NULL; state->fb_damage_clips = NULL; @@ -399,6 +407,7 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state) drm_crtc_commit_put(state->commit); drm_property_blob_put(state->fb_damage_clips); + drm_property_blob_put(state->solid_fill_blob); } EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state); diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index bd7140531948..d7ae8e2c0265 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -316,6 +316,20 @@ 
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, } EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector); +static void drm_atomic_set_solid_fill_prop(struct drm_plane_state *state) +{ + struct drm_mode_solid_fill *user_info; + + if (!state->solid_fill_blob) + return; + + user_info = (struct drm_mode_solid_fill *)state->solid_fill_blob->data; + + state->solid_fill.r = user_info->r; + state->solid_fill.g = user_info->g; + state->solid_fill.b = user_info->b; +} + static void set_out_fence_for_crtc(struct drm_atomic_state *state, struct drm_crtc *crtc, s32 __user *fence_ptr) { @@ -564,6 +578,15 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane, state->src_h = val; } else if (property == plane->pixel_source_property) { state->pixel_source = val; + } else if (property == plane->solid_fill_property) { + ret = drm_atomic_replace_property_blob_from_id(dev, + &state->solid_fill_blob, + val, sizeof(struct drm_mode_solid_fill), + -1, &replaced); + if (ret) + return ret; + + drm_atomic_set_solid_fill_prop(state); } else if (property == plane->alpha_property) { state->alpha = val; } else if (property == plane->blend_mode_property) { @@ -654,6 +677,9 @@ drm_atomic_plane_get_property(struct drm_plane *plane, *val = state->src_h; } else if (property == plane->pixel_source_property) { *val = state->pixel_source; + } else if (property == plane->solid_fill_property) { + *val = state->solid_fill_blob ? + state->solid_fill_blob->base.id : 0; } else if (property == plane->alpha_property) { *val = state->alpha; } else if (property == plane->blend_mode_property) { diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index fce734cdb85b..665c5d9b056f 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -204,6 +204,10 @@ * "FB": * Framebuffer source set by the "FB_ID" property. * + * solid_fill: + * solid_fill is set up with drm_plane_create_solid_fill_property(). It + * contains pixel data that drivers can use to fill a plane. + * * Note that all the property extensions described here apply either to the * plane or the CRTC (e.g. for the background color, which currently is not * exposed and assumed to be black). @@ -709,3 +713,29 @@ int drm_plane_create_pixel_source_property(struct drm_plane *plane, return 0; } EXPORT_SYMBOL(drm_plane_create_pixel_source_property); + +/** + * drm_plane_create_solid_fill_property - create a new solid_fill property + * @plane: drm plane + * + * This creates a new property blob that holds pixel data for solid fill planes. + * The property is exposed to userspace as a property blob called "solid_fill". + * + * For information on what the blob contains, see `drm_mode_solid_fill`. 
+ */ +int drm_plane_create_solid_fill_property(struct drm_plane *plane) +{ + struct drm_property *prop; + + prop = drm_property_create(plane->dev, + DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB, + "solid_fill", 0); + if (!prop) + return -ENOMEM; + + drm_object_attach_property(&plane->base, prop, 0); + plane->solid_fill_property = prop; + + return 0; +} +EXPORT_SYMBOL(drm_plane_create_solid_fill_property); diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h index 122bbfbaae33..e7158fbee389 100644 --- a/include/drm/drm_blend.h +++ b/include/drm/drm_blend.h @@ -60,4 +60,5 @@ int drm_plane_create_blend_mode_property(struct drm_plane *plane, unsigned int supported_modes); int drm_plane_create_pixel_source_property(struct drm_plane *plane, unsigned long extra_sources); +int drm_plane_create_solid_fill_property(struct drm_plane *plane); #endif diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index bc0176ba25be..5bac644d4cc3 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -46,6 +46,18 @@ enum drm_plane_pixel_source { DRM_PLANE_PIXEL_SOURCE_MAX }; +/** + * struct solid_fill_property - RGB values for solid fill plane + * + * TODO: Add solid fill source and corresponding pixel source + * that supports RGBA color + */ +struct drm_solid_fill { + uint32_t r; + uint32_t g; + uint32_t b; +}; + /** * struct drm_plane_state - mutable plane state * @@ -134,6 +146,23 @@ struct drm_plane_state { */ enum drm_plane_pixel_source pixel_source; + /** + * @solid_fill_blob: + * + * Blob containing relevant information for a solid fill plane + * including RGB color values. See + * drm_plane_create_solid_fill_property() for more details. + */ + struct drm_property_blob *solid_fill_blob; + + /** + * @solid_fill: + * + * Pixel data for solid fill planes. See + * drm_plane_create_solid_fill_property() for more details. + */ + struct drm_solid_fill solid_fill; + /** * @alpha: * Opacity of the plane with 0 as completely transparent and 0xffff as @@ -734,6 +763,13 @@ struct drm_plane { */ struct drm_property *pixel_source_property; + /** + * @solid_fill_property: + * Optional solid_fill property for this plane. See + * drm_plane_create_solid_fill_property(). + */ + struct drm_property *solid_fill_property; + /** * @alpha_property: * Optional alpha property for this plane. See diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 95630f170110..cbd943a8c88b 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -259,6 +259,30 @@ struct drm_mode_modeinfo { char name[DRM_DISPLAY_MODE_LEN]; }; +/** + * struct drm_mode_solid_fill - User info for solid fill planes + * + * This is the userspace API solid fill information structure. + * + * Userspace can enable solid fill planes by assigning the plane "solid_fill" + * property to a blob containing a single drm_mode_solid_fill struct populated with an RGB323232 + * color and setting the pixel source to "SOLID_FILL". 
+ * + * For information on the plane property, see drm_plane_create_solid_fill_property() + * + * @r: Red color value of single pixel + * @g: Green color value of single pixel + * @b: Blue color value of single pixel + * @pad: padding, must be zero + */ +struct drm_mode_solid_fill { + __u32 r; + __u32 g; + __u32 b; + __u32 pad; +}; + + struct drm_mode_card_res { __u64 fb_id_ptr; __u64 crtc_id_ptr; -- cgit v1.2.3 From 4b64167042927531f4cfaf035b8f88c2f7a05f06 Mon Sep 17 00:00:00 2001 From: Jessica Zhang Date: Fri, 27 Oct 2023 15:32:53 -0700 Subject: drm: Add solid fill pixel source Add "SOLID_FILL" as a valid pixel source. If the pixel_source property is set to "SOLID_FILL", it will display data from the drm_plane "solid_fill" blob property. Reviewed-by: Dmitry Baryshkov Acked-by: Pekka Paalanen Acked-by: Harry Wentland Acked-by: Sebastian Wick Signed-off-by: Jessica Zhang Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231027-solid-fill-v7-3-780188bfa7b2@quicinc.com --- drivers/gpu/drm/drm_blend.c | 10 +++++++++- include/drm/drm_plane.h | 1 + 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index 665c5d9b056f..37b31b7e5ce5 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -204,6 +204,9 @@ * "FB": * Framebuffer source set by the "FB_ID" property. * + * "SOLID_FILL": + * Solid fill color source set by the "solid_fill" property. + * * solid_fill: * solid_fill is set up with drm_plane_create_solid_fill_property(). It * contains pixel data that drivers can use to fill a plane. @@ -642,6 +645,7 @@ EXPORT_SYMBOL(drm_plane_create_blend_mode_property); static const struct drm_prop_enum_list drm_pixel_source_enum_list[] = { { DRM_PLANE_PIXEL_SOURCE_NONE, "NONE" }, { DRM_PLANE_PIXEL_SOURCE_FB, "FB" }, + { DRM_PLANE_PIXEL_SOURCE_SOLID_FILL, "SOLID_FILL" }, }; /** @@ -666,6 +670,9 @@ static const struct drm_prop_enum_list drm_pixel_source_enum_list[] = { * "FB": * Framebuffer pixel source * + * "SOLID_FILL": + * Solid fill color pixel source + * * Returns: * Zero on success, negative errno on failure. 
*/ @@ -675,7 +682,8 @@ int drm_plane_create_pixel_source_property(struct drm_plane *plane, struct drm_device *dev = plane->dev; struct drm_property *prop; static const unsigned int valid_source_mask = BIT(DRM_PLANE_PIXEL_SOURCE_FB) | - BIT(DRM_PLANE_PIXEL_SOURCE_NONE); + BIT(DRM_PLANE_PIXEL_SOURCE_NONE) | + BIT(DRM_PLANE_PIXEL_SOURCE_SOLID_FILL); int i; /* FB is supported by default */ diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 5bac644d4cc3..4b7af4381bbe 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -43,6 +43,7 @@ enum drm_scaling_filter { enum drm_plane_pixel_source { DRM_PLANE_PIXEL_SOURCE_NONE, DRM_PLANE_PIXEL_SOURCE_FB, + DRM_PLANE_PIXEL_SOURCE_SOLID_FILL, DRM_PLANE_PIXEL_SOURCE_MAX }; -- cgit v1.2.3 From 8283ac7871a959848e09fc6593b8c12b8febfee6 Mon Sep 17 00:00:00 2001 From: Jessica Zhang Date: Fri, 27 Oct 2023 15:32:54 -0700 Subject: drm/atomic: Add pixel source to plane state dump Add pixel source to the atomic plane state dump Reviewed-by: Dmitry Baryshkov Acked-by: Harry Wentland Acked-by: Sebastian Wick Signed-off-by: Jessica Zhang Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231027-solid-fill-v7-4-780188bfa7b2@quicinc.com --- drivers/gpu/drm/drm_atomic.c | 1 + drivers/gpu/drm/drm_blend.c | 1 + drivers/gpu/drm/drm_crtc_internal.h | 1 + 3 files changed, 3 insertions(+) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index f1a503aafe5a..02aa7df832cc 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -722,6 +722,7 @@ static void drm_atomic_plane_print_state(struct drm_printer *p, drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name); drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)"); + drm_printf(p, "\tpixel-source=%s\n", drm_get_pixel_source_name(state->pixel_source)); drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0); if (state->fb) drm_framebuffer_print_info(p, 2, state->fb); diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index 37b31b7e5ce5..9c1608f7c1df 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -647,6 +647,7 @@ static const struct drm_prop_enum_list drm_pixel_source_enum_list[] = { { DRM_PLANE_PIXEL_SOURCE_FB, "FB" }, { DRM_PLANE_PIXEL_SOURCE_SOLID_FILL, "SOLID_FILL" }, }; +DRM_ENUM_NAME_FN(drm_get_pixel_source_name, drm_pixel_source_enum_list); /** * drm_plane_create_pixel_source_property - create a new pixel source property diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index a514d5207e41..4114675c0728 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -269,6 +269,7 @@ int drm_plane_check_pixel_format(struct drm_plane *plane, u32 format, u64 modifier); struct drm_mode_rect * __drm_plane_get_damage_clips(const struct drm_plane_state *state); +const char *drm_get_pixel_source_name(int val); /* drm_bridge.c */ void drm_bridge_detach(struct drm_bridge *bridge); -- cgit v1.2.3 From e86413f5442ee094e66b3e75f2d3419ed0df9520 Mon Sep 17 00:00:00 2001 From: Jessica Zhang Date: Fri, 27 Oct 2023 15:32:55 -0700 Subject: drm/atomic: Add solid fill data to plane state dump Add solid_fill property data to the atomic plane state dump. 
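On the driver side, the pixel_source and solid_fill properties introduced in the patches above are opt-in: a driver able to scan out a constant colour creates both during plane initialisation. A rough sketch with a hypothetical "foo" driver (error handling trimmed):

    /* Called from the driver's plane init path, after drm_universal_plane_init(). */
    static int foo_plane_create_color_fill_props(struct drm_plane *plane)
    {
            int ret;

            /* FB and NONE are always added; opt in to SOLID_FILL as an extra source. */
            ret = drm_plane_create_pixel_source_property(plane,
                            BIT(DRM_PLANE_PIXEL_SOURCE_SOLID_FILL));
            if (ret)
                    return ret;

            /* Blob property carrying struct drm_mode_solid_fill (r, g, b). */
            return drm_plane_create_solid_fill_property(plane);
    }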
Reviewed-by: Dmitry Baryshkov Acked-by: Harry Wentland Acked-by: Sebastian Wick Signed-off-by: Jessica Zhang Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231027-solid-fill-v7-5-780188bfa7b2@quicinc.com --- drivers/gpu/drm/drm_atomic.c | 4 ++++ drivers/gpu/drm/drm_plane.c | 8 ++++++++ include/drm/drm_plane.h | 3 +++ 3 files changed, 15 insertions(+) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 02aa7df832cc..1339785fbe80 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -726,6 +726,10 @@ static void drm_atomic_plane_print_state(struct drm_printer *p, drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0); if (state->fb) drm_framebuffer_print_info(p, 2, state->fb); + drm_printf(p, "\tsolid_fill=%u\n", + state->solid_fill_blob ? state->solid_fill_blob->base.id : 0); + if (state->solid_fill_blob) + drm_plane_solid_fill_print_info(p, 2, state); drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest)); drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src)); drm_printf(p, "\trotation=%x\n", state->rotation); diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index c8dbe5ae60cc..c65c72701dd2 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -1641,6 +1641,14 @@ __drm_plane_get_damage_clips(const struct drm_plane_state *state) state->fb_damage_clips->data : NULL); } +void drm_plane_solid_fill_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_plane_state *state) +{ + drm_printf_indent(p, indent, "r=0x%08x\n", state->solid_fill.r); + drm_printf_indent(p, indent, "g=0x%08x\n", state->solid_fill.g); + drm_printf_indent(p, indent, "b=0x%08x\n", state->solid_fill.b); +} + /** * drm_plane_get_damage_clips - Returns damage clips. * @state: Plane state. diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 4b7af4381bbe..d14e2f1db234 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -1025,6 +1025,9 @@ drm_plane_get_damage_clips_count(const struct drm_plane_state *state); struct drm_mode_rect * drm_plane_get_damage_clips(const struct drm_plane_state *state); +void drm_plane_solid_fill_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_plane_state *state); + int drm_plane_create_scaling_filter_property(struct drm_plane *plane, unsigned int supported_filters); -- cgit v1.2.3 From 4ba6b7a646321e740c7f2d80c90505019c4e8fce Mon Sep 17 00:00:00 2001 From: Jessica Zhang Date: Fri, 27 Oct 2023 15:32:56 -0700 Subject: drm/atomic: Move framebuffer checks to helper Currently framebuffer checks happen directly in drm_atomic_plane_check(). Move these checks into their own helper method. 
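With the two state-dump patches above applied, a solid-fill plane's entry in the atomic state output gains the new pixel-source and solid_fill lines; the excerpt below is purely illustrative (object IDs and colour values made up), following the drm_printf() format strings added by these patches:

    plane[31]: plane-0
            crtc=crtc-0
            pixel-source=SOLID_FILL
            fb=0
            solid_fill=42
                    r=0x0000ffff
                    g=0x00000000
                    b=0x00000000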
Reviewed-by: Dmitry Baryshkov Acked-by: Harry Wentland Acked-by: Sebastian Wick Signed-off-by: Jessica Zhang Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231027-solid-fill-v7-6-780188bfa7b2@quicinc.com --- drivers/gpu/drm/drm_atomic.c | 130 ++++++++++++++++++++++++------------------- 1 file changed, 73 insertions(+), 57 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 1339785fbe80..c6f2b86c48ae 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -589,6 +589,76 @@ plane_switching_crtc(const struct drm_plane_state *old_plane_state, return true; } +static int drm_atomic_plane_check_fb(const struct drm_plane_state *state) +{ + struct drm_plane *plane = state->plane; + const struct drm_framebuffer *fb = state->fb; + struct drm_mode_rect *clips; + + uint32_t num_clips; + unsigned int fb_width, fb_height; + int ret; + + /* Check whether this plane supports the fb pixel format. */ + ret = drm_plane_check_pixel_format(plane, fb->format->format, + fb->modifier); + + if (ret) { + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n", + plane->base.id, plane->name, + &fb->format->format, fb->modifier); + return ret; + } + + fb_width = fb->width << 16; + fb_height = fb->height << 16; + + /* Make sure source coordinates are inside the fb. */ + if (state->src_w > fb_width || + state->src_x > fb_width - state->src_w || + state->src_h > fb_height || + state->src_y > fb_height - state->src_h) { + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] invalid source coordinates " + "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", + plane->base.id, plane->name, + state->src_w >> 16, + ((state->src_w & 0xffff) * 15625) >> 10, + state->src_h >> 16, + ((state->src_h & 0xffff) * 15625) >> 10, + state->src_x >> 16, + ((state->src_x & 0xffff) * 15625) >> 10, + state->src_y >> 16, + ((state->src_y & 0xffff) * 15625) >> 10, + fb->width, fb->height); + return -ENOSPC; + } + + clips = __drm_plane_get_damage_clips(state); + num_clips = drm_plane_get_damage_clips_count(state); + + /* Make sure damage clips are valid and inside the fb. */ + while (num_clips > 0) { + if (clips->x1 >= clips->x2 || + clips->y1 >= clips->y2 || + clips->x1 < 0 || + clips->y1 < 0 || + clips->x2 > fb_width || + clips->y2 > fb_height) { + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] invalid damage clip %d %d %d %d\n", + plane->base.id, plane->name, clips->x1, + clips->y1, clips->x2, clips->y2); + return -EINVAL; + } + clips++; + num_clips--; + } + + return 0; +} + /** * drm_atomic_plane_check - check plane state * @old_plane_state: old plane state to check @@ -605,9 +675,6 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, struct drm_plane *plane = new_plane_state->plane; struct drm_crtc *crtc = new_plane_state->crtc; const struct drm_framebuffer *fb = new_plane_state->fb; - unsigned int fb_width, fb_height; - struct drm_mode_rect *clips; - uint32_t num_clips; int ret; /* either *both* CRTC and FB must be set, or neither */ @@ -634,17 +701,6 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, return -EINVAL; } - /* Check whether this plane supports the fb pixel format. 
*/ - ret = drm_plane_check_pixel_format(plane, fb->format->format, - fb->modifier); - if (ret) { - drm_dbg_atomic(plane->dev, - "[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n", - plane->base.id, plane->name, - &fb->format->format, fb->modifier); - return ret; - } - /* Give drivers some help against integer overflows */ if (new_plane_state->crtc_w > INT_MAX || new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w || @@ -658,50 +714,10 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, return -ERANGE; } - fb_width = fb->width << 16; - fb_height = fb->height << 16; - /* Make sure source coordinates are inside the fb. */ - if (new_plane_state->src_w > fb_width || - new_plane_state->src_x > fb_width - new_plane_state->src_w || - new_plane_state->src_h > fb_height || - new_plane_state->src_y > fb_height - new_plane_state->src_h) { - drm_dbg_atomic(plane->dev, - "[PLANE:%d:%s] invalid source coordinates " - "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", - plane->base.id, plane->name, - new_plane_state->src_w >> 16, - ((new_plane_state->src_w & 0xffff) * 15625) >> 10, - new_plane_state->src_h >> 16, - ((new_plane_state->src_h & 0xffff) * 15625) >> 10, - new_plane_state->src_x >> 16, - ((new_plane_state->src_x & 0xffff) * 15625) >> 10, - new_plane_state->src_y >> 16, - ((new_plane_state->src_y & 0xffff) * 15625) >> 10, - fb->width, fb->height); - return -ENOSPC; - } - - clips = __drm_plane_get_damage_clips(new_plane_state); - num_clips = drm_plane_get_damage_clips_count(new_plane_state); - - /* Make sure damage clips are valid and inside the fb. */ - while (num_clips > 0) { - if (clips->x1 >= clips->x2 || - clips->y1 >= clips->y2 || - clips->x1 < 0 || - clips->y1 < 0 || - clips->x2 > fb_width || - clips->y2 > fb_height) { - drm_dbg_atomic(plane->dev, - "[PLANE:%d:%s] invalid damage clip %d %d %d %d\n", - plane->base.id, plane->name, clips->x1, - clips->y1, clips->x2, clips->y2); - return -EINVAL; - } - clips++; - num_clips--; - } + ret = drm_atomic_plane_check_fb(new_plane_state); + if (ret) + return ret; if (plane_switching_crtc(old_plane_state, new_plane_state)) { drm_dbg_atomic(plane->dev, -- cgit v1.2.3 From f1e75da5364e780905d9cd6043f9c74cdcf84073 Mon Sep 17 00:00:00 2001 From: Jessica Zhang Date: Fri, 27 Oct 2023 15:32:57 -0700 Subject: drm/atomic: Loosen FB atomic checks Loosen the requirements for atomic and legacy commit so that, in cases where pixel_source != FB, the commit can still go through. This includes adding framebuffer NULL checks in other areas to account for FB being NULL when non-FB pixel sources are enabled. To disable a plane, the pixel_source must be NONE or the FB must be NULL if pixel_source == FB. 
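From user-space, the full solid-fill path described by this series then looks roughly like the sketch below, using libdrm's atomic API. The property IDs are assumed to have been looked up beforehand, and the enum value for "SOLID_FILL" resolved from the pixel_source property's enum list; the usual CRTC_X/Y/W/H plane properties are set as normal and omitted here for brevity:

    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    static int set_solid_fill_plane(int fd, uint32_t plane_id, uint32_t crtc_id,
                                    uint32_t prop_crtc_id, uint32_t prop_pixel_source,
                                    uint64_t val_solid_fill, uint32_t prop_solid_fill)
    {
            struct drm_mode_solid_fill fill = {
                    .r = 0xffffffff, .g = 0, .b = 0,  /* red, 32 bits per channel, pad stays 0 */
            };
            drmModeAtomicReq *req;
            uint32_t blob_id = 0;
            int ret;

            req = drmModeAtomicAlloc();
            if (!req)
                    return -1;

            ret = drmModeCreatePropertyBlob(fd, &fill, sizeof(fill), &blob_id);
            if (ret)
                    goto out;

            drmModeAtomicAddProperty(req, plane_id, prop_crtc_id, crtc_id);
            drmModeAtomicAddProperty(req, plane_id, prop_pixel_source, val_solid_fill);
            drmModeAtomicAddProperty(req, plane_id, prop_solid_fill, blob_id);
            /* No FB_ID is needed: with a non-FB pixel source the plane stays enabled. */

            ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
    out:
            drmModeAtomicFree(req);
            return ret;
    }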
Signed-off-by: Jessica Zhang Reviewed-by: Dmitry Baryshkov Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231027-solid-fill-v7-7-780188bfa7b2@quicinc.com --- drivers/gpu/drm/drm_atomic.c | 21 ++++++++++---------- drivers/gpu/drm/drm_atomic_helper.c | 39 +++++++++++++++++++++---------------- include/drm/drm_atomic_helper.h | 4 ++-- include/drm/drm_plane.h | 29 +++++++++++++++++++++++++++ 4 files changed, 64 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index c6f2b86c48ae..aed0a694c74c 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -674,17 +674,16 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, { struct drm_plane *plane = new_plane_state->plane; struct drm_crtc *crtc = new_plane_state->crtc; - const struct drm_framebuffer *fb = new_plane_state->fb; int ret; - /* either *both* CRTC and FB must be set, or neither */ - if (crtc && !fb) { - drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] CRTC set but no FB\n", + /* either *both* CRTC and pixel source must be set, or neither */ + if (crtc && !drm_plane_has_visible_data(new_plane_state)) { + drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] CRTC set but no visible data\n", plane->base.id, plane->name); return -EINVAL; - } else if (fb && !crtc) { - drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] FB set but no CRTC\n", - plane->base.id, plane->name); + } else if (drm_plane_has_visible_data(new_plane_state) && !crtc) { + drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] Source %d has visible data but no CRTC\n", + plane->base.id, plane->name, new_plane_state->pixel_source); return -EINVAL; } @@ -715,9 +714,11 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, } - ret = drm_atomic_plane_check_fb(new_plane_state); - if (ret) - return ret; + if (new_plane_state->pixel_source == DRM_PLANE_PIXEL_SOURCE_FB && new_plane_state->fb) { + ret = drm_atomic_plane_check_fb(new_plane_state); + if (ret) + return ret; + } if (plane_switching_crtc(old_plane_state, new_plane_state)) { drm_dbg_atomic(plane->dev, diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index c3f677130def..dc048988e3f3 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -861,7 +861,6 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, bool can_position, bool can_update_disabled) { - struct drm_framebuffer *fb = plane_state->fb; struct drm_rect *src = &plane_state->src; struct drm_rect *dst = &plane_state->dst; unsigned int rotation = plane_state->rotation; @@ -873,7 +872,7 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, *src = drm_plane_state_src(plane_state); *dst = drm_plane_state_dest(plane_state); - if (!fb) { + if (!drm_plane_has_visible_data(plane_state)) { plane_state->visible = false; return 0; } @@ -890,25 +889,31 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, return -EINVAL; } - drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation); + /* Check that selected pixel source is valid */ + if (plane_state->pixel_source == DRM_PLANE_PIXEL_SOURCE_FB && plane_state->fb) { + struct drm_framebuffer *fb = plane_state->fb; + drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation); - /* Check scaling */ - hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); - vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale); - if 
(hscale < 0 || vscale < 0) { - drm_dbg_kms(plane_state->plane->dev, - "Invalid scaling of plane\n"); - drm_rect_debug_print("src: ", &plane_state->src, true); - drm_rect_debug_print("dst: ", &plane_state->dst, false); - return -ERANGE; - } + /* Check scaling */ + hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); + vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale); - if (crtc_state->enable) - drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2); + if (hscale < 0 || vscale < 0) { + drm_dbg_kms(plane_state->plane->dev, + "Invalid scaling of plane\n"); + drm_rect_debug_print("src: ", &plane_state->src, true); + drm_rect_debug_print("dst: ", &plane_state->dst, false); + return -ERANGE; + } - plane_state->visible = drm_rect_clip_scaled(src, dst, &clip); + if (crtc_state->enable) + drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2); - drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation); + plane_state->visible = drm_rect_clip_scaled(src, dst, &clip); + drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation); + } else if (drm_plane_solid_fill_enabled(plane_state)) { + plane_state->visible = true; + } if (!plane_state->visible) /* diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 536a0b0091c3..6d97f38ac1f6 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -256,8 +256,8 @@ drm_atomic_plane_disabling(struct drm_plane_state *old_plane_state, * Anything else should be considered a bug in the atomic core, so we * gently warn about it. */ - WARN_ON((new_plane_state->crtc == NULL && new_plane_state->fb != NULL) || - (new_plane_state->crtc != NULL && new_plane_state->fb == NULL)); + WARN_ON((new_plane_state->crtc == NULL && drm_plane_has_visible_data(new_plane_state)) || + (new_plane_state->crtc != NULL && !drm_plane_has_visible_data(new_plane_state))); return old_plane_state->crtc && !new_plane_state->crtc; } diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index d14e2f1db234..3b187f3f5466 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -1016,6 +1016,35 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev, #define drm_for_each_plane(plane, dev) \ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) +/** + * drm_plane_solid_fill_enabled - Check if solid fill is enabled on plane + * @state: plane state + * + * Returns: + * Whether the plane has been assigned a solid_fill_blob + */ +static inline bool drm_plane_solid_fill_enabled(struct drm_plane_state *state) +{ + if (!state) + return false; + return state->pixel_source == DRM_PLANE_PIXEL_SOURCE_SOLID_FILL && state->solid_fill_blob; +} + +static inline bool drm_plane_has_visible_data(const struct drm_plane_state *state) +{ + switch (state->pixel_source) { + case DRM_PLANE_PIXEL_SOURCE_NONE: + return false; + case DRM_PLANE_PIXEL_SOURCE_SOLID_FILL: + return state->solid_fill_blob != NULL; + case DRM_PLANE_PIXEL_SOURCE_FB: + default: + WARN_ON(state->pixel_source != DRM_PLANE_PIXEL_SOURCE_FB); + } + + return state->fb != NULL; +} + bool drm_any_plane_has_format(struct drm_device *dev, u32 format, u64 modifier); -- cgit v1.2.3 From 5d86c15c3171c3ecebd84d53e30d9812b5591c84 Mon Sep 17 00:00:00 2001 From: Alex Bee Date: Sat, 2 Dec 2023 13:51:42 +0100 Subject: dt-bindings: gpu: mali-utgard: Add Rockchip RK3128 compatible Rockchip RK312x SoC family has a Mali400 MP2. Add a compatible for it. 
Signed-off-by: Alex Bee Acked-by: Krzysztof Kozlowski Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20231202125144.66052-4-knaerzche@gmail.com --- Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml index 0fae1ef013be..abd4aa335fbc 100644 --- a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml +++ b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml @@ -29,6 +29,7 @@ properties: - allwinner,sun50i-a64-mali - rockchip,rk3036-mali - rockchip,rk3066-mali + - rockchip,rk3128-mali - rockchip,rk3188-mali - rockchip,rk3228-mali - samsung,exynos4210-mali -- cgit v1.2.3 From a5b2dcb96d6acb286459612a142371b0d74543bf Mon Sep 17 00:00:00 2001 From: Abhinav Kumar Date: Wed, 20 Sep 2023 13:13:58 -0700 Subject: drm: improve the documentation of connector hpd ops While making the changes in [1], it was noted that the documentation of the enable_hpd() and disable_hpd() does not make it clear that these ops should not try to do hpd state maintenance and should only enable/disable hpd related hardware for the connector. The state management of these calls to make sure these calls are balanced is handled by the DRM core and we should keep it that way to minimize the overhead in the drivers which implement these ops. [1]: https://patchwork.freedesktop.org/patch/558387/ Signed-off-by: Abhinav Kumar Suggested-by: Laurent Pinchart Reviewed-by: Laurent Pinchart Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20230920201358.27597-1-quic_abhinavk@quicinc.com --- include/drm/drm_modeset_helper_vtables.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index e3c3ac615909..a33cf7488737 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -1154,6 +1154,11 @@ struct drm_connector_helper_funcs { * This operation is optional. * * This callback is used by the drm_kms_helper_poll_enable() helpers. + * + * This operation does not need to perform any hpd state tracking as + * the DRM core handles that maintenance and ensures the calls to enable + * and disable hpd are balanced. + * */ void (*enable_hpd)(struct drm_connector *connector); @@ -1165,6 +1170,11 @@ struct drm_connector_helper_funcs { * This operation is optional. * * This callback is used by the drm_kms_helper_poll_disable() helpers. + * + * This operation does not need to perform any hpd state tracking as + * the DRM core handles that maintenance and ensures the calls to enable + * and disable hpd are balanced. + * */ void (*disable_hpd)(struct drm_connector *connector); }; -- cgit v1.2.3 From f730e7adfd69d7ac859d8fe4d67e980cbad1e445 Mon Sep 17 00:00:00 2001 From: Abhinav Kumar Date: Tue, 19 Sep 2023 10:48:12 -0700 Subject: drm: remove drm_bridge_hpd_disable() from drm_bridge_connector_destroy() drm_bridge_hpd_enable()/drm_bridge_hpd_disable() callbacks call into the respective driver's hpd_enable()/hpd_disable() ops. These ops control the HPD enable/disable logic which in some cases like MSM can be a dedicate hardware block to control the HPD. During probe_defer cases, a connector can be initialized and then later destroyed till the probe is retried. 
During connector destroy in these cases, the hpd_disable() callback gets called without a corresponding hpd_enable() leading to an unbalanced state potentially causing even a crash. This can be avoided by the respective drivers maintaining their own state logic to ensure that a hpd_disable() without a corresponding hpd_enable() just returns without doing anything. However, to have a generic fix it would be better to avoid the hpd_disable() callback from the connector destroy path and let the hpd_enable() / hpd_disable() balance be maintained by the corresponding drm_bridge_connector_enable_hpd() / drm_bridge_connector_disable_hpd() APIs which should get called by drm_kms_helper_disable_hpd(). changes in v2: - minor change in commit text (Dmitry) Signed-off-by: Abhinav Kumar Reviewed-by: Dmitry Baryshkov Reviewed-by: Laurent Pinchart Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20230919174813.26958-1-quic_abhinavk@quicinc.com --- drivers/gpu/drm/drm_bridge_connector.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c index 8239ad43aed5..3acd67021ec6 100644 --- a/drivers/gpu/drm/drm_bridge_connector.c +++ b/drivers/gpu/drm/drm_bridge_connector.c @@ -198,12 +198,6 @@ static void drm_bridge_connector_destroy(struct drm_connector *connector) struct drm_bridge_connector *bridge_connector = to_drm_bridge_connector(connector); - if (bridge_connector->bridge_hpd) { - struct drm_bridge *hpd = bridge_connector->bridge_hpd; - - drm_bridge_hpd_disable(hpd); - } - drm_connector_unregister(connector); drm_connector_cleanup(connector); -- cgit v1.2.3 From 93032ae634d409e621c68a2fb7d6930e7eebb1d9 Mon Sep 17 00:00:00 2001 From: Marco Pagani Date: Thu, 30 Nov 2023 18:14:16 +0100 Subject: drm/test: add a test suite for GEM objects backed by shmem This patch introduces an initial KUnit test suite for GEM objects backed by shmem buffers. 
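As a quick orientation for readers, the driver-facing API surface this suite exercises can be sketched as follows (an illustrative sketch only, not part of the patch; the function name and size are placeholders, and the calls mirror the ones used by the tests below):

    #include <linux/err.h>
    #include <linux/iosys-map.h>
    #include <drm/drm_gem_shmem_helper.h>

    static int example_use_shmem_gem(struct drm_device *drm, size_t size)
    {
            struct drm_gem_shmem_object *shmem;
            struct iosys_map map;
            int ret;

            /* Allocate a GEM object backed by shmem pages. */
            shmem = drm_gem_shmem_create(drm, size);
            if (IS_ERR(shmem))
                    return PTR_ERR(shmem);

            /* Map it into the kernel address space and use it. */
            ret = drm_gem_shmem_vmap(shmem, &map);
            if (ret)
                    goto out_free;

            /* ... access the buffer through 'map' ... */

            drm_gem_shmem_vunmap(shmem, &map);
    out_free:
            drm_gem_shmem_free(shmem);
            return ret;
    }

The suite can typically be run through the KUnit wrapper, e.g. ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/tests, assuming the in-tree DRM kunitconfig is present.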
Suggested-by: Javier Martinez Canillas Signed-off-by: Marco Pagani v5: - using __drm_kunit_helper_alloc_drm_device() to avoid local struct v4: - Add missing MMU dependency for DRM_GEM_SHMEM_HELPER (kernel test robot) v3: - Explicitly cast pointers in the helpers - Removed unused pointer to parent dev in struct fake_dev - Test entries reordering in Kconfig and Makefile sent as a separate patch v2: - Improved description of test cases - Cleaner error handling using KUnit actions - Alphabetical order in Kconfig and Makefile Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20231130171417.74162-1-marpagan@redhat.com --- drivers/gpu/drm/Kconfig | 3 +- drivers/gpu/drm/tests/Makefile | 1 + drivers/gpu/drm/tests/drm_gem_shmem_test.c | 383 +++++++++++++++++++++++++++++ 3 files changed, 386 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/tests/drm_gem_shmem_test.c diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 740c1c0bd068..b7abd436455f 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -74,12 +74,13 @@ config DRM_KUNIT_TEST_HELPERS config DRM_KUNIT_TEST tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS - depends on DRM && KUNIT + depends on DRM && KUNIT && MMU select DRM_BUDDY select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HELPER select DRM_EXEC select DRM_EXPORT_FOR_TESTS if m + select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER select DRM_KUNIT_TEST_HELPERS select DRM_LIB_RANDOM diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile index 2645af241ff0..d6183b3d7688 100644 --- a/drivers/gpu/drm/tests/Makefile +++ b/drivers/gpu/drm/tests/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \ drm_format_helper_test.o \ drm_format_test.o \ drm_framebuffer_test.o \ + drm_gem_shmem_test.o \ drm_managed_test.o \ drm_mm_test.o \ drm_modes_test.o \ diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c new file mode 100644 index 000000000000..91202e40cde9 --- /dev/null +++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit test suite for GEM objects backed by shmem buffers + * + * Copyright (C) 2023 Red Hat, Inc. + * + * Author: Marco Pagani + */ + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#define TEST_SIZE SZ_1M +#define TEST_BYTE 0xae + +/* + * Wrappers to avoid an explicit type casting when passing action + * functions to kunit_add_action(). + */ +static void kfree_wrapper(void *ptr) +{ + const void *obj = ptr; + + kfree(obj); +} + +static void sg_free_table_wrapper(void *ptr) +{ + struct sg_table *sgt = ptr; + + sg_free_table(sgt); +} + +static void drm_gem_shmem_free_wrapper(void *ptr) +{ + struct drm_gem_shmem_object *shmem = ptr; + + drm_gem_shmem_free(shmem); +} + +/* + * Test creating a shmem GEM object backed by shmem buffer. The test + * case succeeds if the GEM object is successfully allocated with the + * shmem file node and object functions attributes set, and the size + * attribute is equal to the correct size. 
+ */ +static void drm_gem_shmem_test_obj_create(struct kunit *test) +{ + struct drm_device *drm_dev = test->priv; + struct drm_gem_shmem_object *shmem; + + shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); + KUNIT_EXPECT_EQ(test, shmem->base.size, TEST_SIZE); + KUNIT_EXPECT_NOT_NULL(test, shmem->base.filp); + KUNIT_EXPECT_NOT_NULL(test, shmem->base.funcs); + + drm_gem_shmem_free(shmem); +} + +/* + * Test creating a shmem GEM object from a scatter/gather table exported + * via a DMA-BUF. The test case succeed if the GEM object is successfully + * created with the shmem file node attribute equal to NULL and the sgt + * attribute pointing to the scatter/gather table that has been imported. + */ +static void drm_gem_shmem_test_obj_create_private(struct kunit *test) +{ + struct drm_device *drm_dev = test->priv; + struct drm_gem_shmem_object *shmem; + struct drm_gem_object *gem_obj; + struct dma_buf buf_mock; + struct dma_buf_attachment attach_mock; + struct sg_table *sgt; + char *buf; + int ret; + + /* Create a mock scatter/gather table */ + buf = kunit_kzalloc(test, TEST_SIZE, GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, buf); + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, sgt); + + ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt); + KUNIT_ASSERT_EQ(test, ret, 0); + + sg_init_one(sgt->sgl, buf, TEST_SIZE); + + /* Init a mock DMA-BUF */ + buf_mock.size = TEST_SIZE; + attach_mock.dmabuf = &buf_mock; + + gem_obj = drm_gem_shmem_prime_import_sg_table(drm_dev, &attach_mock, sgt); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gem_obj); + KUNIT_EXPECT_EQ(test, gem_obj->size, TEST_SIZE); + KUNIT_EXPECT_NULL(test, gem_obj->filp); + KUNIT_EXPECT_NOT_NULL(test, gem_obj->funcs); + + /* The scatter/gather table will be freed by drm_gem_shmem_free */ + kunit_remove_action(test, sg_free_table_wrapper, sgt); + kunit_remove_action(test, kfree_wrapper, sgt); + + shmem = to_drm_gem_shmem_obj(gem_obj); + KUNIT_EXPECT_PTR_EQ(test, shmem->sgt, sgt); + + drm_gem_shmem_free(shmem); +} + +/* + * Test pinning backing pages for a shmem GEM object. The test case + * succeeds if a suitable number of backing pages are allocated, and + * the pages table counter attribute is increased by one. + */ +static void drm_gem_shmem_test_pin_pages(struct kunit *test) +{ + struct drm_device *drm_dev = test->priv; + struct drm_gem_shmem_object *shmem; + int i, ret; + + shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); + KUNIT_EXPECT_NULL(test, shmem->pages); + KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0); + + ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = drm_gem_shmem_pin(shmem); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_ASSERT_NOT_NULL(test, shmem->pages); + KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1); + + for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++) + KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]); + + drm_gem_shmem_unpin(shmem); + KUNIT_EXPECT_NULL(test, shmem->pages); + KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0); +} + +/* + * Test creating a virtual mapping for a shmem GEM object. The test + * case succeeds if the backing memory is mapped and the reference + * counter for virtual mapping is increased by one. 
Moreover, the test + * case writes and then reads a test pattern over the mapped memory. + */ +static void drm_gem_shmem_test_vmap(struct kunit *test) +{ + struct drm_device *drm_dev = test->priv; + struct drm_gem_shmem_object *shmem; + struct iosys_map map; + int ret, i; + + shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); + KUNIT_EXPECT_NULL(test, shmem->vaddr); + KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0); + + ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = drm_gem_shmem_vmap(shmem, &map); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr); + KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map)); + KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1); + + iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE); + for (i = 0; i < TEST_SIZE; i++) + KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE); + + drm_gem_shmem_vunmap(shmem, &map); + KUNIT_EXPECT_NULL(test, shmem->vaddr); + KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0); +} + +/* + * Test exporting a scatter/gather table of pinned pages suitable for + * PRIME usage from a shmem GEM object. The test case succeeds if a + * scatter/gather table large enough to accommodate the backing memory + * is successfully exported. + */ +static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test) +{ + struct drm_device *drm_dev = test->priv; + struct drm_gem_shmem_object *shmem; + struct sg_table *sgt; + struct scatterlist *sg; + unsigned int si, len = 0; + int ret; + + shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); + + ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = drm_gem_shmem_pin(shmem); + KUNIT_ASSERT_EQ(test, ret, 0); + + sgt = drm_gem_shmem_get_sg_table(shmem); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt); + KUNIT_EXPECT_NULL(test, shmem->sgt); + + ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt); + KUNIT_ASSERT_EQ(test, ret, 0); + + for_each_sgtable_sg(sgt, sg, si) { + KUNIT_EXPECT_NOT_NULL(test, sg); + len += sg->length; + } + + KUNIT_EXPECT_GE(test, len, TEST_SIZE); +} + +/* + * Test pinning pages and exporting a scatter/gather table suitable for + * driver usage from a shmem GEM object. The test case succeeds if the + * backing pages are pinned and a scatter/gather table large enough to + * accommodate the backing memory is successfully exported. + */ +static void drm_gem_shmem_test_get_sg_table(struct kunit *test) +{ + struct drm_device *drm_dev = test->priv; + struct drm_gem_shmem_object *shmem; + struct sg_table *sgt; + struct scatterlist *sg; + unsigned int si, ret, len = 0; + + shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); + + ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); + KUNIT_ASSERT_EQ(test, ret, 0); + + /* The scatter/gather table will be freed by drm_gem_shmem_free */ + sgt = drm_gem_shmem_get_pages_sgt(shmem); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt); + KUNIT_ASSERT_NOT_NULL(test, shmem->pages); + KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1); + KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt); + + for_each_sgtable_sg(sgt, sg, si) { + KUNIT_EXPECT_NOT_NULL(test, sg); + len += sg->length; + } + + KUNIT_EXPECT_GE(test, len, TEST_SIZE); +} + +/* + * Test updating the madvise state of a shmem GEM object. 
The test + * case checks that the function for setting madv updates it only if + * its current value is greater or equal than zero and returns false + * if it has a negative value. + */ +static void drm_gem_shmem_test_madvise(struct kunit *test) +{ + struct drm_device *drm_dev = test->priv; + struct drm_gem_shmem_object *shmem; + int ret; + + shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); + KUNIT_ASSERT_EQ(test, shmem->madv, 0); + + ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = drm_gem_shmem_madvise(shmem, 1); + KUNIT_EXPECT_TRUE(test, ret); + KUNIT_ASSERT_EQ(test, shmem->madv, 1); + + /* Set madv to a negative value */ + ret = drm_gem_shmem_madvise(shmem, -1); + KUNIT_EXPECT_FALSE(test, ret); + KUNIT_ASSERT_EQ(test, shmem->madv, -1); + + /* Check that madv cannot be set back to a positive value */ + ret = drm_gem_shmem_madvise(shmem, 0); + KUNIT_EXPECT_FALSE(test, ret); + KUNIT_ASSERT_EQ(test, shmem->madv, -1); +} + +/* + * Test purging a shmem GEM object. First, assert that a newly created + * shmem GEM object is not purgeable. Then, set madvise to a positive + * value and call drm_gem_shmem_get_pages_sgt() to pin and dma-map the + * backing pages. Finally, assert that the shmem GEM object is now + * purgeable and purge it. + */ +static void drm_gem_shmem_test_purge(struct kunit *test) +{ + struct drm_device *drm_dev = test->priv; + struct drm_gem_shmem_object *shmem; + struct sg_table *sgt; + int ret; + + shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); + + ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = drm_gem_shmem_is_purgeable(shmem); + KUNIT_EXPECT_FALSE(test, ret); + + ret = drm_gem_shmem_madvise(shmem, 1); + KUNIT_EXPECT_TRUE(test, ret); + + /* The scatter/gather table will be freed by drm_gem_shmem_free */ + sgt = drm_gem_shmem_get_pages_sgt(shmem); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt); + + ret = drm_gem_shmem_is_purgeable(shmem); + KUNIT_EXPECT_TRUE(test, ret); + + drm_gem_shmem_purge(shmem); + KUNIT_EXPECT_NULL(test, shmem->pages); + KUNIT_EXPECT_NULL(test, shmem->sgt); + KUNIT_EXPECT_EQ(test, shmem->madv, -1); +} + +static int drm_gem_shmem_test_init(struct kunit *test) +{ + struct device *dev; + struct drm_device *drm_dev; + + /* Allocate a parent device */ + dev = drm_kunit_helper_alloc_device(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); + + /* + * The DRM core will automatically initialize the GEM core and create + * a DRM Memory Manager object which provides an address space pool + * for GEM objects allocation. 
+ */ + drm_dev = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm_dev), + 0, DRIVER_GEM); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm_dev); + + test->priv = drm_dev; + + return 0; +} + +static struct kunit_case drm_gem_shmem_test_cases[] = { + KUNIT_CASE(drm_gem_shmem_test_obj_create), + KUNIT_CASE(drm_gem_shmem_test_obj_create_private), + KUNIT_CASE(drm_gem_shmem_test_pin_pages), + KUNIT_CASE(drm_gem_shmem_test_vmap), + KUNIT_CASE(drm_gem_shmem_test_get_pages_sgt), + KUNIT_CASE(drm_gem_shmem_test_get_sg_table), + KUNIT_CASE(drm_gem_shmem_test_madvise), + KUNIT_CASE(drm_gem_shmem_test_purge), + {} +}; + +static struct kunit_suite drm_gem_shmem_suite = { + .name = "drm_gem_shmem", + .init = drm_gem_shmem_test_init, + .test_cases = drm_gem_shmem_test_cases +}; + +kunit_test_suite(drm_gem_shmem_suite); + +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 51097ef14d4e555c532ae535d24f97cc19c8c5a6 Mon Sep 17 00:00:00 2001 From: Donald Robson Date: Thu, 30 Nov 2023 16:00:13 +0000 Subject: drm/imagination: Fixed warning due to implicit cast to bool This line appears to confuse the compiler and had been noticed previously in clang-tidy output. There isn't anything fundamentally wrong that I can see. I suspect that it just looks like a mistake - hence the first note. By making the second operand an actual bool result, const correctness can be preserved while silencing the warning. >> drivers/gpu/drm/imagination/pvr_device_info.c:230:47: warning: use of logical '&&' with constant operand [-Wconstant-logical-operand] 230 | } else if (features_size == mapping_max_size && (mapping_max & 63)) { | ^ ~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/imagination/pvr_device_info.c:230:47: note: use '&' for a bitwise operation 230 | } else if (features_size == mapping_max_size && (mapping_max & 63)) { | ^~ | & drivers/gpu/drm/imagination/pvr_device_info.c:230:47: note: remove constant to silence this warning 230 | } else if (features_size == mapping_max_size && (mapping_max & 63)) { | ~^~~~~~~~~~~~~~~~~~~~~ 1 warning generated. Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202311241752.3iLyyFcA-lkp@intel.com/ Fixes: f99f5f3ea7ef ("drm/imagination: Add GPU ID parsing and firmware loading") Signed-off-by: Donald Robson Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20231130160017.259902-1-donald.robson@imgtec.com --- drivers/gpu/drm/imagination/pvr_device_info.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/imagination/pvr_device_info.c b/drivers/gpu/drm/imagination/pvr_device_info.c index 11e6bef52ecd..d3301cde7d11 100644 --- a/drivers/gpu/drm/imagination/pvr_device_info.c +++ b/drivers/gpu/drm/imagination/pvr_device_info.c @@ -227,7 +227,8 @@ int pvr_device_info_set_features(struct pvr_device *pvr_dev, const u64 *features /* Verify no unsupported values in the bitmask. 
*/ if (features_size > mapping_max_size) { drm_warn(from_pvr_device(pvr_dev), "Unsupported features in firmware image"); - } else if (features_size == mapping_max_size && (mapping_max & 63)) { + } else if (features_size == mapping_max_size && + ((mapping_max & 63) != 0)) { u64 invalid_mask = ~0ull << (mapping_max & 63); if (features[features_size - 1] & invalid_mask) -- cgit v1.2.3 From 0ffe9eb826f1391d52089ba8056a3778688da57d Mon Sep 17 00:00:00 2001 From: Donald Robson Date: Thu, 30 Nov 2023 16:00:14 +0000 Subject: drm/imagination: Fixed missing header in pvr_fw_meta A missing header causes the compiler to warn that the function below is not forward declared. >> drivers/gpu/drm/imagination/pvr_fw_meta.c:33:1: warning: no previous prototype for function 'pvr_meta_cr_read32' [-Wmissing-prototypes] 33 | pvr_meta_cr_read32(struct pvr_device *pvr_dev, u32 reg_addr, u32 *reg_value_out) | ^ drivers/gpu/drm/imagination/pvr_fw_meta.c:32:1: note: declare 'static' if the function is not intended to be used outside of this translation unit 32 | int | ^ | static 1 warning generated. Include the correct header. Reported-by: Arnd Bergmann Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202311250226.Da2yiSKp-lkp@intel.com/ Fixes: cc1aeedb98ad ("drm/imagination: Implement firmware infrastructure and META FW support") Signed-off-by: Donald Robson Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20231130160017.259902-2-donald.robson@imgtec.com --- drivers/gpu/drm/imagination/pvr_fw_meta.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c index 119934c36184..c39beb70c317 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_meta.c +++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c @@ -4,6 +4,7 @@ #include "pvr_device.h" #include "pvr_fw.h" #include "pvr_fw_info.h" +#include "pvr_fw_meta.h" #include "pvr_gem.h" #include "pvr_rogue_cr_defs.h" #include "pvr_rogue_meta.h" -- cgit v1.2.3 From 7620c6bd76b1076b104926b78da8d6ff17cfef5d Mon Sep 17 00:00:00 2001 From: Donald Robson Date: Thu, 30 Nov 2023 16:00:15 +0000 Subject: drm/imagination: pvr_device_process_active_queues now static The function below is used only within this source file, but is not static. >> drivers/gpu/drm/imagination/pvr_device.c:129:6: warning: no previous prototype for function 'pvr_device_process_active_queues' [-Wmissing-prototypes] 129 | void pvr_device_process_active_queues(struct pvr_device *pvr_dev) | ^ drivers/gpu/drm/imagination/pvr_device.c:129:1: note: declare 'static' if the function is not intended to be used outside of this translation unit 129 | void pvr_device_process_active_queues(struct pvr_device *pvr_dev) | ^ | static 1 warning generated. Make it static. 
Reported-by: Arnd Bergmann Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202311250632.giVEx7MU-lkp@intel.com/ Fixes: eaf01ee5ba28 ("drm/imagination: Implement job submission and scheduling") Signed-off-by: Donald Robson Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20231130160017.259902-3-donald.robson@imgtec.com --- drivers/gpu/drm/imagination/pvr_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c index 5389aea7ff21..1704c0268589 100644 --- a/drivers/gpu/drm/imagination/pvr_device.c +++ b/drivers/gpu/drm/imagination/pvr_device.c @@ -127,7 +127,7 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev) * This is called any time we receive a FW event. It iterates over all * active queues and calls pvr_queue_process() on them. */ -void pvr_device_process_active_queues(struct pvr_device *pvr_dev) +static void pvr_device_process_active_queues(struct pvr_device *pvr_dev) { struct pvr_queue *queue, *tmp_queue; LIST_HEAD(active_queues); -- cgit v1.2.3 From e8878b8043a25a19d0b405a29652a0cb94f56cdb Mon Sep 17 00:00:00 2001 From: Donald Robson Date: Thu, 30 Nov 2023 16:00:16 +0000 Subject: drm/imagination: pvr_gpuvm_free() now static The function below is used only within this source file, but is not static. drivers/gpu/drm/imagination/pvr_vm.c:542:6: error: no previous prototype for 'pvr_gpuvm_free' [-Werror=missing-prototypes] 542 | void pvr_gpuvm_free(struct drm_gpuvm *gpuvm) Make it static. Reported-by: Arnd Bergmann Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202311242159.hh8MWiAm-lkp@intel.com/ Fixes: ff5f643de0bf ("drm/imagination: Add GEM and VM related code") Signed-off-by: Donald Robson Link: https://lore.kernel.org/r/20231130160017.259902-4-donald.robson@imgtec.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/imagination/pvr_vm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 2aab53594a77..30ecd7d7052e 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -539,7 +539,7 @@ pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx, (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE); } -void pvr_gpuvm_free(struct drm_gpuvm *gpuvm) +static void pvr_gpuvm_free(struct drm_gpuvm *gpuvm) { kfree(to_pvr_vm_context(gpuvm)); } -- cgit v1.2.3 From 72ef65ab246e55847097d68e0964fbcdfff4366c Mon Sep 17 00:00:00 2001 From: Donald Robson Date: Thu, 30 Nov 2023 16:00:17 +0000 Subject: drm/imagination: Removed unused function to_pvr_vm_gpuva() This function is now unused, hence it causes a compiler warning. drivers/gpu/drm/imagination/pvr_vm.c:112:22: warning: unused function 'to_pvr_vm_gpuva' [-Wunused-function] 112 | struct pvr_vm_gpuva *to_pvr_vm_gpuva(struct drm_gpuva *gpuva) | ^ Remove the function for now. 
Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202311242159.hh8MWiAm-lkp@intel.com/ Fixes: ff5f643de0bf ("drm/imagination: Add GEM and VM related code") Signed-off-by: Donald Robson Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20231130160017.259902-5-donald.robson@imgtec.com --- drivers/gpu/drm/imagination/pvr_vm.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 30ecd7d7052e..6f4e07effad2 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -114,12 +114,6 @@ struct pvr_vm_gpuva { struct drm_gpuva base; }; -static __always_inline -struct pvr_vm_gpuva *to_pvr_vm_gpuva(struct drm_gpuva *gpuva) -{ - return container_of(gpuva, struct pvr_vm_gpuva, base); -} - enum pvr_vm_bind_type { PVR_VM_BIND_TYPE_MAP, PVR_VM_BIND_TYPE_UNMAP, -- cgit v1.2.3 From 5f8dec200923a76dc57187965fd59c1136f5d085 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Sun, 3 Dec 2023 01:55:52 +0300 Subject: drm/drv: propagate errors from drm_modeset_register_all() In case the drm_modeset_register_all() function fails, its error code will be ignored. Instead make the drm_dev_register() bail out in case of such an error. Fixes: 79190ea2658a ("drm: Add callbacks for late registering") Reviewed-by: Neil Armstrong Signed-off-by: Dmitry Baryshkov Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20231202225552.1283638-1-dmitry.baryshkov@linaro.org --- drivers/gpu/drm/drm_drv.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 535f16e7882e..3c835c99daad 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -949,8 +949,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) goto err_minors; } - if (drm_core_check_feature(dev, DRIVER_MODESET)) - drm_modeset_register_all(dev); + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = drm_modeset_register_all(dev); + if (ret) + goto err_unload; + } DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", driver->name, driver->major, driver->minor, @@ -960,6 +963,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) goto out_unlock; +err_unload: + if (dev->driver->unload) + dev->driver->unload(dev); err_minors: remove_compat_control_link(dev); drm_minor_unregister(dev, DRM_MINOR_ACCEL); -- cgit v1.2.3 From b1dba0b13c0aa93d22f8ef8cb082a4f32e5ab1f6 Mon Sep 17 00:00:00 2001 From: heminhong Date: Fri, 10 Nov 2023 13:50:31 +0800 Subject: drm/qxl: remove unused declaration Some functions are never used by the driver, removing the functions declaration, it can be reducing program size, and improving code readability and maintainability. 
Signed-off-by: heminhong Link: https://lore.kernel.org/r/20231110055031.57360-1-heminhong@kylinos.cn Signed-off-by: Maxime Ripard --- drivers/gpu/drm/qxl/qxl_drv.h | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 307a890fde13..32069acd93f8 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -119,7 +119,6 @@ struct qxl_output { #define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base) #define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base) -#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc) struct qxl_mman { struct ttm_device bdev; @@ -256,8 +255,6 @@ struct qxl_device { #define to_qxl(dev) container_of(dev, struct qxl_device, ddev) -int qxl_debugfs_fence_init(struct qxl_device *rdev); - int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev); void qxl_device_fini(struct qxl_device *qdev); @@ -344,8 +341,6 @@ qxl_image_alloc_objects(struct qxl_device *qdev, int height, int stride); void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage); -void qxl_update_screen(struct qxl_device *qxl); - /* qxl io operations (qxl_cmd.c) */ void qxl_io_create_primary(struct qxl_device *qdev, @@ -445,8 +440,6 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev, int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo); -struct qxl_drv_surface * -qxl_surface_lookup(struct drm_device *dev, int surface_id); void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing); /* qxl_ioctl.c */ -- cgit v1.2.3 From e4256751df4a0a3860f181588ee730dd19cb0c30 Mon Sep 17 00:00:00 2001 From: Khaled Almahallawy Date: Thu, 30 Nov 2023 15:15:10 -0800 Subject: drm/display/dp: Add the remaining Square PHY patterns DPCD register definitions DP2.1 Specs added new DPCDs definitions for square pattern configs[1] These new definitions are used for UHBR Source Transmitter Equalizations tests[2]. Add the 3 new values for square pattern. v2: rebase [1]: DP2.1 Specs - 2.12.3.6.5 Square Pattern [2]: DP2.1 PHY CTS specs - 4.3 UHBR Source Transmitter Equalization Cc: Jani Nikula Cc: Imre Deak Cc: Lee Shawn C Signed-off-by: Khaled Almahallawy Reviewed-by: Jani Nikula Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20231130231510.221143-1-khaled.almahallawy@intel.com --- include/drm/display/drm_dp.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h index 83d2039c018b..3731828825bd 100644 --- a/include/drm/display/drm_dp.h +++ b/include/drm/display/drm_dp.h @@ -651,6 +651,9 @@ # define DP_LINK_QUAL_PATTERN_PRSBS31 0x38 # define DP_LINK_QUAL_PATTERN_CUSTOM 0x40 # define DP_LINK_QUAL_PATTERN_SQUARE 0x48 +# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DISABLED 0x49 +# define DP_LINK_QUAL_PATTERN_SQUARE_DEEMPHASIS_DISABLED 0x4a +# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED 0x4b #define DP_TRAINING_LANE0_1_SET2 0x10f #define DP_TRAINING_LANE2_3_SET2 0x110 -- cgit v1.2.3 From 0b82a2b70f890e8dd7a46dfbfcce00bd7e434762 Mon Sep 17 00:00:00 2001 From: Stefan Eichenberger Date: Wed, 15 Nov 2023 13:13:36 +0100 Subject: drm/bridge: lt8912b: Add suspend/resume support Add support for suspend and resume. The lt8912b will power off when going into suspend and power on when resuming. 
Signed-off-by: Stefan Eichenberger Signed-off-by: Francesco Dolcini Reviewed-by: Robert Foss Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20231115121338.22959-2-francesco@dolcini.it --- drivers/gpu/drm/bridge/lontium-lt8912b.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index 03532efb893b..097ab04234b7 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -634,6 +634,33 @@ static const struct drm_bridge_funcs lt8912_bridge_funcs = { .get_edid = lt8912_bridge_get_edid, }; +static int lt8912_bridge_resume(struct device *dev) +{ + struct lt8912 *lt = dev_get_drvdata(dev); + int ret; + + ret = lt8912_hard_power_on(lt); + if (ret) + return ret; + + ret = lt8912_soft_power_on(lt); + if (ret) + return ret; + + return lt8912_video_on(lt); +} + +static int lt8912_bridge_suspend(struct device *dev) +{ + struct lt8912 *lt = dev_get_drvdata(dev); + + lt8912_hard_power_off(lt); + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(lt8912_bridge_pm_ops, lt8912_bridge_suspend, lt8912_bridge_resume); + static int lt8912_parse_dt(struct lt8912 *lt) { struct gpio_desc *gp_reset; @@ -770,6 +797,7 @@ static struct i2c_driver lt8912_i2c_driver = { .driver = { .name = "lt8912", .of_match_table = lt8912_dt_match, + .pm = pm_sleep_ptr(<8912_bridge_pm_ops), }, .probe = lt8912_probe, .remove = lt8912_remove, -- cgit v1.2.3 From f168c7f7d1a0cb12a4888af9f3f907139372f137 Mon Sep 17 00:00:00 2001 From: Stefan Eichenberger Date: Wed, 15 Nov 2023 13:13:37 +0100 Subject: dt-bindings: display: bridge: lt8912b: Add power supplies Add Lontium lt8912b power supplies. Signed-off-by: Stefan Eichenberger Signed-off-by: Francesco Dolcini Acked-by: Conor Dooley Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20231115121338.22959-3-francesco@dolcini.it --- .../bindings/display/bridge/lontium,lt8912b.yaml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml b/Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml index f201ae4af4fb..2cef25215798 100644 --- a/Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml +++ b/Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml @@ -55,6 +55,27 @@ properties: - port@0 - port@1 + vcchdmipll-supply: + description: A 1.8V supply that powers the HDMI PLL. + + vcchdmitx-supply: + description: A 1.8V supply that powers the HDMI TX part. + + vcclvdspll-supply: + description: A 1.8V supply that powers the LVDS PLL. + + vcclvdstx-supply: + description: A 1.8V supply that powers the LVDS TX part. + + vccmipirx-supply: + description: A 1.8V supply that powers the MIPI RX part. + + vccsysclk-supply: + description: A 1.8V supply that powers the SYSCLK. + + vdd-supply: + description: A 1.8V supply that powers the digital part. + required: - compatible - reg -- cgit v1.2.3 From f6d8a80f1d10ff01cff3ac26e242165a270bbbad Mon Sep 17 00:00:00 2001 From: Stefan Eichenberger Date: Wed, 15 Nov 2023 13:13:38 +0100 Subject: drm/bridge: lt8912b: Add power supplies Add supplies to the driver that can be used to turn the Lontium lt8912b on and off. It can have up to 7 independent supplies, we add them all and enable/disable them with bulk_enable/disable. 
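For context, the bulk-regulator pattern used here can be sketched as follows (an illustrative sketch, not the patch itself; the supply-name list is a placeholder subset, the real one is in the diff below):

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/regulator/consumer.h>

    static const char * const example_supply_names[] = {
            "vdd", "vccmipirx", "vccsysclk",        /* ...one entry per rail... */
    };

    static int example_enable_supplies(struct device *dev,
                                       struct regulator_bulk_data *supplies)
    {
            unsigned int i;
            int ret;

            /* Name each consumer, then resolve them all in one managed call. */
            for (i = 0; i < ARRAY_SIZE(example_supply_names); i++)
                    supplies[i].supply = example_supply_names[i];

            ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(example_supply_names),
                                          supplies);
            if (ret)
                    return ret;

            /* Power the whole set on; regulator_bulk_disable() is the counterpart
             * used on power-off. */
            return regulator_bulk_enable(ARRAY_SIZE(example_supply_names), supplies);
    }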
Signed-off-by: Stefan Eichenberger Signed-off-by: Francesco Dolcini Reviewed-by: Robert Foss Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20231115121338.22959-4-francesco@dolcini.it --- drivers/gpu/drm/bridge/lontium-lt8912b.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index 097ab04234b7..273157428c82 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -43,6 +43,8 @@ struct lt8912 { struct videomode mode; + struct regulator_bulk_data supplies[7]; + u8 data_lanes; bool is_power_on; }; @@ -257,6 +259,12 @@ static int lt8912_free_i2c(struct lt8912 *lt) static int lt8912_hard_power_on(struct lt8912 *lt) { + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(lt->supplies), lt->supplies); + if (ret) + return ret; + gpiod_set_value_cansleep(lt->gp_reset, 0); msleep(20); @@ -267,6 +275,9 @@ static void lt8912_hard_power_off(struct lt8912 *lt) { gpiod_set_value_cansleep(lt->gp_reset, 1); msleep(20); + + regulator_bulk_disable(ARRAY_SIZE(lt->supplies), lt->supplies); + lt->is_power_on = false; } @@ -661,6 +672,21 @@ static int lt8912_bridge_suspend(struct device *dev) static DEFINE_SIMPLE_DEV_PM_OPS(lt8912_bridge_pm_ops, lt8912_bridge_suspend, lt8912_bridge_resume); +static int lt8912_get_regulators(struct lt8912 *lt) +{ + unsigned int i; + const char * const supply_names[] = { + "vdd", "vccmipirx", "vccsysclk", "vcclvdstx", + "vcchdmitx", "vcclvdspll", "vcchdmipll" + }; + + for (i = 0; i < ARRAY_SIZE(lt->supplies); i++) + lt->supplies[i].supply = supply_names[i]; + + return devm_regulator_bulk_get(lt->dev, ARRAY_SIZE(lt->supplies), + lt->supplies); +} + static int lt8912_parse_dt(struct lt8912 *lt) { struct gpio_desc *gp_reset; @@ -712,6 +738,10 @@ static int lt8912_parse_dt(struct lt8912 *lt) goto err_free_host_node; } + ret = lt8912_get_regulators(lt); + if (ret) + goto err_free_host_node; + of_node_put(port_node); return 0; -- cgit v1.2.3 From 914437992876838662c968cb416f832110fb1093 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 4 Dec 2023 15:29:00 +0300 Subject: drm/bridge: nxp-ptn3460: fix i2c_master_send() error checking The i2c_master_send/recv() functions return negative error codes or the number of bytes that were able to be sent/received. This code has two problems. 1) Instead of checking if all the bytes were sent or received, it checks that at least one byte was sent or received. 2) If there was a partial send/receive then we should return a negative error code but this code returns success. 
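Schematically, the corrected checking pattern looks like this (an illustrative sketch; 'client', 'buf' and 'len' stand in for the driver's actual arguments):

    #include <linux/errno.h>
    #include <linux/i2c.h>

    static int example_i2c_write_then_read(struct i2c_client *client,
                                           char *buf, int len)
    {
            int ret;

            ret = i2c_master_send(client, buf, len);
            if (ret != len)                         /* error or short write */
                    return ret < 0 ? ret : -EIO;

            ret = i2c_master_recv(client, buf, len);
            if (ret != len)                         /* error or short read */
                    return ret < 0 ? ret : -EIO;

            return 0;
    }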
Fixes: a9fe713d7d45 ("drm/bridge: Add PTN3460 bridge driver") Cc: stable@vger.kernel.org Signed-off-by: Dan Carpenter Reviewed-by: Robert Foss Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/0cdc2dce-ca89-451a-9774-1482ab2f4762@moroto.mountain --- drivers/gpu/drm/bridge/nxp-ptn3460.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c index d81920227a8a..9b7eb8c669c1 100644 --- a/drivers/gpu/drm/bridge/nxp-ptn3460.c +++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c @@ -56,13 +56,13 @@ static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr, ret = i2c_master_send(ptn_bridge->client, &addr, 1); if (ret <= 0) { DRM_ERROR("Failed to send i2c command, ret=%d\n", ret); - return ret; + return ret ?: -EIO; } ret = i2c_master_recv(ptn_bridge->client, buf, len); - if (ret <= 0) { + if (ret != len) { DRM_ERROR("Failed to recv i2c data, ret=%d\n", ret); - return ret; + return ret < 0 ? ret : -EIO; } return 0; @@ -78,9 +78,9 @@ static int ptn3460_write_byte(struct ptn3460_bridge *ptn_bridge, char addr, buf[1] = val; ret = i2c_master_send(ptn_bridge->client, buf, ARRAY_SIZE(buf)); - if (ret <= 0) { + if (ret != ARRAY_SIZE(buf)) { DRM_ERROR("Failed to send i2c command, ret=%d\n", ret); - return ret; + return ret < 0 ? ret : -EIO; } return 0; -- cgit v1.2.3 From aa041111311d35cb311543f43bc60b825b8cd109 Mon Sep 17 00:00:00 2001 From: Frank Binns Date: Mon, 4 Dec 2023 13:28:47 +0000 Subject: MAINTAINERS: Document Imagination PowerVR driver patches go via drm-misc This is the tree used by nearly all other DRM drivers, so use it for the PowerVR driver as well. Signed-off-by: Frank Binns Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20231204132847.1307340-1-frank.binns@imgtec.com --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 0ba904b46efe..d4b46b3db022 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10397,6 +10397,7 @@ M: Frank Binns M: Donald Robson M: Matt Coster S: Supported +T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/devicetree/bindings/gpu/img,powervr.yaml F: Documentation/gpu/imagination/ F: drivers/gpu/drm/imagination/ -- cgit v1.2.3 From 2a04739139b2b2761571e18937e2400e71eff664 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Sun, 3 Dec 2023 14:43:28 +0300 Subject: drm/bridge: add transparent bridge helper Define a helper for creating simple transparent bridges which serve the only purpose of linking devices into the bridge chain up to the last bridge representing the connector. This is especially useful for DP/USB-C bridge chains, which can span across several devices, but do not require any additional functionality from the intermediate bridges. 
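For a consumer, using the helper boils down to a single call from probe (an illustrative sketch; the probe function is a placeholder, drm_aux_bridge_register() is the helper added below):

    #include <linux/platform_device.h>
    #include <drm/bridge/aux-bridge.h>

    static int example_phy_probe(struct platform_device *pdev)
    {
            /* ... set up the PHY/retimer hardware itself ... */

            /*
             * Occupy a slot in the bridge chain. The helper resolves the next
             * bridge through the device's OF graph and is cleaned up
             * automatically via devm.
             */
            return drm_aux_bridge_register(&pdev->dev);
    }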
Reviewed-by: Neil Armstrong Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231203114333.1305826-2-dmitry.baryshkov@linaro.org --- drivers/gpu/drm/bridge/Kconfig | 9 +++ drivers/gpu/drm/bridge/Makefile | 1 + drivers/gpu/drm/bridge/aux-bridge.c | 140 ++++++++++++++++++++++++++++++++++++ include/drm/bridge/aux-bridge.h | 19 +++++ 4 files changed, 169 insertions(+) create mode 100644 drivers/gpu/drm/bridge/aux-bridge.c create mode 100644 include/drm/bridge/aux-bridge.h diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index ba82a1142adf..f12eab62799f 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -12,6 +12,15 @@ config DRM_PANEL_BRIDGE help DRM bridge wrapper of DRM panels +config DRM_AUX_BRIDGE + tristate + depends on DRM_BRIDGE && OF + select AUXILIARY_BUS + select DRM_PANEL_BRIDGE + help + Simple transparent bridge that is used by several non-DRM drivers to + build bridges chain. + menu "Display Interface Bridges" depends on DRM && DRM_BRIDGE diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 2b892b7ed59e..918e3bfff079 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_DRM_AUX_BRIDGE) += aux-bridge.o obj-$(CONFIG_DRM_CHIPONE_ICN6211) += chipone-icn6211.o obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c new file mode 100644 index 000000000000..49d7c2ab1ecc --- /dev/null +++ b/drivers/gpu/drm/bridge/aux-bridge.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2023 Linaro Ltd. + * + * Author: Dmitry Baryshkov + */ +#include +#include + +#include +#include + +static DEFINE_IDA(drm_aux_bridge_ida); + +static void drm_aux_bridge_release(struct device *dev) +{ + struct auxiliary_device *adev = to_auxiliary_dev(dev); + + ida_free(&drm_aux_bridge_ida, adev->id); + + kfree(adev); +} + +static void drm_aux_bridge_unregister_adev(void *_adev) +{ + struct auxiliary_device *adev = _adev; + + auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); +} + +/** + * drm_aux_bridge_register - Create a simple bridge device to link the chain + * @parent: device instance providing this bridge + * + * Creates a simple DRM bridge that doesn't implement any drm_bridge + * operations. Such bridges merely fill a place in the bridge chain linking + * surrounding DRM bridges. 
+ * + * Return: zero on success, negative error code on failure + */ +int drm_aux_bridge_register(struct device *parent) +{ + struct auxiliary_device *adev; + int ret; + + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + return -ENOMEM; + + ret = ida_alloc(&drm_aux_bridge_ida, GFP_KERNEL); + if (ret < 0) { + kfree(adev); + return ret; + } + + adev->id = ret; + adev->name = "aux_bridge"; + adev->dev.parent = parent; + adev->dev.of_node = parent->of_node; + adev->dev.release = drm_aux_bridge_release; + + ret = auxiliary_device_init(adev); + if (ret) { + ida_free(&drm_aux_bridge_ida, adev->id); + kfree(adev); + return ret; + } + + ret = auxiliary_device_add(adev); + if (ret) { + auxiliary_device_uninit(adev); + return ret; + } + + return devm_add_action_or_reset(parent, drm_aux_bridge_unregister_adev, adev); +} +EXPORT_SYMBOL_GPL(drm_aux_bridge_register); + +struct drm_aux_bridge_data { + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct device *dev; +}; + +static int drm_aux_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct drm_aux_bridge_data *data; + + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) + return -EINVAL; + + data = container_of(bridge, struct drm_aux_bridge_data, bridge); + + return drm_bridge_attach(bridge->encoder, data->next_bridge, bridge, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); +} + +static const struct drm_bridge_funcs drm_aux_bridge_funcs = { + .attach = drm_aux_bridge_attach, +}; + +static int drm_aux_bridge_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *id) +{ + struct drm_aux_bridge_data *data; + + data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dev = &auxdev->dev; + data->next_bridge = devm_drm_of_get_bridge(&auxdev->dev, auxdev->dev.of_node, 0, 0); + if (IS_ERR(data->next_bridge)) + return dev_err_probe(&auxdev->dev, PTR_ERR(data->next_bridge), + "failed to acquire drm_bridge\n"); + + data->bridge.funcs = &drm_aux_bridge_funcs; + data->bridge.of_node = data->dev->of_node; + + return devm_drm_bridge_add(data->dev, &data->bridge); +} + +static const struct auxiliary_device_id drm_aux_bridge_table[] = { + { .name = KBUILD_MODNAME ".aux_bridge" }, + {}, +}; +MODULE_DEVICE_TABLE(auxiliary, drm_aux_bridge_table); + +static struct auxiliary_driver drm_aux_bridge_drv = { + .name = "aux_bridge", + .id_table = drm_aux_bridge_table, + .probe = drm_aux_bridge_probe, +}; +module_auxiliary_driver(drm_aux_bridge_drv); + +MODULE_AUTHOR("Dmitry Baryshkov "); +MODULE_DESCRIPTION("DRM transparent bridge"); +MODULE_LICENSE("GPL"); diff --git a/include/drm/bridge/aux-bridge.h b/include/drm/bridge/aux-bridge.h new file mode 100644 index 000000000000..b3a9cc9c862f --- /dev/null +++ b/include/drm/bridge/aux-bridge.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2023 Linaro Ltd. + * + * Author: Dmitry Baryshkov + */ +#ifndef DRM_AUX_BRIDGE_H +#define DRM_AUX_BRIDGE_H + +#if IS_ENABLED(CONFIG_DRM_AUX_BRIDGE) +int drm_aux_bridge_register(struct device *parent); +#else +static inline int drm_aux_bridge_register(struct device *parent) +{ + return 0; +} +#endif + +#endif -- cgit v1.2.3 From 35921910bbd0b6ab595cead16d0c8faadbf2fd94 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Sun, 3 Dec 2023 14:43:29 +0300 Subject: phy: qcom: qmp-combo: switch to DRM_AUX_BRIDGE Switch to using the new DRM_AUX_BRIDGE helper to create the transparent DRM bridge device instead of handcoding corresponding functionality. 
Acked-by: Vinod Koul Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231203114333.1305826-3-dmitry.baryshkov@linaro.org --- drivers/phy/qualcomm/Kconfig | 2 +- drivers/phy/qualcomm/phy-qcom-qmp-combo.c | 44 ++----------------------------- 2 files changed, 3 insertions(+), 43 deletions(-) diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig index d891058b7c39..846f8c99547f 100644 --- a/drivers/phy/qualcomm/Kconfig +++ b/drivers/phy/qualcomm/Kconfig @@ -63,7 +63,7 @@ config PHY_QCOM_QMP_COMBO depends on DRM || DRM=n select GENERIC_PHY select MFD_SYSCON - select DRM_PANEL_BRIDGE if DRM + select DRM_AUX_BRIDGE if DRM_BRIDGE help Enable this to support the QMP Combo PHY transceiver that is used with USB3 and DisplayPort controllers on Qualcomm chips. diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c index 9c87845c78ec..f6c727249104 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c @@ -21,7 +21,7 @@ #include #include -#include +#include #include @@ -1419,8 +1419,6 @@ struct qmp_combo { struct clk_hw dp_link_hw; struct clk_hw dp_pixel_hw; - struct drm_bridge bridge; - struct typec_switch_dev *sw; enum typec_orientation orientation; }; @@ -3191,44 +3189,6 @@ static int qmp_combo_typec_switch_register(struct qmp_combo *qmp) } #endif -#if IS_ENABLED(CONFIG_DRM) -static int qmp_combo_bridge_attach(struct drm_bridge *bridge, - enum drm_bridge_attach_flags flags) -{ - struct qmp_combo *qmp = container_of(bridge, struct qmp_combo, bridge); - struct drm_bridge *next_bridge; - - if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) - return -EINVAL; - - next_bridge = devm_drm_of_get_bridge(qmp->dev, qmp->dev->of_node, 0, 0); - if (IS_ERR(next_bridge)) { - dev_err(qmp->dev, "failed to acquire drm_bridge: %pe\n", next_bridge); - return PTR_ERR(next_bridge); - } - - return drm_bridge_attach(bridge->encoder, next_bridge, bridge, - DRM_BRIDGE_ATTACH_NO_CONNECTOR); -} - -static const struct drm_bridge_funcs qmp_combo_bridge_funcs = { - .attach = qmp_combo_bridge_attach, -}; - -static int qmp_combo_dp_register_bridge(struct qmp_combo *qmp) -{ - qmp->bridge.funcs = &qmp_combo_bridge_funcs; - qmp->bridge.of_node = qmp->dev->of_node; - - return devm_drm_bridge_add(qmp->dev, &qmp->bridge); -} -#else -static int qmp_combo_dp_register_bridge(struct qmp_combo *qmp) -{ - return 0; -} -#endif - static int qmp_combo_parse_dt_lecacy_dp(struct qmp_combo *qmp, struct device_node *np) { struct device *dev = qmp->dev; @@ -3440,7 +3400,7 @@ static int qmp_combo_probe(struct platform_device *pdev) if (ret) return ret; - ret = qmp_combo_dp_register_bridge(qmp); + ret = drm_aux_bridge_register(dev); if (ret) return ret; -- cgit v1.2.3 From c5d296bad640b190c52ef7508114d70e971a4bba Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Sun, 3 Dec 2023 14:43:30 +0300 Subject: usb: typec: nb7vpq904m: switch to DRM_AUX_BRIDGE Switch to using the new DRM_AUX_BRIDGE helper to create the transparent DRM bridge device instead of handcoding corresponding functionality. 
Reviewed-by: Heikki Krogerus Acked-by: Greg Kroah-Hartman Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231203114333.1305826-4-dmitry.baryshkov@linaro.org --- drivers/usb/typec/mux/Kconfig | 2 +- drivers/usb/typec/mux/nb7vpq904m.c | 44 ++------------------------------------ 2 files changed, 3 insertions(+), 43 deletions(-) diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig index 816b9bd08355..5120942f309d 100644 --- a/drivers/usb/typec/mux/Kconfig +++ b/drivers/usb/typec/mux/Kconfig @@ -40,7 +40,7 @@ config TYPEC_MUX_NB7VPQ904M tristate "On Semiconductor NB7VPQ904M Type-C redriver driver" depends on I2C depends on DRM || DRM=n - select DRM_PANEL_BRIDGE if DRM + select DRM_AUX_BRIDGE if DRM_BRIDGE select REGMAP_I2C help Say Y or M if your system has a On Semiconductor NB7VPQ904M Type-C diff --git a/drivers/usb/typec/mux/nb7vpq904m.c b/drivers/usb/typec/mux/nb7vpq904m.c index cda206cf0c38..b17826713753 100644 --- a/drivers/usb/typec/mux/nb7vpq904m.c +++ b/drivers/usb/typec/mux/nb7vpq904m.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include @@ -70,8 +70,6 @@ struct nb7vpq904m { bool swap_data_lanes; struct typec_switch *typec_switch; - struct drm_bridge bridge; - struct mutex lock; /* protect non-concurrent retimer & switch */ enum typec_orientation orientation; @@ -297,44 +295,6 @@ static int nb7vpq904m_retimer_set(struct typec_retimer *retimer, struct typec_re return ret; } -#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE) -static int nb7vpq904m_bridge_attach(struct drm_bridge *bridge, - enum drm_bridge_attach_flags flags) -{ - struct nb7vpq904m *nb7 = container_of(bridge, struct nb7vpq904m, bridge); - struct drm_bridge *next_bridge; - - if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) - return -EINVAL; - - next_bridge = devm_drm_of_get_bridge(&nb7->client->dev, nb7->client->dev.of_node, 0, 0); - if (IS_ERR(next_bridge)) { - dev_err(&nb7->client->dev, "failed to acquire drm_bridge: %pe\n", next_bridge); - return PTR_ERR(next_bridge); - } - - return drm_bridge_attach(bridge->encoder, next_bridge, bridge, - DRM_BRIDGE_ATTACH_NO_CONNECTOR); -} - -static const struct drm_bridge_funcs nb7vpq904m_bridge_funcs = { - .attach = nb7vpq904m_bridge_attach, -}; - -static int nb7vpq904m_register_bridge(struct nb7vpq904m *nb7) -{ - nb7->bridge.funcs = &nb7vpq904m_bridge_funcs; - nb7->bridge.of_node = nb7->client->dev.of_node; - - return devm_drm_bridge_add(&nb7->client->dev, &nb7->bridge); -} -#else -static int nb7vpq904m_register_bridge(struct nb7vpq904m *nb7) -{ - return 0; -} -#endif - static const struct regmap_config nb7_regmap = { .max_register = 0x1f, .reg_bits = 8, @@ -461,7 +421,7 @@ static int nb7vpq904m_probe(struct i2c_client *client) gpiod_set_value(nb7->enable_gpio, 1); - ret = nb7vpq904m_register_bridge(nb7); + ret = drm_aux_bridge_register(dev); if (ret) goto err_disable_gpio; -- cgit v1.2.3 From e560518a6c2e60f1566473c146fddcff3281f617 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Sun, 3 Dec 2023 14:43:31 +0300 Subject: drm/bridge: implement generic DP HPD bridge Several USB-C controllers implement a pretty simple DRM bridge which implements just the HPD notification operations. Add special helper for creating such simple bridges. 
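From a USB-C controller driver, the resulting usage reduces to two calls (an illustrative sketch; the wrapper function names are placeholders, the two helpers are the ones introduced below):

    #include <linux/of.h>
    #include <drm/drm_connector.h>
    #include <drm/bridge/aux-bridge.h>

    /* Probe time: create the HPD-only bridge for one port ('port_np' is the
     * corresponding OF node). The return value may be an ERR_PTR and is the
     * device handle to notify against later. */
    static struct device *example_port_bridge_init(struct device *dev,
                                                   struct device_node *port_np)
    {
            return drm_dp_hpd_bridge_register(dev, port_np);
    }

    /* Event path, called from a sleepable context. */
    static void example_report_hpd(struct device *bridge_dev, bool connected)
    {
            drm_aux_hpd_bridge_notify(bridge_dev,
                                      connected ? connector_status_connected :
                                                  connector_status_disconnected);
    }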
Acked-by: Neil Armstrong Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231203114333.1305826-5-dmitry.baryshkov@linaro.org --- drivers/gpu/drm/bridge/Kconfig | 8 ++ drivers/gpu/drm/bridge/Makefile | 1 + drivers/gpu/drm/bridge/aux-hpd-bridge.c | 163 ++++++++++++++++++++++++++++++++ include/drm/bridge/aux-bridge.h | 18 ++++ 4 files changed, 190 insertions(+) create mode 100644 drivers/gpu/drm/bridge/aux-hpd-bridge.c diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index f12eab62799f..19d2dc05c397 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -21,6 +21,14 @@ config DRM_AUX_BRIDGE Simple transparent bridge that is used by several non-DRM drivers to build bridges chain. +config DRM_AUX_HPD_BRIDGE + tristate + depends on DRM_BRIDGE && OF + select AUXILIARY_BUS + help + Simple bridge that terminates the bridge chain and provides HPD + support. + menu "Display Interface Bridges" depends on DRM && DRM_BRIDGE diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 918e3bfff079..017b5832733b 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_DRM_AUX_BRIDGE) += aux-bridge.o +obj-$(CONFIG_DRM_AUX_HPD_BRIDGE) += aux-hpd-bridge.o obj-$(CONFIG_DRM_CHIPONE_ICN6211) += chipone-icn6211.o obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c new file mode 100644 index 000000000000..5d2ab3a715f9 --- /dev/null +++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2023 Linaro Ltd. + * + * Author: Dmitry Baryshkov + */ +#include +#include +#include + +#include +#include + +static DEFINE_IDA(drm_aux_hpd_bridge_ida); + +struct drm_aux_hpd_bridge_data { + struct drm_bridge bridge; + struct device *dev; +}; + +static void drm_aux_hpd_bridge_release(struct device *dev) +{ + struct auxiliary_device *adev = to_auxiliary_dev(dev); + + ida_free(&drm_aux_hpd_bridge_ida, adev->id); + + of_node_put(adev->dev.platform_data); + + kfree(adev); +} + +static void drm_aux_hpd_bridge_unregister_adev(void *_adev) +{ + struct auxiliary_device *adev = _adev; + + auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); +} + +/** + * drm_dp_hpd_bridge_register - Create a simple HPD DisplayPort bridge + * @parent: device instance providing this bridge + * @np: device node pointer corresponding to this bridge instance + * + * Creates a simple DRM bridge with the type set to + * DRM_MODE_CONNECTOR_DisplayPort, which terminates the bridge chain and is + * able to send the HPD events. + * + * Return: device instance that will handle created bridge or an error code + * encoded into the pointer. 
+ */ +struct device *drm_dp_hpd_bridge_register(struct device *parent, + struct device_node *np) +{ + struct auxiliary_device *adev; + int ret; + + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + return ERR_PTR(-ENOMEM); + + ret = ida_alloc(&drm_aux_hpd_bridge_ida, GFP_KERNEL); + if (ret < 0) { + kfree(adev); + return ERR_PTR(ret); + } + + adev->id = ret; + adev->name = "dp_hpd_bridge"; + adev->dev.parent = parent; + adev->dev.of_node = parent->of_node; + adev->dev.release = drm_aux_hpd_bridge_release; + adev->dev.platform_data = np; + + ret = auxiliary_device_init(adev); + if (ret) { + ida_free(&drm_aux_hpd_bridge_ida, adev->id); + kfree(adev); + return ERR_PTR(ret); + } + + ret = auxiliary_device_add(adev); + if (ret) { + auxiliary_device_uninit(adev); + return ERR_PTR(ret); + } + + ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_unregister_adev, adev); + if (ret) + return ERR_PTR(ret); + + return &adev->dev; +} +EXPORT_SYMBOL_GPL(drm_dp_hpd_bridge_register); + +/** + * drm_aux_hpd_bridge_notify - notify hot plug detection events + * @dev: device created for the HPD bridge + * @status: output connection status + * + * A wrapper around drm_bridge_hpd_notify() that is used to report hot plug + * detection events for bridges created via drm_dp_hpd_bridge_register(). + * + * This function shall be called in a context that can sleep. + */ +void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status) +{ + struct auxiliary_device *adev = to_auxiliary_dev(dev); + struct drm_aux_hpd_bridge_data *data = auxiliary_get_drvdata(adev); + + if (!data) + return; + + drm_bridge_hpd_notify(&data->bridge, status); +} +EXPORT_SYMBOL_GPL(drm_aux_hpd_bridge_notify); + +static int drm_aux_hpd_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 
0 : -EINVAL; +} + +static const struct drm_bridge_funcs drm_aux_hpd_bridge_funcs = { + .attach = drm_aux_hpd_bridge_attach, +}; + +static int drm_aux_hpd_bridge_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *id) +{ + struct drm_aux_hpd_bridge_data *data; + + data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dev = &auxdev->dev; + data->bridge.funcs = &drm_aux_hpd_bridge_funcs; + data->bridge.of_node = dev_get_platdata(data->dev); + data->bridge.ops = DRM_BRIDGE_OP_HPD; + data->bridge.type = id->driver_data; + + auxiliary_set_drvdata(auxdev, data); + + return devm_drm_bridge_add(data->dev, &data->bridge); +} + +static const struct auxiliary_device_id drm_aux_hpd_bridge_table[] = { + { .name = KBUILD_MODNAME ".dp_hpd_bridge", .driver_data = DRM_MODE_CONNECTOR_DisplayPort, }, + {}, +}; +MODULE_DEVICE_TABLE(auxiliary, drm_aux_hpd_bridge_table); + +static struct auxiliary_driver drm_aux_hpd_bridge_drv = { + .name = "aux_hpd_bridge", + .id_table = drm_aux_hpd_bridge_table, + .probe = drm_aux_hpd_bridge_probe, +}; +module_auxiliary_driver(drm_aux_hpd_bridge_drv); + +MODULE_AUTHOR("Dmitry Baryshkov "); +MODULE_DESCRIPTION("DRM HPD bridge"); +MODULE_LICENSE("GPL"); diff --git a/include/drm/bridge/aux-bridge.h b/include/drm/bridge/aux-bridge.h index b3a9cc9c862f..66249ff0858e 100644 --- a/include/drm/bridge/aux-bridge.h +++ b/include/drm/bridge/aux-bridge.h @@ -7,6 +7,8 @@ #ifndef DRM_AUX_BRIDGE_H #define DRM_AUX_BRIDGE_H +#include + #if IS_ENABLED(CONFIG_DRM_AUX_BRIDGE) int drm_aux_bridge_register(struct device *parent); #else @@ -16,4 +18,20 @@ static inline int drm_aux_bridge_register(struct device *parent) } #endif +#if IS_ENABLED(CONFIG_DRM_AUX_HPD_BRIDGE) +struct device *drm_dp_hpd_bridge_register(struct device *parent, + struct device_node *np); +void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status); +#else +static inline struct device *drm_dp_hpd_bridge_register(struct device *parent, + struct device_node *np) +{ + return 0; +} + +static inline void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status) +{ +} +#endif + #endif -- cgit v1.2.3 From 2bcca96abfbf89d26fc10fc92e40532bb2ae8891 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Sun, 3 Dec 2023 14:43:32 +0300 Subject: soc: qcom: pmic-glink: switch to DRM_AUX_HPD_BRIDGE Use the freshly defined DRM_AUX_HPD_BRIDGE instead of open-coding the same functionality for the DRM bridge chain termination. 
Reviewed-by: Bjorn Andersson Acked-by: Bjorn Andersson Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231203114333.1305826-6-dmitry.baryshkov@linaro.org --- drivers/soc/qcom/Kconfig | 1 + drivers/soc/qcom/pmic_glink_altmode.c | 33 +++++++++------------------------ 2 files changed, 10 insertions(+), 24 deletions(-) diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index b3634e10f6f5..c954001ae79e 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -86,6 +86,7 @@ config QCOM_PMIC_GLINK depends on OF select AUXILIARY_BUS select QCOM_PDR_HELPERS + select DRM_AUX_HPD_BRIDGE help The Qualcomm PMIC GLINK driver provides access, over GLINK, to the USB and battery firmware running on one of the coprocessors in diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c index b78279e2f54c..053b7393e26a 100644 --- a/drivers/soc/qcom/pmic_glink_altmode.c +++ b/drivers/soc/qcom/pmic_glink_altmode.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include @@ -76,7 +76,7 @@ struct pmic_glink_altmode_port { struct work_struct work; - struct drm_bridge bridge; + struct device *bridge; enum typec_orientation orientation; u16 svid; @@ -230,10 +230,10 @@ static void pmic_glink_altmode_worker(struct work_struct *work) else pmic_glink_altmode_enable_usb(altmode, alt_port); - if (alt_port->hpd_state) - drm_bridge_hpd_notify(&alt_port->bridge, connector_status_connected); - else - drm_bridge_hpd_notify(&alt_port->bridge, connector_status_disconnected); + drm_aux_hpd_bridge_notify(alt_port->bridge, + alt_port->hpd_state ? + connector_status_connected : + connector_status_disconnected); pmic_glink_altmode_request(altmode, ALTMODE_PAN_ACK, alt_port->index); }; @@ -365,16 +365,6 @@ static void pmic_glink_altmode_callback(const void *data, size_t len, void *priv } } -static int pmic_glink_altmode_attach(struct drm_bridge *bridge, - enum drm_bridge_attach_flags flags) -{ - return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL; -} - -static const struct drm_bridge_funcs pmic_glink_altmode_bridge_funcs = { - .attach = pmic_glink_altmode_attach, -}; - static void pmic_glink_altmode_put_retimer(void *data) { typec_retimer_put(data); @@ -464,15 +454,10 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev, alt_port->index = port; INIT_WORK(&alt_port->work, pmic_glink_altmode_worker); - alt_port->bridge.funcs = &pmic_glink_altmode_bridge_funcs; - alt_port->bridge.of_node = to_of_node(fwnode); - alt_port->bridge.ops = DRM_BRIDGE_OP_HPD; - alt_port->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; - - ret = devm_drm_bridge_add(dev, &alt_port->bridge); - if (ret) { + alt_port->bridge = drm_dp_hpd_bridge_register(dev, to_of_node(fwnode)); + if (IS_ERR(alt_port->bridge)) { fwnode_handle_put(fwnode); - return ret; + return PTR_ERR(alt_port->bridge); } alt_port->dp_alt.svid = USB_TYPEC_DP_SID; -- cgit v1.2.3 From 7d9f1b72b29698e3030c2b163522cf4aa91b47e9 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Sun, 3 Dec 2023 14:43:33 +0300 Subject: usb: typec: qcom-pmic-typec: switch to DRM_AUX_HPD_BRIDGE Use the freshly defined DRM_AUX_HPD_BRIDGE instead of open-coding the same functionality for the DRM bridge chain termination. 
Acked-by: Bryan O'Donoghue Reviewed-by: Heikki Krogerus Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231203114333.1305826-7-dmitry.baryshkov@linaro.org --- drivers/usb/typec/tcpm/Kconfig | 1 + drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c | 41 ++++----------------------- 2 files changed, 7 insertions(+), 35 deletions(-) diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig index 0b2993fef564..64d5421c69e6 100644 --- a/drivers/usb/typec/tcpm/Kconfig +++ b/drivers/usb/typec/tcpm/Kconfig @@ -80,6 +80,7 @@ config TYPEC_QCOM_PMIC tristate "Qualcomm PMIC USB Type-C Port Controller Manager driver" depends on ARCH_QCOM || COMPILE_TEST depends on DRM || DRM=n + select DRM_AUX_HPD_BRIDGE if DRM_BRIDGE help A Type-C port and Power Delivery driver which aggregates two discrete pieces of silicon in the PM8150b PMIC block: the diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c index 581199d37b49..1a2b4bddaa97 100644 --- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c +++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c @@ -18,7 +18,7 @@ #include #include -#include +#include #include "qcom_pmic_typec_pdphy.h" #include "qcom_pmic_typec_port.h" @@ -36,7 +36,6 @@ struct pmic_typec { struct pmic_typec_port *pmic_typec_port; bool vbus_enabled; struct mutex lock; /* VBUS state serialization */ - struct drm_bridge bridge; }; #define tcpc_to_tcpm(_tcpc_) container_of(_tcpc_, struct pmic_typec, tcpc) @@ -150,35 +149,6 @@ static int qcom_pmic_typec_init(struct tcpc_dev *tcpc) return 0; } -#if IS_ENABLED(CONFIG_DRM) -static int qcom_pmic_typec_attach(struct drm_bridge *bridge, - enum drm_bridge_attach_flags flags) -{ - return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 
0 : -EINVAL; -} - -static const struct drm_bridge_funcs qcom_pmic_typec_bridge_funcs = { - .attach = qcom_pmic_typec_attach, -}; - -static int qcom_pmic_typec_init_drm(struct pmic_typec *tcpm) -{ - tcpm->bridge.funcs = &qcom_pmic_typec_bridge_funcs; -#ifdef CONFIG_OF - tcpm->bridge.of_node = of_get_child_by_name(tcpm->dev->of_node, "connector"); -#endif - tcpm->bridge.ops = DRM_BRIDGE_OP_HPD; - tcpm->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; - - return devm_drm_bridge_add(tcpm->dev, &tcpm->bridge); -} -#else -static int qcom_pmic_typec_init_drm(struct pmic_typec *tcpm) -{ - return 0; -} -#endif - static int qcom_pmic_typec_probe(struct platform_device *pdev) { struct pmic_typec *tcpm; @@ -186,6 +156,7 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev) struct device_node *np = dev->of_node; const struct pmic_typec_resources *res; struct regmap *regmap; + struct device *bridge_dev; u32 base[2]; int ret; @@ -241,14 +212,14 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev) mutex_init(&tcpm->lock); platform_set_drvdata(pdev, tcpm); - ret = qcom_pmic_typec_init_drm(tcpm); - if (ret) - return ret; - tcpm->tcpc.fwnode = device_get_named_child_node(tcpm->dev, "connector"); if (!tcpm->tcpc.fwnode) return -EINVAL; + bridge_dev = drm_dp_hpd_bridge_register(tcpm->dev, to_of_node(tcpm->tcpc.fwnode)); + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + tcpm->tcpm_port = tcpm_register_port(tcpm->dev, &tcpm->tcpc); if (IS_ERR(tcpm->tcpm_port)) { ret = PTR_ERR(tcpm->tcpm_port); -- cgit v1.2.3 From caf525ed45b4960b450cbd4e811d9b247bc2586c Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Sun, 3 Dec 2023 14:53:13 +0300 Subject: drm/encoder: register per-encoder debugfs dir Each of connectors and CRTCs used by the DRM device provides debugfs directory, which is used by several standard debugfs files and can further be extended by the driver. Add such generic debugfs directories for encoder. 
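For illustration, a sketch of how a driver could populate the new per-encoder directory through the debugfs_init callback introduced below (the foo_* names are hypothetical; the callback signature and the seq_file pattern mirror this patch):

	static int foo_encoder_state_show(struct seq_file *m, void *data)
	{
		struct drm_encoder *encoder = m->private;

		seq_printf(m, "possible_crtcs: 0x%x\n", encoder->possible_crtcs);
		return 0;
	}
	DEFINE_SHOW_ATTRIBUTE(foo_encoder_state);

	static void foo_encoder_debugfs_init(struct drm_encoder *encoder,
					     struct dentry *root)
	{
		/* @root is the "encoder-%d" directory created by drm_debugfs_encoder_add() */
		debugfs_create_file("foo_state", 0444, root, encoder,
				    &foo_encoder_state_fops);
	}

	static const struct drm_encoder_funcs foo_encoder_funcs = {
		.destroy = drm_encoder_cleanup,
		.debugfs_init = foo_encoder_debugfs_init,
	};
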
Reviewed-by: Neil Armstrong Acked-by: Maxime Ripard Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231203115315.1306124-2-dmitry.baryshkov@linaro.org --- drivers/gpu/drm/drm_debugfs.c | 25 +++++++++++++++++++++++++ drivers/gpu/drm/drm_encoder.c | 4 ++++ drivers/gpu/drm/drm_internal.h | 10 ++++++++++ include/drm/drm_encoder.h | 16 +++++++++++++++- 4 files changed, 54 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index f291fb4b359f..00406b4f3235 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -589,4 +589,29 @@ void drm_debugfs_crtc_remove(struct drm_crtc *crtc) crtc->debugfs_entry = NULL; } +void drm_debugfs_encoder_add(struct drm_encoder *encoder) +{ + struct drm_minor *minor = encoder->dev->primary; + struct dentry *root; + char *name; + + name = kasprintf(GFP_KERNEL, "encoder-%d", encoder->index); + if (!name) + return; + + root = debugfs_create_dir(name, minor->debugfs_root); + kfree(name); + + encoder->debugfs_entry = root; + + if (encoder->funcs->debugfs_init) + encoder->funcs->debugfs_init(encoder, root); +} + +void drm_debugfs_encoder_remove(struct drm_encoder *encoder) +{ + debugfs_remove_recursive(encoder->debugfs_entry); + encoder->debugfs_entry = NULL; +} + #endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c index 1143bc7f3252..8f2bc6a28482 100644 --- a/drivers/gpu/drm/drm_encoder.c +++ b/drivers/gpu/drm/drm_encoder.c @@ -30,6 +30,7 @@ #include #include "drm_crtc_internal.h" +#include "drm_internal.h" /** * DOC: overview @@ -74,6 +75,8 @@ int drm_encoder_register_all(struct drm_device *dev) int ret = 0; drm_for_each_encoder(encoder, dev) { + drm_debugfs_encoder_add(encoder); + if (encoder->funcs && encoder->funcs->late_register) ret = encoder->funcs->late_register(encoder); if (ret) @@ -90,6 +93,7 @@ void drm_encoder_unregister_all(struct drm_device *dev) drm_for_each_encoder(encoder, dev) { if (encoder->funcs && encoder->funcs->early_unregister) encoder->funcs->early_unregister(encoder); + drm_debugfs_encoder_remove(encoder); } } diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index b12c463bc460..b7a311efa2b1 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -194,6 +194,8 @@ void drm_debugfs_connector_remove(struct drm_connector *connector); void drm_debugfs_crtc_add(struct drm_crtc *crtc); void drm_debugfs_crtc_remove(struct drm_crtc *crtc); void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc); +void drm_debugfs_encoder_add(struct drm_encoder *encoder); +void drm_debugfs_encoder_remove(struct drm_encoder *encoder); #else static inline void drm_debugfs_dev_fini(struct drm_device *dev) { @@ -231,6 +233,14 @@ static inline void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc) { } +static inline void drm_debugfs_encoder_add(struct drm_encoder *encoder) +{ +} + +static inline void drm_debugfs_encoder_remove(struct drm_encoder *encoder) +{ +} + #endif drm_ioctl_t drm_version; diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h index 3a09682af685..977a9381c8ba 100644 --- a/include/drm/drm_encoder.h +++ b/include/drm/drm_encoder.h @@ -60,7 +60,7 @@ struct drm_encoder_funcs { * @late_register: * * This optional hook can be used to register additional userspace - * interfaces attached to the encoder like debugfs interfaces. + * interfaces attached to the encoder. 
* It is called late in the driver load sequence from drm_dev_register(). * Everything added from this callback should be unregistered in * the early_unregister callback. @@ -81,6 +81,13 @@ struct drm_encoder_funcs { * before data structures are torndown. */ void (*early_unregister)(struct drm_encoder *encoder); + + /** + * @debugfs_init: + * + * Allows encoders to create encoder-specific debugfs files. + */ + void (*debugfs_init)(struct drm_encoder *encoder, struct dentry *root); }; /** @@ -184,6 +191,13 @@ struct drm_encoder { const struct drm_encoder_funcs *funcs; const struct drm_encoder_helper_funcs *helper_private; + + /** + * @debugfs_entry: + * + * Debugfs directory for this CRTC. + */ + struct dentry *debugfs_entry; }; #define obj_to_encoder(x) container_of(x, struct drm_encoder, base) -- cgit v1.2.3 From d0b3c318e04cc6c4e2a3c30ee0f6f619aa8d0db5 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Sun, 3 Dec 2023 14:53:14 +0300 Subject: drm/bridge: migrate bridge_chains to per-encoder file Instead of having a single file with all bridge chains, list bridges under a corresponding per-encoder debugfs directory. While we are at it, also slightly improve the formatting of the bridge data: split a single line entry into multiple lines, include the symbol name of the bridge funcs and add the textual representation of the bridge ops. Example of the listing: $ cat /sys/kernel/debug/dri/0/encoder-0/bridges bridge[0]: dsi_mgr_bridge_funcs type: [0] Unknown ops: [0] bridge[1]: lt9611uxc_bridge_funcs type: [11] HDMI-A OF: /soc@0/geniqup@9c0000/i2c@994000/hdmi-bridge@2b:lontium,lt9611uxc ops: [7] detect edid hpd Reviewed-by: Neil Armstrong Reviewed-by: Tomi Valkeinen Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231203115315.1306124-3-dmitry.baryshkov@linaro.org --- drivers/gpu/drm/drm_bridge.c | 44 ------------------------------------------- drivers/gpu/drm/drm_debugfs.c | 40 ++++++++++++++++++++++++++++++++++++--- include/drm/drm_bridge.h | 2 -- 3 files changed, 37 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index 30d66bee0ec6..cee3188adf3d 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -1347,50 +1347,6 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np) EXPORT_SYMBOL(of_drm_find_bridge); #endif -#ifdef CONFIG_DEBUG_FS -static int drm_bridge_chains_info(struct seq_file *m, void *data) -{ - struct drm_debugfs_entry *entry = m->private; - struct drm_device *dev = entry->dev; - struct drm_printer p = drm_seq_file_printer(m); - struct drm_mode_config *config = &dev->mode_config; - struct drm_encoder *encoder; - unsigned int bridge_idx = 0; - - list_for_each_entry(encoder, &config->encoder_list, head) { - struct drm_bridge *bridge; - - drm_printf(&p, "encoder[%u]\n", encoder->base.id); - - drm_for_each_bridge_in_chain(encoder, bridge) { - drm_printf(&p, "\tbridge[%u] type: %u, ops: %#x", - bridge_idx, bridge->type, bridge->ops); - -#ifdef CONFIG_OF - if (bridge->of_node) - drm_printf(&p, ", OF: %pOFfc", bridge->of_node); -#endif - - drm_printf(&p, "\n"); - - bridge_idx++; - } - } - - return 0; -} - -static const struct drm_debugfs_info drm_bridge_debugfs_list[] = { - { "bridge_chains", drm_bridge_chains_info, 0 }, -}; - -void drm_bridge_debugfs_init(struct drm_device *dev) -{ - drm_debugfs_add_files(dev, drm_bridge_debugfs_list, - ARRAY_SIZE(drm_bridge_debugfs_list)); -} -#endif - MODULE_AUTHOR("Ajay Kumar "); MODULE_DESCRIPTION("DRM bridge 
infrastructure"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 00406b4f3235..02e7481758c0 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -314,10 +314,8 @@ void drm_debugfs_dev_register(struct drm_device *dev) drm_framebuffer_debugfs_init(dev); drm_client_debugfs_init(dev); } - if (drm_drv_uses_atomic_modeset(dev)) { + if (drm_drv_uses_atomic_modeset(dev)) drm_atomic_debugfs_init(dev); - drm_bridge_debugfs_init(dev); - } } int drm_debugfs_register(struct drm_minor *minor, int minor_id, @@ -589,6 +587,38 @@ void drm_debugfs_crtc_remove(struct drm_crtc *crtc) crtc->debugfs_entry = NULL; } +static int bridges_show(struct seq_file *m, void *data) +{ + struct drm_encoder *encoder = m->private; + struct drm_printer p = drm_seq_file_printer(m); + struct drm_bridge *bridge; + unsigned int idx = 0; + + drm_for_each_bridge_in_chain(encoder, bridge) { + drm_printf(&p, "bridge[%d]: %ps\n", idx++, bridge->funcs); + drm_printf(&p, "\ttype: [%d] %s\n", + bridge->type, + drm_get_connector_type_name(bridge->type)); +#ifdef CONFIG_OF + if (bridge->of_node) + drm_printf(&p, "\tOF: %pOFfc\n", bridge->of_node); +#endif + drm_printf(&p, "\tops: [0x%x]", bridge->ops); + if (bridge->ops & DRM_BRIDGE_OP_DETECT) + drm_puts(&p, " detect"); + if (bridge->ops & DRM_BRIDGE_OP_EDID) + drm_puts(&p, " edid"); + if (bridge->ops & DRM_BRIDGE_OP_HPD) + drm_puts(&p, " hpd"); + if (bridge->ops & DRM_BRIDGE_OP_MODES) + drm_puts(&p, " modes"); + drm_puts(&p, "\n"); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(bridges); + void drm_debugfs_encoder_add(struct drm_encoder *encoder) { struct drm_minor *minor = encoder->dev->primary; @@ -604,6 +634,10 @@ void drm_debugfs_encoder_add(struct drm_encoder *encoder) encoder->debugfs_entry = root; + /* bridges list */ + debugfs_create_file("bridges", 0444, root, encoder, + &bridges_fops); + if (encoder->funcs->debugfs_init) encoder->funcs->debugfs_init(encoder, root); } diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 9ef461aa9b9e..e39da5807ba7 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -950,6 +950,4 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm, } #endif -void drm_bridge_debugfs_init(struct drm_device *dev); - #endif -- cgit v1.2.3 From 1c0a80f160965c88f16e73ff69015db2f044c486 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Mon, 4 Dec 2023 15:13:48 +0200 Subject: Revert "drm/atomic: Loosen FB atomic checks" This reverts commit f1e75da5364e780905d9cd6043f9c74cdcf84073. Although the Solid Fill planes patchset got all reviews and acknowledgements, it doesn't fulfill requirements for the new uABI. It has neither corresponding open-source userspace implementation nor the IGT tests coverage. Reverting this patchset until userspace obligations are fulfilled. 
Acked-by: Simon Ser Acked-by: Maxime Ripard Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231204131455.19023-2-dmitry.baryshkov@linaro.org --- drivers/gpu/drm/drm_atomic.c | 21 ++++++++++---------- drivers/gpu/drm/drm_atomic_helper.c | 39 ++++++++++++++++--------------------- include/drm/drm_atomic_helper.h | 4 ++-- include/drm/drm_plane.h | 29 --------------------------- 4 files changed, 29 insertions(+), 64 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index aed0a694c74c..c6f2b86c48ae 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -674,16 +674,17 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, { struct drm_plane *plane = new_plane_state->plane; struct drm_crtc *crtc = new_plane_state->crtc; + const struct drm_framebuffer *fb = new_plane_state->fb; int ret; - /* either *both* CRTC and pixel source must be set, or neither */ - if (crtc && !drm_plane_has_visible_data(new_plane_state)) { - drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] CRTC set but no visible data\n", + /* either *both* CRTC and FB must be set, or neither */ + if (crtc && !fb) { + drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] CRTC set but no FB\n", plane->base.id, plane->name); return -EINVAL; - } else if (drm_plane_has_visible_data(new_plane_state) && !crtc) { - drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] Source %d has visible data but no CRTC\n", - plane->base.id, plane->name, new_plane_state->pixel_source); + } else if (fb && !crtc) { + drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] FB set but no CRTC\n", + plane->base.id, plane->name); return -EINVAL; } @@ -714,11 +715,9 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, } - if (new_plane_state->pixel_source == DRM_PLANE_PIXEL_SOURCE_FB && new_plane_state->fb) { - ret = drm_atomic_plane_check_fb(new_plane_state); - if (ret) - return ret; - } + ret = drm_atomic_plane_check_fb(new_plane_state); + if (ret) + return ret; if (plane_switching_crtc(old_plane_state, new_plane_state)) { drm_dbg_atomic(plane->dev, diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index dc048988e3f3..c3f677130def 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -861,6 +861,7 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, bool can_position, bool can_update_disabled) { + struct drm_framebuffer *fb = plane_state->fb; struct drm_rect *src = &plane_state->src; struct drm_rect *dst = &plane_state->dst; unsigned int rotation = plane_state->rotation; @@ -872,7 +873,7 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, *src = drm_plane_state_src(plane_state); *dst = drm_plane_state_dest(plane_state); - if (!drm_plane_has_visible_data(plane_state)) { + if (!fb) { plane_state->visible = false; return 0; } @@ -889,31 +890,25 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, return -EINVAL; } - /* Check that selected pixel source is valid */ - if (plane_state->pixel_source == DRM_PLANE_PIXEL_SOURCE_FB && plane_state->fb) { - struct drm_framebuffer *fb = plane_state->fb; - drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation); + drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation); - /* Check scaling */ - hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); - vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale); + /* Check scaling */ + 
hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); + vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale); + if (hscale < 0 || vscale < 0) { + drm_dbg_kms(plane_state->plane->dev, + "Invalid scaling of plane\n"); + drm_rect_debug_print("src: ", &plane_state->src, true); + drm_rect_debug_print("dst: ", &plane_state->dst, false); + return -ERANGE; + } - if (hscale < 0 || vscale < 0) { - drm_dbg_kms(plane_state->plane->dev, - "Invalid scaling of plane\n"); - drm_rect_debug_print("src: ", &plane_state->src, true); - drm_rect_debug_print("dst: ", &plane_state->dst, false); - return -ERANGE; - } + if (crtc_state->enable) + drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2); - if (crtc_state->enable) - drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2); + plane_state->visible = drm_rect_clip_scaled(src, dst, &clip); - plane_state->visible = drm_rect_clip_scaled(src, dst, &clip); - drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation); - } else if (drm_plane_solid_fill_enabled(plane_state)) { - plane_state->visible = true; - } + drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation); if (!plane_state->visible) /* diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 6d97f38ac1f6..536a0b0091c3 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -256,8 +256,8 @@ drm_atomic_plane_disabling(struct drm_plane_state *old_plane_state, * Anything else should be considered a bug in the atomic core, so we * gently warn about it. */ - WARN_ON((new_plane_state->crtc == NULL && drm_plane_has_visible_data(new_plane_state)) || - (new_plane_state->crtc != NULL && !drm_plane_has_visible_data(new_plane_state))); + WARN_ON((new_plane_state->crtc == NULL && new_plane_state->fb != NULL) || + (new_plane_state->crtc != NULL && new_plane_state->fb == NULL)); return old_plane_state->crtc && !new_plane_state->crtc; } diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 3b187f3f5466..d14e2f1db234 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -1016,35 +1016,6 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev, #define drm_for_each_plane(plane, dev) \ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) -/** - * drm_plane_solid_fill_enabled - Check if solid fill is enabled on plane - * @state: plane state - * - * Returns: - * Whether the plane has been assigned a solid_fill_blob - */ -static inline bool drm_plane_solid_fill_enabled(struct drm_plane_state *state) -{ - if (!state) - return false; - return state->pixel_source == DRM_PLANE_PIXEL_SOURCE_SOLID_FILL && state->solid_fill_blob; -} - -static inline bool drm_plane_has_visible_data(const struct drm_plane_state *state) -{ - switch (state->pixel_source) { - case DRM_PLANE_PIXEL_SOURCE_NONE: - return false; - case DRM_PLANE_PIXEL_SOURCE_SOLID_FILL: - return state->solid_fill_blob != NULL; - case DRM_PLANE_PIXEL_SOURCE_FB: - default: - WARN_ON(state->pixel_source != DRM_PLANE_PIXEL_SOURCE_FB); - } - - return state->fb != NULL; -} - bool drm_any_plane_has_format(struct drm_device *dev, u32 format, u64 modifier); -- cgit v1.2.3 From b881ba8faa5c7689eb1cb487ad891c46dbbed0e8 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Mon, 4 Dec 2023 15:13:49 +0200 Subject: Revert "drm/atomic: Move framebuffer checks to helper" This reverts commit 4ba6b7a646321e740c7f2d80c90505019c4e8fce. 
Although the Solid Fill planes patchset got all reviews and acknowledgements, it doesn't fulfill requirements for the new uABI. It has neither corresponding open-source userspace implementation nor the IGT tests coverage. Reverting this patchset until userspace obligations are fulfilled. Acked-by: Simon Ser Acked-by: Maxime Ripard Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231204131455.19023-3-dmitry.baryshkov@linaro.org --- drivers/gpu/drm/drm_atomic.c | 130 +++++++++++++++++++------------------------ 1 file changed, 57 insertions(+), 73 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index c6f2b86c48ae..1339785fbe80 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -589,76 +589,6 @@ plane_switching_crtc(const struct drm_plane_state *old_plane_state, return true; } -static int drm_atomic_plane_check_fb(const struct drm_plane_state *state) -{ - struct drm_plane *plane = state->plane; - const struct drm_framebuffer *fb = state->fb; - struct drm_mode_rect *clips; - - uint32_t num_clips; - unsigned int fb_width, fb_height; - int ret; - - /* Check whether this plane supports the fb pixel format. */ - ret = drm_plane_check_pixel_format(plane, fb->format->format, - fb->modifier); - - if (ret) { - drm_dbg_atomic(plane->dev, - "[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n", - plane->base.id, plane->name, - &fb->format->format, fb->modifier); - return ret; - } - - fb_width = fb->width << 16; - fb_height = fb->height << 16; - - /* Make sure source coordinates are inside the fb. */ - if (state->src_w > fb_width || - state->src_x > fb_width - state->src_w || - state->src_h > fb_height || - state->src_y > fb_height - state->src_h) { - drm_dbg_atomic(plane->dev, - "[PLANE:%d:%s] invalid source coordinates " - "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", - plane->base.id, plane->name, - state->src_w >> 16, - ((state->src_w & 0xffff) * 15625) >> 10, - state->src_h >> 16, - ((state->src_h & 0xffff) * 15625) >> 10, - state->src_x >> 16, - ((state->src_x & 0xffff) * 15625) >> 10, - state->src_y >> 16, - ((state->src_y & 0xffff) * 15625) >> 10, - fb->width, fb->height); - return -ENOSPC; - } - - clips = __drm_plane_get_damage_clips(state); - num_clips = drm_plane_get_damage_clips_count(state); - - /* Make sure damage clips are valid and inside the fb. */ - while (num_clips > 0) { - if (clips->x1 >= clips->x2 || - clips->y1 >= clips->y2 || - clips->x1 < 0 || - clips->y1 < 0 || - clips->x2 > fb_width || - clips->y2 > fb_height) { - drm_dbg_atomic(plane->dev, - "[PLANE:%d:%s] invalid damage clip %d %d %d %d\n", - plane->base.id, plane->name, clips->x1, - clips->y1, clips->x2, clips->y2); - return -EINVAL; - } - clips++; - num_clips--; - } - - return 0; -} - /** * drm_atomic_plane_check - check plane state * @old_plane_state: old plane state to check @@ -675,6 +605,9 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, struct drm_plane *plane = new_plane_state->plane; struct drm_crtc *crtc = new_plane_state->crtc; const struct drm_framebuffer *fb = new_plane_state->fb; + unsigned int fb_width, fb_height; + struct drm_mode_rect *clips; + uint32_t num_clips; int ret; /* either *both* CRTC and FB must be set, or neither */ @@ -701,6 +634,17 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, return -EINVAL; } + /* Check whether this plane supports the fb pixel format. 
*/ + ret = drm_plane_check_pixel_format(plane, fb->format->format, + fb->modifier); + if (ret) { + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n", + plane->base.id, plane->name, + &fb->format->format, fb->modifier); + return ret; + } + /* Give drivers some help against integer overflows */ if (new_plane_state->crtc_w > INT_MAX || new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w || @@ -714,10 +658,50 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, return -ERANGE; } + fb_width = fb->width << 16; + fb_height = fb->height << 16; - ret = drm_atomic_plane_check_fb(new_plane_state); - if (ret) - return ret; + /* Make sure source coordinates are inside the fb. */ + if (new_plane_state->src_w > fb_width || + new_plane_state->src_x > fb_width - new_plane_state->src_w || + new_plane_state->src_h > fb_height || + new_plane_state->src_y > fb_height - new_plane_state->src_h) { + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] invalid source coordinates " + "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", + plane->base.id, plane->name, + new_plane_state->src_w >> 16, + ((new_plane_state->src_w & 0xffff) * 15625) >> 10, + new_plane_state->src_h >> 16, + ((new_plane_state->src_h & 0xffff) * 15625) >> 10, + new_plane_state->src_x >> 16, + ((new_plane_state->src_x & 0xffff) * 15625) >> 10, + new_plane_state->src_y >> 16, + ((new_plane_state->src_y & 0xffff) * 15625) >> 10, + fb->width, fb->height); + return -ENOSPC; + } + + clips = __drm_plane_get_damage_clips(new_plane_state); + num_clips = drm_plane_get_damage_clips_count(new_plane_state); + + /* Make sure damage clips are valid and inside the fb. */ + while (num_clips > 0) { + if (clips->x1 >= clips->x2 || + clips->y1 >= clips->y2 || + clips->x1 < 0 || + clips->y1 < 0 || + clips->x2 > fb_width || + clips->y2 > fb_height) { + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] invalid damage clip %d %d %d %d\n", + plane->base.id, plane->name, clips->x1, + clips->y1, clips->x2, clips->y2); + return -EINVAL; + } + clips++; + num_clips--; + } if (plane_switching_crtc(old_plane_state, new_plane_state)) { drm_dbg_atomic(plane->dev, -- cgit v1.2.3 From a513f095b941e9e96196f04f11f253d763310c08 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Mon, 4 Dec 2023 15:13:50 +0200 Subject: Revert "drm/atomic: Add solid fill data to plane state dump" This reverts commit e86413f5442ee094e66b3e75f2d3419ed0df9520. Although the Solid Fill planes patchset got all reviews and acknowledgements, it doesn't fulfill requirements for the new uABI. It has neither corresponding open-source userspace implementation nor the IGT tests coverage. Reverting this patchset until userspace obligations are fulfilled. Acked-by: Simon Ser Acked-by: Maxime Ripard Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231204131455.19023-4-dmitry.baryshkov@linaro.org --- drivers/gpu/drm/drm_atomic.c | 4 ---- drivers/gpu/drm/drm_plane.c | 8 -------- include/drm/drm_plane.h | 3 --- 3 files changed, 15 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 1339785fbe80..02aa7df832cc 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -726,10 +726,6 @@ static void drm_atomic_plane_print_state(struct drm_printer *p, drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0); if (state->fb) drm_framebuffer_print_info(p, 2, state->fb); - drm_printf(p, "\tsolid_fill=%u\n", - state->solid_fill_blob ? 
state->solid_fill_blob->base.id : 0); - if (state->solid_fill_blob) - drm_plane_solid_fill_print_info(p, 2, state); drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest)); drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src)); drm_printf(p, "\trotation=%x\n", state->rotation); diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index c65c72701dd2..c8dbe5ae60cc 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -1641,14 +1641,6 @@ __drm_plane_get_damage_clips(const struct drm_plane_state *state) state->fb_damage_clips->data : NULL); } -void drm_plane_solid_fill_print_info(struct drm_printer *p, unsigned int indent, - const struct drm_plane_state *state) -{ - drm_printf_indent(p, indent, "r=0x%08x\n", state->solid_fill.r); - drm_printf_indent(p, indent, "g=0x%08x\n", state->solid_fill.g); - drm_printf_indent(p, indent, "b=0x%08x\n", state->solid_fill.b); -} - /** * drm_plane_get_damage_clips - Returns damage clips. * @state: Plane state. diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index d14e2f1db234..4b7af4381bbe 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -1025,9 +1025,6 @@ drm_plane_get_damage_clips_count(const struct drm_plane_state *state); struct drm_mode_rect * drm_plane_get_damage_clips(const struct drm_plane_state *state); -void drm_plane_solid_fill_print_info(struct drm_printer *p, unsigned int indent, - const struct drm_plane_state *state); - int drm_plane_create_scaling_filter_property(struct drm_plane *plane, unsigned int supported_filters); -- cgit v1.2.3 From fe28421d4fedb90cadcef4932be0e8364f79283d Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Mon, 4 Dec 2023 15:13:51 +0200 Subject: Revert "drm/atomic: Add pixel source to plane state dump" This reverts commit 8283ac7871a959848e09fc6593b8c12b8febfee6. Although the Solid Fill planes patchset got all reviews and acknowledgements, it doesn't fulfill requirements for the new uABI. It has neither corresponding open-source userspace implementation nor the IGT tests coverage. Reverting this patchset until userspace obligations are fulfilled. Acked-by: Simon Ser Acked-by: Maxime Ripard Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231204131455.19023-5-dmitry.baryshkov@linaro.org --- drivers/gpu/drm/drm_atomic.c | 1 - drivers/gpu/drm/drm_blend.c | 1 - drivers/gpu/drm/drm_crtc_internal.h | 1 - 3 files changed, 3 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 02aa7df832cc..f1a503aafe5a 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -722,7 +722,6 @@ static void drm_atomic_plane_print_state(struct drm_printer *p, drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name); drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)"); - drm_printf(p, "\tpixel-source=%s\n", drm_get_pixel_source_name(state->pixel_source)); drm_printf(p, "\tfb=%u\n", state->fb ? 
state->fb->base.id : 0); if (state->fb) drm_framebuffer_print_info(p, 2, state->fb); diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index 9c1608f7c1df..37b31b7e5ce5 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -647,7 +647,6 @@ static const struct drm_prop_enum_list drm_pixel_source_enum_list[] = { { DRM_PLANE_PIXEL_SOURCE_FB, "FB" }, { DRM_PLANE_PIXEL_SOURCE_SOLID_FILL, "SOLID_FILL" }, }; -DRM_ENUM_NAME_FN(drm_get_pixel_source_name, drm_pixel_source_enum_list); /** * drm_plane_create_pixel_source_property - create a new pixel source property diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index 4114675c0728..a514d5207e41 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -269,7 +269,6 @@ int drm_plane_check_pixel_format(struct drm_plane *plane, u32 format, u64 modifier); struct drm_mode_rect * __drm_plane_get_damage_clips(const struct drm_plane_state *state); -const char *drm_get_pixel_source_name(int val); /* drm_bridge.c */ void drm_bridge_detach(struct drm_bridge *bridge); -- cgit v1.2.3 From 5fb1ad3f5725c5c4d1a0c24ba4f82f239dc6878d Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Mon, 4 Dec 2023 15:13:52 +0200 Subject: Revert "drm: Add solid fill pixel source" This reverts commit 4b64167042927531f4cfaf035b8f88c2f7a05f06. Although the Solid Fill planes patchset got all reviews and acknowledgements, it doesn't fulfill requirements for the new uABI. It has neither corresponding open-source userspace implementation nor the IGT tests coverage. Reverting this patchset until userspace obligations are fulfilled. Acked-by: Simon Ser Acked-by: Maxime Ripard Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231204131455.19023-6-dmitry.baryshkov@linaro.org --- drivers/gpu/drm/drm_blend.c | 10 +--------- include/drm/drm_plane.h | 1 - 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index 37b31b7e5ce5..665c5d9b056f 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -204,9 +204,6 @@ * "FB": * Framebuffer source set by the "FB_ID" property. * - * "SOLID_FILL": - * Solid fill color source set by the "solid_fill" property. - * * solid_fill: * solid_fill is set up with drm_plane_create_solid_fill_property(). It * contains pixel data that drivers can use to fill a plane. @@ -645,7 +642,6 @@ EXPORT_SYMBOL(drm_plane_create_blend_mode_property); static const struct drm_prop_enum_list drm_pixel_source_enum_list[] = { { DRM_PLANE_PIXEL_SOURCE_NONE, "NONE" }, { DRM_PLANE_PIXEL_SOURCE_FB, "FB" }, - { DRM_PLANE_PIXEL_SOURCE_SOLID_FILL, "SOLID_FILL" }, }; /** @@ -670,9 +666,6 @@ static const struct drm_prop_enum_list drm_pixel_source_enum_list[] = { * "FB": * Framebuffer pixel source * - * "SOLID_FILL": - * Solid fill color pixel source - * * Returns: * Zero on success, negative errno on failure. 
*/ @@ -682,8 +675,7 @@ int drm_plane_create_pixel_source_property(struct drm_plane *plane, struct drm_device *dev = plane->dev; struct drm_property *prop; static const unsigned int valid_source_mask = BIT(DRM_PLANE_PIXEL_SOURCE_FB) | - BIT(DRM_PLANE_PIXEL_SOURCE_NONE) | - BIT(DRM_PLANE_PIXEL_SOURCE_SOLID_FILL); + BIT(DRM_PLANE_PIXEL_SOURCE_NONE); int i; /* FB is supported by default */ diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 4b7af4381bbe..5bac644d4cc3 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -43,7 +43,6 @@ enum drm_scaling_filter { enum drm_plane_pixel_source { DRM_PLANE_PIXEL_SOURCE_NONE, DRM_PLANE_PIXEL_SOURCE_FB, - DRM_PLANE_PIXEL_SOURCE_SOLID_FILL, DRM_PLANE_PIXEL_SOURCE_MAX }; -- cgit v1.2.3 From e5fba1ada1c1d676438138d815acd8f427a1eaf0 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Mon, 4 Dec 2023 15:13:53 +0200 Subject: Revert "drm: Introduce solid fill DRM plane property" This reverts commit 85863a4e16e77079ee14865905ddc3ef9483a640. Although the Solid Fill planes patchset got all reviews and acknowledgements, it doesn't fulfill requirements for the new uABI. It has neither corresponding open-source userspace implementation nor the IGT tests coverage. Reverting this patchset until userspace obligations are fulfilled. Acked-by: Simon Ser Acked-by: Maxime Ripard Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231204131455.19023-7-dmitry.baryshkov@linaro.org --- drivers/gpu/drm/drm_atomic_state_helper.c | 9 -------- drivers/gpu/drm/drm_atomic_uapi.c | 26 ---------------------- drivers/gpu/drm/drm_blend.c | 30 -------------------------- include/drm/drm_blend.h | 1 - include/drm/drm_plane.h | 36 ------------------------------- include/uapi/drm/drm_mode.h | 24 --------------------- 6 files changed, 126 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index f6c76cea8be4..311b04edf742 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -254,11 +254,6 @@ void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state, plane_state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI; plane_state->pixel_source = DRM_PLANE_PIXEL_SOURCE_FB; - if (plane_state->solid_fill_blob) { - drm_property_blob_put(plane_state->solid_fill_blob); - plane_state->solid_fill_blob = NULL; - } - if (plane->color_encoding_property) { if (!drm_object_property_get_default_value(&plane->base, plane->color_encoding_property, @@ -355,9 +350,6 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane, if (state->fb) drm_framebuffer_get(state->fb); - if (state->solid_fill_blob) - drm_property_blob_get(state->solid_fill_blob); - state->fence = NULL; state->commit = NULL; state->fb_damage_clips = NULL; @@ -407,7 +399,6 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state) drm_crtc_commit_put(state->commit); drm_property_blob_put(state->fb_damage_clips); - drm_property_blob_put(state->solid_fill_blob); } EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state); diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index d7ae8e2c0265..bd7140531948 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -316,20 +316,6 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, } EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector); -static void drm_atomic_set_solid_fill_prop(struct 
drm_plane_state *state) -{ - struct drm_mode_solid_fill *user_info; - - if (!state->solid_fill_blob) - return; - - user_info = (struct drm_mode_solid_fill *)state->solid_fill_blob->data; - - state->solid_fill.r = user_info->r; - state->solid_fill.g = user_info->g; - state->solid_fill.b = user_info->b; -} - static void set_out_fence_for_crtc(struct drm_atomic_state *state, struct drm_crtc *crtc, s32 __user *fence_ptr) { @@ -578,15 +564,6 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane, state->src_h = val; } else if (property == plane->pixel_source_property) { state->pixel_source = val; - } else if (property == plane->solid_fill_property) { - ret = drm_atomic_replace_property_blob_from_id(dev, - &state->solid_fill_blob, - val, sizeof(struct drm_mode_solid_fill), - -1, &replaced); - if (ret) - return ret; - - drm_atomic_set_solid_fill_prop(state); } else if (property == plane->alpha_property) { state->alpha = val; } else if (property == plane->blend_mode_property) { @@ -677,9 +654,6 @@ drm_atomic_plane_get_property(struct drm_plane *plane, *val = state->src_h; } else if (property == plane->pixel_source_property) { *val = state->pixel_source; - } else if (property == plane->solid_fill_property) { - *val = state->solid_fill_blob ? - state->solid_fill_blob->base.id : 0; } else if (property == plane->alpha_property) { *val = state->alpha; } else if (property == plane->blend_mode_property) { diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index 665c5d9b056f..fce734cdb85b 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -204,10 +204,6 @@ * "FB": * Framebuffer source set by the "FB_ID" property. * - * solid_fill: - * solid_fill is set up with drm_plane_create_solid_fill_property(). It - * contains pixel data that drivers can use to fill a plane. - * * Note that all the property extensions described here apply either to the * plane or the CRTC (e.g. for the background color, which currently is not * exposed and assumed to be black). @@ -713,29 +709,3 @@ int drm_plane_create_pixel_source_property(struct drm_plane *plane, return 0; } EXPORT_SYMBOL(drm_plane_create_pixel_source_property); - -/** - * drm_plane_create_solid_fill_property - create a new solid_fill property - * @plane: drm plane - * - * This creates a new property blob that holds pixel data for solid fill planes. - * The property is exposed to userspace as a property blob called "solid_fill". - * - * For information on what the blob contains, see `drm_mode_solid_fill`. 
- */ -int drm_plane_create_solid_fill_property(struct drm_plane *plane) -{ - struct drm_property *prop; - - prop = drm_property_create(plane->dev, - DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB, - "solid_fill", 0); - if (!prop) - return -ENOMEM; - - drm_object_attach_property(&plane->base, prop, 0); - plane->solid_fill_property = prop; - - return 0; -} -EXPORT_SYMBOL(drm_plane_create_solid_fill_property); diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h index e7158fbee389..122bbfbaae33 100644 --- a/include/drm/drm_blend.h +++ b/include/drm/drm_blend.h @@ -60,5 +60,4 @@ int drm_plane_create_blend_mode_property(struct drm_plane *plane, unsigned int supported_modes); int drm_plane_create_pixel_source_property(struct drm_plane *plane, unsigned long extra_sources); -int drm_plane_create_solid_fill_property(struct drm_plane *plane); #endif diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 5bac644d4cc3..bc0176ba25be 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -46,18 +46,6 @@ enum drm_plane_pixel_source { DRM_PLANE_PIXEL_SOURCE_MAX }; -/** - * struct solid_fill_property - RGB values for solid fill plane - * - * TODO: Add solid fill source and corresponding pixel source - * that supports RGBA color - */ -struct drm_solid_fill { - uint32_t r; - uint32_t g; - uint32_t b; -}; - /** * struct drm_plane_state - mutable plane state * @@ -146,23 +134,6 @@ struct drm_plane_state { */ enum drm_plane_pixel_source pixel_source; - /** - * @solid_fill_blob: - * - * Blob containing relevant information for a solid fill plane - * including RGB color values. See - * drm_plane_create_solid_fill_property() for more details. - */ - struct drm_property_blob *solid_fill_blob; - - /** - * @solid_fill: - * - * Pixel data for solid fill planes. See - * drm_plane_create_solid_fill_property() for more details. - */ - struct drm_solid_fill solid_fill; - /** * @alpha: * Opacity of the plane with 0 as completely transparent and 0xffff as @@ -763,13 +734,6 @@ struct drm_plane { */ struct drm_property *pixel_source_property; - /** - * @solid_fill_property: - * Optional solid_fill property for this plane. See - * drm_plane_create_solid_fill_property(). - */ - struct drm_property *solid_fill_property; - /** * @alpha_property: * Optional alpha property for this plane. See diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index cbd943a8c88b..95630f170110 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -259,30 +259,6 @@ struct drm_mode_modeinfo { char name[DRM_DISPLAY_MODE_LEN]; }; -/** - * struct drm_mode_solid_fill - User info for solid fill planes - * - * This is the userspace API solid fill information structure. - * - * Userspace can enable solid fill planes by assigning the plane "solid_fill" - * property to a blob containing a single drm_mode_solid_fill struct populated with an RGB323232 - * color and setting the pixel source to "SOLID_FILL". 
- * - * For information on the plane property, see drm_plane_create_solid_fill_property() - * - * @r: Red color value of single pixel - * @g: Green color value of single pixel - * @b: Blue color value of single pixel - * @pad: padding, must be zero - */ -struct drm_mode_solid_fill { - __u32 r; - __u32 g; - __u32 b; - __u32 pad; -}; - - struct drm_mode_card_res { __u64 fb_id_ptr; __u64 crtc_id_ptr; -- cgit v1.2.3 From 90422201f8f2b4e26ab7bd43b92786a11c1ffebf Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Mon, 4 Dec 2023 15:13:54 +0200 Subject: Revert "drm: Introduce pixel_source DRM plane property" This reverts commit e50e5fed41c7eed2db4119645bf3480ec43fec11. Although the Solid Fill planes patchset got all reviews and acknowledgements, it doesn't fulfill requirements for the new uABI. It has neither corresponding open-source userspace implementation nor the IGT tests coverage. Reverting this patchset until userspace obligations are fulfilled. Acked-by: Simon Ser Acked-by: Maxime Ripard Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231204131455.19023-8-dmitry.baryshkov@linaro.org --- drivers/gpu/drm/drm_atomic_state_helper.c | 1 - drivers/gpu/drm/drm_atomic_uapi.c | 4 -- drivers/gpu/drm/drm_blend.c | 94 ------------------------------- drivers/gpu/drm/drm_plane.c | 19 ++----- include/drm/drm_blend.h | 2 - include/drm/drm_plane.h | 21 ------- 6 files changed, 4 insertions(+), 137 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index 311b04edf742..54975de44a0e 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -252,7 +252,6 @@ void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state, plane_state->alpha = DRM_BLEND_ALPHA_OPAQUE; plane_state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI; - plane_state->pixel_source = DRM_PLANE_PIXEL_SOURCE_FB; if (plane->color_encoding_property) { if (!drm_object_property_get_default_value(&plane->base, diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index bd7140531948..aee4a65d4959 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -562,8 +562,6 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane, state->src_w = val; } else if (property == config->prop_src_h) { state->src_h = val; - } else if (property == plane->pixel_source_property) { - state->pixel_source = val; } else if (property == plane->alpha_property) { state->alpha = val; } else if (property == plane->blend_mode_property) { @@ -652,8 +650,6 @@ drm_atomic_plane_get_property(struct drm_plane *plane, *val = state->src_w; } else if (property == config->prop_src_h) { *val = state->src_h; - } else if (property == plane->pixel_source_property) { - *val = state->pixel_source; } else if (property == plane->alpha_property) { *val = state->alpha; } else if (property == plane->blend_mode_property) { diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index fce734cdb85b..6e74de833466 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -185,25 +185,6 @@ * plane does not expose the "alpha" property, then this is * assumed to be 1.0 * - * pixel_source: - * pixel_source is set up with drm_plane_create_pixel_source_property(). - * It is used to toggle the active source of pixel data for the plane. 
- * The plane will only display data from the set pixel_source -- any - * data from other sources will be ignored. - * - * For non-framebuffer sources, if pixel_source is set to a non-framebuffer - * and non-NONE source, and the corresponding source property is NULL, then - * the atomic commit should return an error. - * - * Possible values: - * - * "NONE": - * No active pixel source. - * Committing with a NONE pixel source will disable the plane. - * - * "FB": - * Framebuffer source set by the "FB_ID" property. - * * Note that all the property extensions described here apply either to the * plane or the CRTC (e.g. for the background color, which currently is not * exposed and assumed to be black). @@ -634,78 +615,3 @@ int drm_plane_create_blend_mode_property(struct drm_plane *plane, return 0; } EXPORT_SYMBOL(drm_plane_create_blend_mode_property); - -static const struct drm_prop_enum_list drm_pixel_source_enum_list[] = { - { DRM_PLANE_PIXEL_SOURCE_NONE, "NONE" }, - { DRM_PLANE_PIXEL_SOURCE_FB, "FB" }, -}; - -/** - * drm_plane_create_pixel_source_property - create a new pixel source property - * @plane: DRM plane - * @extra_sources: Bitmask of additional supported pixel_sources for the driver. - * DRM_PLANE_PIXEL_SOURCE_FB and DRM_PLANE_PIXEL_SOURCE_NONE will - * always be enabled as supported sources. - * - * This creates a new property describing the current source of pixel data for the - * plane. The pixel_source will be initialized as DRM_PLANE_PIXEL_SOURCE_FB by default. - * - * Drivers can set a custom default source by overriding the pixel_source value in - * drm_plane_funcs.reset() - * - * The property is exposed to userspace as an enumeration property called - * "pixel_source" and has the following enumeration values: - * - * "NONE": - * No active pixel source - * - * "FB": - * Framebuffer pixel source - * - * Returns: - * Zero on success, negative errno on failure. 
- */ -int drm_plane_create_pixel_source_property(struct drm_plane *plane, - unsigned long extra_sources) -{ - struct drm_device *dev = plane->dev; - struct drm_property *prop; - static const unsigned int valid_source_mask = BIT(DRM_PLANE_PIXEL_SOURCE_FB) | - BIT(DRM_PLANE_PIXEL_SOURCE_NONE); - int i; - - /* FB is supported by default */ - unsigned long supported_sources = extra_sources | - BIT(DRM_PLANE_PIXEL_SOURCE_FB) | - BIT(DRM_PLANE_PIXEL_SOURCE_NONE); - - if (WARN_ON(supported_sources & ~valid_source_mask)) - return -EINVAL; - - prop = drm_property_create(dev, DRM_MODE_PROP_ENUM | DRM_MODE_PROP_ATOMIC, "pixel_source", - hweight32(supported_sources)); - - if (!prop) - return -ENOMEM; - - for (i = 0; i < ARRAY_SIZE(drm_pixel_source_enum_list); i++) { - int ret; - - if (!test_bit(drm_pixel_source_enum_list[i].type, &supported_sources)) - continue; - - ret = drm_property_add_enum(prop, drm_pixel_source_enum_list[i].type, - drm_pixel_source_enum_list[i].name); - if (ret) { - drm_property_destroy(dev, prop); - - return ret; - } - } - - drm_object_attach_property(&plane->base, prop, DRM_PLANE_PIXEL_SOURCE_FB); - plane->pixel_source_property = prop; - - return 0; -} -EXPORT_SYMBOL(drm_plane_create_pixel_source_property); diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index c8dbe5ae60cc..9e8e4c60983d 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -953,14 +953,6 @@ bool drm_any_plane_has_format(struct drm_device *dev, } EXPORT_SYMBOL(drm_any_plane_has_format); -static bool drm_plane_needs_disable(struct drm_plane_state *state, struct drm_framebuffer *fb) -{ - if (state->pixel_source == DRM_PLANE_PIXEL_SOURCE_NONE) - return true; - - return state->pixel_source == DRM_PLANE_PIXEL_SOURCE_FB && fb == NULL; -} - /* * __setplane_internal - setplane handler for internal callers * @@ -983,8 +975,8 @@ static int __setplane_internal(struct drm_plane *plane, WARN_ON(drm_drv_uses_atomic_modeset(plane->dev)); - /* No visible data means shut it down */ - if (drm_plane_needs_disable(plane->state, fb)) { + /* No fb means shut it down */ + if (!fb) { plane->old_fb = plane->fb; ret = plane->funcs->disable_plane(plane, ctx); if (!ret) { @@ -1035,8 +1027,8 @@ static int __setplane_atomic(struct drm_plane *plane, WARN_ON(!drm_drv_uses_atomic_modeset(plane->dev)); - /* No visible data means shut it down */ - if (drm_plane_needs_disable(plane->state, fb)) + /* No fb means shut it down */ + if (!fb) return plane->funcs->disable_plane(plane, ctx); /* @@ -1109,9 +1101,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data, return -ENOENT; } - if (plane->state) - plane->state->pixel_source = DRM_PLANE_PIXEL_SOURCE_FB; - if (plane_req->fb_id) { fb = drm_framebuffer_lookup(dev, file_priv, plane_req->fb_id); if (!fb) { diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h index 122bbfbaae33..88bdfec3bd88 100644 --- a/include/drm/drm_blend.h +++ b/include/drm/drm_blend.h @@ -58,6 +58,4 @@ int drm_atomic_normalize_zpos(struct drm_device *dev, struct drm_atomic_state *state); int drm_plane_create_blend_mode_property(struct drm_plane *plane, unsigned int supported_modes); -int drm_plane_create_pixel_source_property(struct drm_plane *plane, - unsigned long extra_sources); #endif diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index bc0176ba25be..c6565a6f9324 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -40,12 +40,6 @@ enum drm_scaling_filter { DRM_SCALING_FILTER_NEAREST_NEIGHBOR, }; -enum drm_plane_pixel_source { - 
DRM_PLANE_PIXEL_SOURCE_NONE, - DRM_PLANE_PIXEL_SOURCE_FB, - DRM_PLANE_PIXEL_SOURCE_MAX -}; - /** * struct drm_plane_state - mutable plane state * @@ -126,14 +120,6 @@ struct drm_plane_state { /** @hotspot_y: y offset to mouse cursor hotspot */ int32_t hotspot_x, hotspot_y; - /** - * @pixel_source: - * - * Source of pixel information for the plane. See - * drm_plane_create_pixel_source_property() for more details. - */ - enum drm_plane_pixel_source pixel_source; - /** * @alpha: * Opacity of the plane with 0 as completely transparent and 0xffff as @@ -727,13 +713,6 @@ struct drm_plane { */ struct drm_plane_state *state; - /* - * @pixel_source_property: - * Optional pixel_source property for this plane. See - * drm_plane_create_pixel_source_property(). - */ - struct drm_property *pixel_source_property; - /** * @alpha_property: * Optional alpha property for this plane. See -- cgit v1.2.3 From dce94061f0d02f5ab355390a6e63d3dbea938b72 Mon Sep 17 00:00:00 2001 From: Harshit Mogalapalli Date: Mon, 4 Dec 2023 04:21:01 -0800 Subject: drm/v3d: Fix missing error code in v3d_submit_cpu_ioctl() Smatch warns: drivers/gpu/drm/v3d/v3d_submit.c:1222 v3d_submit_cpu_ioctl() warn: missing error code 'ret' When there is no job type or job is submitted with wrong number of BOs it is an error path, ret is zero at this point which is incorrect return. Fix this by changing it to -EINVAL. Fixes: aafc1a2bea67 ("drm/v3d: Add a CPU job submission") Signed-off-by: Harshit Mogalapalli Reviewed-by: Melissa Wen Signed-off-by: Melissa Wen Link: https://patchwork.freedesktop.org/patch/msgid/20231204122102.181298-1-harshit.m.mogalapalli@oracle.com --- drivers/gpu/drm/v3d/v3d_submit.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c index d7a9da2484fd..fcff41dd2315 100644 --- a/drivers/gpu/drm/v3d/v3d_submit.c +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -1219,11 +1219,13 @@ v3d_submit_cpu_ioctl(struct drm_device *dev, void *data, /* Every CPU job must have a CPU job user extension */ if (!cpu_job->job_type) { DRM_DEBUG("CPU job must have a CPU job user extension.\n"); + ret = -EINVAL; goto fail; } if (args->bo_handle_count != cpu_job_bo_handle_count[cpu_job->job_type]) { DRM_DEBUG("This CPU job was not submitted with the proper number of BOs.\n"); + ret = -EINVAL; goto fail; } -- cgit v1.2.3 From e759f2ca29d918d3db57a61cdf838025beb03465 Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Wed, 29 Nov 2023 23:08:00 +0100 Subject: drm/gpuvm: fall back to drm_exec_lock_obj() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fall back to drm_exec_lock_obj() if num_fences is zero for the drm_gpuvm_prepare_* function family. Otherwise dma_resv_reserve_fences() would actually allocate slots even though num_fences is zero. 
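For illustration, the caller-visible behaviour this change relies on, as a hypothetical driver helper (only the drm_gpuvm_prepare_objects() call reflects the API touched here):

	static int foo_lock_gpuvm(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
				  bool will_attach_fence)
	{
		/*
		 * num_fences == 0: objects are only locked (drm_exec_lock_obj()),
		 * no dma-resv fence slots are reserved.
		 * num_fences > 0: objects are locked and that many slots reserved.
		 */
		return drm_gpuvm_prepare_objects(gpuvm, exec,
						 will_attach_fence ? 1 : 0);
	}
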
Cc: Christian König Acked-by: Donald Robson Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231129220835.297885-2-dakr@redhat.com --- drivers/gpu/drm/drm_gpuvm.c | 43 ++++++++++++++++++++++++++++++++++++++----- include/drm/drm_gpuvm.h | 23 +++-------------------- 2 files changed, 41 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index 54f5e8851de5..07a6676bc4f9 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -1085,6 +1085,37 @@ drm_gpuvm_put(struct drm_gpuvm *gpuvm) } EXPORT_SYMBOL_GPL(drm_gpuvm_put); +static int +exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj, + unsigned int num_fences) +{ + return num_fences ? drm_exec_prepare_obj(exec, obj, num_fences) : + drm_exec_lock_obj(exec, obj); +} + +/** + * drm_gpuvm_prepare_vm() - prepare the GPUVMs common dma-resv + * @gpuvm: the &drm_gpuvm + * @exec: the &drm_exec context + * @num_fences: the amount of &dma_fences to reserve + * + * Calls drm_exec_prepare_obj() for the GPUVMs dummy &drm_gem_object; if + * @num_fences is zero drm_exec_lock_obj() is called instead. + * + * Using this function directly, it is the drivers responsibility to call + * drm_exec_init() and drm_exec_fini() accordingly. + * + * Returns: 0 on success, negative error code on failure. + */ +int +drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + unsigned int num_fences) +{ + return exec_prepare_obj(exec, gpuvm->r_obj, num_fences); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_vm); + static int __drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm, struct drm_exec *exec, @@ -1095,7 +1126,7 @@ __drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm, int ret = 0; for_each_vm_bo_in_list(gpuvm, extobj, &extobjs, vm_bo) { - ret = drm_exec_prepare_obj(exec, vm_bo->obj, num_fences); + ret = exec_prepare_obj(exec, vm_bo->obj, num_fences); if (ret) break; } @@ -1116,7 +1147,7 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm, drm_gpuvm_resv_assert_held(gpuvm); list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) { - ret = drm_exec_prepare_obj(exec, vm_bo->obj, num_fences); + ret = exec_prepare_obj(exec, vm_bo->obj, num_fences); if (ret) break; @@ -1134,7 +1165,8 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm, * @num_fences: the amount of &dma_fences to reserve * * Calls drm_exec_prepare_obj() for all &drm_gem_objects the given - * &drm_gpuvm contains mappings of. + * &drm_gpuvm contains mappings of; if @num_fences is zero drm_exec_lock_obj() + * is called instead. * * Using this function directly, it is the drivers responsibility to call * drm_exec_init() and drm_exec_fini() accordingly. @@ -1171,7 +1203,8 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_objects); * @num_fences: the amount of &dma_fences to reserve * * Calls drm_exec_prepare_obj() for all &drm_gem_objects mapped between @addr - * and @addr + @range. + * and @addr + @range; if @num_fences is zero drm_exec_lock_obj() is called + * instead. * * Returns: 0 on success, negative error code on failure. 
*/ @@ -1186,7 +1219,7 @@ drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec, drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) { struct drm_gem_object *obj = va->gem.obj; - ret = drm_exec_prepare_obj(exec, obj, num_fences); + ret = exec_prepare_obj(exec, obj, num_fences); if (ret) return ret; } diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h index f94fec9a8517..b3f82ec7fb17 100644 --- a/include/drm/drm_gpuvm.h +++ b/include/drm/drm_gpuvm.h @@ -544,26 +544,9 @@ struct drm_gpuvm_exec { } extra; }; -/** - * drm_gpuvm_prepare_vm() - prepare the GPUVMs common dma-resv - * @gpuvm: the &drm_gpuvm - * @exec: the &drm_exec context - * @num_fences: the amount of &dma_fences to reserve - * - * Calls drm_exec_prepare_obj() for the GPUVMs dummy &drm_gem_object. - * - * Using this function directly, it is the drivers responsibility to call - * drm_exec_init() and drm_exec_fini() accordingly. - * - * Returns: 0 on success, negative error code on failure. - */ -static inline int -drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm, - struct drm_exec *exec, - unsigned int num_fences) -{ - return drm_exec_prepare_obj(exec, gpuvm->r_obj, num_fences); -} +int drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + unsigned int num_fences); int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm, struct drm_exec *exec, -- cgit v1.2.3 From 4bc736f890cec126246a1d65d3b556763670a8d4 Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Wed, 29 Nov 2023 23:08:01 +0100 Subject: drm/imagination: vm: make use of GPUVM's drm_exec helper Make use of GPUVM's drm_exec helper functions preventing direct access to GPUVM internal data structures, such as the external object list. This is especially important to ensure following the locking rules around the GPUVM external object list. Fixes: ff5f643de0bf ("drm/imagination: Add GEM and VM related code") Reviewed-by: Donald Robson Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231129220835.297885-3-dakr@redhat.com --- drivers/gpu/drm/imagination/pvr_vm.c | 91 ++++++++++++++---------------------- 1 file changed, 36 insertions(+), 55 deletions(-) diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 6f4e07effad2..f42345fbe4bf 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -327,48 +327,6 @@ err_bind_op_fini: return err; } -static int -pvr_vm_bind_op_lock_resvs(struct drm_exec *exec, struct pvr_vm_bind_op *bind_op) -{ - drm_exec_until_all_locked(exec) { - struct drm_gem_object *r_obj = &bind_op->vm_ctx->dummy_gem; - struct drm_gpuvm *gpuvm = &bind_op->vm_ctx->gpuvm_mgr; - struct pvr_gem_object *pvr_obj = bind_op->pvr_obj; - struct drm_gpuvm_bo *gpuvm_bo; - - /* Acquire lock on the vm_context's reserve object. */ - int err = drm_exec_lock_obj(exec, r_obj); - - drm_exec_retry_on_contention(exec); - if (err) - return err; - - /* Acquire lock on all BOs in the context. */ - list_for_each_entry(gpuvm_bo, &gpuvm->extobj.list, - list.entry.extobj) { - err = drm_exec_lock_obj(exec, gpuvm_bo->obj); - - drm_exec_retry_on_contention(exec); - if (err) - return err; - } - - /* Unmap operations don't have an object to lock. */ - if (!pvr_obj) - break; - - /* Acquire lock on the GEM being mapped. */ - err = drm_exec_lock_obj(exec, - gem_from_pvr_gem(bind_op->pvr_obj)); - - drm_exec_retry_on_contention(exec); - if (err) - return err; - } - - return 0; -} - /** * pvr_vm_gpuva_map() - Insert a mapping into a memory context. 
* @op: gpuva op containing the remap details. @@ -725,6 +683,20 @@ void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file) } } +static int +pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec) +{ + struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv; + struct pvr_gem_object *pvr_obj = bind_op->pvr_obj; + + /* Unmap operations don't have an object to lock. */ + if (!pvr_obj) + return 0; + + /* Acquire lock on the GEM being mapped. */ + return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj)); +} + /** * pvr_vm_map() - Map a section of physical memory into a section of * device-virtual memory. @@ -752,7 +724,15 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset, u64 device_addr, u64 size) { struct pvr_vm_bind_op bind_op = {0}; - struct drm_exec exec; + struct drm_gpuvm_exec vm_exec = { + .vm = &vm_ctx->gpuvm_mgr, + .flags = DRM_EXEC_INTERRUPTIBLE_WAIT | + DRM_EXEC_IGNORE_DUPLICATES, + .extra = { + .fn = pvr_vm_lock_extra, + .priv = &bind_op, + }, + }; int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj, pvr_obj_offset, device_addr, @@ -761,18 +741,15 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, if (err) return err; - drm_exec_init(&exec, - DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES); - pvr_gem_object_get(pvr_obj); - err = pvr_vm_bind_op_lock_resvs(&exec, &bind_op); + err = drm_gpuvm_exec_lock(&vm_exec); if (err) goto err_cleanup; err = pvr_vm_bind_op_exec(&bind_op); - drm_exec_fini(&exec); + drm_gpuvm_exec_unlock(&vm_exec); err_cleanup: pvr_vm_bind_op_fini(&bind_op); @@ -798,24 +775,28 @@ int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size) { struct pvr_vm_bind_op bind_op = {0}; - struct drm_exec exec; + struct drm_gpuvm_exec vm_exec = { + .vm = &vm_ctx->gpuvm_mgr, + .flags = DRM_EXEC_INTERRUPTIBLE_WAIT | + DRM_EXEC_IGNORE_DUPLICATES, + .extra = { + .fn = pvr_vm_lock_extra, + .priv = &bind_op, + }, + }; int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr, size); - if (err) return err; - drm_exec_init(&exec, - DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES); - - err = pvr_vm_bind_op_lock_resvs(&exec, &bind_op); + err = drm_gpuvm_exec_lock(&vm_exec); if (err) goto err_cleanup; err = pvr_vm_bind_op_exec(&bind_op); - drm_exec_fini(&exec); + drm_gpuvm_exec_unlock(&vm_exec); err_cleanup: pvr_vm_bind_op_fini(&bind_op); -- cgit v1.2.3 From 4777dded21717c31d2d8471bccaf7c0ff58feaa4 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 27 Nov 2023 07:15:43 +0200 Subject: dt-bindings: display: simple: Add boe,bp101wx1-100 panel This panel is found on Motorola mapphone tablets mz615 to mz617. 
Signed-off-by: Tony Lindgren Acked-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20231127051547.15023-1-tony@atomide.com Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20231127051547.15023-1-tony@atomide.com --- Documentation/devicetree/bindings/display/panel/panel-simple.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml index 3ec9ee95045f..f00275e50531 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml @@ -73,6 +73,8 @@ properties: - auo,t215hvn01 # Shanghai AVIC Optoelectronics 7" 1024x600 color TFT-LCD panel - avic,tm070ddh03 + # BOE BP101WX1-100 10.1" WXGA (1280x800) LVDS panel + - boe,bp101wx1-100 # BOE EV121WXM-N10-1850 12.1" WXGA (1280x800) TFT LCD panel - boe,ev121wxm-n10-1850 # BOE HV070WSA-100 7.01" WSVGA TFT LCD panel -- cgit v1.2.3 From eeaddab4c14beb02157db5ca8f9e074066759bfd Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 27 Nov 2023 07:15:44 +0200 Subject: drm/panel: simple: Add BOE BP101WX1-100 panel This panel is found on Motorola mapphone tablets from mz615 to mz617. Signed-off-by: Tony Lindgren Reviewed-by: Neil Armstrong Link: https://lore.kernel.org/r/20231127051547.15023-2-tony@atomide.com Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20231127051547.15023-2-tony@atomide.com --- drivers/gpu/drm/panel/panel-simple.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 9367a4572dcf..876e1b40cfb3 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1324,6 +1324,35 @@ static const struct panel_desc bananapi_s070wv20_ct16 = { }, }; +static const struct drm_display_mode boe_bp101wx1_100_mode = { + .clock = 78945, + .hdisplay = 1280, + .hsync_start = 1280 + 0, + .hsync_end = 1280 + 0 + 2, + .htotal = 1280 + 62 + 0 + 2, + .vdisplay = 800, + .vsync_start = 800 + 8, + .vsync_end = 800 + 8 + 2, + .vtotal = 800 + 6 + 8 + 2, +}; + +static const struct panel_desc boe_bp101wx1_100 = { + .modes = &boe_bp101wx1_100_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 217, + .height = 136, + }, + .delay = { + .enable = 50, + .disable = 50, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static const struct display_timing boe_ev121wxm_n10_1850_timing = { .pixelclock = { 69922000, 71000000, 72293000 }, .hactive = { 1280, 1280, 1280 }, @@ -4253,6 +4282,9 @@ static const struct of_device_id platform_of_match[] = { }, { .compatible = "bananapi,s070wv20-ct16", .data = &bananapi_s070wv20_ct16, + }, { + .compatible = "boe,bp101wx1-100", + .data = &boe_bp101wx1_100, }, { .compatible = "boe,ev121wxm-n10-1850", .data = &boe_ev121wxm_n10_1850, -- cgit v1.2.3 From 8c2c5d1d33f0725b7995f44f87a81311d13a441d Mon Sep 17 00:00:00 2001 From: Chris Morgan Date: Mon, 4 Dec 2023 12:57:10 -0600 Subject: drm/panel: himax-hx8394: Drop prepare/unprepare tracking Drop the panel specific prepare/unprepare logic. This is now tracked by the DRM stack [1]. 
[1] commit d2aacaf07395 ("drm/panel: Check for already prepared/enabled in drm_panel") Signed-off-by: Chris Morgan Reviewed-by: Neil Armstrong Link: https://lore.kernel.org/r/20231204185719.569021-2-macroalpha82@gmail.com Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20231204185719.569021-2-macroalpha82@gmail.com --- drivers/gpu/drm/panel/panel-himax-hx8394.c | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c index c73243d85de7..3823ff388b96 100644 --- a/drivers/gpu/drm/panel/panel-himax-hx8394.c +++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c @@ -68,7 +68,6 @@ struct hx8394 { struct gpio_desc *reset_gpio; struct regulator *vcc; struct regulator *iovcc; - bool prepared; const struct hx8394_panel_desc *desc; }; @@ -262,16 +261,11 @@ static int hx8394_unprepare(struct drm_panel *panel) { struct hx8394 *ctx = panel_to_hx8394(panel); - if (!ctx->prepared) - return 0; - gpiod_set_value_cansleep(ctx->reset_gpio, 1); regulator_disable(ctx->iovcc); regulator_disable(ctx->vcc); - ctx->prepared = false; - return 0; } @@ -280,9 +274,6 @@ static int hx8394_prepare(struct drm_panel *panel) struct hx8394 *ctx = panel_to_hx8394(panel); int ret; - if (ctx->prepared) - return 0; - gpiod_set_value_cansleep(ctx->reset_gpio, 1); ret = regulator_enable(ctx->vcc); @@ -301,8 +292,6 @@ static int hx8394_prepare(struct drm_panel *panel) msleep(180); - ctx->prepared = true; - return 0; disable_vcc: -- cgit v1.2.3 From e4f53a4d921eba6187a2599cf184a3beeb604fe2 Mon Sep 17 00:00:00 2001 From: Chris Morgan Date: Mon, 4 Dec 2023 12:57:11 -0600 Subject: drm/panel: himax-hx8394: Drop shutdown logic The driver shutdown is duplicate as it calls drm_unprepare and drm_disable which are called anyway when associated drivers are shutdown/removed. 
Signed-off-by: Chris Morgan Reviewed-by: Neil Armstrong Link: https://lore.kernel.org/r/20231204185719.569021-3-macroalpha82@gmail.com Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20231204185719.569021-3-macroalpha82@gmail.com --- drivers/gpu/drm/panel/panel-himax-hx8394.c | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c index 3823ff388b96..d8e590d5e1da 100644 --- a/drivers/gpu/drm/panel/panel-himax-hx8394.c +++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c @@ -390,27 +390,11 @@ static int hx8394_probe(struct mipi_dsi_device *dsi) return 0; } -static void hx8394_shutdown(struct mipi_dsi_device *dsi) -{ - struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi); - int ret; - - ret = drm_panel_disable(&ctx->panel); - if (ret < 0) - dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret); - - ret = drm_panel_unprepare(&ctx->panel); - if (ret < 0) - dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret); -} - static void hx8394_remove(struct mipi_dsi_device *dsi) { struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi); int ret; - hx8394_shutdown(dsi); - ret = mipi_dsi_detach(dsi); if (ret < 0) dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); @@ -427,7 +411,6 @@ MODULE_DEVICE_TABLE(of, hx8394_of_match); static struct mipi_dsi_driver hx8394_driver = { .probe = hx8394_probe, .remove = hx8394_remove, - .shutdown = hx8394_shutdown, .driver = { .name = DRV_NAME, .of_match_table = hx8394_of_match, -- cgit v1.2.3 From be478bc7ab08127473ce9ed893378cc2a8762611 Mon Sep 17 00:00:00 2001 From: Chris Morgan Date: Mon, 4 Dec 2023 12:57:12 -0600 Subject: dt-bindings: display: Document Himax HX8394 panel rotation Document panel rotation for Himax HX8394 display panel. Signed-off-by: Chris Morgan Acked-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20231204185719.569021-4-macroalpha82@gmail.com Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20231204185719.569021-4-macroalpha82@gmail.com --- Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml index ffb35288ffbb..3096debca55c 100644 --- a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml +++ b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml @@ -31,6 +31,8 @@ properties: backlight: true + rotation: true + port: true vcc-supply: -- cgit v1.2.3 From a695a5009c8fd239a98d98209489997ff5397d2b Mon Sep 17 00:00:00 2001 From: Chris Morgan Date: Mon, 4 Dec 2023 12:57:13 -0600 Subject: drm/panel: himax-hx8394: Add Panel Rotation Support Add support for setting the rotation property for the Himax HX8394 panel. 
Signed-off-by: Chris Morgan Reviewed-by: Neil Armstrong Link: https://lore.kernel.org/r/20231204185719.569021-5-macroalpha82@gmail.com Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20231204185719.569021-5-macroalpha82@gmail.com --- drivers/gpu/drm/panel/panel-himax-hx8394.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c index d8e590d5e1da..b68ea09f4725 100644 --- a/drivers/gpu/drm/panel/panel-himax-hx8394.c +++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c @@ -68,6 +68,7 @@ struct hx8394 { struct gpio_desc *reset_gpio; struct regulator *vcc; struct regulator *iovcc; + enum drm_panel_orientation orientation; const struct hx8394_panel_desc *desc; }; @@ -324,12 +325,20 @@ static int hx8394_get_modes(struct drm_panel *panel, return 1; } +static enum drm_panel_orientation hx8394_get_orientation(struct drm_panel *panel) +{ + struct hx8394 *ctx = panel_to_hx8394(panel); + + return ctx->orientation; +} + static const struct drm_panel_funcs hx8394_drm_funcs = { .disable = hx8394_disable, .unprepare = hx8394_unprepare, .prepare = hx8394_prepare, .enable = hx8394_enable, .get_modes = hx8394_get_modes, + .get_orientation = hx8394_get_orientation, }; static int hx8394_probe(struct mipi_dsi_device *dsi) @@ -347,6 +356,12 @@ static int hx8394_probe(struct mipi_dsi_device *dsi) return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), "Failed to get reset gpio\n"); + ret = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation); + if (ret < 0) { + dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, ret); + return ret; + } + mipi_dsi_set_drvdata(dsi, ctx); ctx->dev = dev; -- cgit v1.2.3 From 00830a0d8f0d820335e7beb26e251069d90f2574 Mon Sep 17 00:00:00 2001 From: Chris Morgan Date: Mon, 4 Dec 2023 12:57:14 -0600 Subject: dt-bindings: display: himax-hx8394: Add Powkiddy X55 panel Add compatible string for the Powkiddy X55 panel. Signed-off-by: Chris Morgan Acked-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20231204185719.569021-6-macroalpha82@gmail.com Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20231204185719.569021-6-macroalpha82@gmail.com --- Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml index 3096debca55c..916bb7f94206 100644 --- a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml +++ b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml @@ -23,6 +23,7 @@ properties: items: - enum: - hannstar,hsd060bhw4 + - powkiddy,x55-panel - const: himax,hx8394 reg: true -- cgit v1.2.3 From 38db985966d2f0f89f7e1891253489a16936fc5e Mon Sep 17 00:00:00 2001 From: Chris Morgan Date: Mon, 4 Dec 2023 12:57:15 -0600 Subject: drm/panel: himax-hx8394: Add Support for Powkiddy X55 panel Add support for the Powkiddy X55 panel as used on the Powkiddy X55 handheld gaming console. This panel uses a Himax HX8394 display controller and requires a vendor provided init sequence. The display resolution is 720x1280 and is 67mm by 121mm as measured with calipers. 
Signed-off-by: Chris Morgan Reviewed-by: Neil Armstrong Link: https://lore.kernel.org/r/20231204185719.569021-7-macroalpha82@gmail.com Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20231204185719.569021-7-macroalpha82@gmail.com --- drivers/gpu/drm/panel/panel-himax-hx8394.c | 137 +++++++++++++++++++++++++++++ 1 file changed, 137 insertions(+) diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c index b68ea09f4725..ff0dc08b9829 100644 --- a/drivers/gpu/drm/panel/panel-himax-hx8394.c +++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c @@ -38,6 +38,7 @@ #define HX8394_CMD_SETMIPI 0xba #define HX8394_CMD_SETOTP 0xbb #define HX8394_CMD_SETREGBANK 0xbd +#define HX8394_CMD_UNKNOWN5 0xbf #define HX8394_CMD_UNKNOWN1 0xc0 #define HX8394_CMD_SETDGCLUT 0xc1 #define HX8394_CMD_SETID 0xc3 @@ -52,6 +53,7 @@ #define HX8394_CMD_SETGIP1 0xd5 #define HX8394_CMD_SETGIP2 0xd6 #define HX8394_CMD_SETGPO 0xd6 +#define HX8394_CMD_UNKNOWN4 0xd8 #define HX8394_CMD_SETSCALING 0xdd #define HX8394_CMD_SETIDLE 0xdf #define HX8394_CMD_SETGAMMA 0xe0 @@ -203,6 +205,140 @@ static const struct hx8394_panel_desc hsd060bhw4_desc = { .init_sequence = hsd060bhw4_init_sequence, }; +static int powkiddy_x55_init_sequence(struct hx8394 *ctx) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + + /* 5.19.8 SETEXTC: Set extension command (B9h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC, + 0xff, 0x83, 0x94); + + /* 5.19.9 SETMIPI: Set MIPI control (BAh) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI, + 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0); + + /* 5.19.2 SETPOWER: Set power (B1h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER, + 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47); + + /* 5.19.3 SETDISP: Set display related register (B2h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP, + 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f); + + /* 5.19.4 SETCYC: Set display waveform cycles (B4h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC, + 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75, + 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, + 0x86); + + /* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM, + 0x6e, 0x6e); + + /* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0, + 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10, + 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15, + 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07, + 0x07, 0x0c, 0x40); + + /* 5.19.20 Set GIP Option1 (D5h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1, + 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18, + 0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21, + 0x18, 0x18, 0x18, 0x18); + + /* 5.19.21 Set GIP Option2 (D6h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2, + 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, + 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18, + 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24, + 0x18, 0x18, 0x18, 0x18); + + /* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA, + 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, + 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 
0x98, 0xa8, + 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00, + 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65, + 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba, + 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f); + + /* Unknown command, not listed in the HX8394-F datasheet */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1, + 0x1f, 0x31); + + /* 5.19.17 SETPANEL (CCh) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL, + 0x0b); + + /* Unknown command, not listed in the HX8394-F datasheet */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3, + 0x02); + + /* 5.19.11 Set register bank (BDh) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, + 0x02); + + /* Unknown command, not listed in the HX8394-F datasheet */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff); + + /* 5.19.11 Set register bank (BDh) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, + 0x00); + + /* 5.19.11 Set register bank (BDh) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, + 0x01); + + /* 5.19.2 SETPOWER: Set power (B1h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER, + 0x00); + + /* 5.19.11 Set register bank (BDh) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, + 0x00); + + /* Unknown command, not listed in the HX8394-F datasheet */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN5, + 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01); + + /* Unknown command, not listed in the HX8394-F datasheet */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2, + 0xed); + + return 0; +} + +static const struct drm_display_mode powkiddy_x55_mode = { + .hdisplay = 720, + .hsync_start = 720 + 44, + .hsync_end = 720 + 44 + 20, + .htotal = 720 + 44 + 20 + 20, + .vdisplay = 1280, + .vsync_start = 1280 + 12, + .vsync_end = 1280 + 12 + 10, + .vtotal = 1280 + 12 + 10 + 10, + .clock = 63290, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, + .width_mm = 67, + .height_mm = 121, +}; + +static const struct hx8394_panel_desc powkiddy_x55_desc = { + .mode = &powkiddy_x55_mode, + .lanes = 4, + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET, + .format = MIPI_DSI_FMT_RGB888, + .init_sequence = powkiddy_x55_init_sequence, +}; + static int hx8394_enable(struct drm_panel *panel) { struct hx8394 *ctx = panel_to_hx8394(panel); @@ -419,6 +555,7 @@ static void hx8394_remove(struct mipi_dsi_device *dsi) static const struct of_device_id hx8394_of_match[] = { { .compatible = "hannstar,hsd060bhw4", .data = &hsd060bhw4_desc }, + { .compatible = "powkiddy,x55-panel", .data = &powkiddy_x55_desc }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, hx8394_of_match); -- cgit v1.2.3 From 68c193c8d4a403222ce51c8b08bd1715f8b74274 Mon Sep 17 00:00:00 2001 From: Marco Felsch Date: Thu, 23 Nov 2023 18:08:04 +0100 Subject: drm/panel: ilitek-ili9881c: make use of prepare_prev_first The panel.prepare() call requires an initialized MIPI-DSI host, so set the prepare_prev_first flag to indicate that the host must be initialized first. 
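For reference, a rough sketch of the bring-up ordering this flag requests,
compared to the default (illustrative only, not code from this driver):

	/*
	 * Default bridge chain order:
	 *	drm_panel_prepare()       <- DSI host not yet initialized,
	 *	                             init commands cannot be sent
	 *	DSI host pre_enable / init
	 *
	 * With panel->prepare_prev_first set:
	 *	DSI host pre_enable / init
	 *	drm_panel_prepare()       <- host is up, the panel init
	 *	                             sequence can be transferred
	 *	drm_panel_enable()
	 */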
Signed-off-by: Marco Felsch Signed-off-by: Philipp Zabel Reviewed-by: Neil Armstrong Link: https://lore.kernel.org/r/20231123-drm-panel-ili9881c-am8001280g-v1-1-fdf4d624c211@pengutronix.de Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20231123-drm-panel-ili9881c-am8001280g-v1-1-fdf4d624c211@pengutronix.de --- drivers/gpu/drm/panel/panel-ilitek-ili9881c.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c index 7838947a1bf3..0c911ed9141b 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c @@ -1094,6 +1094,8 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi) return ret; } + ctx->panel.prepare_prev_first = true; + ret = drm_panel_of_backlight(&ctx->panel); if (ret) return ret; -- cgit v1.2.3 From 7ff02f82c3e9ddd5dd81957c8659d350261196ae Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Thu, 23 Nov 2023 18:08:05 +0100 Subject: dt-bindings: ili9881c: Add Ampire AM8001280G LCD panel Document the compatible value for Ampire AM8001280G LCD panels. Signed-off-by: Philipp Zabel Acked-by: Conor Dooley Link: https://lore.kernel.org/r/20231123-drm-panel-ili9881c-am8001280g-v1-2-fdf4d624c211@pengutronix.de Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20231123-drm-panel-ili9881c-am8001280g-v1-2-fdf4d624c211@pengutronix.de --- Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml index e7ab6224b52e..b1e624be3e33 100644 --- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml +++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml @@ -16,6 +16,7 @@ properties: compatible: items: - enum: + - ampire,am8001280g - bananapi,lhr050h41 - feixin,k101-im2byl02 - tdo,tl050hdv35 -- cgit v1.2.3 From 2748848ceaf32671927c3b19672ba3104a1dba7e Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Thu, 23 Nov 2023 18:08:06 +0100 Subject: drm/panel: ilitek-ili9881c: Add Ampire AM8001280G LCD panel Add support for Ampire AM8001280G LCD panels. 
Signed-off-by: Philipp Zabel Reviewed-by: Neil Armstrong Link: https://lore.kernel.org/r/20231123-drm-panel-ili9881c-am8001280g-v1-3-fdf4d624c211@pengutronix.de Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20231123-drm-panel-ili9881c-am8001280g-v1-3-fdf4d624c211@pengutronix.de --- drivers/gpu/drm/panel/panel-ilitek-ili9881c.c | 223 ++++++++++++++++++++++++++ 1 file changed, 223 insertions(+) diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c index 0c911ed9141b..2ffe5f68a890 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c @@ -830,6 +830,203 @@ static const struct ili9881c_instr w552946ab_init[] = { ILI9881C_SWITCH_PAGE_INSTR(0), }; +static const struct ili9881c_instr am8001280g_init[] = { + ILI9881C_SWITCH_PAGE_INSTR(3), + ILI9881C_COMMAND_INSTR(0x01, 0x00), + ILI9881C_COMMAND_INSTR(0x02, 0x00), + ILI9881C_COMMAND_INSTR(0x03, 0x73), + ILI9881C_COMMAND_INSTR(0x04, 0xD3), + ILI9881C_COMMAND_INSTR(0x05, 0x00), + ILI9881C_COMMAND_INSTR(0x06, 0x0A), + ILI9881C_COMMAND_INSTR(0x07, 0x0E), + ILI9881C_COMMAND_INSTR(0x08, 0x00), + ILI9881C_COMMAND_INSTR(0x09, 0x01), + ILI9881C_COMMAND_INSTR(0x0a, 0x01), + ILI9881C_COMMAND_INSTR(0x0b, 0x01), + ILI9881C_COMMAND_INSTR(0x0c, 0x01), + ILI9881C_COMMAND_INSTR(0x0d, 0x01), + ILI9881C_COMMAND_INSTR(0x0e, 0x01), + ILI9881C_COMMAND_INSTR(0x0f, 0x01), + ILI9881C_COMMAND_INSTR(0x10, 0x01), + ILI9881C_COMMAND_INSTR(0x11, 0x00), + ILI9881C_COMMAND_INSTR(0x12, 0x00), + ILI9881C_COMMAND_INSTR(0x13, 0x00), + ILI9881C_COMMAND_INSTR(0x14, 0x00), + ILI9881C_COMMAND_INSTR(0x15, 0x00), + ILI9881C_COMMAND_INSTR(0x16, 0x00), + ILI9881C_COMMAND_INSTR(0x17, 0x00), + ILI9881C_COMMAND_INSTR(0x18, 0x00), + ILI9881C_COMMAND_INSTR(0x19, 0x00), + ILI9881C_COMMAND_INSTR(0x1a, 0x00), + ILI9881C_COMMAND_INSTR(0x1b, 0x00), + ILI9881C_COMMAND_INSTR(0x1c, 0x00), + ILI9881C_COMMAND_INSTR(0x1d, 0x00), + ILI9881C_COMMAND_INSTR(0x1e, 0x40), + ILI9881C_COMMAND_INSTR(0x1f, 0x80), + ILI9881C_COMMAND_INSTR(0x20, 0x06), + ILI9881C_COMMAND_INSTR(0x21, 0x01), + ILI9881C_COMMAND_INSTR(0x22, 0x00), + ILI9881C_COMMAND_INSTR(0x23, 0x00), + ILI9881C_COMMAND_INSTR(0x24, 0x00), + ILI9881C_COMMAND_INSTR(0x25, 0x00), + ILI9881C_COMMAND_INSTR(0x26, 0x00), + ILI9881C_COMMAND_INSTR(0x27, 0x00), + ILI9881C_COMMAND_INSTR(0x28, 0x33), + ILI9881C_COMMAND_INSTR(0x29, 0x03), + ILI9881C_COMMAND_INSTR(0x2a, 0x00), + ILI9881C_COMMAND_INSTR(0x2b, 0x00), + ILI9881C_COMMAND_INSTR(0x2c, 0x00), + ILI9881C_COMMAND_INSTR(0x2d, 0x00), + ILI9881C_COMMAND_INSTR(0x2e, 0x00), + ILI9881C_COMMAND_INSTR(0x2f, 0x00), + ILI9881C_COMMAND_INSTR(0x30, 0x00), + ILI9881C_COMMAND_INSTR(0x31, 0x00), + ILI9881C_COMMAND_INSTR(0x32, 0x00), + ILI9881C_COMMAND_INSTR(0x33, 0x00), + ILI9881C_COMMAND_INSTR(0x34, 0x03), + ILI9881C_COMMAND_INSTR(0x35, 0x00), + ILI9881C_COMMAND_INSTR(0x36, 0x03), + ILI9881C_COMMAND_INSTR(0x37, 0x00), + ILI9881C_COMMAND_INSTR(0x38, 0x00), + ILI9881C_COMMAND_INSTR(0x39, 0x00), + ILI9881C_COMMAND_INSTR(0x3a, 0x40), + ILI9881C_COMMAND_INSTR(0x3b, 0x40), + ILI9881C_COMMAND_INSTR(0x3c, 0x00), + ILI9881C_COMMAND_INSTR(0x3d, 0x00), + ILI9881C_COMMAND_INSTR(0x3e, 0x00), + ILI9881C_COMMAND_INSTR(0x3f, 0x00), + ILI9881C_COMMAND_INSTR(0x40, 0x00), + ILI9881C_COMMAND_INSTR(0x41, 0x00), + ILI9881C_COMMAND_INSTR(0x42, 0x00), + ILI9881C_COMMAND_INSTR(0x43, 0x00), + ILI9881C_COMMAND_INSTR(0x44, 0x00), + + ILI9881C_COMMAND_INSTR(0x50, 0x01), + ILI9881C_COMMAND_INSTR(0x51, 0x23), + 
ILI9881C_COMMAND_INSTR(0x52, 0x45), + ILI9881C_COMMAND_INSTR(0x53, 0x67), + ILI9881C_COMMAND_INSTR(0x54, 0x89), + ILI9881C_COMMAND_INSTR(0x55, 0xab), + ILI9881C_COMMAND_INSTR(0x56, 0x01), + ILI9881C_COMMAND_INSTR(0x57, 0x23), + ILI9881C_COMMAND_INSTR(0x58, 0x45), + ILI9881C_COMMAND_INSTR(0x59, 0x67), + ILI9881C_COMMAND_INSTR(0x5a, 0x89), + ILI9881C_COMMAND_INSTR(0x5b, 0xab), + ILI9881C_COMMAND_INSTR(0x5c, 0xcd), + ILI9881C_COMMAND_INSTR(0x5d, 0xef), + + ILI9881C_COMMAND_INSTR(0x5e, 0x11), + ILI9881C_COMMAND_INSTR(0x5f, 0x02), + ILI9881C_COMMAND_INSTR(0x60, 0x00), + ILI9881C_COMMAND_INSTR(0x61, 0x01), + ILI9881C_COMMAND_INSTR(0x62, 0x0D), + ILI9881C_COMMAND_INSTR(0x63, 0x0C), + ILI9881C_COMMAND_INSTR(0x64, 0x0F), + ILI9881C_COMMAND_INSTR(0x65, 0x0E), + ILI9881C_COMMAND_INSTR(0x66, 0x06), + ILI9881C_COMMAND_INSTR(0x67, 0x07), + ILI9881C_COMMAND_INSTR(0x68, 0x02), + ILI9881C_COMMAND_INSTR(0x69, 0x02), + ILI9881C_COMMAND_INSTR(0x6a, 0x08), + ILI9881C_COMMAND_INSTR(0x6b, 0x02), + ILI9881C_COMMAND_INSTR(0x6c, 0x02), + ILI9881C_COMMAND_INSTR(0x6d, 0x02), + ILI9881C_COMMAND_INSTR(0x6e, 0x02), + ILI9881C_COMMAND_INSTR(0x6f, 0x02), + ILI9881C_COMMAND_INSTR(0x70, 0x02), + ILI9881C_COMMAND_INSTR(0x71, 0x02), + ILI9881C_COMMAND_INSTR(0x72, 0x02), + ILI9881C_COMMAND_INSTR(0x73, 0x02), + ILI9881C_COMMAND_INSTR(0x74, 0x02), + ILI9881C_COMMAND_INSTR(0x75, 0x02), + ILI9881C_COMMAND_INSTR(0x76, 0x00), + ILI9881C_COMMAND_INSTR(0x77, 0x01), + ILI9881C_COMMAND_INSTR(0x78, 0x0D), + ILI9881C_COMMAND_INSTR(0x79, 0x0C), + ILI9881C_COMMAND_INSTR(0x7a, 0x0F), + ILI9881C_COMMAND_INSTR(0x7b, 0x0E), + ILI9881C_COMMAND_INSTR(0x7c, 0x06), + ILI9881C_COMMAND_INSTR(0x7d, 0x07), + ILI9881C_COMMAND_INSTR(0x7e, 0x02), + ILI9881C_COMMAND_INSTR(0x7f, 0x02), + ILI9881C_COMMAND_INSTR(0x80, 0x08), + ILI9881C_COMMAND_INSTR(0x81, 0x02), + ILI9881C_COMMAND_INSTR(0x82, 0x02), + ILI9881C_COMMAND_INSTR(0x83, 0x02), + ILI9881C_COMMAND_INSTR(0x84, 0x02), + ILI9881C_COMMAND_INSTR(0x85, 0x02), + ILI9881C_COMMAND_INSTR(0x86, 0x02), + ILI9881C_COMMAND_INSTR(0x87, 0x02), + ILI9881C_COMMAND_INSTR(0x88, 0x02), + ILI9881C_COMMAND_INSTR(0x89, 0x02), + ILI9881C_COMMAND_INSTR(0x8A, 0x02), + + ILI9881C_SWITCH_PAGE_INSTR(4), + ILI9881C_COMMAND_INSTR(0x6c, 0x15), + ILI9881C_COMMAND_INSTR(0x6e, 0x30), + ILI9881C_COMMAND_INSTR(0x6f, 0x33), + ILI9881C_COMMAND_INSTR(0x8d, 0x15), + ILI9881C_COMMAND_INSTR(0x3a, 0xa4), + ILI9881C_COMMAND_INSTR(0x87, 0xba), + ILI9881C_COMMAND_INSTR(0x26, 0x76), + ILI9881C_COMMAND_INSTR(0xb2, 0xd1), + + ILI9881C_SWITCH_PAGE_INSTR(1), + ILI9881C_COMMAND_INSTR(0x22, 0x0A), + ILI9881C_COMMAND_INSTR(0x31, 0x0B), + ILI9881C_COMMAND_INSTR(0x50, 0xa5), + ILI9881C_COMMAND_INSTR(0x51, 0xa0), + ILI9881C_COMMAND_INSTR(0x53, 0x70), + ILI9881C_COMMAND_INSTR(0x55, 0x7A), + ILI9881C_COMMAND_INSTR(0x60, 0x14), + + ILI9881C_COMMAND_INSTR(0xA0, 0x00), + ILI9881C_COMMAND_INSTR(0xA1, 0x53), + ILI9881C_COMMAND_INSTR(0xA2, 0x50), + ILI9881C_COMMAND_INSTR(0xA3, 0x20), + ILI9881C_COMMAND_INSTR(0xA4, 0x27), + ILI9881C_COMMAND_INSTR(0xA5, 0x33), + ILI9881C_COMMAND_INSTR(0xA6, 0x25), + ILI9881C_COMMAND_INSTR(0xA7, 0x25), + ILI9881C_COMMAND_INSTR(0xA8, 0xD4), + ILI9881C_COMMAND_INSTR(0xA9, 0x1A), + ILI9881C_COMMAND_INSTR(0xAA, 0x2B), + ILI9881C_COMMAND_INSTR(0xAB, 0xB5), + ILI9881C_COMMAND_INSTR(0xAC, 0x19), + ILI9881C_COMMAND_INSTR(0xAD, 0x18), + ILI9881C_COMMAND_INSTR(0xAE, 0x53), + ILI9881C_COMMAND_INSTR(0xAF, 0x1A), + ILI9881C_COMMAND_INSTR(0xB0, 0x25), + ILI9881C_COMMAND_INSTR(0xB1, 0x62), + ILI9881C_COMMAND_INSTR(0xB2, 0x6A), + 
ILI9881C_COMMAND_INSTR(0xB3, 0x31), + + ILI9881C_COMMAND_INSTR(0xC0, 0x00), + ILI9881C_COMMAND_INSTR(0xC1, 0x53), + ILI9881C_COMMAND_INSTR(0xC2, 0x50), + ILI9881C_COMMAND_INSTR(0xC3, 0x20), + ILI9881C_COMMAND_INSTR(0xC4, 0x27), + ILI9881C_COMMAND_INSTR(0xC5, 0x33), + ILI9881C_COMMAND_INSTR(0xC6, 0x25), + ILI9881C_COMMAND_INSTR(0xC7, 0x25), + ILI9881C_COMMAND_INSTR(0xC8, 0xD4), + ILI9881C_COMMAND_INSTR(0xC9, 0x1A), + ILI9881C_COMMAND_INSTR(0xCA, 0x2B), + ILI9881C_COMMAND_INSTR(0xCB, 0xB5), + ILI9881C_COMMAND_INSTR(0xCC, 0x19), + ILI9881C_COMMAND_INSTR(0xCD, 0x18), + ILI9881C_COMMAND_INSTR(0xCE, 0x53), + ILI9881C_COMMAND_INSTR(0xCF, 0x1A), + ILI9881C_COMMAND_INSTR(0xD0, 0x25), + ILI9881C_COMMAND_INSTR(0xD1, 0x62), + ILI9881C_COMMAND_INSTR(0xD2, 0x6A), + ILI9881C_COMMAND_INSTR(0xD3, 0x31), + ILI9881C_SWITCH_PAGE_INSTR(0), + ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c), + ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_POWER_SAVE, 0x00), +}; + static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel) { return container_of(panel, struct ili9881c, panel); @@ -1014,6 +1211,23 @@ static const struct drm_display_mode w552946aba_default_mode = { .height_mm = 121, }; +static const struct drm_display_mode am8001280g_default_mode = { + .clock = 67911, + + .hdisplay = 800, + .hsync_start = 800 + 20, + .hsync_end = 800 + 20 + 32, + .htotal = 800 + 20 + 32 + 20, + + .vdisplay = 1280, + .vsync_start = 1280 + 6, + .vsync_end = 1280 + 6 + 8, + .vtotal = 1280 + 6 + 8 + 4, + + .width_mm = 94, + .height_mm = 151, +}; + static int ili9881c_get_modes(struct drm_panel *panel, struct drm_connector *connector) { @@ -1147,11 +1361,20 @@ static const struct ili9881c_desc w552946aba_desc = { MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET, }; +static const struct ili9881c_desc am8001280g_desc = { + .init = am8001280g_init, + .init_length = ARRAY_SIZE(am8001280g_init), + .mode = &am8001280g_default_mode, + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | + MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM, +}; + static const struct of_device_id ili9881c_of_match[] = { { .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc }, { .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc }, { .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc }, { .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc }, + { .compatible = "ampire,am8001280g", .data = &am8001280g_desc }, { } }; MODULE_DEVICE_TABLE(of, ili9881c_of_match); -- cgit v1.2.3 From 2a5244a04e751c8617d5e7707550d97a83b1102d Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Thu, 23 Nov 2023 11:24:03 +0100 Subject: dt-bindings: display: simple: add Evervision VGG644804 panel Add Evervision VGG644804 5.7" 640x480 LVDS panel compatible string. 
Signed-off-by: Michael Walle Acked-by: Conor Dooley Link: https://lore.kernel.org/r/20231123102404.2022201-1-mwalle@kernel.org Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20231123102404.2022201-1-mwalle@kernel.org --- Documentation/devicetree/bindings/display/panel/panel-simple.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml index f00275e50531..2021aa82871a 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml @@ -146,6 +146,8 @@ properties: - edt,etmv570g2dhu # E Ink VB3300-KCA - eink,vb3300-kca + # Evervision Electronics Co. Ltd. VGG644804 5.7" VGA TFT LCD Panel + - evervision,vgg644804 # Evervision Electronics Co. Ltd. VGG804821 5.0" WVGA TFT LCD Panel - evervision,vgg804821 # Foxlink Group 5" WVGA TFT LCD panel -- cgit v1.2.3 From 1319f2178bdf1898a76ea8c4f00d57b240bbc5fd Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Thu, 23 Nov 2023 11:24:04 +0100 Subject: drm/panel-simple: add Evervision VGG644804 panel entry Timings taken from the datasheet, although sometimes there are just typical values and it's not clear if they are no min and max values or if you must use the typical value exactly. To make things worse, there is no back porch but only a combined sync and back porch length. Unfortunately, there is not public datasheet. Therefore, here are the relevant timings: | min | typ | max | -----------------+-----+--------+-----+ CLK frequency | - | 25.175 | - | HS period | - | 800 | - | HS pulse width | 5 | 30 | - | HS-DEN time | 112 | 144 | 175 | DEN pulse width | - | 640 | - | VS pulse width | 1 | 3 | 5 | VS-DEN time | - | 35 | - | VS period | - | 525 | - | Signed-off-by: Michael Walle Reviewed-by: Neil Armstrong Link: https://lore.kernel.org/r/20231123102404.2022201-2-mwalle@kernel.org Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20231123102404.2022201-2-mwalle@kernel.org --- drivers/gpu/drm/panel/panel-simple.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 876e1b40cfb3..8017ad33cf18 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2002,6 +2002,33 @@ static const struct panel_desc eink_vb3300_kca = { .connector_type = DRM_MODE_CONNECTOR_DPI, }; +static const struct display_timing evervision_vgg644804_timing = { + .pixelclock = { 25175000, 25175000, 25175000 }, + .hactive = { 640, 640, 640 }, + .hfront_porch = { 16, 16, 16 }, + .hback_porch = { 82, 114, 170 }, + .hsync_len = { 5, 30, 30 }, + .vactive = { 480, 480, 480 }, + .vfront_porch = { 10, 10, 10 }, + .vback_porch = { 30, 32, 34 }, + .vsync_len = { 1, 3, 5 }, + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | + DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE | + DISPLAY_FLAGS_SYNC_POSEDGE, +}; + +static const struct panel_desc evervision_vgg644804 = { + .timings = &evervision_vgg644804_timing, + .num_timings = 1, + .bpc = 8, + .size = { + .width = 115, + .height = 86, + }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE, +}; + static const struct display_timing evervision_vgg804821_timing = { .pixelclock = { 27600000, 33300000, 50000000 }, .hactive = { 800, 800, 
800 }, @@ -4366,6 +4393,9 @@ static const struct of_device_id platform_of_match[] = { }, { .compatible = "eink,vb3300-kca", .data = &eink_vb3300_kca, + }, { + .compatible = "evervision,vgg644804", + .data = &evervision_vgg644804, }, { .compatible = "evervision,vgg804821", .data = &evervision_vgg804821, -- cgit v1.2.3 From a4f5892914ca7709ea6d191f0edace93a5935966 Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Mon, 4 Dec 2023 12:42:13 +0100 Subject: drm/panfrost: Ignore core_mask for poweroff and disable PWRTRANS irq Some SoCs may be equipped with a GPU containing two core groups and this is exactly the case of Samsung's Exynos 5422 featuring an ARM Mali-T628 MP6 GPU: the support for this GPU in Panfrost is partial, as this driver currently supports using only one core group and that's reflected on all parts of it, including the power on (and power off, previously to this patch) function. The issue with this is that even though executing the soft reset operation should power off all cores unconditionally, on at least one platform we're seeing a crash that seems to be happening due to an interrupt firing which may be because we are calling power transition only on the first core group, leaving the second one unchanged, or because ISR execution was pending before entering the panfrost_gpu_power_off() function and executed after powering off the GPU cores, or all of the above. Finally, solve this by: - Avoid to enable the power transition interrupt on reset; and - Ignoring the core_mask and ask the GPU to poweroff both core groups Fixes: 22aa1a209018 ("drm/panfrost: Really power off GPU cores in panfrost_gpu_power_off()") Reviewed-by: Boris Brezillon Reviewed-by: Steven Price Signed-off-by: AngeloGioacchino Del Regno Tested-by: Marek Szyprowski Signed-off-by: Boris Brezillon Link: https://patchwork.freedesktop.org/patch/msgid/20231204114215.54575-2-angelogioacchino.delregno@collabora.com --- drivers/gpu/drm/panfrost/panfrost_gpu.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index 09f5e1563ebd..bd41617c5e4b 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -78,7 +78,12 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev) } gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL); - gpu_write(pfdev, GPU_INT_MASK, GPU_IRQ_MASK_ALL); + + /* Only enable the interrupts we care about */ + gpu_write(pfdev, GPU_INT_MASK, + GPU_IRQ_MASK_ERROR | + GPU_IRQ_PERFCNT_SAMPLE_COMPLETED | + GPU_IRQ_CLEAN_CACHES_COMPLETED); /* * All in-flight jobs should have released their cycle @@ -425,11 +430,10 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev) void panfrost_gpu_power_off(struct panfrost_device *pfdev) { - u64 core_mask = panfrost_get_core_mask(pfdev); int ret; u32 val; - gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present & core_mask); + gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present); ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO, val, !val, 1, 1000); if (ret) @@ -441,7 +445,7 @@ void panfrost_gpu_power_off(struct panfrost_device *pfdev) if (ret) dev_err(pfdev->dev, "tiler power transition timeout"); - gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present & core_mask); + gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present); ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO, val, !val, 0, 1000); if (ret) -- cgit v1.2.3 From 
b98e9a84d38ac88f9fd2accbcd45b656eeea7a04 Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Mon, 4 Dec 2023 12:42:14 +0100 Subject: drm/panfrost: Add gpu_irq, mmu_irq to struct panfrost_device In preparation for adding a IRQ synchronization mechanism for PM suspend, add gpu_irq and mmu_irq variables to struct panfrost_device and change functions panfrost_gpu_init() and panfrost_mmu_init() to use those. Reviewed-by: Boris Brezillon Reviewed-by: Steven Price Signed-off-by: AngeloGioacchino Del Regno Tested-by: Marek Szyprowski Signed-off-by: Boris Brezillon Link: https://patchwork.freedesktop.org/patch/msgid/20231204114215.54575-3-angelogioacchino.delregno@collabora.com --- drivers/gpu/drm/panfrost/panfrost_device.h | 2 ++ drivers/gpu/drm/panfrost/panfrost_gpu.c | 10 +++++----- drivers/gpu/drm/panfrost/panfrost_mmu.c | 10 +++++----- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index 0fc558db6bfd..54a8aad54259 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -94,6 +94,8 @@ struct panfrost_device { struct device *dev; struct drm_device *ddev; struct platform_device *pdev; + int gpu_irq; + int mmu_irq; void __iomem *iomem; struct clk *clock; diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index bd41617c5e4b..7adc4441fa14 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -454,7 +454,7 @@ void panfrost_gpu_power_off(struct panfrost_device *pfdev) int panfrost_gpu_init(struct panfrost_device *pfdev) { - int err, irq; + int err; err = panfrost_gpu_soft_reset(pfdev); if (err) @@ -469,11 +469,11 @@ int panfrost_gpu_init(struct panfrost_device *pfdev) dma_set_max_seg_size(pfdev->dev, UINT_MAX); - irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu"); - if (irq < 0) - return irq; + pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu"); + if (pfdev->gpu_irq < 0) + return pfdev->gpu_irq; - err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler, + err = devm_request_irq(pfdev->dev, pfdev->gpu_irq, panfrost_gpu_irq_handler, IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev); if (err) { dev_err(pfdev->dev, "failed to request gpu irq"); diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 846dd697c410..ac4296c1e54b 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -753,13 +753,13 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) int panfrost_mmu_init(struct panfrost_device *pfdev) { - int err, irq; + int err; - irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu"); - if (irq < 0) - return irq; + pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu"); + if (pfdev->mmu_irq < 0) + return pfdev->mmu_irq; - err = devm_request_threaded_irq(pfdev->dev, irq, + err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq, panfrost_mmu_irq_handler, panfrost_mmu_irq_handler_thread, IRQF_SHARED, KBUILD_MODNAME "-mmu", -- cgit v1.2.3 From 157ad4ccff0754d9eb57d3a4fa31264ee6e9716b Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Mon, 4 Dec 2023 12:42:15 +0100 Subject: drm/panfrost: Synchronize and disable interrupts before powering off To make sure that we don't unintentionally perform any unclocked and/or unpowered R/W operation on 
GPU registers, before turning off clocks and regulators we must make sure that no GPU, JOB or MMU ISR execution is pending: doing that requires to add a mechanism to synchronize the interrupts on suspend. Add functions panfrost_{gpu,job,mmu}_suspend_irq() which will perform interrupts masking and ISR execution synchronization, and then call those in the panfrost_device_runtime_suspend() handler in the exact sequence of job (may require mmu!) -> mmu -> gpu. As a side note, JOB and MMU suspend_irq functions needed some special treatment: as their interrupt handlers will unmask interrupts, it was necessary to add an `is_suspended` bitmap which is used to address the possible corner case of unintentional IRQ unmasking because of ISR execution after a call to synchronize_irq(). At resume, clear each is_suspended bit in the reset path of JOB/MMU to allow unmasking the interrupts. Signed-off-by: AngeloGioacchino Del Regno Reviewed-by: Boris Brezillon Tested-by: Marek Szyprowski Reviewed-by: Steven Price Signed-off-by: Boris Brezillon Link: https://patchwork.freedesktop.org/patch/msgid/20231204114215.54575-4-angelogioacchino.delregno@collabora.com --- drivers/gpu/drm/panfrost/panfrost_device.c | 3 +++ drivers/gpu/drm/panfrost/panfrost_device.h | 8 ++++++++ drivers/gpu/drm/panfrost/panfrost_gpu.c | 18 ++++++++++++++++-- drivers/gpu/drm/panfrost/panfrost_gpu.h | 1 + drivers/gpu/drm/panfrost/panfrost_job.c | 26 ++++++++++++++++++++++---- drivers/gpu/drm/panfrost/panfrost_job.h | 1 + drivers/gpu/drm/panfrost/panfrost_mmu.c | 22 +++++++++++++++++++--- drivers/gpu/drm/panfrost/panfrost_mmu.h | 1 + 8 files changed, 71 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c index c90ad5ee34e7..a45e4addcc19 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.c +++ b/drivers/gpu/drm/panfrost/panfrost_device.c @@ -421,6 +421,9 @@ static int panfrost_device_runtime_suspend(struct device *dev) return -EBUSY; panfrost_devfreq_suspend(pfdev); + panfrost_job_suspend_irq(pfdev); + panfrost_mmu_suspend_irq(pfdev); + panfrost_gpu_suspend_irq(pfdev); panfrost_gpu_power_off(pfdev); return 0; diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index 54a8aad54259..62f7e3527385 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -25,6 +25,13 @@ struct panfrost_perfcnt; #define NUM_JOB_SLOTS 3 #define MAX_PM_DOMAINS 5 +enum panfrost_drv_comp_bits { + PANFROST_COMP_BIT_GPU, + PANFROST_COMP_BIT_JOB, + PANFROST_COMP_BIT_MMU, + PANFROST_COMP_BIT_MAX +}; + /** * enum panfrost_gpu_pm - Supported kernel power management features * @GPU_PM_CLK_DIS: Allow disabling clocks during system suspend @@ -109,6 +116,7 @@ struct panfrost_device { struct panfrost_features features; const struct panfrost_compatible *comp; + DECLARE_BITMAP(is_suspended, PANFROST_COMP_BIT_MAX); spinlock_t as_lock; unsigned long as_in_use_mask; diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index 7adc4441fa14..9063ce254642 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -22,9 +22,13 @@ static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data) { struct panfrost_device *pfdev = data; - u32 state = gpu_read(pfdev, GPU_INT_STAT); - u32 fault_status = gpu_read(pfdev, GPU_FAULT_STATUS); + u32 fault_status, state; + if (test_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended)) + return 
IRQ_NONE; + + fault_status = gpu_read(pfdev, GPU_FAULT_STATUS); + state = gpu_read(pfdev, GPU_INT_STAT); if (!state) return IRQ_NONE; @@ -61,6 +65,8 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev) gpu_write(pfdev, GPU_INT_MASK, 0); gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED); + clear_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended); + gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET); ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT, val, val & GPU_IRQ_RESET_COMPLETED, 10, 10000); @@ -452,6 +458,14 @@ void panfrost_gpu_power_off(struct panfrost_device *pfdev) dev_err(pfdev->dev, "l2 power transition timeout"); } +void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev) +{ + set_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended); + + gpu_write(pfdev, GPU_INT_MASK, 0); + synchronize_irq(pfdev->gpu_irq); +} + int panfrost_gpu_init(struct panfrost_device *pfdev) { int err; diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h index 876fdad9f721..d841b86504ea 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.h +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h @@ -15,6 +15,7 @@ u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev); int panfrost_gpu_soft_reset(struct panfrost_device *pfdev); void panfrost_gpu_power_on(struct panfrost_device *pfdev); void panfrost_gpu_power_off(struct panfrost_device *pfdev); +void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev); void panfrost_cycle_counter_get(struct panfrost_device *pfdev); void panfrost_cycle_counter_put(struct panfrost_device *pfdev); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index f9446e197428..0c2dbf6ef2a5 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -405,6 +405,8 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev) int j; u32 irq_mask = 0; + clear_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended); + for (j = 0; j < NUM_JOB_SLOTS; j++) { irq_mask |= MK_JS_MASK(j); } @@ -413,6 +415,14 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev) job_write(pfdev, JOB_INT_MASK, irq_mask); } +void panfrost_job_suspend_irq(struct panfrost_device *pfdev) +{ + set_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended); + + job_write(pfdev, JOB_INT_MASK, 0); + synchronize_irq(pfdev->js->irq); +} + static void panfrost_job_handle_err(struct panfrost_device *pfdev, struct panfrost_job *job, unsigned int js) @@ -792,17 +802,25 @@ static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data) struct panfrost_device *pfdev = data; panfrost_job_handle_irqs(pfdev); - job_write(pfdev, JOB_INT_MASK, - GENMASK(16 + NUM_JOB_SLOTS - 1, 16) | - GENMASK(NUM_JOB_SLOTS - 1, 0)); + + /* Enable interrupts only if we're not about to get suspended */ + if (!test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended)) + job_write(pfdev, JOB_INT_MASK, + GENMASK(16 + NUM_JOB_SLOTS - 1, 16) | + GENMASK(NUM_JOB_SLOTS - 1, 0)); + return IRQ_HANDLED; } static irqreturn_t panfrost_job_irq_handler(int irq, void *data) { struct panfrost_device *pfdev = data; - u32 status = job_read(pfdev, JOB_INT_STAT); + u32 status; + + if (test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended)) + return IRQ_NONE; + status = job_read(pfdev, JOB_INT_STAT); if (!status) return IRQ_NONE; diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h index 17ff808dba07..ec581b97852b 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.h +++ 
b/drivers/gpu/drm/panfrost/panfrost_job.h @@ -47,6 +47,7 @@ int panfrost_job_get_slot(struct panfrost_job *job); int panfrost_job_push(struct panfrost_job *job); void panfrost_job_put(struct panfrost_job *job); void panfrost_job_enable_interrupts(struct panfrost_device *pfdev); +void panfrost_job_suspend_irq(struct panfrost_device *pfdev); int panfrost_job_is_idle(struct panfrost_device *pfdev); #endif diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index ac4296c1e54b..f38385fe76bb 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -231,6 +231,8 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev) { struct panfrost_mmu *mmu, *mmu_tmp; + clear_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended); + spin_lock(&pfdev->as_lock); pfdev->as_alloc_mask = 0; @@ -670,6 +672,9 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data) { struct panfrost_device *pfdev = data; + if (test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) + return IRQ_NONE; + if (!mmu_read(pfdev, MMU_INT_STAT)) return IRQ_NONE; @@ -744,9 +749,12 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask; } - spin_lock(&pfdev->as_lock); - mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask); - spin_unlock(&pfdev->as_lock); + /* Enable interrupts only if we're not about to get suspended */ + if (!test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) { + spin_lock(&pfdev->as_lock); + mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask); + spin_unlock(&pfdev->as_lock); + } return IRQ_HANDLED; }; @@ -777,3 +785,11 @@ void panfrost_mmu_fini(struct panfrost_device *pfdev) { mmu_write(pfdev, MMU_INT_MASK, 0); } + +void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev) +{ + set_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended); + + mmu_write(pfdev, MMU_INT_MASK, 0); + synchronize_irq(pfdev->mmu_irq); +} diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h index cc2a0d307feb..022a9a74a114 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.h +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h @@ -14,6 +14,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping); int panfrost_mmu_init(struct panfrost_device *pfdev); void panfrost_mmu_fini(struct panfrost_device *pfdev); void panfrost_mmu_reset(struct panfrost_device *pfdev); +void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev); u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu); void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu); -- cgit v1.2.3 From c50a291d621aa7abaa27b05f56d450a388b64948 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Mon, 4 Dec 2023 16:14:06 +0100 Subject: drm/gpuvm: Let drm_gpuvm_bo_put() report when the vm_bo object is destroyed Some users need to release resources attached to the vm_bo object when it's destroyed. In Panthor's case, we need to release the pin ref so BO pages can be returned to the system when all GPU mappings are gone. This could be done through a custom drm_gpuvm::vm_bo_free() hook, but this has all sort of locking implications that would force us to expose a drm_gem_shmem_unpin_locked() helper, not to mention the fact that having a ::vm_bo_free() implementation without a ::vm_bo_alloc() one seems odd. So let's keep things simple, and extend drm_gpuvm_bo_put() to report when the object is destroyed. 
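To make the new return value concrete, here is a minimal sketch of the use case described above. It is not Panthor's actual code: my_driver_vm_bo_put() is a hypothetical helper, and the sketch assumes the caller still holds its own reference on the GEM object (so the object outlives the vm_bo) and that the pages were pinned through the shmem helpers when the mapping was created.

  #include <drm/drm_gem_shmem_helper.h>
  #include <drm/drm_gpuvm.h>

  static void my_driver_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
  {
          /* Cache the GEM object: the vm_bo may be freed by the put below. */
          struct drm_gem_object *obj = vm_bo->obj;

          /*
           * drm_gpuvm_bo_put() now returns true when this was the last
           * reference and the vm_bo got destroyed, i.e. when all GPU
           * mappings of the BO are gone. Only then drop the pin ref so
           * the pages can be returned to the system.
           */
          if (drm_gpuvm_bo_put(vm_bo))
                  drm_gem_shmem_unpin(to_drm_gem_shmem_obj(obj));
  }

Returning a plain bool keeps the common path unchanged: callers that don't need the information can simply keep ignoring the return value.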
Signed-off-by: Boris Brezillon Reviewed-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231204151406.1977285-1-boris.brezillon@collabora.com --- drivers/gpu/drm/drm_gpuvm.c | 8 ++++++-- include/drm/drm_gpuvm.h | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index 07a6676bc4f9..9c0922c1a5a2 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -1535,14 +1535,18 @@ drm_gpuvm_bo_destroy(struct kref *kref) * hold the dma-resv or driver specific GEM gpuva lock. * * This function may only be called from non-atomic context. + * + * Returns: true if vm_bo was destroyed, false otherwise. */ -void +bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo) { might_sleep(); if (vm_bo) - kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy); + return !!kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy); + + return false; } EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put); diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h index b3f82ec7fb17..6258849382e1 100644 --- a/include/drm/drm_gpuvm.h +++ b/include/drm/drm_gpuvm.h @@ -721,7 +721,7 @@ drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo) return vm_bo; } -void drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo); +bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo); struct drm_gpuvm_bo * drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm, -- cgit v1.2.3 From c8fa1cc07759dde17c97796f41696a0da35c6ea7 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Sun, 3 Dec 2023 03:05:28 +0300 Subject: drm/atomic: add private obj state to state dump The drm_atomic_print_new_state() already prints private object state via drm_atomic_private_obj_print_state(). Add private object state dumping to __drm_state_dump(), so that it is also included into drm_state_dump() output and into debugfs/dri/N/state file. Reviewed-by: Rob Clark Acked-by: Maxime Ripard Signed-off-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20231203000532.1290480-2-dmitry.baryshkov@linaro.org --- drivers/gpu/drm/drm_atomic.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index f1a503aafe5a..c31fc0b48c31 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1773,6 +1773,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, struct drm_crtc *crtc; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; + struct drm_private_obj *obj; if (!drm_drv_uses_atomic_modeset(dev)) return; @@ -1801,6 +1802,14 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, if (take_locks) drm_modeset_unlock(&dev->mode_config.connection_mutex); drm_connector_list_iter_end(&conn_iter); + + list_for_each_entry(obj, &config->privobj_list, head) { + if (take_locks) + drm_modeset_lock(&obj->lock, NULL); + drm_atomic_private_obj_print_state(p, obj->state); + if (take_locks) + drm_modeset_unlock(&obj->lock); + } } /** -- cgit v1.2.3 From 8ebb1fc2e69ab8b89a425e402c7bd85e053b7b01 Mon Sep 17 00:00:00 2001 From: Abel Vesa Date: Mon, 4 Dec 2023 10:54:25 +0200 Subject: drm/panel-edp: Add SDC ATNA45AF01 Add support for the SDC ATNA45AF01 panel. 
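For readers without panel-edp.c at hand: each EDP_PANEL_ENTRY() line, like the one added below, is just a table initializer keyed on the EDID panel ID, which the driver matches at probe time to pick the right power-sequencing delays. The expansion sketched here is a paraphrase and may not match the current macro field for field; drm_edid_encode_panel_id() does exist and packs the three-letter PNP vendor code and 16-bit product ID into the same 32-bit value the panel reports in its EDID.

  /* Approximate expansion of the entry added below (sketch only):
   * EDP_PANEL_ENTRY('S', 'D', 'C', 0x416d, &delay_100_500_e200, "ATNA45AF01")
   */
  {
          .panel_id = drm_edid_encode_panel_id('S', 'D', 'C', 0x416d),
          .delay = &delay_100_500_e200,
          .name = "ATNA45AF01",
  },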
Signed-off-by: Abel Vesa Reviewed-by: Douglas Anderson Signed-off-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20231201-x1e80100-drm-panel-edp-v2-1-b0173484631a@linaro.org --- drivers/gpu/drm/panel/panel-edp.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 825fa2a0d8a5..78565c99b54d 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1983,6 +1983,8 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"), EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"), + EDP_PANEL_ENTRY('S', 'D', 'C', 0x416d, &delay_100_500_e200, "ATNA45AF01"), + EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &sharp_lq140m1jw46.delay, "LQ140M1JW46"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"), -- cgit v1.2.3 From 4900e0396e59be233cfa636369d4eec6b40dbeca Mon Sep 17 00:00:00 2001 From: Pin-yen Lin Date: Tue, 5 Dec 2023 20:35:35 +0800 Subject: drm/edp-panel: Sort the panel entries Move the order of CMN 0x14e5 to make the list sorted. Signed-off-by: Pin-yen Lin Signed-off-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20231205123630.988663-3-treapking@chromium.org --- drivers/gpu/drm/panel/panel-edp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 78565c99b54d..c76f186c4baa 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1967,9 +1967,9 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"), - EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"), EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5c, &delay_200_500_e200, "MB116AN01-2"), -- cgit v1.2.3 From 9cf5ca1f485cae406968947a92bf304603999fa1 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 22 Nov 2023 13:09:31 +0100 Subject: drm: Fix TODO list mentioning non-KMS drivers Non-KMS drivers have been removed from DRM. Update the TODO list accordingly. 
Signed-off-by: Thomas Zimmermann Fixes: a276afc19eec ("drm: Remove some obsolete drm pciids(tdfx, mga, i810, savage, r128, sis, via)") Cc: Cai Huoqing Cc: Daniel Vetter Cc: Dave Airlie Cc: Thomas Zimmermann Cc: Maarten Lankhorst Cc: Maxime Ripard Cc: David Airlie Cc: Daniel Vetter Cc: Jonathan Corbet Cc: dri-devel@lists.freedesktop.org Cc: # v6.3+ Cc: linux-doc@vger.kernel.org Reviewed-by: David Airlie Reviewed-by: Daniel Vetter Acked-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20231122122449.11588-3-tzimmermann@suse.de --- Documentation/gpu/todo.rst | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 503d57c75215..41a264bf84ce 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -337,8 +337,8 @@ connector register/unregister fixes Level: Intermediate -Remove load/unload callbacks from all non-DRIVER_LEGACY drivers ---------------------------------------------------------------- +Remove load/unload callbacks +---------------------------- The load/unload callbacks in struct &drm_driver are very much midlayers, plus for historical reasons they get the ordering wrong (and we can't fix that) @@ -347,8 +347,7 @@ between setting up the &drm_driver structure and calling drm_dev_register(). - Rework drivers to no longer use the load/unload callbacks, directly coding the load/unload sequence into the driver's probe function. -- Once all non-DRIVER_LEGACY drivers are converted, disallow the load/unload - callbacks for all modern drivers. +- Once all drivers are converted, remove the load/unload callbacks. Contact: Daniel Vetter -- cgit v1.2.3 From 972c45e892448f698047f312763eb984c0b8d7c3 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 22 Nov 2023 13:09:32 +0100 Subject: drm: Include One of the source files includes via , which will be removed. Include drm_auth.h directly. Signed-off-by: Thomas Zimmermann Reviewed-by: David Airlie Reviewed-by: Daniel Vetter Acked-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20231122122449.11588-4-tzimmermann@suse.de --- drivers/gpu/drm/drm_pci.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 39d35fc3a43b..67907dd42721 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -29,6 +29,7 @@ #include #include +#include #include #include #include -- cgit v1.2.3 From 786b96d01919f8876187d75a6a995ac5783ed0f5 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 22 Nov 2023 13:09:33 +0100 Subject: drm/i915: Include One of the source files includes via , which will be removed. Include drm_auth.h directly. 
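The change below is mechanical, but the rule it follows is worth spelling out: a file that uses symbols declared in drm_auth.h should include that header itself rather than rely on it being pulled in transitively by another header, especially one this series is about to delete. A minimal illustration, not taken from the i915 sources; the "before" include is an assumption chosen only to show the pattern.

  /* Before: drm_auth.h only reached this file indirectly, e.g. through
   * another DRM header that happened to include it. */
  #include <drm/drm_file.h>

  /* After: include what the file actually uses. */
  #include <drm/drm_auth.h>   /* struct drm_master, drm_is_current_master() */
  #include <drm/drm_file.h>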
Signed-off-by: Thomas Zimmermann Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: Tvrtko Ursulin Cc: intel-gfx@lists.freedesktop.org Reviewed-by: David Airlie Reviewed-by: Daniel Vetter Acked-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20231122122449.11588-5-tzimmermann@suse.de --- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 683fd8d3151c..ccc077b74d2d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -9,6 +9,7 @@ #include #include +#include #include #include "display/intel_frontbuffer.h" -- cgit v1.2.3 From 64c39a93ef6c45447bc093e34e02afeb4c063579 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 22 Nov 2023 13:09:34 +0100 Subject: accel: Include One of the source files includes via , which will be removed. Include drm_auth.h directly. Signed-off-by: Thomas Zimmermann Cc: Oded Gabbay Reviewed-by: David Airlie Reviewed-by: Daniel Vetter Acked-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20231122122449.11588-6-tzimmermann@suse.de --- drivers/accel/drm_accel.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c index 294b572a9c33..24cac4c0274b 100644 --- a/drivers/accel/drm_accel.c +++ b/drivers/accel/drm_accel.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include -- cgit v1.2.3 From 9f4db4495b6fa551f18a892f32c71899a20f4923 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 22 Nov 2023 13:09:35 +0100 Subject: drm: Include Include in drm_ioc32.c. Resolves a depenency on , which will be removed. Signed-off-by: Thomas Zimmermann Reviewed-by: David Airlie Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20231122122449.11588-7-tzimmermann@suse.de --- drivers/gpu/drm/drm_ioc32.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index 025dc558c94e..c0163cb076ad 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -31,6 +31,7 @@ #include #include +#include #include #include -- cgit v1.2.3 From c45a1e0a2e9d3f6b37d27e636ba905678c84a41a Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 22 Nov 2023 13:09:36 +0100 Subject: drm/radeon: Do not include MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Including is not required by radeon. Signed-off-by: Thomas Zimmermann Cc: Alex Deucher Cc: "Christian König" Cc: "Pan, Xinhui" Cc: amd-gfx@lists.freedesktop.org Reviewed-by: David Airlie Reviewed-by: Daniel Vetter Acked-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20231122122449.11588-8-tzimmermann@suse.de --- drivers/gpu/drm/radeon/radeon_drv.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 34a1c73d3938..02a65971d140 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -33,7 +33,6 @@ #include #include -#include #include "radeon_family.h" -- cgit v1.2.3 From 184dcdc251420929bf195f99f0b9fb6960788b6d Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 22 Nov 2023 13:09:37 +0100 Subject: drm: Remove entry points for legacy ioctls DRM drivers with user-space mode setting have been removed in Linux v6.3. 
[1] Now remove the ioctl entry points for these drivers. Invoking any of the ioctl ops will unconditionally return -EINVAL to user space. This has already been the behavior for kernels without CONFIG_DRM_LEGACY set. Signed-off-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/series/111602/ # [1] Reviewed-by: David Airlie Reviewed-by: Daniel Vetter Acked-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20231122122449.11588-9-tzimmermann@suse.de --- drivers/gpu/drm/drm_ioc32.c | 610 -------------------------------------------- drivers/gpu/drm/drm_ioctl.c | 57 ----- 2 files changed, 667 deletions(-) diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index c0163cb076ad..910cadf14756 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -37,7 +37,6 @@ #include "drm_crtc_internal.h" #include "drm_internal.h" -#include "drm_legacy.h" #define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t) #define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t) @@ -164,92 +163,6 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd, return -EINVAL; } -#if IS_ENABLED(CONFIG_DRM_LEGACY) -typedef struct drm_map32 { - u32 offset; /* Requested physical address (0 for SAREA) */ - u32 size; /* Requested physical size (bytes) */ - enum drm_map_type type; /* Type of memory to map */ - enum drm_map_flags flags; /* Flags */ - u32 handle; /* User-space: "Handle" to pass to mmap() */ - int mtrr; /* MTRR slot used */ -} drm_map32_t; - -static int compat_drm_getmap(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_map32_t __user *argp = (void __user *)arg; - drm_map32_t m32; - struct drm_map map; - int err; - - if (copy_from_user(&m32, argp, sizeof(m32))) - return -EFAULT; - - map.offset = m32.offset; - err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, 0); - if (err) - return err; - - m32.offset = map.offset; - m32.size = map.size; - m32.type = map.type; - m32.flags = map.flags; - m32.handle = ptr_to_compat((void __user *)map.handle); - m32.mtrr = map.mtrr; - if (copy_to_user(argp, &m32, sizeof(m32))) - return -EFAULT; - return 0; - -} - -static int compat_drm_addmap(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_map32_t __user *argp = (void __user *)arg; - drm_map32_t m32; - struct drm_map map; - int err; - - if (copy_from_user(&m32, argp, sizeof(m32))) - return -EFAULT; - - map.offset = m32.offset; - map.size = m32.size; - map.type = m32.type; - map.flags = m32.flags; - - err = drm_ioctl_kernel(file, drm_legacy_addmap_ioctl, &map, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); - if (err) - return err; - - m32.offset = map.offset; - m32.mtrr = map.mtrr; - m32.handle = ptr_to_compat((void __user *)map.handle); - if (map.handle != compat_ptr(m32.handle)) - pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n", - map.handle, m32.type, m32.offset); - - if (copy_to_user(argp, &m32, sizeof(m32))) - return -EFAULT; - - return 0; -} - -static int compat_drm_rmmap(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_map32_t __user *argp = (void __user *)arg; - struct drm_map map; - u32 handle; - - if (get_user(handle, &argp->handle)) - return -EFAULT; - map.handle = compat_ptr(handle); - return drm_ioctl_kernel(file, drm_legacy_rmmap_ioctl, &map, DRM_AUTH); -} -#endif - typedef struct drm_client32 { int idx; /* Which client desired? */ int auth; /* Is client authenticated? 
*/ @@ -309,501 +222,6 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd, return 0; } -#if IS_ENABLED(CONFIG_DRM_LEGACY) -typedef struct drm_buf_desc32 { - int count; /* Number of buffers of this size */ - int size; /* Size in bytes */ - int low_mark; /* Low water mark */ - int high_mark; /* High water mark */ - int flags; - u32 agp_start; /* Start address in the AGP aperture */ -} drm_buf_desc32_t; - -static int compat_drm_addbufs(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_buf_desc32_t __user *argp = (void __user *)arg; - drm_buf_desc32_t desc32; - struct drm_buf_desc desc; - int err; - - if (copy_from_user(&desc32, argp, sizeof(drm_buf_desc32_t))) - return -EFAULT; - - desc = (struct drm_buf_desc){ - desc32.count, desc32.size, desc32.low_mark, desc32.high_mark, - desc32.flags, desc32.agp_start - }; - - err = drm_ioctl_kernel(file, drm_legacy_addbufs, &desc, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); - if (err) - return err; - - desc32 = (drm_buf_desc32_t){ - desc.count, desc.size, desc.low_mark, desc.high_mark, - desc.flags, desc.agp_start - }; - if (copy_to_user(argp, &desc32, sizeof(drm_buf_desc32_t))) - return -EFAULT; - - return 0; -} - -static int compat_drm_markbufs(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_buf_desc32_t b32; - drm_buf_desc32_t __user *argp = (void __user *)arg; - struct drm_buf_desc buf; - - if (copy_from_user(&b32, argp, sizeof(b32))) - return -EFAULT; - - buf.size = b32.size; - buf.low_mark = b32.low_mark; - buf.high_mark = b32.high_mark; - - return drm_ioctl_kernel(file, drm_legacy_markbufs, &buf, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} - -typedef struct drm_buf_info32 { - int count; /**< Entries in list */ - u32 list; -} drm_buf_info32_t; - -static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from) -{ - drm_buf_info32_t *request = data; - drm_buf_desc32_t __user *to = compat_ptr(request->list); - drm_buf_desc32_t v = {.count = from->buf_count, - .size = from->buf_size, - .low_mark = from->low_mark, - .high_mark = from->high_mark}; - - if (copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags))) - return -EFAULT; - return 0; -} - -static int drm_legacy_infobufs32(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - drm_buf_info32_t *request = data; - - return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf32); -} - -static int compat_drm_infobufs(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_buf_info32_t req32; - drm_buf_info32_t __user *argp = (void __user *)arg; - int err; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - if (req32.count < 0) - req32.count = 0; - - err = drm_ioctl_kernel(file, drm_legacy_infobufs32, &req32, DRM_AUTH); - if (err) - return err; - - if (put_user(req32.count, &argp->count)) - return -EFAULT; - - return 0; -} - -typedef struct drm_buf_pub32 { - int idx; /**< Index into the master buffer list */ - int total; /**< Buffer size */ - int used; /**< Amount of buffer in use (for DMA) */ - u32 address; /**< Address of buffer */ -} drm_buf_pub32_t; - -typedef struct drm_buf_map32 { - int count; /**< Length of the buffer list */ - u32 virtual; /**< Mmap'd area in user-virtual */ - u32 list; /**< Buffer information */ -} drm_buf_map32_t; - -static int map_one_buf32(void *data, int idx, unsigned long virtual, - struct drm_buf *buf) -{ - drm_buf_map32_t *request = data; - drm_buf_pub32_t __user *to = compat_ptr(request->list) + idx; - drm_buf_pub32_t v; - - v.idx = buf->idx; - 
v.total = buf->total; - v.used = 0; - v.address = virtual + buf->offset; - if (copy_to_user(to, &v, sizeof(v))) - return -EFAULT; - return 0; -} - -static int drm_legacy_mapbufs32(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - drm_buf_map32_t *request = data; - void __user *v; - int err = __drm_legacy_mapbufs(dev, data, &request->count, - &v, map_one_buf32, - file_priv); - request->virtual = ptr_to_compat(v); - return err; -} - -static int compat_drm_mapbufs(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_buf_map32_t __user *argp = (void __user *)arg; - drm_buf_map32_t req32; - int err; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - if (req32.count < 0) - return -EINVAL; - - err = drm_ioctl_kernel(file, drm_legacy_mapbufs32, &req32, DRM_AUTH); - if (err) - return err; - - if (put_user(req32.count, &argp->count) - || put_user(req32.virtual, &argp->virtual)) - return -EFAULT; - - return 0; -} - -typedef struct drm_buf_free32 { - int count; - u32 list; -} drm_buf_free32_t; - -static int compat_drm_freebufs(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_buf_free32_t req32; - struct drm_buf_free request; - drm_buf_free32_t __user *argp = (void __user *)arg; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - request.count = req32.count; - request.list = compat_ptr(req32.list); - return drm_ioctl_kernel(file, drm_legacy_freebufs, &request, DRM_AUTH); -} - -typedef struct drm_ctx_priv_map32 { - unsigned int ctx_id; /**< Context requesting private mapping */ - u32 handle; /**< Handle of map */ -} drm_ctx_priv_map32_t; - -static int compat_drm_setsareactx(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_ctx_priv_map32_t req32; - struct drm_ctx_priv_map request; - drm_ctx_priv_map32_t __user *argp = (void __user *)arg; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - request.ctx_id = req32.ctx_id; - request.handle = compat_ptr(req32.handle); - return drm_ioctl_kernel(file, drm_legacy_setsareactx, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} - -static int compat_drm_getsareactx(struct file *file, unsigned int cmd, - unsigned long arg) -{ - struct drm_ctx_priv_map req; - drm_ctx_priv_map32_t req32; - drm_ctx_priv_map32_t __user *argp = (void __user *)arg; - int err; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - req.ctx_id = req32.ctx_id; - err = drm_ioctl_kernel(file, drm_legacy_getsareactx, &req, DRM_AUTH); - if (err) - return err; - - req32.handle = ptr_to_compat((void __user *)req.handle); - if (copy_to_user(argp, &req32, sizeof(req32))) - return -EFAULT; - - return 0; -} - -typedef struct drm_ctx_res32 { - int count; - u32 contexts; -} drm_ctx_res32_t; - -static int compat_drm_resctx(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_ctx_res32_t __user *argp = (void __user *)arg; - drm_ctx_res32_t res32; - struct drm_ctx_res res; - int err; - - if (copy_from_user(&res32, argp, sizeof(res32))) - return -EFAULT; - - res.count = res32.count; - res.contexts = compat_ptr(res32.contexts); - err = drm_ioctl_kernel(file, drm_legacy_resctx, &res, DRM_AUTH); - if (err) - return err; - - res32.count = res.count; - if (copy_to_user(argp, &res32, sizeof(res32))) - return -EFAULT; - - return 0; -} - -typedef struct drm_dma32 { - int context; /**< Context handle */ - int send_count; /**< Number of buffers to send */ - u32 send_indices; /**< List of handles to buffers */ - u32 send_sizes; /**< Lengths of data 
to send */ - enum drm_dma_flags flags; /**< Flags */ - int request_count; /**< Number of buffers requested */ - int request_size; /**< Desired size for buffers */ - u32 request_indices; /**< Buffer information */ - u32 request_sizes; - int granted_count; /**< Number of buffers granted */ -} drm_dma32_t; - -static int compat_drm_dma(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_dma32_t d32; - drm_dma32_t __user *argp = (void __user *)arg; - struct drm_dma d; - int err; - - if (copy_from_user(&d32, argp, sizeof(d32))) - return -EFAULT; - - d.context = d32.context; - d.send_count = d32.send_count; - d.send_indices = compat_ptr(d32.send_indices); - d.send_sizes = compat_ptr(d32.send_sizes); - d.flags = d32.flags; - d.request_count = d32.request_count; - d.request_indices = compat_ptr(d32.request_indices); - d.request_sizes = compat_ptr(d32.request_sizes); - err = drm_ioctl_kernel(file, drm_legacy_dma_ioctl, &d, DRM_AUTH); - if (err) - return err; - - if (put_user(d.request_size, &argp->request_size) - || put_user(d.granted_count, &argp->granted_count)) - return -EFAULT; - - return 0; -} -#endif - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -#if IS_ENABLED(CONFIG_AGP) -typedef struct drm_agp_mode32 { - u32 mode; /**< AGP mode */ -} drm_agp_mode32_t; - -static int compat_drm_agp_enable(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_mode32_t __user *argp = (void __user *)arg; - struct drm_agp_mode mode; - - if (get_user(mode.mode, &argp->mode)) - return -EFAULT; - - return drm_ioctl_kernel(file, drm_legacy_agp_enable_ioctl, &mode, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} - -typedef struct drm_agp_info32 { - int agp_version_major; - int agp_version_minor; - u32 mode; - u32 aperture_base; /* physical address */ - u32 aperture_size; /* bytes */ - u32 memory_allowed; /* bytes */ - u32 memory_used; - - /* PCI information */ - unsigned short id_vendor; - unsigned short id_device; -} drm_agp_info32_t; - -static int compat_drm_agp_info(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_info32_t __user *argp = (void __user *)arg; - drm_agp_info32_t i32; - struct drm_agp_info info; - int err; - - err = drm_ioctl_kernel(file, drm_legacy_agp_info_ioctl, &info, DRM_AUTH); - if (err) - return err; - - i32.agp_version_major = info.agp_version_major; - i32.agp_version_minor = info.agp_version_minor; - i32.mode = info.mode; - i32.aperture_base = info.aperture_base; - i32.aperture_size = info.aperture_size; - i32.memory_allowed = info.memory_allowed; - i32.memory_used = info.memory_used; - i32.id_vendor = info.id_vendor; - i32.id_device = info.id_device; - if (copy_to_user(argp, &i32, sizeof(i32))) - return -EFAULT; - - return 0; -} - -typedef struct drm_agp_buffer32 { - u32 size; /**< In bytes -- will round to page boundary */ - u32 handle; /**< Used for binding / unbinding */ - u32 type; /**< Type of memory to allocate */ - u32 physical; /**< Physical used by i810 */ -} drm_agp_buffer32_t; - -static int compat_drm_agp_alloc(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_buffer32_t __user *argp = (void __user *)arg; - drm_agp_buffer32_t req32; - struct drm_agp_buffer request; - int err; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - request.size = req32.size; - request.type = req32.type; - err = drm_ioctl_kernel(file, drm_legacy_agp_alloc_ioctl, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); - if (err) - return err; - - req32.handle = request.handle; - req32.physical = request.physical; - if 
(copy_to_user(argp, &req32, sizeof(req32))) { - drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); - return -EFAULT; - } - - return 0; -} - -static int compat_drm_agp_free(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_buffer32_t __user *argp = (void __user *)arg; - struct drm_agp_buffer request; - - if (get_user(request.handle, &argp->handle)) - return -EFAULT; - - return drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} - -typedef struct drm_agp_binding32 { - u32 handle; /**< From drm_agp_buffer */ - u32 offset; /**< In bytes -- will round to page boundary */ -} drm_agp_binding32_t; - -static int compat_drm_agp_bind(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_binding32_t __user *argp = (void __user *)arg; - drm_agp_binding32_t req32; - struct drm_agp_binding request; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - request.handle = req32.handle; - request.offset = req32.offset; - return drm_ioctl_kernel(file, drm_legacy_agp_bind_ioctl, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} - -static int compat_drm_agp_unbind(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_binding32_t __user *argp = (void __user *)arg; - struct drm_agp_binding request; - - if (get_user(request.handle, &argp->handle)) - return -EFAULT; - - return drm_ioctl_kernel(file, drm_legacy_agp_unbind_ioctl, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} -#endif /* CONFIG_AGP */ - -typedef struct drm_scatter_gather32 { - u32 size; /**< In bytes -- will round to page boundary */ - u32 handle; /**< Used for mapping / unmapping */ -} drm_scatter_gather32_t; - -static int compat_drm_sg_alloc(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_scatter_gather32_t __user *argp = (void __user *)arg; - struct drm_scatter_gather request; - int err; - - if (get_user(request.size, &argp->size)) - return -EFAULT; - - err = drm_ioctl_kernel(file, drm_legacy_sg_alloc, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); - if (err) - return err; - - /* XXX not sure about the handle conversion here... 
*/ - if (put_user(request.handle >> PAGE_SHIFT, &argp->handle)) - return -EFAULT; - - return 0; -} - -static int compat_drm_sg_free(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_scatter_gather32_t __user *argp = (void __user *)arg; - struct drm_scatter_gather request; - unsigned long x; - - if (get_user(x, &argp->handle)) - return -EFAULT; - request.handle = x << PAGE_SHIFT; - return drm_ioctl_kernel(file, drm_legacy_sg_free, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} -#endif #if defined(CONFIG_X86) typedef struct drm_update_draw32 { drm_drawable_t handle; @@ -915,37 +333,9 @@ static struct { #define DRM_IOCTL32_DEF(n, f) [DRM_IOCTL_NR(n##32)] = {.fn = f, .name = #n} DRM_IOCTL32_DEF(DRM_IOCTL_VERSION, compat_drm_version), DRM_IOCTL32_DEF(DRM_IOCTL_GET_UNIQUE, compat_drm_getunique), -#if IS_ENABLED(CONFIG_DRM_LEGACY) - DRM_IOCTL32_DEF(DRM_IOCTL_GET_MAP, compat_drm_getmap), -#endif DRM_IOCTL32_DEF(DRM_IOCTL_GET_CLIENT, compat_drm_getclient), DRM_IOCTL32_DEF(DRM_IOCTL_GET_STATS, compat_drm_getstats), DRM_IOCTL32_DEF(DRM_IOCTL_SET_UNIQUE, compat_drm_setunique), -#if IS_ENABLED(CONFIG_DRM_LEGACY) - DRM_IOCTL32_DEF(DRM_IOCTL_ADD_MAP, compat_drm_addmap), - DRM_IOCTL32_DEF(DRM_IOCTL_ADD_BUFS, compat_drm_addbufs), - DRM_IOCTL32_DEF(DRM_IOCTL_MARK_BUFS, compat_drm_markbufs), - DRM_IOCTL32_DEF(DRM_IOCTL_INFO_BUFS, compat_drm_infobufs), - DRM_IOCTL32_DEF(DRM_IOCTL_MAP_BUFS, compat_drm_mapbufs), - DRM_IOCTL32_DEF(DRM_IOCTL_FREE_BUFS, compat_drm_freebufs), - DRM_IOCTL32_DEF(DRM_IOCTL_RM_MAP, compat_drm_rmmap), - DRM_IOCTL32_DEF(DRM_IOCTL_SET_SAREA_CTX, compat_drm_setsareactx), - DRM_IOCTL32_DEF(DRM_IOCTL_GET_SAREA_CTX, compat_drm_getsareactx), - DRM_IOCTL32_DEF(DRM_IOCTL_RES_CTX, compat_drm_resctx), - DRM_IOCTL32_DEF(DRM_IOCTL_DMA, compat_drm_dma), -#if IS_ENABLED(CONFIG_AGP) - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ENABLE, compat_drm_agp_enable), - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_INFO, compat_drm_agp_info), - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ALLOC, compat_drm_agp_alloc), - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_FREE, compat_drm_agp_free), - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_BIND, compat_drm_agp_bind), - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_UNBIND, compat_drm_agp_unbind), -#endif -#endif -#if IS_ENABLED(CONFIG_DRM_LEGACY) - DRM_IOCTL32_DEF(DRM_IOCTL_SG_ALLOC, compat_drm_sg_alloc), - DRM_IOCTL32_DEF(DRM_IOCTL_SG_FREE, compat_drm_sg_free), -#endif #if defined(CONFIG_X86) DRM_IOCTL32_DEF(DRM_IOCTL_UPDATE_DRAW, compat_drm_update_draw), #endif diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index ea303386e688..93f6bf15c0ae 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -42,7 +42,6 @@ #include "drm_crtc_internal.h" #include "drm_internal.h" -#include "drm_legacy.h" /** * DOC: getunique and setversion story @@ -572,21 +571,11 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) .name = #ioctl \ } -#if IS_ENABLED(CONFIG_DRM_LEGACY) -#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, _func, _flags) -#else -#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, drm_invalid_op, _flags) -#endif - /* Ioctl table */ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_legacy_irq_by_busid, - DRM_MASTER|DRM_ROOT_ONLY), - - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0), 
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0), @@ -599,60 +588,14 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_MASTER), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH), - - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, 0), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - -#if IS_ENABLED(CONFIG_AGP) - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_legacy_agp_acquire_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_legacy_agp_release_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_legacy_agp_enable_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_legacy_agp_info_ioctl, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_legacy_agp_alloc_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_legacy_agp_free_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_legacy_agp_bind_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_legacy_agp_unbind_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), -#endif - - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, 
DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl_ioctl, 0), -- cgit v1.2.3 From 6bb0814be42e109555dd63e59e6eabf968b9b016 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 22 Nov 2023 13:09:38 +0100 Subject: drm: Remove the legacy DRM_IOCTL_MODESET_CTL ioctl DRM drivers with user-space mode setting have been removed in Linux v6.3. [1] Now remove the ioctl entry points for these drivers. Invoking any of the ioctl ops will unconditionally return -EINVAL to user space. Invoking DRM_IOCTL_MODESET_CTL is different from the other legacy ioctl ops as it returns 0 even without CONFIG_DRM_LEGACY set. From the original commit 29935554b384 ("drm: Disallow DRM_IOCTL_MODESET_CTL for KMS drivers") it is not apparent how or why the operation differs from the others. It is likely just an oversight in commit 61ae227032e7 ("drm: allow removal of legacy codepaths (v4.1)"), which allowed disabling leagacy ioctls in the first place. Still keep this removal separate from the other ioctls to allow an easy revert, if necessary. Signed-off-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/series/111602/ # [1] Reviewed-by: David Airlie Reviewed-by: Daniel Vetter Acked-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20231122122449.11588-10-tzimmermann@suse.de --- drivers/gpu/drm/drm_internal.h | 2 -- drivers/gpu/drm/drm_ioctl.c | 2 -- drivers/gpu/drm/drm_vblank.c | 82 ------------------------------------------ 3 files changed, 86 deletions(-) diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index b7a311efa2b1..fa08a313127e 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -117,8 +117,6 @@ void drm_handle_vblank_works(struct drm_vblank_crtc *vblank); /* IOCTLS */ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); -int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); /* drm_irq.c */ diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 93f6bf15c0ae..ebba34ba734e 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -598,8 +598,6 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl_ioctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_RENDER_ALLOW), diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 877e2067534f..a11f164b2384 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -1574,88 +1574,6 @@ void drm_crtc_vblank_restore(struct drm_crtc *crtc) } EXPORT_SYMBOL(drm_crtc_vblank_restore); -static void drm_legacy_vblank_pre_modeset(struct drm_device *dev, - unsigned int pipe) -{ - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - - /* vblank is not initialized (IRQ not installed ?), or has been freed */ - if (!drm_dev_has_vblank(dev)) - return; - - if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) - return; - - /* - * To avoid all the problems that might happen if interrupts - * were enabled/disabled around or between these calls, we just - * have the kernel take a reference on the CRTC (just once though - * to avoid corrupting the count if multiple, mismatch calls occur), - * so that interrupts remain enabled in the interim. 
- */ - if (!vblank->inmodeset) { - vblank->inmodeset = 0x1; - if (drm_vblank_get(dev, pipe) == 0) - vblank->inmodeset |= 0x2; - } -} - -static void drm_legacy_vblank_post_modeset(struct drm_device *dev, - unsigned int pipe) -{ - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - - /* vblank is not initialized (IRQ not installed ?), or has been freed */ - if (!drm_dev_has_vblank(dev)) - return; - - if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) - return; - - if (vblank->inmodeset) { - spin_lock_irq(&dev->vbl_lock); - drm_reset_vblank_timestamp(dev, pipe); - spin_unlock_irq(&dev->vbl_lock); - - if (vblank->inmodeset & 0x2) - drm_vblank_put(dev, pipe); - - vblank->inmodeset = 0; - } -} - -int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_modeset_ctl *modeset = data; - unsigned int pipe; - - /* If drm_vblank_init() hasn't been called yet, just no-op */ - if (!drm_dev_has_vblank(dev)) - return 0; - - /* KMS drivers handle this internally */ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return 0; - - pipe = modeset->crtc; - if (pipe >= dev->num_crtcs) - return -EINVAL; - - switch (modeset->cmd) { - case _DRM_PRE_MODESET: - drm_legacy_vblank_pre_modeset(dev, pipe); - break; - case _DRM_POST_MODESET: - drm_legacy_vblank_post_modeset(dev, pipe); - break; - default: - return -EINVAL; - } - - return 0; -} - static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe, u64 req_seq, union drm_wait_vblank *vblwait, -- cgit v1.2.3 From 2722ac1ce1c1f3e6a3a0c59f0072b2f9ba136551 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 22 Nov 2023 13:09:39 +0100 Subject: drm: Remove support for legacy drivers Remove all hooks and calls into code for user-space mode setting from the DRM core. Without the drivers and ioctl entry points, none of this is required any longer. Signed-off-by: Thomas Zimmermann Reviewed-by: David Airlie Reviewed-by: Daniel Vetter Acked-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20231122122449.11588-11-tzimmermann@suse.de --- drivers/gpu/drm/drm_auth.c | 8 +----- drivers/gpu/drm/drm_drv.c | 17 ------------ drivers/gpu/drm/drm_file.c | 64 ++------------------------------------------ drivers/gpu/drm/drm_vblank.c | 19 ------------- 4 files changed, 3 insertions(+), 105 deletions(-) diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index 2ed2585ded37..252c105d614f 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -37,13 +37,12 @@ #include #include "drm_internal.h" -#include "drm_legacy.h" /** * DOC: master and authentication * * &struct drm_master is used to track groups of clients with open - * primary/legacy device nodes. For every &struct drm_file which has had at + * primary device nodes. For every &struct drm_file which has had at * least once successfully became the device master (either through the * SET_MASTER IOCTL, or implicitly through opening the primary device node when * no one else is the current master that time) there exists one &drm_master. 
@@ -139,7 +138,6 @@ struct drm_master *drm_master_create(struct drm_device *dev) return NULL; kref_init(&master->refcount); - drm_master_legacy_init(master); idr_init_base(&master->magic_map, 1); master->dev = dev; @@ -365,8 +363,6 @@ void drm_master_release(struct drm_file *file_priv) if (!drm_is_current_master_locked(file_priv)) goto out; - drm_legacy_lock_master_cleanup(dev, master); - if (dev->master == file_priv->master) drm_drop_master(dev, file_priv); out: @@ -429,8 +425,6 @@ static void drm_master_destroy(struct kref *kref) if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_lease_destroy(master); - drm_legacy_master_rmmaps(dev, master); - idr_destroy(&master->magic_map); idr_destroy(&master->leases); idr_destroy(&master->lessee_idr); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 3c835c99daad..243cacb3575c 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -48,7 +48,6 @@ #include "drm_crtc_internal.h" #include "drm_internal.h" -#include "drm_legacy.h" MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"); MODULE_DESCRIPTION("DRM shared core routines"); @@ -585,8 +584,6 @@ static void drm_fs_inode_free(struct inode *inode) static void drm_dev_init_release(struct drm_device *dev, void *res) { - drm_legacy_ctxbitmap_cleanup(dev); - drm_legacy_remove_map_hash(dev); drm_fs_inode_free(dev->anon_inode); put_device(dev->dev); @@ -597,7 +594,6 @@ static void drm_dev_init_release(struct drm_device *dev, void *res) mutex_destroy(&dev->clientlist_mutex); mutex_destroy(&dev->filelist_mutex); mutex_destroy(&dev->struct_mutex); - drm_legacy_destroy_members(dev); } static int drm_dev_init(struct drm_device *dev, @@ -632,7 +628,6 @@ static int drm_dev_init(struct drm_device *dev, return -EINVAL; } - drm_legacy_init_members(dev); INIT_LIST_HEAD(&dev->filelist); INIT_LIST_HEAD(&dev->filelist_internal); INIT_LIST_HEAD(&dev->clientlist); @@ -673,12 +668,6 @@ static int drm_dev_init(struct drm_device *dev, goto err; } - ret = drm_legacy_create_map_hash(dev); - if (ret) - goto err; - - drm_legacy_ctxbitmap_init(dev); - if (drm_core_check_feature(dev, DRIVER_GEM)) { ret = drm_gem_init(dev); if (ret) { @@ -996,9 +985,6 @@ EXPORT_SYMBOL(drm_dev_register); */ void drm_dev_unregister(struct drm_device *dev) { - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - drm_lastclose(dev); - dev->registered = false; drm_client_dev_unregister(dev); @@ -1009,9 +995,6 @@ void drm_dev_unregister(struct drm_device *dev) if (dev->driver->unload) dev->driver->unload(dev); - drm_legacy_pci_agp_destroy(dev); - drm_legacy_rmmaps(dev); - remove_compat_control_link(dev); drm_minor_unregister(dev, DRM_MINOR_ACCEL); drm_minor_unregister(dev, DRM_MINOR_PRIMARY); diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 5ddaffd32586..987c1a5d1773 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -47,21 +47,12 @@ #include "drm_crtc_internal.h" #include "drm_internal.h" -#include "drm_legacy.h" /* from BKL pushdown */ DEFINE_MUTEX(drm_global_mutex); bool drm_dev_needs_global_mutex(struct drm_device *dev) { - /* - * Legacy drivers rely on all kinds of BKL locking semantics, don't - * bother. They also still need BKL locking for their ioctls, so better - * safe than sorry. - */ - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - return true; - /* * The deprecated ->load callback must be called after the driver is * already registered. 
This means such drivers rely on the BKL to make @@ -107,9 +98,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev) * drm_send_event() as the main starting points. * * The memory mapping implementation will vary depending on how the driver - * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap() - * function, modern drivers should use one of the provided memory-manager - * specific implementations. For GEM-based drivers this is drm_gem_mmap(). + * manages memory. For GEM-based drivers this is drm_gem_mmap(). * * No other file operations are supported by the DRM userspace API. Overall the * following is an example &file_operations structure:: @@ -254,18 +243,6 @@ void drm_file_free(struct drm_file *file) (long)old_encode_dev(file->minor->kdev->devt), atomic_read(&dev->open_count)); -#ifdef CONFIG_DRM_LEGACY - if (drm_core_check_feature(dev, DRIVER_LEGACY) && - dev->driver->preclose) - dev->driver->preclose(dev, file); -#endif - - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - drm_legacy_lock_release(dev, file->filp); - - if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - drm_legacy_reclaim_buffers(dev, file); - drm_events_release(file); if (drm_core_check_feature(dev, DRIVER_MODESET)) { @@ -279,8 +256,6 @@ void drm_file_free(struct drm_file *file) if (drm_core_check_feature(dev, DRIVER_GEM)) drm_gem_release(dev, file); - drm_legacy_ctxbitmap_flush(dev, file); - if (drm_is_primary_client(file)) drm_master_release(file); @@ -367,29 +342,6 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor) list_add(&priv->lhead, &dev->filelist); mutex_unlock(&dev->filelist_mutex); -#ifdef CONFIG_DRM_LEGACY -#ifdef __alpha__ - /* - * Default the hose - */ - if (!dev->hose) { - struct pci_dev *pci_dev; - - pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL); - if (pci_dev) { - dev->hose = pci_dev->sysdata; - pci_dev_put(pci_dev); - } - if (!dev->hose) { - struct pci_bus *b = list_entry(pci_root_buses.next, - struct pci_bus, node); - if (b) - dev->hose = b->sysdata; - } - } -#endif -#endif - return 0; } @@ -411,7 +363,6 @@ int drm_open(struct inode *inode, struct file *filp) struct drm_device *dev; struct drm_minor *minor; int retcode; - int need_setup = 0; minor = drm_minor_acquire(iminor(inode)); if (IS_ERR(minor)) @@ -421,8 +372,7 @@ int drm_open(struct inode *inode, struct file *filp) if (drm_dev_needs_global_mutex(dev)) mutex_lock(&drm_global_mutex); - if (!atomic_fetch_inc(&dev->open_count)) - need_setup = 1; + atomic_fetch_inc(&dev->open_count); /* share address_space across all char-devs of a single device */ filp->f_mapping = dev->anon_inode->i_mapping; @@ -430,13 +380,6 @@ int drm_open(struct inode *inode, struct file *filp) retcode = drm_open_helper(filp, minor); if (retcode) goto err_undo; - if (need_setup) { - retcode = drm_legacy_setup(dev); - if (retcode) { - drm_close_helper(filp); - goto err_undo; - } - } if (drm_dev_needs_global_mutex(dev)) mutex_unlock(&drm_global_mutex); @@ -460,9 +403,6 @@ void drm_lastclose(struct drm_device * dev) dev->driver->lastclose(dev); drm_dbg_core(dev, "driver lastclose completed\n"); - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - drm_legacy_dev_reinit(dev); - drm_client_dev_restore(dev); } diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index a11f164b2384..702a12bc93bd 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -210,11 +210,6 @@ static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe) if (crtc->funcs->get_vblank_counter) return 
crtc->funcs->get_vblank_counter(crtc); } -#ifdef CONFIG_DRM_LEGACY - else if (dev->driver->get_vblank_counter) { - return dev->driver->get_vblank_counter(dev, pipe); - } -#endif return drm_vblank_no_hw_counter(dev, pipe); } @@ -433,11 +428,6 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe) if (crtc->funcs->disable_vblank) crtc->funcs->disable_vblank(crtc); } -#ifdef CONFIG_DRM_LEGACY - else { - dev->driver->disable_vblank(dev, pipe); - } -#endif } /* @@ -1151,11 +1141,6 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe) if (crtc->funcs->enable_vblank) return crtc->funcs->enable_vblank(crtc); } -#ifdef CONFIG_DRM_LEGACY - else if (dev->driver->enable_vblank) { - return dev->driver->enable_vblank(dev, pipe); - } -#endif return -EINVAL; } @@ -1698,10 +1683,6 @@ static void drm_wait_vblank_reply(struct drm_device *dev, unsigned int pipe, static bool drm_wait_vblank_supported(struct drm_device *dev) { -#if IS_ENABLED(CONFIG_DRM_LEGACY) - if (unlikely(drm_core_check_feature(dev, DRIVER_LEGACY))) - return dev->irq_enabled; -#endif return drm_dev_has_vblank(dev); } -- cgit v1.2.3 From 2798ffcc1d6a788b5769b1fbcf0750dfc06ae98a Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 22 Nov 2023 13:09:40 +0100 Subject: drm: Remove locking for legacy ioctls and DRM_UNLOCKED Modern DRM drivers acquire ioctl locks by themselves. Legacy ioctls for user-space mode setting used to acquire drm_global_mutex. After removing the ioctl entry points, also remove the locking code. The only legacy ioctl without global locking was VBLANK_WAIT, which has been removed as well. Hence remove the related DRM_UNLOCKED flag. Signed-off-by: Thomas Zimmermann Reviewed-by: David Airlie Reviewed-by: Daniel Vetter Acked-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20231122122449.11588-12-tzimmermann@suse.de --- drivers/gpu/drm/drm_ioc32.c | 2 +- drivers/gpu/drm/drm_ioctl.c | 23 +++++++---------------- include/drm/drm_ioctl.h | 11 ----------- 3 files changed, 8 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index 910cadf14756..129e2b91dbfe 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -273,7 +273,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, req.request.type = req32.request.type; req.request.sequence = req32.request.sequence; req.request.signal = req32.request.signal; - err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED); + err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, 0); req32.reply.type = req.reply.type; req32.reply.sequence = req.reply.sequence; diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index ebba34ba734e..e368fc084c77 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -596,7 +596,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), @@ -729,7 +729,7 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata, { struct drm_file *file_priv = file->private_data; struct drm_device *dev = file_priv->minor->dev; - int retcode; + int ret; /* Update drm_file owner if fd was passed along. 
*/ drm_file_update_pid(file_priv); @@ -737,20 +737,11 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata, if (drm_dev_is_unplugged(dev)) return -ENODEV; - retcode = drm_ioctl_permit(flags, file_priv); - if (unlikely(retcode)) - return retcode; - - /* Enforce sane locking for modern driver ioctls. */ - if (likely(!drm_core_check_feature(dev, DRIVER_LEGACY)) || - (flags & DRM_UNLOCKED)) - retcode = func(dev, kdata, file_priv); - else { - mutex_lock(&drm_global_mutex); - retcode = func(dev, kdata, file_priv); - mutex_unlock(&drm_global_mutex); - } - return retcode; + ret = drm_ioctl_permit(flags, file_priv); + if (unlikely(ret)) + return ret; + + return func(dev, kdata, file_priv); } EXPORT_SYMBOL(drm_ioctl_kernel); diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h index 6ed61c371f6c..171760b6c4a1 100644 --- a/include/drm/drm_ioctl.h +++ b/include/drm/drm_ioctl.h @@ -109,17 +109,6 @@ enum drm_ioctl_flags { * This is equivalent to callers with the SYSADMIN capability. */ DRM_ROOT_ONLY = BIT(2), - /** - * @DRM_UNLOCKED: - * - * Whether &drm_ioctl_desc.func should be called with the DRM BKL held - * or not. Enforced as the default for all modern drivers, hence there - * should never be a need to set this flag. - * - * Do not use anywhere else than for the VBLANK_WAIT IOCTL, which is the - * only legacy IOCTL which needs this. - */ - DRM_UNLOCKED = BIT(4), /** * @DRM_RENDER_ALLOW: * -- cgit v1.2.3 From 2504c7ec728b7a2b6ca067e2a908fd1af2aad57c Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 22 Nov 2023 13:09:41 +0100 Subject: drm: Remove source code for non-KMS drivers Remove all remaining source code for non-KMS drivers. These drivers have been removed in v6.3 and won't comeback. Signed-off-by: Thomas Zimmermann Reviewed-by: David Airlie Reviewed-by: Daniel Vetter Acked-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20231122122449.11588-13-tzimmermann@suse.de --- drivers/gpu/drm/Makefile | 12 - drivers/gpu/drm/drm_agpsupport.c | 451 ---------- drivers/gpu/drm/drm_bufs.c | 1627 ------------------------------------- drivers/gpu/drm/drm_context.c | 513 ------------ drivers/gpu/drm/drm_dma.c | 178 ---- drivers/gpu/drm/drm_hashtab.c | 203 ----- drivers/gpu/drm/drm_internal.h | 5 - drivers/gpu/drm/drm_irq.c | 204 ----- drivers/gpu/drm/drm_legacy.h | 290 ------- drivers/gpu/drm/drm_legacy_misc.c | 105 --- drivers/gpu/drm/drm_lock.c | 373 --------- drivers/gpu/drm/drm_memory.c | 138 ---- drivers/gpu/drm/drm_pci.c | 203 ----- drivers/gpu/drm/drm_scatter.c | 220 ----- drivers/gpu/drm/drm_vm.c | 665 --------------- include/drm/drm_auth.h | 22 - include/drm/drm_device.h | 71 +- include/drm/drm_drv.h | 19 - include/drm/drm_file.h | 5 - include/drm/drm_legacy.h | 331 -------- 20 files changed, 2 insertions(+), 5633 deletions(-) delete mode 100644 drivers/gpu/drm/drm_agpsupport.c delete mode 100644 drivers/gpu/drm/drm_bufs.c delete mode 100644 drivers/gpu/drm/drm_context.c delete mode 100644 drivers/gpu/drm/drm_dma.c delete mode 100644 drivers/gpu/drm/drm_hashtab.c delete mode 100644 drivers/gpu/drm/drm_irq.c delete mode 100644 drivers/gpu/drm/drm_legacy.h delete mode 100644 drivers/gpu/drm/drm_legacy_misc.c delete mode 100644 drivers/gpu/drm/drm_lock.c delete mode 100644 drivers/gpu/drm/drm_memory.c delete mode 100644 drivers/gpu/drm/drm_scatter.c delete mode 100644 drivers/gpu/drm/drm_vm.c delete mode 100644 include/drm/drm_legacy.h diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index b4cb0835620a..8ac6f4b9546e 100644 --- 
a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -47,18 +47,6 @@ drm-y := \ drm_vblank_work.o \ drm_vma_manager.o \ drm_writeback.o -drm-$(CONFIG_DRM_LEGACY) += \ - drm_agpsupport.o \ - drm_bufs.o \ - drm_context.o \ - drm_dma.o \ - drm_hashtab.o \ - drm_irq.o \ - drm_legacy_misc.o \ - drm_lock.o \ - drm_memory.o \ - drm_scatter.o \ - drm_vm.o drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_PANEL) += drm_panel.o diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c deleted file mode 100644 index a4ad6fd13abc..000000000000 --- a/drivers/gpu/drm/drm_agpsupport.c +++ /dev/null @@ -1,451 +0,0 @@ -/* - * \file drm_agpsupport.c - * DRM support for AGP/GART backend - * - * \author Rickard E. (Rik) Faith - * \author Gareth Hughes - */ - -/* - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include -#include -#include - -#if IS_ENABLED(CONFIG_AGP) -#include -#endif - -#include -#include -#include -#include - -#include "drm_legacy.h" - -#if IS_ENABLED(CONFIG_AGP) - -/* - * Get AGP information. - * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device has been initialized and acquired and fills in the - * drm_agp_info structure with the information in drm_agp_head::agp_info. - */ -int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info) -{ - struct agp_kern_info *kern; - - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - - kern = &dev->agp->agp_info; - info->agp_version_major = kern->version.major; - info->agp_version_minor = kern->version.minor; - info->mode = kern->mode; - info->aperture_base = kern->aper_base; - info->aperture_size = kern->aper_size * 1024 * 1024; - info->memory_allowed = kern->max_memory << PAGE_SHIFT; - info->memory_used = kern->current_memory << PAGE_SHIFT; - info->id_vendor = kern->device->vendor; - info->id_device = kern->device->device; - - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_info); - -int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_info *info = data; - int err; - - err = drm_legacy_agp_info(dev, info); - if (err) - return err; - - return 0; -} - -/* - * Acquire the AGP device. 
- * - * \param dev DRM device that is to acquire AGP. - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device hasn't been acquired before and calls - * \c agp_backend_acquire. - */ -int drm_legacy_agp_acquire(struct drm_device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev->dev); - - if (!dev->agp) - return -ENODEV; - if (dev->agp->acquired) - return -EBUSY; - dev->agp->bridge = agp_backend_acquire(pdev); - if (!dev->agp->bridge) - return -ENODEV; - dev->agp->acquired = 1; - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_acquire); - -/* - * Acquire the AGP device (ioctl). - * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device hasn't been acquired before and calls - * \c agp_backend_acquire. - */ -int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return drm_legacy_agp_acquire((struct drm_device *)file_priv->minor->dev); -} - -/* - * Release the AGP device. - * - * \param dev DRM device that is to release AGP. - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device has been acquired and calls \c agp_backend_release. - */ -int drm_legacy_agp_release(struct drm_device *dev) -{ - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - agp_backend_release(dev->agp->bridge); - dev->agp->acquired = 0; - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_release); - -int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return drm_legacy_agp_release(dev); -} - -/* - * Enable the AGP bus. - * - * \param dev DRM device that has previously acquired AGP. - * \param mode Requested AGP mode. - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device has been acquired but not enabled, and calls - * \c agp_enable. - */ -int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode) -{ - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - - dev->agp->mode = mode.mode; - agp_enable(dev->agp->bridge, mode.mode); - dev->agp->enabled = 1; - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_enable); - -int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_mode *mode = data; - - return drm_legacy_agp_enable(dev, *mode); -} - -/* - * Allocate AGP memory. - * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device is present and has been acquired, allocates the - * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it. 
- */ -int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) -{ - struct drm_agp_mem *entry; - struct agp_memory *memory; - unsigned long pages; - u32 type; - - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - pages = DIV_ROUND_UP(request->size, PAGE_SIZE); - type = (u32) request->type; - memory = agp_allocate_memory(dev->agp->bridge, pages, type); - if (!memory) { - kfree(entry); - return -ENOMEM; - } - - entry->handle = (unsigned long)memory->key + 1; - entry->memory = memory; - entry->bound = 0; - entry->pages = pages; - list_add(&entry->head, &dev->agp->memory); - - request->handle = entry->handle; - request->physical = memory->physical; - - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_alloc); - - -int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_buffer *request = data; - - return drm_legacy_agp_alloc(dev, request); -} - -/* - * Search for the AGP memory entry associated with a handle. - * - * \param dev DRM device structure. - * \param handle AGP memory handle. - * \return pointer to the drm_agp_mem structure associated with \p handle. - * - * Walks through drm_agp_head::memory until finding a matching handle. - */ -static struct drm_agp_mem *drm_legacy_agp_lookup_entry(struct drm_device *dev, - unsigned long handle) -{ - struct drm_agp_mem *entry; - - list_for_each_entry(entry, &dev->agp->memory, head) { - if (entry->handle == handle) - return entry; - } - return NULL; -} - -/* - * Unbind AGP memory from the GATT (ioctl). - * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device is present and acquired, looks-up the AGP memory - * entry and passes it to the unbind_agp() function. - */ -int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request) -{ - struct drm_agp_mem *entry; - int ret; - - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - entry = drm_legacy_agp_lookup_entry(dev, request->handle); - if (!entry || !entry->bound) - return -EINVAL; - ret = agp_unbind_memory(entry->memory); - if (ret == 0) - entry->bound = 0; - return ret; -} -EXPORT_SYMBOL(drm_legacy_agp_unbind); - - -int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_binding *request = data; - - return drm_legacy_agp_unbind(dev, request); -} - -/* - * Bind AGP memory into the GATT (ioctl) - * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device is present and has been acquired and that no memory - * is currently bound into the GATT. Looks-up the AGP memory entry and passes - * it to bind_agp() function. 
- */ -int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request) -{ - struct drm_agp_mem *entry; - int retcode; - int page; - - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - entry = drm_legacy_agp_lookup_entry(dev, request->handle); - if (!entry || entry->bound) - return -EINVAL; - page = DIV_ROUND_UP(request->offset, PAGE_SIZE); - retcode = agp_bind_memory(entry->memory, page); - if (retcode) - return retcode; - entry->bound = dev->agp->base + (page << PAGE_SHIFT); - DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n", - dev->agp->base, entry->bound); - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_bind); - - -int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_binding *request = data; - - return drm_legacy_agp_bind(dev, request); -} - -/* - * Free AGP memory (ioctl). - * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device is present and has been acquired and looks up the - * AGP memory entry. If the memory is currently bound, unbind it via - * unbind_agp(). Frees it via free_agp() as well as the entry itself - * and unlinks from the doubly linked list it's inserted in. - */ -int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request) -{ - struct drm_agp_mem *entry; - - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - entry = drm_legacy_agp_lookup_entry(dev, request->handle); - if (!entry) - return -EINVAL; - if (entry->bound) - agp_unbind_memory(entry->memory); - - list_del(&entry->head); - - agp_free_memory(entry->memory); - kfree(entry); - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_free); - - -int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_buffer *request = data; - - return drm_legacy_agp_free(dev, request); -} - -/* - * Initialize the AGP resources. - * - * \return pointer to a drm_agp_head structure. - * - * Gets the drm_agp_t structure which is made available by the agpgart module - * via the inter_module_* functions. Creates and initializes a drm_agp_head - * structure. - * - * Note that final cleanup of the kmalloced structure is directly done in - * drm_pci_agp_destroy. - */ -struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev->dev); - struct drm_agp_head *head = NULL; - - head = kzalloc(sizeof(*head), GFP_KERNEL); - if (!head) - return NULL; - head->bridge = agp_find_bridge(pdev); - if (!head->bridge) { - head->bridge = agp_backend_acquire(pdev); - if (!head->bridge) { - kfree(head); - return NULL; - } - agp_copy_info(head->bridge, &head->agp_info); - agp_backend_release(head->bridge); - } else { - agp_copy_info(head->bridge, &head->agp_info); - } - if (head->agp_info.chipset == NOT_SUPPORTED) { - kfree(head); - return NULL; - } - INIT_LIST_HEAD(&head->memory); - head->cant_use_aperture = head->agp_info.cant_use_aperture; - head->page_mask = head->agp_info.page_mask; - head->base = head->agp_info.aper_base; - return head; -} -/* Only exported for i810.ko */ -EXPORT_SYMBOL(drm_legacy_agp_init); - -/** - * drm_legacy_agp_clear - Clear AGP resource list - * @dev: DRM device - * - * Iterate over all AGP resources and remove them. But keep the AGP head - * intact so it can still be used. It is safe to call this if AGP is disabled or - * was already removed. - * - * Cleanup is only done for drivers who have DRIVER_LEGACY set. 
- */ -void drm_legacy_agp_clear(struct drm_device *dev) -{ - struct drm_agp_mem *entry, *tempe; - - if (!dev->agp) - return; - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { - if (entry->bound) - agp_unbind_memory(entry->memory); - agp_free_memory(entry->memory); - kfree(entry); - } - INIT_LIST_HEAD(&dev->agp->memory); - - if (dev->agp->acquired) - drm_legacy_agp_release(dev); - - dev->agp->acquired = 0; - dev->agp->enabled = 0; -} - -#endif diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c deleted file mode 100644 index 86700560fea2..000000000000 --- a/drivers/gpu/drm/drm_bufs.c +++ /dev/null @@ -1,1627 +0,0 @@ -/* - * Legacy: Generic DRM Buffer Management - * - * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Author: Rickard E. (Rik) Faith - * Author: Gareth Hughes - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include - -#include "drm_legacy.h" - - -static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, - struct drm_local_map *map) -{ - struct drm_map_list *entry; - - list_for_each_entry(entry, &dev->maplist, head) { - /* - * Because the kernel-userspace ABI is fixed at a 32-bit offset - * while PCI resources may live above that, we only compare the - * lower 32 bits of the map offset for maps of type - * _DRM_FRAMEBUFFER or _DRM_REGISTERS. - * It is assumed that if a driver have more than one resource - * of each type, the lower 32 bits are different. 
- */ - if (!entry->map || - map->type != entry->map->type || - entry->master != dev->master) - continue; - switch (map->type) { - case _DRM_SHM: - if (map->flags != _DRM_CONTAINS_LOCK) - break; - return entry; - case _DRM_REGISTERS: - case _DRM_FRAME_BUFFER: - if ((entry->map->offset & 0xffffffff) == - (map->offset & 0xffffffff)) - return entry; - break; - default: /* Make gcc happy */ - break; - } - if (entry->map->offset == map->offset) - return entry; - } - - return NULL; -} - -static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash, - unsigned long user_token, int hashed_handle, int shm) -{ - int use_hashed_handle, shift; - unsigned long add; - -#if (BITS_PER_LONG == 64) - use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle); -#elif (BITS_PER_LONG == 32) - use_hashed_handle = hashed_handle; -#else -#error Unsupported long size. Neither 64 nor 32 bits. -#endif - - if (!use_hashed_handle) { - int ret; - - hash->key = user_token >> PAGE_SHIFT; - ret = drm_ht_insert_item(&dev->map_hash, hash); - if (ret != -EINVAL) - return ret; - } - - shift = 0; - add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT; - if (shm && (SHMLBA > PAGE_SIZE)) { - int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1; - - /* For shared memory, we have to preserve the SHMLBA - * bits of the eventual vma->vm_pgoff value during - * mmap(). Otherwise we run into cache aliasing problems - * on some platforms. On these platforms, the pgoff of - * a mmap() request is used to pick a suitable virtual - * address for the mmap() region such that it will not - * cause cache aliasing problems. - * - * Therefore, make sure the SHMLBA relevant bits of the - * hash value we use are equal to those in the original - * kernel virtual address. - */ - shift = bits; - add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL)); - } - - return drm_ht_just_insert_please(&dev->map_hash, hash, - user_token, 32 - PAGE_SHIFT - 3, - shift, add); -} - -/* - * Core function to create a range of memory available for mapping by a - * non-root process. - * - * Adjusts the memory offset to its absolute value according to the mapping - * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where - * applicable and if supported by the kernel. - */ -static int drm_addmap_core(struct drm_device *dev, resource_size_t offset, - unsigned int size, enum drm_map_type type, - enum drm_map_flags flags, - struct drm_map_list **maplist) -{ - struct drm_local_map *map; - struct drm_map_list *list; - unsigned long user_token; - int ret; - - map = kmalloc(sizeof(*map), GFP_KERNEL); - if (!map) - return -ENOMEM; - - map->offset = offset; - map->size = size; - map->flags = flags; - map->type = type; - - /* Only allow shared memory to be removable since we only keep enough - * book keeping information about shared memory to allow for removal - * when processes fork. - */ - if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) { - kfree(map); - return -EINVAL; - } - DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n", - (unsigned long long)map->offset, map->size, map->type); - - /* page-align _DRM_SHM maps. They are allocated here so there is no security - * hole created by that and it works around various broken drivers that use - * a non-aligned quantity to map the SAREA. 
--BenH - */ - if (map->type == _DRM_SHM) - map->size = PAGE_ALIGN(map->size); - - if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) { - kfree(map); - return -EINVAL; - } - map->mtrr = -1; - map->handle = NULL; - - switch (map->type) { - case _DRM_REGISTERS: - case _DRM_FRAME_BUFFER: -#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__) - if (map->offset + (map->size-1) < map->offset || - map->offset < virt_to_phys(high_memory)) { - kfree(map); - return -EINVAL; - } -#endif - /* Some drivers preinitialize some maps, without the X Server - * needing to be aware of it. Therefore, we just return success - * when the server tries to create a duplicate map. - */ - list = drm_find_matching_map(dev, map); - if (list != NULL) { - if (list->map->size != map->size) { - DRM_DEBUG("Matching maps of type %d with " - "mismatched sizes, (%ld vs %ld)\n", - map->type, map->size, - list->map->size); - list->map->size = map->size; - } - - kfree(map); - *maplist = list; - return 0; - } - - if (map->type == _DRM_FRAME_BUFFER || - (map->flags & _DRM_WRITE_COMBINING)) { - map->mtrr = - arch_phys_wc_add(map->offset, map->size); - } - if (map->type == _DRM_REGISTERS) { - if (map->flags & _DRM_WRITE_COMBINING) - map->handle = ioremap_wc(map->offset, - map->size); - else - map->handle = ioremap(map->offset, map->size); - if (!map->handle) { - kfree(map); - return -ENOMEM; - } - } - - break; - case _DRM_SHM: - list = drm_find_matching_map(dev, map); - if (list != NULL) { - if (list->map->size != map->size) { - DRM_DEBUG("Matching maps of type %d with " - "mismatched sizes, (%ld vs %ld)\n", - map->type, map->size, list->map->size); - list->map->size = map->size; - } - - kfree(map); - *maplist = list; - return 0; - } - map->handle = vmalloc_user(map->size); - DRM_DEBUG("%lu %d %p\n", - map->size, order_base_2(map->size), map->handle); - if (!map->handle) { - kfree(map); - return -ENOMEM; - } - map->offset = (unsigned long)map->handle; - if (map->flags & _DRM_CONTAINS_LOCK) { - /* Prevent a 2nd X Server from creating a 2nd lock */ - if (dev->master->lock.hw_lock != NULL) { - vfree(map->handle); - kfree(map); - return -EBUSY; - } - dev->sigdata.lock = dev->master->lock.hw_lock = map->handle; /* Pointer to lock */ - } - break; - case _DRM_AGP: { - struct drm_agp_mem *entry; - int valid = 0; - - if (!dev->agp) { - kfree(map); - return -EINVAL; - } -#ifdef __alpha__ - map->offset += dev->hose->mem_space->start; -#endif - /* In some cases (i810 driver), user space may have already - * added the AGP base itself, because dev->agp->base previously - * only got set during AGP enable. So, only add the base - * address if the map's offset isn't already within the - * aperture. - */ - if (map->offset < dev->agp->base || - map->offset > dev->agp->base + - dev->agp->agp_info.aper_size * 1024 * 1024 - 1) { - map->offset += dev->agp->base; - } - map->mtrr = dev->agp->agp_mtrr; /* for getmap */ - - /* This assumes the DRM is in total control of AGP space. - * It's not always the case as AGP can be in the control - * of user space (i.e. i810 driver). 
So this loop will get - * skipped and we double check that dev->agp->memory is - * actually set as well as being invalid before EPERM'ing - */ - list_for_each_entry(entry, &dev->agp->memory, head) { - if ((map->offset >= entry->bound) && - (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) { - valid = 1; - break; - } - } - if (!list_empty(&dev->agp->memory) && !valid) { - kfree(map); - return -EPERM; - } - DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n", - (unsigned long long)map->offset, map->size); - - break; - } - case _DRM_SCATTER_GATHER: - if (!dev->sg) { - kfree(map); - return -EINVAL; - } - map->offset += (unsigned long)dev->sg->virtual; - break; - case _DRM_CONSISTENT: - /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G, - * As we're limiting the address to 2^32-1 (or less), - * casting it down to 32 bits is no problem, but we - * need to point to a 64bit variable first. - */ - map->handle = dma_alloc_coherent(dev->dev, - map->size, - &map->offset, - GFP_KERNEL); - if (!map->handle) { - kfree(map); - return -ENOMEM; - } - break; - default: - kfree(map); - return -EINVAL; - } - - list = kzalloc(sizeof(*list), GFP_KERNEL); - if (!list) { - if (map->type == _DRM_REGISTERS) - iounmap(map->handle); - kfree(map); - return -EINVAL; - } - list->map = map; - - mutex_lock(&dev->struct_mutex); - list_add(&list->head, &dev->maplist); - - /* Assign a 32-bit handle */ - /* We do it here so that dev->struct_mutex protects the increment */ - user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle : - map->offset; - ret = drm_map_handle(dev, &list->hash, user_token, 0, - (map->type == _DRM_SHM)); - if (ret) { - if (map->type == _DRM_REGISTERS) - iounmap(map->handle); - kfree(map); - kfree(list); - mutex_unlock(&dev->struct_mutex); - return ret; - } - - list->user_token = list->hash.key << PAGE_SHIFT; - mutex_unlock(&dev->struct_mutex); - - if (!(map->flags & _DRM_DRIVER)) - list->master = dev->master; - *maplist = list; - return 0; -} - -int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset, - unsigned int size, enum drm_map_type type, - enum drm_map_flags flags, struct drm_local_map **map_ptr) -{ - struct drm_map_list *list; - int rc; - - rc = drm_addmap_core(dev, offset, size, type, flags, &list); - if (!rc) - *map_ptr = list->map; - return rc; -} -EXPORT_SYMBOL(drm_legacy_addmap); - -struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, - unsigned int token) -{ - struct drm_map_list *_entry; - - list_for_each_entry(_entry, &dev->maplist, head) - if (_entry->user_token == token) - return _entry->map; - return NULL; -} -EXPORT_SYMBOL(drm_legacy_findmap); - -/* - * Ioctl to specify a range of memory that is available for mapping by a - * non-root process. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a drm_map structure. - * \return zero on success or a negative value on error. 
- * - */ -int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_map *map = data; - struct drm_map_list *maplist; - int err; - - if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM)) - return -EPERM; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - err = drm_addmap_core(dev, map->offset, map->size, map->type, - map->flags, &maplist); - - if (err) - return err; - - /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */ - map->handle = (void *)(unsigned long)maplist->user_token; - - /* - * It appears that there are no users of this value whatsoever -- - * drmAddMap just discards it. Let's not encourage its use. - * (Keeping drm_addmap_core's returned mtrr value would be wrong -- - * it's not a real mtrr index anymore.) - */ - map->mtrr = -1; - - return 0; -} - -/* - * Get a mapping information. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument, pointing to a drm_map structure. - * - * \return zero on success or a negative number on failure. - * - * Searches for the mapping with the specified offset and copies its information - * into userspace - */ -int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_map *map = data; - struct drm_map_list *r_list = NULL; - struct list_head *list; - int idx; - int i; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - idx = map->offset; - if (idx < 0) - return -EINVAL; - - i = 0; - mutex_lock(&dev->struct_mutex); - list_for_each(list, &dev->maplist) { - if (i == idx) { - r_list = list_entry(list, struct drm_map_list, head); - break; - } - i++; - } - if (!r_list || !r_list->map) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - - map->offset = r_list->map->offset; - map->size = r_list->map->size; - map->type = r_list->map->type; - map->flags = r_list->map->flags; - map->handle = (void *)(unsigned long) r_list->user_token; - map->mtrr = arch_phys_wc_index(r_list->map->mtrr); - - mutex_unlock(&dev->struct_mutex); - - return 0; -} - -/* - * Remove a map private from list and deallocate resources if the mapping - * isn't in use. - * - * Searches the map on drm_device::maplist, removes it from the list, see if - * it's being used, and free any associated resource (such as MTRR's) if it's not - * being on use. 
- * - * \sa drm_legacy_addmap - */ -int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map) -{ - struct drm_map_list *r_list = NULL, *list_t; - int found = 0; - struct drm_master *master; - - /* Find the list entry for the map and remove it */ - list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { - if (r_list->map == map) { - master = r_list->master; - list_del(&r_list->head); - drm_ht_remove_key(&dev->map_hash, - r_list->user_token >> PAGE_SHIFT); - kfree(r_list); - found = 1; - break; - } - } - - if (!found) - return -EINVAL; - - switch (map->type) { - case _DRM_REGISTERS: - iounmap(map->handle); - fallthrough; - case _DRM_FRAME_BUFFER: - arch_phys_wc_del(map->mtrr); - break; - case _DRM_SHM: - vfree(map->handle); - if (master) { - if (dev->sigdata.lock == master->lock.hw_lock) - dev->sigdata.lock = NULL; - master->lock.hw_lock = NULL; /* SHM removed */ - master->lock.file_priv = NULL; - wake_up_interruptible_all(&master->lock.lock_queue); - } - break; - case _DRM_AGP: - case _DRM_SCATTER_GATHER: - break; - case _DRM_CONSISTENT: - dma_free_coherent(dev->dev, - map->size, - map->handle, - map->offset); - break; - } - kfree(map); - - return 0; -} -EXPORT_SYMBOL(drm_legacy_rmmap_locked); - -void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - mutex_lock(&dev->struct_mutex); - drm_legacy_rmmap_locked(dev, map); - mutex_unlock(&dev->struct_mutex); -} -EXPORT_SYMBOL(drm_legacy_rmmap); - -void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master) -{ - struct drm_map_list *r_list, *list_temp; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) { - if (r_list->master == master) { - drm_legacy_rmmap_locked(dev, r_list->map); - r_list = NULL; - } - } - mutex_unlock(&dev->struct_mutex); -} - -void drm_legacy_rmmaps(struct drm_device *dev) -{ - struct drm_map_list *r_list, *list_temp; - - list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) - drm_legacy_rmmap(dev, r_list->map); -} - -/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on - * the last close of the device, and this is necessary for cleanup when things - * exit uncleanly. Therefore, having userland manually remove mappings seems - * like a pointless exercise since they're going away anyway. - * - * One use case might be after addmap is allowed for normal users for SHM and - * gets used by drivers that the server doesn't need to care about. This seems - * unlikely. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a struct drm_map structure. - * \return zero on success or a negative value on error. - */ -int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_map *request = data; - struct drm_local_map *map = NULL; - struct drm_map_list *r_list; - int ret; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry(r_list, &dev->maplist, head) { - if (r_list->map && - r_list->user_token == (unsigned long)request->handle && - r_list->map->flags & _DRM_REMOVABLE) { - map = r_list->map; - break; - } - } - - /* List has wrapped around to the head pointer, or it's empty we didn't - * find anything. 
- */ - if (list_empty(&dev->maplist) || !map) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - - /* Register and framebuffer maps are permanent */ - if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) { - mutex_unlock(&dev->struct_mutex); - return 0; - } - - ret = drm_legacy_rmmap_locked(dev, map); - - mutex_unlock(&dev->struct_mutex); - - return ret; -} - -/* - * Cleanup after an error on one of the addbufs() functions. - * - * \param dev DRM device. - * \param entry buffer entry where the error occurred. - * - * Frees any pages and buffers associated with the given entry. - */ -static void drm_cleanup_buf_error(struct drm_device *dev, - struct drm_buf_entry *entry) -{ - drm_dma_handle_t *dmah; - int i; - - if (entry->seg_count) { - for (i = 0; i < entry->seg_count; i++) { - if (entry->seglist[i]) { - dmah = entry->seglist[i]; - dma_free_coherent(dev->dev, - dmah->size, - dmah->vaddr, - dmah->busaddr); - kfree(dmah); - } - } - kfree(entry->seglist); - - entry->seg_count = 0; - } - - if (entry->buf_count) { - for (i = 0; i < entry->buf_count; i++) { - kfree(entry->buflist[i].dev_private); - } - kfree(entry->buflist); - - entry->buf_count = 0; - } -} - -#if IS_ENABLED(CONFIG_AGP) -/* - * Add AGP buffers for DMA transfers. - * - * \param dev struct drm_device to which the buffers are to be added. - * \param request pointer to a struct drm_buf_desc describing the request. - * \return zero on success or a negative number on failure. - * - * After some sanity checks creates a drm_buf structure for each buffer and - * reallocates the buffer list of the same size order to accommodate the new - * buffers. - */ -int drm_legacy_addbufs_agp(struct drm_device *dev, - struct drm_buf_desc *request) -{ - struct drm_device_dma *dma = dev->dma; - struct drm_buf_entry *entry; - struct drm_agp_mem *agp_entry; - struct drm_buf *buf; - unsigned long offset; - unsigned long agp_offset; - int count; - int order; - int size; - int alignment; - int page_order; - int total; - int byte_count; - int i, valid; - struct drm_buf **temp_buflist; - - if (!dma) - return -EINVAL; - - count = request->count; - order = order_base_2(request->size); - size = 1 << order; - - alignment = (request->flags & _DRM_PAGE_ALIGN) - ? PAGE_ALIGN(size) : size; - page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; - total = PAGE_SIZE << page_order; - - byte_count = 0; - agp_offset = dev->agp->base + request->agp_start; - - DRM_DEBUG("count: %d\n", count); - DRM_DEBUG("order: %d\n", order); - DRM_DEBUG("size: %d\n", size); - DRM_DEBUG("agp_offset: %lx\n", agp_offset); - DRM_DEBUG("alignment: %d\n", alignment); - DRM_DEBUG("page_order: %d\n", page_order); - DRM_DEBUG("total: %d\n", total); - - if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) - return -EINVAL; - - /* Make sure buffers are located in AGP memory that we own */ - valid = 0; - list_for_each_entry(agp_entry, &dev->agp->memory, head) { - if ((agp_offset >= agp_entry->bound) && - (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) { - valid = 1; - break; - } - } - if (!list_empty(&dev->agp->memory) && !valid) { - DRM_DEBUG("zone invalid\n"); - return -EINVAL; - } - spin_lock(&dev->buf_lock); - if (dev->buf_use) { - spin_unlock(&dev->buf_lock); - return -EBUSY; - } - atomic_inc(&dev->buf_alloc); - spin_unlock(&dev->buf_lock); - - mutex_lock(&dev->struct_mutex); - entry = &dma->bufs[order]; - if (entry->buf_count) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; /* May only call once for each order */ - } - - if (count < 0 || count > 4096) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -EINVAL; - } - - entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL); - if (!entry->buflist) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - entry->buf_size = size; - entry->page_order = page_order; - - offset = 0; - - while (entry->buf_count < count) { - buf = &entry->buflist[entry->buf_count]; - buf->idx = dma->buf_count + entry->buf_count; - buf->total = alignment; - buf->order = order; - buf->used = 0; - - buf->offset = (dma->byte_count + offset); - buf->bus_address = agp_offset + offset; - buf->address = (void *)(agp_offset + offset); - buf->next = NULL; - buf->waiting = 0; - buf->pending = 0; - buf->file_priv = NULL; - - buf->dev_priv_size = dev->driver->dev_priv_size; - buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL); - if (!buf->dev_private) { - /* Set count correctly so we free the proper amount. 
*/ - entry->buf_count = count; - drm_cleanup_buf_error(dev, entry); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); - - offset += alignment; - entry->buf_count++; - byte_count += PAGE_SIZE << page_order; - } - - DRM_DEBUG("byte_count: %d\n", byte_count); - - temp_buflist = krealloc(dma->buflist, - (dma->buf_count + entry->buf_count) * - sizeof(*dma->buflist), GFP_KERNEL); - if (!temp_buflist) { - /* Free the entry because it isn't valid */ - drm_cleanup_buf_error(dev, entry); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - dma->buflist = temp_buflist; - - for (i = 0; i < entry->buf_count; i++) { - dma->buflist[i + dma->buf_count] = &entry->buflist[i]; - } - - dma->buf_count += entry->buf_count; - dma->seg_count += entry->seg_count; - dma->page_count += byte_count >> PAGE_SHIFT; - dma->byte_count += byte_count; - - DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); - DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); - - mutex_unlock(&dev->struct_mutex); - - request->count = entry->buf_count; - request->size = size; - - dma->flags = _DRM_DMA_USE_AGP; - - atomic_dec(&dev->buf_alloc); - return 0; -} -EXPORT_SYMBOL(drm_legacy_addbufs_agp); -#endif /* CONFIG_AGP */ - -int drm_legacy_addbufs_pci(struct drm_device *dev, - struct drm_buf_desc *request) -{ - struct drm_device_dma *dma = dev->dma; - int count; - int order; - int size; - int total; - int page_order; - struct drm_buf_entry *entry; - drm_dma_handle_t *dmah; - struct drm_buf *buf; - int alignment; - unsigned long offset; - int i; - int byte_count; - int page_count; - unsigned long *temp_pagelist; - struct drm_buf **temp_buflist; - - if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - count = request->count; - order = order_base_2(request->size); - size = 1 << order; - - DRM_DEBUG("count=%d, size=%d (%d), order=%d\n", - request->count, request->size, size, order); - - if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) - return -EINVAL; - - alignment = (request->flags & _DRM_PAGE_ALIGN) - ? PAGE_ALIGN(size) : size; - page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; - total = PAGE_SIZE << page_order; - - spin_lock(&dev->buf_lock); - if (dev->buf_use) { - spin_unlock(&dev->buf_lock); - return -EBUSY; - } - atomic_inc(&dev->buf_alloc); - spin_unlock(&dev->buf_lock); - - mutex_lock(&dev->struct_mutex); - entry = &dma->bufs[order]; - if (entry->buf_count) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; /* May only call once for each order */ - } - - if (count < 0 || count > 4096) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -EINVAL; - } - - entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL); - if (!entry->buflist) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL); - if (!entry->seglist) { - kfree(entry->buflist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - /* Keep the original pagelist until we know all the allocations - * have succeeded - */ - temp_pagelist = kmalloc_array(dma->page_count + (count << page_order), - sizeof(*dma->pagelist), - GFP_KERNEL); - if (!temp_pagelist) { - kfree(entry->buflist); - kfree(entry->seglist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - memcpy(temp_pagelist, - dma->pagelist, dma->page_count * sizeof(*dma->pagelist)); - DRM_DEBUG("pagelist: %d entries\n", - dma->page_count + (count << page_order)); - - entry->buf_size = size; - entry->page_order = page_order; - byte_count = 0; - page_count = 0; - - while (entry->buf_count < count) { - dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL); - if (!dmah) { - /* Set count correctly so we free the proper amount. */ - entry->buf_count = count; - entry->seg_count = count; - drm_cleanup_buf_error(dev, entry); - kfree(temp_pagelist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - dmah->size = total; - dmah->vaddr = dma_alloc_coherent(dev->dev, - dmah->size, - &dmah->busaddr, - GFP_KERNEL); - if (!dmah->vaddr) { - kfree(dmah); - - /* Set count correctly so we free the proper amount. */ - entry->buf_count = count; - entry->seg_count = count; - drm_cleanup_buf_error(dev, entry); - kfree(temp_pagelist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - entry->seglist[entry->seg_count++] = dmah; - for (i = 0; i < (1 << page_order); i++) { - DRM_DEBUG("page %d @ 0x%08lx\n", - dma->page_count + page_count, - (unsigned long)dmah->vaddr + PAGE_SIZE * i); - temp_pagelist[dma->page_count + page_count++] - = (unsigned long)dmah->vaddr + PAGE_SIZE * i; - } - for (offset = 0; - offset + size <= total && entry->buf_count < count; - offset += alignment, ++entry->buf_count) { - buf = &entry->buflist[entry->buf_count]; - buf->idx = dma->buf_count + entry->buf_count; - buf->total = alignment; - buf->order = order; - buf->used = 0; - buf->offset = (dma->byte_count + byte_count + offset); - buf->address = (void *)(dmah->vaddr + offset); - buf->bus_address = dmah->busaddr + offset; - buf->next = NULL; - buf->waiting = 0; - buf->pending = 0; - buf->file_priv = NULL; - - buf->dev_priv_size = dev->driver->dev_priv_size; - buf->dev_private = kzalloc(buf->dev_priv_size, - GFP_KERNEL); - if (!buf->dev_private) { - /* Set count correctly so we free the proper amount. 
*/ - entry->buf_count = count; - entry->seg_count = count; - drm_cleanup_buf_error(dev, entry); - kfree(temp_pagelist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - DRM_DEBUG("buffer %d @ %p\n", - entry->buf_count, buf->address); - } - byte_count += PAGE_SIZE << page_order; - } - - temp_buflist = krealloc(dma->buflist, - (dma->buf_count + entry->buf_count) * - sizeof(*dma->buflist), GFP_KERNEL); - if (!temp_buflist) { - /* Free the entry because it isn't valid */ - drm_cleanup_buf_error(dev, entry); - kfree(temp_pagelist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - dma->buflist = temp_buflist; - - for (i = 0; i < entry->buf_count; i++) { - dma->buflist[i + dma->buf_count] = &entry->buflist[i]; - } - - /* No allocations failed, so now we can replace the original pagelist - * with the new one. - */ - if (dma->page_count) { - kfree(dma->pagelist); - } - dma->pagelist = temp_pagelist; - - dma->buf_count += entry->buf_count; - dma->seg_count += entry->seg_count; - dma->page_count += entry->seg_count << page_order; - dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); - - mutex_unlock(&dev->struct_mutex); - - request->count = entry->buf_count; - request->size = size; - - if (request->flags & _DRM_PCI_BUFFER_RO) - dma->flags = _DRM_DMA_USE_PCI_RO; - - atomic_dec(&dev->buf_alloc); - return 0; - -} -EXPORT_SYMBOL(drm_legacy_addbufs_pci); - -static int drm_legacy_addbufs_sg(struct drm_device *dev, - struct drm_buf_desc *request) -{ - struct drm_device_dma *dma = dev->dma; - struct drm_buf_entry *entry; - struct drm_buf *buf; - unsigned long offset; - unsigned long agp_offset; - int count; - int order; - int size; - int alignment; - int page_order; - int total; - int byte_count; - int i; - struct drm_buf **temp_buflist; - - if (!drm_core_check_feature(dev, DRIVER_SG)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - count = request->count; - order = order_base_2(request->size); - size = 1 << order; - - alignment = (request->flags & _DRM_PAGE_ALIGN) - ? PAGE_ALIGN(size) : size; - page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; - total = PAGE_SIZE << page_order; - - byte_count = 0; - agp_offset = request->agp_start; - - DRM_DEBUG("count: %d\n", count); - DRM_DEBUG("order: %d\n", order); - DRM_DEBUG("size: %d\n", size); - DRM_DEBUG("agp_offset: %lu\n", agp_offset); - DRM_DEBUG("alignment: %d\n", alignment); - DRM_DEBUG("page_order: %d\n", page_order); - DRM_DEBUG("total: %d\n", total); - - if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) - return -EINVAL; - - spin_lock(&dev->buf_lock); - if (dev->buf_use) { - spin_unlock(&dev->buf_lock); - return -EBUSY; - } - atomic_inc(&dev->buf_alloc); - spin_unlock(&dev->buf_lock); - - mutex_lock(&dev->struct_mutex); - entry = &dma->bufs[order]; - if (entry->buf_count) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; /* May only call once for each order */ - } - - if (count < 0 || count > 4096) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -EINVAL; - } - - entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL); - if (!entry->buflist) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - entry->buf_size = size; - entry->page_order = page_order; - - offset = 0; - - while (entry->buf_count < count) { - buf = &entry->buflist[entry->buf_count]; - buf->idx = dma->buf_count + entry->buf_count; - buf->total = alignment; - buf->order = order; - buf->used = 0; - - buf->offset = (dma->byte_count + offset); - buf->bus_address = agp_offset + offset; - buf->address = (void *)(agp_offset + offset - + (unsigned long)dev->sg->virtual); - buf->next = NULL; - buf->waiting = 0; - buf->pending = 0; - buf->file_priv = NULL; - - buf->dev_priv_size = dev->driver->dev_priv_size; - buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL); - if (!buf->dev_private) { - /* Set count correctly so we free the proper amount. */ - entry->buf_count = count; - drm_cleanup_buf_error(dev, entry); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); - - offset += alignment; - entry->buf_count++; - byte_count += PAGE_SIZE << page_order; - } - - DRM_DEBUG("byte_count: %d\n", byte_count); - - temp_buflist = krealloc(dma->buflist, - (dma->buf_count + entry->buf_count) * - sizeof(*dma->buflist), GFP_KERNEL); - if (!temp_buflist) { - /* Free the entry because it isn't valid */ - drm_cleanup_buf_error(dev, entry); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - dma->buflist = temp_buflist; - - for (i = 0; i < entry->buf_count; i++) { - dma->buflist[i + dma->buf_count] = &entry->buflist[i]; - } - - dma->buf_count += entry->buf_count; - dma->seg_count += entry->seg_count; - dma->page_count += byte_count >> PAGE_SHIFT; - dma->byte_count += byte_count; - - DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); - DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); - - mutex_unlock(&dev->struct_mutex); - - request->count = entry->buf_count; - request->size = size; - - dma->flags = _DRM_DMA_USE_SG; - - atomic_dec(&dev->buf_alloc); - return 0; -} - -/* - * Add buffers for DMA transfers (ioctl). - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a struct drm_buf_desc request. - * \return zero on success or a negative number on failure. 
- * - * According with the memory type specified in drm_buf_desc::flags and the - * build options, it dispatches the call either to addbufs_agp(), - * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent - * PCI memory respectively. - */ -int drm_legacy_addbufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_buf_desc *request = data; - int ret; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - return -EOPNOTSUPP; - -#if IS_ENABLED(CONFIG_AGP) - if (request->flags & _DRM_AGP_BUFFER) - ret = drm_legacy_addbufs_agp(dev, request); - else -#endif - if (request->flags & _DRM_SG_BUFFER) - ret = drm_legacy_addbufs_sg(dev, request); - else if (request->flags & _DRM_FB_BUFFER) - ret = -EINVAL; - else - ret = drm_legacy_addbufs_pci(dev, request); - - return ret; -} - -/* - * Get information about the buffer mappings. - * - * This was originally mean for debugging purposes, or by a sophisticated - * client library to determine how best to use the available buffers (e.g., - * large buffers can be used for image transfer). - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a drm_buf_info structure. - * \return zero on success or a negative number on failure. - * - * Increments drm_device::buf_use while holding the drm_device::buf_lock - * lock, preventing of allocating more buffers after this call. Information - * about each requested buffer is then copied into user space. - */ -int __drm_legacy_infobufs(struct drm_device *dev, - void *data, int *p, - int (*f)(void *, int, struct drm_buf_entry *)) -{ - struct drm_device_dma *dma = dev->dma; - int i; - int count; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - spin_lock(&dev->buf_lock); - if (atomic_read(&dev->buf_alloc)) { - spin_unlock(&dev->buf_lock); - return -EBUSY; - } - ++dev->buf_use; /* Can't allocate more after this call */ - spin_unlock(&dev->buf_lock); - - for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { - if (dma->bufs[i].buf_count) - ++count; - } - - DRM_DEBUG("count = %d\n", count); - - if (*p >= count) { - for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { - struct drm_buf_entry *from = &dma->bufs[i]; - - if (from->buf_count) { - if (f(data, count, from) < 0) - return -EFAULT; - DRM_DEBUG("%d %d %d %d %d\n", - i, - dma->bufs[i].buf_count, - dma->bufs[i].buf_size, - dma->bufs[i].low_mark, - dma->bufs[i].high_mark); - ++count; - } - } - } - *p = count; - - return 0; -} - -static int copy_one_buf(void *data, int count, struct drm_buf_entry *from) -{ - struct drm_buf_info *request = data; - struct drm_buf_desc __user *to = &request->list[count]; - struct drm_buf_desc v = {.count = from->buf_count, - .size = from->buf_size, - .low_mark = from->low_mark, - .high_mark = from->high_mark}; - - if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags))) - return -EFAULT; - return 0; -} - -int drm_legacy_infobufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_buf_info *request = data; - - return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf); -} - -/* - * Specifies a low and high water mark for buffer allocation - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. 
- * \param arg a pointer to a drm_buf_desc structure. - * \return zero on success or a negative number on failure. - * - * Verifies that the size order is bounded between the admissible orders and - * updates the respective drm_device_dma::bufs entry low and high water mark. - * - * \note This ioctl is deprecated and mostly never used. - */ -int drm_legacy_markbufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_device_dma *dma = dev->dma; - struct drm_buf_desc *request = data; - int order; - struct drm_buf_entry *entry; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - DRM_DEBUG("%d, %d, %d\n", - request->size, request->low_mark, request->high_mark); - order = order_base_2(request->size); - if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) - return -EINVAL; - entry = &dma->bufs[order]; - - if (request->low_mark < 0 || request->low_mark > entry->buf_count) - return -EINVAL; - if (request->high_mark < 0 || request->high_mark > entry->buf_count) - return -EINVAL; - - entry->low_mark = request->low_mark; - entry->high_mark = request->high_mark; - - return 0; -} - -/* - * Unreserve the buffers in list, previously reserved using drmDMA. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a drm_buf_free structure. - * \return zero on success or a negative number on failure. - * - * Calls free_buffer() for each used buffer. - * This function is primarily used for debugging. - */ -int drm_legacy_freebufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_device_dma *dma = dev->dma; - struct drm_buf_free *request = data; - int i; - int idx; - struct drm_buf *buf; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - DRM_DEBUG("%d\n", request->count); - for (i = 0; i < request->count; i++) { - if (copy_from_user(&idx, &request->list[i], sizeof(idx))) - return -EFAULT; - if (idx < 0 || idx >= dma->buf_count) { - DRM_ERROR("Index %d (of %d max)\n", - idx, dma->buf_count - 1); - return -EINVAL; - } - idx = array_index_nospec(idx, dma->buf_count); - buf = dma->buflist[idx]; - if (buf->file_priv != file_priv) { - DRM_ERROR("Process %d freeing buffer not owned\n", - task_pid_nr(current)); - return -EINVAL; - } - drm_legacy_free_buffer(dev, buf); - } - - return 0; -} - -/* - * Maps all of the DMA buffers into client-virtual space (ioctl). - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a drm_buf_map structure. - * \return zero on success or a negative number on failure. - * - * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information - * about each buffer into user space. For PCI buffers, it calls vm_mmap() with - * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls - * drm_mmap_dma(). 
- */ -int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p, - void __user **v, - int (*f)(void *, int, unsigned long, - struct drm_buf *), - struct drm_file *file_priv) -{ - struct drm_device_dma *dma = dev->dma; - int retcode = 0; - unsigned long virtual; - int i; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - spin_lock(&dev->buf_lock); - if (atomic_read(&dev->buf_alloc)) { - spin_unlock(&dev->buf_lock); - return -EBUSY; - } - dev->buf_use++; /* Can't allocate more after this call */ - spin_unlock(&dev->buf_lock); - - if (*p >= dma->buf_count) { - if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP)) - || (drm_core_check_feature(dev, DRIVER_SG) - && (dma->flags & _DRM_DMA_USE_SG))) { - struct drm_local_map *map = dev->agp_buffer_map; - unsigned long token = dev->agp_buffer_token; - - if (!map) { - retcode = -EINVAL; - goto done; - } - virtual = vm_mmap(file_priv->filp, 0, map->size, - PROT_READ | PROT_WRITE, - MAP_SHARED, - token); - } else { - virtual = vm_mmap(file_priv->filp, 0, dma->byte_count, - PROT_READ | PROT_WRITE, - MAP_SHARED, 0); - } - if (virtual > -1024UL) { - /* Real error */ - retcode = (signed long)virtual; - goto done; - } - *v = (void __user *)virtual; - - for (i = 0; i < dma->buf_count; i++) { - if (f(data, i, virtual, dma->buflist[i]) < 0) { - retcode = -EFAULT; - goto done; - } - } - } - done: - *p = dma->buf_count; - DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode); - - return retcode; -} - -static int map_one_buf(void *data, int idx, unsigned long virtual, - struct drm_buf *buf) -{ - struct drm_buf_map *request = data; - unsigned long address = virtual + buf->offset; /* *** */ - - if (copy_to_user(&request->list[idx].idx, &buf->idx, - sizeof(request->list[0].idx))) - return -EFAULT; - if (copy_to_user(&request->list[idx].total, &buf->total, - sizeof(request->list[0].total))) - return -EFAULT; - if (clear_user(&request->list[idx].used, sizeof(int))) - return -EFAULT; - if (copy_to_user(&request->list[idx].address, &address, - sizeof(address))) - return -EFAULT; - return 0; -} - -int drm_legacy_mapbufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_buf_map *request = data; - - return __drm_legacy_mapbufs(dev, data, &request->count, - &request->virtual, map_one_buf, - file_priv); -} - -int drm_legacy_dma_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (dev->driver->dma_ioctl) - return dev->driver->dma_ioctl(dev, data, file_priv); - else - return -EINVAL; -} - -struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev) -{ - struct drm_map_list *entry; - - list_for_each_entry(entry, &dev->maplist, head) { - if (entry->map && entry->map->type == _DRM_SHM && - (entry->map->flags & _DRM_CONTAINS_LOCK)) { - return entry->map; - } - } - return NULL; -} -EXPORT_SYMBOL(drm_legacy_getsarea); diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c deleted file mode 100644 index a0fc779e5e1e..000000000000 --- a/drivers/gpu/drm/drm_context.c +++ /dev/null @@ -1,513 +0,0 @@ -/* - * Legacy: Generic DRM Contexts - * - * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Author: Rickard E. 
(Rik) Faith - * Author: Gareth Hughes - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include -#include - -#include -#include -#include - -#include "drm_legacy.h" - -struct drm_ctx_list { - struct list_head head; - drm_context_t handle; - struct drm_file *tag; -}; - -/******************************************************************/ -/** \name Context bitmap support */ -/*@{*/ - -/* - * Free a handle from the context bitmap. - * - * \param dev DRM device. - * \param ctx_handle context handle. - * - * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry - * in drm_device::ctx_idr, while holding the drm_device::struct_mutex - * lock. - */ -void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - mutex_lock(&dev->struct_mutex); - idr_remove(&dev->ctx_idr, ctx_handle); - mutex_unlock(&dev->struct_mutex); -} - -/* - * Context bitmap allocation. - * - * \param dev DRM device. - * \return (non-negative) context handle on success or a negative number on failure. - * - * Allocate a new idr from drm_device::ctx_idr while holding the - * drm_device::struct_mutex lock. - */ -static int drm_legacy_ctxbitmap_next(struct drm_device * dev) -{ - int ret; - - mutex_lock(&dev->struct_mutex); - ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0, - GFP_KERNEL); - mutex_unlock(&dev->struct_mutex); - return ret; -} - -/* - * Context bitmap initialization. - * - * \param dev DRM device. - * - * Initialise the drm_device::ctx_idr - */ -void drm_legacy_ctxbitmap_init(struct drm_device * dev) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - idr_init(&dev->ctx_idr); -} - -/* - * Context bitmap cleanup. - * - * \param dev DRM device. - * - * Free all idr members using drm_ctx_sarea_free helper function - * while holding the drm_device::struct_mutex lock. - */ -void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - mutex_lock(&dev->struct_mutex); - idr_destroy(&dev->ctx_idr); - mutex_unlock(&dev->struct_mutex); -} - -/** - * drm_legacy_ctxbitmap_flush() - Flush all contexts owned by a file - * @dev: DRM device to operate on - * @file: Open file to flush contexts for - * - * This iterates over all contexts on @dev and drops them if they're owned by - * @file. 
Note that after this call returns, new contexts might be added if - * the file is still alive. - */ -void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) -{ - struct drm_ctx_list *pos, *tmp; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - mutex_lock(&dev->ctxlist_mutex); - - list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) { - if (pos->tag == file && - pos->handle != DRM_KERNEL_CONTEXT) { - if (dev->driver->context_dtor) - dev->driver->context_dtor(dev, pos->handle); - - drm_legacy_ctxbitmap_free(dev, pos->handle); - list_del(&pos->head); - kfree(pos); - } - } - - mutex_unlock(&dev->ctxlist_mutex); -} - -/*@}*/ - -/******************************************************************/ -/** \name Per Context SAREA Support */ -/*@{*/ - -/* - * Get per-context SAREA. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx_priv_map structure. - * \return zero on success or a negative number on failure. - * - * Gets the map from drm_device::ctx_idr with the handle specified and - * returns its handle. - */ -int drm_legacy_getsareactx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx_priv_map *request = data; - struct drm_local_map *map; - struct drm_map_list *_entry; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - mutex_lock(&dev->struct_mutex); - - map = idr_find(&dev->ctx_idr, request->ctx_id); - if (!map) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - - request->handle = NULL; - list_for_each_entry(_entry, &dev->maplist, head) { - if (_entry->map == map) { - request->handle = - (void *)(unsigned long)_entry->user_token; - break; - } - } - - mutex_unlock(&dev->struct_mutex); - - if (request->handle == NULL) - return -EINVAL; - - return 0; -} - -/* - * Set per-context SAREA. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx_priv_map structure. - * \return zero on success or a negative number on failure. - * - * Searches the mapping specified in \p arg and update the entry in - * drm_device::ctx_idr with it. - */ -int drm_legacy_setsareactx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx_priv_map *request = data; - struct drm_local_map *map = NULL; - struct drm_map_list *r_list = NULL; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry(r_list, &dev->maplist, head) { - if (r_list->map - && r_list->user_token == (unsigned long) request->handle) - goto found; - } - bad: - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - - found: - map = r_list->map; - if (!map) - goto bad; - - if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id))) - goto bad; - - mutex_unlock(&dev->struct_mutex); - - return 0; -} - -/*@}*/ - -/******************************************************************/ -/** \name The actual DRM context handling routines */ -/*@{*/ - -/* - * Switch context. - * - * \param dev DRM device. - * \param old old context handle. - * \param new new context handle. - * \return zero on success or a negative number on failure. - * - * Attempt to set drm_device::context_flag. 
- */ -static int drm_context_switch(struct drm_device * dev, int old, int new) -{ - if (test_and_set_bit(0, &dev->context_flag)) { - DRM_ERROR("Reentering -- FIXME\n"); - return -EBUSY; - } - - DRM_DEBUG("Context switch from %d to %d\n", old, new); - - if (new == dev->last_context) { - clear_bit(0, &dev->context_flag); - return 0; - } - - return 0; -} - -/* - * Complete context switch. - * - * \param dev DRM device. - * \param new new context handle. - * \return zero on success or a negative number on failure. - * - * Updates drm_device::last_context and drm_device::last_switch. Verifies the - * hardware lock is held, clears the drm_device::context_flag and wakes up - * drm_device::context_wait. - */ -static int drm_context_switch_complete(struct drm_device *dev, - struct drm_file *file_priv, int new) -{ - dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ - - if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) { - DRM_ERROR("Lock isn't held after context switch\n"); - } - - /* If a context switch is ever initiated - when the kernel holds the lock, release - that lock here. - */ - clear_bit(0, &dev->context_flag); - - return 0; -} - -/* - * Reserve contexts. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx_res structure. - * \return zero on success or a negative number on failure. - */ -int drm_legacy_resctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx_res *res = data; - struct drm_ctx ctx; - int i; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (res->count >= DRM_RESERVED_CONTEXTS) { - memset(&ctx, 0, sizeof(ctx)); - for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { - ctx.handle = i; - if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx))) - return -EFAULT; - } - } - res->count = DRM_RESERVED_CONTEXTS; - - return 0; -} - -/* - * Add context. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx structure. - * \return zero on success or a negative number on failure. - * - * Get a new handle for the context and copy to userspace. - */ -int drm_legacy_addctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx_list *ctx_entry; - struct drm_ctx *ctx = data; - int tmp_handle; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - tmp_handle = drm_legacy_ctxbitmap_next(dev); - if (tmp_handle == DRM_KERNEL_CONTEXT) { - /* Skip kernel's context and get a new one. */ - tmp_handle = drm_legacy_ctxbitmap_next(dev); - } - DRM_DEBUG("%d\n", tmp_handle); - if (tmp_handle < 0) { - DRM_DEBUG("Not enough free contexts.\n"); - /* Should this return -EBUSY instead? */ - return tmp_handle; - } - - ctx->handle = tmp_handle; - - ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL); - if (!ctx_entry) { - DRM_DEBUG("out of memory\n"); - return -ENOMEM; - } - - INIT_LIST_HEAD(&ctx_entry->head); - ctx_entry->handle = ctx->handle; - ctx_entry->tag = file_priv; - - mutex_lock(&dev->ctxlist_mutex); - list_add(&ctx_entry->head, &dev->ctxlist); - mutex_unlock(&dev->ctxlist_mutex); - - return 0; -} - -/* - * Get context. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx structure. - * \return zero on success or a negative number on failure. 
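[Editor's note: for readers unfamiliar with the legacy context-handle scheme being deleted here, the sketch below is a minimal userspace model of what drm_legacy_ctxbitmap_next() and drm_legacy_addctx() (removed above) did: hand out the smallest free handle at or above the reserved range, and defensively retry if the reserved kernel context ever comes back. The names, the fixed table size, and the printed values are illustrative only; this is not the kernel idr-based implementation.]

/* toy model of legacy context-handle allocation; illustrative only */
#include <stdio.h>
#include <stdbool.h>

#define KERNEL_CONTEXT      0   /* mirrors DRM_KERNEL_CONTEXT */
#define RESERVED_CONTEXTS   1   /* mirrors DRM_RESERVED_CONTEXTS */
#define MAX_CONTEXTS        32  /* arbitrary cap for the sketch */

static bool ctx_used[MAX_CONTEXTS];

/* smallest free handle >= RESERVED_CONTEXTS, or -1 when exhausted */
static int ctxbitmap_next(void)
{
	for (int i = RESERVED_CONTEXTS; i < MAX_CONTEXTS; i++) {
		if (!ctx_used[i]) {
			ctx_used[i] = true;
			return i;
		}
	}
	return -1;
}

static int add_ctx(void)
{
	int handle = ctxbitmap_next();

	/* defensive retry, like drm_legacy_addctx() skipping the kernel context */
	if (handle == KERNEL_CONTEXT)
		handle = ctxbitmap_next();
	return handle;
}

int main(void)
{
	printf("first handle:  %d\n", add_ctx());  /* prints 1 */
	printf("second handle: %d\n", add_ctx());  /* prints 2 */
	return 0;
}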
- */ -int drm_legacy_getctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx *ctx = data; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - /* This is 0, because we don't handle any context flags */ - ctx->flags = 0; - - return 0; -} - -/* - * Switch context. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx structure. - * \return zero on success or a negative number on failure. - * - * Calls context_switch(). - */ -int drm_legacy_switchctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx *ctx = data; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - DRM_DEBUG("%d\n", ctx->handle); - return drm_context_switch(dev, dev->last_context, ctx->handle); -} - -/* - * New context. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx structure. - * \return zero on success or a negative number on failure. - * - * Calls context_switch_complete(). - */ -int drm_legacy_newctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx *ctx = data; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - DRM_DEBUG("%d\n", ctx->handle); - drm_context_switch_complete(dev, file_priv, ctx->handle); - - return 0; -} - -/* - * Remove context. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx structure. - * \return zero on success or a negative number on failure. - * - * If not the special kernel context, calls ctxbitmap_free() to free the specified context. - */ -int drm_legacy_rmctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx *ctx = data; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - DRM_DEBUG("%d\n", ctx->handle); - if (ctx->handle != DRM_KERNEL_CONTEXT) { - if (dev->driver->context_dtor) - dev->driver->context_dtor(dev, ctx->handle); - drm_legacy_ctxbitmap_free(dev, ctx->handle); - } - - mutex_lock(&dev->ctxlist_mutex); - if (!list_empty(&dev->ctxlist)) { - struct drm_ctx_list *pos, *n; - - list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { - if (pos->handle == ctx->handle) { - list_del(&pos->head); - kfree(pos); - } - } - } - mutex_unlock(&dev->ctxlist_mutex); - - return 0; -} - -/*@}*/ diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c deleted file mode 100644 index eb6b741a6f99..000000000000 --- a/drivers/gpu/drm/drm_dma.c +++ /dev/null @@ -1,178 +0,0 @@ -/* - * \file drm_dma.c - * DMA IOCTL and function support - * - * \author Rickard E. (Rik) Faith - * \author Gareth Hughes - */ - -/* - * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com - * - * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include -#include - -#include -#include - -#include "drm_legacy.h" - -/** - * drm_legacy_dma_setup() - Initialize the DMA data. - * - * @dev: DRM device. - * Return: zero on success or a negative value on failure. - * - * Allocate and initialize a drm_device_dma structure. - */ -int drm_legacy_dma_setup(struct drm_device *dev) -{ - int i; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) || - !drm_core_check_feature(dev, DRIVER_LEGACY)) - return 0; - - dev->buf_use = 0; - atomic_set(&dev->buf_alloc, 0); - - dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL); - if (!dev->dma) - return -ENOMEM; - - for (i = 0; i <= DRM_MAX_ORDER; i++) - memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0])); - - return 0; -} - -/** - * drm_legacy_dma_takedown() - Cleanup the DMA resources. - * - * @dev: DRM device. - * - * Free all pages associated with DMA buffers, the buffers and pages lists, and - * finally the drm_device::dma structure itself. - */ -void drm_legacy_dma_takedown(struct drm_device *dev) -{ - struct drm_device_dma *dma = dev->dma; - drm_dma_handle_t *dmah; - int i, j; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) || - !drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - if (!dma) - return; - - /* Clear dma buffers */ - for (i = 0; i <= DRM_MAX_ORDER; i++) { - if (dma->bufs[i].seg_count) { - DRM_DEBUG("order %d: buf_count = %d," - " seg_count = %d\n", - i, - dma->bufs[i].buf_count, - dma->bufs[i].seg_count); - for (j = 0; j < dma->bufs[i].seg_count; j++) { - if (dma->bufs[i].seglist[j]) { - dmah = dma->bufs[i].seglist[j]; - dma_free_coherent(dev->dev, - dmah->size, - dmah->vaddr, - dmah->busaddr); - kfree(dmah); - } - } - kfree(dma->bufs[i].seglist); - } - if (dma->bufs[i].buf_count) { - for (j = 0; j < dma->bufs[i].buf_count; j++) { - kfree(dma->bufs[i].buflist[j].dev_private); - } - kfree(dma->bufs[i].buflist); - } - } - - kfree(dma->buflist); - kfree(dma->pagelist); - kfree(dev->dma); - dev->dma = NULL; -} - -/** - * drm_legacy_free_buffer() - Free a buffer. - * - * @dev: DRM device. - * @buf: buffer to free. - * - * Resets the fields of \p buf. - */ -void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf) -{ - if (!buf) - return; - - buf->waiting = 0; - buf->pending = 0; - buf->file_priv = NULL; - buf->used = 0; -} - -/** - * drm_legacy_reclaim_buffers() - Reclaim the buffers. - * - * @dev: DRM device. 
- * @file_priv: DRM file private. - * - * Frees each buffer associated with \p file_priv not already on the hardware. - */ -void drm_legacy_reclaim_buffers(struct drm_device *dev, - struct drm_file *file_priv) -{ - struct drm_device_dma *dma = dev->dma; - int i; - - if (!dma) - return; - for (i = 0; i < dma->buf_count; i++) { - if (dma->buflist[i]->file_priv == file_priv) { - switch (dma->buflist[i]->list) { - case DRM_LIST_NONE: - drm_legacy_free_buffer(dev, dma->buflist[i]); - break; - case DRM_LIST_WAIT: - dma->buflist[i]->list = DRM_LIST_RECLAIM; - break; - default: - /* Buffer already on hardware. */ - break; - } - } - } -} diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c deleted file mode 100644 index 60afa1865559..000000000000 --- a/drivers/gpu/drm/drm_hashtab.c +++ /dev/null @@ -1,203 +0,0 @@ -/************************************************************************** - * - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * - **************************************************************************/ -/* - * Simple open hash tab implementation. 
- * - * Authors: - * Thomas Hellström - */ - -#include -#include -#include -#include -#include - -#include - -#include "drm_legacy.h" - -int drm_ht_create(struct drm_open_hash *ht, unsigned int order) -{ - unsigned int size = 1 << order; - - ht->order = order; - ht->table = NULL; - if (size <= PAGE_SIZE / sizeof(*ht->table)) - ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL); - else - ht->table = vzalloc(array_size(size, sizeof(*ht->table))); - if (!ht->table) { - DRM_ERROR("Out of memory for hash table\n"); - return -ENOMEM; - } - return 0; -} - -void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key) -{ - struct drm_hash_item *entry; - struct hlist_head *h_list; - unsigned int hashed_key; - int count = 0; - - hashed_key = hash_long(key, ht->order); - DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); - h_list = &ht->table[hashed_key]; - hlist_for_each_entry(entry, h_list, head) - DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); -} - -static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht, - unsigned long key) -{ - struct drm_hash_item *entry; - struct hlist_head *h_list; - unsigned int hashed_key; - - hashed_key = hash_long(key, ht->order); - h_list = &ht->table[hashed_key]; - hlist_for_each_entry(entry, h_list, head) { - if (entry->key == key) - return &entry->head; - if (entry->key > key) - break; - } - return NULL; -} - -static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht, - unsigned long key) -{ - struct drm_hash_item *entry; - struct hlist_head *h_list; - unsigned int hashed_key; - - hashed_key = hash_long(key, ht->order); - h_list = &ht->table[hashed_key]; - hlist_for_each_entry_rcu(entry, h_list, head) { - if (entry->key == key) - return &entry->head; - if (entry->key > key) - break; - } - return NULL; -} - -int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) -{ - struct drm_hash_item *entry; - struct hlist_head *h_list; - struct hlist_node *parent; - unsigned int hashed_key; - unsigned long key = item->key; - - hashed_key = hash_long(key, ht->order); - h_list = &ht->table[hashed_key]; - parent = NULL; - hlist_for_each_entry(entry, h_list, head) { - if (entry->key == key) - return -EINVAL; - if (entry->key > key) - break; - parent = &entry->head; - } - if (parent) { - hlist_add_behind_rcu(&item->head, parent); - } else { - hlist_add_head_rcu(&item->head, h_list); - } - return 0; -} - -/* - * Just insert an item and return any "bits" bit key that hasn't been - * used before. 
- */ -int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, - unsigned long seed, int bits, int shift, - unsigned long add) -{ - int ret; - unsigned long mask = (1UL << bits) - 1; - unsigned long first, unshifted_key; - - unshifted_key = hash_long(seed, bits); - first = unshifted_key; - do { - item->key = (unshifted_key << shift) + add; - ret = drm_ht_insert_item(ht, item); - if (ret) - unshifted_key = (unshifted_key + 1) & mask; - } while(ret && (unshifted_key != first)); - - if (ret) { - DRM_ERROR("Available key bit space exhausted\n"); - return -EINVAL; - } - return 0; -} - -int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, - struct drm_hash_item **item) -{ - struct hlist_node *list; - - list = drm_ht_find_key_rcu(ht, key); - if (!list) - return -EINVAL; - - *item = hlist_entry(list, struct drm_hash_item, head); - return 0; -} - -int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key) -{ - struct hlist_node *list; - - list = drm_ht_find_key(ht, key); - if (list) { - hlist_del_init_rcu(list); - return 0; - } - return -EINVAL; -} - -int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item) -{ - hlist_del_init_rcu(&item->head); - return 0; -} - -void drm_ht_remove(struct drm_open_hash *ht) -{ - if (ht->table) { - kvfree(ht->table); - ht->table = NULL; - } -} diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index fa08a313127e..8e4faf0a28e6 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -121,11 +121,6 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, /* drm_irq.c */ /* IOCTLS */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) -int drm_legacy_irq_control(struct drm_device *dev, void *data, - struct drm_file *file_priv); -#endif - int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c deleted file mode 100644 index d327638e15ee..000000000000 --- a/drivers/gpu/drm/drm_irq.c +++ /dev/null @@ -1,204 +0,0 @@ -/* - * drm_irq.c IRQ and vblank support - * - * \author Rickard E. (Rik) Faith - * \author Gareth Hughes - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com - * - * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. 
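[Editor's note: the free-key search performed by drm_ht_just_insert_please(), removed above, can be summarized as: hash a seed down to a small key space, try to insert, and on collision walk the key space modulo 2^bits until a free key is found or the search wraps back to where it started. The sketch below models that idea in plain userspace C; the trivial hash and the used[] bitmap stand in for hash_long() and for "a key already present in the hlist buckets", and all names are made up for illustration.]

/* toy model of the drm_ht_just_insert_please() key probe; illustrative only */
#include <stdio.h>
#include <stdbool.h>

#define KEY_BITS  4
#define KEY_SPACE (1u << KEY_BITS)

static bool key_used[KEY_SPACE];

static unsigned int toy_hash(unsigned long seed)
{
	/* stand-in for hash_long(seed, bits) */
	return (unsigned int)(seed * 2654435761u) & (KEY_SPACE - 1);
}

/* returns the allocated key, or -1 if every key is taken */
static int just_insert_please(unsigned long seed)
{
	unsigned int mask = KEY_SPACE - 1;
	unsigned int first = toy_hash(seed);
	unsigned int key = first;

	do {
		if (!key_used[key]) {
			key_used[key] = true;
			return (int)key;
		}
		key = (key + 1) & mask;  /* next candidate key, wrapping */
	} while (key != first);

	return -1;                       /* key space exhausted */
}

int main(void)
{
	for (long seed = 0; seed < 3; seed++)
		printf("seed %ld -> key %d\n", seed, just_insert_please(seed));
	return 0;
}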
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - - -#include -#include /* For task queue support */ -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "drm_internal.h" - -static int drm_legacy_irq_install(struct drm_device *dev, int irq) -{ - int ret; - unsigned long sh_flags = 0; - - if (irq == 0) - return -EINVAL; - - if (dev->irq_enabled) - return -EBUSY; - dev->irq_enabled = true; - - DRM_DEBUG("irq=%d\n", irq); - - /* Before installing handler */ - if (dev->driver->irq_preinstall) - dev->driver->irq_preinstall(dev); - - /* PCI devices require shared interrupts. */ - if (dev_is_pci(dev->dev)) - sh_flags = IRQF_SHARED; - - ret = request_irq(irq, dev->driver->irq_handler, - sh_flags, dev->driver->name, dev); - - if (ret < 0) { - dev->irq_enabled = false; - return ret; - } - - /* After installing handler */ - if (dev->driver->irq_postinstall) - ret = dev->driver->irq_postinstall(dev); - - if (ret < 0) { - dev->irq_enabled = false; - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - vga_client_unregister(to_pci_dev(dev->dev)); - free_irq(irq, dev); - } else { - dev->irq = irq; - } - - return ret; -} - -int drm_legacy_irq_uninstall(struct drm_device *dev) -{ - unsigned long irqflags; - bool irq_enabled; - int i; - - irq_enabled = dev->irq_enabled; - dev->irq_enabled = false; - - /* - * Wake up any waiters so they don't hang. This is just to paper over - * issues for UMS drivers which aren't in full control of their - * vblank/irq handling. KMS drivers must ensure that vblanks are all - * disabled when uninstalling the irq handler. 
- */ - if (drm_dev_has_vblank(dev)) { - spin_lock_irqsave(&dev->vbl_lock, irqflags); - for (i = 0; i < dev->num_crtcs; i++) { - struct drm_vblank_crtc *vblank = &dev->vblank[i]; - - if (!vblank->enabled) - continue; - - WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET)); - - drm_vblank_disable_and_save(dev, i); - wake_up(&vblank->queue); - } - spin_unlock_irqrestore(&dev->vbl_lock, irqflags); - } - - if (!irq_enabled) - return -EINVAL; - - DRM_DEBUG("irq=%d\n", dev->irq); - - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - vga_client_unregister(to_pci_dev(dev->dev)); - - if (dev->driver->irq_uninstall) - dev->driver->irq_uninstall(dev); - - free_irq(dev->irq, dev); - - return 0; -} -EXPORT_SYMBOL(drm_legacy_irq_uninstall); - -int drm_legacy_irq_control(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_control *ctl = data; - int ret = 0, irq; - struct pci_dev *pdev; - - /* if we haven't irq we fallback for compatibility reasons - - * this used to be a separate function in drm_dma.h - */ - - if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) - return 0; - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return 0; - /* UMS was only ever supported on pci devices. */ - if (WARN_ON(!dev_is_pci(dev->dev))) - return -EINVAL; - - switch (ctl->func) { - case DRM_INST_HANDLER: - pdev = to_pci_dev(dev->dev); - irq = pdev->irq; - - if (dev->if_version < DRM_IF_VERSION(1, 2) && - ctl->irq != irq) - return -EINVAL; - mutex_lock(&dev->struct_mutex); - ret = drm_legacy_irq_install(dev, irq); - mutex_unlock(&dev->struct_mutex); - - return ret; - case DRM_UNINST_HANDLER: - mutex_lock(&dev->struct_mutex); - ret = drm_legacy_irq_uninstall(dev); - mutex_unlock(&dev->struct_mutex); - - return ret; - default: - return -EINVAL; - } -} diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h deleted file mode 100644 index 70c9dba114a6..000000000000 --- a/drivers/gpu/drm/drm_legacy.h +++ /dev/null @@ -1,290 +0,0 @@ -#ifndef __DRM_LEGACY_H__ -#define __DRM_LEGACY_H__ - -/* - * Copyright (c) 2014 David Herrmann - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * This file contains legacy interfaces that modern drm drivers - * should no longer be using. They cannot be removed as legacy - * drivers use them, and removing them are API breaks. 
- */ -#include - -#include -#include -#include - -struct agp_memory; -struct drm_buf_desc; -struct drm_device; -struct drm_file; -struct drm_hash_item; -struct drm_open_hash; - -/* - * Hash-table Support - */ - -#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) - -/* drm_hashtab.c */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) -int drm_ht_create(struct drm_open_hash *ht, unsigned int order); -int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); -int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, - unsigned long seed, int bits, int shift, - unsigned long add); -int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); - -void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); -int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); -int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); -void drm_ht_remove(struct drm_open_hash *ht); -#endif - -/* - * RCU-safe interface - * - * The user of this API needs to make sure that two or more instances of the - * hash table manipulation functions are never run simultaneously. - * The lookup function drm_ht_find_item_rcu may, however, run simultaneously - * with any of the manipulation functions as long as it's called from within - * an RCU read-locked section. - */ -#define drm_ht_insert_item_rcu drm_ht_insert_item -#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please -#define drm_ht_remove_key_rcu drm_ht_remove_key -#define drm_ht_remove_item_rcu drm_ht_remove_item -#define drm_ht_find_item_rcu drm_ht_find_item - -/* - * Generic DRM Contexts - */ - -#define DRM_KERNEL_CONTEXT 0 -#define DRM_RESERVED_CONTEXTS 1 - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_ctxbitmap_init(struct drm_device *dev); -void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev); -void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file); -#else -static inline void drm_legacy_ctxbitmap_init(struct drm_device *dev) {} -static inline void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev) {} -static inline void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) {} -#endif - -void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle); - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_switchctx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_newctx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f); - -int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f); -#endif - -/* - * Generic Buffer Management - */ - -#define DRM_MAP_HASH_OFFSET 0x10000000 - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -static inline int drm_legacy_create_map_hash(struct drm_device *dev) -{ - return drm_ht_create(&dev->map_hash, 12); -} - -static inline void drm_legacy_remove_map_hash(struct drm_device *dev) -{ - drm_ht_remove(&dev->map_hash); -} -#else -static inline int drm_legacy_create_map_hash(struct drm_device *dev) -{ - return 0; -} - -static inline void drm_legacy_remove_map_hash(struct drm_device *dev) {} -#endif - - -#if IS_ENABLED(CONFIG_DRM_LEGACY) 
-int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f); - -int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f); -#endif - -int __drm_legacy_infobufs(struct drm_device *, void *, int *, - int (*)(void *, int, struct drm_buf_entry *)); -int __drm_legacy_mapbufs(struct drm_device *, void *, int *, - void __user **, - int (*)(void *, int, unsigned long, struct drm_buf *), - struct drm_file *); - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_master_rmmaps(struct drm_device *dev, - struct drm_master *master); -void drm_legacy_rmmaps(struct drm_device *dev); -#else -static inline void drm_legacy_master_rmmaps(struct drm_device *dev, - struct drm_master *master) {} -static inline void drm_legacy_rmmaps(struct drm_device *dev) {} -#endif - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_vma_flush(struct drm_device *d); -#else -static inline void drm_legacy_vma_flush(struct drm_device *d) -{ - /* do nothing */ -} -#endif - -/* - * AGP Support - */ - -struct drm_agp_mem { - unsigned long handle; - struct agp_memory *memory; - unsigned long bound; - int pages; - struct list_head head; -}; - -/* drm_agpsupport.c */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP) -void drm_legacy_agp_clear(struct drm_device *dev); - -int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -#else -static inline void drm_legacy_agp_clear(struct drm_device *dev) {} -#endif - -/* drm_lock.c */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) -int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f); -void drm_legacy_lock_release(struct drm_device *dev, struct file *filp); -#else -static inline void drm_legacy_lock_release(struct drm_device *dev, struct file *filp) {} -#endif - -/* DMA support */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) -int drm_legacy_dma_setup(struct drm_device *dev); -void drm_legacy_dma_takedown(struct drm_device *dev); -#else -static inline int drm_legacy_dma_setup(struct drm_device *dev) -{ - return 0; -} -#endif - -void drm_legacy_free_buffer(struct drm_device *dev, - struct drm_buf * buf); -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_reclaim_buffers(struct drm_device *dev, - struct 
drm_file *filp); -#else -static inline void drm_legacy_reclaim_buffers(struct drm_device *dev, - struct drm_file *filp) {} -#endif - -/* Scatter Gather Support */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_sg_cleanup(struct drm_device *dev); -int drm_legacy_sg_alloc(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_sg_free(struct drm_device *dev, void *data, - struct drm_file *file_priv); -#endif - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_init_members(struct drm_device *dev); -void drm_legacy_destroy_members(struct drm_device *dev); -void drm_legacy_dev_reinit(struct drm_device *dev); -int drm_legacy_setup(struct drm_device * dev); -#else -static inline void drm_legacy_init_members(struct drm_device *dev) {} -static inline void drm_legacy_destroy_members(struct drm_device *dev) {} -static inline void drm_legacy_dev_reinit(struct drm_device *dev) {} -static inline int drm_legacy_setup(struct drm_device * dev) { return 0; } -#endif - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master); -#else -static inline void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master) {} -#endif - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_master_legacy_init(struct drm_master *master); -#else -static inline void drm_master_legacy_init(struct drm_master *master) {} -#endif - -/* drm_pci.c */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_PCI) -int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv); -void drm_legacy_pci_agp_destroy(struct drm_device *dev); -#else -static inline int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return -EINVAL; -} - -static inline void drm_legacy_pci_agp_destroy(struct drm_device *dev) {} -#endif - -#endif /* __DRM_LEGACY_H__ */ diff --git a/drivers/gpu/drm/drm_legacy_misc.c b/drivers/gpu/drm/drm_legacy_misc.c deleted file mode 100644 index d4c5434062d7..000000000000 --- a/drivers/gpu/drm/drm_legacy_misc.c +++ /dev/null @@ -1,105 +0,0 @@ -/* - * \file drm_legacy_misc.c - * Misc legacy support functions. - * - * \author Rickard E. (Rik) Faith - * \author Gareth Hughes - */ - -/* - * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include -#include -#include - -#include "drm_internal.h" -#include "drm_legacy.h" - -void drm_legacy_init_members(struct drm_device *dev) -{ - INIT_LIST_HEAD(&dev->ctxlist); - INIT_LIST_HEAD(&dev->vmalist); - INIT_LIST_HEAD(&dev->maplist); - spin_lock_init(&dev->buf_lock); - mutex_init(&dev->ctxlist_mutex); -} - -void drm_legacy_destroy_members(struct drm_device *dev) -{ - mutex_destroy(&dev->ctxlist_mutex); -} - -int drm_legacy_setup(struct drm_device * dev) -{ - int ret; - - if (dev->driver->firstopen && - drm_core_check_feature(dev, DRIVER_LEGACY)) { - ret = dev->driver->firstopen(dev); - if (ret != 0) - return ret; - } - - ret = drm_legacy_dma_setup(dev); - if (ret < 0) - return ret; - - - DRM_DEBUG("\n"); - return 0; -} - -void drm_legacy_dev_reinit(struct drm_device *dev) -{ - if (dev->irq_enabled) - drm_legacy_irq_uninstall(dev); - - mutex_lock(&dev->struct_mutex); - - drm_legacy_agp_clear(dev); - - drm_legacy_sg_cleanup(dev); - drm_legacy_vma_flush(dev); - drm_legacy_dma_takedown(dev); - - mutex_unlock(&dev->struct_mutex); - - dev->sigdata.lock = NULL; - - dev->context_flag = 0; - dev->last_context = 0; - dev->if_version = 0; - - DRM_DEBUG("lastclose completed\n"); -} - -void drm_master_legacy_init(struct drm_master *master) -{ - spin_lock_init(&master->lock.spinlock); - init_waitqueue_head(&master->lock.lock_queue); -} diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c deleted file mode 100644 index 1efbd5389d89..000000000000 --- a/drivers/gpu/drm/drm_lock.c +++ /dev/null @@ -1,373 +0,0 @@ -/* - * \file drm_lock.c - * IOCTLs for locking - * - * \author Rickard E. (Rik) Faith - * \author Gareth Hughes - */ - -/* - * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include -#include - -#include -#include -#include -#include - -#include "drm_internal.h" -#include "drm_legacy.h" - -static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); - -/* - * Take the heavyweight lock. 
- * - * \param lock lock pointer. - * \param context locking context. - * \return one if the lock is held, or zero otherwise. - * - * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. - */ -static -int drm_lock_take(struct drm_lock_data *lock_data, - unsigned int context) -{ - unsigned int old, new, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - spin_lock_bh(&lock_data->spinlock); - do { - old = *lock; - if (old & _DRM_LOCK_HELD) - new = old | _DRM_LOCK_CONT; - else { - new = context | _DRM_LOCK_HELD | - ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ? - _DRM_LOCK_CONT : 0); - } - prev = cmpxchg(lock, old, new); - } while (prev != old); - spin_unlock_bh(&lock_data->spinlock); - - if (_DRM_LOCKING_CONTEXT(old) == context) { - if (old & _DRM_LOCK_HELD) { - if (context != DRM_KERNEL_CONTEXT) { - DRM_ERROR("%d holds heavyweight lock\n", - context); - } - return 0; - } - } - - if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) { - /* Have lock */ - return 1; - } - return 0; -} - -/* - * This takes a lock forcibly and hands it to context. Should ONLY be used - * inside *_unlock to give lock to kernel before calling *_dma_schedule. - * - * \param dev DRM device. - * \param lock lock pointer. - * \param context locking context. - * \return always one. - * - * Resets the lock file pointer. - * Marks the lock as held by the given context, via the \p cmpxchg instruction. - */ -static int drm_lock_transfer(struct drm_lock_data *lock_data, - unsigned int context) -{ - unsigned int old, new, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - lock_data->file_priv = NULL; - do { - old = *lock; - new = context | _DRM_LOCK_HELD; - prev = cmpxchg(lock, old, new); - } while (prev != old); - return 1; -} - -static int drm_legacy_lock_free(struct drm_lock_data *lock_data, - unsigned int context) -{ - unsigned int old, new, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - spin_lock_bh(&lock_data->spinlock); - if (lock_data->kernel_waiters != 0) { - drm_lock_transfer(lock_data, 0); - lock_data->idle_has_lock = 1; - spin_unlock_bh(&lock_data->spinlock); - return 1; - } - spin_unlock_bh(&lock_data->spinlock); - - do { - old = *lock; - new = _DRM_LOCKING_CONTEXT(old); - prev = cmpxchg(lock, old, new); - } while (prev != old); - - if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) { - DRM_ERROR("%d freed heavyweight lock held by %d\n", - context, _DRM_LOCKING_CONTEXT(old)); - return 1; - } - wake_up_interruptible(&lock_data->lock_queue); - return 0; -} - -/* - * Lock ioctl. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument, pointing to a drm_lock structure. - * \return zero on success or negative number on failure. - * - * Add the current task to the lock wait queue, and attempt to take to lock. 
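[Editor's note: the heavyweight-lock take in drm_lock_take(), removed above, is a compare-and-swap loop on a single lock word that packs the owning context into the low bits with "held" and "contended" flag bits on top. The single-threaded sketch below models that loop with C11 atomics; the flag values and helper names are chosen for this sketch only (the real definitions live in the DRM uapi header), and the simplified lock_free() stands in for drm_legacy_lock_free() without the waiter bookkeeping.]

/* toy model of the legacy heavyweight-lock word; illustrative only */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define LOCK_HELD 0x80000000u
#define LOCK_CONT 0x40000000u
#define LOCKING_CONTEXT(x) ((x) & ~(LOCK_HELD | LOCK_CONT))

static atomic_uint hw_lock;

/* returns true if `context` now holds the lock */
static bool lock_take(unsigned int context)
{
	unsigned int old, new;

	do {
		old = atomic_load(&hw_lock);
		if (old & LOCK_HELD)
			new = old | LOCK_CONT;       /* already held: mark contention */
		else
			new = context | LOCK_HELD;   /* free: grab it for this context */
	} while (!atomic_compare_exchange_weak(&hw_lock, &old, new));

	return (new & LOCK_HELD) && LOCKING_CONTEXT(new) == context;
}

static void lock_free(void)
{
	/* drop the HELD/CONT bits, keeping only the last context id */
	unsigned int old = atomic_load(&hw_lock);

	atomic_store(&hw_lock, LOCKING_CONTEXT(old));
}

int main(void)
{
	printf("ctx 2 takes lock: %d\n", lock_take(2));  /* 1 */
	printf("ctx 3 takes lock: %d\n", lock_take(3));  /* 0, contended */
	lock_free();
	printf("ctx 3 retries:    %d\n", lock_take(3));  /* 1 */
	return 0;
}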
- */ -int drm_legacy_lock(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - DECLARE_WAITQUEUE(entry, current); - struct drm_lock *lock = data; - struct drm_master *master = file_priv->master; - int ret = 0; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - ++file_priv->lock_count; - - if (lock->context == DRM_KERNEL_CONTEXT) { - DRM_ERROR("Process %d using kernel context %d\n", - task_pid_nr(current), lock->context); - return -EINVAL; - } - - DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", - lock->context, task_pid_nr(current), - master->lock.hw_lock ? master->lock.hw_lock->lock : -1, - lock->flags); - - add_wait_queue(&master->lock.lock_queue, &entry); - spin_lock_bh(&master->lock.spinlock); - master->lock.user_waiters++; - spin_unlock_bh(&master->lock.spinlock); - - for (;;) { - __set_current_state(TASK_INTERRUPTIBLE); - if (!master->lock.hw_lock) { - /* Device has been unregistered */ - send_sig(SIGTERM, current, 0); - ret = -EINTR; - break; - } - if (drm_lock_take(&master->lock, lock->context)) { - master->lock.file_priv = file_priv; - master->lock.lock_time = jiffies; - break; /* Got lock */ - } - - /* Contention */ - mutex_unlock(&drm_global_mutex); - schedule(); - mutex_lock(&drm_global_mutex); - if (signal_pending(current)) { - ret = -EINTR; - break; - } - } - spin_lock_bh(&master->lock.spinlock); - master->lock.user_waiters--; - spin_unlock_bh(&master->lock.spinlock); - __set_current_state(TASK_RUNNING); - remove_wait_queue(&master->lock.lock_queue, &entry); - - DRM_DEBUG("%d %s\n", lock->context, - ret ? "interrupted" : "has lock"); - if (ret) return ret; - - /* don't set the block all signals on the master process for now - * really probably not the correct answer but lets us debug xkb - * xserver for now */ - if (!drm_is_current_master(file_priv)) { - dev->sigdata.context = lock->context; - dev->sigdata.lock = master->lock.hw_lock; - } - - if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) - { - if (dev->driver->dma_quiescent(dev)) { - DRM_DEBUG("%d waiting for DMA quiescent\n", - lock->context); - return -EBUSY; - } - } - - return 0; -} - -/* - * Unlock ioctl. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument, pointing to a drm_lock structure. - * \return zero on success or negative number on failure. - * - * Transfer and free the lock. - */ -int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - struct drm_lock *lock = data; - struct drm_master *master = file_priv->master; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (lock->context == DRM_KERNEL_CONTEXT) { - DRM_ERROR("Process %d using kernel context %d\n", - task_pid_nr(current), lock->context); - return -EINVAL; - } - - if (drm_legacy_lock_free(&master->lock, lock->context)) { - /* FIXME: Should really bail out here. */ - } - - return 0; -} - -/* - * This function returns immediately and takes the hw lock - * with the kernel context if it is free, otherwise it gets the highest priority when and if - * it is eventually released. - * - * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held - * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause - * a deadlock, which is why the "idlelock" was invented). - * - * This should be sufficient to wait for GPU idle without - * having to worry about starvation. 
- */ -void drm_legacy_idlelock_take(struct drm_lock_data *lock_data) -{ - int ret; - - spin_lock_bh(&lock_data->spinlock); - lock_data->kernel_waiters++; - if (!lock_data->idle_has_lock) { - - spin_unlock_bh(&lock_data->spinlock); - ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT); - spin_lock_bh(&lock_data->spinlock); - - if (ret == 1) - lock_data->idle_has_lock = 1; - } - spin_unlock_bh(&lock_data->spinlock); -} -EXPORT_SYMBOL(drm_legacy_idlelock_take); - -void drm_legacy_idlelock_release(struct drm_lock_data *lock_data) -{ - unsigned int old, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - spin_lock_bh(&lock_data->spinlock); - if (--lock_data->kernel_waiters == 0) { - if (lock_data->idle_has_lock) { - do { - old = *lock; - prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT); - } while (prev != old); - wake_up_interruptible(&lock_data->lock_queue); - lock_data->idle_has_lock = 0; - } - } - spin_unlock_bh(&lock_data->spinlock); -} -EXPORT_SYMBOL(drm_legacy_idlelock_release); - -static int drm_legacy_i_have_hw_lock(struct drm_device *dev, - struct drm_file *file_priv) -{ - struct drm_master *master = file_priv->master; - - return (file_priv->lock_count && master->lock.hw_lock && - _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) && - master->lock.file_priv == file_priv); -} - -void drm_legacy_lock_release(struct drm_device *dev, struct file *filp) -{ - struct drm_file *file_priv = filp->private_data; - - /* if the master has gone away we can't do anything with the lock */ - if (!dev->master) - return; - - if (drm_legacy_i_have_hw_lock(dev, file_priv)) { - DRM_DEBUG("File %p released, freeing lock for context %d\n", - filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); - drm_legacy_lock_free(&file_priv->master->lock, - _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); - } -} - -void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - /* - * Since the master is disappearing, so is the - * possibility to lock. - */ - mutex_lock(&dev->struct_mutex); - if (master->lock.hw_lock) { - if (dev->sigdata.lock == master->lock.hw_lock) - dev->sigdata.lock = NULL; - master->lock.hw_lock = NULL; - master->lock.file_priv = NULL; - wake_up_interruptible_all(&master->lock.lock_queue); - } - mutex_unlock(&dev->struct_mutex); -} diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c deleted file mode 100644 index d2e1dccd8113..000000000000 --- a/drivers/gpu/drm/drm_memory.c +++ /dev/null @@ -1,138 +0,0 @@ -/* - * \file drm_memory.c - * Memory management wrappers for DRM - * - * \author Rickard E. (Rik) Faith - * \author Gareth Hughes - */ - -/* - * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include -#include -#include -#include - -#include -#include - -#include "drm_legacy.h" - -#if IS_ENABLED(CONFIG_AGP) - -#ifdef HAVE_PAGE_AGP -# include -#else -# ifdef __powerpc__ -# define PAGE_AGP pgprot_noncached_wc(PAGE_KERNEL) -# else -# define PAGE_AGP PAGE_KERNEL -# endif -#endif - -static void *agp_remap(unsigned long offset, unsigned long size, - struct drm_device *dev) -{ - unsigned long i, num_pages = - PAGE_ALIGN(size) / PAGE_SIZE; - struct drm_agp_mem *agpmem; - struct page **page_map; - struct page **phys_page_map; - void *addr; - - size = PAGE_ALIGN(size); - -#ifdef __alpha__ - offset -= dev->hose->mem_space->start; -#endif - - list_for_each_entry(agpmem, &dev->agp->memory, head) - if (agpmem->bound <= offset - && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= - (offset + size)) - break; - if (&agpmem->head == &dev->agp->memory) - return NULL; - - /* - * OK, we're mapping AGP space on a chipset/platform on which memory accesses by - * the CPU do not get remapped by the GART. We fix this by using the kernel's - * page-table instead (that's probably faster anyhow...). - */ - /* note: use vmalloc() because num_pages could be large... 
*/ - page_map = vmalloc(array_size(num_pages, sizeof(struct page *))); - if (!page_map) - return NULL; - - phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE); - for (i = 0; i < num_pages; ++i) - page_map[i] = phys_page_map[i]; - addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP); - vfree(page_map); - - return addr; -} - -#else /* CONFIG_AGP */ -static inline void *agp_remap(unsigned long offset, unsigned long size, - struct drm_device *dev) -{ - return NULL; -} - -#endif /* CONFIG_AGP */ - -void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev) -{ - if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) - map->handle = agp_remap(map->offset, map->size, dev); - else - map->handle = ioremap(map->offset, map->size); -} -EXPORT_SYMBOL(drm_legacy_ioremap); - -void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev) -{ - if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) - map->handle = agp_remap(map->offset, map->size, dev); - else - map->handle = ioremap_wc(map->offset, map->size); -} -EXPORT_SYMBOL(drm_legacy_ioremap_wc); - -void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev) -{ - if (!map->handle || !map->size) - return; - - if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) - vunmap(map->handle); - else - iounmap(map->handle); -} -EXPORT_SYMBOL(drm_legacy_ioremapfree); diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 67907dd42721..c585f1e8803e 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -35,13 +35,6 @@ #include #include "drm_internal.h" -#include "drm_legacy.h" - -#ifdef CONFIG_DRM_LEGACY -/* List of devices hanging off drivers with stealth attach. */ -static LIST_HEAD(legacy_dev_list); -static DEFINE_MUTEX(legacy_dev_list_lock); -#endif static int drm_get_pci_domain(struct drm_device *dev) { @@ -72,199 +65,3 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master) master->unique_len = strlen(master->unique); return 0; } - -#ifdef CONFIG_DRM_LEGACY - -static int drm_legacy_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) -{ - struct pci_dev *pdev = to_pci_dev(dev->dev); - - if ((p->busnum >> 8) != drm_get_pci_domain(dev) || - (p->busnum & 0xff) != pdev->bus->number || - p->devnum != PCI_SLOT(pdev->devfn) || p->funcnum != PCI_FUNC(pdev->devfn)) - return -EINVAL; - - p->irq = pdev->irq; - - DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum, - p->irq); - return 0; -} - -/** - * drm_legacy_irq_by_busid - Get interrupt from bus ID - * @dev: DRM device - * @data: IOCTL parameter pointing to a drm_irq_busid structure - * @file_priv: DRM file private. - * - * Finds the PCI device with the specified bus id and gets its IRQ number. - * This IOCTL is deprecated, and will now return EINVAL for any busid not equal - * to that of the device that this DRM instance attached to. - * - * Return: 0 on success or a negative error code on failure. - */ -int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_irq_busid *p = data; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - /* UMS was only ever support on PCI devices. 
*/ - if (WARN_ON(!dev_is_pci(dev->dev))) - return -EINVAL; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) - return -EOPNOTSUPP; - - return drm_legacy_pci_irq_by_busid(dev, p); -} - -void drm_legacy_pci_agp_destroy(struct drm_device *dev) -{ - if (dev->agp) { - arch_phys_wc_del(dev->agp->agp_mtrr); - drm_legacy_agp_clear(dev); - kfree(dev->agp); - dev->agp = NULL; - } -} - -static void drm_legacy_pci_agp_init(struct drm_device *dev) -{ - if (drm_core_check_feature(dev, DRIVER_USE_AGP)) { - if (pci_find_capability(to_pci_dev(dev->dev), PCI_CAP_ID_AGP)) - dev->agp = drm_legacy_agp_init(dev); - if (dev->agp) { - dev->agp->agp_mtrr = arch_phys_wc_add( - dev->agp->agp_info.aper_base, - dev->agp->agp_info.aper_size * - 1024 * 1024); - } - } -} - -static int drm_legacy_get_pci_dev(struct pci_dev *pdev, - const struct pci_device_id *ent, - const struct drm_driver *driver) -{ - struct drm_device *dev; - int ret; - - DRM_DEBUG("\n"); - - dev = drm_dev_alloc(driver, &pdev->dev); - if (IS_ERR(dev)) - return PTR_ERR(dev); - - ret = pci_enable_device(pdev); - if (ret) - goto err_free; - -#ifdef __alpha__ - dev->hose = pdev->sysdata; -#endif - - drm_legacy_pci_agp_init(dev); - - ret = drm_dev_register(dev, ent->driver_data); - if (ret) - goto err_agp; - - if (drm_core_check_feature(dev, DRIVER_LEGACY)) { - mutex_lock(&legacy_dev_list_lock); - list_add_tail(&dev->legacy_dev_list, &legacy_dev_list); - mutex_unlock(&legacy_dev_list_lock); - } - - return 0; - -err_agp: - drm_legacy_pci_agp_destroy(dev); - pci_disable_device(pdev); -err_free: - drm_dev_put(dev); - return ret; -} - -/** - * drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver - * @driver: DRM device driver - * @pdriver: PCI device driver - * - * This is only used by legacy dri1 drivers and deprecated. - * - * Return: 0 on success or a negative error code on failure. - */ -int drm_legacy_pci_init(const struct drm_driver *driver, - struct pci_driver *pdriver) -{ - struct pci_dev *pdev = NULL; - const struct pci_device_id *pid; - int i; - - DRM_DEBUG("\n"); - - if (WARN_ON(!(driver->driver_features & DRIVER_LEGACY))) - return -EINVAL; - - /* If not using KMS, fall back to stealth mode manual scanning. */ - for (i = 0; pdriver->id_table[i].vendor != 0; i++) { - pid = &pdriver->id_table[i]; - - /* Loop around setting up a DRM device for each PCI device - * matching our ID and device class. If we had the internal - * function that pci_get_subsys and pci_get_class used, we'd - * be able to just pass pid in instead of doing a two-stage - * thing. - */ - pdev = NULL; - while ((pdev = - pci_get_subsys(pid->vendor, pid->device, pid->subvendor, - pid->subdevice, pdev)) != NULL) { - if ((pdev->class & pid->class_mask) != pid->class) - continue; - - /* stealth mode requires a manual probe */ - pci_dev_get(pdev); - drm_legacy_get_pci_dev(pdev, pid, driver); - } - } - return 0; -} -EXPORT_SYMBOL(drm_legacy_pci_init); - -/** - * drm_legacy_pci_exit - unregister shadow-attach legacy DRM driver - * @driver: DRM device driver - * @pdriver: PCI device driver - * - * Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This - * is deprecated and only used by dri1 drivers. 
- */ -void drm_legacy_pci_exit(const struct drm_driver *driver, - struct pci_driver *pdriver) -{ - struct drm_device *dev, *tmp; - - DRM_DEBUG("\n"); - - if (!(driver->driver_features & DRIVER_LEGACY)) { - WARN_ON(1); - } else { - mutex_lock(&legacy_dev_list_lock); - list_for_each_entry_safe(dev, tmp, &legacy_dev_list, - legacy_dev_list) { - if (dev->driver == driver) { - list_del(&dev->legacy_dev_list); - drm_put_dev(dev); - } - } - mutex_unlock(&legacy_dev_list_lock); - } - DRM_INFO("Module unloaded\n"); -} -EXPORT_SYMBOL(drm_legacy_pci_exit); - -#endif diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c deleted file mode 100644 index f4e6184d1877..000000000000 --- a/drivers/gpu/drm/drm_scatter.c +++ /dev/null @@ -1,220 +0,0 @@ -/* - * \file drm_scatter.c - * IOCTLs to manage scatter/gather memory - * - * \author Gareth Hughes - */ - -/* - * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com - * - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include -#include -#include - -#include -#include -#include - -#include "drm_legacy.h" - -#define DEBUG_SCATTER 0 - -static void drm_sg_cleanup(struct drm_sg_mem * entry) -{ - struct page *page; - int i; - - for (i = 0; i < entry->pages; i++) { - page = entry->pagelist[i]; - if (page) - ClearPageReserved(page); - } - - vfree(entry->virtual); - - kfree(entry->busaddr); - kfree(entry->pagelist); - kfree(entry); -} - -void drm_legacy_sg_cleanup(struct drm_device *dev) -{ - if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg && - drm_core_check_feature(dev, DRIVER_LEGACY)) { - drm_sg_cleanup(dev->sg); - dev->sg = NULL; - } -} -#ifdef _LP64 -# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1))) -#else -# define ScatterHandle(x) (unsigned int)(x) -#endif - -int drm_legacy_sg_alloc(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_scatter_gather *request = data; - struct drm_sg_mem *entry; - unsigned long pages, i, j; - - DRM_DEBUG("\n"); - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_SG)) - return -EOPNOTSUPP; - - if (request->size > SIZE_MAX - PAGE_SIZE) - return -EINVAL; - - if (dev->sg) - return -EINVAL; - - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; - DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages); - - entry->pages = pages; - entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL); - if (!entry->pagelist) { - kfree(entry); - return -ENOMEM; - } - - entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL); - if (!entry->busaddr) { - kfree(entry->pagelist); - kfree(entry); - return -ENOMEM; - } - - entry->virtual = vmalloc_32(pages << PAGE_SHIFT); - if (!entry->virtual) { - kfree(entry->busaddr); - kfree(entry->pagelist); - kfree(entry); - return -ENOMEM; - } - - /* This also forces the mapping of COW pages, so our page list - * will be valid. Please don't remove it... - */ - memset(entry->virtual, 0, pages << PAGE_SHIFT); - - entry->handle = ScatterHandle((unsigned long)entry->virtual); - - DRM_DEBUG("handle = %08lx\n", entry->handle); - DRM_DEBUG("virtual = %p\n", entry->virtual); - - for (i = (unsigned long)entry->virtual, j = 0; j < pages; - i += PAGE_SIZE, j++) { - entry->pagelist[j] = vmalloc_to_page((void *)i); - if (!entry->pagelist[j]) - goto failed; - SetPageReserved(entry->pagelist[j]); - } - - request->handle = entry->handle; - - dev->sg = entry; - -#if DEBUG_SCATTER - /* Verify that each page points to its virtual address, and vice - * versa. 
- */ - { - int error = 0; - - for (i = 0; i < pages; i++) { - unsigned long *tmp; - - tmp = page_address(entry->pagelist[i]); - for (j = 0; - j < PAGE_SIZE / sizeof(unsigned long); - j++, tmp++) { - *tmp = 0xcafebabe; - } - tmp = (unsigned long *)((u8 *) entry->virtual + - (PAGE_SIZE * i)); - for (j = 0; - j < PAGE_SIZE / sizeof(unsigned long); - j++, tmp++) { - if (*tmp != 0xcafebabe && error == 0) { - error = 1; - DRM_ERROR("Scatter allocation error, " - "pagelist does not match " - "virtual mapping\n"); - } - } - tmp = page_address(entry->pagelist[i]); - for (j = 0; - j < PAGE_SIZE / sizeof(unsigned long); - j++, tmp++) { - *tmp = 0; - } - } - if (error == 0) - DRM_ERROR("Scatter allocation matches pagelist\n"); - } -#endif - - return 0; - - failed: - drm_sg_cleanup(entry); - return -ENOMEM; -} - -int drm_legacy_sg_free(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_scatter_gather *request = data; - struct drm_sg_mem *entry; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_SG)) - return -EOPNOTSUPP; - - entry = dev->sg; - dev->sg = NULL; - - if (!entry || entry->handle != request->handle) - return -EINVAL; - - DRM_DEBUG("virtual = %p\n", entry->virtual); - - drm_sg_cleanup(entry); - - return 0; -} diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c deleted file mode 100644 index 87c9fe55dec7..000000000000 --- a/drivers/gpu/drm/drm_vm.c +++ /dev/null @@ -1,665 +0,0 @@ -/* - * \file drm_vm.c - * Memory mapping for DRM - * - * \author Rickard E. (Rik) Faith - * \author Gareth Hughes - */ - -/* - * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include -#include -#include -#include -#include - -#if defined(__ia64__) -#include -#include -#endif -#include - -#include -#include -#include -#include -#include - -#include "drm_internal.h" -#include "drm_legacy.h" - -struct drm_vma_entry { - struct list_head head; - struct vm_area_struct *vma; - pid_t pid; -}; - -static void drm_vm_open(struct vm_area_struct *vma); -static void drm_vm_close(struct vm_area_struct *vma); - -static pgprot_t drm_io_prot(struct drm_local_map *map, - struct vm_area_struct *vma) -{ - pgprot_t tmp = vm_get_page_prot(vma->vm_flags); - -#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \ - defined(__mips__) || defined(__loongarch__) - if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING)) - tmp = pgprot_noncached(tmp); - else - tmp = pgprot_writecombine(tmp); -#elif defined(__ia64__) - if (efi_range_is_wc(vma->vm_start, vma->vm_end - - vma->vm_start)) - tmp = pgprot_writecombine(tmp); - else - tmp = pgprot_noncached(tmp); -#elif defined(__sparc__) || defined(__arm__) - tmp = pgprot_noncached(tmp); -#endif - return tmp; -} - -static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) -{ - pgprot_t tmp = vm_get_page_prot(vma->vm_flags); - -#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE) - tmp = pgprot_noncached_wc(tmp); -#endif - return tmp; -} - -/* - * \c fault method for AGP virtual memory. - * - * \param vma virtual memory area. - * \param address access address. - * \return pointer to the page structure. - * - * Find the right map and if it's AGP memory find the real physical page to - * map, get the page, increment the use count and return it. - */ -#if IS_ENABLED(CONFIG_AGP) -static vm_fault_t drm_vm_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_local_map *map = NULL; - struct drm_map_list *r_list; - struct drm_hash_item *hash; - - /* - * Find the right map - */ - if (!dev->agp) - goto vm_fault_error; - - if (!dev->agp || !dev->agp->cant_use_aperture) - goto vm_fault_error; - - if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) - goto vm_fault_error; - - r_list = drm_hash_entry(hash, struct drm_map_list, hash); - map = r_list->map; - - if (map && map->type == _DRM_AGP) { - /* - * Using vm_pgoff as a selector forces us to use this unusual - * addressing scheme. 
- */ - resource_size_t offset = vmf->address - vma->vm_start; - resource_size_t baddr = map->offset + offset; - struct drm_agp_mem *agpmem; - struct page *page; - -#ifdef __alpha__ - /* - * Adjust to a bus-relative address - */ - baddr -= dev->hose->mem_space->start; -#endif - - /* - * It's AGP memory - find the real physical page to map - */ - list_for_each_entry(agpmem, &dev->agp->memory, head) { - if (agpmem->bound <= baddr && - agpmem->bound + agpmem->pages * PAGE_SIZE > baddr) - break; - } - - if (&agpmem->head == &dev->agp->memory) - goto vm_fault_error; - - /* - * Get the page, inc the use count, and return it - */ - offset = (baddr - agpmem->bound) >> PAGE_SHIFT; - page = agpmem->memory->pages[offset]; - get_page(page); - vmf->page = page; - - DRM_DEBUG - ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n", - (unsigned long long)baddr, - agpmem->memory->pages[offset], - (unsigned long long)offset, - page_count(page)); - return 0; - } -vm_fault_error: - return VM_FAULT_SIGBUS; /* Disallow mremap */ -} -#else -static vm_fault_t drm_vm_fault(struct vm_fault *vmf) -{ - return VM_FAULT_SIGBUS; -} -#endif - -/* - * \c nopage method for shared virtual memory. - * - * \param vma virtual memory area. - * \param address access address. - * \return pointer to the page structure. - * - * Get the mapping, find the real physical page to map, get the page, and - * return it. - */ -static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_local_map *map = vma->vm_private_data; - unsigned long offset; - unsigned long i; - struct page *page; - - if (!map) - return VM_FAULT_SIGBUS; /* Nothing allocated */ - - offset = vmf->address - vma->vm_start; - i = (unsigned long)map->handle + offset; - page = vmalloc_to_page((void *)i); - if (!page) - return VM_FAULT_SIGBUS; - get_page(page); - vmf->page = page; - - DRM_DEBUG("shm_fault 0x%lx\n", offset); - return 0; -} - -/* - * \c close method for shared virtual memory. - * - * \param vma virtual memory area. - * - * Deletes map information if we are the last - * person to close a mapping and it's not in the global maplist. - */ -static void drm_vm_shm_close(struct vm_area_struct *vma) -{ - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_vma_entry *pt, *temp; - struct drm_local_map *map; - struct drm_map_list *r_list; - int found_maps = 0; - - DRM_DEBUG("0x%08lx,0x%08lx\n", - vma->vm_start, vma->vm_end - vma->vm_start); - - map = vma->vm_private_data; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { - if (pt->vma->vm_private_data == map) - found_maps++; - if (pt->vma == vma) { - list_del(&pt->head); - kfree(pt); - } - } - - /* We were the only map that was found */ - if (found_maps == 1 && map->flags & _DRM_REMOVABLE) { - /* Check to see if we are in the maplist, if we are not, then - * we delete this mappings information. 
- */ - found_maps = 0; - list_for_each_entry(r_list, &dev->maplist, head) { - if (r_list->map == map) - found_maps++; - } - - if (!found_maps) { - switch (map->type) { - case _DRM_REGISTERS: - case _DRM_FRAME_BUFFER: - arch_phys_wc_del(map->mtrr); - iounmap(map->handle); - break; - case _DRM_SHM: - vfree(map->handle); - break; - case _DRM_AGP: - case _DRM_SCATTER_GATHER: - break; - case _DRM_CONSISTENT: - dma_free_coherent(dev->dev, - map->size, - map->handle, - map->offset); - break; - } - kfree(map); - } - } - mutex_unlock(&dev->struct_mutex); -} - -/* - * \c fault method for DMA virtual memory. - * - * \param address access address. - * \return pointer to the page structure. - * - * Determine the page number from the page offset and get it from drm_device_dma::pagelist. - */ -static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_device_dma *dma = dev->dma; - unsigned long offset; - unsigned long page_nr; - struct page *page; - - if (!dma) - return VM_FAULT_SIGBUS; /* Error */ - if (!dma->pagelist) - return VM_FAULT_SIGBUS; /* Nothing allocated */ - - offset = vmf->address - vma->vm_start; - /* vm_[pg]off[set] should be 0 */ - page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */ - page = virt_to_page((void *)dma->pagelist[page_nr]); - - get_page(page); - vmf->page = page; - - DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr); - return 0; -} - -/* - * \c fault method for scatter-gather virtual memory. - * - * \param address access address. - * \return pointer to the page structure. - * - * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist. 
- */ -static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_local_map *map = vma->vm_private_data; - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_sg_mem *entry = dev->sg; - unsigned long offset; - unsigned long map_offset; - unsigned long page_offset; - struct page *page; - - if (!entry) - return VM_FAULT_SIGBUS; /* Error */ - if (!entry->pagelist) - return VM_FAULT_SIGBUS; /* Nothing allocated */ - - offset = vmf->address - vma->vm_start; - map_offset = map->offset - (unsigned long)dev->sg->virtual; - page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); - page = entry->pagelist[page_offset]; - get_page(page); - vmf->page = page; - - return 0; -} - -/** AGP virtual memory operations */ -static const struct vm_operations_struct drm_vm_ops = { - .fault = drm_vm_fault, - .open = drm_vm_open, - .close = drm_vm_close, -}; - -/** Shared virtual memory operations */ -static const struct vm_operations_struct drm_vm_shm_ops = { - .fault = drm_vm_shm_fault, - .open = drm_vm_open, - .close = drm_vm_shm_close, -}; - -/** DMA virtual memory operations */ -static const struct vm_operations_struct drm_vm_dma_ops = { - .fault = drm_vm_dma_fault, - .open = drm_vm_open, - .close = drm_vm_close, -}; - -/** Scatter-gather virtual memory operations */ -static const struct vm_operations_struct drm_vm_sg_ops = { - .fault = drm_vm_sg_fault, - .open = drm_vm_open, - .close = drm_vm_close, -}; - -static void drm_vm_open_locked(struct drm_device *dev, - struct vm_area_struct *vma) -{ - struct drm_vma_entry *vma_entry; - - DRM_DEBUG("0x%08lx,0x%08lx\n", - vma->vm_start, vma->vm_end - vma->vm_start); - - vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL); - if (vma_entry) { - vma_entry->vma = vma; - vma_entry->pid = current->pid; - list_add(&vma_entry->head, &dev->vmalist); - } -} - -static void drm_vm_open(struct vm_area_struct *vma) -{ - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - - mutex_lock(&dev->struct_mutex); - drm_vm_open_locked(dev, vma); - mutex_unlock(&dev->struct_mutex); -} - -static void drm_vm_close_locked(struct drm_device *dev, - struct vm_area_struct *vma) -{ - struct drm_vma_entry *pt, *temp; - - DRM_DEBUG("0x%08lx,0x%08lx\n", - vma->vm_start, vma->vm_end - vma->vm_start); - - list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { - if (pt->vma == vma) { - list_del(&pt->head); - kfree(pt); - break; - } - } -} - -/* - * \c close method for all virtual memory types. - * - * \param vma virtual memory area. - * - * Search the \p vma private data entry in drm_device::vmalist, unlink it, and - * free it. - */ -static void drm_vm_close(struct vm_area_struct *vma) -{ - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - - mutex_lock(&dev->struct_mutex); - drm_vm_close_locked(dev, vma); - mutex_unlock(&dev->struct_mutex); -} - -/* - * mmap DMA memory. - * - * \param file_priv DRM file private. - * \param vma virtual memory area. - * \return zero on success or a negative number on failure. - * - * Sets the virtual memory area operations structure to vm_dma_ops, the file - * pointer, and calls vm_open(). 
- */ -static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *priv = filp->private_data; - struct drm_device *dev; - struct drm_device_dma *dma; - unsigned long length = vma->vm_end - vma->vm_start; - - dev = priv->minor->dev; - dma = dev->dma; - DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", - vma->vm_start, vma->vm_end, vma->vm_pgoff); - - /* Length must match exact page count */ - if (!dma || (length >> PAGE_SHIFT) != dma->page_count) { - return -EINVAL; - } - - if (!capable(CAP_SYS_ADMIN) && - (dma->flags & _DRM_DMA_USE_PCI_RO)) { - vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE); -#if defined(__i386__) || defined(__x86_64__) - pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; -#else - /* Ye gads this is ugly. With more thought - we could move this up higher and use - `protection_map' instead. */ - vma->vm_page_prot = - __pgprot(pte_val - (pte_wrprotect - (__pte(pgprot_val(vma->vm_page_prot))))); -#endif - } - - vma->vm_ops = &drm_vm_dma_ops; - - vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); - - drm_vm_open_locked(dev, vma); - return 0; -} - -static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev) -{ -#ifdef __alpha__ - return dev->hose->dense_mem_base; -#else - return 0; -#endif -} - -/* - * mmap DMA memory. - * - * \param file_priv DRM file private. - * \param vma virtual memory area. - * \return zero on success or a negative number on failure. - * - * If the virtual memory area has no offset associated with it then it's a DMA - * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist, - * checks that the restricted flag is not set, sets the virtual memory operations - * according to the mapping type and remaps the pages. Finally sets the file - * pointer and calls vm_open(). - */ -static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_local_map *map = NULL; - resource_size_t offset = 0; - struct drm_hash_item *hash; - - DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", - vma->vm_start, vma->vm_end, vma->vm_pgoff); - - if (!priv->authenticated) - return -EACCES; - - /* We check for "dma". On Apple's UniNorth, it's valid to have - * the AGP mapped at physical address 0 - * --BenH. - */ - if (!vma->vm_pgoff -#if IS_ENABLED(CONFIG_AGP) - && (!dev->agp - || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE) -#endif - ) - return drm_mmap_dma(filp, vma); - - if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) { - DRM_ERROR("Could not find map\n"); - return -EINVAL; - } - - map = drm_hash_entry(hash, struct drm_map_list, hash)->map; - if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) - return -EPERM; - - /* Check for valid size. */ - if (map->size < vma->vm_end - vma->vm_start) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) { - vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE); -#if defined(__i386__) || defined(__x86_64__) - pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; -#else - /* Ye gads this is ugly. With more thought - we could move this up higher and use - `protection_map' instead. 
*/ - vma->vm_page_prot = - __pgprot(pte_val - (pte_wrprotect - (__pte(pgprot_val(vma->vm_page_prot))))); -#endif - } - - switch (map->type) { -#if !defined(__arm__) - case _DRM_AGP: - if (dev->agp && dev->agp->cant_use_aperture) { - /* - * On some platforms we can't talk to bus dma address from the CPU, so for - * memory of type DRM_AGP, we'll deal with sorting out the real physical - * pages and mappings in fault() - */ -#if defined(__powerpc__) - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); -#endif - vma->vm_ops = &drm_vm_ops; - break; - } - fallthrough; /* to _DRM_FRAME_BUFFER... */ -#endif - case _DRM_FRAME_BUFFER: - case _DRM_REGISTERS: - offset = drm_core_get_reg_ofs(dev); - vma->vm_page_prot = drm_io_prot(map, vma); - if (io_remap_pfn_range(vma, vma->vm_start, - (map->offset + offset) >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx," - " offset = 0x%llx\n", - map->type, - vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset)); - - vma->vm_ops = &drm_vm_ops; - break; - case _DRM_CONSISTENT: - /* Consistent memory is really like shared memory. But - * it's allocated in a different way, so avoid fault */ - if (remap_pfn_range(vma, vma->vm_start, - page_to_pfn(virt_to_page(map->handle)), - vma->vm_end - vma->vm_start, vma->vm_page_prot)) - return -EAGAIN; - vma->vm_page_prot = drm_dma_prot(map->type, vma); - fallthrough; /* to _DRM_SHM */ - case _DRM_SHM: - vma->vm_ops = &drm_vm_shm_ops; - vma->vm_private_data = (void *)map; - break; - case _DRM_SCATTER_GATHER: - vma->vm_ops = &drm_vm_sg_ops; - vma->vm_private_data = (void *)map; - vma->vm_page_prot = drm_dma_prot(map->type, vma); - break; - default: - return -EINVAL; /* This should never happen. */ - } - vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); - - drm_vm_open_locked(dev, vma); - return 0; -} - -int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->minor->dev; - int ret; - - if (drm_dev_is_unplugged(dev)) - return -ENODEV; - - mutex_lock(&dev->struct_mutex); - ret = drm_mmap_locked(filp, vma); - mutex_unlock(&dev->struct_mutex); - - return ret; -} -EXPORT_SYMBOL(drm_legacy_mmap); - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_vma_flush(struct drm_device *dev) -{ - struct drm_vma_entry *vma, *vma_temp; - - /* Clear vma list (only needed for legacy drivers) */ - list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { - list_del(&vma->head); - kfree(vma); - } -} -#endif diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h index ba248ca8866f..50131383ed81 100644 --- a/include/drm/drm_auth.h +++ b/include/drm/drm_auth.h @@ -33,24 +33,6 @@ #include struct drm_file; -struct drm_hw_lock; - -/* - * Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for - * include ordering reasons. - * - * DO NOT USE. - */ -struct drm_lock_data { - struct drm_hw_lock *hw_lock; - struct drm_file *file_priv; - wait_queue_head_t lock_queue; - unsigned long lock_time; - spinlock_t spinlock; - uint32_t kernel_waiters; - uint32_t user_waiters; - int idle_has_lock; -}; /** * struct drm_master - drm master structure @@ -145,10 +127,6 @@ struct drm_master { * Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex. 
*/ struct idr lessee_idr; - /* private: */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) - struct drm_lock_data lock; -#endif }; struct drm_master *drm_master_get(struct drm_master *master); diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index c490977ee250..63767cf24371 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -6,7 +6,6 @@ #include #include -#include #include struct drm_driver; @@ -153,8 +152,8 @@ struct drm_device { * * Lock for others (not &drm_minor.master and &drm_file.is_master) * - * WARNING: - * Only drivers annotated with DRIVER_LEGACY should be using this. + * TODO: This lock used to be the BKL of the DRM subsystem. Move the + * lock into i915, which is the only remaining user. */ struct mutex struct_mutex; @@ -317,72 +316,6 @@ struct drm_device { * Root directory for debugfs files. */ struct dentry *debugfs_root; - - /* Everything below here is for legacy driver, never use! */ - /* private: */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) - /* List of devices per driver for stealth attach cleanup */ - struct list_head legacy_dev_list; - -#ifdef __alpha__ - /** @hose: PCI hose, only used on ALPHA platforms. */ - struct pci_controller *hose; -#endif - - /* AGP data */ - struct drm_agp_head *agp; - - /* Context handle management - linked list of context handles */ - struct list_head ctxlist; - - /* Context handle management - mutex for &ctxlist */ - struct mutex ctxlist_mutex; - - /* Context handle management */ - struct idr ctx_idr; - - /* Memory management - linked list of regions */ - struct list_head maplist; - - /* Memory management - user token hash table for maps */ - struct drm_open_hash map_hash; - - /* Context handle management - list of vmas (for debugging) */ - struct list_head vmalist; - - /* Optional pointer for DMA support */ - struct drm_device_dma *dma; - - /* Context swapping flag */ - __volatile__ long context_flag; - - /* Last current context */ - int last_context; - - /* Lock for &buf_use and a few other things. */ - spinlock_t buf_lock; - - /* Usage counter for buffers in use -- cannot alloc */ - int buf_use; - - /* Buffer allocation in progress */ - atomic_t buf_alloc; - - struct { - int context; - struct drm_hw_lock *lock; - } sigdata; - - struct drm_local_map *agp_buffer_map; - unsigned int agp_buffer_token; - - /* Scatter gather memory */ - struct drm_sg_mem *sg; - - /* IRQs */ - bool irq_enabled; - int irq; -#endif }; #endif diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index ea36aa79dca2..8878260d7529 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -442,25 +442,6 @@ struct drm_driver { * some examples. */ const struct file_operations *fops; - -#ifdef CONFIG_DRM_LEGACY - /* Everything below here is for legacy driver, never use! 
*/ - /* private: */ - - int (*firstopen) (struct drm_device *); - void (*preclose) (struct drm_device *, struct drm_file *file_priv); - int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); - int (*dma_quiescent) (struct drm_device *); - int (*context_dtor) (struct drm_device *dev, int context); - irqreturn_t (*irq_handler)(int irq, void *arg); - void (*irq_preinstall)(struct drm_device *dev); - int (*irq_postinstall)(struct drm_device *dev); - void (*irq_uninstall)(struct drm_device *dev); - u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe); - int (*enable_vblank)(struct drm_device *dev, unsigned int pipe); - void (*disable_vblank)(struct drm_device *dev, unsigned int pipe); - int dev_priv_size; -#endif }; void *__devm_drm_dev_alloc(struct device *parent, diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 8f35dcea82d3..ab230d3af138 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -386,11 +386,6 @@ struct drm_file { * Per-file buffer caches used by the PRIME buffer sharing code. */ struct drm_prime_file_private prime; - - /* private: */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) - unsigned long lock_count; /* DRI1 legacy lock count */ -#endif }; /** diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h deleted file mode 100644 index 0fc85418aad8..000000000000 --- a/include/drm/drm_legacy.h +++ /dev/null @@ -1,331 +0,0 @@ -#ifndef __DRM_DRM_LEGACY_H__ -#define __DRM_DRM_LEGACY_H__ -/* - * Legacy driver interfaces for the Direct Rendering Manager - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * Copyright (c) 2009-2010, Code Aurora Forum. - * All rights reserved. - * Copyright © 2014 Intel Corporation - * Daniel Vetter - * - * Author: Rickard E. (Rik) Faith - * Author: Gareth Hughes - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include - -#include -#include - -struct drm_device; -struct drm_driver; -struct file; -struct pci_driver; - -/* - * Legacy Support for palateontologic DRM drivers - * - * If you add a new driver and it uses any of these functions or structures, - * you're doing it terribly wrong. - */ - -/* - * Hash-table Support - */ - -struct drm_hash_item { - struct hlist_node head; - unsigned long key; -}; - -struct drm_open_hash { - struct hlist_head *table; - u8 order; -}; - -/** - * DMA buffer. 
- */ -struct drm_buf { - int idx; /**< Index into master buflist */ - int total; /**< Buffer size */ - int order; /**< log-base-2(total) */ - int used; /**< Amount of buffer in use (for DMA) */ - unsigned long offset; /**< Byte offset (used internally) */ - void *address; /**< Address of buffer */ - unsigned long bus_address; /**< Bus address of buffer */ - struct drm_buf *next; /**< Kernel-only: used for free list */ - __volatile__ int waiting; /**< On kernel DMA queue */ - __volatile__ int pending; /**< On hardware DMA queue */ - struct drm_file *file_priv; /**< Private of holding file descr */ - int context; /**< Kernel queue for this buffer */ - int while_locked; /**< Dispatch this buffer while locked */ - enum { - DRM_LIST_NONE = 0, - DRM_LIST_FREE = 1, - DRM_LIST_WAIT = 2, - DRM_LIST_PEND = 3, - DRM_LIST_PRIO = 4, - DRM_LIST_RECLAIM = 5 - } list; /**< Which list we're on */ - - int dev_priv_size; /**< Size of buffer private storage */ - void *dev_private; /**< Per-buffer private storage */ -}; - -typedef struct drm_dma_handle { - dma_addr_t busaddr; - void *vaddr; - size_t size; -} drm_dma_handle_t; - -/** - * Buffer entry. There is one of this for each buffer size order. - */ -struct drm_buf_entry { - int buf_size; /**< size */ - int buf_count; /**< number of buffers */ - struct drm_buf *buflist; /**< buffer list */ - int seg_count; - int page_order; - struct drm_dma_handle **seglist; - - int low_mark; /**< Low water mark */ - int high_mark; /**< High water mark */ -}; - -/** - * DMA data. - */ -struct drm_device_dma { - - struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ - int buf_count; /**< total number of buffers */ - struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ - int seg_count; - int page_count; /**< number of pages */ - unsigned long *pagelist; /**< page list */ - unsigned long byte_count; - enum { - _DRM_DMA_USE_AGP = 0x01, - _DRM_DMA_USE_SG = 0x02, - _DRM_DMA_USE_FB = 0x04, - _DRM_DMA_USE_PCI_RO = 0x08 - } flags; - -}; - -/** - * Scatter-gather memory. 
- */ -struct drm_sg_mem { - unsigned long handle; - void *virtual; - int pages; - struct page **pagelist; - dma_addr_t *busaddr; -}; - -/** - * Kernel side of a mapping - */ -struct drm_local_map { - dma_addr_t offset; /**< Requested physical address (0 for SAREA)*/ - unsigned long size; /**< Requested physical size (bytes) */ - enum drm_map_type type; /**< Type of memory to map */ - enum drm_map_flags flags; /**< Flags */ - void *handle; /**< User-space: "Handle" to pass to mmap() */ - /**< Kernel-space: kernel-virtual address */ - int mtrr; /**< MTRR slot used */ -}; - -typedef struct drm_local_map drm_local_map_t; - -/** - * Mappings list - */ -struct drm_map_list { - struct list_head head; /**< list head */ - struct drm_hash_item hash; - struct drm_local_map *map; /**< mapping */ - uint64_t user_token; - struct drm_master *master; -}; - -int drm_legacy_addmap(struct drm_device *d, resource_size_t offset, - unsigned int size, enum drm_map_type type, - enum drm_map_flags flags, struct drm_local_map **map_p); -struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token); -void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map); -int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map); -struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev); -int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma); - -int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req); -int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req); - -/** - * Test that the hardware lock is held by the caller, returning otherwise. - * - * \param dev DRM device. - * \param filp file pointer of the caller. - */ -#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \ -do { \ - if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \ - _file_priv->master->lock.file_priv != _file_priv) { \ - DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ - __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\ - _file_priv->master->lock.file_priv, _file_priv); \ - return -EINVAL; \ - } \ -} while (0) - -void drm_legacy_idlelock_take(struct drm_lock_data *lock); -void drm_legacy_idlelock_release(struct drm_lock_data *lock); - -/* drm_irq.c */ -int drm_legacy_irq_uninstall(struct drm_device *dev); - -/* drm_pci.c */ - -#ifdef CONFIG_PCI - -int drm_legacy_pci_init(const struct drm_driver *driver, - struct pci_driver *pdriver); -void drm_legacy_pci_exit(const struct drm_driver *driver, - struct pci_driver *pdriver); - -#else - -static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, - size_t size, size_t align) -{ - return NULL; -} - -static inline void drm_pci_free(struct drm_device *dev, - struct drm_dma_handle *dmah) -{ -} - -static inline int drm_legacy_pci_init(const struct drm_driver *driver, - struct pci_driver *pdriver) -{ - return -EINVAL; -} - -static inline void drm_legacy_pci_exit(const struct drm_driver *driver, - struct pci_driver *pdriver) -{ -} - -#endif - -/* - * AGP Support - */ - -struct drm_agp_head { - struct agp_kern_info agp_info; - struct list_head memory; - unsigned long mode; - struct agp_bridge_data *bridge; - int enabled; - int acquired; - unsigned long base; - int agp_mtrr; - int cant_use_aperture; - unsigned long page_mask; -}; - -#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP) -struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev); -int drm_legacy_agp_acquire(struct drm_device *dev); -int drm_legacy_agp_release(struct 
drm_device *dev); -int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); -int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info); -int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); -int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); -int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); -int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); -#else -static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev) -{ - return NULL; -} - -static inline int drm_legacy_agp_acquire(struct drm_device *dev) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_release(struct drm_device *dev) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_enable(struct drm_device *dev, - struct drm_agp_mode mode) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_info(struct drm_device *dev, - struct drm_agp_info *info) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_alloc(struct drm_device *dev, - struct drm_agp_buffer *request) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_free(struct drm_device *dev, - struct drm_agp_buffer *request) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_unbind(struct drm_device *dev, - struct drm_agp_binding *request) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_bind(struct drm_device *dev, - struct drm_agp_binding *request) -{ - return -ENODEV; -} -#endif - -/* drm_memory.c */ -void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev); -void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); -void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev); - -#endif /* __DRM_DRM_LEGACY_H__ */ -- cgit v1.2.3 From 87be41f09ac92cee6d0124636f49fac21dfd445e Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 22 Nov 2023 13:09:42 +0100 Subject: char/agp: Remove frontend code The AGP subsystem supports a user-space interface via /dev/agpgart. It is only enabled with DRM support for mode setting in user space. (i.e., CONFIG_DRM_LEGACY). All of that DRM code has been removed and the option will go away. Hence remove the AGP frontend. Modern DRM drivers with kernel mode setting handle AGP support internally. 
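
For reference, a rough sketch (not taken from this series) of what "handling AGP
internally" can look like: a kernel-mode-setting driver may talk to the in-kernel
agpgart backend directly, much as radeon does, with no need for the /dev/agpgart
frontend. The function name example_agp_setup and the 16-page allocation are
invented purely for illustration.

  #include <linux/agp_backend.h>
  #include <linux/pci.h>

  /* Illustrative only: acquire the AGP bridge, bind some pages, tear down. */
  static int example_agp_setup(struct pci_dev *pdev)
  {
          struct agp_bridge_data *bridge;
          struct agp_kern_info info;
          struct agp_memory *mem;
          int ret;

          /* Acquire the bridge and program its advertised mode. */
          bridge = agp_backend_acquire(pdev);
          if (!bridge)
                  return -ENODEV;
          agp_copy_info(bridge, &info);
          agp_enable(bridge, info.mode);

          /* Allocate 16 pages of AGP memory and bind them at GART offset 0. */
          mem = agp_allocate_memory(bridge, 16, AGP_USER_MEMORY);
          if (!mem) {
                  ret = -ENOMEM;
                  goto out_release;
          }
          ret = agp_bind_memory(mem, 0);
          if (ret)
                  goto out_free;

          /* ... use the aperture ... */

          agp_unbind_memory(mem);
  out_free:
          agp_free_memory(mem);
  out_release:
          agp_backend_release(bridge);
          return ret;
  }
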
Signed-off-by: Thomas Zimmermann Reviewed-by: David Airlie Reviewed-by: Daniel Vetter Acked-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20231122122449.11588-14-tzimmermann@suse.de --- drivers/char/agp/Makefile | 6 - drivers/char/agp/agp.h | 9 - drivers/char/agp/backend.c | 11 - drivers/char/agp/compat_ioctl.c | 291 ----------- drivers/char/agp/compat_ioctl.h | 106 ---- drivers/char/agp/frontend.c | 1068 --------------------------------------- 6 files changed, 1491 deletions(-) delete mode 100644 drivers/char/agp/compat_ioctl.c delete mode 100644 drivers/char/agp/compat_ioctl.h delete mode 100644 drivers/char/agp/frontend.c diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile index 25834557e486..43b09cf193bb 100644 --- a/drivers/char/agp/Makefile +++ b/drivers/char/agp/Makefile @@ -1,12 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 agpgart-y := backend.o generic.o isoch.o -ifeq ($(CONFIG_DRM_LEGACY),y) -agpgart-$(CONFIG_COMPAT) += compat_ioctl.o -agpgart-y += frontend.o -endif - - obj-$(CONFIG_AGP) += agpgart.o obj-$(CONFIG_AGP_ALI) += ali-agp.o obj-$(CONFIG_AGP_ATI) += ati-agp.o diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h index 8771dcc9b8e2..5c36ab85f80b 100644 --- a/drivers/char/agp/agp.h +++ b/drivers/char/agp/agp.h @@ -185,15 +185,6 @@ void agp_put_bridge(struct agp_bridge_data *bridge); int agp_add_bridge(struct agp_bridge_data *bridge); void agp_remove_bridge(struct agp_bridge_data *bridge); -/* Frontend routines. */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) -int agp_frontend_initialize(void); -void agp_frontend_cleanup(void); -#else -static inline int agp_frontend_initialize(void) { return 0; } -static inline void agp_frontend_cleanup(void) {} -#endif - /* Generic routines. */ void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode); int agp_generic_create_gatt_table(struct agp_bridge_data *bridge); diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c index 0e19c600db53..1776afd3ee07 100644 --- a/drivers/char/agp/backend.c +++ b/drivers/char/agp/backend.c @@ -293,13 +293,6 @@ int agp_add_bridge(struct agp_bridge_data *bridge) } if (list_empty(&agp_bridges)) { - error = agp_frontend_initialize(); - if (error) { - dev_info(&bridge->dev->dev, - "agp_frontend_initialize() failed\n"); - goto frontend_err; - } - dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n", bridge->driver->fetch_size(), bridge->gart_bus_addr); @@ -308,8 +301,6 @@ int agp_add_bridge(struct agp_bridge_data *bridge) list_add(&bridge->list, &agp_bridges); return 0; -frontend_err: - agp_backend_cleanup(bridge); err_out: module_put(bridge->driver->owner); err_put_bridge: @@ -323,8 +314,6 @@ void agp_remove_bridge(struct agp_bridge_data *bridge) { agp_backend_cleanup(bridge); list_del(&bridge->list); - if (list_empty(&agp_bridges)) - agp_frontend_cleanup(); module_put(bridge->driver->owner); } EXPORT_SYMBOL_GPL(agp_remove_bridge); diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c deleted file mode 100644 index 52ffe1706ce0..000000000000 --- a/drivers/char/agp/compat_ioctl.c +++ /dev/null @@ -1,291 +0,0 @@ -/* - * AGPGART driver frontend compatibility ioctls - * Copyright (C) 2004 Silicon Graphics, Inc. - * Copyright (C) 2002-2003 Dave Jones - * Copyright (C) 1999 Jeff Hartmann - * Copyright (C) 1999 Precision Insight, Inc. - * Copyright (C) 1999 Xi Graphics, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE - * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include -#include -#include -#include -#include -#include -#include "agp.h" -#include "compat_ioctl.h" - -static int compat_agpioc_info_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_info32 userinfo; - struct agp_kern_info kerninfo; - - agp_copy_info(agp_bridge, &kerninfo); - - userinfo.version.major = kerninfo.version.major; - userinfo.version.minor = kerninfo.version.minor; - userinfo.bridge_id = kerninfo.device->vendor | - (kerninfo.device->device << 16); - userinfo.agp_mode = kerninfo.mode; - userinfo.aper_base = (compat_long_t)kerninfo.aper_base; - userinfo.aper_size = kerninfo.aper_size; - userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory; - userinfo.pg_used = kerninfo.current_memory; - - if (copy_to_user(arg, &userinfo, sizeof(userinfo))) - return -EFAULT; - - return 0; -} - -static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_region32 ureserve; - struct agp_region kreserve; - struct agp_client *client; - struct agp_file_private *client_priv; - - DBG(""); - if (copy_from_user(&ureserve, arg, sizeof(ureserve))) - return -EFAULT; - - if ((unsigned) ureserve.seg_count >= ~0U/sizeof(struct agp_segment32)) - return -EFAULT; - - kreserve.pid = ureserve.pid; - kreserve.seg_count = ureserve.seg_count; - - client = agp_find_client_by_pid(kreserve.pid); - - if (kreserve.seg_count == 0) { - /* remove a client */ - client_priv = agp_find_private(kreserve.pid); - - if (client_priv != NULL) { - set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); - set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); - } - if (client == NULL) { - /* client is already removed */ - return 0; - } - return agp_remove_client(kreserve.pid); - } else { - struct agp_segment32 *usegment; - struct agp_segment *ksegment; - int seg; - - if (ureserve.seg_count >= 16384) - return -EINVAL; - - usegment = kmalloc_array(ureserve.seg_count, - sizeof(*usegment), - GFP_KERNEL); - if (!usegment) - return -ENOMEM; - - ksegment = kmalloc_array(kreserve.seg_count, - sizeof(*ksegment), - GFP_KERNEL); - if (!ksegment) { - kfree(usegment); - return -ENOMEM; - } - - if (copy_from_user(usegment, (void __user *) ureserve.seg_list, - sizeof(*usegment) * ureserve.seg_count)) { - kfree(usegment); - kfree(ksegment); - return -EFAULT; - } - - for (seg = 0; seg < ureserve.seg_count; seg++) { - ksegment[seg].pg_start = usegment[seg].pg_start; - ksegment[seg].pg_count = 
usegment[seg].pg_count; - ksegment[seg].prot = usegment[seg].prot; - } - - kfree(usegment); - kreserve.seg_list = ksegment; - - if (client == NULL) { - /* Create the client and add the segment */ - client = agp_create_client(kreserve.pid); - - if (client == NULL) { - kfree(ksegment); - return -ENOMEM; - } - client_priv = agp_find_private(kreserve.pid); - - if (client_priv != NULL) { - set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); - set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); - } - } - return agp_create_segment(client, &kreserve); - } - /* Will never really happen */ - return -EINVAL; -} - -static int compat_agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_memory *memory; - struct agp_allocate32 alloc; - - DBG(""); - if (copy_from_user(&alloc, arg, sizeof(alloc))) - return -EFAULT; - - memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); - - if (memory == NULL) - return -ENOMEM; - - alloc.key = memory->key; - alloc.physical = memory->physical; - - if (copy_to_user(arg, &alloc, sizeof(alloc))) { - agp_free_memory_wrap(memory); - return -EFAULT; - } - return 0; -} - -static int compat_agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_bind32 bind_info; - struct agp_memory *memory; - - DBG(""); - if (copy_from_user(&bind_info, arg, sizeof(bind_info))) - return -EFAULT; - - memory = agp_find_mem_by_key(bind_info.key); - - if (memory == NULL) - return -EINVAL; - - return agp_bind_memory(memory, bind_info.pg_start); -} - -static int compat_agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_memory *memory; - struct agp_unbind32 unbind; - - DBG(""); - if (copy_from_user(&unbind, arg, sizeof(unbind))) - return -EFAULT; - - memory = agp_find_mem_by_key(unbind.key); - - if (memory == NULL) - return -EINVAL; - - return agp_unbind_memory(memory); -} - -long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - struct agp_file_private *curr_priv = file->private_data; - int ret_val = -ENOTTY; - - mutex_lock(&(agp_fe.agp_mutex)); - - if ((agp_fe.current_controller == NULL) && - (cmd != AGPIOC_ACQUIRE32)) { - ret_val = -EINVAL; - goto ioctl_out; - } - if ((agp_fe.backend_acquired != true) && - (cmd != AGPIOC_ACQUIRE32)) { - ret_val = -EBUSY; - goto ioctl_out; - } - if (cmd != AGPIOC_ACQUIRE32) { - if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) { - ret_val = -EPERM; - goto ioctl_out; - } - /* Use the original pid of the controller, - * in case it's threaded */ - - if (agp_fe.current_controller->pid != curr_priv->my_pid) { - ret_val = -EBUSY; - goto ioctl_out; - } - } - - switch (cmd) { - case AGPIOC_INFO32: - ret_val = compat_agpioc_info_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_ACQUIRE32: - ret_val = agpioc_acquire_wrap(curr_priv); - break; - - case AGPIOC_RELEASE32: - ret_val = agpioc_release_wrap(curr_priv); - break; - - case AGPIOC_SETUP32: - ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_RESERVE32: - ret_val = compat_agpioc_reserve_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_PROTECT32: - ret_val = agpioc_protect_wrap(curr_priv); - break; - - case AGPIOC_ALLOCATE32: - ret_val = compat_agpioc_allocate_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_DEALLOCATE32: - ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg); - break; - - case AGPIOC_BIND32: - ret_val = compat_agpioc_bind_wrap(curr_priv, (void __user *) arg); - break; - - case 
AGPIOC_UNBIND32: - ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_CHIPSET_FLUSH32: - break; - } - -ioctl_out: - DBG("ioctl returns %d\n", ret_val); - mutex_unlock(&(agp_fe.agp_mutex)); - return ret_val; -} - diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h deleted file mode 100644 index f30e0fd97963..000000000000 --- a/drivers/char/agp/compat_ioctl.h +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright (C) 1999 Jeff Hartmann - * Copyright (C) 1999 Precision Insight, Inc. - * Copyright (C) 1999 Xi Graphics, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE - * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef _AGP_COMPAT_IOCTL_H -#define _AGP_COMPAT_IOCTL_H - -#include -#include - -#define AGPIOC_INFO32 _IOR (AGPIOC_BASE, 0, compat_uptr_t) -#define AGPIOC_ACQUIRE32 _IO (AGPIOC_BASE, 1) -#define AGPIOC_RELEASE32 _IO (AGPIOC_BASE, 2) -#define AGPIOC_SETUP32 _IOW (AGPIOC_BASE, 3, compat_uptr_t) -#define AGPIOC_RESERVE32 _IOW (AGPIOC_BASE, 4, compat_uptr_t) -#define AGPIOC_PROTECT32 _IOW (AGPIOC_BASE, 5, compat_uptr_t) -#define AGPIOC_ALLOCATE32 _IOWR(AGPIOC_BASE, 6, compat_uptr_t) -#define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t) -#define AGPIOC_BIND32 _IOW (AGPIOC_BASE, 8, compat_uptr_t) -#define AGPIOC_UNBIND32 _IOW (AGPIOC_BASE, 9, compat_uptr_t) -#define AGPIOC_CHIPSET_FLUSH32 _IO (AGPIOC_BASE, 10) - -struct agp_info32 { - struct agp_version version; /* version of the driver */ - u32 bridge_id; /* bridge vendor/device */ - u32 agp_mode; /* mode info of bridge */ - compat_long_t aper_base; /* base of aperture */ - compat_size_t aper_size; /* size of aperture */ - compat_size_t pg_total; /* max pages (swap + system) */ - compat_size_t pg_system; /* max pages (system) */ - compat_size_t pg_used; /* current pages used */ -}; - -/* - * The "prot" down below needs still a "sleep" flag somehow ... 
- */ -struct agp_segment32 { - compat_off_t pg_start; /* starting page to populate */ - compat_size_t pg_count; /* number of pages */ - compat_int_t prot; /* prot flags for mmap */ -}; - -struct agp_region32 { - compat_pid_t pid; /* pid of process */ - compat_size_t seg_count; /* number of segments */ - struct agp_segment32 *seg_list; -}; - -struct agp_allocate32 { - compat_int_t key; /* tag of allocation */ - compat_size_t pg_count; /* number of pages */ - u32 type; /* 0 == normal, other devspec */ - u32 physical; /* device specific (some devices - * need a phys address of the - * actual page behind the gatt - * table) */ -}; - -struct agp_bind32 { - compat_int_t key; /* tag of allocation */ - compat_off_t pg_start; /* starting page to populate */ -}; - -struct agp_unbind32 { - compat_int_t key; /* tag of allocation */ - u32 priority; /* priority for paging out */ -}; - -extern struct agp_front_data agp_fe; - -int agpioc_acquire_wrap(struct agp_file_private *priv); -int agpioc_release_wrap(struct agp_file_private *priv); -int agpioc_protect_wrap(struct agp_file_private *priv); -int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg); -int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg); -struct agp_file_private *agp_find_private(pid_t pid); -struct agp_client *agp_create_client(pid_t id); -int agp_remove_client(pid_t id); -int agp_create_segment(struct agp_client *client, struct agp_region *region); -void agp_free_memory_wrap(struct agp_memory *memory); -struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type); -struct agp_memory *agp_find_mem_by_key(int key); -struct agp_client *agp_find_client_by_pid(pid_t id); - -#endif /* _AGP_COMPAT_H */ diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c deleted file mode 100644 index 321118a9cfa5..000000000000 --- a/drivers/char/agp/frontend.c +++ /dev/null @@ -1,1068 +0,0 @@ -/* - * AGPGART driver frontend - * Copyright (C) 2004 Silicon Graphics, Inc. - * Copyright (C) 2002-2003 Dave Jones - * Copyright (C) 1999 Jeff Hartmann - * Copyright (C) 1999 Precision Insight, Inc. - * Copyright (C) 1999 Xi Graphics, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE - * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "agp.h" -#include "compat_ioctl.h" - -struct agp_front_data agp_fe; - -struct agp_memory *agp_find_mem_by_key(int key) -{ - struct agp_memory *curr; - - if (agp_fe.current_controller == NULL) - return NULL; - - curr = agp_fe.current_controller->pool; - - while (curr != NULL) { - if (curr->key == key) - break; - curr = curr->next; - } - - DBG("key=%d -> mem=%p", key, curr); - return curr; -} - -static void agp_remove_from_pool(struct agp_memory *temp) -{ - struct agp_memory *prev; - struct agp_memory *next; - - /* Check to see if this is even in the memory pool */ - - DBG("mem=%p", temp); - if (agp_find_mem_by_key(temp->key) != NULL) { - next = temp->next; - prev = temp->prev; - - if (prev != NULL) { - prev->next = next; - if (next != NULL) - next->prev = prev; - - } else { - /* This is the first item on the list */ - if (next != NULL) - next->prev = NULL; - - agp_fe.current_controller->pool = next; - } - } -} - -/* - * Routines for managing each client's segment list - - * These routines handle adding and removing segments - * to each auth'ed client. - */ - -static struct -agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client, - unsigned long offset, - int size, pgprot_t page_prot) -{ - struct agp_segment_priv *seg; - int i; - off_t pg_start; - size_t pg_count; - - pg_start = offset / 4096; - pg_count = size / 4096; - seg = *(client->segments); - - for (i = 0; i < client->num_segments; i++) { - if ((seg[i].pg_start == pg_start) && - (seg[i].pg_count == pg_count) && - (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) { - return seg + i; - } - } - - return NULL; -} - -static void agp_remove_seg_from_client(struct agp_client *client) -{ - DBG("client=%p", client); - - if (client->segments != NULL) { - if (*(client->segments) != NULL) { - DBG("Freeing %p from client %p", *(client->segments), client); - kfree(*(client->segments)); - } - DBG("Freeing %p from client %p", client->segments, client); - kfree(client->segments); - client->segments = NULL; - } -} - -static void agp_add_seg_to_client(struct agp_client *client, - struct agp_segment_priv ** seg, int num_segments) -{ - struct agp_segment_priv **prev_seg; - - prev_seg = client->segments; - - if (prev_seg != NULL) - agp_remove_seg_from_client(client); - - DBG("Adding seg %p (%d segments) to client %p", seg, num_segments, client); - client->num_segments = num_segments; - client->segments = seg; -} - -static pgprot_t agp_convert_mmap_flags(int prot) -{ - unsigned long prot_bits; - - prot_bits = calc_vm_prot_bits(prot, 0) | VM_SHARED; - return vm_get_page_prot(prot_bits); -} - -int agp_create_segment(struct agp_client *client, struct agp_region *region) -{ - struct agp_segment_priv **ret_seg; - struct agp_segment_priv *seg; - struct agp_segment *user_seg; - size_t i; - - seg = kzalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL); - if (seg == NULL) { - kfree(region->seg_list); - region->seg_list = NULL; - return -ENOMEM; - } - user_seg = region->seg_list; - - for (i = 0; i < region->seg_count; i++) { - seg[i].pg_start = user_seg[i].pg_start; - seg[i].pg_count = user_seg[i].pg_count; - seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot); - } - kfree(region->seg_list); - region->seg_list = NULL; - - ret_seg = kmalloc(sizeof(void *), GFP_KERNEL); - if (ret_seg == NULL) { - kfree(seg); - return -ENOMEM; - } - *ret_seg = seg; - 
agp_add_seg_to_client(client, ret_seg, region->seg_count); - return 0; -} - -/* End - Routines for managing each client's segment list */ - -/* This function must only be called when current_controller != NULL */ -static void agp_insert_into_pool(struct agp_memory * temp) -{ - struct agp_memory *prev; - - prev = agp_fe.current_controller->pool; - - if (prev != NULL) { - prev->prev = temp; - temp->next = prev; - } - agp_fe.current_controller->pool = temp; -} - - -/* File private list routines */ - -struct agp_file_private *agp_find_private(pid_t pid) -{ - struct agp_file_private *curr; - - curr = agp_fe.file_priv_list; - - while (curr != NULL) { - if (curr->my_pid == pid) - return curr; - curr = curr->next; - } - - return NULL; -} - -static void agp_insert_file_private(struct agp_file_private * priv) -{ - struct agp_file_private *prev; - - prev = agp_fe.file_priv_list; - - if (prev != NULL) - prev->prev = priv; - priv->next = prev; - agp_fe.file_priv_list = priv; -} - -static void agp_remove_file_private(struct agp_file_private * priv) -{ - struct agp_file_private *next; - struct agp_file_private *prev; - - next = priv->next; - prev = priv->prev; - - if (prev != NULL) { - prev->next = next; - - if (next != NULL) - next->prev = prev; - - } else { - if (next != NULL) - next->prev = NULL; - - agp_fe.file_priv_list = next; - } -} - -/* End - File flag list routines */ - -/* - * Wrappers for agp_free_memory & agp_allocate_memory - * These make sure that internal lists are kept updated. - */ -void agp_free_memory_wrap(struct agp_memory *memory) -{ - agp_remove_from_pool(memory); - agp_free_memory(memory); -} - -struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type) -{ - struct agp_memory *memory; - - memory = agp_allocate_memory(agp_bridge, pg_count, type); - if (memory == NULL) - return NULL; - - agp_insert_into_pool(memory); - return memory; -} - -/* Routines for managing the list of controllers - - * These routines manage the current controller, and the list of - * controllers - */ - -static struct agp_controller *agp_find_controller_by_pid(pid_t id) -{ - struct agp_controller *controller; - - controller = agp_fe.controllers; - - while (controller != NULL) { - if (controller->pid == id) - return controller; - controller = controller->next; - } - - return NULL; -} - -static struct agp_controller *agp_create_controller(pid_t id) -{ - struct agp_controller *controller; - - controller = kzalloc(sizeof(struct agp_controller), GFP_KERNEL); - if (controller == NULL) - return NULL; - - controller->pid = id; - return controller; -} - -static int agp_insert_controller(struct agp_controller *controller) -{ - struct agp_controller *prev_controller; - - prev_controller = agp_fe.controllers; - controller->next = prev_controller; - - if (prev_controller != NULL) - prev_controller->prev = controller; - - agp_fe.controllers = controller; - - return 0; -} - -static void agp_remove_all_clients(struct agp_controller *controller) -{ - struct agp_client *client; - struct agp_client *temp; - - client = controller->clients; - - while (client) { - struct agp_file_private *priv; - - temp = client; - agp_remove_seg_from_client(temp); - priv = agp_find_private(temp->pid); - - if (priv != NULL) { - clear_bit(AGP_FF_IS_VALID, &priv->access_flags); - clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags); - } - client = client->next; - kfree(temp); - } -} - -static void agp_remove_all_memory(struct agp_controller *controller) -{ - struct agp_memory *memory; - struct agp_memory *temp; - - memory = controller->pool; 
- - while (memory) { - temp = memory; - memory = memory->next; - agp_free_memory_wrap(temp); - } -} - -static int agp_remove_controller(struct agp_controller *controller) -{ - struct agp_controller *prev_controller; - struct agp_controller *next_controller; - - prev_controller = controller->prev; - next_controller = controller->next; - - if (prev_controller != NULL) { - prev_controller->next = next_controller; - if (next_controller != NULL) - next_controller->prev = prev_controller; - - } else { - if (next_controller != NULL) - next_controller->prev = NULL; - - agp_fe.controllers = next_controller; - } - - agp_remove_all_memory(controller); - agp_remove_all_clients(controller); - - if (agp_fe.current_controller == controller) { - agp_fe.current_controller = NULL; - agp_fe.backend_acquired = false; - agp_backend_release(agp_bridge); - } - kfree(controller); - return 0; -} - -static void agp_controller_make_current(struct agp_controller *controller) -{ - struct agp_client *clients; - - clients = controller->clients; - - while (clients != NULL) { - struct agp_file_private *priv; - - priv = agp_find_private(clients->pid); - - if (priv != NULL) { - set_bit(AGP_FF_IS_VALID, &priv->access_flags); - set_bit(AGP_FF_IS_CLIENT, &priv->access_flags); - } - clients = clients->next; - } - - agp_fe.current_controller = controller; -} - -static void agp_controller_release_current(struct agp_controller *controller, - struct agp_file_private *controller_priv) -{ - struct agp_client *clients; - - clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags); - clients = controller->clients; - - while (clients != NULL) { - struct agp_file_private *priv; - - priv = agp_find_private(clients->pid); - - if (priv != NULL) - clear_bit(AGP_FF_IS_VALID, &priv->access_flags); - - clients = clients->next; - } - - agp_fe.current_controller = NULL; - agp_fe.used_by_controller = false; - agp_backend_release(agp_bridge); -} - -/* - * Routines for managing client lists - - * These routines are for managing the list of auth'ed clients. 
- */ - -static struct agp_client -*agp_find_client_in_controller(struct agp_controller *controller, pid_t id) -{ - struct agp_client *client; - - if (controller == NULL) - return NULL; - - client = controller->clients; - - while (client != NULL) { - if (client->pid == id) - return client; - client = client->next; - } - - return NULL; -} - -static struct agp_controller *agp_find_controller_for_client(pid_t id) -{ - struct agp_controller *controller; - - controller = agp_fe.controllers; - - while (controller != NULL) { - if ((agp_find_client_in_controller(controller, id)) != NULL) - return controller; - controller = controller->next; - } - - return NULL; -} - -struct agp_client *agp_find_client_by_pid(pid_t id) -{ - struct agp_client *temp; - - if (agp_fe.current_controller == NULL) - return NULL; - - temp = agp_find_client_in_controller(agp_fe.current_controller, id); - return temp; -} - -static void agp_insert_client(struct agp_client *client) -{ - struct agp_client *prev_client; - - prev_client = agp_fe.current_controller->clients; - client->next = prev_client; - - if (prev_client != NULL) - prev_client->prev = client; - - agp_fe.current_controller->clients = client; - agp_fe.current_controller->num_clients++; -} - -struct agp_client *agp_create_client(pid_t id) -{ - struct agp_client *new_client; - - new_client = kzalloc(sizeof(struct agp_client), GFP_KERNEL); - if (new_client == NULL) - return NULL; - - new_client->pid = id; - agp_insert_client(new_client); - return new_client; -} - -int agp_remove_client(pid_t id) -{ - struct agp_client *client; - struct agp_client *prev_client; - struct agp_client *next_client; - struct agp_controller *controller; - - controller = agp_find_controller_for_client(id); - if (controller == NULL) - return -EINVAL; - - client = agp_find_client_in_controller(controller, id); - if (client == NULL) - return -EINVAL; - - prev_client = client->prev; - next_client = client->next; - - if (prev_client != NULL) { - prev_client->next = next_client; - if (next_client != NULL) - next_client->prev = prev_client; - - } else { - if (next_client != NULL) - next_client->prev = NULL; - controller->clients = next_client; - } - - controller->num_clients--; - agp_remove_seg_from_client(client); - kfree(client); - return 0; -} - -/* End - Routines for managing client lists */ - -/* File Operations */ - -static int agp_mmap(struct file *file, struct vm_area_struct *vma) -{ - unsigned int size, current_size; - unsigned long offset; - struct agp_client *client; - struct agp_file_private *priv = file->private_data; - struct agp_kern_info kerninfo; - - mutex_lock(&(agp_fe.agp_mutex)); - - if (agp_fe.backend_acquired != true) - goto out_eperm; - - if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags))) - goto out_eperm; - - agp_copy_info(agp_bridge, &kerninfo); - size = vma->vm_end - vma->vm_start; - current_size = kerninfo.aper_size; - current_size = current_size * 0x100000; - offset = vma->vm_pgoff << PAGE_SHIFT; - DBG("%lx:%lx", offset, offset+size); - - if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) { - if ((size + offset) > current_size) - goto out_inval; - - client = agp_find_client_by_pid(current->pid); - - if (client == NULL) - goto out_eperm; - - if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot)) - goto out_inval; - - DBG("client vm_ops=%p", kerninfo.vm_ops); - if (kerninfo.vm_ops) { - vma->vm_ops = kerninfo.vm_ops; - } else if (io_remap_pfn_range(vma, vma->vm_start, - (kerninfo.aper_base + offset) >> PAGE_SHIFT, - size, - 
pgprot_writecombine(vma->vm_page_prot))) { - goto out_again; - } - mutex_unlock(&(agp_fe.agp_mutex)); - return 0; - } - - if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) { - if (size != current_size) - goto out_inval; - - DBG("controller vm_ops=%p", kerninfo.vm_ops); - if (kerninfo.vm_ops) { - vma->vm_ops = kerninfo.vm_ops; - } else if (io_remap_pfn_range(vma, vma->vm_start, - kerninfo.aper_base >> PAGE_SHIFT, - size, - pgprot_writecombine(vma->vm_page_prot))) { - goto out_again; - } - mutex_unlock(&(agp_fe.agp_mutex)); - return 0; - } - -out_eperm: - mutex_unlock(&(agp_fe.agp_mutex)); - return -EPERM; - -out_inval: - mutex_unlock(&(agp_fe.agp_mutex)); - return -EINVAL; - -out_again: - mutex_unlock(&(agp_fe.agp_mutex)); - return -EAGAIN; -} - -static int agp_release(struct inode *inode, struct file *file) -{ - struct agp_file_private *priv = file->private_data; - - mutex_lock(&(agp_fe.agp_mutex)); - - DBG("priv=%p", priv); - - if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) { - struct agp_controller *controller; - - controller = agp_find_controller_by_pid(priv->my_pid); - - if (controller != NULL) { - if (controller == agp_fe.current_controller) - agp_controller_release_current(controller, priv); - agp_remove_controller(controller); - controller = NULL; - } - } - - if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) - agp_remove_client(priv->my_pid); - - agp_remove_file_private(priv); - kfree(priv); - file->private_data = NULL; - mutex_unlock(&(agp_fe.agp_mutex)); - return 0; -} - -static int agp_open(struct inode *inode, struct file *file) -{ - int minor = iminor(inode); - struct agp_file_private *priv; - struct agp_client *client; - - if (minor != AGPGART_MINOR) - return -ENXIO; - - mutex_lock(&(agp_fe.agp_mutex)); - - priv = kzalloc(sizeof(struct agp_file_private), GFP_KERNEL); - if (priv == NULL) { - mutex_unlock(&(agp_fe.agp_mutex)); - return -ENOMEM; - } - - set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags); - priv->my_pid = current->pid; - - if (capable(CAP_SYS_RAWIO)) - /* Root priv, can be controller */ - set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags); - - client = agp_find_client_by_pid(current->pid); - - if (client != NULL) { - set_bit(AGP_FF_IS_CLIENT, &priv->access_flags); - set_bit(AGP_FF_IS_VALID, &priv->access_flags); - } - file->private_data = (void *) priv; - agp_insert_file_private(priv); - DBG("private=%p, client=%p", priv, client); - - mutex_unlock(&(agp_fe.agp_mutex)); - - return 0; -} - -static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_info userinfo; - struct agp_kern_info kerninfo; - - agp_copy_info(agp_bridge, &kerninfo); - - memset(&userinfo, 0, sizeof(userinfo)); - userinfo.version.major = kerninfo.version.major; - userinfo.version.minor = kerninfo.version.minor; - userinfo.bridge_id = kerninfo.device->vendor | - (kerninfo.device->device << 16); - userinfo.agp_mode = kerninfo.mode; - userinfo.aper_base = kerninfo.aper_base; - userinfo.aper_size = kerninfo.aper_size; - userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory; - userinfo.pg_used = kerninfo.current_memory; - - if (copy_to_user(arg, &userinfo, sizeof(struct agp_info))) - return -EFAULT; - - return 0; -} - -int agpioc_acquire_wrap(struct agp_file_private *priv) -{ - struct agp_controller *controller; - - DBG(""); - - if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags))) - return -EPERM; - - if (agp_fe.current_controller != NULL) - return -EBUSY; - - if (!agp_bridge) - return -ENODEV; - - if 
(atomic_read(&agp_bridge->agp_in_use)) - return -EBUSY; - - atomic_inc(&agp_bridge->agp_in_use); - - agp_fe.backend_acquired = true; - - controller = agp_find_controller_by_pid(priv->my_pid); - - if (controller != NULL) { - agp_controller_make_current(controller); - } else { - controller = agp_create_controller(priv->my_pid); - - if (controller == NULL) { - agp_fe.backend_acquired = false; - agp_backend_release(agp_bridge); - return -ENOMEM; - } - agp_insert_controller(controller); - agp_controller_make_current(controller); - } - - set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags); - set_bit(AGP_FF_IS_VALID, &priv->access_flags); - return 0; -} - -int agpioc_release_wrap(struct agp_file_private *priv) -{ - DBG(""); - agp_controller_release_current(agp_fe.current_controller, priv); - return 0; -} - -int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_setup mode; - - DBG(""); - if (copy_from_user(&mode, arg, sizeof(struct agp_setup))) - return -EFAULT; - - agp_enable(agp_bridge, mode.agp_mode); - return 0; -} - -static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_region reserve; - struct agp_client *client; - struct agp_file_private *client_priv; - - DBG(""); - if (copy_from_user(&reserve, arg, sizeof(struct agp_region))) - return -EFAULT; - - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment)) - return -EFAULT; - - client = agp_find_client_by_pid(reserve.pid); - - if (reserve.seg_count == 0) { - /* remove a client */ - client_priv = agp_find_private(reserve.pid); - - if (client_priv != NULL) { - set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); - set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); - } - if (client == NULL) { - /* client is already removed */ - return 0; - } - return agp_remove_client(reserve.pid); - } else { - struct agp_segment *segment; - - if (reserve.seg_count >= 16384) - return -EINVAL; - - segment = kmalloc((sizeof(struct agp_segment) * reserve.seg_count), - GFP_KERNEL); - - if (segment == NULL) - return -ENOMEM; - - if (copy_from_user(segment, (void __user *) reserve.seg_list, - sizeof(struct agp_segment) * reserve.seg_count)) { - kfree(segment); - return -EFAULT; - } - reserve.seg_list = segment; - - if (client == NULL) { - /* Create the client and add the segment */ - client = agp_create_client(reserve.pid); - - if (client == NULL) { - kfree(segment); - return -ENOMEM; - } - client_priv = agp_find_private(reserve.pid); - - if (client_priv != NULL) { - set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); - set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); - } - } - return agp_create_segment(client, &reserve); - } - /* Will never really happen */ - return -EINVAL; -} - -int agpioc_protect_wrap(struct agp_file_private *priv) -{ - DBG(""); - /* This function is not currently implemented */ - return -EINVAL; -} - -static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_memory *memory; - struct agp_allocate alloc; - - DBG(""); - if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate))) - return -EFAULT; - - if (alloc.type >= AGP_USER_TYPES) - return -EINVAL; - - memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); - - if (memory == NULL) - return -ENOMEM; - - alloc.key = memory->key; - alloc.physical = memory->physical; - - if (copy_to_user(arg, &alloc, sizeof(struct agp_allocate))) { - agp_free_memory_wrap(memory); - return -EFAULT; - } - return 0; -} - -int agpioc_deallocate_wrap(struct agp_file_private 
*priv, int arg) -{ - struct agp_memory *memory; - - DBG(""); - memory = agp_find_mem_by_key(arg); - - if (memory == NULL) - return -EINVAL; - - agp_free_memory_wrap(memory); - return 0; -} - -static int agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_bind bind_info; - struct agp_memory *memory; - - DBG(""); - if (copy_from_user(&bind_info, arg, sizeof(struct agp_bind))) - return -EFAULT; - - memory = agp_find_mem_by_key(bind_info.key); - - if (memory == NULL) - return -EINVAL; - - return agp_bind_memory(memory, bind_info.pg_start); -} - -static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_memory *memory; - struct agp_unbind unbind; - - DBG(""); - if (copy_from_user(&unbind, arg, sizeof(struct agp_unbind))) - return -EFAULT; - - memory = agp_find_mem_by_key(unbind.key); - - if (memory == NULL) - return -EINVAL; - - return agp_unbind_memory(memory); -} - -static long agp_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) -{ - struct agp_file_private *curr_priv = file->private_data; - int ret_val = -ENOTTY; - - DBG("priv=%p, cmd=%x", curr_priv, cmd); - mutex_lock(&(agp_fe.agp_mutex)); - - if ((agp_fe.current_controller == NULL) && - (cmd != AGPIOC_ACQUIRE)) { - ret_val = -EINVAL; - goto ioctl_out; - } - if ((agp_fe.backend_acquired != true) && - (cmd != AGPIOC_ACQUIRE)) { - ret_val = -EBUSY; - goto ioctl_out; - } - if (cmd != AGPIOC_ACQUIRE) { - if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) { - ret_val = -EPERM; - goto ioctl_out; - } - /* Use the original pid of the controller, - * in case it's threaded */ - - if (agp_fe.current_controller->pid != curr_priv->my_pid) { - ret_val = -EBUSY; - goto ioctl_out; - } - } - - switch (cmd) { - case AGPIOC_INFO: - ret_val = agpioc_info_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_ACQUIRE: - ret_val = agpioc_acquire_wrap(curr_priv); - break; - - case AGPIOC_RELEASE: - ret_val = agpioc_release_wrap(curr_priv); - break; - - case AGPIOC_SETUP: - ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_RESERVE: - ret_val = agpioc_reserve_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_PROTECT: - ret_val = agpioc_protect_wrap(curr_priv); - break; - - case AGPIOC_ALLOCATE: - ret_val = agpioc_allocate_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_DEALLOCATE: - ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg); - break; - - case AGPIOC_BIND: - ret_val = agpioc_bind_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_UNBIND: - ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_CHIPSET_FLUSH: - break; - } - -ioctl_out: - DBG("ioctl returns %d\n", ret_val); - mutex_unlock(&(agp_fe.agp_mutex)); - return ret_val; -} - -static const struct file_operations agp_fops = -{ - .owner = THIS_MODULE, - .llseek = no_llseek, - .unlocked_ioctl = agp_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = compat_agp_ioctl, -#endif - .mmap = agp_mmap, - .open = agp_open, - .release = agp_release, -}; - -static struct miscdevice agp_miscdev = -{ - .minor = AGPGART_MINOR, - .name = "agpgart", - .fops = &agp_fops -}; - -int agp_frontend_initialize(void) -{ - memset(&agp_fe, 0, sizeof(struct agp_front_data)); - mutex_init(&(agp_fe.agp_mutex)); - - if (misc_register(&agp_miscdev)) { - printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR); - return -EIO; - } - return 0; -} - -void agp_frontend_cleanup(void) -{ - misc_deregister(&agp_miscdev); -} -- 
cgit v1.2.3 From 94f8f319cbcbddce8f82bfaf8ed39eb57efdd457 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 22 Nov 2023 13:09:43 +0100 Subject: drm: Remove Kconfig option for legacy support (CONFIG_DRM_LEGACY) Remove CONFIG_DRM_LEGACY from Kconfig. Nothing depends on the option. Signed-off-by: Thomas Zimmermann Reviewed-by: David Airlie Reviewed-by: Daniel Vetter Acked-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20231122122449.11588-15-tzimmermann@suse.de --- drivers/gpu/drm/Kconfig | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index b7abd436455f..31cfe2c2a2af 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -410,27 +410,6 @@ config DRM_HYPERV If M is selected the module will be called hyperv_drm. -# Keep legacy drivers last - -menuconfig DRM_LEGACY - bool "Enable legacy drivers (DANGEROUS)" - depends on DRM && MMU - help - Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous - APIs to user-space, which can be used to circumvent access - restrictions and other security measures. For backwards compatibility - those drivers are still available, but their use is highly - inadvisable and might harm your system. - - You are recommended to use the safe modeset-only drivers instead, and - perform 3D emulation in user-space. - - Unless you have strong reasons to go rogue, say "N". - -if DRM_LEGACY -# leave here to list legacy drivers -endif # DRM_LEGACY - config DRM_EXPORT_FOR_TESTS bool -- cgit v1.2.3 From a0fce84cb1b3b88d3d5853f7ac5f1a3ef7e38620 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 4 Dec 2023 10:07:46 +0100 Subject: drm/plane-helper: Move drm_plane_helper_atomic_check() into udl The udl driver is the only caller of drm_plane_helper_atomic_check(). Move the function into the driver. No functional changes. v2: * fix documenation (Sui) Signed-off-by: Thomas Zimmermann Reviewed-by: Alex Deucher Acked-by: Sui Jingfeng Link: https://patchwork.freedesktop.org/patch/msgid/20231204090852.1650-2-tzimmermann@suse.de --- drivers/gpu/drm/drm_crtc_helper.c | 7 ++----- drivers/gpu/drm/drm_plane_helper.c | 32 -------------------------------- drivers/gpu/drm/udl/udl_modeset.c | 19 +++++++++++++++++-- include/drm/drm_plane_helper.h | 2 -- 4 files changed, 19 insertions(+), 41 deletions(-) diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index a209659a996c..2dafc39a27cb 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -439,11 +439,8 @@ EXPORT_SYMBOL(drm_crtc_helper_set_mode); * @state: atomic state object * * Provides a default CRTC-state check handler for CRTCs that only have - * one primary plane attached to it. - * - * This is often the case for the CRTC of simple framebuffers. See also - * drm_plane_helper_atomic_check() for the respective plane-state check - * helper function. + * one primary plane attached to it. This is often the case for the CRTC + * of simple framebuffers. * * RETURNS: * Zero on success, or an errno code otherwise. 
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 5e95089676ff..7982be4b0306 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -279,35 +279,3 @@ void drm_plane_helper_destroy(struct drm_plane *plane) kfree(plane); } EXPORT_SYMBOL(drm_plane_helper_destroy); - -/** - * drm_plane_helper_atomic_check() - Helper to check plane atomic-state - * @plane: plane to check - * @state: atomic state object - * - * Provides a default plane-state check handler for planes whose atomic-state - * scale and positioning are not expected to change since the plane is always - * a fullscreen scanout buffer. - * - * This is often the case for the primary plane of simple framebuffers. See - * also drm_crtc_helper_atomic_check() for the respective CRTC-state check - * helper function. - * - * RETURNS: - * Zero on success, or an errno code otherwise. - */ -int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) -{ - struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); - struct drm_crtc *new_crtc = new_plane_state->crtc; - struct drm_crtc_state *new_crtc_state = NULL; - - if (new_crtc) - new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); - - return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, - DRM_PLANE_NO_SCALING, - DRM_PLANE_NO_SCALING, - false, false); -} -EXPORT_SYMBOL(drm_plane_helper_atomic_check); diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index 40876bcdd79a..7702359c90c2 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include @@ -261,6 +260,22 @@ static const uint64_t udl_primary_plane_fmtmods[] = { DRM_FORMAT_MOD_INVALID }; +static int udl_primary_plane_helper_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_crtc *new_crtc = new_plane_state->crtc; + struct drm_crtc_state *new_crtc_state = NULL; + + if (new_crtc) + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); + + return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + false, false); +} + static void udl_primary_plane_helper_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state) { @@ -296,7 +311,7 @@ out_drm_gem_fb_end_cpu_access: static const struct drm_plane_helper_funcs udl_primary_plane_helper_funcs = { DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, - .atomic_check = drm_plane_helper_atomic_check, + .atomic_check = udl_primary_plane_helper_atomic_check, .atomic_update = udl_primary_plane_helper_atomic_update, }; diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h index 3a574e8cd22f..75f9c4830564 100644 --- a/include/drm/drm_plane_helper.h +++ b/include/drm/drm_plane_helper.h @@ -26,7 +26,6 @@ #include -struct drm_atomic_state; struct drm_crtc; struct drm_framebuffer; struct drm_modeset_acquire_ctx; @@ -42,7 +41,6 @@ int drm_plane_helper_update_primary(struct drm_plane *plane, struct drm_crtc *cr int drm_plane_helper_disable_primary(struct drm_plane *plane, struct drm_modeset_acquire_ctx *ctx); void drm_plane_helper_destroy(struct drm_plane *plane); -int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state); /** * DRM_PLANE_NON_ATOMIC_FUNCS - Default 
plane functions for non-atomic drivers -- cgit v1.2.3 From bb8532601260209d1ee40c52d15e98578b703e47 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 4 Dec 2023 10:07:47 +0100 Subject: drm/amdgpu: Do not include Remove unnecessary include statements for . The file contains helpers for non-atomic code and should not be required by most drivers. No functional changes. Signed-off-by: Thomas Zimmermann Reviewed-by: Alex Deucher Reviewed-by: Harry Wentland Link: https://patchwork.freedesktop.org/patch/msgid/20231204090852.1650-3-tzimmermann@suse.de --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index aa43f1761acd..b8c3a9b104a4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -92,7 +92,6 @@ #include #include #include -#include #include -- cgit v1.2.3 From 85ddae2392b5673aa4bda3c7d14d205d1ed069fe Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 4 Dec 2023 10:07:48 +0100 Subject: drm/loongson: Do not include Remove unnecessary include statements for . The file contains helpers for non-atomic code and should not be required by most drivers. No functional changes. Signed-off-by: Thomas Zimmermann Reviewed-by: Sui Jingfeng Reviewed-by: Alex Deucher Tested-by: Sui Jingfeng Link: https://patchwork.freedesktop.org/patch/msgid/20231204090852.1650-4-tzimmermann@suse.de --- drivers/gpu/drm/loongson/lsdc_plane.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/loongson/lsdc_plane.c b/drivers/gpu/drm/loongson/lsdc_plane.c index 0d5094633222..d227a2c1dcf1 100644 --- a/drivers/gpu/drm/loongson/lsdc_plane.c +++ b/drivers/gpu/drm/loongson/lsdc_plane.c @@ -9,7 +9,6 @@ #include #include #include -#include #include "lsdc_drv.h" #include "lsdc_regs.h" -- cgit v1.2.3 From 2887875256d486c0cbb544e67932526bd681e209 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 4 Dec 2023 10:07:49 +0100 Subject: drm/shmobile: Do not include Remove unnecessary include statements for . The file contains helpers for non-atomic code and should not be required by most drivers. No functional changes. Signed-off-by: Thomas Zimmermann Reviewed-by: Geert Uytterhoeven Reviewed-by: Alex Deucher Acked-by: Geert Uytterhoeven Link: https://patchwork.freedesktop.org/patch/msgid/20231204090852.1650-5-tzimmermann@suse.de --- drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c index 8f9a728affde..07ad17d24294 100644 --- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c +++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c @@ -14,7 +14,6 @@ #include #include #include -#include #include "shmob_drm_drv.h" #include "shmob_drm_kms.h" -- cgit v1.2.3 From 9e8f373e8a77c5192532bab6ea267b329fe66b77 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 4 Dec 2023 10:07:50 +0100 Subject: drm/solomon: Do not include Remove unnecessary include statements for . The file contains helpers for non-atomic code and should not be required by most drivers. No functional changes. 
Signed-off-by: Thomas Zimmermann Reviewed-by: Geert Uytterhoeven Reviewed-by: Javier Martinez Canillas Reviewed-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20231204090852.1650-6-tzimmermann@suse.de --- drivers/gpu/drm/solomon/ssd130x.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h index acf7cedf0c1a..075c5c3ee75a 100644 --- a/drivers/gpu/drm/solomon/ssd130x.h +++ b/drivers/gpu/drm/solomon/ssd130x.h @@ -17,7 +17,6 @@ #include #include #include -#include #include -- cgit v1.2.3 From 7e661a06998e06455563b2ff6198c7f3efe31cf2 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 4 Dec 2023 10:07:51 +0100 Subject: drm/ofdrm: Do not include Remove unnecessary include statements for . The file contains helpers for non-atomic code and should not be required by most drivers. No functional changes. Signed-off-by: Thomas Zimmermann Reviewed-by: Sui Jingfeng Reviewed-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20231204090852.1650-7-tzimmermann@suse.de --- drivers/gpu/drm/tiny/ofdrm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c index 05a72473cfc6..ab89b7fc7bf6 100644 --- a/drivers/gpu/drm/tiny/ofdrm.c +++ b/drivers/gpu/drm/tiny/ofdrm.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include -- cgit v1.2.3 From 81b32f4393cde612e022ff35b556b28001350d3b Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 4 Dec 2023 10:07:52 +0100 Subject: drm/simpledrm: Do not include Remove unnecessary include statements for . The file contains helpers for non-atomic code and should not be required by most drivers. No functional changes. Signed-off-by: Thomas Zimmermann Reviewed-by: Sui Jingfeng Reviewed-by: Javier Martinez Canillas Reviewed-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20231204090852.1650-8-tzimmermann@suse.de --- drivers/gpu/drm/tiny/simpledrm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index 34bbbd7b53dd..7ce1c4617675 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #define DRIVER_NAME "simpledrm" -- cgit v1.2.3 From aa5d7cf88bdebe5376970d79878f16d28536f7e2 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 4 Dec 2023 10:07:53 +0100 Subject: drm/xlnx: Do not include Remove unnecessary include statements for . The file contains helpers for non-atomic code and should not be required by most drivers. No functional changes. Signed-off-by: Thomas Zimmermann Reviewed-by: Laurent Pinchart Reviewed-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20231204090852.1650-9-tzimmermann@suse.de --- drivers/gpu/drm/xlnx/zynqmp_kms.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c index a7f8611be6f4..db3bb4afbfc4 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_kms.c +++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From e9d5ae8a9e7e32d0b1bc582996de4f7180cb2ff2 Mon Sep 17 00:00:00 2001 From: Donald Robson Date: Mon, 4 Dec 2023 15:13:37 +0000 Subject: drm/imagination: Removed unused functions in pvr_fw_trace Fixing the warning below due to an unused file level vtable. 
Removing only this causes additional warnings for the now unused functions, so I've removed those too. >> drivers/gpu/drm/imagination/pvr_fw_trace.c:205:37: warning: 'pvr_fw_trace_group_mask_fops' defined but not used [-Wunused-const-variable=] 205 | static const struct file_operations pvr_fw_trace_group_mask_fops = { | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ Changes since v1: - Corrected hash in Fixes tag. Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202311302054.MVYPxFCE-lkp@intel.com/ Fixes: cb56cd610866 ("drm/imagination: Add firmware trace to debugfs") Signed-off-by: Donald Robson Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20231204151337.60930-1-donald.robson@imgtec.com --- drivers/gpu/drm/imagination/pvr_fw_trace.c | 44 ------------------------------ 1 file changed, 44 deletions(-) diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c index 87a42fb6ace6..30f41a10a0cf 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_trace.c +++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c @@ -167,50 +167,6 @@ err_up_read: #if defined(CONFIG_DEBUG_FS) -static int fw_trace_group_mask_show(struct seq_file *m, void *data) -{ - struct pvr_device *pvr_dev = m->private; - - seq_printf(m, "%08x\n", pvr_dev->fw_dev.fw_trace.group_mask); - - return 0; -} - -static int fw_trace_group_mask_open(struct inode *inode, struct file *file) -{ - return single_open(file, fw_trace_group_mask_show, inode->i_private); -} - -static ssize_t fw_trace_group_mask_write(struct file *file, const char __user *ubuf, size_t len, - loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct pvr_device *pvr_dev = m->private; - u32 new_group_mask; - int err; - - err = kstrtouint_from_user(ubuf, len, 0, &new_group_mask); - if (err) - return err; - - err = update_logtype(pvr_dev, new_group_mask); - if (err) - return err; - - pvr_dev->fw_dev.fw_trace.group_mask = new_group_mask; - - return (ssize_t)len; -} - -static const struct file_operations pvr_fw_trace_group_mask_fops = { - .owner = THIS_MODULE, - .open = fw_trace_group_mask_open, - .read = seq_read, - .write = fw_trace_group_mask_write, - .llseek = default_llseek, - .release = single_release, -}; - struct pvr_fw_trace_seq_data { /** @buffer: Pointer to copy of trace data. */ u32 *buffer; -- cgit v1.2.3 From 4b83b783ad778f7e69312fa61d1bee8e76e2156f Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 4 Dec 2023 08:32:10 +0100 Subject: drm/imagination: move update_logtype() into ifdef section This function is only used when debugfs is enabled, and otherwise causes a build warning: drivers/gpu/drm/imagination/pvr_fw_trace.c:135:1: error: 'update_logtype' defined but not used [-Werror=unused-function] Move the #ifdef check to include this function as well. 
Fixes: cb56cd610866 ("drm/imagination: Add firmware trace to debugfs") Acked-by: Frank Binns Signed-off-by: Arnd Bergmann Link: https://lore.kernel.org/r/20231204073231.1164163-1-arnd@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/imagination/pvr_fw_trace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c index 30f41a10a0cf..7159fc479001 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_trace.c +++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c @@ -121,6 +121,8 @@ void pvr_fw_trace_fini(struct pvr_device *pvr_dev) pvr_fw_object_unmap_and_destroy(fw_trace->tracebuf_ctrl_obj); } +#if defined(CONFIG_DEBUG_FS) + /** * update_logtype() - Send KCCB command to trigger FW to update logtype * @pvr_dev: Target PowerVR device @@ -165,8 +167,6 @@ err_up_read: return err; } -#if defined(CONFIG_DEBUG_FS) - struct pvr_fw_trace_seq_data { /** @buffer: Pointer to copy of trace data. */ u32 *buffer; -- cgit v1.2.3 From 28d3d0696688154cc04983f343011d07bf0508e4 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 6 Dec 2023 18:05:15 +0300 Subject: drm/bridge: nxp-ptn3460: simplify some error checking The i2c_master_send/recv() functions return negative error codes or they return "len" on success. So the error handling here can be written as just normal checks for "if (ret < 0) return ret;". No need to complicate things. Btw, in this code the "len" parameter can never be zero, but even if it were, then I feel like this would still be the best way to write it. Fixes: 914437992876 ("drm/bridge: nxp-ptn3460: fix i2c_master_send() error checking") Suggested-by: Neil Armstrong Signed-off-by: Dan Carpenter Reviewed-by: Robert Foss Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/04242630-42d8-4920-8c67-24ac9db6b3c9@moroto.mountain --- drivers/gpu/drm/bridge/nxp-ptn3460.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c index 9b7eb8c669c1..7c0076e49953 100644 --- a/drivers/gpu/drm/bridge/nxp-ptn3460.c +++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c @@ -54,15 +54,15 @@ static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr, int ret; ret = i2c_master_send(ptn_bridge->client, &addr, 1); - if (ret <= 0) { + if (ret < 0) { DRM_ERROR("Failed to send i2c command, ret=%d\n", ret); - return ret ?: -EIO; + return ret; } ret = i2c_master_recv(ptn_bridge->client, buf, len); - if (ret != len) { + if (ret < 0) { DRM_ERROR("Failed to recv i2c data, ret=%d\n", ret); - return ret < 0 ? ret : -EIO; + return ret; } return 0; @@ -78,9 +78,9 @@ static int ptn3460_write_byte(struct ptn3460_bridge *ptn_bridge, char addr, buf[1] = val; ret = i2c_master_send(ptn_bridge->client, buf, ARRAY_SIZE(buf)); - if (ret != ARRAY_SIZE(buf)) { + if (ret < 0) { DRM_ERROR("Failed to send i2c command, ret=%d\n", ret); - return ret < 0 ? ret : -EIO; + return ret; } return 0; -- cgit v1.2.3 From 1d3062fad9c7313fff9970a88e0538a24480ffb8 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Fri, 3 Nov 2023 15:14:03 +0200 Subject: drm/drm_file: fix use of uninitialized variable smatch reports: drivers/gpu/drm/drm_file.c:967 drm_show_memory_stats() error: uninitialized symbol 'supported_status'. 'supported_status' is only set in one code path. 
I'm not familiar with the code to say if that path will always be ran in real life, but whether that is the case or not, I think it is good to initialize 'supported_status' to 0 to silence the warning (and possibly fix a bug). Reviewed-by: Laurent Pinchart Acked-by: Maxime Ripard Signed-off-by: Tomi Valkeinen Link: https://patchwork.freedesktop.org/patch/msgid/20231103-uninit-fixes-v2-1-c22b2444f5f5@ideasonboard.com --- drivers/gpu/drm/drm_file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 987c1a5d1773..8c87287c3e16 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -898,7 +898,7 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file) { struct drm_gem_object *obj; struct drm_memory_stats status = {}; - enum drm_gem_object_status supported_status; + enum drm_gem_object_status supported_status = 0; int id; spin_lock(&file->table_lock); -- cgit v1.2.3 From f9af8f0c1dc567a5a6a6318ff324c45d80d4a60f Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Fri, 3 Nov 2023 15:14:04 +0200 Subject: drm/framebuffer: Fix use of uninitialized variable smatch reports: drivers/gpu/drm/drm_framebuffer.c:654 drm_mode_getfb2_ioctl() error: uninitialized symbol 'ret'. 'ret' is possibly not set when there are no errors, causing the error above. I can't say if that ever happens in real-life, but in any case I think it is good to initialize 'ret' to 0. Reviewed-by: Laurent Pinchart Acked-by: Maxime Ripard Signed-off-by: Tomi Valkeinen Link: https://patchwork.freedesktop.org/patch/msgid/20231103-uninit-fixes-v2-2-c22b2444f5f5@ideasonboard.com --- drivers/gpu/drm/drm_framebuffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 09e289fca5c3..3cc0ffc28e86 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -583,7 +583,7 @@ int drm_mode_getfb2_ioctl(struct drm_device *dev, struct drm_mode_fb_cmd2 *r = data; struct drm_framebuffer *fb; unsigned int i; - int ret; + int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; -- cgit v1.2.3 From 155d6fb61270dd297f128731cd155080deee8f3a Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Fri, 3 Nov 2023 15:14:05 +0200 Subject: drm/bridge: cdns-mhdp8546: Fix use of uninitialized variable 'ret' could be uninitialized at the end of the function, although it's not clear if that can happen in practice. 
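A minimal sketch of the general retry pattern this patch hardens, with a hypothetical do_attempt() standing in for the real per-try operation: if the loop can be left on a path that never assigned ret, the function returns an indeterminate value, so initializing ret gives that path a defined error code.

#include <linux/errno.h>

/* Placeholder for the real per-iteration operation. */
static int do_attempt(void)
{
	return 0;
}

static int retry_example(int tries)
{
	int ret = -EINVAL;	/* the fix: a defined fallback value */
	int i;

	for (i = 0; i < tries; i++) {
		ret = do_attempt();
		if (!ret)
			break;	/* success */
	}

	return ret;		/* well-defined even if tries == 0 */
}
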
Fixes: 6a3608eae6d3 ("drm: bridge: cdns-mhdp8546: Enable HDCP") Acked-by: Maxime Ripard Signed-off-by: Tomi Valkeinen Link: https://patchwork.freedesktop.org/patch/msgid/20231103-uninit-fixes-v2-3-c22b2444f5f5@ideasonboard.com --- drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c index 946212a95598..5e3b8edcf794 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c @@ -403,7 +403,8 @@ static int _cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp) static int _cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type) { - int ret, tries = 3; + int ret = -EINVAL; + int tries = 3; u32 i; for (i = 0; i < tries; i++) { -- cgit v1.2.3 From 32bd29b619638256c5b75fb021d6d9f12fc4a984 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Fri, 3 Nov 2023 15:14:06 +0200 Subject: drm/bridge: tc358767: Fix return value on error case If the hpd_pin is invalid, the driver returns 'ret'. But 'ret' contains 0, instead of an error value. Return -EINVAL instead. Fixes: f25ee5017e4f ("drm/bridge: tc358767: add IRQ and HPD support") Acked-by: Maxime Ripard Signed-off-by: Tomi Valkeinen Link: https://patchwork.freedesktop.org/patch/msgid/20231103-uninit-fixes-v2-4-c22b2444f5f5@ideasonboard.com --- drivers/gpu/drm/bridge/tc358767.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index ef2e373606ba..615cc8f950d7 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -2273,7 +2273,7 @@ static int tc_probe(struct i2c_client *client) } else { if (tc->hpd_pin < 0 || tc->hpd_pin > 1) { dev_err(dev, "failed to parse HPD number\n"); - return ret; + return -EINVAL; } } -- cgit v1.2.3 From 90d50b8d85834e73536fdccd5aa913b30494fef0 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Thu, 21 Sep 2023 13:50:32 +0300 Subject: drm/mipi-dsi: Fix detach call without attach It's been reported that DSI host driver's detach can be called without the attach ever happening: https://lore.kernel.org/all/20230412073954.20601-1-tony@atomide.com/ After reading the code, I think this is what happens: We have a DSI host defined in the device tree and a DSI peripheral under that host (i.e. an i2c device using the DSI as data bus doesn't exhibit this behavior). The host driver calls mipi_dsi_host_register(), which causes (via a few functions) mipi_dsi_device_add() to be called for the DSI peripheral. So now we have a DSI device under the host, but attach hasn't been called. Normally the probing of the devices continues, and eventually the DSI peripheral's driver will call mipi_dsi_attach(), attaching the peripheral. However, if the host driver's probe encounters an error after calling mipi_dsi_host_register(), and before the peripheral has called mipi_dsi_attach(), the host driver will do cleanups and return an error from its probe function. The cleanups include calling mipi_dsi_host_unregister(). mipi_dsi_host_unregister() will call two functions for all its DSI peripheral devices: mipi_dsi_detach() and mipi_dsi_device_unregister(). The latter makes sense, as the device exists, but the former may be wrong as attach has not necessarily been done. 
To fix this, track the attached state of the peripheral, and only detach from mipi_dsi_host_unregister() if the peripheral was attached. Note that I have only tested this with a board with an i2c DSI peripheral, not with a "pure" DSI peripheral. However, slightly related, the unregister machinery still seems broken. E.g. if the DSI host driver is unbound, it'll detach and unregister the DSI peripherals. After that, when the DSI peripheral driver unbound it'll call detach either directly or using the devm variant, leading to a crash. And probably the driver will crash if it happens, for some reason, to try to send a message via the DSI bus. But that's another topic. Tested-by: H. Nikolaus Schaller Acked-by: Maxime Ripard Reviewed-by: Sebastian Reichel Tested-by: Tony Lindgren Signed-off-by: Tomi Valkeinen Link: https://patchwork.freedesktop.org/patch/msgid/20230921-dsi-detach-fix-v1-1-d0de2d1621d9@ideasonboard.com --- drivers/gpu/drm/drm_mipi_dsi.c | 17 +++++++++++++++-- include/drm/drm_mipi_dsi.h | 2 ++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 14201f73aab1..843a6dbda93a 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -347,7 +347,8 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); - mipi_dsi_detach(dsi); + if (dsi->attached) + mipi_dsi_detach(dsi); mipi_dsi_device_unregister(dsi); return 0; @@ -370,11 +371,18 @@ EXPORT_SYMBOL(mipi_dsi_host_unregister); int mipi_dsi_attach(struct mipi_dsi_device *dsi) { const struct mipi_dsi_host_ops *ops = dsi->host->ops; + int ret; if (!ops || !ops->attach) return -ENOSYS; - return ops->attach(dsi->host, dsi); + ret = ops->attach(dsi->host, dsi); + if (ret) + return ret; + + dsi->attached = true; + + return 0; } EXPORT_SYMBOL(mipi_dsi_attach); @@ -386,9 +394,14 @@ int mipi_dsi_detach(struct mipi_dsi_device *dsi) { const struct mipi_dsi_host_ops *ops = dsi->host->ops; + if (WARN_ON(!dsi->attached)) + return -EINVAL; + if (!ops || !ops->detach) return -ENOSYS; + dsi->attached = false; + return ops->detach(dsi->host, dsi); } EXPORT_SYMBOL(mipi_dsi_detach); diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index c9df0407980c..c0aec0d4d664 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -168,6 +168,7 @@ struct mipi_dsi_device_info { * struct mipi_dsi_device - DSI peripheral device * @host: DSI host for this peripheral * @dev: driver model device node for this peripheral + * @attached: the DSI device has been successfully attached * @name: DSI peripheral chip type * @channel: virtual channel assigned to the peripheral * @format: pixel format for video mode @@ -184,6 +185,7 @@ struct mipi_dsi_device_info { struct mipi_dsi_device { struct mipi_dsi_host *host; struct device dev; + bool attached; char name[DSI_DEV_NAME_SIZE]; unsigned int channel; -- cgit v1.2.3