author		Thomas Zimmermann <tzimmermann@suse.de>	2022-10-06 12:53:49 +0300
committer	Thomas Zimmermann <tzimmermann@suse.de>	2022-10-07 14:46:37 +0300
commit		fcc21447c79816b40feddfc707006e9c72f3445e (patch)
tree		380033fb69c33f37477da9f0ba7259854f3cdef8 /drivers/gpu/drm/udl
parent		ca2bd373eb6632d5c37323755030fea6364937d0 (diff)
download	linux-fcc21447c79816b40feddfc707006e9c72f3445e.tar.xz
drm/udl: Use damage iterator
Use a damage iterator to process damage areas individually. Merging damage areas can result in large updates of unchanged framebuffer regions. As USB is rather slow, it's better to process damage areas individually and hence minimize USB-transferred data.

As part of the change, move drm_gem_fb_{begin,end}_cpu_access() into the plane's atomic_update helper. To avoid overhead and intermediate writers, we want to synchronize buffers and reserve access only once before copying damage areas of the framebuffer.

v2:
	* clarify commit message (Javier)

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221006095355.23579-11-tzimmermann@suse.de
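For reference, here is a minimal sketch of the pattern this patch adopts, written as a generic atomic_update helper. The names my_plane_atomic_update() and my_handle_damage() are placeholders rather than udl functions; the DRM helpers used (drm_atomic_helper_damage_iter_init(), drm_atomic_for_each_plane_damage(), drm_gem_fb_{begin,end}_cpu_access(), drm_dev_enter()/drm_dev_exit()) are the real APIs the patch relies on.

#include <linux/dma-mapping.h>

#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>

/* Hypothetical per-rectangle flush routine; stands in for udl_handle_damage(). */
static int my_handle_damage(struct drm_framebuffer *fb, const struct iosys_map *map,
			    const struct drm_rect *clip);

static void my_plane_atomic_update(struct drm_plane *plane,
				   struct drm_atomic_state *state)
{
	struct drm_device *dev = plane->dev;
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
	struct drm_shadow_plane_state *shadow = to_drm_shadow_plane_state(new_state);
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect damage;
	int idx;

	if (!fb)
		return; /* no framebuffer; plane is disabled */

	/* Reserve CPU access to the shadow buffer once, not per damage rectangle. */
	if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE))
		return;

	if (!drm_dev_enter(dev, &idx))
		goto out_end_cpu_access;

	/* Flush each damaged rectangle separately instead of one merged rectangle. */
	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &damage)
		my_handle_damage(fb, &shadow->data[0], &damage);

	drm_dev_exit(idx);

out_end_cpu_access:
	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
}

The udl change below follows the same flow; udl_handle_damage() itself no longer touches the CPU-access bookkeeping and simply returns on error.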
Diffstat (limited to 'drivers/gpu/drm/udl')
-rw-r--r--	drivers/gpu/drm/udl/udl_modeset.c	42
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 34c7fb6ecfe9..aee4fe2b5b08 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -238,15 +238,9 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
return ret;
log_bpp = ret;
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return ret;
-
urb = udl_get_urb(dev);
- if (!urb) {
- ret = -ENOMEM;
- goto out_drm_gem_fb_end_cpu_access;
- }
+ if (!urb)
+ return -ENOMEM;
cmd = urb->transfer_buffer;
for (i = clip->y1; i < clip->y2; i++) {
@@ -258,7 +252,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
&cmd, byte_offset, dev_byte_offset,
byte_width);
if (ret)
- goto out_drm_gem_fb_end_cpu_access;
+ return ret;
}
if (cmd > (char *)urb->transfer_buffer) {
@@ -272,11 +266,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
udl_urb_completion(urb);
}
- ret = 0;
-
-out_drm_gem_fb_end_cpu_access:
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
- return ret;
+ return 0;
}
/*
@@ -301,19 +291,29 @@ static void udl_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct drm_rect rect;
- int idx;
-
- if (!drm_dev_enter(dev, &idx))
- return;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+ int ret, idx;
if (!fb)
return; /* no framebuffer; plane is disabled */
- if (drm_atomic_helper_damage_merged(old_plane_state, plane_state, &rect))
- udl_handle_damage(fb, &shadow_plane_state->data[0], &rect);
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ return;
+
+ if (!drm_dev_enter(dev, &idx))
+ goto out_drm_gem_fb_end_cpu_access;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ udl_handle_damage(fb, &shadow_plane_state->data[0], &damage);
+ }
drm_dev_exit(idx);
+
+out_drm_gem_fb_end_cpu_access:
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
}
static const struct drm_plane_helper_funcs udl_primary_plane_helper_funcs = {