author    Gurchetan Singh <gurchetansingh@chromium.org>  2020-09-24 03:32:11 +0300
committer Gerd Hoffmann <kraxel@redhat.com>              2020-09-29 12:23:47 +0300
commit    50c3d1938ee380e2d24714ea7058d2d3fc2662de (patch)
tree      fee94a6e49bea5615c95380fa4207a1e98907d0f /drivers/gpu/drm/virtio/virtgpu_vq.c
parent    1e2554f49e41d3f6509c6a633e5e4dd48d93ffb9 (diff)
download  linux-50c3d1938ee380e2d24714ea7058d2d3fc2662de.tar.xz
drm/virtio: implement blob resources: fix stride discrepancy
The old transfer ioctls may work on blob resources; for simplicity, there is
no TRANSFER_BLOB hypercall now. The guest may have an image view on a blob
resource such that the stride is not equal to width * bytes_per_pixel. For
host-only blobs, we can repurpose the transfer ioctls to synchronize caches
as well. For guest-only blobs, these operations are undefined for now, so
leave them out. Also, with seamless Wayland integration between guest and
host looking increasingly attractive, it makes sense to keep track of a
single value for stride.

Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
Acked-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20200924003214.662-16-gurchetansingh@chromium.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
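For illustration, here is a minimal userspace sketch of the repurposed
transfer path: it flushes a region of a host-only blob whose rows are padded,
so the stride is not width * bytes_per_pixel. The stride and layer_stride
fields of struct drm_virtgpu_3d_transfer_to_host are the uapi side of this
series; the file descriptor, GEM handle, and geometry below are hypothetical.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/virtgpu_drm.h>

/* Flush a 64x64 region of a host-only blob whose rows are padded to a
 * 256-byte pitch, i.e. stride != 64 * 4 for a 32bpp format. */
static int flush_blob_region(int drm_fd, uint32_t bo_handle)
{
	struct drm_virtgpu_3d_transfer_to_host xfer;

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = bo_handle;	/* hypothetical blob GEM handle */
	xfer.box.w = 64;
	xfer.box.h = 64;
	xfer.box.d = 1;
	xfer.level = 0;
	xfer.offset = 0;
	xfer.stride = 256;		/* padded row pitch in bytes */
	xfer.layer_stride = 256 * 64;	/* one tightly packed 2D layer */

	return ioctl(drm_fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
}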
Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_vq.c')
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
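For context, the wire command filled in below is struct
virtio_gpu_transfer_host_3d from include/uapi/linux/virtio_gpu.h, which has
carried stride fields that the driver left zeroed via memset() until now. A
sketch of its layout, per my reading of the header (verify against the tree):

/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D */
struct virtio_gpu_transfer_host_3d {
	struct virtio_gpu_ctrl_hdr hdr;
	struct virtio_gpu_box box;
	__le64 offset;
	__le32 resource_id;
	__le32 level;
	__le32 stride;
	__le32 layer_stride;
};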
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index e71c8eec0b91..6434b9fb38a6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -1017,6 +1017,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
+					uint32_t stride,
+					uint32_t layer_stride,
 					struct drm_virtgpu_3d_box *box,
 					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence)
@@ -1025,12 +1027,14 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
-	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
-	if (use_dma_api)
+	if (virtio_gpu_is_shmem(bo) && use_dma_api) {
+		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+
 		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
 				       shmem->pages->sgl, shmem->pages->nents,
 				       DMA_TO_DEVICE);
+	}
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
@@ -1043,6 +1047,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	convert_to_hw_box(&cmd_p->box, box);
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->level = cpu_to_le32(level);
+	cmd_p->stride = cpu_to_le32(stride);
+	cmd_p->layer_stride = cpu_to_le32(layer_stride);
 
 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
@@ -1050,6 +1056,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 					  uint32_t ctx_id,
 					  uint64_t offset, uint32_t level,
+					  uint32_t stride,
+					  uint32_t layer_stride,
 					  struct drm_virtgpu_3d_box *box,
 					  struct virtio_gpu_object_array *objs,
 					  struct virtio_gpu_fence *fence)
@@ -1069,6 +1077,8 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 	convert_to_hw_box(&cmd_p->box, box);
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->level = cpu_to_le32(level);
+	cmd_p->stride = cpu_to_le32(stride);
+	cmd_p->layer_stride = cpu_to_le32(layer_stride);
 
 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
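The from-host direction mirrors this. A hedged userspace counterpart to the
sketch above, reading the same hypothetical padded region back from a
host-only blob (same assumptions about the handle and geometry):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/virtgpu_drm.h>

/* Read the 64x64 region back into the guest with the same explicit
 * padded row pitch; handle and geometry remain hypothetical. */
static int readback_blob_region(int drm_fd, uint32_t bo_handle)
{
	struct drm_virtgpu_3d_transfer_from_host xfer;

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = bo_handle;
	xfer.box.w = 64;
	xfer.box.h = 64;
	xfer.box.d = 1;
	xfer.stride = 256;
	xfer.layer_stride = 256 * 64;

	return ioctl(drm_fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
}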