author     Dave Airlie <airlied@redhat.com>    2020-08-28 05:37:46 +0300
committer  Dave Airlie <airlied@redhat.com>    2020-08-28 05:38:06 +0300
commit     cbc2e82932ae01d3e8e81dd17b9e8ef8564c606e (patch)
tree       47d0f6e23d74a5b68247f15f0f465c450c2c0400 /drivers/gpu/drm/virtio
parent     d012a7190fc1fd72ed48911e77ca97ba4521bccd (diff)
parent     cd6da0b113512b15a4d35f355f9ecd8858297369 (diff)
Merge tag 'drm-misc-next-2020-08-27' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.10:

UAPI Changes:

Cross-subsystem Changes:

Core Changes:
  - ttm: various cleanups and reworks of the API

Driver Changes:
  - ast: various cleanups
  - gma500: a few fixes, conversion to the GPIOd API
  - hisilicon: change of maintainer, various reworks
  - ingenic: clock handling and format support improvements
  - mcde: improvements to the DSI support
  - mgag200: support for G200 desktop cards
  - mxsfb: support for the i.MX7 and i.MX8M and the alpha plane
  - panfrost: devfreq support
  - ps8640: retrieve the EDID from eDP control, misc improvements
  - tidss: add a workaround for AM65xx YUV format handling
  - virtio: a few cleanups, support for virtio-gpu exported resources
    (an illustrative usage sketch follows this message)
  - bridges: support for chained bridges in more drivers; new bridges:
    Toshiba TC358762, Toshiba TC358775, Lontium LT9611
  - panels: conversion to dev_-based logging, orientation read from the DT,
    various fixes; new panels: Mantix MLAF057WE51-X, Chefree CH101OLHLWH-002,
    Powertip PH800480T013, KingDisplay KD116N21-30NV-A010

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20200827155517.do6emeacetpturli@gilmour.lan
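For the virtio item above, the sketch below illustrates how an importing driver could recover the host-assigned UUID of a buffer exported by virtio-gpu. It is not part of this merge: the helpers is_virtio_dma_buf() and virtio_dma_buf_get_uuid() come from the virtio shared dma-buf support (<linux/virtio_dma_buf.h>) that this feature builds on, while example_import_virtio_resource() and its fd-based calling convention are hypothetical.

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/uuid.h>
#include <linux/virtio_dma_buf.h>

/*
 * Hypothetical importer: given a dma-buf fd obtained from virtio-gpu's
 * PRIME export path, look up the UUID the host assigned to the underlying
 * resource so another virtio device can reference it by identity.
 */
static int example_import_virtio_resource(int fd, uuid_t *uuid_out)
{
	struct dma_buf *buf;
	int ret;

	buf = dma_buf_get(fd);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Only buffers exported via virtio_dma_buf_export() carry a UUID. */
	if (!is_virtio_dma_buf(buf)) {
		ret = -EINVAL;
		goto out_put;
	}

	/*
	 * For virtio-gpu this ends up in virtgpu_virtio_get_uuid(), which
	 * waits for the VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID response before
	 * copying out the UUID (see virtgpu_prime.c below).
	 */
	ret = virtio_dma_buf_get_uuid(buf, uuid_out);

out_put:
	dma_buf_put(buf);
	return ret;
}

The export side in virtgpu_prime.c queues the RESOURCE_ASSIGN_UUID command at export time, so a get_uuid caller may block briefly until the host reply is processed and the waiters are woken.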
Diffstat (limited to 'drivers/gpu/drm/virtio')
-rw-r--r--  drivers/gpu/drm/virtio/Kconfig            |  1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c  |  2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c      |  3
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h      | 21
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_gem.c      |  3
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c      |  4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c   |  8
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_prime.c    | 96
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c       | 55
9 files changed, 185 insertions, 8 deletions
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index eff3047052d4..67624423013a 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -4,6 +4,7 @@ config DRM_VIRTIO_GPU
depends on DRM && VIRTIO && MMU
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
+ select VIRTIO_DMA_SHARED_BUFFER
help
This is the virtual GPU driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen).
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index af55b334be2f..2c2742b8d657 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -165,8 +165,6 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
if (width == 0 || height == 0) {
- width = XRES_DEF;
- height = YRES_DEF;
drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
} else {
DRM_DEBUG("add mode: %dx%d\n", width, height);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index ab4bed78e656..b039f493bda9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -165,6 +165,7 @@ static unsigned int features[] = {
VIRTIO_GPU_F_VIRGL,
#endif
VIRTIO_GPU_F_EDID,
+ VIRTIO_GPU_F_RESOURCE_UUID,
};
static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
@@ -202,6 +203,8 @@ static struct drm_driver driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_mmap = drm_gem_prime_mmap,
+ .gem_prime_export = virtgpu_gem_prime_export,
+ .gem_prime_import = virtgpu_gem_prime_import,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
.gem_create_object = virtio_gpu_create_object,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 9ff9f4ac0522..9a8bfbf85412 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -49,6 +49,10 @@
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0
+#define UUID_INITIALIZING 0
+#define UUID_INITIALIZED 1
+#define UUID_INITIALIZATION_FAILED 2
+
struct virtio_gpu_object_params {
uint32_t format;
uint32_t width;
@@ -71,6 +75,9 @@ struct virtio_gpu_object {
uint32_t hw_res_handle;
bool dumb;
bool created;
+
+ int uuid_state;
+ uuid_t uuid;
};
#define gem_to_virtio_gpu_obj(gobj) \
container_of((gobj), struct virtio_gpu_object, base.base)
@@ -200,6 +207,7 @@ struct virtio_gpu_device {
bool has_virgl_3d;
bool has_edid;
bool has_indirect;
+ bool has_resource_assign_uuid;
struct work_struct config_changed_work;
@@ -210,6 +218,9 @@ struct virtio_gpu_device {
struct virtio_gpu_drv_capset *capsets;
uint32_t num_capsets;
struct list_head cap_cache;
+
+ /* protects resource state when exporting */
+ spinlock_t resource_export_lock;
};
struct virtio_gpu_fpriv {
@@ -336,6 +347,10 @@ void virtio_gpu_dequeue_fence_func(struct work_struct *work);
void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
+int
+virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs);
+
/* virtgpu_display.c */
void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
@@ -367,6 +382,12 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
/* virtgpu_prime.c */
+struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
+ int flags);
+struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *buf);
+int virtgpu_gem_prime_get_uuid(struct drm_gem_object *obj,
+ uuid_t *uuid);
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 24ffacac99e4..c30c75ee83fc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -154,9 +154,8 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
struct virtio_gpu_object_array *objs;
- size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents;
- objs = kmalloc(size, GFP_KERNEL);
+ objs = kmalloc(struct_size(objs, objs, nents), GFP_KERNEL);
if (!objs)
return NULL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 4d944a0dff3e..75d0dc2f6d28 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -118,6 +118,7 @@ int virtio_gpu_init(struct drm_device *dev)
vgdev->dev = dev->dev;
spin_lock_init(&vgdev->display_info_lock);
+ spin_lock_init(&vgdev->resource_export_lock);
ida_init(&vgdev->ctx_id_ida);
ida_init(&vgdev->resource_ida);
init_waitqueue_head(&vgdev->resp_wq);
@@ -146,6 +147,9 @@ int virtio_gpu_init(struct drm_device *dev)
if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
vgdev->has_indirect = true;
}
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
+ vgdev->has_resource_assign_uuid = true;
+ }
DRM_INFO("features: %cvirgl %cedid\n",
vgdev->has_virgl_3d ? '+' : '-',
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index e83651b7747d..842f8b61aa89 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -151,7 +151,13 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
if (ret < 0)
return -EINVAL;
- shmem->pages = drm_gem_shmem_get_pages_sgt(&bo->base.base);
+ /*
+ * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
+ * drm_gem_shmem_get_pages_sgt because virtio has its own set of
+ * dma-ops. This is discouraged for other drivers, but should be fine
+ * since virtio_gpu doesn't support dma-buf import from other devices.
+ */
+ shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
if (!shmem->pages) {
drm_gem_shmem_unpin(&bo->base.base);
return -EINVAL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 050d24c39a8f..acd14ef73d56 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -23,12 +23,102 @@
*/
#include <drm/drm_prime.h>
+#include <linux/virtio_dma_buf.h>
#include "virtgpu_drv.h"
-/* Empty Implementations as there should not be any other driver for a virtual
- * device that might share buffers with virtgpu
- */
+static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
+ uuid_t *uuid)
+{
+ struct drm_gem_object *obj = buf->priv;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+
+ wait_event(vgdev->resp_wq, bo->uuid_state != UUID_INITIALIZING);
+ if (bo->uuid_state != UUID_INITIALIZED)
+ return -ENODEV;
+
+ uuid_copy(uuid, &bo->uuid);
+
+ return 0;
+}
+
+const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
+ .ops = {
+ .cache_sgt_mapping = true,
+ .attach = virtio_dma_buf_attach,
+ .detach = drm_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .mmap = drm_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+ },
+ .device_attach = drm_gem_map_attach,
+ .get_uuid = virtgpu_virtio_get_uuid,
+};
+
+struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
+ int flags)
+{
+ struct dma_buf *buf;
+ struct drm_device *dev = obj->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_object_array *objs;
+ int ret = 0;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ if (vgdev->has_resource_assign_uuid) {
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return ERR_PTR(-ENOMEM);
+ virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+ ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
+ if (ret)
+ return ERR_PTR(ret);
+ virtio_gpu_notify(vgdev);
+ } else {
+ bo->uuid_state = UUID_INITIALIZATION_FAILED;
+ }
+
+ exp_info.ops = &virtgpu_dmabuf_ops.ops;
+ exp_info.size = obj->size;
+ exp_info.flags = flags;
+ exp_info.priv = obj;
+ exp_info.resv = obj->resv;
+
+ buf = virtio_dma_buf_export(&exp_info);
+ if (IS_ERR(buf))
+ return buf;
+
+ drm_dev_get(dev);
+ drm_gem_object_get(obj);
+
+ return buf;
+}
+
+struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *buf)
+{
+ struct drm_gem_object *obj;
+
+ if (buf->ops == &virtgpu_dmabuf_ops.ops) {
+ obj = buf->priv;
+ if (obj->dev == dev) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
+ drm_gem_object_get(obj);
+ return obj;
+ }
+ }
+
+ return drm_gem_prime_import(dev, buf);
+}
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 53af60d484a4..c93c2db35aaf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -1107,3 +1107,58 @@ void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
memcpy(cur_p, &output->cursor, sizeof(output->cursor));
virtio_gpu_queue_cursor(vgdev, vbuf);
}
+
+static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct virtio_gpu_object *obj =
+ gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
+ struct virtio_gpu_resp_resource_uuid *resp =
+ (struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
+ uint32_t resp_type = le32_to_cpu(resp->hdr.type);
+
+ spin_lock(&vgdev->resource_export_lock);
+ WARN_ON(obj->uuid_state != UUID_INITIALIZING);
+
+ if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
+ obj->uuid_state == UUID_INITIALIZING) {
+ memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
+ obj->uuid_state = UUID_INITIALIZED;
+ } else {
+ obj->uuid_state = UUID_INITIALIZATION_FAILED;
+ }
+ spin_unlock(&vgdev->resource_export_lock);
+
+ wake_up_all(&vgdev->resp_wq);
+}
+
+int
+virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ struct virtio_gpu_resource_assign_uuid *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ struct virtio_gpu_resp_resource_uuid *resp_buf;
+
+ resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
+ if (!resp_buf) {
+ spin_lock(&vgdev->resource_export_lock);
+ bo->uuid_state = UUID_INITIALIZATION_FAILED;
+ spin_unlock(&vgdev->resource_export_lock);
+ virtio_gpu_array_put_free(objs);
+ return -ENOMEM;
+ }
+
+ cmd_p = virtio_gpu_alloc_cmd_resp
+ (vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
+ sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+
+ vbuf->objs = objs;
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ return 0;
+}