author     Linus Torvalds <torvalds@linux-foundation.org>   2018-10-29 20:38:10 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-10-29 20:38:10 +0300
commit     738b04fba18d35cd352b7b15afefb8a7b798648e (patch)
tree       07fabb1a920af5c92bd35e10f9821cf56c8de3e4 /drivers/staging/vboxvideo
parent     fe675d4d3c6b96710d481346821839b4a817c672 (diff)
parent     4ab7e05dd070600833680bd318d6d962f010caa2 (diff)
download   linux-738b04fba18d35cd352b7b15afefb8a7b798648e.tar.xz
Merge tag 'staging-4.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging
Pull staging/IIO driver updates from Greg KH:
 "Here is the big staging and IIO driver pull request for 4.20-rc1.

  There are lots of things here, we ended up adding more lines than
  removing, thanks to a large influx of Comedi National Instrument
  device support. Someday soon we need to get comedi out of staging...

  Other than the comedi drivers, the "big" things here are:

   - new iio drivers

   - delete dgnc driver (no one used it and no one had the hardware
     anymore)

   - vbox driver updates and fixes

   - erofs fixes

   - tons and tons of tiny checkpatch fixes for almost all staging
     drivers

  All of these have been in linux-next, with the last few happening a
  bit "late" due to them getting stuck on my laptop during travel to
  the Maintainers summit"

* tag 'staging-4.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging: (690 commits)
  staging: gasket: Fix sparse "incorrect type in assignment" warnings.
  staging: gasket: remove debug logs for callback invocation
  staging: gasket: remove debug logs in page table mapping calls
  staging: rtl8188eu: core: Use sizeof(*p) instead of sizeof(struct P) for memory allocation
  staging: ks7010: Remove extra blank line
  staging: gasket: Remove extra blank line
  staging: media: davinci_vpfe: Fix spelling mistake in enum
  staging: speakup: Add a pair of braces
  staging: wlan-ng: Replace long int with long
  staging: MAINTAINERS: remove obsolete IPX staging directory
  staging: MAINTAINERS: remove NCP filesystem entry
  staging: rtl8188eu: cleanup comparsions to false
  staging: gasket: Update device virtual address comment
  staging: gasket: sysfs: fix attribute release comment
  staging: gasket: apex: fix sysfs_show
  staging: gasket: page_table: simplify gasket_components_to_dev_address
  staging: gasket: page_table: fix comment in components_to_dev_address
  staging: gasket: page table: fixup error path allocating coherent mem
  staging: gasket: page_table: rearrange gasket_page_table_entry
  staging: gasket: page_table: remove unnecessary PTE status set to free
  ...
Diffstat (limited to 'drivers/staging/vboxvideo')
 -rw-r--r--  drivers/staging/vboxvideo/TODO        |   1
 -rw-r--r--  drivers/staging/vboxvideo/vbox_drv.c  | 165
 -rw-r--r--  drivers/staging/vboxvideo/vbox_drv.h  |  86
 -rw-r--r--  drivers/staging/vboxvideo/vbox_fb.c   | 152
 -rw-r--r--  drivers/staging/vboxvideo/vbox_irq.c  |   8
 -rw-r--r--  drivers/staging/vboxvideo/vbox_main.c | 185
 -rw-r--r--  drivers/staging/vboxvideo/vbox_mode.c | 922
 -rw-r--r--  drivers/staging/vboxvideo/vbox_ttm.c  |  78
 8 files changed, 744 insertions(+), 853 deletions(-)
diff --git a/drivers/staging/vboxvideo/TODO b/drivers/staging/vboxvideo/TODO
index 468eea856ca6..2e0f99c3f10c 100644
--- a/drivers/staging/vboxvideo/TODO
+++ b/drivers/staging/vboxvideo/TODO
@@ -1,5 +1,4 @@
TODO:
--Move the driver over to the atomic API
-Get a full review from the drm-maintainers on dri-devel done on this driver
-Extend this TODO with the results of that review
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c
index 69cc508af1bc..257030460fb6 100644
--- a/drivers/staging/vboxvideo/vbox_drv.c
+++ b/drivers/staging/vboxvideo/vbox_drv.c
@@ -49,139 +49,140 @@ static const struct pci_device_id pciidlist[] = {
};
MODULE_DEVICE_TABLE(pci, pciidlist);
+static struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
+ .fb_probe = vboxfb_create,
+};
+
static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct drm_device *dev = NULL;
+ struct vbox_private *vbox;
int ret = 0;
- dev = drm_dev_alloc(&driver, &pdev->dev);
- if (IS_ERR(dev)) {
- ret = PTR_ERR(dev);
- goto err_drv_alloc;
+ if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
+ return -ENODEV;
+
+ vbox = kzalloc(sizeof(*vbox), GFP_KERNEL);
+ if (!vbox)
+ return -ENOMEM;
+
+ ret = drm_dev_init(&vbox->ddev, &driver, &pdev->dev);
+ if (ret) {
+ kfree(vbox);
+ return ret;
}
+ vbox->ddev.pdev = pdev;
+ vbox->ddev.dev_private = vbox;
+ pci_set_drvdata(pdev, vbox);
+ mutex_init(&vbox->hw_mutex);
+
ret = pci_enable_device(pdev);
if (ret)
- goto err_pci_enable;
-
- dev->pdev = pdev;
- pci_set_drvdata(pdev, dev);
+ goto err_dev_put;
- ret = vbox_driver_load(dev);
+ ret = vbox_hw_init(vbox);
if (ret)
- goto err_vbox_driver_load;
+ goto err_pci_disable;
- ret = drm_dev_register(dev, 0);
+ ret = vbox_mm_init(vbox);
if (ret)
- goto err_drv_dev_register;
-
- return ret;
-
- err_drv_dev_register:
- vbox_driver_unload(dev);
- err_vbox_driver_load:
- pci_disable_device(pdev);
- err_pci_enable:
- drm_dev_put(dev);
- err_drv_alloc:
- return ret;
-}
-
-static void vbox_pci_remove(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
-
- drm_dev_unregister(dev);
- vbox_driver_unload(dev);
- drm_dev_put(dev);
-}
+ goto err_hw_fini;
-static int vbox_drm_freeze(struct drm_device *dev)
-{
- struct vbox_private *vbox = dev->dev_private;
+ ret = vbox_mode_init(vbox);
+ if (ret)
+ goto err_mm_fini;
- drm_kms_helper_poll_disable(dev);
+ ret = vbox_irq_init(vbox);
+ if (ret)
+ goto err_mode_fini;
- pci_save_state(dev->pdev);
+ ret = drm_fb_helper_fbdev_setup(&vbox->ddev, &vbox->fb_helper,
+ &vbox_fb_helper_funcs, 32,
+ vbox->num_crtcs);
+ if (ret)
+ goto err_irq_fini;
- drm_fb_helper_set_suspend_unlocked(&vbox->fbdev->helper, true);
+ ret = drm_dev_register(&vbox->ddev, 0);
+ if (ret)
+ goto err_fbdev_fini;
return 0;
-}
-
-static int vbox_drm_thaw(struct drm_device *dev)
-{
- struct vbox_private *vbox = dev->dev_private;
-
- drm_mode_config_reset(dev);
- drm_helper_resume_force_mode(dev);
- drm_fb_helper_set_suspend_unlocked(&vbox->fbdev->helper, false);
- return 0;
+err_fbdev_fini:
+ vbox_fbdev_fini(vbox);
+err_irq_fini:
+ vbox_irq_fini(vbox);
+err_mode_fini:
+ vbox_mode_fini(vbox);
+err_mm_fini:
+ vbox_mm_fini(vbox);
+err_hw_fini:
+ vbox_hw_fini(vbox);
+err_pci_disable:
+ pci_disable_device(pdev);
+err_dev_put:
+ drm_dev_put(&vbox->ddev);
+ return ret;
}
-static int vbox_drm_resume(struct drm_device *dev)
+static void vbox_pci_remove(struct pci_dev *pdev)
{
- int ret;
-
- if (pci_enable_device(dev->pdev))
- return -EIO;
-
- ret = vbox_drm_thaw(dev);
- if (ret)
- return ret;
-
- drm_kms_helper_poll_enable(dev);
-
- return 0;
+ struct vbox_private *vbox = pci_get_drvdata(pdev);
+
+ drm_dev_unregister(&vbox->ddev);
+ vbox_fbdev_fini(vbox);
+ vbox_irq_fini(vbox);
+ vbox_mode_fini(vbox);
+ vbox_mm_fini(vbox);
+ vbox_hw_fini(vbox);
+ drm_dev_put(&vbox->ddev);
}
static int vbox_pm_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *ddev = pci_get_drvdata(pdev);
+ struct vbox_private *vbox = dev_get_drvdata(dev);
int error;
- error = vbox_drm_freeze(ddev);
+ error = drm_mode_config_helper_suspend(&vbox->ddev);
if (error)
return error;
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
+ pci_save_state(vbox->ddev.pdev);
+ pci_disable_device(vbox->ddev.pdev);
+ pci_set_power_state(vbox->ddev.pdev, PCI_D3hot);
return 0;
}
static int vbox_pm_resume(struct device *dev)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct vbox_private *vbox = dev_get_drvdata(dev);
+
+ if (pci_enable_device(vbox->ddev.pdev))
+ return -EIO;
- return vbox_drm_resume(ddev);
+ return drm_mode_config_helper_resume(&vbox->ddev);
}
static int vbox_pm_freeze(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *ddev = pci_get_drvdata(pdev);
-
- if (!ddev || !ddev->dev_private)
- return -ENODEV;
+ struct vbox_private *vbox = dev_get_drvdata(dev);
- return vbox_drm_freeze(ddev);
+ return drm_mode_config_helper_suspend(&vbox->ddev);
}
static int vbox_pm_thaw(struct device *dev)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct vbox_private *vbox = dev_get_drvdata(dev);
- return vbox_drm_thaw(ddev);
+ return drm_mode_config_helper_resume(&vbox->ddev);
}
static int vbox_pm_poweroff(struct device *dev)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct vbox_private *vbox = dev_get_drvdata(dev);
- return vbox_drm_freeze(ddev);
+ return drm_mode_config_helper_suspend(&vbox->ddev);
}
static const struct dev_pm_ops vbox_pm_ops = {
@@ -259,10 +260,10 @@ static void vbox_master_drop(struct drm_device *dev, struct drm_file *file_priv)
static struct drm_driver driver = {
.driver_features =
DRIVER_MODESET | DRIVER_GEM | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
- DRIVER_PRIME,
+ DRIVER_PRIME | DRIVER_ATOMIC,
.dev_priv_size = 0,
- .lastclose = vbox_driver_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.master_set = vbox_master_set,
.master_drop = vbox_master_drop,
diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h
index 594f84272957..73395a7536c5 100644
--- a/drivers/staging/vboxvideo/vbox_drv.h
+++ b/drivers/staging/vboxvideo/vbox_drv.h
@@ -72,10 +72,16 @@
sizeof(struct hgsmi_host_flags))
#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE
-struct vbox_fbdev;
+struct vbox_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
struct vbox_private {
- struct drm_device *dev;
+ /* Must be first; or we must define our own release callback */
+ struct drm_device ddev;
+ struct drm_fb_helper fb_helper;
+ struct vbox_framebuffer afb;
u8 __iomem *guest_heap;
u8 __iomem *vbva_buffers;
@@ -90,8 +96,6 @@ struct vbox_private {
/** Array of structures for receiving mode hints. */
struct vbva_modehint *last_mode_hints;
- struct vbox_fbdev *fbdev;
-
int fb_mtrr;
struct {
@@ -115,21 +119,12 @@ struct vbox_private {
* encompassing all screen ones or is the fbdev console active?
*/
bool single_framebuffer;
- u32 cursor_width;
- u32 cursor_height;
- u32 cursor_hot_x;
- u32 cursor_hot_y;
- size_t cursor_data_size;
u8 cursor_data[CURSOR_DATA_SIZE];
};
#undef CURSOR_PIXEL_COUNT
#undef CURSOR_DATA_SIZE
-int vbox_driver_load(struct drm_device *dev);
-void vbox_driver_unload(struct drm_device *dev);
-void vbox_driver_lastclose(struct drm_device *dev);
-
struct vbox_gem_object;
struct vbox_connector {
@@ -145,43 +140,51 @@ struct vbox_connector {
struct vbox_crtc {
struct drm_crtc base;
- bool blanked;
bool disconnected;
unsigned int crtc_id;
u32 fb_offset;
bool cursor_enabled;
u32 x_hint;
u32 y_hint;
+ /*
+ * When setting a mode we not only pass the mode to the hypervisor,
+ * but also information on how to map / translate input coordinates
+ * for the emulated USB tablet. This input-mapping may change when
+ * the mode on *another* crtc changes.
+ *
+ * This means that sometimes we must do a modeset on other crtc-s than
+ * the one being changed to update the input-mapping. Including crtc-s
+ * which may be disabled inside the guest (shown as a black window
+ * on the host unless closed by the user).
+ *
+ * With atomic modesetting the mode-info of disabled crtcs gets zeroed
+ * yet we need it when updating the input-map to avoid resizing the
+ * window as a side effect of a mode_set on another crtc. Therefore we
+ * cache the info of the last mode below.
+ */
+ u32 width;
+ u32 height;
+ u32 x;
+ u32 y;
};
struct vbox_encoder {
struct drm_encoder base;
};
-struct vbox_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
-};
-
-struct vbox_fbdev {
- struct drm_fb_helper helper;
- struct vbox_framebuffer afb;
- int size;
- struct ttm_bo_kmap_obj mapping;
- int x1, y1, x2, y2; /* dirty rect */
- spinlock_t dirty_lock;
-};
-
#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base)
-int vbox_mode_init(struct drm_device *dev);
-void vbox_mode_fini(struct drm_device *dev);
+bool vbox_check_supported(u16 id);
+int vbox_hw_init(struct vbox_private *vbox);
+void vbox_hw_fini(struct vbox_private *vbox);
+
+int vbox_mode_init(struct vbox_private *vbox);
+void vbox_mode_fini(struct vbox_private *vbox);
#define DRM_MODE_FB_CMD drm_mode_fb_cmd2
-#define CRTC_FB(crtc) ((crtc)->primary->fb)
void vbox_enable_accel(struct vbox_private *vbox);
void vbox_disable_accel(struct vbox_private *vbox);
@@ -191,14 +194,14 @@ void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
struct drm_clip_rect *rects,
unsigned int num_rects);
-int vbox_framebuffer_init(struct drm_device *dev,
+int vbox_framebuffer_init(struct vbox_private *vbox,
struct vbox_framebuffer *vbox_fb,
const struct DRM_MODE_FB_CMD *mode_cmd,
struct drm_gem_object *obj);
-int vbox_fbdev_init(struct drm_device *dev);
-void vbox_fbdev_fini(struct drm_device *dev);
-void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr);
+int vboxfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+void vbox_fbdev_fini(struct vbox_private *vbox);
struct vbox_bo {
struct ttm_buffer_object bo;
@@ -218,6 +221,11 @@ static inline struct vbox_bo *vbox_bo(struct ttm_buffer_object *bo)
#define to_vbox_obj(x) container_of(x, struct vbox_gem_object, base)
+static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
+{
+ return bo->bo.offset;
+}
+
int vbox_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -232,13 +240,13 @@ int vbox_dumb_mmap_offset(struct drm_file *file,
int vbox_mm_init(struct vbox_private *vbox);
void vbox_mm_fini(struct vbox_private *vbox);
-int vbox_bo_create(struct drm_device *dev, int size, int align,
+int vbox_bo_create(struct vbox_private *vbox, int size, int align,
u32 flags, struct vbox_bo **pvboxbo);
-int vbox_gem_create(struct drm_device *dev,
+int vbox_gem_create(struct vbox_private *vbox,
u32 size, bool iskernel, struct drm_gem_object **obj);
-int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag);
int vbox_bo_unpin(struct vbox_bo *bo);
static inline int vbox_bo_reserve(struct vbox_bo *bo, bool no_wait)
@@ -262,6 +270,8 @@ static inline void vbox_bo_unreserve(struct vbox_bo *bo)
void vbox_ttm_placement(struct vbox_bo *bo, int domain);
int vbox_bo_push_sysram(struct vbox_bo *bo);
int vbox_mmap(struct file *filp, struct vm_area_struct *vma);
+void *vbox_bo_kmap(struct vbox_bo *bo);
+void vbox_bo_kunmap(struct vbox_bo *bo);
/* vbox_prime.c */
int vbox_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/staging/vboxvideo/vbox_fb.c b/drivers/staging/vboxvideo/vbox_fb.c
index 034f8ffa8f20..d1a1f74c8de3 100644
--- a/drivers/staging/vboxvideo/vbox_fb.c
+++ b/drivers/staging/vboxvideo/vbox_fb.c
@@ -66,38 +66,19 @@ static struct fb_ops vboxfb_ops = {
.fb_debug_leave = drm_fb_helper_debug_leave,
};
-static int vboxfb_create_object(struct vbox_fbdev *fbdev,
- struct DRM_MODE_FB_CMD *mode_cmd,
- struct drm_gem_object **gobj_p)
+int vboxfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
- struct drm_device *dev = fbdev->helper.dev;
- u32 size;
- struct drm_gem_object *gobj;
- u32 pitch = mode_cmd->pitches[0];
- int ret;
-
- size = pitch * mode_cmd->height;
- ret = vbox_gem_create(dev, size, true, &gobj);
- if (ret)
- return ret;
-
- *gobj_p = gobj;
-
- return 0;
-}
-
-static int vboxfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct vbox_fbdev *fbdev =
- container_of(helper, struct vbox_fbdev, helper);
- struct drm_device *dev = fbdev->helper.dev;
+ struct vbox_private *vbox =
+ container_of(helper, struct vbox_private, fb_helper);
+ struct pci_dev *pdev = vbox->ddev.pdev;
struct DRM_MODE_FB_CMD mode_cmd;
struct drm_framebuffer *fb;
struct fb_info *info;
struct drm_gem_object *gobj;
struct vbox_bo *bo;
int size, ret;
+ u64 gpu_addr;
u32 pitch;
mode_cmd.width = sizes->surface_width;
@@ -109,45 +90,35 @@ static int vboxfb_create(struct drm_fb_helper *helper,
size = pitch * mode_cmd.height;
- ret = vboxfb_create_object(fbdev, &mode_cmd, &gobj);
+ ret = vbox_gem_create(vbox, size, true, &gobj);
if (ret) {
DRM_ERROR("failed to create fbcon backing object %d\n", ret);
return ret;
}
- ret = vbox_framebuffer_init(dev, &fbdev->afb, &mode_cmd, gobj);
+ ret = vbox_framebuffer_init(vbox, &vbox->afb, &mode_cmd, gobj);
if (ret)
return ret;
bo = gem_to_vbox_bo(gobj);
- ret = vbox_bo_reserve(bo, false);
+ ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM);
if (ret)
return ret;
- ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
- if (ret) {
- vbox_bo_unreserve(bo);
- return ret;
- }
-
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
- vbox_bo_unreserve(bo);
- if (ret) {
- DRM_ERROR("failed to kmap fbcon\n");
- return ret;
- }
-
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info))
- return -PTR_ERR(info);
+ return PTR_ERR(info);
- info->par = fbdev;
+ info->screen_size = size;
+ info->screen_base = (char __iomem *)vbox_bo_kmap(bo);
+ if (IS_ERR(info->screen_base))
+ return PTR_ERR(info->screen_base);
- fbdev->size = size;
+ info->par = helper;
- fb = &fbdev->afb.base;
- fbdev->helper.fb = fb;
+ fb = &vbox->afb.base;
+ helper->fb = fb;
strcpy(info->fix.id, "vboxdrmfb");
@@ -162,15 +133,16 @@ static int vboxfb_create(struct drm_fb_helper *helper,
* This seems to be done for safety checking that the framebuffer
* is not registered twice by different drivers.
*/
- info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
- info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+ info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
+ info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, &fbdev->helper, sizes->fb_width,
+ drm_fb_helper_fill_var(info, helper, sizes->fb_width,
sizes->fb_height);
- info->screen_base = (char __iomem *)bo->kmap.virtual;
- info->screen_size = size;
+ gpu_addr = vbox_bo_gpu_offset(bo);
+ info->fix.smem_start = info->apertures->ranges[0].base + gpu_addr;
+ info->fix.smem_len = vbox->available_vram_size - gpu_addr;
#ifdef CONFIG_DRM_KMS_FB_HELPER
info->fbdefio = &vbox_defio;
@@ -184,86 +156,30 @@ static int vboxfb_create(struct drm_fb_helper *helper,
return 0;
}
-static struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
- .fb_probe = vboxfb_create,
-};
-
-void vbox_fbdev_fini(struct drm_device *dev)
+void vbox_fbdev_fini(struct vbox_private *vbox)
{
- struct vbox_private *vbox = dev->dev_private;
- struct vbox_fbdev *fbdev = vbox->fbdev;
- struct vbox_framebuffer *afb = &fbdev->afb;
+ struct vbox_framebuffer *afb = &vbox->afb;
#ifdef CONFIG_DRM_KMS_FB_HELPER
- if (fbdev->helper.fbdev && fbdev->helper.fbdev->fbdefio)
- fb_deferred_io_cleanup(fbdev->helper.fbdev);
+ if (vbox->fb_helper.fbdev && vbox->fb_helper.fbdev->fbdefio)
+ fb_deferred_io_cleanup(vbox->fb_helper.fbdev);
#endif
- drm_fb_helper_unregister_fbi(&fbdev->helper);
+ drm_fb_helper_unregister_fbi(&vbox->fb_helper);
if (afb->obj) {
struct vbox_bo *bo = gem_to_vbox_bo(afb->obj);
- if (!vbox_bo_reserve(bo, false)) {
- if (bo->kmap.virtual)
- ttm_bo_kunmap(&bo->kmap);
- /*
- * QXL does this, but is it really needed before
- * freeing?
- */
- if (bo->pin_count)
- vbox_bo_unpin(bo);
- vbox_bo_unreserve(bo);
- }
+ vbox_bo_kunmap(bo);
+
+ if (bo->pin_count)
+ vbox_bo_unpin(bo);
+
drm_gem_object_put_unlocked(afb->obj);
afb->obj = NULL;
}
- drm_fb_helper_fini(&fbdev->helper);
+ drm_fb_helper_fini(&vbox->fb_helper);
drm_framebuffer_unregister_private(&afb->base);
drm_framebuffer_cleanup(&afb->base);
}
-
-int vbox_fbdev_init(struct drm_device *dev)
-{
- struct vbox_private *vbox = dev->dev_private;
- struct vbox_fbdev *fbdev;
- int ret;
-
- fbdev = devm_kzalloc(dev->dev, sizeof(*fbdev), GFP_KERNEL);
- if (!fbdev)
- return -ENOMEM;
-
- vbox->fbdev = fbdev;
- spin_lock_init(&fbdev->dirty_lock);
-
- drm_fb_helper_prepare(dev, &fbdev->helper, &vbox_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, &fbdev->helper, vbox->num_crtcs);
- if (ret)
- return ret;
-
- ret = drm_fb_helper_single_add_all_connectors(&fbdev->helper);
- if (ret)
- goto err_fini;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(&fbdev->helper, 32);
- if (ret)
- goto err_fini;
-
- return 0;
-
-err_fini:
- drm_fb_helper_fini(&fbdev->helper);
- return ret;
-}
-
-void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr)
-{
- struct fb_info *fbdev = vbox->fbdev->helper.fbdev;
-
- fbdev->fix.smem_start = fbdev->apertures->ranges[0].base + gpu_addr;
- fbdev->fix.smem_len = vbox->available_vram_size - gpu_addr;
-}
diff --git a/drivers/staging/vboxvideo/vbox_irq.c b/drivers/staging/vboxvideo/vbox_irq.c
index 74abdf02d9fd..09f858ec1369 100644
--- a/drivers/staging/vboxvideo/vbox_irq.c
+++ b/drivers/staging/vboxvideo/vbox_irq.c
@@ -123,7 +123,7 @@ static void validate_or_set_position_hints(struct vbox_private *vbox)
*/
static void vbox_update_mode_hints(struct vbox_private *vbox)
{
- struct drm_device *dev = vbox->dev;
+ struct drm_device *dev = &vbox->ddev;
struct drm_connector *connector;
struct vbox_connector *vbox_conn;
struct vbva_modehint *hints;
@@ -179,7 +179,7 @@ static void vbox_hotplug_worker(struct work_struct *work)
hotplug_work);
vbox_update_mode_hints(vbox);
- drm_kms_helper_hotplug_event(vbox->dev);
+ drm_kms_helper_hotplug_event(&vbox->ddev);
}
int vbox_irq_init(struct vbox_private *vbox)
@@ -187,11 +187,11 @@ int vbox_irq_init(struct vbox_private *vbox)
INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker);
vbox_update_mode_hints(vbox);
- return drm_irq_install(vbox->dev, vbox->dev->pdev->irq);
+ return drm_irq_install(&vbox->ddev, vbox->ddev.pdev->irq);
}
void vbox_irq_fini(struct vbox_private *vbox)
{
- drm_irq_uninstall(vbox->dev);
+ drm_irq_uninstall(&vbox->ddev);
flush_work(&vbox->hotplug_work);
}
diff --git a/drivers/staging/vboxvideo/vbox_main.c b/drivers/staging/vboxvideo/vbox_main.c
index 429f6a453619..7466c1103ff6 100644
--- a/drivers/staging/vboxvideo/vbox_main.c
+++ b/drivers/staging/vboxvideo/vbox_main.c
@@ -102,24 +102,30 @@ void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
unsigned int num_rects)
{
struct vbox_private *vbox = fb->dev->dev_private;
+ struct drm_display_mode *mode;
struct drm_crtc *crtc;
+ int crtc_x, crtc_y;
unsigned int i;
mutex_lock(&vbox->hw_mutex);
list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
- if (CRTC_FB(crtc) != fb)
+ if (crtc->primary->state->fb != fb)
continue;
+ mode = &crtc->state->mode;
+ crtc_x = crtc->primary->state->src_x >> 16;
+ crtc_y = crtc->primary->state->src_y >> 16;
+
vbox_enable_accel(vbox);
for (i = 0; i < num_rects; ++i) {
struct vbva_cmd_hdr cmd_hdr;
unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
- if ((rects[i].x1 > crtc->x + crtc->hwmode.hdisplay) ||
- (rects[i].y1 > crtc->y + crtc->hwmode.vdisplay) ||
- (rects[i].x2 < crtc->x) ||
- (rects[i].y2 < crtc->y))
+ if ((rects[i].x1 > crtc_x + mode->hdisplay) ||
+ (rects[i].y1 > crtc_y + mode->vdisplay) ||
+ (rects[i].x2 < crtc_x) ||
+ (rects[i].y2 < crtc_y))
continue;
cmd_hdr.x = (s16)rects[i].x1;
@@ -155,16 +161,16 @@ static const struct drm_framebuffer_funcs vbox_fb_funcs = {
.dirty = vbox_user_framebuffer_dirty,
};
-int vbox_framebuffer_init(struct drm_device *dev,
+int vbox_framebuffer_init(struct vbox_private *vbox,
struct vbox_framebuffer *vbox_fb,
const struct DRM_MODE_FB_CMD *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
- drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(&vbox->ddev, &vbox_fb->base, mode_cmd);
vbox_fb->obj = obj;
- ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
+ ret = drm_framebuffer_init(&vbox->ddev, &vbox_fb->base, &vbox_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
return ret;
@@ -173,45 +179,11 @@ int vbox_framebuffer_init(struct drm_device *dev,
return 0;
}
-static struct drm_framebuffer *vbox_user_framebuffer_create(
- struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct drm_gem_object *obj;
- struct vbox_framebuffer *vbox_fb;
- int ret = -ENOMEM;
-
- obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (!obj)
- return ERR_PTR(-ENOENT);
-
- vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
- if (!vbox_fb)
- goto err_unref_obj;
-
- ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
- if (ret)
- goto err_free_vbox_fb;
-
- return &vbox_fb->base;
-
-err_free_vbox_fb:
- kfree(vbox_fb);
-err_unref_obj:
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(ret);
-}
-
-static const struct drm_mode_config_funcs vbox_mode_funcs = {
- .fb_create = vbox_user_framebuffer_create,
-};
-
static int vbox_accel_init(struct vbox_private *vbox)
{
unsigned int i;
- vbox->vbva_info = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
+ vbox->vbva_info = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs,
sizeof(*vbox->vbva_info), GFP_KERNEL);
if (!vbox->vbva_info)
return -ENOMEM;
@@ -219,7 +191,7 @@ static int vbox_accel_init(struct vbox_private *vbox)
/* Take a command buffer for each screen from the end of usable VRAM. */
vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
- vbox->vbva_buffers = pci_iomap_range(vbox->dev->pdev, 0,
+ vbox->vbva_buffers = pci_iomap_range(vbox->ddev.pdev, 0,
vbox->available_vram_size,
vbox->num_crtcs *
VBVA_MIN_BUFFER_SIZE);
@@ -238,7 +210,7 @@ static int vbox_accel_init(struct vbox_private *vbox)
static void vbox_accel_fini(struct vbox_private *vbox)
{
vbox_disable_accel(vbox);
- pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers);
+ pci_iounmap(vbox->ddev.pdev, vbox->vbva_buffers);
}
/** Do we support the 4.3 plus mode hint reporting interface? */
@@ -262,7 +234,7 @@ static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
}
-static bool vbox_check_supported(u16 id)
+bool vbox_check_supported(u16 id)
{
u16 dispi_id;
@@ -276,7 +248,7 @@ static bool vbox_check_supported(u16 id)
* Set up our heaps and data exchange buffers in VRAM before handing the rest
* to the memory manager.
*/
-static int vbox_hw_init(struct vbox_private *vbox)
+int vbox_hw_init(struct vbox_private *vbox)
{
int ret = -ENOMEM;
@@ -287,7 +259,7 @@ static int vbox_hw_init(struct vbox_private *vbox)
/* Map guest-heap at end of vram */
vbox->guest_heap =
- pci_iomap_range(vbox->dev->pdev, 0, GUEST_HEAP_OFFSET(vbox),
+ pci_iomap_range(vbox->ddev.pdev, 0, GUEST_HEAP_OFFSET(vbox),
GUEST_HEAP_SIZE);
if (!vbox->guest_heap)
return -ENOMEM;
@@ -322,7 +294,7 @@ static int vbox_hw_init(struct vbox_private *vbox)
goto err_destroy_guest_pool;
}
- vbox->last_mode_hints = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
+ vbox->last_mode_hints = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs,
sizeof(struct vbva_modehint),
GFP_KERNEL);
if (!vbox->last_mode_hints) {
@@ -339,102 +311,18 @@ static int vbox_hw_init(struct vbox_private *vbox)
err_destroy_guest_pool:
gen_pool_destroy(vbox->guest_pool);
err_unmap_guest_heap:
- pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
+ pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
return ret;
}
-static void vbox_hw_fini(struct vbox_private *vbox)
+void vbox_hw_fini(struct vbox_private *vbox)
{
vbox_accel_fini(vbox);
gen_pool_destroy(vbox->guest_pool);
- pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
-}
-
-int vbox_driver_load(struct drm_device *dev)
-{
- struct vbox_private *vbox;
- int ret = 0;
-
- if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
- return -ENODEV;
-
- vbox = devm_kzalloc(dev->dev, sizeof(*vbox), GFP_KERNEL);
- if (!vbox)
- return -ENOMEM;
-
- dev->dev_private = vbox;
- vbox->dev = dev;
-
- mutex_init(&vbox->hw_mutex);
-
- ret = vbox_hw_init(vbox);
- if (ret)
- return ret;
-
- ret = vbox_mm_init(vbox);
- if (ret)
- goto err_hw_fini;
-
- drm_mode_config_init(dev);
-
- dev->mode_config.funcs = (void *)&vbox_mode_funcs;
- dev->mode_config.min_width = 64;
- dev->mode_config.min_height = 64;
- dev->mode_config.preferred_depth = 24;
- dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
- dev->mode_config.max_height = VBE_DISPI_MAX_YRES;
-
- ret = vbox_mode_init(dev);
- if (ret)
- goto err_drm_mode_cleanup;
-
- ret = vbox_irq_init(vbox);
- if (ret)
- goto err_mode_fini;
-
- ret = vbox_fbdev_init(dev);
- if (ret)
- goto err_irq_fini;
-
- return 0;
-
-err_irq_fini:
- vbox_irq_fini(vbox);
-err_mode_fini:
- vbox_mode_fini(dev);
-err_drm_mode_cleanup:
- drm_mode_config_cleanup(dev);
- vbox_mm_fini(vbox);
-err_hw_fini:
- vbox_hw_fini(vbox);
- return ret;
-}
-
-void vbox_driver_unload(struct drm_device *dev)
-{
- struct vbox_private *vbox = dev->dev_private;
-
- vbox_fbdev_fini(dev);
- vbox_irq_fini(vbox);
- vbox_mode_fini(dev);
- drm_mode_config_cleanup(dev);
- vbox_mm_fini(vbox);
- vbox_hw_fini(vbox);
+ pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
}
-/**
- * @note this is described in the DRM framework documentation. AST does not
- * have it, but we get an oops on driver unload if it is not present.
- */
-void vbox_driver_lastclose(struct drm_device *dev)
-{
- struct vbox_private *vbox = dev->dev_private;
-
- if (vbox->fbdev)
- drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
-}
-
-int vbox_gem_create(struct drm_device *dev,
+int vbox_gem_create(struct vbox_private *vbox,
u32 size, bool iskernel, struct drm_gem_object **obj)
{
struct vbox_bo *vboxbo;
@@ -446,7 +334,7 @@ int vbox_gem_create(struct drm_device *dev,
if (size == 0)
return -EINVAL;
- ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
+ ret = vbox_bo_create(vbox, size, 0, 0, &vboxbo);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("failed to allocate GEM object\n");
@@ -461,14 +349,16 @@ int vbox_gem_create(struct drm_device *dev,
int vbox_dumb_create(struct drm_file *file,
struct drm_device *dev, struct drm_mode_create_dumb *args)
{
- int ret;
+ struct vbox_private *vbox =
+ container_of(dev, struct vbox_private, ddev);
struct drm_gem_object *gobj;
u32 handle;
+ int ret;
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
- ret = vbox_gem_create(dev, args->size, false, &gobj);
+ ret = vbox_gem_create(vbox, args->size, false, &gobj);
if (ret)
return ret;
@@ -482,24 +372,11 @@ int vbox_dumb_create(struct drm_file *file,
return 0;
}
-static void vbox_bo_unref(struct vbox_bo **bo)
-{
- struct ttm_buffer_object *tbo;
-
- if ((*bo) == NULL)
- return;
-
- tbo = &((*bo)->bo);
- ttm_bo_unref(&tbo);
- if (!tbo)
- *bo = NULL;
-}
-
void vbox_gem_free_object(struct drm_gem_object *obj)
{
struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);
- vbox_bo_unref(&vbox_bo);
+ ttm_bo_put(&vbox_bo->bo);
}
static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
index 79836c8fb909..6acc965247ff 100644
--- a/drivers/staging/vboxvideo/vbox_mode.c
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -32,25 +32,22 @@
* Hans de Goede <hdegoede@redhat.com>
*/
#include <linux/export.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
#include "vbox_drv.h"
#include "vboxvideo.h"
#include "hgsmi_channels.h"
-static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
- u32 handle, u32 width, u32 height,
- s32 hot_x, s32 hot_y);
-static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y);
-
/**
* Set a graphics mode. Poke any required values into registers, do an HGSMI
* mode set and tell the host we support advanced graphics functions.
*/
-static void vbox_do_modeset(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
+static void vbox_do_modeset(struct drm_crtc *crtc)
{
+ struct drm_framebuffer *fb = crtc->primary->state->fb;
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
struct vbox_private *vbox;
int width, height, bpp, pitch;
@@ -58,12 +55,12 @@ static void vbox_do_modeset(struct drm_crtc *crtc,
s32 x_offset, y_offset;
vbox = crtc->dev->dev_private;
- width = mode->hdisplay ? mode->hdisplay : 640;
- height = mode->vdisplay ? mode->vdisplay : 480;
- bpp = crtc->enabled ? CRTC_FB(crtc)->format->cpp[0] * 8 : 32;
- pitch = crtc->enabled ? CRTC_FB(crtc)->pitches[0] : width * bpp / 8;
- x_offset = vbox->single_framebuffer ? crtc->x : vbox_crtc->x_hint;
- y_offset = vbox->single_framebuffer ? crtc->y : vbox_crtc->y_hint;
+ width = vbox_crtc->width ? vbox_crtc->width : 640;
+ height = vbox_crtc->height ? vbox_crtc->height : 480;
+ bpp = fb ? fb->format->cpp[0] * 8 : 32;
+ pitch = fb ? fb->pitches[0] : width * bpp / 8;
+ x_offset = vbox->single_framebuffer ? vbox_crtc->x : vbox_crtc->x_hint;
+ y_offset = vbox->single_framebuffer ? vbox_crtc->y : vbox_crtc->y_hint;
/*
* This is the old way of setting graphics modes. It assumed one screen
@@ -71,31 +68,29 @@ static void vbox_do_modeset(struct drm_crtc *crtc,
* VirtualBox, certain parts of the code still assume that the first
* screen is programmed this way, so try to fake it.
*/
- if (vbox_crtc->crtc_id == 0 && crtc->enabled &&
+ if (vbox_crtc->crtc_id == 0 && fb &&
vbox_crtc->fb_offset / pitch < 0xffff - crtc->y &&
vbox_crtc->fb_offset % (bpp / 8) == 0) {
vbox_write_ioport(VBE_DISPI_INDEX_XRES, width);
vbox_write_ioport(VBE_DISPI_INDEX_YRES, height);
vbox_write_ioport(VBE_DISPI_INDEX_VIRT_WIDTH, pitch * 8 / bpp);
- vbox_write_ioport(VBE_DISPI_INDEX_BPP,
- CRTC_FB(crtc)->format->cpp[0] * 8);
+ vbox_write_ioport(VBE_DISPI_INDEX_BPP, bpp);
vbox_write_ioport(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED);
vbox_write_ioport(
VBE_DISPI_INDEX_X_OFFSET,
- vbox_crtc->fb_offset % pitch / bpp * 8 + crtc->x);
+ vbox_crtc->fb_offset % pitch / bpp * 8 + vbox_crtc->x);
vbox_write_ioport(VBE_DISPI_INDEX_Y_OFFSET,
- vbox_crtc->fb_offset / pitch + crtc->y);
+ vbox_crtc->fb_offset / pitch + vbox_crtc->y);
}
flags = VBVA_SCREEN_F_ACTIVE;
- flags |= (crtc->enabled && !vbox_crtc->blanked) ?
- 0 : VBVA_SCREEN_F_BLANK;
+ flags |= (fb && crtc->state->active) ? 0 : VBVA_SCREEN_F_BLANK;
flags |= vbox_crtc->disconnected ? VBVA_SCREEN_F_DISABLED : 0;
hgsmi_process_display_info(vbox->guest_pool, vbox_crtc->crtc_id,
x_offset, y_offset,
- crtc->x * bpp / 8 + crtc->y * pitch,
- pitch, width, height,
- vbox_crtc->blanked ? 0 : bpp, flags);
+ vbox_crtc->x * bpp / 8 +
+ vbox_crtc->y * pitch,
+ pitch, width, height, bpp, flags);
}
static int vbox_set_view(struct drm_crtc *crtc)
@@ -132,34 +127,6 @@ static int vbox_set_view(struct drm_crtc *crtc)
return 0;
}
-static void vbox_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
- struct vbox_private *vbox = crtc->dev->dev_private;
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- vbox_crtc->blanked = false;
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- vbox_crtc->blanked = true;
- break;
- }
-
- mutex_lock(&vbox->hw_mutex);
- vbox_do_modeset(crtc, &crtc->hwmode);
- mutex_unlock(&vbox->hw_mutex);
-}
-
-static bool vbox_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
/*
* Try to map the layout of virtual screens to the range of the input device.
* Return true if we need to re-set the crtc modes due to screen offset
@@ -169,7 +136,7 @@ static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
{
struct drm_crtc *crtci;
struct drm_connector *connectori;
- struct drm_framebuffer *fb1 = NULL;
+ struct drm_framebuffer *fb, *fb1 = NULL;
bool single_framebuffer = true;
bool old_single_framebuffer = vbox->single_framebuffer;
u16 width = 0, height = 0;
@@ -179,30 +146,30 @@ static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
* If so then screen layout can be deduced from the crtc offsets.
* Same fall-back if this is the fbdev frame-buffer.
*/
- list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head) {
+ list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, head) {
+ fb = crtci->primary->state->fb;
+ if (!fb)
+ continue;
+
if (!fb1) {
- fb1 = CRTC_FB(crtci);
- if (to_vbox_framebuffer(fb1) == &vbox->fbdev->afb)
+ fb1 = fb;
+ if (to_vbox_framebuffer(fb1) == &vbox->afb)
break;
- } else if (CRTC_FB(crtci) && fb1 != CRTC_FB(crtci)) {
+ } else if (fb != fb1) {
single_framebuffer = false;
}
}
- if (single_framebuffer) {
- list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
- head) {
- if (to_vbox_crtc(crtci)->crtc_id != 0)
- continue;
+ if (!fb1)
+ return false;
- vbox->single_framebuffer = true;
- vbox->input_mapping_width = CRTC_FB(crtci)->width;
- vbox->input_mapping_height = CRTC_FB(crtci)->height;
- return old_single_framebuffer !=
- vbox->single_framebuffer;
- }
+ if (single_framebuffer) {
+ vbox->single_framebuffer = true;
+ vbox->input_mapping_width = fb1->width;
+ vbox->input_mapping_height = fb1->height;
+ return old_single_framebuffer != vbox->single_framebuffer;
}
/* Otherwise calculate the total span of all screens. */
- list_for_each_entry(connectori, &vbox->dev->mode_config.connector_list,
+ list_for_each_entry(connectori, &vbox->ddev.mode_config.connector_list,
head) {
struct vbox_connector *vbox_connector =
to_vbox_connector(connectori);
@@ -221,180 +188,462 @@ static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
return old_single_framebuffer != vbox->single_framebuffer;
}
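A worked example of the span calculation in vbox_set_up_input_mapping() above (illustrative numbers, not taken from the patch): two guest screens running 1024x768 side by side span 2048x768 in total, so input_mapping_width becomes 2048 and input_mapping_height 768. That rectangle is what later gets passed to hgsmi_update_input_mapping(), letting the host translate absolute coordinates from the emulated USB tablet onto the combined desktop.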
-static int vbox_crtc_do_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *old_fb,
- struct drm_framebuffer *new_fb,
- int x, int y)
+static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_display_mode *mode,
+ int x, int y)
{
+ struct vbox_bo *bo = gem_to_vbox_bo(to_vbox_framebuffer(fb)->obj);
struct vbox_private *vbox = crtc->dev->dev_private;
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
- struct drm_gem_object *obj;
- struct vbox_framebuffer *vbox_fb;
- struct vbox_bo *bo;
- int ret;
- u64 gpu_addr;
-
- /* Unpin the previous fb. */
- if (old_fb) {
- vbox_fb = to_vbox_framebuffer(old_fb);
- obj = vbox_fb->obj;
- bo = gem_to_vbox_bo(obj);
- ret = vbox_bo_reserve(bo, false);
- if (ret)
- return ret;
+ bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state);
+
+ mutex_lock(&vbox->hw_mutex);
- vbox_bo_unpin(bo);
- vbox_bo_unreserve(bo);
+ vbox_crtc->width = mode->hdisplay;
+ vbox_crtc->height = mode->vdisplay;
+ vbox_crtc->x = x;
+ vbox_crtc->y = y;
+ vbox_crtc->fb_offset = vbox_bo_gpu_offset(bo);
+
+ /* vbox_do_modeset() checks vbox->single_framebuffer so update it now */
+ if (needs_modeset && vbox_set_up_input_mapping(vbox)) {
+ struct drm_crtc *crtci;
+
+ list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list,
+ head) {
+ if (crtci == crtc)
+ continue;
+ vbox_do_modeset(crtci);
+ }
}
- vbox_fb = to_vbox_framebuffer(new_fb);
- obj = vbox_fb->obj;
- bo = gem_to_vbox_bo(obj);
+ vbox_set_view(crtc);
+ vbox_do_modeset(crtc);
- ret = vbox_bo_reserve(bo, false);
- if (ret)
- return ret;
+ if (needs_modeset)
+ hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
+ vbox->input_mapping_width,
+ vbox->input_mapping_height);
- ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
- if (ret) {
- vbox_bo_unreserve(bo);
- return ret;
+ mutex_unlock(&vbox->hw_mutex);
+}
+
+static void vbox_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+}
+
+static void vbox_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+}
+
+static void vbox_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ if (crtc->state && crtc->state->event) {
+ event = crtc->state->event;
+ crtc->state->event = NULL;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
+}
- if (&vbox->fbdev->afb == vbox_fb)
- vbox_fbdev_set_base(vbox, gpu_addr);
- vbox_bo_unreserve(bo);
+static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
+ .atomic_enable = vbox_crtc_atomic_enable,
+ .atomic_disable = vbox_crtc_atomic_disable,
+ .atomic_flush = vbox_crtc_atomic_flush,
+};
- /* vbox_set_start_address_crt1(crtc, (u32)gpu_addr); */
- vbox_crtc->fb_offset = gpu_addr;
- if (vbox_set_up_input_mapping(vbox)) {
- struct drm_crtc *crtci;
+static void vbox_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
- list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
- head) {
- vbox_set_view(crtc);
- vbox_do_modeset(crtci, &crtci->mode);
- }
+static const struct drm_crtc_funcs vbox_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ /* .gamma_set = vbox_crtc_gamma_set, */
+ .destroy = vbox_crtc_destroy,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static int vbox_primary_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_crtc_state *crtc_state = NULL;
+
+ if (new_state->crtc) {
+ crtc_state = drm_atomic_get_existing_crtc_state(
+ new_state->state, new_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
}
- return 0;
+ return drm_atomic_helper_check_plane_state(new_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
}
-static int vbox_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static void vbox_primary_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
- return vbox_crtc_do_set_base(crtc, old_fb, CRTC_FB(crtc), x, y);
+ struct drm_crtc *crtc = plane->state->crtc;
+ struct drm_framebuffer *fb = plane->state->fb;
+
+ vbox_crtc_set_base_and_mode(crtc, fb, &crtc->state->mode,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16);
}
-static int vbox_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y, struct drm_framebuffer *old_fb)
+static void vbox_primary_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
- struct vbox_private *vbox = crtc->dev->dev_private;
+ struct drm_crtc *crtc = old_state->crtc;
+
+ /* vbox_do_modeset checks plane->state->fb and will disable if NULL */
+ vbox_crtc_set_base_and_mode(crtc, old_state->fb, &crtc->state->mode,
+ old_state->src_x >> 16,
+ old_state->src_y >> 16);
+}
+
+static int vbox_primary_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct vbox_bo *bo;
int ret;
- vbox_crtc_mode_set_base(crtc, x, y, old_fb);
+ if (!new_state->fb)
+ return 0;
- mutex_lock(&vbox->hw_mutex);
- ret = vbox_set_view(crtc);
- if (!ret)
- vbox_do_modeset(crtc, mode);
- hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
- vbox->input_mapping_width,
- vbox->input_mapping_height);
- mutex_unlock(&vbox->hw_mutex);
+ bo = gem_to_vbox_bo(to_vbox_framebuffer(new_state->fb)->obj);
+ ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ DRM_WARN("Error %d pinning new fb, out of video mem?\n", ret);
return ret;
}
-static int vbox_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags,
- struct drm_modeset_acquire_ctx *ctx)
+static void vbox_primary_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
- struct vbox_private *vbox = crtc->dev->dev_private;
- struct drm_device *drm = vbox->dev;
- unsigned long flags;
- int rc;
+ struct vbox_bo *bo;
- rc = vbox_crtc_do_set_base(crtc, CRTC_FB(crtc), fb, 0, 0);
- if (rc)
- return rc;
+ if (!old_state->fb)
+ return;
- mutex_lock(&vbox->hw_mutex);
- vbox_set_view(crtc);
- vbox_do_modeset(crtc, &crtc->mode);
- mutex_unlock(&vbox->hw_mutex);
+ bo = gem_to_vbox_bo(to_vbox_framebuffer(old_state->fb)->obj);
+ vbox_bo_unpin(bo);
+}
- spin_lock_irqsave(&drm->event_lock, flags);
+static int vbox_cursor_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_crtc_state *crtc_state = NULL;
+ u32 width = new_state->crtc_w;
+ u32 height = new_state->crtc_h;
+ int ret;
- if (event)
- drm_crtc_send_vblank_event(crtc, event);
+ if (new_state->crtc) {
+ crtc_state = drm_atomic_get_existing_crtc_state(
+ new_state->state, new_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+ }
- spin_unlock_irqrestore(&drm->event_lock, flags);
+ ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+ if (ret)
+ return ret;
+
+ if (!new_state->fb)
+ return 0;
+
+ if (width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT ||
+ width == 0 || height == 0)
+ return -EINVAL;
return 0;
}
-static void vbox_crtc_disable(struct drm_crtc *crtc)
+/**
+ * Copy the ARGB image and generate the mask, which is needed in case the host
+ * does not support ARGB cursors. The mask is a 1BPP bitmap with the bit set
+ * if the corresponding alpha value in the ARGB image is greater than 0xF0.
+ */
+static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height,
+ size_t mask_size)
{
+ size_t line_size = (width + 7) / 8;
+ u32 i, j;
+
+ memcpy(dst + mask_size, src, width * height * 4);
+ for (i = 0; i < height; ++i)
+ for (j = 0; j < width; ++j)
+ if (((u32 *)src)[i * width + j] > 0xf0000000)
+ dst[i * line_size + j / 8] |= (0x80 >> (j % 8));
}
-static void vbox_crtc_prepare(struct drm_crtc *crtc)
+static void vbox_cursor_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
+ struct vbox_private *vbox =
+ container_of(plane->dev, struct vbox_private, ddev);
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(plane->state->crtc);
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct vbox_bo *bo = gem_to_vbox_bo(to_vbox_framebuffer(fb)->obj);
+ u32 width = plane->state->crtc_w;
+ u32 height = plane->state->crtc_h;
+ size_t data_size, mask_size;
+ u32 flags;
+ u8 *src;
+
+ /*
+ * VirtualBox uses the host windowing system to draw the cursor so
+ * moves are a no-op, we only need to upload new cursor sprites.
+ */
+ if (fb == old_state->fb)
+ return;
+
+ mutex_lock(&vbox->hw_mutex);
+
+ vbox_crtc->cursor_enabled = true;
+
+ /* pinning is done in prepare/cleanup framebuffer */
+ src = vbox_bo_kmap(bo);
+ if (IS_ERR(src)) {
+ mutex_unlock(&vbox->hw_mutex);
+ DRM_WARN("Could not kmap cursor bo, skipping update\n");
+ return;
+ }
+
+ /*
+ * The mask must be calculated based on the alpha
+ * channel, one bit per ARGB word, and must be 32-bit
+ * padded.
+ */
+ mask_size = ((width + 7) / 8 * height + 3) & ~3;
+ data_size = width * height * 4 + mask_size;
+
+ copy_cursor_image(src, vbox->cursor_data, width, height, mask_size);
+ vbox_bo_kunmap(bo);
+
+ flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
+ VBOX_MOUSE_POINTER_ALPHA;
+ hgsmi_update_pointer_shape(vbox->guest_pool, flags,
+ min_t(u32, max(fb->hot_x, 0), width),
+ min_t(u32, max(fb->hot_y, 0), height),
+ width, height, vbox->cursor_data, data_size);
+
+ mutex_unlock(&vbox->hw_mutex);
}
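To attach concrete numbers to the mask and size computation above (illustration only): for a 64x64 ARGB cursor, each mask row takes (64 + 7) / 8 = 8 bytes, so mask_size = (8 * 64 + 3) & ~3 = 512 bytes and data_size = 64 * 64 * 4 + 512 = 16896 bytes. The buffer handed to hgsmi_update_pointer_shape() is therefore the 1bpp mask followed by the ARGB pixel data, as laid out by copy_cursor_image().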
-static void vbox_crtc_commit(struct drm_crtc *crtc)
+static void vbox_cursor_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
-}
+ struct vbox_private *vbox =
+ container_of(plane->dev, struct vbox_private, ddev);
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(old_state->crtc);
+ bool cursor_enabled = false;
+ struct drm_crtc *crtci;
-static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
- .dpms = vbox_crtc_dpms,
- .mode_fixup = vbox_crtc_mode_fixup,
- .mode_set = vbox_crtc_mode_set,
- /* .mode_set_base = vbox_crtc_mode_set_base, */
- .disable = vbox_crtc_disable,
- .prepare = vbox_crtc_prepare,
- .commit = vbox_crtc_commit,
-};
+ mutex_lock(&vbox->hw_mutex);
-static void vbox_crtc_reset(struct drm_crtc *crtc)
+ vbox_crtc->cursor_enabled = false;
+
+ list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, head) {
+ if (to_vbox_crtc(crtci)->cursor_enabled)
+ cursor_enabled = true;
+ }
+
+ if (!cursor_enabled)
+ hgsmi_update_pointer_shape(vbox->guest_pool, 0, 0, 0,
+ 0, 0, NULL, 0);
+
+ mutex_unlock(&vbox->hw_mutex);
+}
+
+static int vbox_cursor_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
{
+ struct vbox_bo *bo;
+
+ if (!new_state->fb)
+ return 0;
+
+ bo = gem_to_vbox_bo(to_vbox_framebuffer(new_state->fb)->obj);
+ return vbox_bo_pin(bo, TTM_PL_FLAG_SYSTEM);
}
-static void vbox_crtc_destroy(struct drm_crtc *crtc)
+static void vbox_cursor_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
- drm_crtc_cleanup(crtc);
- kfree(crtc);
+ struct vbox_bo *bo;
+
+ if (!plane->state->fb)
+ return;
+
+ bo = gem_to_vbox_bo(to_vbox_framebuffer(plane->state->fb)->obj);
+ vbox_bo_unpin(bo);
}
-static const struct drm_crtc_funcs vbox_crtc_funcs = {
- .cursor_move = vbox_cursor_move,
- .cursor_set2 = vbox_cursor_set2,
- .reset = vbox_crtc_reset,
- .set_config = drm_crtc_helper_set_config,
- /* .gamma_set = vbox_crtc_gamma_set, */
- .page_flip = vbox_crtc_page_flip,
- .destroy = vbox_crtc_destroy,
+static const uint32_t vbox_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
};
+static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = {
+ .atomic_check = vbox_cursor_atomic_check,
+ .atomic_update = vbox_cursor_atomic_update,
+ .atomic_disable = vbox_cursor_atomic_disable,
+ .prepare_fb = vbox_cursor_prepare_fb,
+ .cleanup_fb = vbox_cursor_cleanup_fb,
+};
+
+static const struct drm_plane_funcs vbox_cursor_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_primary_helper_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static const uint32_t vbox_primary_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+};
+
+static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = {
+ .atomic_check = vbox_primary_atomic_check,
+ .atomic_update = vbox_primary_atomic_update,
+ .atomic_disable = vbox_primary_atomic_disable,
+ .prepare_fb = vbox_primary_prepare_fb,
+ .cleanup_fb = vbox_primary_cleanup_fb,
+};
+
+static const struct drm_plane_funcs vbox_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_primary_helper_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static struct drm_plane *vbox_create_plane(struct vbox_private *vbox,
+ unsigned int possible_crtcs,
+ enum drm_plane_type type)
+{
+ const struct drm_plane_helper_funcs *helper_funcs = NULL;
+ const struct drm_plane_funcs *funcs;
+ struct drm_plane *plane;
+ const uint32_t *formats;
+ int num_formats;
+ int err;
+
+ if (type == DRM_PLANE_TYPE_PRIMARY) {
+ funcs = &vbox_primary_plane_funcs;
+ formats = vbox_primary_plane_formats;
+ helper_funcs = &vbox_primary_helper_funcs;
+ num_formats = ARRAY_SIZE(vbox_primary_plane_formats);
+ } else if (type == DRM_PLANE_TYPE_CURSOR) {
+ funcs = &vbox_cursor_plane_funcs;
+ formats = vbox_cursor_plane_formats;
+ helper_funcs = &vbox_cursor_helper_funcs;
+ num_formats = ARRAY_SIZE(vbox_cursor_plane_formats);
+ } else {
+ return ERR_PTR(-EINVAL);
+ }
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ err = drm_universal_plane_init(&vbox->ddev, plane, possible_crtcs,
+ funcs, formats, num_formats,
+ NULL, type, NULL);
+ if (err)
+ goto free_plane;
+
+ drm_plane_helper_add(plane, helper_funcs);
+
+ return plane;
+
+free_plane:
+ kfree(plane);
+ return ERR_PTR(-EINVAL);
+}
+
static struct vbox_crtc *vbox_crtc_init(struct drm_device *dev, unsigned int i)
{
+ struct vbox_private *vbox =
+ container_of(dev, struct vbox_private, ddev);
+ struct drm_plane *cursor = NULL;
struct vbox_crtc *vbox_crtc;
+ struct drm_plane *primary;
+ u32 caps = 0;
+ int ret;
+
+ ret = hgsmi_query_conf(vbox->guest_pool,
+ VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps);
+ if (ret)
+ return ERR_PTR(ret);
vbox_crtc = kzalloc(sizeof(*vbox_crtc), GFP_KERNEL);
if (!vbox_crtc)
- return NULL;
+ return ERR_PTR(-ENOMEM);
+
+ primary = vbox_create_plane(vbox, 1 << i, DRM_PLANE_TYPE_PRIMARY);
+ if (IS_ERR(primary)) {
+ ret = PTR_ERR(primary);
+ goto free_mem;
+ }
+
+ if ((caps & VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE)) {
+ cursor = vbox_create_plane(vbox, 1 << i, DRM_PLANE_TYPE_CURSOR);
+ if (IS_ERR(cursor)) {
+ ret = PTR_ERR(cursor);
+ goto clean_primary;
+ }
+ } else {
+ DRM_WARN("VirtualBox host is too old, no cursor support\n");
+ }
vbox_crtc->crtc_id = i;
- drm_crtc_init(dev, &vbox_crtc->base, &vbox_crtc_funcs);
+ ret = drm_crtc_init_with_planes(dev, &vbox_crtc->base, primary, cursor,
+ &vbox_crtc_funcs, NULL);
+ if (ret)
+ goto clean_cursor;
+
drm_mode_crtc_set_gamma_size(&vbox_crtc->base, 256);
drm_crtc_helper_add(&vbox_crtc->base, &vbox_crtc_helper_funcs);
return vbox_crtc;
+
+clean_cursor:
+ if (cursor) {
+ drm_plane_cleanup(cursor);
+ kfree(cursor);
+ }
+clean_primary:
+ drm_plane_cleanup(primary);
+ kfree(primary);
+free_mem:
+ kfree(vbox_crtc);
+ return ERR_PTR(ret);
}
static void vbox_encoder_destroy(struct drm_encoder *encoder)
@@ -403,55 +652,10 @@ static void vbox_encoder_destroy(struct drm_encoder *encoder)
kfree(encoder);
}
-static struct drm_encoder *vbox_best_single_encoder(struct drm_connector
- *connector)
-{
- int enc_id = connector->encoder_ids[0];
-
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
-
- return NULL;
-}
-
static const struct drm_encoder_funcs vbox_enc_funcs = {
.destroy = vbox_encoder_destroy,
};
-static void vbox_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-}
-
-static bool vbox_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static void vbox_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-}
-
-static void vbox_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void vbox_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static const struct drm_encoder_helper_funcs vbox_enc_helper_funcs = {
- .dpms = vbox_encoder_dpms,
- .mode_fixup = vbox_mode_fixup,
- .prepare = vbox_encoder_prepare,
- .commit = vbox_encoder_commit,
- .mode_set = vbox_encoder_mode_set,
-};
-
static struct drm_encoder *vbox_encoder_init(struct drm_device *dev,
unsigned int i)
{
@@ -463,7 +667,6 @@ static struct drm_encoder *vbox_encoder_init(struct drm_device *dev,
drm_encoder_init(dev, &vbox_encoder->base, &vbox_enc_funcs,
DRM_MODE_ENCODER_DAC, NULL);
- drm_encoder_helper_add(&vbox_encoder->base, &vbox_enc_helper_funcs);
vbox_encoder->base.possible_crtcs = 1 << i;
return &vbox_encoder->base;
@@ -589,29 +792,23 @@ static int vbox_get_modes(struct drm_connector *connector)
if (vbox_connector->vbox_crtc->x_hint != -1)
drm_object_property_set_value(&connector->base,
- vbox->dev->mode_config.suggested_x_property,
+ vbox->ddev.mode_config.suggested_x_property,
vbox_connector->vbox_crtc->x_hint);
else
drm_object_property_set_value(&connector->base,
- vbox->dev->mode_config.suggested_x_property, 0);
+ vbox->ddev.mode_config.suggested_x_property, 0);
if (vbox_connector->vbox_crtc->y_hint != -1)
drm_object_property_set_value(&connector->base,
- vbox->dev->mode_config.suggested_y_property,
+ vbox->ddev.mode_config.suggested_y_property,
vbox_connector->vbox_crtc->y_hint);
else
drm_object_property_set_value(&connector->base,
- vbox->dev->mode_config.suggested_y_property, 0);
+ vbox->ddev.mode_config.suggested_y_property, 0);
return num_modes;
}
-static enum drm_mode_status vbox_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
static void vbox_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
@@ -648,16 +845,16 @@ static int vbox_fill_modes(struct drm_connector *connector, u32 max_x,
}
static const struct drm_connector_helper_funcs vbox_connector_helper_funcs = {
- .mode_valid = vbox_mode_valid,
.get_modes = vbox_get_modes,
- .best_encoder = vbox_best_single_encoder,
};
static const struct drm_connector_funcs vbox_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
.detect = vbox_connector_detect,
.fill_modes = vbox_fill_modes,
.destroy = vbox_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int vbox_connector_init(struct drm_device *dev,
@@ -686,225 +883,92 @@ static int vbox_connector_init(struct drm_device *dev,
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
- drm_connector_register(connector);
drm_connector_attach_encoder(connector, encoder);
return 0;
}
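
The suggested_x/suggested_y properties attached in the context above are the standard DRM position hints for multi-monitor guests; they must already exist on the device. The call site is not in this hunk, but the properties are normally created once during mode-config setup with the core helper:

	/*
	 * Created once during mode-config setup (call site not shown here);
	 * without it the suggested_x/y property pointers attached above
	 * would be NULL.
	 */
	ret = drm_mode_create_suggested_offset_properties(dev);
	if (ret)
		return ret;
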
-int vbox_mode_init(struct drm_device *dev)
+static struct drm_framebuffer *vbox_user_framebuffer_create(
+ struct drm_device *dev,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct vbox_private *vbox = dev->dev_private;
- struct drm_encoder *encoder;
- struct vbox_crtc *vbox_crtc;
- unsigned int i;
- int ret;
+ struct vbox_private *vbox =
+ container_of(dev, struct vbox_private, ddev);
+ struct drm_gem_object *obj;
+ struct vbox_framebuffer *vbox_fb;
+ int ret = -ENOMEM;
- /* vbox_cursor_init(dev); */
- for (i = 0; i < vbox->num_crtcs; ++i) {
- vbox_crtc = vbox_crtc_init(dev, i);
- if (!vbox_crtc)
- return -ENOMEM;
- encoder = vbox_encoder_init(dev, i);
- if (!encoder)
- return -ENOMEM;
- ret = vbox_connector_init(dev, vbox_crtc, encoder);
- if (ret)
- return ret;
- }
+ obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
+ if (!obj)
+ return ERR_PTR(-ENOENT);
- return 0;
-}
+ vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
+ if (!vbox_fb)
+ goto err_unref_obj;
-void vbox_mode_fini(struct drm_device *dev)
-{
- /* vbox_cursor_fini(dev); */
-}
+ ret = vbox_framebuffer_init(vbox, vbox_fb, mode_cmd, obj);
+ if (ret)
+ goto err_free_vbox_fb;
-/**
- * Copy the ARGB image and generate the mask, which is needed in case the host
- * does not support ARGB cursors. The mask is a 1BPP bitmap with the bit set
- * if the corresponding alpha value in the ARGB image is greater than 0xF0.
- */
-static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height,
- size_t mask_size)
-{
- size_t line_size = (width + 7) / 8;
- u32 i, j;
+ return &vbox_fb->base;
- memcpy(dst + mask_size, src, width * height * 4);
- for (i = 0; i < height; ++i)
- for (j = 0; j < width; ++j)
- if (((u32 *)src)[i * width + j] > 0xf0000000)
- dst[i * line_size + j / 8] |= (0x80 >> (j % 8));
+err_free_vbox_fb:
+ kfree(vbox_fb);
+err_unref_obj:
+ drm_gem_object_put_unlocked(obj);
+ return ERR_PTR(ret);
}
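
Note the reference-counting contract above: the GEM reference taken by drm_gem_object_lookup() is kept by the framebuffer on success and dropped on every failure path. The vbox_framebuffer_init() helper it calls is outside this hunk; a helper of that shape usually amounts to the following sketch, where the funcs table name and the obj field are assumptions rather than driver code:

	/* Sketch, not the driver's vbox_framebuffer_init(). */
	static int sketch_framebuffer_init(struct vbox_private *vbox,
					   struct vbox_framebuffer *vbox_fb,
					   const struct drm_mode_fb_cmd2 *mode_cmd,
					   struct drm_gem_object *obj)
	{
		int ret;

		drm_helper_mode_fill_fb_struct(&vbox->ddev, &vbox_fb->base, mode_cmd);
		vbox_fb->obj = obj;	/* assumes the fb struct stores its GEM object */
		ret = drm_framebuffer_init(&vbox->ddev, &vbox_fb->base,
					   &sketch_fb_funcs);
		if (ret)
			DRM_ERROR("framebuffer init failed: %d\n", ret);
		return ret;
	}
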
-static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
- u32 handle, u32 width, u32 height,
- s32 hot_x, s32 hot_y)
+static const struct drm_mode_config_funcs vbox_mode_funcs = {
+ .fb_create = vbox_user_framebuffer_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+int vbox_mode_init(struct vbox_private *vbox)
{
- struct vbox_private *vbox = crtc->dev->dev_private;
- struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
- struct ttm_bo_kmap_obj uobj_map;
- size_t data_size, mask_size;
- struct drm_gem_object *obj;
- u32 flags, caps = 0;
- struct vbox_bo *bo;
- bool src_isiomem;
- u8 *dst = NULL;
- u8 *src;
+ struct drm_device *dev = &vbox->ddev;
+ struct drm_encoder *encoder;
+ struct vbox_crtc *vbox_crtc;
+ unsigned int i;
int ret;
- /*
- * Re-set this regularly as in 5.0.20 and earlier the information was
- * lost on save and restore.
- */
- hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
- vbox->input_mapping_width,
- vbox->input_mapping_height);
- if (!handle) {
- bool cursor_enabled = false;
- struct drm_crtc *crtci;
-
- /* Hide cursor. */
- vbox_crtc->cursor_enabled = false;
- list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
- head) {
- if (to_vbox_crtc(crtci)->cursor_enabled)
- cursor_enabled = true;
- }
-
- if (!cursor_enabled)
- hgsmi_update_pointer_shape(vbox->guest_pool, 0, 0, 0,
- 0, 0, NULL, 0);
- return 0;
- }
-
- vbox_crtc->cursor_enabled = true;
-
- if (width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT ||
- width == 0 || height == 0)
- return -EINVAL;
-
- ret = hgsmi_query_conf(vbox->guest_pool,
- VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps);
- if (ret)
- return ret;
-
- if (!(caps & VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE)) {
- /*
- * -EINVAL means cursor_set2() not supported, -EAGAIN means
- * retry at once.
- */
- return -EBUSY;
- }
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj) {
- DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
- return -ENOENT;
- }
+ drm_mode_config_init(dev);
- bo = gem_to_vbox_bo(obj);
- ret = vbox_bo_reserve(bo, false);
- if (ret)
- goto out_unref_obj;
+ dev->mode_config.funcs = (void *)&vbox_mode_funcs;
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
+ dev->mode_config.max_height = VBE_DISPI_MAX_YRES;
- /*
- * The mask must be calculated based on the alpha
- * channel, one bit per ARGB word, and must be 32-bit
- * padded.
- */
- mask_size = ((width + 7) / 8 * height + 3) & ~3;
- data_size = width * height * 4 + mask_size;
- vbox->cursor_hot_x = min_t(u32, max(hot_x, 0), width);
- vbox->cursor_hot_y = min_t(u32, max(hot_y, 0), height);
- vbox->cursor_width = width;
- vbox->cursor_height = height;
- vbox->cursor_data_size = data_size;
- dst = vbox->cursor_data;
-
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
- if (ret) {
- vbox->cursor_data_size = 0;
- goto out_unreserve_bo;
- }
-
- src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
- if (src_isiomem) {
- DRM_ERROR("src cursor bo not in main memory\n");
- ret = -EIO;
- goto out_unmap_bo;
+ for (i = 0; i < vbox->num_crtcs; ++i) {
+ vbox_crtc = vbox_crtc_init(dev, i);
+ if (IS_ERR(vbox_crtc)) {
+ ret = PTR_ERR(vbox_crtc);
+ goto err_drm_mode_cleanup;
+ }
+ encoder = vbox_encoder_init(dev, i);
+ if (!encoder) {
+ ret = -ENOMEM;
+ goto err_drm_mode_cleanup;
+ }
+ ret = vbox_connector_init(dev, vbox_crtc, encoder);
+ if (ret)
+ goto err_drm_mode_cleanup;
}
- copy_cursor_image(src, dst, width, height, mask_size);
-
- flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
- VBOX_MOUSE_POINTER_ALPHA;
- ret = hgsmi_update_pointer_shape(vbox->guest_pool, flags,
- vbox->cursor_hot_x, vbox->cursor_hot_y,
- width, height, dst, data_size);
-out_unmap_bo:
- ttm_bo_kunmap(&uobj_map);
-out_unreserve_bo:
- vbox_bo_unreserve(bo);
-out_unref_obj:
- drm_gem_object_put_unlocked(obj);
+ drm_mode_config_reset(dev);
+ return 0;
+err_drm_mode_cleanup:
+ drm_mode_config_cleanup(dev);
return ret;
}
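
With the atomic conversion, vbox_mode_init() now owns the whole mode-config lifecycle: drm_mode_config_init(), the size/depth limits, per-CRTC object creation, and a final drm_mode_config_reset() so every plane, CRTC and connector starts with a valid atomic state. The expected ordering from the probe path looks roughly like the sketch below; the surrounding helper names and labels are assumptions, not part of this hunk.

	/* Rough probe-time ordering; error handling trimmed for brevity. */
	ret = vbox_mode_init(vbox);		/* mode_config + CRTCs/encoders/connectors */
	if (ret)
		goto err_hw_fini;		/* assumed earlier-stage label */

	ret = vbox_fbdev_init(vbox);		/* assumed fbdev setup helper */
	if (ret)
		goto err_mode_fini;

	ret = drm_dev_register(&vbox->ddev, 0);	/* expose the device to userspace last */
	if (ret)
		goto err_fbdev_fini;
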
-static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y)
+void vbox_mode_fini(struct vbox_private *vbox)
{
- struct vbox_private *vbox = crtc->dev->dev_private;
- u32 flags = VBOX_MOUSE_POINTER_VISIBLE |
- VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA;
- s32 crtc_x =
- vbox->single_framebuffer ? crtc->x : to_vbox_crtc(crtc)->x_hint;
- s32 crtc_y =
- vbox->single_framebuffer ? crtc->y : to_vbox_crtc(crtc)->y_hint;
- u32 host_x, host_y;
- u32 hot_x = 0;
- u32 hot_y = 0;
- int ret;
-
- /*
- * We compare these to unsigned later and don't
- * need to handle negative.
- */
- if (x + crtc_x < 0 || y + crtc_y < 0 || vbox->cursor_data_size == 0)
- return 0;
-
- ret = hgsmi_cursor_position(vbox->guest_pool, true, x + crtc_x,
- y + crtc_y, &host_x, &host_y);
-
- /*
- * The only reason we have vbox_cursor_move() is that some older clients
- * might use DRM_IOCTL_MODE_CURSOR instead of DRM_IOCTL_MODE_CURSOR2 and
- * use DRM_MODE_CURSOR_MOVE to set the hot-spot.
- *
- * However VirtualBox 5.0.20 and earlier has a bug causing it to return
- * 0,0 as host cursor location after a save and restore.
- *
- * To work around this we ignore a 0, 0 return, since missing the odd
- * time when it legitimately happens is not going to hurt much.
- */
- if (ret || (host_x == 0 && host_y == 0))
- return ret;
-
- if (x + crtc_x < host_x)
- hot_x = min(host_x - x - crtc_x, vbox->cursor_width);
- if (y + crtc_y < host_y)
- hot_y = min(host_y - y - crtc_y, vbox->cursor_height);
-
- if (hot_x == vbox->cursor_hot_x && hot_y == vbox->cursor_hot_y)
- return 0;
-
- vbox->cursor_hot_x = hot_x;
- vbox->cursor_hot_y = hot_y;
-
- return hgsmi_update_pointer_shape(vbox->guest_pool, flags,
- hot_x, hot_y, vbox->cursor_width, vbox->cursor_height,
- vbox->cursor_data, vbox->cursor_data_size);
+ drm_mode_config_cleanup(&vbox->ddev);
}
diff --git a/drivers/staging/vboxvideo/vbox_ttm.c b/drivers/staging/vboxvideo/vbox_ttm.c
index 548edb7c494b..5ecfa7629173 100644
--- a/drivers/staging/vboxvideo/vbox_ttm.c
+++ b/drivers/staging/vboxvideo/vbox_ttm.c
@@ -169,7 +169,7 @@ static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
return 0;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
+ mem->bus.base = pci_resource_start(vbox->ddev.pdev, 0);
mem->bus.is_iomem = true;
break;
default:
@@ -224,7 +224,7 @@ static struct ttm_bo_driver vbox_bo_driver = {
int vbox_mm_init(struct vbox_private *vbox)
{
int ret;
- struct drm_device *dev = vbox->dev;
+ struct drm_device *dev = &vbox->ddev;
struct ttm_bo_device *bdev = &vbox->ttm.bdev;
ret = vbox_ttm_global_init(vbox);
@@ -269,8 +269,8 @@ void vbox_mm_fini(struct vbox_private *vbox)
{
#ifdef DRM_MTRR_WC
drm_mtrr_del(vbox->fb_mtrr,
- pci_resource_start(vbox->dev->pdev, 0),
- pci_resource_len(vbox->dev->pdev, 0), DRM_MTRR_WC);
+ pci_resource_start(vbox->ddev.pdev, 0),
+ pci_resource_len(vbox->ddev.pdev, 0), DRM_MTRR_WC);
#else
arch_phys_wc_del(vbox->fb_mtrr);
#endif
@@ -305,10 +305,9 @@ void vbox_ttm_placement(struct vbox_bo *bo, int domain)
}
}
-int vbox_bo_create(struct drm_device *dev, int size, int align,
+int vbox_bo_create(struct vbox_private *vbox, int size, int align,
u32 flags, struct vbox_bo **pvboxbo)
{
- struct vbox_private *vbox = dev->dev_private;
struct vbox_bo *vboxbo;
size_t acc_size;
int ret;
@@ -317,7 +316,7 @@ int vbox_bo_create(struct drm_device *dev, int size, int align,
if (!vboxbo)
return -ENOMEM;
- ret = drm_gem_object_init(dev, &vboxbo->gem, size);
+ ret = drm_gem_object_init(&vbox->ddev, &vboxbo->gem, size);
if (ret)
goto err_free_vboxbo;
@@ -344,39 +343,32 @@ err_free_vboxbo:
return ret;
}
-static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
-{
- return bo->bo.offset;
-}
-
-int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
+int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag)
{
struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (bo->pin_count) {
bo->pin_count++;
- if (gpu_addr)
- *gpu_addr = vbox_bo_gpu_offset(bo);
-
return 0;
}
+ ret = vbox_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
vbox_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
- if (ret)
- return ret;
-
- bo->pin_count = 1;
+ if (ret == 0)
+ bo->pin_count = 1;
- if (gpu_addr)
- *gpu_addr = vbox_bo_gpu_offset(bo);
+ vbox_bo_unreserve(bo);
- return 0;
+ return ret;
}
int vbox_bo_unpin(struct vbox_bo *bo)
@@ -392,14 +384,20 @@ int vbox_bo_unpin(struct vbox_bo *bo)
if (bo->pin_count)
return 0;
+ ret = vbox_bo_reserve(bo, false);
+ if (ret) {
+ DRM_ERROR("Error %d reserving bo, leaving it pinned\n", ret);
+ return ret;
+ }
+
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
- if (ret)
- return ret;
- return 0;
+ vbox_bo_unreserve(bo);
+
+ return ret;
}
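
vbox_bo_pin() and vbox_bo_unpin() now reserve and unreserve the buffer object internally, and pin no longer reports a GPU offset. A before/after sketch of a caller, based on the old signature removed above (reading bo->bo.offset directly is shown only as an illustration of what vbox_bo_gpu_offset() used to return):

	/* Old-style caller: reserve around pin and read the offset back. */
	ret = vbox_bo_reserve(bo, false);
	if (ret)
		return ret;
	ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
	vbox_bo_unreserve(bo);

	/* New-style caller: pin handles reservation; offset read separately if needed. */
	ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM);
	if (ret)
		return ret;
	gpu_addr = bo->bo.offset;
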
/*
@@ -420,8 +418,10 @@ int vbox_bo_push_sysram(struct vbox_bo *bo)
if (bo->pin_count)
return 0;
- if (bo->kmap.virtual)
+ if (bo->kmap.virtual) {
ttm_bo_kunmap(&bo->kmap);
+ bo->kmap.virtual = NULL;
+ }
vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
@@ -450,3 +450,27 @@ int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
}
+
+void *vbox_bo_kmap(struct vbox_bo *bo)
+{
+ int ret;
+
+ if (bo->kmap.virtual)
+ return bo->kmap.virtual;
+
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("Error kmapping bo: %d\n", ret);
+ return NULL;
+ }
+
+ return bo->kmap.virtual;
+}
+
+void vbox_bo_kunmap(struct vbox_bo *bo)
+{
+ if (bo->kmap.virtual) {
+ ttm_bo_kunmap(&bo->kmap);
+ bo->kmap.virtual = NULL;
+ }
+}
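
The new vbox_bo_kmap()/vbox_bo_kunmap() helpers cache the mapping in bo->kmap, so repeated kmap calls are cheap, and vbox_bo_push_sysram() above now invalidates the same cache before evicting the buffer. Illustrative usage only, assuming the buffer has already been pinned:

	u8 *vmap;

	vmap = vbox_bo_kmap(bo);	/* first call maps, later calls reuse bo->kmap */
	if (!vmap)
		return -ENOMEM;

	/* e.g. clear the scanout buffer */
	memset(vmap, 0, bo->bo.num_pages << PAGE_SHIFT);

	/* No per-use unmap needed; vbox_bo_kunmap() drops the cached mapping later. */
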