-rw-r--r--  drivers/accel/qaic/Makefile                            1
-rw-r--r--  drivers/accel/qaic/mhi_qaic_ctrl.c                   569
-rw-r--r--  drivers/accel/qaic/mhi_qaic_ctrl.h                    12
-rw-r--r--  drivers/accel/qaic/qaic_drv.c                         10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c               13
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h                2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c                 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c            174
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.h             20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c              27
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.h               3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c           2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c            7
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.c             17
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c              21
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h                        4
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c          4
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt35950.c         10
-rw-r--r--  drivers/gpu/drm/panel/panel-orisetech-otm8009a.c       2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c                        30
20 files changed, 185 insertions, 745 deletions
diff --git a/drivers/accel/qaic/Makefile b/drivers/accel/qaic/Makefile
index d5f4952ae79a..2418418f7a50 100644
--- a/drivers/accel/qaic/Makefile
+++ b/drivers/accel/qaic/Makefile
@@ -7,7 +7,6 @@ obj-$(CONFIG_DRM_ACCEL_QAIC) := qaic.o
qaic-y := \
mhi_controller.o \
- mhi_qaic_ctrl.o \
qaic_control.o \
qaic_data.o \
qaic_drv.o
diff --git a/drivers/accel/qaic/mhi_qaic_ctrl.c b/drivers/accel/qaic/mhi_qaic_ctrl.c
deleted file mode 100644
index 0c7e571f1f12..000000000000
--- a/drivers/accel/qaic/mhi_qaic_ctrl.c
+++ /dev/null
@@ -1,569 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
-
-#include <linux/kernel.h>
-#include <linux/mhi.h>
-#include <linux/mod_devicetable.h>
-#include <linux/module.h>
-#include <linux/poll.h>
-#include <linux/xarray.h>
-#include <uapi/linux/eventpoll.h>
-
-#include "mhi_qaic_ctrl.h"
-#include "qaic.h"
-
-#define MHI_QAIC_CTRL_DRIVER_NAME "mhi_qaic_ctrl"
-#define MHI_QAIC_CTRL_MAX_MINORS 128
-#define MHI_MAX_MTU 0xffff
-static DEFINE_XARRAY_ALLOC(mqc_xa);
-static struct class *mqc_dev_class;
-static int mqc_dev_major;
-
-/**
- * struct mqc_buf - Buffer structure used to receive data from device
- * @data: Address of data to read from
- * @odata: Original address returned from *alloc() API. Used to free this buf.
- * @len: Length of data in byte
- * @node: This buffer will be part of list managed in struct mqc_dev
- */
-struct mqc_buf {
- void *data;
- void *odata;
- size_t len;
- struct list_head node;
-};
-
-/**
- * struct mqc_dev - MHI QAIC Control Device
- * @minor: MQC device node minor number
- * @mhi_dev: Associated mhi device object
- * @mtu: Max TRE buffer length
- * @enabled: Flag to track the state of the MQC device
- * @lock: Mutex lock to serialize access to open_count
- * @read_lock: Mutex lock to serialize readers
- * @write_lock: Mutex lock to serialize writers
- * @ul_wq: Wait queue for writers
- * @dl_wq: Wait queue for readers
- * @dl_queue_lock: Spin lock to serialize access to download queue
- * @dl_queue: Queue of downloaded buffers
- * @open_count: Track open counts
- * @ref_count: Reference count for this structure
- */
-struct mqc_dev {
- u32 minor;
- struct mhi_device *mhi_dev;
- size_t mtu;
- bool enabled;
- struct mutex lock;
- struct mutex read_lock;
- struct mutex write_lock;
- wait_queue_head_t ul_wq;
- wait_queue_head_t dl_wq;
- spinlock_t dl_queue_lock;
- struct list_head dl_queue;
- unsigned int open_count;
- struct kref ref_count;
-};
-
-static void mqc_dev_release(struct kref *ref)
-{
- struct mqc_dev *mqcdev = container_of(ref, struct mqc_dev, ref_count);
-
- mutex_destroy(&mqcdev->read_lock);
- mutex_destroy(&mqcdev->write_lock);
- mutex_destroy(&mqcdev->lock);
- kfree(mqcdev);
-}
-
-static int mhi_qaic_ctrl_fill_dl_queue(struct mqc_dev *mqcdev)
-{
- struct mhi_device *mhi_dev = mqcdev->mhi_dev;
- struct mqc_buf *ctrlbuf;
- int rx_budget;
- int ret = 0;
- void *data;
-
- rx_budget = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
- if (rx_budget < 0)
- return -EIO;
-
- while (rx_budget--) {
- data = kzalloc(mqcdev->mtu + sizeof(*ctrlbuf), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- ctrlbuf = data + mqcdev->mtu;
- ctrlbuf->odata = data;
-
- ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, data, mqcdev->mtu, MHI_EOT);
- if (ret) {
- kfree(data);
- dev_err(&mhi_dev->dev, "Failed to queue buffer\n");
- return ret;
- }
- }
-
- return ret;
-}
-
-static int mhi_qaic_ctrl_dev_start_chan(struct mqc_dev *mqcdev)
-{
- struct device *dev = &mqcdev->mhi_dev->dev;
- int ret = 0;
-
- ret = mutex_lock_interruptible(&mqcdev->lock);
- if (ret)
- return ret;
- if (!mqcdev->enabled) {
- ret = -ENODEV;
- goto release_dev_lock;
- }
- if (!mqcdev->open_count) {
- ret = mhi_prepare_for_transfer(mqcdev->mhi_dev);
- if (ret) {
- dev_err(dev, "Error starting transfer channels\n");
- goto release_dev_lock;
- }
-
- ret = mhi_qaic_ctrl_fill_dl_queue(mqcdev);
- if (ret) {
- dev_err(dev, "Error filling download queue.\n");
- goto mhi_unprepare;
- }
- }
- mqcdev->open_count++;
- mutex_unlock(&mqcdev->lock);
-
- return 0;
-
-mhi_unprepare:
- mhi_unprepare_from_transfer(mqcdev->mhi_dev);
-release_dev_lock:
- mutex_unlock(&mqcdev->lock);
- return ret;
-}
-
-static struct mqc_dev *mqc_dev_get_by_minor(unsigned int minor)
-{
- struct mqc_dev *mqcdev;
-
- xa_lock(&mqc_xa);
- mqcdev = xa_load(&mqc_xa, minor);
- if (mqcdev)
- kref_get(&mqcdev->ref_count);
- xa_unlock(&mqc_xa);
-
- return mqcdev;
-}
-
-static int mhi_qaic_ctrl_open(struct inode *inode, struct file *filp)
-{
- struct mqc_dev *mqcdev;
- int ret;
-
- mqcdev = mqc_dev_get_by_minor(iminor(inode));
- if (!mqcdev) {
- pr_debug("mqc: minor %d not found\n", iminor(inode));
- return -EINVAL;
- }
-
- ret = mhi_qaic_ctrl_dev_start_chan(mqcdev);
- if (ret) {
- kref_put(&mqcdev->ref_count, mqc_dev_release);
- return ret;
- }
-
- filp->private_data = mqcdev;
-
- return 0;
-}
-
-static void mhi_qaic_ctrl_buf_free(struct mqc_buf *ctrlbuf)
-{
- list_del(&ctrlbuf->node);
- kfree(ctrlbuf->odata);
-}
-
-static void __mhi_qaic_ctrl_release(struct mqc_dev *mqcdev)
-{
- struct mqc_buf *ctrlbuf, *tmp;
-
- mhi_unprepare_from_transfer(mqcdev->mhi_dev);
- wake_up_interruptible(&mqcdev->ul_wq);
- wake_up_interruptible(&mqcdev->dl_wq);
- /*
- * Free the dl_queue. As we have already unprepared mhi transfers, we
- * do not expect any callback functions that update dl_queue hence no need
- * to grab dl_queue lock.
- */
- mutex_lock(&mqcdev->read_lock);
- list_for_each_entry_safe(ctrlbuf, tmp, &mqcdev->dl_queue, node)
- mhi_qaic_ctrl_buf_free(ctrlbuf);
- mutex_unlock(&mqcdev->read_lock);
-}
-
-static int mhi_qaic_ctrl_release(struct inode *inode, struct file *file)
-{
- struct mqc_dev *mqcdev = file->private_data;
-
- mutex_lock(&mqcdev->lock);
- mqcdev->open_count--;
- if (!mqcdev->open_count && mqcdev->enabled)
- __mhi_qaic_ctrl_release(mqcdev);
- mutex_unlock(&mqcdev->lock);
-
- kref_put(&mqcdev->ref_count, mqc_dev_release);
-
- return 0;
-}
-
-static __poll_t mhi_qaic_ctrl_poll(struct file *file, poll_table *wait)
-{
- struct mqc_dev *mqcdev = file->private_data;
- struct mhi_device *mhi_dev;
- __poll_t mask = 0;
-
- mhi_dev = mqcdev->mhi_dev;
-
- poll_wait(file, &mqcdev->ul_wq, wait);
- poll_wait(file, &mqcdev->dl_wq, wait);
-
- mutex_lock(&mqcdev->lock);
- if (!mqcdev->enabled) {
- mutex_unlock(&mqcdev->lock);
- return EPOLLERR;
- }
-
- spin_lock_bh(&mqcdev->dl_queue_lock);
- if (!list_empty(&mqcdev->dl_queue))
- mask |= EPOLLIN | EPOLLRDNORM;
- spin_unlock_bh(&mqcdev->dl_queue_lock);
-
- if (mutex_lock_interruptible(&mqcdev->write_lock)) {
- mutex_unlock(&mqcdev->lock);
- return EPOLLERR;
- }
- if (mhi_get_free_desc_count(mhi_dev, DMA_TO_DEVICE) > 0)
- mask |= EPOLLOUT | EPOLLWRNORM;
- mutex_unlock(&mqcdev->write_lock);
- mutex_unlock(&mqcdev->lock);
-
- dev_dbg(&mhi_dev->dev, "Client attempted to poll, returning mask 0x%x\n", mask);
-
- return mask;
-}
-
-static int mhi_qaic_ctrl_tx(struct mqc_dev *mqcdev)
-{
- int ret;
-
- ret = wait_event_interruptible(mqcdev->ul_wq, !mqcdev->enabled ||
- mhi_get_free_desc_count(mqcdev->mhi_dev, DMA_TO_DEVICE) > 0);
-
- if (!mqcdev->enabled)
- return -ENODEV;
-
- return ret;
-}
-
-static ssize_t mhi_qaic_ctrl_write(struct file *file, const char __user *buf, size_t count,
- loff_t *offp)
-{
- struct mqc_dev *mqcdev = file->private_data;
- struct mhi_device *mhi_dev;
- size_t bytes_xfered = 0;
- struct device *dev;
- int ret, nr_desc;
-
- mhi_dev = mqcdev->mhi_dev;
- dev = &mhi_dev->dev;
-
- if (!mhi_dev->ul_chan)
- return -EOPNOTSUPP;
-
- if (!buf || !count)
- return -EINVAL;
-
- dev_dbg(dev, "Request to transfer %zu bytes\n", count);
-
- ret = mhi_qaic_ctrl_tx(mqcdev);
- if (ret)
- return ret;
-
- if (mutex_lock_interruptible(&mqcdev->write_lock))
- return -EINTR;
-
- nr_desc = mhi_get_free_desc_count(mhi_dev, DMA_TO_DEVICE);
- if (nr_desc * mqcdev->mtu < count) {
- ret = -EMSGSIZE;
- dev_dbg(dev, "Buffer too big to transfer\n");
- goto unlock_mutex;
- }
-
- while (count != bytes_xfered) {
- enum mhi_flags flags;
- size_t to_copy;
- void *kbuf;
-
- to_copy = min_t(size_t, count - bytes_xfered, mqcdev->mtu);
- kbuf = kmalloc(to_copy, GFP_KERNEL);
- if (!kbuf) {
- ret = -ENOMEM;
- goto unlock_mutex;
- }
-
- ret = copy_from_user(kbuf, buf + bytes_xfered, to_copy);
- if (ret) {
- kfree(kbuf);
- ret = -EFAULT;
- goto unlock_mutex;
- }
-
- if (bytes_xfered + to_copy == count)
- flags = MHI_EOT;
- else
- flags = MHI_CHAIN;
-
- ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, kbuf, to_copy, flags);
- if (ret) {
- kfree(kbuf);
- dev_err(dev, "Failed to queue buf of size %zu\n", to_copy);
- goto unlock_mutex;
- }
-
- bytes_xfered += to_copy;
- }
-
- mutex_unlock(&mqcdev->write_lock);
- dev_dbg(dev, "bytes xferred: %zu\n", bytes_xfered);
-
- return bytes_xfered;
-
-unlock_mutex:
- mutex_unlock(&mqcdev->write_lock);
- return ret;
-}
-
-static int mhi_qaic_ctrl_rx(struct mqc_dev *mqcdev)
-{
- int ret;
-
- ret = wait_event_interruptible(mqcdev->dl_wq,
- !mqcdev->enabled || !list_empty(&mqcdev->dl_queue));
-
- if (!mqcdev->enabled)
- return -ENODEV;
-
- return ret;
-}
-
-static ssize_t mhi_qaic_ctrl_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
-{
- struct mqc_dev *mqcdev = file->private_data;
- struct mqc_buf *ctrlbuf;
- size_t to_copy;
- int ret;
-
- if (!mqcdev->mhi_dev->dl_chan)
- return -EOPNOTSUPP;
-
- ret = mhi_qaic_ctrl_rx(mqcdev);
- if (ret)
- return ret;
-
- if (mutex_lock_interruptible(&mqcdev->read_lock))
- return -EINTR;
-
- ctrlbuf = list_first_entry_or_null(&mqcdev->dl_queue, struct mqc_buf, node);
- if (!ctrlbuf) {
- mutex_unlock(&mqcdev->read_lock);
- ret = -ENODEV;
- goto error_out;
- }
-
- to_copy = min_t(size_t, count, ctrlbuf->len);
- if (copy_to_user(buf, ctrlbuf->data, to_copy)) {
- mutex_unlock(&mqcdev->read_lock);
- dev_dbg(&mqcdev->mhi_dev->dev, "Failed to copy data to user buffer\n");
- ret = -EFAULT;
- goto error_out;
- }
-
- ctrlbuf->len -= to_copy;
- ctrlbuf->data += to_copy;
-
- if (!ctrlbuf->len) {
- spin_lock_bh(&mqcdev->dl_queue_lock);
- mhi_qaic_ctrl_buf_free(ctrlbuf);
- spin_unlock_bh(&mqcdev->dl_queue_lock);
- mhi_qaic_ctrl_fill_dl_queue(mqcdev);
- dev_dbg(&mqcdev->mhi_dev->dev, "Read buf freed\n");
- }
-
- mutex_unlock(&mqcdev->read_lock);
- return to_copy;
-
-error_out:
- mutex_unlock(&mqcdev->read_lock);
- return ret;
-}
-
-static const struct file_operations mhidev_fops = {
- .owner = THIS_MODULE,
- .open = mhi_qaic_ctrl_open,
- .release = mhi_qaic_ctrl_release,
- .read = mhi_qaic_ctrl_read,
- .write = mhi_qaic_ctrl_write,
- .poll = mhi_qaic_ctrl_poll,
-};
-
-static void mhi_qaic_ctrl_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
-{
- struct mqc_dev *mqcdev = dev_get_drvdata(&mhi_dev->dev);
-
- dev_dbg(&mhi_dev->dev, "%s: status: %d xfer_len: %zu\n", __func__,
- mhi_result->transaction_status, mhi_result->bytes_xferd);
-
- kfree(mhi_result->buf_addr);
-
- if (!mhi_result->transaction_status)
- wake_up_interruptible(&mqcdev->ul_wq);
-}
-
-static void mhi_qaic_ctrl_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
-{
- struct mqc_dev *mqcdev = dev_get_drvdata(&mhi_dev->dev);
- struct mqc_buf *ctrlbuf;
-
- dev_dbg(&mhi_dev->dev, "%s: status: %d receive_len: %zu\n", __func__,
- mhi_result->transaction_status, mhi_result->bytes_xferd);
-
- if (mhi_result->transaction_status &&
- mhi_result->transaction_status != -EOVERFLOW) {
- kfree(mhi_result->buf_addr);
- return;
- }
-
- ctrlbuf = mhi_result->buf_addr + mqcdev->mtu;
- ctrlbuf->data = mhi_result->buf_addr;
- ctrlbuf->len = mhi_result->bytes_xferd;
- spin_lock_bh(&mqcdev->dl_queue_lock);
- list_add_tail(&ctrlbuf->node, &mqcdev->dl_queue);
- spin_unlock_bh(&mqcdev->dl_queue_lock);
-
- wake_up_interruptible(&mqcdev->dl_wq);
-}
-
-static int mhi_qaic_ctrl_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
-{
- struct mqc_dev *mqcdev;
- struct device *dev;
- int ret;
-
- mqcdev = kzalloc(sizeof(*mqcdev), GFP_KERNEL);
- if (!mqcdev)
- return -ENOMEM;
-
- kref_init(&mqcdev->ref_count);
- mutex_init(&mqcdev->lock);
- mqcdev->mhi_dev = mhi_dev;
-
- ret = xa_alloc(&mqc_xa, &mqcdev->minor, mqcdev, XA_LIMIT(0, MHI_QAIC_CTRL_MAX_MINORS),
- GFP_KERNEL);
- if (ret) {
- kfree(mqcdev);
- return ret;
- }
-
- init_waitqueue_head(&mqcdev->ul_wq);
- init_waitqueue_head(&mqcdev->dl_wq);
- mutex_init(&mqcdev->read_lock);
- mutex_init(&mqcdev->write_lock);
- spin_lock_init(&mqcdev->dl_queue_lock);
- INIT_LIST_HEAD(&mqcdev->dl_queue);
- mqcdev->mtu = min_t(size_t, id->driver_data, MHI_MAX_MTU);
- mqcdev->enabled = true;
- mqcdev->open_count = 0;
- dev_set_drvdata(&mhi_dev->dev, mqcdev);
-
- dev = device_create(mqc_dev_class, &mhi_dev->dev, MKDEV(mqc_dev_major, mqcdev->minor),
- mqcdev, "%s", dev_name(&mhi_dev->dev));
- if (IS_ERR(dev)) {
- xa_erase(&mqc_xa, mqcdev->minor);
- dev_set_drvdata(&mhi_dev->dev, NULL);
- kfree(mqcdev);
- return PTR_ERR(dev);
- }
-
- return 0;
-};
-
-static void mhi_qaic_ctrl_remove(struct mhi_device *mhi_dev)
-{
- struct mqc_dev *mqcdev = dev_get_drvdata(&mhi_dev->dev);
-
- device_destroy(mqc_dev_class, MKDEV(mqc_dev_major, mqcdev->minor));
-
- mutex_lock(&mqcdev->lock);
- mqcdev->enabled = false;
- if (mqcdev->open_count)
- __mhi_qaic_ctrl_release(mqcdev);
- mutex_unlock(&mqcdev->lock);
-
- xa_erase(&mqc_xa, mqcdev->minor);
- kref_put(&mqcdev->ref_count, mqc_dev_release);
-}
-
-/* .driver_data stores max mtu */
-static const struct mhi_device_id mhi_qaic_ctrl_match_table[] = {
- { .chan = "QAIC_SAHARA", .driver_data = SZ_32K},
- {},
-};
-MODULE_DEVICE_TABLE(mhi, mhi_qaic_ctrl_match_table);
-
-static struct mhi_driver mhi_qaic_ctrl_driver = {
- .id_table = mhi_qaic_ctrl_match_table,
- .remove = mhi_qaic_ctrl_remove,
- .probe = mhi_qaic_ctrl_probe,
- .ul_xfer_cb = mhi_qaic_ctrl_ul_xfer_cb,
- .dl_xfer_cb = mhi_qaic_ctrl_dl_xfer_cb,
- .driver = {
- .name = MHI_QAIC_CTRL_DRIVER_NAME,
- },
-};
-
-int mhi_qaic_ctrl_init(void)
-{
- int ret;
-
- ret = register_chrdev(0, MHI_QAIC_CTRL_DRIVER_NAME, &mhidev_fops);
- if (ret < 0)
- return ret;
-
- mqc_dev_major = ret;
- mqc_dev_class = class_create(THIS_MODULE, MHI_QAIC_CTRL_DRIVER_NAME);
- if (IS_ERR(mqc_dev_class)) {
- ret = PTR_ERR(mqc_dev_class);
- goto unregister_chrdev;
- }
-
- ret = mhi_driver_register(&mhi_qaic_ctrl_driver);
- if (ret)
- goto destroy_class;
-
- return 0;
-
-destroy_class:
- class_destroy(mqc_dev_class);
-unregister_chrdev:
- unregister_chrdev(mqc_dev_major, MHI_QAIC_CTRL_DRIVER_NAME);
- return ret;
-}
-
-void mhi_qaic_ctrl_deinit(void)
-{
- mhi_driver_unregister(&mhi_qaic_ctrl_driver);
- class_destroy(mqc_dev_class);
- unregister_chrdev(mqc_dev_major, MHI_QAIC_CTRL_DRIVER_NAME);
- xa_destroy(&mqc_xa);
-}
diff --git a/drivers/accel/qaic/mhi_qaic_ctrl.h b/drivers/accel/qaic/mhi_qaic_ctrl.h
deleted file mode 100644
index 930b3ace1a59..000000000000
--- a/drivers/accel/qaic/mhi_qaic_ctrl.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only
- *
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
- */
-
-#ifndef __MHI_QAIC_CTRL_H__
-#define __MHI_QAIC_CTRL_H__
-
-int mhi_qaic_ctrl_init(void);
-void mhi_qaic_ctrl_deinit(void);
-
-#endif /* __MHI_QAIC_CTRL_H__ */
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 1106ad88a5b6..ff80eb571729 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -25,7 +25,6 @@
#include <uapi/drm/qaic_accel.h>
#include "mhi_controller.h"
-#include "mhi_qaic_ctrl.h"
#include "qaic.h"
MODULE_IMPORT_NS(DMA_BUF);
@@ -601,16 +600,8 @@ static int __init qaic_init(void)
goto free_mhi;
}
- ret = mhi_qaic_ctrl_init();
- if (ret) {
- pr_debug("qaic: mhi_qaic_ctrl_init failed %d\n", ret);
- goto free_pci;
- }
-
return 0;
-free_pci:
- pci_unregister_driver(&qaic_pci_driver);
free_mhi:
mhi_driver_unregister(&qaic_mhi_driver);
return ret;
@@ -634,7 +625,6 @@ static void __exit qaic_exit(void)
* reinitializing the link_up state after the cleanup is done.
*/
link_up = true;
- mhi_qaic_ctrl_deinit();
pci_unregister_driver(&qaic_pci_driver);
mhi_driver_unregister(&qaic_mhi_driver);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 16c539657f73..6b73fb7a83c3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -16,7 +16,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_ioctl.h>
@@ -108,7 +107,6 @@ static const struct drm_driver exynos_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM
| DRIVER_ATOMIC | DRIVER_RENDER,
.open = exynos_drm_open,
- .lastclose = drm_fb_helper_lastclose,
.postclose = exynos_drm_postclose,
.dumb_create = exynos_drm_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -288,19 +286,15 @@ static int exynos_drm_bind(struct device *dev)
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(drm);
- ret = exynos_drm_fbdev_init(drm);
- if (ret)
- goto err_cleanup_poll;
-
/* register the DRM device */
ret = drm_dev_register(drm, 0);
if (ret < 0)
- goto err_cleanup_fbdev;
+ goto err_cleanup_poll;
+
+ exynos_drm_fbdev_setup(drm);
return 0;
-err_cleanup_fbdev:
- exynos_drm_fbdev_fini(drm);
err_cleanup_poll:
drm_kms_helper_poll_fini(drm);
err_unbind_all:
@@ -321,7 +315,6 @@ static void exynos_drm_unbind(struct device *dev)
drm_dev_unregister(drm);
- exynos_drm_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
component_unbind_all(drm->dev, drm);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 6ae9056e7a18..81d501efd013 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -197,8 +197,6 @@ struct drm_exynos_file_private {
* @wait: wait an atomic commit to finish
*/
struct exynos_drm_private {
- struct drm_fb_helper *fb_helper;
-
struct device *g2d_dev;
struct device *dma_dev;
void *mapping;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 97f2dee2db29..fc1c5608db96 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -11,7 +11,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -157,7 +156,6 @@ static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 4929ffe5a09a..ea4b3d248aac 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -8,16 +8,12 @@
* Seung-Woo Kim <sw0312.kim@samsung.com>
*/
-#include <linux/console.h>
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_prime.h>
-#include <drm/drm_probe_helper.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
@@ -27,22 +23,26 @@
#define MAX_CONNECTOR 4
#define PREFERRED_BPP 32
-#define to_exynos_fbdev(x) container_of(x, struct exynos_drm_fbdev,\
- drm_fb_helper)
+static int exynos_drm_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct drm_gem_object *obj = drm_gem_fb_get_obj(helper->fb, 0);
-struct exynos_drm_fbdev {
- struct drm_fb_helper drm_fb_helper;
- struct exynos_drm_gem *exynos_gem;
-};
+ return drm_gem_prime_mmap(obj, vma);
+}
-static int exynos_drm_fb_mmap(struct fb_info *info,
- struct vm_area_struct *vma)
+static void exynos_drm_fb_destroy(struct fb_info *info)
{
- struct drm_fb_helper *helper = info->par;
- struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
- struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+
+ drm_fb_helper_fini(fb_helper);
- return drm_gem_prime_mmap(&exynos_gem->base, vma);
+ drm_framebuffer_remove(fb);
+
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
}
static const struct fb_ops exynos_drm_fb_ops = {
@@ -54,6 +54,7 @@ static const struct fb_ops exynos_drm_fb_ops = {
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
+ .fb_destroy = exynos_drm_fb_destroy,
};
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
@@ -89,7 +90,6 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
- struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
struct exynos_drm_gem *exynos_gem;
struct drm_device *dev = helper->dev;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
@@ -113,8 +113,6 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
if (IS_ERR(exynos_gem))
return PTR_ERR(exynos_gem);
- exynos_fbdev->exynos_gem = exynos_gem;
-
helper->fb =
exynos_drm_framebuffer_init(dev, &mode_cmd, &exynos_gem, 1);
if (IS_ERR(helper->fb)) {
@@ -127,19 +125,13 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
if (ret < 0)
goto err_destroy_framebuffer;
- return ret;
+ return 0;
err_destroy_framebuffer:
drm_framebuffer_cleanup(helper->fb);
+ helper->fb = NULL;
err_destroy_gem:
exynos_drm_gem_destroy(exynos_gem);
-
- /*
- * if failed, all resources allocated above would be released by
- * drm_mode_config_cleanup() when drm_load() had been called prior
- * to any specific driver such as fimd or hdmi driver.
- */
-
return ret;
}
@@ -147,80 +139,92 @@ static const struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
.fb_probe = exynos_drm_fbdev_create,
};
-int exynos_drm_fbdev_init(struct drm_device *dev)
+/*
+ * struct drm_client
+ */
+
+static void exynos_drm_fbdev_client_unregister(struct drm_client_dev *client)
{
- struct exynos_drm_fbdev *fbdev;
- struct exynos_drm_private *private = dev->dev_private;
- struct drm_fb_helper *helper;
- int ret;
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ if (fb_helper->info) {
+ drm_fb_helper_unregister_info(fb_helper);
+ } else {
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+ }
+}
+
+static int exynos_drm_fbdev_client_restore(struct drm_client_dev *client)
+{
+ drm_fb_helper_lastclose(client->dev);
- if (!dev->mode_config.num_crtc)
- return 0;
+ return 0;
+}
- fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
- if (!fbdev)
- return -ENOMEM;
+static int exynos_drm_fbdev_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+ struct drm_device *dev = client->dev;
+ int ret;
- private->fb_helper = helper = &fbdev->drm_fb_helper;
+ if (dev->fb_helper)
+ return drm_fb_helper_hotplug_event(dev->fb_helper);
- drm_fb_helper_prepare(dev, helper, PREFERRED_BPP, &exynos_drm_fb_helper_funcs);
+ ret = drm_fb_helper_init(dev, fb_helper);
+ if (ret)
+ goto err_drm_err;
- ret = drm_fb_helper_init(dev, helper);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev,
- "failed to initialize drm fb helper.\n");
- goto err_init;
- }
+ if (!drm_drv_uses_atomic_modeset(dev))
+ drm_helper_disable_unused_functions(dev);
- ret = drm_fb_helper_initial_config(helper);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev,
- "failed to set up hw configuration.\n");
- goto err_setup;
- }
+ ret = drm_fb_helper_initial_config(fb_helper);
+ if (ret)
+ goto err_drm_fb_helper_fini;
return 0;
-err_setup:
- drm_fb_helper_fini(helper);
-err_init:
- drm_fb_helper_unprepare(helper);
- private->fb_helper = NULL;
- kfree(fbdev);
-
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_drm_err:
+ drm_err(dev, "Failed to setup fbdev emulation (ret=%d)\n", ret);
return ret;
}
-static void exynos_drm_fbdev_destroy(struct drm_device *dev,
- struct drm_fb_helper *fb_helper)
+static const struct drm_client_funcs exynos_drm_fbdev_client_funcs = {
+ .owner = THIS_MODULE,
+ .unregister = exynos_drm_fbdev_client_unregister,
+ .restore = exynos_drm_fbdev_client_restore,
+ .hotplug = exynos_drm_fbdev_client_hotplug,
+};
+
+void exynos_drm_fbdev_setup(struct drm_device *dev)
{
- struct drm_framebuffer *fb;
+ struct drm_fb_helper *fb_helper;
+ int ret;
- /* release drm framebuffer and real buffer */
- if (fb_helper->fb && fb_helper->fb->funcs) {
- fb = fb_helper->fb;
- if (fb)
- drm_framebuffer_remove(fb);
- }
+ drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
+ drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
- drm_fb_helper_unregister_info(fb_helper);
+ fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
+ if (!fb_helper)
+ return;
+ drm_fb_helper_prepare(dev, fb_helper, PREFERRED_BPP, &exynos_drm_fb_helper_funcs);
- drm_fb_helper_fini(fb_helper);
-}
+ ret = drm_client_init(dev, &fb_helper->client, "fbdev", &exynos_drm_fbdev_client_funcs);
+ if (ret)
+ goto err_drm_client_init;
-void exynos_drm_fbdev_fini(struct drm_device *dev)
-{
- struct exynos_drm_private *private = dev->dev_private;
- struct exynos_drm_fbdev *fbdev;
+ ret = exynos_drm_fbdev_client_hotplug(&fb_helper->client);
+ if (ret)
+ drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
- if (!private || !private->fb_helper)
- return;
+ drm_client_register(&fb_helper->client);
- fbdev = to_exynos_fbdev(private->fb_helper);
+ return;
- exynos_drm_fbdev_destroy(dev, private->fb_helper);
- drm_fb_helper_unprepare(private->fb_helper);
- kfree(fbdev);
- private->fb_helper = NULL;
+err_drm_client_init:
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
}
-
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index 3b1e98e84580..1e1dea627cd9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -12,27 +12,11 @@
#define _EXYNOS_DRM_FBDEV_H_
#ifdef CONFIG_DRM_FBDEV_EMULATION
-
-int exynos_drm_fbdev_init(struct drm_device *dev);
-void exynos_drm_fbdev_fini(struct drm_device *dev);
-
+void exynos_drm_fbdev_setup(struct drm_device *dev);
#else
-
-static inline int exynos_drm_fbdev_init(struct drm_device *dev)
-{
- return 0;
-}
-
-static inline void exynos_drm_fbdev_fini(struct drm_device *dev)
+static inline void exynos_drm_fbdev_setup(struct drm_device *dev)
{
}
-
-static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
-{
-}
-
-#define exynos_drm_output_poll_changed (NULL)
-
#endif
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index d0bb3a52ae5c..3a7b98837516 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -3060,6 +3060,25 @@ void intel_ddi_update_pipe(struct intel_atomic_state *state,
intel_hdcp_update_pipe(state, encoder, crtc_state, conn_state);
}
+void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_crtc *slave_crtc;
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (!intel_phy_is_tc(i915, phy))
+ return;
+
+ intel_update_active_dpll(state, crtc, encoder);
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(crtc_state))
+ intel_update_active_dpll(state, slave_crtc, encoder);
+}
+
static void
intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
@@ -3074,15 +3093,9 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
if (is_tc_port) {
struct intel_crtc *master_crtc =
to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_crtc *slave_crtc;
intel_tc_port_get_link(dig_port, crtc_state->lane_count);
-
- intel_update_active_dpll(state, master_crtc, encoder);
-
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc,
- intel_crtc_bigjoiner_slave_pipes(crtc_state))
- intel_update_active_dpll(state, slave_crtc, encoder);
+ intel_ddi_update_active_dpll(state, encoder, master_crtc);
}
main_link_aux_power_domain_get(dig_port, crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index c85e74ae68e4..2bc034042a93 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -72,5 +72,8 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
int intel_ddi_level(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int lane);
+void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ struct intel_crtc *crtc);
#endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 010ee793e1ff..3c29792137a5 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -959,7 +959,7 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
num_encoders++;
}
- drm_WARN(encoder->base.dev, num_encoders != 1,
+ drm_WARN(state->base.dev, num_encoders != 1,
"%d encoders for pipe %c\n",
num_encoders, pipe_name(master_crtc->pipe));
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index a88b852c437c..2c49d9ab86a2 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -674,6 +674,13 @@ static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
if (intel_dp->active_mst_links == 0)
dig_port->base.pre_pll_enable(state, &dig_port->base,
pipe_config, NULL);
+ else
+ /*
+ * The port PLL state needs to get updated for secondary
+ * streams as for the primary stream.
+ */
+ intel_ddi_update_active_dpll(state, &dig_port->base,
+ to_intel_crtc(pipe_config->uapi.crtc));
}
static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index 473d53610b92..0e7e014fcc71 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -111,6 +111,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
int min_src_w, min_src_h, min_dst_w, min_dst_h;
int max_src_w, max_src_h, max_dst_w, max_dst_h;
@@ -207,6 +209,21 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return -EINVAL;
}
+ /*
+ * The pipe scaler does not use all the bits of PIPESRC, at least
+ * on the earlier platforms. So even when we're scaling a plane
+ * the *pipe* source size must not be too large. For simplicity
+ * we assume the limits match the scaler source size limits. Might
+ * not be 100% accurate on all platforms, but good enough for now.
+ */
+ if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) {
+ drm_dbg_kms(&dev_priv->drm,
+ "scaler_user index %u.%u: pipe src size %ux%u "
+ "is out of scaler range\n",
+ crtc->pipe, scaler_user, pipe_src_w, pipe_src_h);
+ return -EINVAL;
+ }
+
/* mark this plane as a scaler user in crtc_state */
scaler_state->scaler_users |= (1 << scaler_user);
drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 264c952f777b..24765c30a0e1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -488,12 +488,25 @@ static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
}
}
-static int check_gsc_manifest(const struct firmware *fw,
+static int check_gsc_manifest(struct intel_gt *gt,
+ const struct firmware *fw,
struct intel_uc_fw *uc_fw)
{
u32 *dw = (u32 *)fw->data;
- u32 version_hi = dw[HUC_GSC_VERSION_HI_DW];
- u32 version_lo = dw[HUC_GSC_VERSION_LO_DW];
+ u32 version_hi, version_lo;
+ size_t min_size;
+
+ /* Check the size of the blob before examining buffer contents */
+ min_size = sizeof(u32) * (HUC_GSC_VERSION_LO_DW + 1);
+ if (unlikely(fw->size < min_size)) {
+ gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ fw->size, min_size);
+ return -ENODATA;
+ }
+
+ version_hi = dw[HUC_GSC_VERSION_HI_DW];
+ version_lo = dw[HUC_GSC_VERSION_LO_DW];
uc_fw->file_selected.ver.major = FIELD_GET(HUC_GSC_MAJOR_VER_HI_MASK, version_hi);
uc_fw->file_selected.ver.minor = FIELD_GET(HUC_GSC_MINOR_VER_HI_MASK, version_hi);
@@ -664,7 +677,7 @@ static int check_fw_header(struct intel_gt *gt,
return 0;
if (uc_fw->loaded_via_gsc)
- err = check_gsc_manifest(fw, uc_fw);
+ err = check_gsc_manifest(gt, fw, uc_fw);
else
err = check_ccs_header(gt, fw, uc_fw);
if (err)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 736a8dcf777b..c4197e31962e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6349,8 +6349,8 @@ enum skl_power_gate {
#define _PLANE_CSC_RY_GY_1(pipe) _PIPE(pipe, _PLANE_CSC_RY_GY_1_A, \
_PLANE_CSC_RY_GY_1_B)
-#define _PLANE_CSC_RY_GY_2(pipe) _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_2_A, \
- _PLANE_INPUT_CSC_RY_GY_2_B)
+#define _PLANE_CSC_RY_GY_2(pipe) _PIPE(pipe, _PLANE_CSC_RY_GY_2_A, \
+ _PLANE_CSC_RY_GY_2_B)
#define PLANE_CSC_COEFF(pipe, plane, index) _MMIO_PLANE(plane, \
_PLANE_CSC_RY_GY_1(pipe) + (index) * 4, \
_PLANE_CSC_RY_GY_2(pipe) + (index) * 4)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 5361ce70d3f2..154801f1c468 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -69,8 +69,10 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
rem = round_up(obj->base.size, BIT(31)) >> 31;
/* restricted by sg_alloc_table */
- if (overflows_type(rem, unsigned int))
+ if (overflows_type(rem, unsigned int)) {
+ kfree(pages);
return -E2BIG;
+ }
if (sg_alloc_table(pages, rem, GFP)) {
kfree(pages);
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index abf752b36a52..8b108ac80b55 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -585,8 +585,12 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_of_backlight(&nt->panel);
- if (ret)
+ if (ret) {
+ if (num_dsis == 2)
+ mipi_dsi_device_unregister(nt->dsi[1]);
+
return dev_err_probe(dev, ret, "Failed to get backlight\n");
+ }
drm_panel_add(&nt->panel);
@@ -602,6 +606,10 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
ret = mipi_dsi_attach(nt->dsi[i]);
if (ret < 0) {
+ /* If we fail to attach to either host, we're done */
+ if (num_dsis == 2)
+ mipi_dsi_device_unregister(nt->dsi[1]);
+
return dev_err_probe(dev, ret,
"Cannot attach to DSI%d host.\n", i);
}
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index b4729a94c34a..898b892f1143 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -471,7 +471,7 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
DRM_MODE_CONNECTOR_DSI);
ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
- dsi->host->dev, ctx,
+ dev, ctx,
&otm8009a_backlight_ops,
NULL);
if (IS_ERR(ctx->bl_dev)) {
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 18c342a919a2..dfce896c4bae 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -47,11 +47,6 @@
#include "ttm_module.h"
-#define TTM_MAX_ORDER (PMD_SHIFT - PAGE_SHIFT)
-#define __TTM_DIM_ORDER (TTM_MAX_ORDER + 1)
-/* Some architectures have a weird PMD_SHIFT */
-#define TTM_DIM_ORDER (__TTM_DIM_ORDER <= MAX_ORDER ? __TTM_DIM_ORDER : MAX_ORDER)
-
/**
* struct ttm_pool_dma - Helper object for coherent DMA mappings
*
@@ -70,11 +65,11 @@ module_param(page_pool_size, ulong, 0644);
static atomic_long_t allocated_pages;
-static struct ttm_pool_type global_write_combined[TTM_DIM_ORDER];
-static struct ttm_pool_type global_uncached[TTM_DIM_ORDER];
+static struct ttm_pool_type global_write_combined[MAX_ORDER];
+static struct ttm_pool_type global_uncached[MAX_ORDER];
-static struct ttm_pool_type global_dma32_write_combined[TTM_DIM_ORDER];
-static struct ttm_pool_type global_dma32_uncached[TTM_DIM_ORDER];
+static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
+static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
@@ -449,7 +444,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
else
gfp_flags |= GFP_HIGHUSER;
- for (order = min_t(unsigned int, TTM_MAX_ORDER, __fls(num_pages));
+ for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
num_pages;
order = min_t(unsigned int, order, __fls(num_pages))) {
struct ttm_pool_type *pt;
@@ -568,7 +563,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
if (use_dma_alloc) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < TTM_DIM_ORDER; ++j)
+ for (j = 0; j < MAX_ORDER; ++j)
ttm_pool_type_init(&pool->caching[i].orders[j],
pool, i, j);
}
@@ -588,7 +583,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
if (pool->use_dma_alloc) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < TTM_DIM_ORDER; ++j)
+ for (j = 0; j < MAX_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
@@ -642,7 +637,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
unsigned int i;
seq_puts(m, "\t ");
- for (i = 0; i < TTM_DIM_ORDER; ++i)
+ for (i = 0; i < MAX_ORDER; ++i)
seq_printf(m, " ---%2u---", i);
seq_puts(m, "\n");
}
@@ -653,7 +648,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
{
unsigned int i;
- for (i = 0; i < TTM_DIM_ORDER; ++i)
+ for (i = 0; i < MAX_ORDER; ++i)
seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
seq_puts(m, "\n");
}
@@ -756,16 +751,13 @@ int ttm_pool_mgr_init(unsigned long num_pages)
{
unsigned int i;
- BUILD_BUG_ON(TTM_DIM_ORDER > MAX_ORDER);
- BUILD_BUG_ON(TTM_DIM_ORDER < 1);
-
if (!page_pool_size)
page_pool_size = num_pages;
spin_lock_init(&shrinker_lock);
INIT_LIST_HEAD(&shrinker_list);
- for (i = 0; i < TTM_DIM_ORDER; ++i) {
+ for (i = 0; i < MAX_ORDER; ++i) {
ttm_pool_type_init(&global_write_combined[i], NULL,
ttm_write_combined, i);
ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@@ -798,7 +790,7 @@ void ttm_pool_mgr_fini(void)
{
unsigned int i;
- for (i = 0; i < TTM_DIM_ORDER; ++i) {
+ for (i = 0; i < MAX_ORDER; ++i) {
ttm_pool_type_fini(&global_write_combined[i]);
ttm_pool_type_fini(&global_uncached[i]);