-rw-r--r--  Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml  13
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.yaml  2
-rw-r--r--  drivers/dma-buf/dma-buf.c  24
-rw-r--r--  drivers/dma-buf/dma-resv.c  403
-rw-r--r--  drivers/dma-buf/st-dma-resv.c  111
-rw-r--r--  drivers/gpu/drm/Kconfig  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h  97
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c  359
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h  89
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  3
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_plane.c  10
-rw-r--r--  drivers/gpu/drm/arm/malidp_crtc.c  5
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c  14
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig  1
-rw-r--r--  drivers/gpu/drm/bridge/chipone-icn6211.c  96
-rw-r--r--  drivers/gpu/drm/bridge/display-connector.c  15
-rw-r--r--  drivers/gpu/drm/dp/drm_dp.c  33
-rw-r--r--  drivers/gpu/drm/drm_buddy.c  3
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c  12
-rw-r--r--  drivers/gpu/drm/drm_edid.c  358
-rw-r--r--  drivers/gpu/drm/drm_gem.c  3
-rw-r--r--  drivers/gpu/drm/drm_gem_atomic_helper.c  2
-rw-r--r--  drivers/gpu/drm/drm_modes.c  4
-rw-r--r--  drivers/gpu/drm/drm_vblank.c  2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c  6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c  10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c  3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_busy.c  10
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_clflush.c  3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.c  2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c  42
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h  8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c  20
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c  2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_wait.c  6
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c  3
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c  5
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c  4
-rw-r--r--  drivers/gpu/drm/i915/i915_deps.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c  3
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c  17
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_memory_region.c  3
-rw-r--r--  drivers/gpu/drm/ingenic/Kconfig  9
-rw-r--r--  drivers/gpu/drm/ingenic/Makefile  1
-rw-r--r--  drivers/gpu/drm/ingenic/ingenic-drm-drv.c  28
-rw-r--r--  drivers/gpu/drm/ingenic/ingenic-dw-hdmi.c  103
-rw-r--r--  drivers/gpu/drm/lima/lima_gem.c  7
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c  3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c  6
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/base917c.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c  17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c  12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c  17
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c  2
-rw-r--r--  drivers/gpu/drm/panel/panel-lvds.c  31
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c  3
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.c  2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_debugfs.c  3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c  3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c  3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c  9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mn.c  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c  13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c  16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sync.c  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c  12
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c  3
-rw-r--r--  drivers/gpu/drm/solomon/Kconfig  9
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x-i2c.c  11
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x.c  73
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x.h  2
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.c  2
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c  22
-rw-r--r--  drivers/gpu/drm/stm/ltdc.c  16
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  37
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c  27
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c  34
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c  8
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c  4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c  6
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hvs.c  26
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c  39
-rw-r--r--  drivers/gpu/drm/vc4/vc4_txp.c  8
-rw-r--r--  drivers/gpu/drm/vgem/vgem_fence.c  13
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_gem.c  3
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c  5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c  7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  5
-rw-r--r--  drivers/infiniband/core/umem_dmabuf.c  3
-rw-r--r--  drivers/video/fbdev/core/fbcon.c  708
-rw-r--r--  drivers/video/fbdev/core/fbcon.h  8
-rw-r--r--  drivers/video/fbdev/core/fbmem.c  27
-rw-r--r--  drivers/video/fbdev/core/fbsysfs.c  2
-rw-r--r--  include/drm/dp/drm_dp_helper.h  1
-rw-r--r--  include/drm/ttm/ttm_bo_api.h  2
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h  11
-rw-r--r--  include/drm/ttm/ttm_resource.h  3
-rw-r--r--  include/linux/dma-buf.h  24
-rw-r--r--  include/linux/dma-resv.h  180
-rw-r--r--  include/linux/fb.h  1
-rw-r--r--  include/linux/seqlock.h  8
-rw-r--r--  kernel/futex/futex.h  1
121 files changed, 2076 insertions, 1557 deletions
diff --git a/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml b/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml
index 9baafd0c42dd..ade61d502edd 100644
--- a/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml
+++ b/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml
@@ -13,6 +13,7 @@ maintainers:
properties:
compatible:
enum:
+ - sinowealth,sh1106-i2c
- solomon,ssd1305fb-i2c
- solomon,ssd1306fb-i2c
- solomon,ssd1307fb-i2c
@@ -135,6 +136,18 @@ allOf:
properties:
compatible:
contains:
+ const: sinowealth,sh1106-i2c
+ then:
+ properties:
+ solomon,dclk-div:
+ default: 1
+ solomon,dclk-frq:
+ default: 5
+
+ - if:
+ properties:
+ compatible:
+ contains:
const: solomon,ssd1305fb-i2c
then:
properties:
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 01430973ecec..79b72e370ade 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -1130,6 +1130,8 @@ patternProperties:
description: Sinlinx Electronics Technology Co., LTD
"^sinovoip,.*":
description: SinoVoip Co., Ltd
+ "^sinowealth,.*":
+ description: SINO WEALTH Electronic Ltd.
"^sipeed,.*":
description: Shenzhen Sipeed Technology Co., Ltd.
"^sirf,.*":
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 775d3afb4169..79795857be3e 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -216,7 +216,8 @@ static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
struct dma_fence *fence;
int r;
- dma_resv_for_each_fence(&cursor, resv, write, fence) {
+ dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
+ fence) {
dma_fence_get(fence);
r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
if (!r)
@@ -660,12 +661,24 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
+ signed long ret;
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ if (IS_ERR_OR_NULL(sg_table))
+ return sg_table;
+
+ if (!dma_buf_attachment_is_dynamic(attach)) {
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0) {
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
+ direction);
+ return ERR_PTR(ret);
+ }
+ }
- if (!IS_ERR_OR_NULL(sg_table))
- mangle_sg_table(sg_table);
-
+ mangle_sg_table(sg_table);
return sg_table;
}
@@ -1124,7 +1137,8 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
long ret;
/* Wait on any implicit rendering fences */
- ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
+ ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
+ true, MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;
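Both dma-buf.c call sites above hinge on dma_resv_usage_rw(): a CPU write must wait for all readers, while a CPU read only has to wait for writers. A minimal sketch of the resulting wait under the reworked API (the wrapper name example_sync_for_cpu is ours, not part of the patch; needs <linux/dma-resv.h>):

static long example_sync_for_cpu(struct dma_resv *resv, bool write)
{
	/* write -> DMA_RESV_USAGE_READ (wait for readers as well),
	 * !write -> DMA_RESV_USAGE_WRITE (wait for writers only).
	 */
	return dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
				     true, MAX_SCHEDULE_TIMEOUT);
}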
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 8c650b96357a..0cce6e4ec946 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -44,12 +44,12 @@
/**
* DOC: Reservation Object Overview
*
- * The reservation object provides a mechanism to manage shared and
- * exclusive fences associated with a buffer. A reservation object
- * can have attached one exclusive fence (normally associated with
- * write operations) or N shared fences (read operations). The RCU
- * mechanism is used to protect read access to fences from locked
- * write-side updates.
+ * The reservation object provides a mechanism to manage a container of
+ * dma_fence objects associated with a resource. A reservation object
+ * can have any number of fences attached to it. Each fence carries a usage
+ * parameter determining how the operation represented by the fence is using
+ * the resource. The RCU mechanism is used to protect read access to fences
+ * from locked write-side updates.
*
* See struct dma_resv for more details.
*/
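As a concrete illustration of the model described above, a hedged driver-side sketch of publishing a fence with an explicit usage (function and fence names are placeholders):

static int example_publish_fence(struct dma_resv *resv,
				 struct dma_fence *job_fence, bool is_write)
{
	int ret;

	ret = dma_resv_lock(resv, NULL);
	if (ret)
		return ret;

	/* A slot must be reserved under the lock before adding. */
	ret = dma_resv_reserve_fences(resv, 1);
	if (!ret)
		dma_resv_add_fence(resv, job_fence,
				   is_write ? DMA_RESV_USAGE_WRITE :
				   DMA_RESV_USAGE_READ);

	dma_resv_unlock(resv);
	return ret;
}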
@@ -57,39 +57,59 @@
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
+/* Mask for the lower fence pointer bits */
+#define DMA_RESV_LIST_MASK 0x3
+
struct dma_resv_list {
struct rcu_head rcu;
- u32 shared_count, shared_max;
- struct dma_fence __rcu *shared[];
+ u32 num_fences, max_fences;
+ struct dma_fence __rcu *table[];
};
-/**
- * dma_resv_list_alloc - allocate fence list
- * @shared_max: number of fences we need space for
- *
+/* Extract the fence and usage flags from an RCU protected entry in the list. */
+static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index,
+ struct dma_resv *resv, struct dma_fence **fence,
+ enum dma_resv_usage *usage)
+{
+ long tmp;
+
+ tmp = (long)rcu_dereference_check(list->table[index],
+ resv ? dma_resv_held(resv) : true);
+ *fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
+ if (usage)
+ *usage = tmp & DMA_RESV_LIST_MASK;
+}
+
+/* Set the fence and usage flags at the specific index in the list. */
+static void dma_resv_list_set(struct dma_resv_list *list,
+ unsigned int index,
+ struct dma_fence *fence,
+ enum dma_resv_usage usage)
+{
+ long tmp = ((long)fence) | usage;
+
+ RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp);
+}
+
+/*
* Allocate a new dma_resv_list and make sure to correctly initialize
- * shared_max.
+ * max_fences.
*/
-static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
+static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences)
{
struct dma_resv_list *list;
- list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
+ list = kmalloc(struct_size(list, table, max_fences), GFP_KERNEL);
if (!list)
return NULL;
- list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
- sizeof(*list->shared);
+ list->max_fences = (ksize(list) - offsetof(typeof(*list), table)) /
+ sizeof(*list->table);
return list;
}
-/**
- * dma_resv_list_free - free fence list
- * @list: list to free
- *
- * Free a dma_resv_list and make sure to drop all references.
- */
+/* Free a dma_resv_list and make sure to drop all references. */
static void dma_resv_list_free(struct dma_resv_list *list)
{
unsigned int i;
@@ -97,9 +117,12 @@ static void dma_resv_list_free(struct dma_resv_list *list)
if (!list)
return;
- for (i = 0; i < list->shared_count; ++i)
- dma_fence_put(rcu_dereference_protected(list->shared[i], true));
+ for (i = 0; i < list->num_fences; ++i) {
+ struct dma_fence *fence;
+ dma_resv_list_entry(list, i, NULL, &fence, NULL);
+ dma_fence_put(fence);
+ }
kfree_rcu(list, rcu);
}
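The two-bit DMA_RESV_LIST_MASK packing above works because dma_fence allocations are at least 4-byte aligned, so the low pointer bits are free to carry the enum dma_resv_usage value. Illustrative helpers (not part of the patch) showing the invariant:

static inline unsigned long example_pack(struct dma_fence *fence,
					 enum dma_resv_usage usage)
{
	/* BOOKKEEP is the highest usage value and must fit in the mask */
	BUILD_BUG_ON(DMA_RESV_USAGE_BOOKKEEP > DMA_RESV_LIST_MASK);
	return (unsigned long)fence | usage;
}

static inline struct dma_fence *example_unpack(unsigned long entry,
					       enum dma_resv_usage *usage)
{
	*usage = entry & DMA_RESV_LIST_MASK;
	return (struct dma_fence *)(entry & ~DMA_RESV_LIST_MASK);
}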
@@ -110,10 +133,8 @@ static void dma_resv_list_free(struct dma_resv_list *list)
void dma_resv_init(struct dma_resv *obj)
{
ww_mutex_init(&obj->lock, &reservation_ww_class);
- seqcount_ww_mutex_init(&obj->seq, &obj->lock);
- RCU_INIT_POINTER(obj->fence, NULL);
- RCU_INIT_POINTER(obj->fence_excl, NULL);
+ RCU_INIT_POINTER(obj->fences, NULL);
}
EXPORT_SYMBOL(dma_resv_init);
@@ -123,46 +144,32 @@ EXPORT_SYMBOL(dma_resv_init);
*/
void dma_resv_fini(struct dma_resv *obj)
{
- struct dma_resv_list *fobj;
- struct dma_fence *excl;
-
/*
* This object should be dead and all references must have
* been released to it, so no need to be protected with rcu.
*/
- excl = rcu_dereference_protected(obj->fence_excl, 1);
- if (excl)
- dma_fence_put(excl);
-
- fobj = rcu_dereference_protected(obj->fence, 1);
- dma_resv_list_free(fobj);
+ dma_resv_list_free(rcu_dereference_protected(obj->fences, true));
ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);
-static inline struct dma_fence *
-dma_resv_excl_fence(struct dma_resv *obj)
+/* Dereference the fences while ensuring RCU rules */
+static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj)
{
- return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
-}
-
-static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
-{
- return rcu_dereference_check(obj->fence, dma_resv_held(obj));
+ return rcu_dereference_check(obj->fences, dma_resv_held(obj));
}
/**
- * dma_resv_reserve_fences - Reserve space to add shared fences to
- * a dma_resv.
+ * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object.
* @obj: reservation object
* @num_fences: number of fences we want to add
*
- * Should be called before dma_resv_add_shared_fence(). Must
- * be called with @obj locked through dma_resv_lock().
+ * Should be called before dma_resv_add_fence(). Must be called with @obj
+ * locked through dma_resv_lock().
*
* Note that the preallocated slots need to be re-reserved if @obj is unlocked
- * at any time before calling dma_resv_add_shared_fence(). This is validated
- * when CONFIG_DEBUG_MUTEXES is enabled.
+ * at any time before calling dma_resv_add_fence(). This is validated when
+ * CONFIG_DEBUG_MUTEXES is enabled.
*
* RETURNS
* Zero for success, or -errno
@@ -174,11 +181,11 @@ int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
dma_resv_assert_held(obj);
- old = dma_resv_shared_list(obj);
- if (old && old->shared_max) {
- if ((old->shared_count + num_fences) <= old->shared_max)
+ old = dma_resv_fences_list(obj);
+ if (old && old->max_fences) {
+ if ((old->num_fences + num_fences) <= old->max_fences)
return 0;
- max = max(old->shared_count + num_fences, old->shared_max * 2);
+ max = max(old->num_fences + num_fences, old->max_fences * 2);
} else {
max = max(4ul, roundup_pow_of_two(num_fences));
}
@@ -193,27 +200,27 @@ int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
* references from the old struct are carried over to
* the new.
*/
- for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
+ for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) {
+ enum dma_resv_usage usage;
struct dma_fence *fence;
- fence = rcu_dereference_protected(old->shared[i],
- dma_resv_held(obj));
+ dma_resv_list_entry(old, i, obj, &fence, &usage);
if (dma_fence_is_signaled(fence))
- RCU_INIT_POINTER(new->shared[--k], fence);
+ RCU_INIT_POINTER(new->table[--k], fence);
else
- RCU_INIT_POINTER(new->shared[j++], fence);
+ dma_resv_list_set(new, j++, fence, usage);
}
- new->shared_count = j;
+ new->num_fences = j;
/*
* We are not changing the effective set of fences here so can
* merely update the pointer to the new array; both existing
* readers and new readers will see exactly the same set of
- * active (unsignaled) shared fences. Individual fences and the
+ * active (unsignaled) fences. Individual fences and the
* old array are protected by RCU and so will not vanish under
* the gaze of the rcu_read_lock() readers.
*/
- rcu_assign_pointer(obj->fence, new);
+ rcu_assign_pointer(obj->fences, new);
if (!old)
return 0;
@@ -222,7 +229,7 @@ int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
for (i = k; i < max; ++i) {
struct dma_fence *fence;
- fence = rcu_dereference_protected(new->shared[i],
+ fence = rcu_dereference_protected(new->table[i],
dma_resv_held(obj));
dma_fence_put(fence);
}
@@ -234,37 +241,39 @@ EXPORT_SYMBOL(dma_resv_reserve_fences);
#ifdef CONFIG_DEBUG_MUTEXES
/**
- * dma_resv_reset_shared_max - reset shared fences for debugging
+ * dma_resv_reset_max_fences - reset fences for debugging
* @obj: the dma_resv object to reset
*
- * Reset the number of pre-reserved shared slots to test that drivers do
+ * Reset the number of pre-reserved fence slots to test that drivers do
* correct slot allocation using dma_resv_reserve_fences(). See also
- * &dma_resv_list.shared_max.
+ * &dma_resv_list.max_fences.
*/
-void dma_resv_reset_shared_max(struct dma_resv *obj)
+void dma_resv_reset_max_fences(struct dma_resv *obj)
{
- struct dma_resv_list *fences = dma_resv_shared_list(obj);
+ struct dma_resv_list *fences = dma_resv_fences_list(obj);
dma_resv_assert_held(obj);
- /* Test shared fence slot reservation */
+ /* Test fence slot reservation */
if (fences)
- fences->shared_max = fences->shared_count;
+ fences->max_fences = fences->num_fences;
}
-EXPORT_SYMBOL(dma_resv_reset_shared_max);
+EXPORT_SYMBOL(dma_resv_reset_max_fences);
#endif
/**
- * dma_resv_add_shared_fence - Add a fence to a shared slot
+ * dma_resv_add_fence - Add a fence to the dma_resv obj
* @obj: the reservation object
- * @fence: the shared fence to add
+ * @fence: the fence to add
+ * @usage: how the fence is used, see enum dma_resv_usage
*
- * Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and
+ * Add a fence to a slot, @obj must be locked with dma_resv_lock(), and
* dma_resv_reserve_fences() has been called.
*
* See also &dma_resv.fence for a discussion of the semantics.
*/
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+ enum dma_resv_usage usage)
{
struct dma_resv_list *fobj;
struct dma_fence *old;
@@ -279,39 +288,36 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
*/
WARN_ON(dma_fence_is_container(fence));
- fobj = dma_resv_shared_list(obj);
- count = fobj->shared_count;
-
- write_seqcount_begin(&obj->seq);
+ fobj = dma_resv_fences_list(obj);
+ count = fobj->num_fences;
for (i = 0; i < count; ++i) {
-
- old = rcu_dereference_protected(fobj->shared[i],
- dma_resv_held(obj));
- if (old->context == fence->context ||
- dma_fence_is_signaled(old))
- goto replace;
+ enum dma_resv_usage old_usage;
+
+ dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
+ if ((old->context == fence->context && old_usage >= usage) ||
+ dma_fence_is_signaled(old)) {
+ dma_resv_list_set(fobj, i, fence, usage);
+ dma_fence_put(old);
+ return;
+ }
}
- BUG_ON(fobj->shared_count >= fobj->shared_max);
- old = NULL;
+ BUG_ON(fobj->num_fences >= fobj->max_fences);
count++;
-replace:
- RCU_INIT_POINTER(fobj->shared[i], fence);
- /* pointer update must be visible before we extend the shared_count */
- smp_store_mb(fobj->shared_count, count);
-
- write_seqcount_end(&obj->seq);
- dma_fence_put(old);
+ dma_resv_list_set(fobj, i, fence, usage);
+ /* pointer update must be visible before we extend the num_fences */
+ smp_store_mb(fobj->num_fences, count);
}
-EXPORT_SYMBOL(dma_resv_add_shared_fence);
+EXPORT_SYMBOL(dma_resv_add_fence);
/**
* dma_resv_replace_fences - replace fences in the dma_resv obj
* @obj: the reservation object
* @context: the context of the fences to replace
* @replacement: the new fence to use instead
+ * @usage: how the new fence is used, see enum dma_resv_usage
*
* Replace fences with a specified context with a new fence. Only valid if the
* operation represented by the original fence no longer has access to the
@@ -321,107 +327,66 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
* update fence which makes the resource inaccessible.
*/
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
- struct dma_fence *replacement)
+ struct dma_fence *replacement,
+ enum dma_resv_usage usage)
{
struct dma_resv_list *list;
- struct dma_fence *old;
unsigned int i;
dma_resv_assert_held(obj);
- write_seqcount_begin(&obj->seq);
+ list = dma_resv_fences_list(obj);
+ for (i = 0; list && i < list->num_fences; ++i) {
+ struct dma_fence *old;
- old = dma_resv_excl_fence(obj);
- if (old->context == context) {
- RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement));
- dma_fence_put(old);
- }
-
- list = dma_resv_shared_list(obj);
- for (i = 0; list && i < list->shared_count; ++i) {
- old = rcu_dereference_protected(list->shared[i],
- dma_resv_held(obj));
+ dma_resv_list_entry(list, i, obj, &old, NULL);
if (old->context != context)
continue;
- rcu_assign_pointer(list->shared[i], dma_fence_get(replacement));
+ dma_resv_list_set(list, i, replacement, usage);
dma_fence_put(old);
}
-
- write_seqcount_end(&obj->seq);
}
EXPORT_SYMBOL(dma_resv_replace_fences);
-/**
- * dma_resv_add_excl_fence - Add an exclusive fence.
- * @obj: the reservation object
- * @fence: the exclusive fence to add
- *
- * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
- * See also &dma_resv.fence_excl for a discussion of the semantics.
- */
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
-{
- struct dma_fence *old_fence = dma_resv_excl_fence(obj);
-
- dma_resv_assert_held(obj);
-
- dma_fence_get(fence);
-
- write_seqcount_begin(&obj->seq);
- /* write_seqcount_begin provides the necessary memory barrier */
- RCU_INIT_POINTER(obj->fence_excl, fence);
- write_seqcount_end(&obj->seq);
-
- dma_fence_put(old_fence);
-}
-EXPORT_SYMBOL(dma_resv_add_excl_fence);
-
-/* Restart the iterator by initializing all the necessary fields, but not the
- * relation to the dma_resv object. */
+/* Restart the unlocked iteration by initializing the cursor object. */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
- cursor->seq = read_seqcount_begin(&cursor->obj->seq);
- cursor->index = -1;
- cursor->shared_count = 0;
- if (cursor->all_fences) {
- cursor->fences = dma_resv_shared_list(cursor->obj);
- if (cursor->fences)
- cursor->shared_count = cursor->fences->shared_count;
- } else {
- cursor->fences = NULL;
- }
+ cursor->index = 0;
+ cursor->num_fences = 0;
+ cursor->fences = dma_resv_fences_list(cursor->obj);
+ if (cursor->fences)
+ cursor->num_fences = cursor->fences->num_fences;
cursor->is_restarted = true;
}
/* Walk to the next unsignaled fence and grab a reference to it */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
- struct dma_resv *obj = cursor->obj;
+ if (!cursor->fences)
+ return;
do {
/* Drop the reference from the previous round */
dma_fence_put(cursor->fence);
- if (cursor->index == -1) {
- cursor->fence = dma_resv_excl_fence(obj);
- cursor->index++;
- if (!cursor->fence)
- continue;
-
- } else if (!cursor->fences ||
- cursor->index >= cursor->shared_count) {
+ if (cursor->index >= cursor->num_fences) {
cursor->fence = NULL;
break;
- } else {
- struct dma_resv_list *fences = cursor->fences;
- unsigned int idx = cursor->index++;
-
- cursor->fence = rcu_dereference(fences->shared[idx]);
}
+
+ dma_resv_list_entry(cursor->fences, cursor->index++,
+ cursor->obj, &cursor->fence,
+ &cursor->fence_usage);
cursor->fence = dma_fence_get_rcu(cursor->fence);
- if (!cursor->fence || !dma_fence_is_signaled(cursor->fence))
+ if (!cursor->fence) {
+ dma_resv_iter_restart_unlocked(cursor);
+ continue;
+ }
+
+ if (!dma_fence_is_signaled(cursor->fence) &&
+ cursor->usage >= cursor->fence_usage)
break;
} while (true);
}
@@ -444,7 +409,7 @@ struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
do {
dma_resv_iter_restart_unlocked(cursor);
dma_resv_iter_walk_unlocked(cursor);
- } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
+ } while (dma_resv_fences_list(cursor->obj) != cursor->fences);
rcu_read_unlock();
return cursor->fence;
@@ -467,13 +432,13 @@ struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
rcu_read_lock();
cursor->is_restarted = false;
- restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq);
+ restart = dma_resv_fences_list(cursor->obj) != cursor->fences;
do {
if (restart)
dma_resv_iter_restart_unlocked(cursor);
dma_resv_iter_walk_unlocked(cursor);
restart = true;
- } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
+ } while (dma_resv_fences_list(cursor->obj) != cursor->fences);
rcu_read_unlock();
return cursor->fence;
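With the seqcount gone, the unlocked iterator detects concurrent modification by re-checking the fences list pointer; callers observe this as a restart. A hedged caller-side sketch of the pattern (helper name is ours):

static unsigned int example_count_busy(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int busy = 0;

	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* On a restart, fences already seen may be delivered again */
		if (dma_resv_iter_is_restarted(&cursor))
			busy = 0;
		busy++;
	}
	dma_resv_iter_end(&cursor);
	return busy;
}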
@@ -496,15 +461,9 @@ struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
dma_resv_assert_held(cursor->obj);
cursor->index = 0;
- if (cursor->all_fences)
- cursor->fences = dma_resv_shared_list(cursor->obj);
- else
- cursor->fences = NULL;
-
- fence = dma_resv_excl_fence(cursor->obj);
- if (!fence)
- fence = dma_resv_iter_next(cursor);
+ cursor->fences = dma_resv_fences_list(cursor->obj);
+ fence = dma_resv_iter_next(cursor);
cursor->is_restarted = true;
return fence;
}
@@ -519,17 +478,22 @@ EXPORT_SYMBOL_GPL(dma_resv_iter_first);
*/
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
- unsigned int idx;
+ struct dma_fence *fence;
dma_resv_assert_held(cursor->obj);
cursor->is_restarted = false;
- if (!cursor->fences || cursor->index >= cursor->fences->shared_count)
- return NULL;
- idx = cursor->index++;
- return rcu_dereference_protected(cursor->fences->shared[idx],
- dma_resv_held(cursor->obj));
+ do {
+ if (!cursor->fences ||
+ cursor->index >= cursor->fences->num_fences)
+ return NULL;
+
+ dma_resv_list_entry(cursor->fences, cursor->index++,
+ cursor->obj, &fence, &cursor->fence_usage);
+ } while (cursor->fence_usage > cursor->usage);
+
+ return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);
@@ -544,60 +508,43 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
struct dma_resv_iter cursor;
struct dma_resv_list *list;
- struct dma_fence *f, *excl;
+ struct dma_fence *f;
dma_resv_assert_held(dst);
list = NULL;
- excl = NULL;
- dma_resv_iter_begin(&cursor, src, true);
+ dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP);
dma_resv_for_each_fence_unlocked(&cursor, f) {
if (dma_resv_iter_is_restarted(&cursor)) {
dma_resv_list_free(list);
- dma_fence_put(excl);
-
- if (cursor.shared_count) {
- list = dma_resv_list_alloc(cursor.shared_count);
- if (!list) {
- dma_resv_iter_end(&cursor);
- return -ENOMEM;
- }
- list->shared_count = 0;
-
- } else {
- list = NULL;
+ list = dma_resv_list_alloc(cursor.num_fences);
+ if (!list) {
+ dma_resv_iter_end(&cursor);
+ return -ENOMEM;
}
- excl = NULL;
+ list->num_fences = 0;
}
dma_fence_get(f);
- if (dma_resv_iter_is_exclusive(&cursor))
- excl = f;
- else
- RCU_INIT_POINTER(list->shared[list->shared_count++], f);
+ dma_resv_list_set(list, list->num_fences++, f,
+ dma_resv_iter_usage(&cursor));
}
dma_resv_iter_end(&cursor);
- write_seqcount_begin(&dst->seq);
- excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
- list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
- write_seqcount_end(&dst->seq);
-
+ list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst));
dma_resv_list_free(list);
- dma_fence_put(excl);
-
return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
/**
- * dma_resv_get_fences - Get an object's shared and exclusive
- * fences without update side lock held
+ * dma_resv_get_fences - Get an object's fences
+ * without the update side lock held
* @obj: the reservation object
- * @write: true if we should return all fences
+ * @usage: controls which fences to include, see enum dma_resv_usage.
* @num_fences: the number of fences returned
* @fences: the array of fence ptrs returned (array is krealloc'd to the
* required size, and must be freed by caller)
@@ -605,7 +552,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
* Retrieve all fences from the reservation object.
* Returns either zero or -ENOMEM.
*/
-int dma_resv_get_fences(struct dma_resv *obj, bool write,
+int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
unsigned int *num_fences, struct dma_fence ***fences)
{
struct dma_resv_iter cursor;
@@ -614,7 +561,7 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
*num_fences = 0;
*fences = NULL;
- dma_resv_iter_begin(&cursor, obj, write);
+ dma_resv_iter_begin(&cursor, obj, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (dma_resv_iter_is_restarted(&cursor)) {
@@ -623,7 +570,7 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
while (*num_fences)
dma_fence_put((*fences)[--(*num_fences)]);
- count = cursor.shared_count + 1;
+ count = cursor.num_fences + 1;
/* Eventually re-allocate the array */
*fences = krealloc_array(*fences, count,
@@ -646,7 +593,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences);
/**
* dma_resv_get_singleton - Get a single fence for all the fences
* @obj: the reservation object
- * @write: true if we should return all fences
+ * @usage: controls which fences to include, see enum dma_resv_usage.
* @fence: the resulting fence
*
* Get a single fence representing all the fences inside the resv object.
@@ -658,7 +605,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences);
*
* Returns 0 on success and negative error values on failure.
*/
-int dma_resv_get_singleton(struct dma_resv *obj, bool write,
+int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
struct dma_fence **fence)
{
struct dma_fence_array *array;
@@ -666,7 +613,7 @@ int dma_resv_get_singleton(struct dma_resv *obj, bool write,
unsigned count;
int r;
- r = dma_resv_get_fences(obj, write, &count, &fences);
+ r = dma_resv_get_fences(obj, usage, &count, &fences);
if (r)
return r;
@@ -697,10 +644,9 @@ int dma_resv_get_singleton(struct dma_resv *obj, bool write,
EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
/**
- * dma_resv_wait_timeout - Wait on reservation's objects
- * shared and/or exclusive fences.
+ * dma_resv_wait_timeout - Wait on a reservation object's fences
* @obj: the reservation object
- * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @usage: controls which fences to include, see enum dma_resv_usage.
* @intr: if true, do interruptible wait
* @timeout: timeout value in jiffies or zero to return immediately
*
@@ -710,14 +656,14 @@ EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
* greater than zero on success.
*/
-long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
- unsigned long timeout)
+long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+ bool intr, unsigned long timeout)
{
long ret = timeout ? timeout : 1;
struct dma_resv_iter cursor;
struct dma_fence *fence;
- dma_resv_iter_begin(&cursor, obj, wait_all);
+ dma_resv_iter_begin(&cursor, obj, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
ret = dma_fence_wait_timeout(fence, intr, ret);
@@ -737,8 +683,7 @@ EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
* dma_resv_test_signaled - Test if a reservation object's fences have been
* signaled.
* @obj: the reservation object
- * @test_all: if true, test all fences, otherwise only test the exclusive
- * fence
+ * @usage: controls which fences to include, see enum dma_resv_usage.
*
* Callers are not required to hold specific locks, but maybe hold
* dma_resv_lock() already.
@@ -747,12 +692,12 @@ EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
*
* True if all fences signaled, else false.
*/
-bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
+bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage)
{
struct dma_resv_iter cursor;
struct dma_fence *fence;
- dma_resv_iter_begin(&cursor, obj, test_all);
+ dma_resv_iter_begin(&cursor, obj, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
dma_resv_iter_end(&cursor);
return false;
@@ -772,13 +717,13 @@ EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
*/
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
+ static const char *usage[] = { "kernel", "write", "read", "bookkeep" };
struct dma_resv_iter cursor;
struct dma_fence *fence;
- dma_resv_for_each_fence(&cursor, obj, true, fence) {
+ dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
seq_printf(seq, "\t%s fence:",
- dma_resv_iter_is_exclusive(&cursor) ?
- "Exclusive" : "Shared");
+ usage[dma_resv_iter_usage(&cursor)]);
dma_fence_describe(fence, seq);
}
}
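The exported entry points above all share one convention: the usage classes are ordered (KERNEL < WRITE < READ < BOOKKEEP) and a query at one level includes every lower level. A small sketch under that assumption (helper name is ours):

/* Collapse everything a writer must wait for into a single fence. */
static struct dma_fence *example_implicit_dep(struct dma_resv *resv)
{
	struct dma_fence *fence;

	if (dma_resv_get_singleton(resv, DMA_RESV_USAGE_READ, &fence))
		return NULL;	/* -ENOMEM */
	return fence;		/* may be NULL if nothing is pending */
}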
diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
index d2e61f6ae989..813779e3c9be 100644
--- a/drivers/dma-buf/st-dma-resv.c
+++ b/drivers/dma-buf/st-dma-resv.c
@@ -58,8 +58,9 @@ static int sanitycheck(void *arg)
return r;
}
-static int test_signaling(void *arg, bool shared)
+static int test_signaling(void *arg)
{
+ enum dma_resv_usage usage = (unsigned long)arg;
struct dma_resv resv;
struct dma_fence *f;
int r;
@@ -81,18 +82,14 @@ static int test_signaling(void *arg, bool shared)
goto err_unlock;
}
- if (shared)
- dma_resv_add_shared_fence(&resv, f);
- else
- dma_resv_add_excl_fence(&resv, f);
-
- if (dma_resv_test_signaled(&resv, shared)) {
+ dma_resv_add_fence(&resv, f, usage);
+ if (dma_resv_test_signaled(&resv, usage)) {
pr_err("Resv unexpectedly signaled\n");
r = -EINVAL;
goto err_unlock;
}
dma_fence_signal(f);
- if (!dma_resv_test_signaled(&resv, shared)) {
+ if (!dma_resv_test_signaled(&resv, usage)) {
pr_err("Resv not reporting signaled\n");
r = -EINVAL;
goto err_unlock;
@@ -105,18 +102,9 @@ err_free:
return r;
}
-static int test_excl_signaling(void *arg)
-{
- return test_signaling(arg, false);
-}
-
-static int test_shared_signaling(void *arg)
-{
- return test_signaling(arg, true);
-}
-
-static int test_for_each(void *arg, bool shared)
+static int test_for_each(void *arg)
{
+ enum dma_resv_usage usage = (unsigned long)arg;
struct dma_resv_iter cursor;
struct dma_fence *f, *fence;
struct dma_resv resv;
@@ -139,13 +127,10 @@ static int test_for_each(void *arg, bool shared)
goto err_unlock;
}
- if (shared)
- dma_resv_add_shared_fence(&resv, f);
- else
- dma_resv_add_excl_fence(&resv, f);
+ dma_resv_add_fence(&resv, f, usage);
r = -ENOENT;
- dma_resv_for_each_fence(&cursor, &resv, shared, fence) {
+ dma_resv_for_each_fence(&cursor, &resv, usage, fence) {
if (!r) {
pr_err("More than one fence found\n");
r = -EINVAL;
@@ -156,7 +141,7 @@ static int test_for_each(void *arg, bool shared)
r = -EINVAL;
goto err_unlock;
}
- if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
+ if (dma_resv_iter_usage(&cursor) != usage) {
pr_err("Unexpected fence usage\n");
r = -EINVAL;
goto err_unlock;
@@ -176,18 +161,9 @@ err_free:
return r;
}
-static int test_excl_for_each(void *arg)
-{
- return test_for_each(arg, false);
-}
-
-static int test_shared_for_each(void *arg)
-{
- return test_for_each(arg, true);
-}
-
-static int test_for_each_unlocked(void *arg, bool shared)
+static int test_for_each_unlocked(void *arg)
{
+ enum dma_resv_usage usage = (unsigned long)arg;
struct dma_resv_iter cursor;
struct dma_fence *f, *fence;
struct dma_resv resv;
@@ -211,14 +187,11 @@ static int test_for_each_unlocked(void *arg, bool shared)
goto err_free;
}
- if (shared)
- dma_resv_add_shared_fence(&resv, f);
- else
- dma_resv_add_excl_fence(&resv, f);
+ dma_resv_add_fence(&resv, f, usage);
dma_resv_unlock(&resv);
r = -ENOENT;
- dma_resv_iter_begin(&cursor, &resv, shared);
+ dma_resv_iter_begin(&cursor, &resv, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (!r) {
pr_err("More than one fence found\n");
@@ -234,7 +207,7 @@ static int test_for_each_unlocked(void *arg, bool shared)
r = -EINVAL;
goto err_iter_end;
}
- if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
+ if (dma_resv_iter_usage(&cursor) != usage) {
pr_err("Unexpected fence usage\n");
r = -EINVAL;
goto err_iter_end;
@@ -244,7 +217,7 @@ static int test_for_each_unlocked(void *arg, bool shared)
if (r == -ENOENT) {
r = -EINVAL;
/* That should trigger a restart */
- cursor.seq--;
+ cursor.fences = (void *)~0;
} else if (r == -EINVAL) {
r = 0;
}
@@ -260,18 +233,9 @@ err_free:
return r;
}
-static int test_excl_for_each_unlocked(void *arg)
-{
- return test_for_each_unlocked(arg, false);
-}
-
-static int test_shared_for_each_unlocked(void *arg)
-{
- return test_for_each_unlocked(arg, true);
-}
-
-static int test_get_fences(void *arg, bool shared)
+static int test_get_fences(void *arg)
{
+ enum dma_resv_usage usage = (unsigned long)arg;
struct dma_fence *f, **fences = NULL;
struct dma_resv resv;
int r, i;
@@ -294,13 +258,10 @@ static int test_get_fences(void *arg, bool shared)
goto err_resv;
}
- if (shared)
- dma_resv_add_shared_fence(&resv, f);
- else
- dma_resv_add_excl_fence(&resv, f);
+ dma_resv_add_fence(&resv, f, usage);
dma_resv_unlock(&resv);
- r = dma_resv_get_fences(&resv, shared, &i, &fences);
+ r = dma_resv_get_fences(&resv, usage, &i, &fences);
if (r) {
pr_err("get_fences failed\n");
goto err_free;
@@ -322,30 +283,24 @@ err_resv:
return r;
}
-static int test_excl_get_fences(void *arg)
-{
- return test_get_fences(arg, false);
-}
-
-static int test_shared_get_fences(void *arg)
-{
- return test_get_fences(arg, true);
-}
-
int dma_resv(void)
{
static const struct subtest tests[] = {
SUBTEST(sanitycheck),
- SUBTEST(test_excl_signaling),
- SUBTEST(test_shared_signaling),
- SUBTEST(test_excl_for_each),
- SUBTEST(test_shared_for_each),
- SUBTEST(test_excl_for_each_unlocked),
- SUBTEST(test_shared_for_each_unlocked),
- SUBTEST(test_excl_get_fences),
- SUBTEST(test_shared_get_fences),
+ SUBTEST(test_signaling),
+ SUBTEST(test_for_each),
+ SUBTEST(test_for_each_unlocked),
+ SUBTEST(test_get_fences),
};
+ enum dma_resv_usage usage;
+ int r;
spin_lock_init(&fence_lock);
- return subtests(tests, NULL);
+ for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_BOOKKEEP;
+ ++usage) {
+ r = subtests(tests, (void *)(unsigned long)usage);
+ if (r)
+ return r;
+ }
+ return 0;
}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f1422bee3dcc..5133c3f028ab 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -280,6 +280,7 @@ config DRM_AMDGPU
select HWMON
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
+ select DRM_BUDDY
help
Choose this option if you have a recent AMD Radeon graphics card.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 98b1736bb221..3dc5ab2764ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -263,7 +263,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
*/
replacement = dma_fence_get_stub();
dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
- replacement);
+ replacement, DMA_RESV_USAGE_READ);
dma_fence_put(replacement);
return 0;
}
@@ -2447,6 +2447,8 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
struct amdgpu_bo *bo = mem->bo;
uint32_t domain = mem->domain;
struct kfd_mem_attachment *attachment;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
total_size += amdgpu_bo_size(bo);
@@ -2461,10 +2463,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
goto validate_map_fail;
}
}
- ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
- if (ret) {
- pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
- goto validate_map_fail;
+ dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
+ DMA_RESV_USAGE_KERNEL, fence) {
+ ret = amdgpu_sync_fence(&sync_obj, fence);
+ if (ret) {
+ pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
+ goto validate_map_fail;
+ }
}
list_for_each_entry(attachment, &mem->attachments, list) {
if (!attachment->is_mapped)
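This hunk is the template for the series-wide removal of bo->tbo.moving: kernel-internal move fences are now found by walking the reservation object with DMA_RESV_USAGE_KERNEL. A hedged distillation of the pattern (amdgpu_sync_fence() is the driver's existing helper; the wrapper name is ours):

static int example_sync_moves(struct amdgpu_sync *sync,
			      struct ttm_buffer_object *bo)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int r;

	/* caller holds bo->base.resv */
	dma_resv_for_each_fence(&cursor, bo->base.resv,
				DMA_RESV_USAGE_KERNEL, fence) {
		r = amdgpu_sync_fence(sync, fence);
		if (r)
			return r;
	}
	return 0;
}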
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index 044b41f0bfd9..529d52a204cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -34,7 +34,6 @@ struct amdgpu_fpriv;
struct amdgpu_bo_list_entry {
struct ttm_validate_buffer tv;
struct amdgpu_bo_va *bo_va;
- struct dma_fence_chain *chain;
uint32_t priority;
struct page **user_pages;
bool user_invalidated;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e85e347eb670..8de283997769 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -55,8 +55,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &bo->tbo;
- /* One for TTM and one for the CS job */
- p->uf_entry.tv.num_shared = 2;
+ /* One for TTM and two for the CS job */
+ p->uf_entry.tv.num_shared = 3;
drm_gem_object_put(gobj);
@@ -574,14 +574,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
e->bo_va = amdgpu_vm_bo_find(vm, bo);
-
- if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) {
- e->chain = dma_fence_chain_alloc();
- if (!e->chain) {
- r = -ENOMEM;
- goto error_validate;
- }
- }
}
/* Move fence waiting after getting reservation lock of
@@ -642,13 +634,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
error_validate:
- if (r) {
- amdgpu_bo_list_for_each_entry(e, p->bo_list) {
- dma_fence_chain_free(e->chain);
- e->chain = NULL;
- }
+ if (r)
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
- }
out:
return r;
}
@@ -688,17 +675,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
{
unsigned i;
- if (error && backoff) {
- struct amdgpu_bo_list_entry *e;
-
- amdgpu_bo_list_for_each_entry(e, parser->bo_list) {
- dma_fence_chain_free(e->chain);
- e->chain = NULL;
- }
-
+ if (error && backoff)
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
- }
for (i = 0; i < parser->num_post_deps; i++) {
drm_syncobj_put(parser->post_deps[i].syncobj);
@@ -1272,29 +1251,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
- amdgpu_bo_list_for_each_entry(e, p->bo_list) {
- struct dma_resv *resv = e->tv.bo->base.resv;
- struct dma_fence_chain *chain = e->chain;
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
-
- if (!chain)
- continue;
-
- /*
- * Temporary workaround dma_resv shortcommings by wrapping up
- * the submission in a dma_fence_chain and add it as exclusive
- * fence.
- *
- * TODO: Remove together with dma_resv rework.
- */
- dma_resv_for_each_fence(&cursor, resv, false, fence) {
- break;
- }
- dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);
- rcu_assign_pointer(resv->fence_excl, &chain->base);
- e->chain = NULL;
- }
+ /* Make sure all BOs are remembered as writers */
+ amdgpu_bo_list_for_each_entry(e, p->bo_list)
+ e->tv.num_shared = 0;
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
mutex_unlock(&p->adev->notifier_lock);
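The num_shared = 0 trick above relies on the ttm_execbuf_util convention (its own hunk is not shown in this excerpt): when fencing the validated list, an entry with num_shared == 0 gets the fence added as a writer. Roughly, as an assumption rather than the literal ttm code:

static void example_fence_entry(struct ttm_validate_buffer *entry,
				struct dma_fence *fence)
{
	dma_resv_add_fence(entry->bo->base.resv, fence,
			   entry->num_shared ? DMA_RESV_USAGE_READ :
			   DMA_RESV_USAGE_WRITE);
}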
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index fae5c1debfad..7a6908d71820 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -200,8 +200,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto unpin;
}
- /* TODO: Unify this with other drivers */
- r = dma_resv_get_fences(new_abo->tbo.base.resv, true,
+ r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
&work->shared_count,
&work->shared);
if (unlikely(r != 0)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 579adfafe4d0..782cbca37538 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -102,21 +102,9 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- int r;
/* pin buffer into GTT */
- r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
- if (r)
- return r;
-
- if (bo->tbo.moving) {
- r = dma_fence_wait(bo->tbo.moving, true);
- if (r) {
- amdgpu_bo_unpin(bo);
- return r;
- }
- }
- return 0;
+ return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 57b74d35052f..84a53758e18e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -526,7 +526,8 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_amdgpu_bo(gobj);
- ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);
+ ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
+ true, timeout);
/* ret == 0 means not signaled,
* ret > 0 means signaled
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 81207737c716..4ba4b54092f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -111,7 +111,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
struct dma_fence *fence;
int r;
- r = dma_resv_get_singleton(resv, true, &fence);
+ r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
if (r)
goto fallback;
@@ -139,7 +139,8 @@ fallback:
/* Not enough memory for the delayed delete, as last resort
* block for all the fences to complete.
*/
- dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
+ dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
amdgpu_pasid_free(pasid);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 4b153daf283d..b86c0b8252a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
mmu_interval_set_seq(mni, cur_seq);
- r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
mutex_unlock(&adev->notifier_lock);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index dd6693fff280..e92ecabfa7bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -612,9 +612,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (unlikely(r))
goto fail_unreserve;
- amdgpu_bo_fence(bo, fence, false);
- dma_fence_put(bo->tbo.moving);
- bo->tbo.moving = dma_fence_get(fence);
+ dma_resv_add_fence(bo->tbo.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
dma_fence_put(fence);
}
if (!bp->resv)
@@ -761,6 +760,11 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
return -EPERM;
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0)
+ return r;
+
kptr = amdgpu_bo_kptr(bo);
if (kptr) {
if (ptr)
@@ -768,11 +772,6 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
return 0;
}
- r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
- MAX_SCHEDULE_TIMEOUT);
- if (r < 0)
- return r;
-
r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
if (r)
return r;
@@ -1399,10 +1398,8 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
return;
}
- if (shared)
- dma_resv_add_shared_fence(resv, fence);
- else
- dma_resv_add_excl_fence(resv, fence);
+ dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
+ DMA_RESV_USAGE_WRITE);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index acfa207cf970..6546552e596c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -30,12 +30,15 @@
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_range_manager.h>
+#include "amdgpu_vram_mgr.h"
+
/* state back for walking over vram_mgr and gtt_mgr allocations */
struct amdgpu_res_cursor {
uint64_t start;
uint64_t size;
uint64_t remaining;
- struct drm_mm_node *node;
+ void *node;
+ uint32_t mem_type;
};
/**
@@ -52,27 +55,63 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
uint64_t start, uint64_t size,
struct amdgpu_res_cursor *cur)
{
+ struct drm_buddy_block *block;
+ struct list_head *head, *next;
struct drm_mm_node *node;
- if (!res || res->mem_type == TTM_PL_SYSTEM) {
- cur->start = start;
- cur->size = size;
- cur->remaining = size;
- cur->node = NULL;
- WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
- return;
- }
+ if (!res)
+ goto fallback;
BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
- node = to_ttm_range_mgr_node(res)->mm_nodes;
- while (start >= node->size << PAGE_SHIFT)
- start -= node++->size << PAGE_SHIFT;
+ cur->mem_type = res->mem_type;
+
+ switch (cur->mem_type) {
+ case TTM_PL_VRAM:
+ head = &to_amdgpu_vram_mgr_resource(res)->blocks;
+
+ block = list_first_entry_or_null(head,
+ struct drm_buddy_block,
+ link);
+ if (!block)
+ goto fallback;
+
+ while (start >= amdgpu_vram_mgr_block_size(block)) {
+ start -= amdgpu_vram_mgr_block_size(block);
+
+ next = block->link.next;
+ if (next != head)
+ block = list_entry(next, struct drm_buddy_block, link);
+ }
+
+ cur->start = amdgpu_vram_mgr_block_start(block) + start;
+ cur->size = min(amdgpu_vram_mgr_block_size(block) - start, size);
+ cur->remaining = size;
+ cur->node = block;
+ break;
+ case TTM_PL_TT:
+ node = to_ttm_range_mgr_node(res)->mm_nodes;
+ while (start >= node->size << PAGE_SHIFT)
+ start -= node++->size << PAGE_SHIFT;
+
+ cur->start = (node->start << PAGE_SHIFT) + start;
+ cur->size = min((node->size << PAGE_SHIFT) - start, size);
+ cur->remaining = size;
+ cur->node = node;
+ break;
+ default:
+ goto fallback;
+ }
- cur->start = (node->start << PAGE_SHIFT) + start;
- cur->size = min((node->size << PAGE_SHIFT) - start, size);
+ return;
+
+fallback:
+ cur->start = start;
+ cur->size = size;
cur->remaining = size;
- cur->node = node;
+ cur->node = NULL;
+ WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
+ return;
}
/**
@@ -85,7 +124,9 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
*/
static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
{
- struct drm_mm_node *node = cur->node;
+ struct drm_buddy_block *block;
+ struct drm_mm_node *node;
+ struct list_head *next;
BUG_ON(size > cur->remaining);
@@ -99,9 +140,27 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
return;
}
- cur->node = ++node;
- cur->start = node->start << PAGE_SHIFT;
- cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
+ switch (cur->mem_type) {
+ case TTM_PL_VRAM:
+ block = cur->node;
+
+ next = block->link.next;
+ block = list_entry(next, struct drm_buddy_block, link);
+
+ cur->node = block;
+ cur->start = amdgpu_vram_mgr_block_start(block);
+ cur->size = min(amdgpu_vram_mgr_block_size(block), cur->remaining);
+ break;
+ case TTM_PL_TT:
+ node = cur->node;
+
+ cur->node = ++node;
+ cur->start = node->start << PAGE_SHIFT;
+ cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
+ break;
+ default:
+ return;
+ }
}
#endif
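Caller usage of the reworked cursor is unchanged; only the backing store differs per memory type (buddy blocks for VRAM, drm_mm nodes for GTT). An illustrative walk, assuming the helpers above:

static inline u64 example_count_spans(struct ttm_resource *res)
{
	struct amdgpu_res_cursor cursor;
	u64 spans = 0;

	amdgpu_res_first(res, 0, res->num_pages << PAGE_SHIFT, &cursor);
	while (cursor.remaining) {
		/* cursor.start/cursor.size describe one contiguous span */
		spans++;
		amdgpu_res_next(&cursor, cursor.size);
	}
	return spans;
}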
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 40e06745fae9..11c46b3e4c60 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -259,7 +259,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
if (resv == NULL)
return -EINVAL;
- dma_resv_for_each_fence(&cursor, resv, true, f) {
+ /* TODO: Use DMA_RESV_USAGE_READ here */
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) {
dma_fence_chain_for_each(f, f) {
struct dma_fence *tmp = dma_fence_chain_contained(f);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index f7f149588432..49ffad312d5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1344,7 +1344,8 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
* If true, then return false as any KFD process needs all its BOs to
* be resident to run successfully
*/
- dma_resv_for_each_fence(&resv_cursor, bo->base.resv, true, f) {
+ dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP, f) {
if (amdkfd_fence_check_mm(f, current->mm))
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 9120ae80ef52..6a70818039dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -26,6 +26,7 @@
#include <linux/dma-direction.h>
#include <drm/gpu_scheduler.h>
+#include "amdgpu_vram_mgr.h"
#include "amdgpu.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
@@ -38,15 +39,6 @@
#define AMDGPU_POISON 0xd0bed0be
-struct amdgpu_vram_mgr {
- struct ttm_resource_manager manager;
- struct drm_mm mm;
- spinlock_t lock;
- struct list_head reservations_pending;
- struct list_head reserved_pages;
- atomic64_t vis_usage;
-};
-
struct amdgpu_gtt_mgr {
struct ttm_resource_manager manager;
struct drm_mm mm;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 39c74d9fa7cc..6eac649499d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1163,7 +1163,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
- r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+ r = dma_resv_wait_timeout(bo->tbo.base.resv,
+ DMA_RESV_USAGE_KERNEL, false,
msecs_to_jiffies(10));
if (r == 0)
r = -ETIMEDOUT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b13451255e8b..5277c10d901d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2059,7 +2059,7 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
struct dma_resv_iter cursor;
struct dma_fence *fence;
- dma_resv_for_each_fence(&cursor, resv, true, fence) {
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
/* Add a callback for each fence in the reservation object */
amdgpu_vm_prt_get(adev);
amdgpu_vm_add_prt_cb(adev, fence);
@@ -2665,7 +2665,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
return true;
/* Don't evict VM page tables while they are busy */
- if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
+ if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
return false;
/* Try to block ongoing updates */
@@ -2845,7 +2845,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true,
+ timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP,
true, timeout);
if (timeout <= 0)
return timeout;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index e3fbf0f10add..31913ae86de6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -74,13 +74,12 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
{
unsigned int i;
uint64_t value;
- int r;
+ long r;
- if (vmbo->bo.tbo.moving) {
- r = dma_fence_wait(vmbo->bo.tbo.moving, true);
- if (r)
- return r;
- }
+ r = dma_resv_wait_timeout(vmbo->bo.tbo.base.resv, DMA_RESV_USAGE_KERNEL,
+ true, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0)
+ return r;
pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo);
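The CPU-update path now expresses "wait for the page table BO to finish moving" through the reservation object rather than the removed moving fence. Isolated, the idiom is (sketch, wrapper name ours):

static long example_wait_for_moves(struct ttm_buffer_object *bo, bool intr)
{
	/* DMA_RESV_USAGE_KERNEL selects only kernel-internal fences,
	 * i.e. TTM moves and clears.
	 */
	return dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL,
				     intr, MAX_SCHEDULE_TIMEOUT);
}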
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index dbb551762805..bdb44cee19d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -204,14 +204,19 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
struct amdgpu_bo *bo = &vmbo->bo;
enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
: AMDGPU_IB_POOL_DELAYED;
+ struct dma_resv_iter cursor;
unsigned int i, ndw, nptes;
+ struct dma_fence *fence;
uint64_t *pte;
int r;
/* Wait for PD/PT moves to be completed */
- r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving);
- if (r)
- return r;
+ dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
+ DMA_RESV_USAGE_KERNEL, fence) {
+ r = amdgpu_sync_fence(&p->job->sync, fence);
+ if (r)
+ return r;
+ }
do {
ndw = p->num_dw_left;
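/*
 * [Editor's illustration, not part of the patch] With bo->tbo.moving gone,
 * pending moves are found by walking the reservation object at
 * DMA_RESV_USAGE_KERNEL, as the hunk above does. A standalone sketch;
 * sync_cb() is a hypothetical callback:
 */
#include <linux/dma-resv.h>

static int example_sync_kernel_fences(struct dma_resv *resv,
				      int (*sync_cb)(struct dma_fence *fence))
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int r;

	/* locked iteration: the caller must hold the reservation lock */
	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_KERNEL, fence) {
		r = sync_cb(fence);
		if (r)
			return r;
	}
	return 0;
}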
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 0a7611648573..49e4092f447f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -32,8 +32,10 @@
#include "atom.h"
struct amdgpu_vram_reservation {
- struct list_head node;
- struct drm_mm_node mm_node;
+ u64 start;
+ u64 size;
+ struct list_head allocated;
+ struct list_head blocks;
};
static inline struct amdgpu_vram_mgr *
@@ -186,18 +188,18 @@ const struct attribute_group amdgpu_vram_mgr_attr_group = {
};
/**
- * amdgpu_vram_mgr_vis_size - Calculate visible node size
+ * amdgpu_vram_mgr_vis_size - Calculate visible block size
*
* @adev: amdgpu_device pointer
- * @node: MM node structure
+ * @block: DRM BUDDY block structure
*
- * Calculate how many bytes of the MM node are inside visible VRAM
+ * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM
*/
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
- struct drm_mm_node *node)
+ struct drm_buddy_block *block)
{
- uint64_t start = node->start << PAGE_SHIFT;
- uint64_t end = (node->size + node->start) << PAGE_SHIFT;
+ u64 start = amdgpu_vram_mgr_block_start(block);
+ u64 end = start + amdgpu_vram_mgr_block_size(block);
if (start >= adev->gmc.visible_vram_size)
return 0;
@@ -218,9 +220,9 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_resource *res = bo->tbo.resource;
- unsigned pages = res->num_pages;
- struct drm_mm_node *mm;
- u64 usage;
+ struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
+ struct drm_buddy_block *block;
+ u64 usage = 0;
if (amdgpu_gmc_vram_full_visible(&adev->gmc))
return amdgpu_bo_size(bo);
@@ -228,9 +230,8 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
return 0;
- mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
- for (usage = 0; pages; pages -= mm->size, mm++)
- usage += amdgpu_vram_mgr_vis_size(adev, mm);
+ list_for_each_entry(block, &vres->blocks, link)
+ usage += amdgpu_vram_mgr_vis_size(adev, block);
return usage;
}
@@ -240,23 +241,30 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
- struct drm_mm *mm = &mgr->mm;
+ struct drm_buddy *mm = &mgr->mm;
struct amdgpu_vram_reservation *rsv, *temp;
+ struct drm_buddy_block *block;
uint64_t vis_usage;
- list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) {
- if (drm_mm_reserve_node(mm, &rsv->mm_node))
+ list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {
+ if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
+ rsv->size, mm->chunk_size, &rsv->allocated,
+ DRM_BUDDY_RANGE_ALLOCATION))
+ continue;
+
+ block = amdgpu_vram_mgr_first_block(&rsv->allocated);
+ if (!block)
continue;
dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
- rsv->mm_node.start, rsv->mm_node.size);
+ rsv->start, rsv->size);
- vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
+ vis_usage = amdgpu_vram_mgr_vis_size(adev, block);
atomic64_add(vis_usage, &mgr->vis_usage);
spin_lock(&man->bdev->lru_lock);
- man->usage += rsv->mm_node.size << PAGE_SHIFT;
+ man->usage += rsv->size;
spin_unlock(&man->bdev->lru_lock);
- list_move(&rsv->node, &mgr->reserved_pages);
+ list_move(&rsv->blocks, &mgr->reserved_pages);
}
}
@@ -278,14 +286,16 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
if (!rsv)
return -ENOMEM;
- INIT_LIST_HEAD(&rsv->node);
- rsv->mm_node.start = start >> PAGE_SHIFT;
- rsv->mm_node.size = size >> PAGE_SHIFT;
+ INIT_LIST_HEAD(&rsv->allocated);
+ INIT_LIST_HEAD(&rsv->blocks);
- spin_lock(&mgr->lock);
- list_add_tail(&rsv->node, &mgr->reservations_pending);
+ rsv->start = start;
+ rsv->size = size;
+
+ mutex_lock(&mgr->lock);
+ list_add_tail(&rsv->blocks, &mgr->reservations_pending);
amdgpu_vram_mgr_do_reserve(&mgr->manager);
- spin_unlock(&mgr->lock);
+ mutex_unlock(&mgr->lock);
return 0;
}
@@ -307,19 +317,19 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
struct amdgpu_vram_reservation *rsv;
int ret;
- spin_lock(&mgr->lock);
+ mutex_lock(&mgr->lock);
- list_for_each_entry(rsv, &mgr->reservations_pending, node) {
- if ((rsv->mm_node.start <= start) &&
- (start < (rsv->mm_node.start + rsv->mm_node.size))) {
+ list_for_each_entry(rsv, &mgr->reservations_pending, blocks) {
+ if (rsv->start <= start &&
+ (start < (rsv->start + rsv->size))) {
ret = -EBUSY;
goto out;
}
}
- list_for_each_entry(rsv, &mgr->reserved_pages, node) {
- if ((rsv->mm_node.start <= start) &&
- (start < (rsv->mm_node.start + rsv->mm_node.size))) {
+ list_for_each_entry(rsv, &mgr->reserved_pages, blocks) {
+ if (rsv->start <= start &&
+ (start < (rsv->start + rsv->size))) {
ret = 0;
goto out;
}
@@ -327,33 +337,11 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
ret = -ENOENT;
out:
- spin_unlock(&mgr->lock);
+ mutex_unlock(&mgr->lock);
return ret;
}
/**
- * amdgpu_vram_mgr_virt_start - update virtual start address
- *
- * @mem: ttm_resource to update
- * @node: just allocated node
- *
- * Calculate a virtual BO start address to easily check if everything is CPU
- * accessible.
- */
-static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
- struct drm_mm_node *node)
-{
- unsigned long start;
-
- start = node->start + node->size;
- if (start > mem->num_pages)
- start -= mem->num_pages;
- else
- start = 0;
- mem->start = max(mem->start, start);
-}
-
-/**
* amdgpu_vram_mgr_new - allocate new ranges
*
* @man: TTM memory type manager
@@ -368,46 +356,44 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
const struct ttm_place *place,
struct ttm_resource **res)
{
- unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
+ u64 vis_usage = 0, max_bytes, cur_size, min_block_size;
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
- uint64_t vis_usage = 0, mem_bytes, max_bytes;
- struct ttm_range_mgr_node *node;
- struct drm_mm *mm = &mgr->mm;
- enum drm_mm_insert_mode mode;
- unsigned i;
+ struct amdgpu_vram_mgr_resource *vres;
+ u64 size, remaining_size, lpfn, fpfn;
+ struct drm_buddy *mm = &mgr->mm;
+ struct drm_buddy_block *block;
+ unsigned long pages_per_block;
int r;
- lpfn = place->lpfn;
+ lpfn = place->lpfn << PAGE_SHIFT;
if (!lpfn)
- lpfn = man->size >> PAGE_SHIFT;
+ lpfn = man->size;
+
+ fpfn = place->fpfn << PAGE_SHIFT;
max_bytes = adev->gmc.mc_vram_size;
if (tbo->type != ttm_bo_type_kernel)
max_bytes -= AMDGPU_VM_RESERVED_VRAM;
- mem_bytes = tbo->base.size;
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
- pages_per_node = ~0ul;
- num_nodes = 1;
+ pages_per_block = ~0ul;
} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- pages_per_node = HPAGE_PMD_NR;
+ pages_per_block = HPAGE_PMD_NR;
#else
/* default to 2MB */
- pages_per_node = 2UL << (20UL - PAGE_SHIFT);
+ pages_per_block = 2UL << (20UL - PAGE_SHIFT);
#endif
- pages_per_node = max_t(uint32_t, pages_per_node,
- tbo->page_alignment);
- num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node);
+ pages_per_block = max_t(uint32_t, pages_per_block,
+ tbo->page_alignment);
}
- node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
- GFP_KERNEL | __GFP_ZERO);
- if (!node)
+ vres = kzalloc(sizeof(*vres), GFP_KERNEL);
+ if (!vres)
return -ENOMEM;
- ttm_resource_init(tbo, place, &node->base);
+ ttm_resource_init(tbo, place, &vres->base);
/* bail out quickly if there's likely not enough VRAM for this BO */
if (ttm_resource_manager_usage(man) > max_bytes) {
@@ -415,66 +401,130 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
goto error_fini;
}
- mode = DRM_MM_INSERT_BEST;
+ INIT_LIST_HEAD(&vres->blocks);
+
if (place->flags & TTM_PL_FLAG_TOPDOWN)
- mode = DRM_MM_INSERT_HIGH;
-
- pages_left = node->base.num_pages;
-
- /* Limit maximum size to 2GB due to SG table limitations */
- pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
-
- i = 0;
- spin_lock(&mgr->lock);
- while (pages_left) {
- uint32_t alignment = tbo->page_alignment;
-
- if (pages >= pages_per_node)
- alignment = pages_per_node;
-
- r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages,
- alignment, 0, place->fpfn,
- lpfn, mode);
- if (unlikely(r)) {
- if (pages > pages_per_node) {
- if (is_power_of_2(pages))
- pages = pages / 2;
- else
- pages = rounddown_pow_of_two(pages);
- continue;
+ vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
+
+ if (fpfn || lpfn != man->size)
+ /* Allocate blocks in desired range */
+ vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
+
+ remaining_size = vres->base.num_pages << PAGE_SHIFT;
+
+ mutex_lock(&mgr->lock);
+ while (remaining_size) {
+ if (tbo->page_alignment)
+ min_block_size = tbo->page_alignment << PAGE_SHIFT;
+ else
+ min_block_size = mgr->default_page_size;
+
+ BUG_ON(min_block_size < mm->chunk_size);
+
+ /* Limit maximum size to 2GiB due to SG table limitations */
+ size = min(remaining_size, 2ULL << 30);
+
+ if (size >= pages_per_block << PAGE_SHIFT)
+ min_block_size = pages_per_block << PAGE_SHIFT;
+
+ cur_size = size;
+
+ if (fpfn + size != place->lpfn << PAGE_SHIFT) {
+ /*
+			 * Except for actual range allocation, modify the size and
+			 * min_block_size according to whether the contiguous flag is set
+ */
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+ size = roundup_pow_of_two(size);
+ min_block_size = size;
+ /*
+ * Modify the size value if size is not
+ * aligned with min_block_size
+ */
+ } else if (!IS_ALIGNED(size, min_block_size)) {
+ size = round_up(size, min_block_size);
}
- goto error_free;
}
- vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
- amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
- pages_left -= pages;
- ++i;
+ r = drm_buddy_alloc_blocks(mm, fpfn,
+ lpfn,
+ size,
+ min_block_size,
+ &vres->blocks,
+ vres->flags);
+ if (unlikely(r))
+ goto error_free_blocks;
+
+ if (size > remaining_size)
+ remaining_size = 0;
+ else
+ remaining_size -= size;
+ }
+ mutex_unlock(&mgr->lock);
+
+ if (cur_size != size) {
+ struct drm_buddy_block *block;
+ struct list_head *trim_list;
+ u64 original_size;
+ LIST_HEAD(temp);
+
+ trim_list = &vres->blocks;
+ original_size = vres->base.num_pages << PAGE_SHIFT;
+
+ /*
+	 * If the size value was rounded up to min_block_size, trim the last
+ * block to the required size
+ */
+ if (!list_is_singular(&vres->blocks)) {
+ block = list_last_entry(&vres->blocks, typeof(*block), link);
+ list_move_tail(&block->link, &temp);
+ trim_list = &temp;
+ /*
+		 * Compute the original_size value by subtracting
+		 * (aligned size - original size) from the last block size
+ */
+ original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size);
+ }
- if (pages > pages_left)
- pages = pages_left;
+ mutex_lock(&mgr->lock);
+ drm_buddy_block_trim(mm,
+ original_size,
+ trim_list);
+ mutex_unlock(&mgr->lock);
+
+ if (!list_empty(&temp))
+ list_splice_tail(trim_list, &vres->blocks);
+ }
+
+ list_for_each_entry(block, &vres->blocks, link)
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
+
+ block = amdgpu_vram_mgr_first_block(&vres->blocks);
+ if (!block) {
+ r = -EINVAL;
+ goto error_fini;
}
- spin_unlock(&mgr->lock);
- if (i == 1)
- node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
+ vres->base.start = amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
+
+ if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
+ vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
if (adev->gmc.xgmi.connected_to_cpu)
- node->base.bus.caching = ttm_cached;
+ vres->base.bus.caching = ttm_cached;
else
- node->base.bus.caching = ttm_write_combined;
+ vres->base.bus.caching = ttm_write_combined;
atomic64_add(vis_usage, &mgr->vis_usage);
- *res = &node->base;
+ *res = &vres->base;
return 0;
-error_free:
- while (i--)
- drm_mm_remove_node(&node->mm_nodes[i]);
- spin_unlock(&mgr->lock);
+error_free_blocks:
+ drm_buddy_free_list(mm, &vres->blocks);
+ mutex_unlock(&mgr->lock);
error_fini:
- ttm_resource_fini(man, &node->base);
- kvfree(node);
+ ttm_resource_fini(man, &vres->base);
+ kfree(vres);
return r;
}
@@ -490,27 +540,26 @@ error_fini:
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
struct ttm_resource *res)
{
- struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
+ struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
+ struct drm_buddy *mm = &mgr->mm;
+ struct drm_buddy_block *block;
uint64_t vis_usage = 0;
- unsigned i, pages;
- spin_lock(&mgr->lock);
- for (i = 0, pages = res->num_pages; pages;
- pages -= node->mm_nodes[i].size, ++i) {
- struct drm_mm_node *mm = &node->mm_nodes[i];
+ mutex_lock(&mgr->lock);
+ list_for_each_entry(block, &vres->blocks, link)
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
- drm_mm_remove_node(mm);
- vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
- }
amdgpu_vram_mgr_do_reserve(man);
- spin_unlock(&mgr->lock);
+
+ drm_buddy_free_list(mm, &vres->blocks);
+ mutex_unlock(&mgr->lock);
atomic64_sub(vis_usage, &mgr->vis_usage);
ttm_resource_fini(man, res);
- kvfree(node);
+ kfree(vres);
}
/**
@@ -542,7 +591,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
if (!*sgt)
return -ENOMEM;
- /* Determine the number of DRM_MM nodes to export */
+ /* Determine the number of DRM_BUDDY blocks to export */
amdgpu_res_first(res, offset, length, &cursor);
while (cursor.remaining) {
num_entries++;
@@ -558,10 +607,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
sg->length = 0;
/*
- * Walk down DRM_MM nodes to populate scatterlist nodes
- * @note: Use iterator api to get first the DRM_MM node
+ * Walk down DRM_BUDDY blocks to populate scatterlist nodes
+	 * @note: Use the iterator API to get the first DRM_BUDDY block
* and the number of bytes from it. Access the following
- * DRM_MM node(s) if more buffer needs to exported
+	 * DRM_BUDDY block(s) if more of the buffer needs to be exported
*/
amdgpu_res_first(res, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
@@ -648,13 +697,22 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+ struct drm_buddy *mm = &mgr->mm;
+ struct drm_buddy_block *block;
drm_printf(printer, " vis usage:%llu\n",
amdgpu_vram_mgr_vis_usage(mgr));
- spin_lock(&mgr->lock);
- drm_mm_print(&mgr->mm, printer);
- spin_unlock(&mgr->lock);
+ mutex_lock(&mgr->lock);
+ drm_printf(printer, "default_page_size: %lluKiB\n",
+ mgr->default_page_size >> 10);
+
+ drm_buddy_print(mm, printer);
+
+ drm_printf(printer, "reserved:\n");
+ list_for_each_entry(block, &mgr->reserved_pages, link)
+ drm_buddy_block_print(mm, block, printer);
+ mutex_unlock(&mgr->lock);
}
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
@@ -674,16 +732,21 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
{
struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
struct ttm_resource_manager *man = &mgr->manager;
+ int err;
ttm_resource_manager_init(man, &adev->mman.bdev,
adev->gmc.real_vram_size);
man->func = &amdgpu_vram_mgr_func;
- drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT);
- spin_lock_init(&mgr->lock);
+ err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
+ if (err)
+ return err;
+
+ mutex_init(&mgr->lock);
INIT_LIST_HEAD(&mgr->reservations_pending);
INIT_LIST_HEAD(&mgr->reserved_pages);
+ mgr->default_page_size = PAGE_SIZE;
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
ttm_resource_manager_set_used(man, true);
@@ -711,16 +774,16 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
if (ret)
return;
- spin_lock(&mgr->lock);
- list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node)
+ mutex_lock(&mgr->lock);
+ list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks)
kfree(rsv);
- list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) {
- drm_mm_remove_node(&rsv->mm_node);
+ list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
+ drm_buddy_free_list(&mgr->mm, &rsv->blocks);
kfree(rsv);
}
- drm_mm_takedown(&mgr->mm);
- spin_unlock(&mgr->lock);
+ drm_buddy_fini(&mgr->mm);
+ mutex_unlock(&mgr->lock);
ttm_resource_manager_cleanup(man);
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
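/*
 * [Editor's illustration, not part of the patch] The bare drm_buddy life
 * cycle that amdgpu_vram_mgr now wraps; sizes are made up. Note the size
 * passed to drm_buddy_alloc_blocks() must be a multiple of min_page_size,
 * which the drm_buddy hunk further below enforces with -EINVAL:
 */
#include <drm/drm_buddy.h>
#include <linux/sizes.h>

static int example_buddy_roundtrip(void)
{
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	int err;

	err = drm_buddy_init(&mm, SZ_1G, PAGE_SIZE);	/* size, chunk size */
	if (err)
		return err;

	/* 8 MiB anywhere in [0, 1 GiB), with a 2 MiB minimum block size */
	err = drm_buddy_alloc_blocks(&mm, 0, SZ_1G, SZ_8M, SZ_2M, &blocks, 0);
	if (!err)
		drm_buddy_free_list(&mm, &blocks);

	drm_buddy_fini(&mm);
	return err;
}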
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
new file mode 100644
index 000000000000..9a2db87186c7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_VRAM_MGR_H__
+#define __AMDGPU_VRAM_MGR_H__
+
+#include <drm/drm_buddy.h>
+
+struct amdgpu_vram_mgr {
+ struct ttm_resource_manager manager;
+ struct drm_buddy mm;
+ /* protects access to buffer objects */
+ struct mutex lock;
+ struct list_head reservations_pending;
+ struct list_head reserved_pages;
+ atomic64_t vis_usage;
+ u64 default_page_size;
+};
+
+struct amdgpu_vram_mgr_resource {
+ struct ttm_resource base;
+ struct list_head blocks;
+ unsigned long flags;
+};
+
+static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_offset(block);
+}
+
+static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block)
+{
+ return PAGE_SIZE << drm_buddy_block_order(block);
+}
+
+static inline struct drm_buddy_block *
+amdgpu_vram_mgr_first_block(struct list_head *list)
+{
+ return list_first_entry_or_null(list, struct drm_buddy_block, link);
+}
+
+static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
+{
+ struct drm_buddy_block *block;
+ u64 start, size;
+
+ block = amdgpu_vram_mgr_first_block(head);
+ if (!block)
+ return false;
+
+ while (head != block->link.next) {
+ start = amdgpu_vram_mgr_block_start(block);
+ size = amdgpu_vram_mgr_block_size(block);
+
+ block = list_entry(block->link.next, struct drm_buddy_block, link);
+ if (start + size != amdgpu_vram_mgr_block_start(block))
+ return false;
+ }
+
+ return true;
+}
+
+static inline struct amdgpu_vram_mgr_resource *
+to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
+{
+ return container_of(res, struct amdgpu_vram_mgr_resource, base);
+}
+
+#endif
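/*
 * [Editor's illustration, not part of the patch] How the inline helpers
 * above compose; example_res_is_contiguous() is hypothetical and assumes
 * res belongs to the VRAM manager:
 */
static bool example_res_is_contiguous(struct ttm_resource *res)
{
	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);

	/* true when the buddy blocks cover one gap-free address range */
	return amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks);
}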
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 62139ff35476..73423b805b54 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -9238,7 +9238,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* deadlock during GPU reset when this fence will not signal
* but we hold reservation lock for the BO.
*/
- r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
+ r = dma_resv_wait_timeout(abo->tbo.base.resv,
+ DMA_RESV_USAGE_WRITE, false,
msecs_to_jiffies(5000));
if (unlikely(r <= 0))
DRM_ERROR("Waiting for fences timed out!");
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index 541949f2d44a..e0b9f7063b20 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -256,6 +256,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms,
formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
layer->layer_type, &n_formats);
+ if (!formats) {
+ kfree(kplane);
+ return -ENOMEM;
+ }
err = drm_universal_plane_init(&kms->base, plane,
get_possible_crtcs(kms, c->pipeline),
@@ -266,8 +270,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms,
komeda_put_fourcc_list(formats);
- if (err)
- goto cleanup;
+ if (err) {
+ kfree(kplane);
+ return err;
+ }
drm_plane_helper_add(plane, &komeda_plane_helper_funcs);
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 494075ddbef6..b5928b52e279 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -487,7 +487,10 @@ static void malidp_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
malidp_crtc_destroy_state(crtc, crtc->state);
- __drm_atomic_helper_crtc_reset(crtc, &state->base);
+ if (state)
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
}
static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 0562bdaac00c..81d9f5004025 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -310,17 +310,13 @@ static int malidp_se_check_scaling(struct malidp_plane *mp,
static u32 malidp_get_pgsize_bitmap(struct malidp_plane *mp)
{
- u32 pgsize_bitmap = 0;
+ struct iommu_domain *mmu_dom;
- if (iommu_present(&platform_bus_type)) {
- struct iommu_domain *mmu_dom =
- iommu_get_domain_for_dev(mp->base.dev->dev);
+ mmu_dom = iommu_get_domain_for_dev(mp->base.dev->dev);
+ if (mmu_dom)
+ return mmu_dom->pgsize_bitmap;
- if (mmu_dom)
- pgsize_bitmap = mmu_dom->pgsize_bitmap;
- }
-
- return pgsize_bitmap;
+ return 0;
}
/*
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 658837caaf39..7ac920a57d26 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -32,6 +32,7 @@ config DRM_CHIPONE_ICN6211
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL_BRIDGE
+ select REGMAP_I2C
help
ICN6211 is a MIPI-DSI to RGB converter bridge from Chipone.
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index b72aa3f3b5c2..6ce0633d4c08 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -14,6 +14,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#define VENDOR_ID 0x00
@@ -134,6 +135,7 @@
struct chipone {
struct device *dev;
+ struct regmap *regmap;
struct i2c_client *client;
struct drm_bridge bridge;
struct drm_display_mode mode;
@@ -146,6 +148,77 @@ struct chipone {
bool interface_i2c;
};
+static const struct regmap_range chipone_dsi_readable_ranges[] = {
+ regmap_reg_range(VENDOR_ID, VERSION_ID),
+ regmap_reg_range(FIRMWARE_VERSION, PLL_SSC_OFFSET(3)),
+ regmap_reg_range(GPIO_OEN, MIPI_ULPS_CTRL),
+ regmap_reg_range(MIPI_CLK_CHK_VAR, MIPI_T_TA_SURE_PRE),
+ regmap_reg_range(MIPI_T_LPX_SET, MIPI_INIT_TIME_H),
+ regmap_reg_range(MIPI_T_CLK_TERM_EN, MIPI_T_CLK_SETTLE),
+ regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY_(5)),
+ regmap_reg_range(MIPI_PD_RX, MIPI_RST_NUM),
+ regmap_reg_range(MIPI_DBG_SET_(0), MIPI_DBG_SET_(9)),
+ regmap_reg_range(MIPI_DBG_SEL, MIPI_ATE_STATUS_(1)),
+};
+
+static const struct regmap_access_table chipone_dsi_readable_table = {
+ .yes_ranges = chipone_dsi_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(chipone_dsi_readable_ranges),
+};
+
+static const struct regmap_range chipone_dsi_writeable_ranges[] = {
+ regmap_reg_range(CONFIG_FINISH, PLL_SSC_OFFSET(3)),
+ regmap_reg_range(GPIO_OEN, MIPI_ULPS_CTRL),
+ regmap_reg_range(MIPI_CLK_CHK_VAR, MIPI_T_TA_SURE_PRE),
+ regmap_reg_range(MIPI_T_LPX_SET, MIPI_INIT_TIME_H),
+ regmap_reg_range(MIPI_T_CLK_TERM_EN, MIPI_T_CLK_SETTLE),
+ regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY_(5)),
+ regmap_reg_range(MIPI_PD_RX, MIPI_RST_NUM),
+ regmap_reg_range(MIPI_DBG_SET_(0), MIPI_DBG_SET_(9)),
+ regmap_reg_range(MIPI_DBG_SEL, MIPI_ATE_STATUS_(1)),
+};
+
+static const struct regmap_access_table chipone_dsi_writeable_table = {
+ .yes_ranges = chipone_dsi_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(chipone_dsi_writeable_ranges),
+};
+
+static const struct regmap_config chipone_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &chipone_dsi_readable_table,
+ .wr_table = &chipone_dsi_writeable_table,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = MIPI_ATE_STATUS_(1),
+};
+
+static int chipone_dsi_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct mipi_dsi_device *dsi = context;
+ const u16 reg16 = (val_size << 8) | *(u8 *)reg;
+ int ret;
+
+ ret = mipi_dsi_generic_read(dsi, &reg16, 2, val, val_size);
+
+ return ret == val_size ? 0 : -EINVAL;
+}
+
+static int chipone_dsi_write(void *context, const void *data, size_t count)
+{
+ struct mipi_dsi_device *dsi = context;
+
+ return mipi_dsi_generic_write(dsi, data, 2);
+}
+
+static const struct regmap_bus chipone_dsi_regmap_bus = {
+ .read = chipone_dsi_read,
+ .write = chipone_dsi_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
static inline struct chipone *bridge_to_chipone(struct drm_bridge *bridge)
{
return container_of(bridge, struct chipone, bridge);
@@ -153,18 +226,16 @@ static inline struct chipone *bridge_to_chipone(struct drm_bridge *bridge)
static void chipone_readb(struct chipone *icn, u8 reg, u8 *val)
{
- if (icn->interface_i2c)
- *val = i2c_smbus_read_byte_data(icn->client, reg);
- else
- mipi_dsi_generic_read(icn->dsi, (u8[]){reg, 1}, 2, val, 1);
+ int ret, pval;
+
+ ret = regmap_read(icn->regmap, reg, &pval);
+
+ *val = ret ? 0 : pval & 0xff;
}
static int chipone_writeb(struct chipone *icn, u8 reg, u8 val)
{
- if (icn->interface_i2c)
- return i2c_smbus_write_byte_data(icn->client, reg, val);
- else
- return mipi_dsi_generic_write(icn->dsi, (u8[]){reg, val}, 2);
+ return regmap_write(icn->regmap, reg, val);
}
static void chipone_configure_pll(struct chipone *icn,
@@ -591,6 +662,11 @@ static int chipone_dsi_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
+ icn->regmap = devm_regmap_init(dev, &chipone_dsi_regmap_bus,
+ dsi, &chipone_regmap_config);
+ if (IS_ERR(icn->regmap))
+ return PTR_ERR(icn->regmap);
+
icn->interface_i2c = false;
icn->dsi = dsi;
@@ -616,6 +692,10 @@ static int chipone_i2c_probe(struct i2c_client *client,
if (ret)
return ret;
+ icn->regmap = devm_regmap_init_i2c(client, &chipone_regmap_config);
+ if (IS_ERR(icn->regmap))
+ return PTR_ERR(icn->regmap);
+
icn->interface_i2c = true;
icn->client = client;
dev_set_drvdata(dev, icn);
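/*
 * [Editor's illustration, not part of the patch] Once both probe paths
 * create a regmap (I2C, or the custom DSI bus above), register access is
 * interface-agnostic. A hypothetical read of the VENDOR_ID register:
 */
static int example_read_vendor_id(struct chipone *icn, u8 *id)
{
	unsigned int val;
	int ret;

	ret = regmap_read(icn->regmap, VENDOR_ID, &val);
	if (ret)
		return ret;

	*id = val & 0xff;
	return 0;
}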
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index d24f5b90feab..e4d52a7e31b7 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -24,6 +24,7 @@ struct display_connector {
int hpd_irq;
struct regulator *dp_pwr;
+ struct gpio_desc *ddc_en;
};
static inline struct display_connector *
@@ -345,6 +346,17 @@ static int display_connector_probe(struct platform_device *pdev)
}
}
+ /* enable DDC */
+ if (type == DRM_MODE_CONNECTOR_HDMIA) {
+ conn->ddc_en = devm_gpiod_get_optional(&pdev->dev, "ddc-en",
+ GPIOD_OUT_HIGH);
+
+ if (IS_ERR(conn->ddc_en)) {
+ dev_err(&pdev->dev, "Couldn't get ddc-en gpio\n");
+ return PTR_ERR(conn->ddc_en);
+ }
+ }
+
conn->bridge.funcs = &display_connector_bridge_funcs;
conn->bridge.of_node = pdev->dev.of_node;
@@ -373,6 +385,9 @@ static int display_connector_remove(struct platform_device *pdev)
{
struct display_connector *conn = platform_get_drvdata(pdev);
+ if (conn->ddc_en)
+ gpiod_set_value(conn->ddc_en, 0);
+
if (conn->dp_pwr)
regulator_disable(conn->dp_pwr);
diff --git a/drivers/gpu/drm/dp/drm_dp.c b/drivers/gpu/drm/dp/drm_dp.c
index 580016a1b9eb..2a1f5ff6633c 100644
--- a/drivers/gpu/drm/dp/drm_dp.c
+++ b/drivers/gpu/drm/dp/drm_dp.c
@@ -528,6 +528,31 @@ unlock:
}
/**
+ * drm_dp_dpcd_probe() - probe a given DPCD address with a 1-byte read access
+ * @aux: DisplayPort AUX channel (SST)
+ * @offset: address of the register to probe
+ *
+ * Probe the provided DPCD address by reading 1 byte from it. The function can
+ * be used to trigger some side effect the read access has, like waking up the
+ * sink, without needing the read-out value.
+ *
+ * Returns 0 if the read access succeeded, or a negative error code on failure.
+ */
+int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset)
+{
+ u8 buffer;
+ int ret;
+
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, &buffer, 1);
+ WARN_ON(ret == 0);
+
+ drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, &buffer, ret);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL(drm_dp_dpcd_probe);
+
+/**
* drm_dp_dpcd_read() - read a series of bytes from the DPCD
* @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to read
@@ -559,10 +584,9 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
* monitor doesn't power down exactly after the throw away read.
*/
if (!aux->is_remote) {
- ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV,
- buffer, 1);
- if (ret != 1)
- goto out;
+ ret = drm_dp_dpcd_probe(aux, DP_DPCD_REV);
+ if (ret < 0)
+ return ret;
}
if (aux->is_remote)
@@ -571,7 +595,6 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset,
buffer, size);
-out:
drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, buffer, ret);
return ret;
}
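/*
 * [Editor's illustration, not part of the patch] A caller can use the new
 * probe helper purely for its side effect (waking the sink) before a bulk
 * DPCD read; example_wake_and_read() is hypothetical:
 */
static int example_wake_and_read(struct drm_dp_aux *aux, u8 *dpcd, size_t len)
{
	ssize_t ret;

	ret = drm_dp_dpcd_probe(aux, DP_DPCD_REV);
	if (ret < 0)
		return ret;

	ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, len);
	return ret == len ? 0 : (ret < 0 ? ret : -EIO);
}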
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 72f52f293249..11bb59399471 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -665,6 +665,9 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
if (start + size == end)
return __drm_buddy_alloc_range(mm, start, size, blocks);
+ if (!IS_ALIGNED(size, min_page_size))
+ return -EINVAL;
+
pages = size >> ilog2(mm->chunk_size);
order = fls(pages) - 1;
min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index bff917531f33..b632825654a9 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -297,15 +297,15 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
return false;
}
- saved_mode = crtc->mode;
- saved_hwmode = crtc->hwmode;
+ drm_mode_init(&saved_mode, &crtc->mode);
+ drm_mode_init(&saved_hwmode, &crtc->hwmode);
saved_x = crtc->x;
saved_y = crtc->y;
/* Update crtc values up front so the driver can rely on them for mode
* setting.
*/
- crtc->mode = *mode;
+ drm_mode_copy(&crtc->mode, mode);
crtc->x = x;
crtc->y = y;
@@ -341,7 +341,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
}
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
- crtc->hwmode = *adjusted_mode;
+ drm_mode_copy(&crtc->hwmode, adjusted_mode);
/* Prepare the encoders and CRTCs before setting the mode. */
drm_for_each_encoder(encoder, dev) {
@@ -411,8 +411,8 @@ done:
drm_mode_destroy(dev, adjusted_mode);
if (!ret) {
crtc->enabled = saved_enabled;
- crtc->mode = saved_mode;
- crtc->hwmode = saved_hwmode;
+ drm_mode_copy(&crtc->mode, &saved_mode);
+ drm_mode_copy(&crtc->hwmode, &saved_hwmode);
crtc->x = saved_x;
crtc->y = saved_y;
}
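/*
 * [Editor's illustration, not part of the patch] struct drm_display_mode
 * embeds a list_head, so plain struct assignment would also copy the
 * source mode's list linkage. drm_mode_copy() keeps the destination's own
 * head intact, and drm_mode_init() is the variant for uninitialized
 * (e.g. on-stack) destinations, as used above:
 */
#include <drm/drm_modes.h>

static void example_snapshot_mode(struct drm_display_mode *saved,
				  const struct drm_display_mode *current_mode)
{
	drm_mode_init(saved, current_mode);	/* saved may be uninitialized */
}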
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 04e818ecd662..324ce8467915 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1568,6 +1568,38 @@ static const struct drm_display_mode edid_4k_modes[] = {
/*** DDC fetch and block validation ***/
+static int edid_extension_block_count(const struct edid *edid)
+{
+ return edid->extensions;
+}
+
+static int edid_block_count(const struct edid *edid)
+{
+ return edid_extension_block_count(edid) + 1;
+}
+
+static int edid_size_by_blocks(int num_blocks)
+{
+ return num_blocks * EDID_LENGTH;
+}
+
+static int edid_size(const struct edid *edid)
+{
+ return edid_size_by_blocks(edid_block_count(edid));
+}
+
+static const void *edid_block_data(const struct edid *edid, int index)
+{
+ BUILD_BUG_ON(sizeof(*edid) != EDID_LENGTH);
+
+ return edid + index;
+}
+
+static const void *edid_extension_block_data(const struct edid *edid, int index)
+{
+ return edid_block_data(edid, index + 1);
+}
+
static const u8 edid_header[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
@@ -1632,9 +1664,9 @@ static int edid_block_tag(const void *_block)
return block[0];
}
-static bool edid_is_zero(const void *edid, int length)
+static bool edid_block_is_zero(const void *edid)
{
- return !memchr_inv(edid, 0, length);
+ return !memchr_inv(edid, 0, EDID_LENGTH);
}
/**
@@ -1654,8 +1686,8 @@ bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2)
return false;
if (edid1) {
- edid1_len = EDID_LENGTH * (1 + edid1->extensions);
- edid2_len = EDID_LENGTH * (1 + edid2->extensions);
+ edid1_len = edid_size(edid1);
+ edid2_len = edid_size(edid2);
if (edid1_len != edid2_len)
return false;
@@ -1670,7 +1702,9 @@ EXPORT_SYMBOL(drm_edid_are_equal);
enum edid_block_status {
EDID_BLOCK_OK = 0,
+ EDID_BLOCK_READ_FAIL,
EDID_BLOCK_NULL,
+ EDID_BLOCK_ZERO,
EDID_BLOCK_HEADER_CORRUPT,
EDID_BLOCK_HEADER_REPAIR,
EDID_BLOCK_HEADER_FIXED,
@@ -1689,15 +1723,23 @@ static enum edid_block_status edid_block_check(const void *_block,
if (is_base_block) {
int score = drm_edid_header_is_valid(block);
- if (score < clamp(edid_fixup, 0, 8))
- return EDID_BLOCK_HEADER_CORRUPT;
+ if (score < clamp(edid_fixup, 0, 8)) {
+ if (edid_block_is_zero(block))
+ return EDID_BLOCK_ZERO;
+ else
+ return EDID_BLOCK_HEADER_CORRUPT;
+ }
if (score < 8)
return EDID_BLOCK_HEADER_REPAIR;
}
- if (edid_block_compute_checksum(block) != edid_block_get_checksum(block))
- return EDID_BLOCK_CHECKSUM;
+ if (edid_block_compute_checksum(block) != edid_block_get_checksum(block)) {
+ if (edid_block_is_zero(block))
+ return EDID_BLOCK_ZERO;
+ else
+ return EDID_BLOCK_CHECKSUM;
+ }
if (is_base_block) {
if (block->version != 1)
@@ -1720,6 +1762,70 @@ static bool edid_block_valid(const void *block, bool base)
edid_block_tag(block));
}
+static void edid_block_status_print(enum edid_block_status status,
+ const struct edid *block,
+ int block_num)
+{
+ switch (status) {
+ case EDID_BLOCK_OK:
+ break;
+ case EDID_BLOCK_READ_FAIL:
+ pr_debug("EDID block %d read failed\n", block_num);
+ break;
+ case EDID_BLOCK_NULL:
+ pr_debug("EDID block %d pointer is NULL\n", block_num);
+ break;
+ case EDID_BLOCK_ZERO:
+ pr_notice("EDID block %d is all zeroes\n", block_num);
+ break;
+ case EDID_BLOCK_HEADER_CORRUPT:
+ pr_notice("EDID has corrupt header\n");
+ break;
+ case EDID_BLOCK_HEADER_REPAIR:
+ pr_debug("EDID corrupt header needs repair\n");
+ break;
+ case EDID_BLOCK_HEADER_FIXED:
+ pr_debug("EDID corrupt header fixed\n");
+ break;
+ case EDID_BLOCK_CHECKSUM:
+ if (edid_block_status_valid(status, edid_block_tag(block))) {
+ pr_debug("EDID block %d (tag 0x%02x) checksum is invalid, remainder is %d, ignoring\n",
+ block_num, edid_block_tag(block),
+ edid_block_compute_checksum(block));
+ } else {
+ pr_notice("EDID block %d (tag 0x%02x) checksum is invalid, remainder is %d\n",
+ block_num, edid_block_tag(block),
+ edid_block_compute_checksum(block));
+ }
+ break;
+ case EDID_BLOCK_VERSION:
+ pr_notice("EDID has major version %d, instead of 1\n",
+ block->version);
+ break;
+ default:
+ WARN(1, "EDID block %d unknown edid block status code %d\n",
+ block_num, status);
+ break;
+ }
+}
+
+static void edid_block_dump(const char *level, const void *block, int block_num)
+{
+ enum edid_block_status status;
+ char prefix[20];
+
+ status = edid_block_check(block, block_num == 0);
+ if (status == EDID_BLOCK_ZERO)
+ sprintf(prefix, "\t[%02x] ZERO ", block_num);
+ else if (!edid_block_status_valid(status, edid_block_tag(block)))
+ sprintf(prefix, "\t[%02x] BAD ", block_num);
+ else
+ sprintf(prefix, "\t[%02x] GOOD ", block_num);
+
+ print_hex_dump(level, prefix, DUMP_PREFIX_NONE, 16, 1,
+ block, EDID_LENGTH, false);
+}
+
/**
* drm_edid_block_valid - Sanity check the EDID block (base or extension)
* @raw_edid: pointer to raw EDID block
@@ -1766,33 +1872,14 @@ bool drm_edid_block_valid(u8 *_block, int block_num, bool print_bad_edid,
*edid_corrupt = true;
}
+ edid_block_status_print(status, block, block_num);
+
/* Determine whether we can use this block with this status. */
valid = edid_block_status_valid(status, edid_block_tag(block));
- /* Some fairly random status printouts. */
- if (status == EDID_BLOCK_CHECKSUM) {
- if (valid) {
- DRM_DEBUG("EDID block checksum is invalid, remainder is %d\n",
- edid_block_compute_checksum(block));
- DRM_DEBUG("Assuming a KVM switch modified the block but left the original checksum\n");
- } else if (print_bad_edid) {
- DRM_NOTE("EDID block checksum is invalid, remainder is %d\n",
- edid_block_compute_checksum(block));
- }
- } else if (status == EDID_BLOCK_VERSION) {
- DRM_NOTE("EDID has major version %d, instead of 1\n",
- block->version);
- }
-
- if (!valid && print_bad_edid) {
- if (edid_is_zero(block, EDID_LENGTH)) {
- pr_notice("EDID block is all zeroes\n");
- } else {
- pr_notice("Raw EDID:\n");
- print_hex_dump(KERN_NOTICE,
- " \t", DUMP_PREFIX_NONE, 16, 1,
- block, EDID_LENGTH, false);
- }
+ if (!valid && print_bad_edid && status != EDID_BLOCK_ZERO) {
+ pr_notice("Raw EDID:\n");
+ edid_block_dump(KERN_NOTICE, block, block_num);
}
return valid;
@@ -1810,14 +1897,16 @@ EXPORT_SYMBOL(drm_edid_block_valid);
bool drm_edid_is_valid(struct edid *edid)
{
int i;
- u8 *raw = (u8 *)edid;
if (!edid)
return false;
- for (i = 0; i <= edid->extensions; i++)
- if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true, NULL))
+ for (i = 0; i < edid_block_count(edid); i++) {
+ void *block = (void *)edid_block_data(edid, i);
+
+ if (!drm_edid_block_valid(block, i, true, NULL))
return false;
+ }
return true;
}
@@ -1830,13 +1919,13 @@ static struct edid *edid_filter_invalid_blocks(const struct edid *edid,
int valid_extensions = edid->extensions - invalid_blocks;
int i;
- new = kmalloc_array(valid_extensions + 1, EDID_LENGTH, GFP_KERNEL);
+ new = kmalloc(edid_size_by_blocks(valid_extensions + 1), GFP_KERNEL);
if (!new)
goto out;
dest_block = new;
- for (i = 0; i <= edid->extensions; i++) {
- const void *block = edid + i;
+ for (i = 0; i < edid_block_count(edid); i++) {
+ const void *block = edid_block_data(edid, i);
if (edid_block_valid(block, i == 0))
memcpy(dest_block++, block, EDID_LENGTH);
@@ -1916,7 +2005,7 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
}
static void connector_bad_edid(struct drm_connector *connector,
- u8 *edid, int num_blocks)
+ const struct edid *edid, int num_blocks)
{
int i;
u8 last_block;
@@ -1927,32 +2016,19 @@ static void connector_bad_edid(struct drm_connector *connector,
* of 0x7e in the EDID of the _index_ of the last block in the
* combined chunk of memory.
*/
- last_block = edid[0x7e];
+ last_block = edid->extensions;
/* Calculate real checksum for the last edid extension block data */
if (last_block < num_blocks)
connector->real_edid_checksum =
- edid_block_compute_checksum(edid + last_block * EDID_LENGTH);
+ edid_block_compute_checksum(edid + last_block);
if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
return;
drm_dbg_kms(connector->dev, "%s: EDID is invalid:\n", connector->name);
- for (i = 0; i < num_blocks; i++) {
- u8 *block = edid + i * EDID_LENGTH;
- char prefix[20];
-
- if (edid_is_zero(block, EDID_LENGTH))
- sprintf(prefix, "\t[%02x] ZERO ", i);
- else if (!drm_edid_block_valid(block, i, false, NULL))
- sprintf(prefix, "\t[%02x] BAD ", i);
- else
- sprintf(prefix, "\t[%02x] GOOD ", i);
-
- print_hex_dump(KERN_DEBUG,
- prefix, DUMP_PREFIX_NONE, 16, 1,
- block, EDID_LENGTH, false);
- }
+ for (i = 0; i < num_blocks; i++)
+ edid_block_dump(KERN_DEBUG, edid + i, i);
}
/* Get override or firmware EDID */
@@ -1999,43 +2075,39 @@ int drm_add_override_edid_modes(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_add_override_edid_modes);
-static struct edid *drm_do_get_edid_base_block(struct drm_connector *connector,
- int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
- size_t len),
- void *data)
+typedef int read_block_fn(void *context, u8 *buf, unsigned int block, size_t len);
+
+static enum edid_block_status edid_block_read(void *block, unsigned int block_num,
+ read_block_fn read_block,
+ void *context)
{
- int *null_edid_counter = connector ? &connector->null_edid_counter : NULL;
- bool *edid_corrupt = connector ? &connector->edid_corrupt : NULL;
- void *edid;
+ enum edid_block_status status;
+ bool is_base_block = block_num == 0;
int try;
- edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
- if (edid == NULL)
- return NULL;
-
- /* base block fetch */
for (try = 0; try < 4; try++) {
- if (get_edid_block(data, edid, 0, EDID_LENGTH))
- goto out;
- if (drm_edid_block_valid(edid, 0, false, edid_corrupt))
- break;
- if (try == 0 && edid_is_zero(edid, EDID_LENGTH)) {
- if (null_edid_counter)
- (*null_edid_counter)++;
- goto carp;
+ if (read_block(context, block, block_num, EDID_LENGTH))
+ return EDID_BLOCK_READ_FAIL;
+
+ status = edid_block_check(block, is_base_block);
+ if (status == EDID_BLOCK_HEADER_REPAIR) {
+ edid_header_fix(block);
+
+ /* Retry with fixed header, update status if that worked. */
+ status = edid_block_check(block, is_base_block);
+ if (status == EDID_BLOCK_OK)
+ status = EDID_BLOCK_HEADER_FIXED;
}
- }
- if (try == 4)
- goto carp;
- return edid;
+ if (edid_block_status_valid(status, edid_block_tag(block)))
+ break;
-carp:
- if (connector)
- connector_bad_edid(connector, edid, 1);
-out:
- kfree(edid);
- return NULL;
+	/* Fail early on an unrepairable all-zero base block. */
+ if (try == 0 && is_base_block && status == EDID_BLOCK_ZERO)
+ break;
+ }
+
+ return status;
}
/**
@@ -2059,53 +2131,74 @@ out:
* Return: Pointer to valid EDID or NULL if we couldn't find any.
*/
struct edid *drm_do_get_edid(struct drm_connector *connector,
- int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
- size_t len),
- void *data)
+ read_block_fn read_block,
+ void *context)
{
- int j, invalid_blocks = 0;
- struct edid *edid, *new, *override;
+ enum edid_block_status status;
+ int i, invalid_blocks = 0;
+ struct edid *edid, *new;
- override = drm_get_override_edid(connector);
- if (override)
- return override;
+ edid = drm_get_override_edid(connector);
+ if (edid)
+ goto ok;
- edid = drm_do_get_edid_base_block(connector, get_edid_block, data);
+ edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (!edid)
return NULL;
- if (edid->extensions == 0)
- return edid;
+ status = edid_block_read(edid, 0, read_block, context);
+
+ edid_block_status_print(status, edid, 0);
+
+ if (status == EDID_BLOCK_READ_FAIL)
+ goto fail;
+
+ /* FIXME: Clarify what a corrupt EDID actually means. */
+ if (status == EDID_BLOCK_OK || status == EDID_BLOCK_VERSION)
+ connector->edid_corrupt = false;
+ else
+ connector->edid_corrupt = true;
+
+ if (!edid_block_status_valid(status, edid_block_tag(edid))) {
+ if (status == EDID_BLOCK_ZERO)
+ connector->null_edid_counter++;
+
+ connector_bad_edid(connector, edid, 1);
+ goto fail;
+ }
- new = krealloc(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!edid_extension_block_count(edid))
+ goto ok;
+
+ new = krealloc(edid, edid_size(edid), GFP_KERNEL);
if (!new)
- goto out;
+ goto fail;
edid = new;
- for (j = 1; j <= edid->extensions; j++) {
- void *block = edid + j;
- int try;
+ for (i = 1; i < edid_block_count(edid); i++) {
+ void *block = (void *)edid_block_data(edid, i);
- for (try = 0; try < 4; try++) {
- if (get_edid_block(data, block, j, EDID_LENGTH))
- goto out;
- if (drm_edid_block_valid(block, j, false, NULL))
- break;
- }
+ status = edid_block_read(block, i, read_block, context);
+
+ edid_block_status_print(status, block, i);
- if (try == 4)
+ if (!edid_block_status_valid(status, edid_block_tag(block))) {
+ if (status == EDID_BLOCK_READ_FAIL)
+ goto fail;
invalid_blocks++;
+ }
}
if (invalid_blocks) {
- connector_bad_edid(connector, (u8 *)edid, edid->extensions + 1);
+ connector_bad_edid(connector, edid, edid_block_count(edid));
edid = edid_filter_invalid_blocks(edid, invalid_blocks);
}
+ok:
return edid;
-out:
+fail:
kfree(edid);
return NULL;
}
@@ -2199,20 +2292,27 @@ static u32 edid_extract_panel_id(const struct edid *edid)
u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
{
- const struct edid *edid;
- u32 panel_id;
-
- edid = drm_do_get_edid_base_block(NULL, drm_do_probe_ddc_edid, adapter);
+ enum edid_block_status status;
+ void *base_block;
+ u32 panel_id = 0;
/*
* There are no manufacturer IDs of 0, so if there is a problem reading
* the EDID then we'll just return 0.
*/
- if (!edid)
+
+ base_block = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (!base_block)
return 0;
- panel_id = edid_extract_panel_id(edid);
- kfree(edid);
+ status = edid_block_read(base_block, 0, drm_do_probe_ddc_edid, adapter);
+
+ edid_block_status_print(status, base_block, 0);
+
+ if (edid_block_status_valid(status, edid_block_tag(base_block)))
+ panel_id = edid_extract_panel_id(base_block);
+
+ kfree(base_block);
return panel_id;
}
@@ -2255,7 +2355,7 @@ EXPORT_SYMBOL(drm_get_edid_switcheroo);
*/
struct edid *drm_edid_duplicate(const struct edid *edid)
{
- return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+ return kmemdup(edid, edid_size(edid), GFP_KERNEL);
}
EXPORT_SYMBOL(drm_edid_duplicate);
@@ -2439,8 +2539,8 @@ drm_for_each_detailed_block(const struct edid *edid, detailed_cb *cb, void *clos
for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
cb(&(edid->detailed_timings[i]), closure);
- for (i = 1; i <= edid->extensions; i++) {
- const u8 *ext = (const u8 *)edid + (i * EDID_LENGTH);
+ for (i = 0; i < edid_extension_block_count(edid); i++) {
+ const u8 *ext = edid_extension_block_data(edid, i);
switch (*ext) {
case CEA_EXT:
@@ -3410,17 +3510,17 @@ const u8 *drm_find_edid_extension(const struct edid *edid,
int i;
/* No EDID or EDID extensions */
- if (edid == NULL || edid->extensions == 0)
+ if (!edid || !edid_extension_block_count(edid))
return NULL;
/* Find CEA extension */
- for (i = *ext_index; i < edid->extensions; i++) {
- edid_ext = (const u8 *)edid + EDID_LENGTH * (i + 1);
+ for (i = *ext_index; i < edid_extension_block_count(edid); i++) {
+ edid_ext = edid_extension_block_data(edid, i);
if (edid_block_tag(edid_ext) == ext_id)
break;
}
- if (i >= edid->extensions)
+ if (i >= edid_extension_block_count(edid))
return NULL;
*ext_index = i + 1;
@@ -3551,9 +3651,11 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m
match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
for (vic = 1; vic < cea_num_vics(); vic = cea_next_vic(vic)) {
- struct drm_display_mode cea_mode = *cea_mode_for_vic(vic);
+ struct drm_display_mode cea_mode;
unsigned int clock1, clock2;
+ drm_mode_init(&cea_mode, cea_mode_for_vic(vic));
+
/* Check both 60Hz and 59.94Hz */
clock1 = cea_mode.clock;
clock2 = cea_mode_alternate_clock(&cea_mode);
@@ -3590,9 +3692,11 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
for (vic = 1; vic < cea_num_vics(); vic = cea_next_vic(vic)) {
- struct drm_display_mode cea_mode = *cea_mode_for_vic(vic);
+ struct drm_display_mode cea_mode;
unsigned int clock1, clock2;
+ drm_mode_init(&cea_mode, cea_mode_for_vic(vic));
+
/* Check both 60Hz and 59.94Hz */
clock1 = cea_mode.clock;
clock2 = cea_mode_alternate_clock(&cea_mode);
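/*
 * [Editor's illustration, not part of the patch] The new accessors keep
 * all EDID size and indexing arithmetic in one place. A hypothetical use
 * on top of the helpers introduced above:
 */
static const void *example_last_edid_block(const struct edid *edid)
{
	int count = edid_block_count(edid);	/* base block + extensions */

	return edid_block_data(edid, count - 1);
}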
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 133dfae06fab..eb0c2d041f13 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -771,7 +771,8 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
return -EINVAL;
}
- ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout);
+ ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
+ true, timeout);
if (ret == 0)
ret = -ETIME;
else if (ret > 0)
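/*
 * [Editor's illustration, not part of the patch] dma_resv_usage_rw() maps
 * an implicit-sync direction to a wait level, with a deliberate inversion:
 * writers must wait for readers, while readers only wait for writers.
 * Hypothetical idle check:
 */
#include <linux/dma-resv.h>

static bool example_is_idle_for(struct dma_resv *resv, bool write)
{
	return dma_resv_test_signaled(resv, dma_resv_usage_rw(write));
}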
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index 9338ddb7edff..a6d89aed0bda 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -151,7 +151,7 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st
return 0;
obj = drm_gem_fb_get_obj(state->fb, 0);
- ret = dma_resv_get_singleton(obj->resv, false, &fence);
+ ret = dma_resv_get_singleton(obj->resv, DMA_RESV_USAGE_WRITE, &fence);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 6e7e10f16ec0..e699950cc34d 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -837,7 +837,9 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
void drm_mode_get_hv_timing(const struct drm_display_mode *mode,
int *hdisplay, int *vdisplay)
{
- struct drm_display_mode adjusted = *mode;
+ struct drm_display_mode adjusted;
+
+ drm_mode_init(&adjusted, mode);
drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
*hdisplay = adjusted.crtc_hdisplay;
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index b701cda86d0c..2ff31717a3de 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -644,7 +644,7 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
vblank->linedur_ns = linedur_ns;
vblank->framedur_ns = framedur_ns;
- vblank->hwmode = *mode;
+ drm_mode_copy(&vblank->hwmode, mode);
drm_dbg_core(dev,
"crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index d5314aa28ff7..507172e2780b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -380,12 +380,14 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}
if (op & ETNA_PREP_NOSYNC) {
- if (!dma_resv_test_signaled(obj->resv, write))
+ if (!dma_resv_test_signaled(obj->resv,
+ dma_resv_usage_rw(write)))
return -EBUSY;
} else {
unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
- ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
+ ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
+ true, remain);
if (ret <= 0)
return ret == 0 ? -ETIMEDOUT : ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 53f7c78628a4..98bb5c9239de 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -202,14 +202,10 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
+ bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
- if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
- dma_resv_add_excl_fence(obj->resv,
- submit->out_fence);
- else
- dma_resv_add_shared_fence(obj->resv,
- submit->out_fence);
-
+ dma_resv_add_fence(obj->resv, submit->out_fence, write ?
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
submit_unlock_object(submit, i);
}
}
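/*
 * [Editor's illustration, not part of the patch] dma_resv_add_fence()
 * replaces the separate exclusive/shared add calls; the usage argument now
 * states read vs. write directly. A sketch under the usual locking rules:
 */
#include <linux/dma-resv.h>

static void example_attach_fence(struct dma_resv *resv,
				 struct dma_fence *fence, bool write)
{
	/* caller holds the resv lock and has reserved a fence slot */
	dma_resv_add_fence(resv, fence,
			   write ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
}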
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 3d87da283cfa..efe8591619e3 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -1047,7 +1047,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
if (ret < 0)
goto unpin_fb;
- dma_resv_iter_begin(&cursor, obj->base.resv, false);
+ dma_resv_iter_begin(&cursor, obj->base.resv,
+ DMA_RESV_USAGE_WRITE);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
add_rps_boost_after_vblank(new_plane_state->hw.crtc,
fence);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 470fdfd61a0f..ddda468241ef 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -138,21 +138,21 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* Alternatively, we can trade that extra information on read/write
* activity with
* args->busy =
- * !dma_resv_test_signaled(obj->resv, true);
+ * !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
* to report the overall busyness. This is what the wait-ioctl does.
*
*/
args->busy = 0;
- dma_resv_iter_begin(&cursor, obj->base.resv, true);
+ dma_resv_iter_begin(&cursor, obj->base.resv, DMA_RESV_USAGE_READ);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (dma_resv_iter_is_restarted(&cursor))
args->busy = 0;
- if (dma_resv_iter_is_exclusive(&cursor))
- /* Translate the exclusive fence to the READ *and* WRITE engine */
+ if (dma_resv_iter_usage(&cursor) <= DMA_RESV_USAGE_WRITE)
+ /* Translate the write fences to the READ *and* WRITE engine */
args->busy |= busy_check_writer(fence);
else
- /* Translate shared fences to READ set of engines */
+ /* Translate read fences to READ set of engines */
args->busy |= busy_check_reader(fence);
}
dma_resv_iter_end(&cursor);
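/*
 * [Editor's illustration, not part of the patch] The unlocked iterator may
 * restart when the reservation object changes mid-walk, so state gathered
 * across fences must be reset on restart, as the busy-ioctl above does
 * with args->busy. Hypothetical fence counter:
 */
static unsigned int example_count_fences(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_restarted(&cursor))
			count = 0;	/* drop state from the aborted walk */
		count++;
	}
	dma_resv_iter_end(&cursor);

	return count;
}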
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 1fd0cc9ca213..0512afdd20d8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -116,7 +116,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
obj->base.resv, NULL, true,
i915_fence_timeout(i915),
I915_FENCE_GFP);
- dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
+ dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
+ DMA_RESV_USAGE_KERNEL);
dma_fence_work_commit(&clflush->base);
/*
* We must have successfully populated the pages(since we are
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index ede084f36ca9..02ed3b269326 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -68,7 +68,7 @@ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
#ifdef CONFIG_LOCKDEP
- GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true) &&
+ GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) &&
i915_gem_object_evictable(obj));
#endif
return mr && (mr->type == INTEL_MEMORY_LOCAL ||
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index c1c3b510b9e2..747ac65e060f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -742,30 +742,19 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = {
/**
* i915_gem_object_get_moving_fence - Get the object's moving fence if any
* @obj: The object whose moving fence to get.
+ * @fence: The resulting fence
*
* A non-signaled moving fence means that there is an async operation
* pending on the object that needs to be waited on before setting up
* any GPU- or CPU PTEs to the object's pages.
*
- * Return: A refcounted pointer to the object's moving fence if any,
- * NULL otherwise.
+ * Return: Negative error code or 0 for success.
*/
-struct dma_fence *
-i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj)
+int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
+ struct dma_fence **fence)
{
- return dma_fence_get(i915_gem_to_ttm(obj)->moving);
-}
-
-void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
- struct dma_fence *fence)
-{
- struct dma_fence **moving = &i915_gem_to_ttm(obj)->moving;
-
- if (*moving == fence)
- return;
-
- dma_fence_put(*moving);
- *moving = dma_fence_get(fence);
+ return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL,
+ fence);
}
/**
@@ -783,23 +772,16 @@ void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
bool intr)
{
- struct dma_fence *fence = i915_gem_to_ttm(obj)->moving;
- int ret;
+ long ret;
assert_object_held(obj);
- if (!fence)
- return 0;
- ret = dma_fence_wait(fence, intr);
- if (ret)
- return ret;
+ ret = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL,
+ intr, MAX_SCHEDULE_TIMEOUT);
+ if (!ret)
+ ret = -ETIME;
- if (fence->error)
- return fence->error;
-
- i915_gem_to_ttm(obj)->moving = NULL;
- dma_fence_put(fence);
- return 0;
+ return ret < 0 ? ret : 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
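With the rework the TTM moving fence is no longer a separate pointer: async moves and clears are tracked as DMA_RESV_USAGE_KERNEL fences in the object's reservation, so both helpers above reduce to plain dma-resv calls. A hedged sketch of the resulting wait (the function name is illustrative):

static int wait_kernel_fences_sketch(struct dma_resv *resv, bool intr)
{
	long ret;

	ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL, intr,
				    MAX_SCHEDULE_TIMEOUT);
	if (!ret)
		/* 0 means timeout; should not happen with an infinite wait */
		ret = -ETIME;

	return ret < 0 ? ret : 0;
}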
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 02c37fe4a535..e11d82a9f7c3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -520,12 +520,8 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
-struct dma_fence *
-i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj);
-
-void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
- struct dma_fence *fence);
-
+int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
+ struct dma_fence **fence);
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
bool intr);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 432ac74ff225..a10716f4e717 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -467,19 +467,6 @@ out:
return fence;
}
-static int
-prev_deps(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
- struct i915_deps *deps)
-{
- int ret;
-
- ret = i915_deps_add_dependency(deps, bo->moving, ctx);
- if (!ret)
- ret = i915_deps_add_resv(deps, bo->base.resv, ctx);
-
- return ret;
-}
-
/**
* i915_ttm_move - The TTM move callback used by i915.
* @bo: The buffer object.
@@ -534,7 +521,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
struct i915_deps deps;
i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
- ret = prev_deps(bo, ctx, &deps);
+ ret = i915_deps_add_resv(&deps, bo->base.resv, ctx);
if (ret) {
i915_refct_sgt_put(dst_rsgt);
return ret;
@@ -637,9 +624,8 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
if (IS_ERR_OR_NULL(copy_fence))
return PTR_ERR_OR_ZERO(copy_fence);
- dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence);
- dma_resv_add_shared_fence(src_bo->base.resv, copy_fence);
-
+ dma_resv_add_fence(dst_bo->base.resv, copy_fence, DMA_RESV_USAGE_WRITE);
+ dma_resv_add_fence(src_bo->base.resv, copy_fence, DMA_RESV_USAGE_READ);
dma_fence_put(copy_fence);
return 0;
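The copy above shows why dma_resv_add_fence() takes a usage argument: one fence means different things to different objects. The destination is written while the source is only read, so the same fence is added twice at different levels. A minimal sketch (fence slots are assumed to have been reserved beforehand with dma_resv_reserve_fences()):

static void track_copy_sketch(struct dma_resv *dst, struct dma_resv *src,
			      struct dma_fence *copy_fence)
{
	dma_resv_add_fence(dst, copy_fence, DMA_RESV_USAGE_WRITE);
	dma_resv_add_fence(src, copy_fence, DMA_RESV_USAGE_READ);
}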
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 6d1a71d6404c..094f06b4ce33 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -86,7 +86,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
return true;
/* we will unbind on next submission, still have userptr pins */
- r = dma_resv_wait_timeout(obj->base.resv, true, false,
+ r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index dab3d30c09a0..319936f91ac5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -40,7 +40,8 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
struct dma_fence *fence;
long ret = timeout ?: 1;
- dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL);
+ dma_resv_iter_begin(&cursor, resv,
+ dma_resv_usage_rw(flags & I915_WAIT_ALL));
dma_resv_for_each_fence_unlocked(&cursor, fence) {
ret = i915_gem_object_wait_fence(fence, flags, timeout);
if (ret <= 0)
@@ -117,7 +118,8 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
struct dma_resv_iter cursor;
struct dma_fence *fence;
- dma_resv_iter_begin(&cursor, obj->base.resv, flags & I915_WAIT_ALL);
+ dma_resv_iter_begin(&cursor, obj->base.resv,
+ dma_resv_usage_rw(flags & I915_WAIT_ALL));
dma_resv_for_each_fence_unlocked(&cursor, fence)
i915_gem_fence_wait_priority(fence, attr);
dma_resv_iter_end(&cursor);
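dma_resv_usage_rw() bridges the old bool to the new levels, and the mapping is deliberately inverted relative to first intuition: a writer must sync against all readers, while a reader only syncs against writers. A sketch of the mapping (the real helper is a static inline in include/linux/dma-resv.h):

static inline enum dma_resv_usage usage_rw_sketch(bool write)
{
	/* writers wait for readers and writers; readers only for writers */
	return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
}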
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index b071a58dd6da..b4275b55e5b8 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -219,7 +219,8 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
goto out_detach;
}
- timeout = dma_resv_wait_timeout(dmabuf->resv, false, true, 5 * HZ);
+ timeout = dma_resv_wait_timeout(dmabuf->resv, DMA_RESV_USAGE_WRITE,
+ true, 5 * HZ);
if (!timeout) {
pr_err("dmabuf wait for exclusive fence timed out.\n");
timeout = -ETIME;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index 0e52eb87cd55..0ad443a90c8b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -218,9 +218,8 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
if (rq) {
err = dma_resv_reserve_fences(obj->base.resv, 1);
if (!err)
- dma_resv_add_excl_fence(obj->base.resv,
- &rq->fence);
- i915_gem_object_set_moving_fence(obj, &rq->fence);
+ dma_resv_add_fence(obj->base.resv, &rq->fence,
+ DMA_RESV_USAGE_KERNEL);
i915_request_put(rq);
}
if (err)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index c4c2c91a2ee7..5bc93a1ce3e3 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1221,8 +1221,8 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
expand32(POISON_INUSE), &rq);
i915_gem_object_unpin_pages(obj);
if (rq) {
- dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
- i915_gem_object_set_moving_fence(obj, &rq->fence);
+ dma_resv_add_fence(obj->base.resv, &rq->fence,
+ DMA_RESV_USAGE_KERNEL);
i915_request_put(rq);
}
i915_gem_object_unlock(obj);
diff --git a/drivers/gpu/drm/i915/i915_deps.c b/drivers/gpu/drm/i915/i915_deps.c
index 999210b37325..297b8e4e42ee 100644
--- a/drivers/gpu/drm/i915/i915_deps.c
+++ b/drivers/gpu/drm/i915/i915_deps.c
@@ -226,7 +226,7 @@ int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv,
struct dma_fence *fence;
dma_resv_assert_held(resv);
- dma_resv_for_each_fence(&iter, resv, true, fence) {
+ dma_resv_for_each_fence(&iter, resv, dma_resv_usage_rw(true), fence) {
int ret = i915_deps_add_dependency(deps, fence, ctx);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 582770360ad1..73d5195146b0 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1598,7 +1598,8 @@ i915_request_await_object(struct i915_request *to,
struct dma_fence *fence;
int ret = 0;
- dma_resv_for_each_fence(&cursor, obj->base.resv, write, fence) {
+ dma_resv_for_each_fence(&cursor, obj->base.resv,
+ dma_resv_usage_rw(write), fence) {
ret = i915_request_await_dma_fence(to, fence);
if (ret)
break;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 2a74a9a1cafe..ae984c66c48a 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -585,7 +585,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
debug_fence_assert(fence);
might_sleep_if(gfpflags_allow_blocking(gfp));
- dma_resv_iter_begin(&cursor, resv, write);
+ dma_resv_iter_begin(&cursor, resv, dma_resv_usage_rw(write));
dma_resv_for_each_fence_unlocked(&cursor, f) {
pending = i915_sw_fence_await_dma_fence(fence, f, timeout,
gfp);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index bae3423f58e8..d077f7b9eaad 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1357,10 +1357,17 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (err)
return err;
+ if (vma->obj) {
+ err = i915_gem_object_get_moving_fence(vma->obj, &moving);
+ if (err)
+ return err;
+ } else {
+ moving = NULL;
+ }
+
if (flags & PIN_GLOBAL)
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
- moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL;
if (flags & vma->vm->bind_async_flags || moving) {
/* lock VM */
err = i915_vm_lock_objects(vma->vm, ww);
@@ -1826,7 +1833,8 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
}
if (fence) {
- dma_resv_add_excl_fence(vma->obj->base.resv, fence);
+ dma_resv_add_fence(vma->obj->base.resv, fence,
+ DMA_RESV_USAGE_WRITE);
obj->write_domain = I915_GEM_DOMAIN_RENDER;
obj->read_domains = 0;
}
@@ -1838,7 +1846,8 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
}
if (fence) {
- dma_resv_add_shared_fence(vma->obj->base.resv, fence);
+ dma_resv_add_fence(vma->obj->base.resv, fence,
+ DMA_RESV_USAGE_READ);
obj->write_domain = 0;
}
}
@@ -2078,7 +2087,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
goto out_rpm;
}
- dma_resv_add_shared_fence(obj->base.resv, fence);
+ dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ);
dma_fence_put(fence);
out_rpm:
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 6114e013092b..73eb53edb8de 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -1056,7 +1056,8 @@ static int igt_lmem_write_cpu(void *arg)
obj->mm.pages->sgl, I915_CACHE_NONE,
true, 0xdeadbeaf, &rq);
if (rq) {
- dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
+ dma_resv_add_fence(obj->base.resv, &rq->fence,
+ DMA_RESV_USAGE_WRITE);
i915_request_put(rq);
}
diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig
index 001f59fb06d5..090830bcbde7 100644
--- a/drivers/gpu/drm/ingenic/Kconfig
+++ b/drivers/gpu/drm/ingenic/Kconfig
@@ -24,4 +24,13 @@ config DRM_INGENIC_IPU
The Image Processing Unit (IPU) will appear as a second primary plane.
+config DRM_INGENIC_DW_HDMI
+ tristate "Ingenic specific support for Synopsys DW HDMI"
+ depends on MACH_JZ4780
+ select DRM_DW_HDMI
+ help
+ Choose this option to enable the Synopsys DesignWare HDMI based driver.
+ If you want to enable HDMI on an Ingenic JZ4780 based SoC, you should
+ select this option.
+
endif
diff --git a/drivers/gpu/drm/ingenic/Makefile b/drivers/gpu/drm/ingenic/Makefile
index d313326bdddb..f10cc1c5a5f2 100644
--- a/drivers/gpu/drm/ingenic/Makefile
+++ b/drivers/gpu/drm/ingenic/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_DRM_INGENIC) += ingenic-drm.o
ingenic-drm-y = ingenic-drm-drv.o
ingenic-drm-$(CONFIG_DRM_INGENIC_IPU) += ingenic-ipu.o
+obj-$(CONFIG_DRM_INGENIC_DW_HDMI) += ingenic-dw-hdmi.o
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index a4f5a323f490..8eb0ad501a7b 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -833,6 +833,32 @@ static int ingenic_drm_bridge_atomic_check(struct drm_bridge *bridge,
}
}
+static u32 *
+ingenic_drm_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ switch (output_fmt) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ case MEDIA_BUS_FMT_RGB888_3X8:
+ case MEDIA_BUS_FMT_RGB888_3X8_DELTA:
+ break;
+ default:
+ *num_input_fmts = 0;
+ return NULL;
+ }
+
+ return drm_atomic_helper_bridge_propagate_bus_fmt(bridge, bridge_state,
+ crtc_state, conn_state,
+ output_fmt,
+ num_input_fmts);
+}
+
static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg)
{
struct ingenic_drm *priv = drm_device_get_priv(arg);
@@ -984,7 +1010,7 @@ static const struct drm_bridge_funcs ingenic_drm_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
- .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
+ .atomic_get_input_bus_fmts = ingenic_drm_bridge_atomic_get_input_bus_fmts,
};
static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/ingenic/ingenic-dw-hdmi.c b/drivers/gpu/drm/ingenic/ingenic-dw-hdmi.c
new file mode 100644
index 000000000000..72f8b44998a5
--- /dev/null
+++ b/drivers/gpu/drm/ingenic/ingenic-dw-hdmi.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2019, 2020 Paul Boddie <paul@boddie.org.uk>
+ *
+ * Derived from dw_hdmi-imx.c with i.MX portions removed.
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+
+static const struct dw_hdmi_mpll_config ingenic_mpll_cfg[] = {
+ { 45250000, { { 0x01e0, 0x0000 }, { 0x21e1, 0x0000 }, { 0x41e2, 0x0000 } } },
+ { 92500000, { { 0x0140, 0x0005 }, { 0x2141, 0x0005 }, { 0x4142, 0x0005 } } },
+ { 148500000, { { 0x00a0, 0x000a }, { 0x20a1, 0x000a }, { 0x40a2, 0x000a } } },
+ { 216000000, { { 0x00a0, 0x000a }, { 0x2001, 0x000f }, { 0x4002, 0x000f } } },
+ { ~0UL, { { 0x0000, 0x0000 }, { 0x0000, 0x0000 }, { 0x0000, 0x0000 } } }
+};
+
+static const struct dw_hdmi_curr_ctrl ingenic_cur_ctr[] = {
+ /*pixelclk bpp8 bpp10 bpp12 */
+ { 54000000, { 0x091c, 0x091c, 0x06dc } },
+ { 58400000, { 0x091c, 0x06dc, 0x06dc } },
+ { 72000000, { 0x06dc, 0x06dc, 0x091c } },
+ { 74250000, { 0x06dc, 0x0b5c, 0x091c } },
+ { 118800000, { 0x091c, 0x091c, 0x06dc } },
+ { 216000000, { 0x06dc, 0x0b5c, 0x091c } },
+ { ~0UL, { 0x0000, 0x0000, 0x0000 } },
+};
+
+/*
+ * Resistance term 133Ohm Cfg
+ * PREEMP config 0.00
+ * TX/CK level 10
+ */
+static const struct dw_hdmi_phy_config ingenic_phy_config[] = {
+ /*pixelclk symbol term vlev */
+ { 216000000, 0x800d, 0x0005, 0x01ad},
+ { ~0UL, 0x0000, 0x0000, 0x0000}
+};
+
+static enum drm_mode_status
+ingenic_dw_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ if (mode->clock < 13500)
+ return MODE_CLOCK_LOW;
+ /* FIXME: Hardware is capable of 270MHz, but setup data is missing. */
+ if (mode->clock > 216000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static struct dw_hdmi_plat_data ingenic_dw_hdmi_plat_data = {
+ .mpll_cfg = ingenic_mpll_cfg,
+ .cur_ctr = ingenic_cur_ctr,
+ .phy_config = ingenic_phy_config,
+ .mode_valid = ingenic_dw_hdmi_mode_valid,
+ .output_port = 1,
+};
+
+static const struct of_device_id ingenic_dw_hdmi_dt_ids[] = {
+ { .compatible = "ingenic,jz4780-dw-hdmi" },
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ingenic_dw_hdmi_dt_ids);
+
+static void ingenic_dw_hdmi_cleanup(void *data)
+{
+ struct dw_hdmi *hdmi = (struct dw_hdmi *)data;
+
+ dw_hdmi_remove(hdmi);
+}
+
+static int ingenic_dw_hdmi_probe(struct platform_device *pdev)
+{
+ struct dw_hdmi *hdmi;
+
+ hdmi = dw_hdmi_probe(pdev, &ingenic_dw_hdmi_plat_data);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
+
+ return devm_add_action_or_reset(&pdev->dev, ingenic_dw_hdmi_cleanup, hdmi);
+}
+
+static struct platform_driver ingenic_dw_hdmi_driver = {
+ .probe = ingenic_dw_hdmi_probe,
+ .driver = {
+ .name = "dw-hdmi-ingenic",
+ .of_match_table = ingenic_dw_hdmi_dt_ids,
+ },
+};
+module_platform_driver(ingenic_dw_hdmi_driver);
+
+MODULE_DESCRIPTION("JZ4780 Specific DW-HDMI Driver Extension");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dw-hdmi-ingenic");
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index e0a11ee0e86d..0f1ca0b0db49 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -364,10 +364,9 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
fence = lima_sched_context_queue_task(submit->task);
for (i = 0; i < submit->nr_bos; i++) {
- if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
- dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence);
- else
- dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence);
+ dma_resv_add_fence(lima_bo_resv(bos[i]), fence,
+ submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE ?
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
}
drm_gem_unlock_reservations((struct drm_gem_object **)bos,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 02b9ae65a96a..01bbb5f2d462 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -848,7 +848,8 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
- ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
+ ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
+ true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
else if (ret < 0)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3164db8be893..8d1eef914ba8 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -395,9 +395,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
struct drm_gem_object *obj = &submit->bos[i].obj->base;
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
- dma_resv_add_excl_fence(obj->resv, submit->user_fence);
+ dma_resv_add_fence(obj->resv, submit->user_fence,
+ DMA_RESV_USAGE_WRITE);
else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
- dma_resv_add_shared_fence(obj->resv, submit->user_fence);
+ dma_resv_add_fence(obj->resv, submit->user_fence,
+ DMA_RESV_USAGE_READ);
}
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base917c.c b/drivers/gpu/drm/nouveau/dispnv50/base917c.c
index a1baed4fe0e9..ca260509a4f1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base917c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base917c.c
@@ -22,7 +22,7 @@
#include "base.h"
#include "atom.h"
-const u32
+static const u32
base917c_format[] = {
DRM_FORMAT_C8,
DRM_FORMAT_XRGB8888,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index e2faf92e4831..8642b84ea20c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -558,7 +558,8 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
asyw->image.handle[0] = ctxdma->object.handle;
}
- ret = dma_resv_get_singleton(nvbo->bo.base.resv, false,
+ ret = dma_resv_get_singleton(nvbo->bo.base.resv,
+ DMA_RESV_USAGE_WRITE,
&asyw->state.fence);
if (ret)
return ret;
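dma_resv_get_singleton() collapses every fence at or above the requested usage into one fence, building a dma_fence_array when needed, which is what lets prepare_fb hand a single fence to the atomic state above. Sketched with the reworked signature (the helper name is illustrative):

static struct dma_fence *writers_singleton_sketch(struct dma_resv *resv)
{
	struct dma_fence *fence = NULL;
	int ret;

	/* may allocate a dma_fence_array when several writers are pending */
	ret = dma_resv_get_singleton(resv, DMA_RESV_USAGE_WRITE, &fence);
	if (ret)
		return ERR_PTR(ret);

	return fence;	/* NULL when no write fence is present */
}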
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 74f8652d2bd3..05076e530e7d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -962,11 +962,11 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
struct dma_fence *fence;
int ret;
- /* TODO: This is actually a memory management dependency */
- ret = dma_resv_get_singleton(bo->base.resv, false, &fence);
+ ret = dma_resv_get_singleton(bo->base.resv, DMA_RESV_USAGE_WRITE,
+ &fence);
if (ret)
- dma_resv_wait_timeout(bo->base.resv, false, false,
- MAX_SCHEDULE_TIMEOUT);
+ dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_WRITE,
+ false, MAX_SCHEDULE_TIMEOUT);
nv10_bo_put_tile_region(dev, *old_tile, fence);
*old_tile = new_tile;
@@ -1308,10 +1308,11 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool excl
{
struct dma_resv *resv = nvbo->bo.base.resv;
- if (exclusive)
- dma_resv_add_excl_fence(resv, &fence->base);
- else if (fence)
- dma_resv_add_shared_fence(resv, &fence->base);
+ if (!fence)
+ return;
+
+ dma_resv_add_fence(resv, &fence->base, exclusive ?
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 0268259e97eb..7f01dcf81fab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -350,17 +350,21 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
if (ret)
return ret;
- /* Waiting for the exclusive fence first causes performance regressions
- * under some circumstances. So manually wait for the shared ones first.
+ /* Waiting for the writes first causes performance regressions
+ * under some circumstances. So manually wait for the reads first.
*/
for (i = 0; i < 2; ++i) {
struct dma_resv_iter cursor;
struct dma_fence *fence;
- dma_resv_for_each_fence(&cursor, resv, exclusive, fence) {
+ dma_resv_for_each_fence(&cursor, resv,
+ dma_resv_usage_rw(exclusive),
+ fence) {
+ enum dma_resv_usage usage;
struct nouveau_fence *f;
- if (i == 0 && dma_resv_iter_is_exclusive(&cursor))
+ usage = dma_resv_iter_usage(&cursor);
+ if (i == 0 && usage == DMA_RESV_USAGE_WRITE)
continue;
f = nouveau_local_fence(fence, chan->drm);
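The nouveau hunk keeps its two-pass ordering through the conversion: pass 0 skips write-level fences so reads are synced first, and pass 1 then takes everything. The bare structure, with the per-fence sync elided:

static void sync_reads_before_writes_sketch(struct dma_resv *resv, bool excl)
{
	int i;

	for (i = 0; i < 2; ++i) {
		struct dma_resv_iter cursor;
		struct dma_fence *fence;

		dma_resv_for_each_fence(&cursor, resv,
					dma_resv_usage_rw(excl), fence) {
			/* pass 0: reads only; writes deferred to pass 1 */
			if (i == 0 &&
			    dma_resv_iter_usage(&cursor) ==
			    DMA_RESV_USAGE_WRITE)
				continue;

			/* ... wait on or pipeline-sync against fence ... */
		}
	}
}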
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9416bee92141..fab542a758ff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -962,7 +962,8 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
+ lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
+ dma_resv_usage_rw(write), true,
no_wait ? 0 : 30 * HZ);
if (!lret)
ret = -EBUSY;
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 60019d0532fc..347488685f74 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -93,22 +93,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
if (ret)
return -EINVAL;
- ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
- if (ret)
- goto error;
-
- if (nvbo->bo.moving)
- ret = dma_fence_wait(nvbo->bo.moving, true);
-
- ttm_bo_unreserve(&nvbo->bo);
- if (ret)
- goto error;
-
- return ret;
-
-error:
- nouveau_bo_unpin(nvbo);
- return ret;
+ return 0;
}
void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 030640bb3dca..ab3760e804b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -143,7 +143,7 @@ gf108_gr = {
}
};
-const struct gf100_gr_fwif
+static const struct gf100_gr_fwif
gf108_gr_fwif[] = {
{ -1, gf100_gr_load, &gf108_gr },
{ -1, gf100_gr_nofw, &gf108_gr },
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 27a1c9923b09..eca067e78579 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -30,9 +30,9 @@ struct panel_lvds {
const char *label;
unsigned int width;
unsigned int height;
- struct videomode video_mode;
+ struct drm_display_mode dmode;
+ u32 bus_flags;
unsigned int bus_format;
- bool data_mirror;
struct regulator *supply;
@@ -87,21 +87,18 @@ static int panel_lvds_get_modes(struct drm_panel *panel,
struct panel_lvds *lvds = to_panel_lvds(panel);
struct drm_display_mode *mode;
- mode = drm_mode_create(connector->dev);
+ mode = drm_mode_duplicate(connector->dev, &lvds->dmode);
if (!mode)
return 0;
- drm_display_mode_from_videomode(&lvds->video_mode, mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
- connector->display_info.width_mm = lvds->width;
- connector->display_info.height_mm = lvds->height;
+ connector->display_info.width_mm = lvds->dmode.width_mm;
+ connector->display_info.height_mm = lvds->dmode.height_mm;
drm_display_info_set_bus_formats(&connector->display_info,
&lvds->bus_format, 1);
- connector->display_info.bus_flags = lvds->data_mirror
- ? DRM_BUS_FLAG_DATA_LSB_TO_MSB
- : DRM_BUS_FLAG_DATA_MSB_TO_LSB;
+ connector->display_info.bus_flags = lvds->bus_flags;
drm_connector_set_panel_orientation(connector, lvds->orientation);
return 1;
@@ -116,7 +113,6 @@ static const struct drm_panel_funcs panel_lvds_funcs = {
static int panel_lvds_parse_dt(struct panel_lvds *lvds)
{
struct device_node *np = lvds->dev->of_node;
- struct display_timing timing;
int ret;
ret = of_drm_get_panel_orientation(np, &lvds->orientation);
@@ -125,23 +121,20 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
return ret;
}
- ret = of_get_display_timing(np, "panel-timing", &timing);
+ ret = of_get_drm_panel_display_mode(np, &lvds->dmode, &lvds->bus_flags);
if (ret < 0) {
dev_err(lvds->dev, "%pOF: problems parsing panel-timing (%d)\n",
np, ret);
return ret;
}
- videomode_from_timing(&timing, &lvds->video_mode);
-
- ret = of_property_read_u32(np, "width-mm", &lvds->width);
- if (ret < 0) {
+ if (lvds->dmode.width_mm == 0) {
dev_err(lvds->dev, "%pOF: invalid or missing %s DT property\n",
np, "width-mm");
return -ENODEV;
}
- ret = of_property_read_u32(np, "height-mm", &lvds->height);
- if (ret < 0) {
+
+ if (lvds->dmode.height_mm == 0) {
dev_err(lvds->dev, "%pOF: invalid or missing %s DT property\n",
np, "height-mm");
return -ENODEV;
@@ -158,7 +151,9 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
lvds->bus_format = ret;
- lvds->data_mirror = of_property_read_bool(np, "data-mirror");
+ lvds->bus_flags |= of_property_read_bool(np, "data-mirror") ?
+ DRM_BUS_FLAG_DATA_LSB_TO_MSB :
+ DRM_BUS_FLAG_DATA_MSB_TO_LSB;
return 0;
}
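After the conversion the panel carries a ready-made drm_display_mode parsed once by of_get_drm_panel_display_mode(), so get_modes becomes a plain duplicate-and-add. A minimal sketch of that shape (connector plumbing assumed):

static int get_modes_sketch(struct drm_connector *connector,
			    const struct drm_display_mode *panel_mode)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, panel_mode);
	if (!mode)
		return 0;

	mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_probed_add(connector, mode);

	return 1;	/* number of modes added */
}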
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 94b6f0a19c83..7fcbc2a5b6cd 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -316,7 +316,8 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
if (!gem_obj)
return -ENOENT;
- ret = dma_resv_wait_timeout(gem_obj->resv, true, true, timeout);
+ ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_READ,
+ true, timeout);
if (!ret)
ret = timeout ? -ETIMEDOUT : -EBUSY;
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index c34114560e49..fda5871aebe3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -268,7 +268,7 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos,
int i;
for (i = 0; i < bo_count; i++)
- dma_resv_add_excl_fence(bos[i]->resv, fence);
+ dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE);
}
int panfrost_job_push(struct panfrost_job *job)
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 6a36b0fd845c..2d9ed3b94574 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -61,7 +61,8 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
struct dma_fence *fence;
int rel = 0;
- dma_resv_iter_begin(&cursor, bo->tbo.base.resv, true);
+ dma_resv_iter_begin(&cursor, bo->tbo.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (dma_resv_iter_is_restarted(&cursor))
rel = 0;
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index cde1e8ddaeaa..368d26da0d6a 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -429,7 +429,8 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
- dma_resv_add_shared_fence(bo->base.resv, &release->base);
+ dma_resv_add_fence(bo->base.resv, &release->base,
+ DMA_RESV_USAGE_READ);
ttm_bo_move_to_lru_tail_unlocked(bo);
dma_resv_unlock(bo->base.resv);
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index f60e826cd292..57ff2b723c87 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,8 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- r = dma_resv_get_singleton(new_rbo->tbo.base.resv, false, &work->fence);
+ r = dma_resv_get_singleton(new_rbo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
+ &work->fence);
if (r) {
radeon_bo_unreserve(new_rbo);
DRM_ERROR("failed to get new rbo buffer fences\n");
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index f563284a7fac..8c01a7f0e027 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -162,7 +162,9 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
- r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
+ r = dma_resv_wait_timeout(robj->tbo.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP,
+ true, 30 * HZ);
if (!r)
r = -EBUSY;
@@ -524,7 +526,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- r = dma_resv_test_signaled(robj->tbo.base.resv, true);
+ r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ);
if (r == 0)
r = -EBUSY;
else
@@ -553,7 +555,8 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
+ ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
+ true, 30 * HZ);
if (ret == 0)
r = -EBUSY;
else if (ret < 0)
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 9fa88549c89e..29fe8423bd90 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -66,8 +66,8 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
return true;
}
- r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 7ffd2e90f325..6c4a6802ca96 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -219,7 +219,12 @@ int radeon_bo_create(struct radeon_device *rdev,
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
bool is_iomem;
- int r;
+ long r;
+
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0)
+ return r;
if (bo->kptr) {
if (ptr) {
@@ -791,8 +796,6 @@ void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
return;
}
- if (shared)
- dma_resv_add_shared_fence(resv, &fence->base);
- else
- dma_resv_add_excl_fence(resv, &fence->base);
+ dma_resv_add_fence(resv, &fence->base, shared ?
+ DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
}
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 4a90807351e7..42a87948e28c 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -77,19 +77,9 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
- if (unlikely(ret))
- goto error;
-
- if (bo->tbo.moving) {
- ret = dma_fence_wait(bo->tbo.moving, false);
- if (unlikely(ret)) {
- radeon_bo_unpin(bo);
- goto error;
- }
- }
-
- bo->prime_shared_count++;
-error:
+ if (likely(ret == 0))
+ bo->prime_shared_count++;
+
radeon_bo_unreserve(bo);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index b991ba1bcd51..49bbb2266c0f 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -96,7 +96,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
struct dma_fence *f;
int r = 0;
- dma_resv_for_each_fence(&cursor, resv, shared, f) {
+ dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(shared), f) {
fence = to_radeon_fence(f);
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index bc0f44299bb9..a2cda184b2b2 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -470,24 +470,16 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
int32_t *msg, msg_type, handle;
unsigned img_size = 0;
void *ptr;
- long r;
- int i;
+ int i, r;
if (offset & 0x3F) {
DRM_ERROR("UVD messages must be 64 byte aligned!\n");
return -EINVAL;
}
- r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
- MAX_SCHEDULE_TIMEOUT);
- if (r <= 0) {
- DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
- return r ? r : -ETIME;
- }
-
r = radeon_bo_kmap(bo, &ptr);
if (r) {
- DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
+ DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
return r;
}
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index c5660b066554..76fd2904c7c6 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -705,7 +705,8 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
dma_resv_assert_held(obj->resv);
- dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
+ dma_resv_for_each_fence(&cursor, obj->resv, dma_resv_usage_rw(write),
+ fence) {
/* Make sure to grab an additional ref on the added fence */
dma_fence_get(fence);
ret = drm_sched_job_add_dependency(job, fence);
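drm_sched_job_add_dependency() consumes a fence reference, which is why the loop above grabs an extra one per iterated fence. The whole implicit-dependency walk, sketched:

static int add_implicit_deps_sketch(struct drm_sched_job *job,
				    struct dma_resv *resv, bool write)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
				fence) {
		/* the scheduler takes ownership of one reference */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}
	}

	return 0;
}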
diff --git a/drivers/gpu/drm/solomon/Kconfig b/drivers/gpu/drm/solomon/Kconfig
index 6230369505c9..8c0a0c788385 100644
--- a/drivers/gpu/drm/solomon/Kconfig
+++ b/drivers/gpu/drm/solomon/Kconfig
@@ -5,9 +5,9 @@ config DRM_SSD130X
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
- DRM driver for the SSD1305, SSD1306, SSD1307 and SSD1309 Solomon
- OLED controllers. This is only for the core driver, a driver for
- the appropriate bus transport in your chip also must be selected.
+ DRM driver for the SSD130x Solomon and SINO WEALTH SH110x OLED
+ controllers. This is only for the core driver; a driver for the
+ appropriate bus transport in your chip must also be selected.
If M is selected the module will be called ssd130x.
@@ -16,6 +16,7 @@ config DRM_SSD130X_I2C
depends on DRM_SSD130X && I2C
select REGMAP_I2C
help
- Say Y here if the SSD130x OLED display is connected via I2C bus.
+ Say Y here if the SSD130x or SH110x OLED display is connected via
+ an I2C bus.
If M is selected the module will be called ssd130x-i2c.
diff --git a/drivers/gpu/drm/solomon/ssd130x-i2c.c b/drivers/gpu/drm/solomon/ssd130x-i2c.c
index 3126aeda4ced..d099b241dd3f 100644
--- a/drivers/gpu/drm/solomon/ssd130x-i2c.c
+++ b/drivers/gpu/drm/solomon/ssd130x-i2c.c
@@ -53,6 +53,13 @@ static void ssd130x_i2c_shutdown(struct i2c_client *client)
ssd130x_shutdown(ssd130x);
}
+static struct ssd130x_deviceinfo ssd130x_sh1106_deviceinfo = {
+ .default_vcomh = 0x40,
+ .default_dclk_div = 1,
+ .default_dclk_frq = 5,
+ .page_mode_only = 1,
+};
+
static struct ssd130x_deviceinfo ssd130x_ssd1305_deviceinfo = {
.default_vcomh = 0x34,
.default_dclk_div = 1,
@@ -81,6 +88,10 @@ static struct ssd130x_deviceinfo ssd130x_ssd1309_deviceinfo = {
static const struct of_device_id ssd130x_of_match[] = {
{
+ .compatible = "sinowealth,sh1106-i2c",
+ .data = &ssd130x_sh1106_deviceinfo,
+ },
+ {
.compatible = "solomon,ssd1305fb-i2c",
.data = &ssd130x_ssd1305_deviceinfo,
},
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 38b6c2c14f53..a7e784518c69 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -42,6 +42,8 @@
#define SSD130X_DATA 0x40
#define SSD130X_COMMAND 0x80
+#define SSD130X_PAGE_COL_START_LOW 0x00
+#define SSD130X_PAGE_COL_START_HIGH 0x10
#define SSD130X_SET_ADDRESS_MODE 0x20
#define SSD130X_SET_COL_RANGE 0x21
#define SSD130X_SET_PAGE_RANGE 0x22
@@ -61,6 +63,11 @@
#define SSD130X_SET_COM_PINS_CONFIG 0xda
#define SSD130X_SET_VCOMH 0xdb
+#define SSD130X_PAGE_COL_START_MASK GENMASK(3, 0)
+#define SSD130X_PAGE_COL_START_HIGH_SET(val) FIELD_PREP(SSD130X_PAGE_COL_START_MASK, (val) >> 4)
+#define SSD130X_PAGE_COL_START_LOW_SET(val) FIELD_PREP(SSD130X_PAGE_COL_START_MASK, (val))
+#define SSD130X_START_PAGE_ADDRESS_MASK GENMASK(2, 0)
+#define SSD130X_START_PAGE_ADDRESS_SET(val) FIELD_PREP(SSD130X_START_PAGE_ADDRESS_MASK, (val))
#define SSD130X_SET_SEG_REMAP_MASK GENMASK(0, 0)
#define SSD130X_SET_SEG_REMAP_SET(val) FIELD_PREP(SSD130X_SET_SEG_REMAP_MASK, (val))
#define SSD130X_SET_COM_SCAN_DIR_MASK GENMASK(3, 3)
@@ -130,6 +137,7 @@ out_end:
return ret;
}
+/* Set address range for horizontal/vertical addressing modes */
static int ssd130x_set_col_range(struct ssd130x_device *ssd130x,
u8 col_start, u8 cols)
{
@@ -166,6 +174,26 @@ static int ssd130x_set_page_range(struct ssd130x_device *ssd130x,
return 0;
}
+/* Set page and column start address for page addressing mode */
+static int ssd130x_set_page_pos(struct ssd130x_device *ssd130x,
+ u8 page_start, u8 col_start)
+{
+ int ret;
+ u32 page, col_low, col_high;
+
+ page = SSD130X_START_PAGE_ADDRESS |
+ SSD130X_START_PAGE_ADDRESS_SET(page_start);
+ col_low = SSD130X_PAGE_COL_START_LOW |
+ SSD130X_PAGE_COL_START_LOW_SET(col_start);
+ col_high = SSD130X_PAGE_COL_START_HIGH |
+ SSD130X_PAGE_COL_START_HIGH_SET(col_start);
+ ret = ssd130x_write_cmd(ssd130x, 3, page, col_low, col_high);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int ssd130x_pwm_enable(struct ssd130x_device *ssd130x)
{
struct device *dev = ssd130x->dev;
@@ -342,6 +370,11 @@ static int ssd130x_init(struct ssd130x_device *ssd130x)
}
}
+ /* Switch to page addressing mode */
+ if (ssd130x->page_address_mode)
+ return ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_ADDRESS_MODE,
+ SSD130X_SET_ADDRESS_MODE_PAGE);
+
/* Switch to horizontal addressing mode */
return ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_ADDRESS_MODE,
SSD130X_SET_ADDRESS_MODE_HORIZONTAL);
@@ -396,13 +429,16 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x, u8 *buf,
* (5) A4 B4 C4 D4 E4 F4 G4 H4
*/
- ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset + x, width);
- if (ret < 0)
- goto out_free;
+ if (!ssd130x->page_address_mode) {
+ /* Set address range for horizontal addressing mode */
+ ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset + x, width);
+ if (ret < 0)
+ goto out_free;
- ret = ssd130x_set_page_range(ssd130x, ssd130x->page_offset + y / 8, pages);
- if (ret < 0)
- goto out_free;
+ ret = ssd130x_set_page_range(ssd130x, ssd130x->page_offset + y / 8, pages);
+ if (ret < 0)
+ goto out_free;
+ }
for (i = 0; i < pages; i++) {
int m = 8;
@@ -421,9 +457,29 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x, u8 *buf,
}
data_array[array_idx++] = data;
}
+
+ /*
+ * In page addressing mode, the start address needs to be reset,
+ * and each page then needs to be written out separately.
+ */
+ if (ssd130x->page_address_mode) {
+ ret = ssd130x_set_page_pos(ssd130x,
+ ssd130x->page_offset + i,
+ ssd130x->col_offset + x);
+ if (ret < 0)
+ goto out_free;
+
+ ret = ssd130x_write_data(ssd130x, data_array, width);
+ if (ret < 0)
+ goto out_free;
+
+ array_idx = 0;
+ }
}
- ret = ssd130x_write_data(ssd130x, data_array, width * pages);
+ /* Write out update in one go if we aren't using page addressing mode */
+ if (!ssd130x->page_address_mode)
+ ret = ssd130x_write_data(ssd130x, data_array, width * pages);
out_free:
kfree(data_array);
@@ -806,6 +862,9 @@ struct ssd130x_device *ssd130x_probe(struct device *dev, struct regmap *regmap)
ssd130x->regmap = regmap;
ssd130x->device_info = device_get_match_data(dev);
+ if (ssd130x->device_info->page_mode_only)
+ ssd130x->page_address_mode = 1;
+
ssd130x_parse_properties(ssd130x);
ret = ssd130x_get_resources(ssd130x);
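In page addressing mode the cursor is programmed with three one-byte commands, with the column split into nibbles across two of them. A worked example of the encoding used by ssd130x_set_page_pos() above, assuming the usual SSD130x opcodes (0xB0 page start, 0x00/0x10 column low/high):

u8 page_start = 3, col_start = 0x2a;
u8 cmds[3] = {
	0xb0 | (page_start & 0x07),	/* page start      -> 0xb3 */
	0x00 | (col_start & 0x0f),	/* column low nib  -> 0x0a */
	0x10 | (col_start >> 4),	/* column high nib -> 0x12 */
};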
diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h
index cd21cdccb566..f5b062576fdf 100644
--- a/drivers/gpu/drm/solomon/ssd130x.h
+++ b/drivers/gpu/drm/solomon/ssd130x.h
@@ -24,6 +24,7 @@ struct ssd130x_deviceinfo {
u32 default_dclk_frq;
int need_pwm;
int need_chargepump;
+ bool page_mode_only;
};
struct ssd130x_device {
@@ -38,6 +39,7 @@ struct ssd130x_device {
const struct ssd130x_deviceinfo *device_info;
+ unsigned page_address_mode : 1;
unsigned area_color_enable : 1;
unsigned com_invdir : 1;
unsigned com_lrremap : 1;
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 3db3768a3241..b58415f2e4d8 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -406,7 +406,7 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp)
(hw_nvn != gdp->node_list[i].top_field_paddr))
return &gdp->node_list[i];
- /* in hazardious cases restart with the first node */
+ /* in hazardous cases restart with the first node */
DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
sti_plane_to_str(&gdp->plane), hw_nvn);
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index f3ace11209dd..b3fbee7eac11 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -183,7 +183,7 @@ void hdmi_write(struct sti_hdmi *hdmi, u32 val, int offset)
writel(val, hdmi->regs + offset);
}
-/**
+/*
* HDMI interrupt handler threaded
*
* @irq: irq number
@@ -215,7 +215,7 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
return IRQ_HANDLED;
}
-/**
+/*
* HDMI interrupt handler
*
* @irq: irq number
@@ -237,7 +237,7 @@ static irqreturn_t hdmi_irq(int irq, void *arg)
return IRQ_WAKE_THREAD;
}
-/**
+/*
* Set hdmi active area depending on the drm display mode selected
*
* @hdmi: pointer on the hdmi internal structure
@@ -258,7 +258,7 @@ static void hdmi_active_area(struct sti_hdmi *hdmi)
hdmi_write(hdmi, ymax, HDMI_ACTIVE_VID_YMAX);
}
-/**
+/*
* Overall hdmi configuration
*
* @hdmi: pointer on the hdmi internal structure
@@ -336,7 +336,7 @@ static void hdmi_infoframe_reset(struct sti_hdmi *hdmi,
hdmi_write(hdmi, 0x0, pack_offset + i);
}
-/**
+/*
* Helper to concatenate infoframe in 32 bits word
*
* @ptr: pointer on the hdmi internal structure
@@ -353,7 +353,7 @@ static inline unsigned int hdmi_infoframe_subpack(const u8 *ptr, size_t size)
return value;
}
-/**
+/*
* Helper to write info frame
*
* @hdmi: pointer on the hdmi internal structure
@@ -423,7 +423,7 @@ static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi,
hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
}
-/**
+/*
* Prepare and configure the AVI infoframe
*
* AVI infoframe are transmitted at least once per two video field and
@@ -466,7 +466,7 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
return 0;
}
-/**
+/*
* Prepare and configure the AUDIO infoframe
*
* AUDIO infoframe are transmitted once per frame and
@@ -551,7 +551,7 @@ static int hdmi_vendor_infoframe_config(struct sti_hdmi *hdmi)
#define HDMI_TIMEOUT_SWRESET 100 /*milliseconds */
-/**
+/*
* Software reset of the hdmi subsystem
*
* @hdmi: pointer on the hdmi internal structure
@@ -785,7 +785,7 @@ static void sti_hdmi_disable(struct drm_bridge *bridge)
cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID);
}
-/**
+/*
* sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent
* clocks. None-coherent clocks means that audio and TMDS clocks have not the
* same source (drifts between clocks). In this case assumption is that CTS is
@@ -892,7 +892,7 @@ static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
if (clk_prepare_enable(hdmi->clk_tmds))
DRM_ERROR("Failed to prepare/enable hdmi_tmds clk\n");
if (clk_prepare_enable(hdmi->clk_phy))
- DRM_ERROR("Failed to prepare/enable hdmi_rejec_pll clk\n");
+ DRM_ERROR("Failed to prepare/enable hdmi_rejection_pll clk\n");
hdmi->enabled = true;
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 17cc050207f4..6bd45df8f5a7 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -869,8 +869,8 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct drm_device *ddev = crtc->dev;
struct drm_connector_list_iter iter;
struct drm_connector *connector = NULL;
- struct drm_encoder *encoder = NULL;
- struct drm_bridge *bridge = NULL;
+ struct drm_encoder *encoder = NULL, *en_iter;
+ struct drm_bridge *bridge = NULL, *br_iter;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
u32 total_width, total_height;
@@ -880,15 +880,19 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
int ret;
/* get encoder from crtc */
- drm_for_each_encoder(encoder, ddev)
- if (encoder->crtc == crtc)
+ drm_for_each_encoder(en_iter, ddev)
+ if (en_iter->crtc == crtc) {
+ encoder = en_iter;
break;
+ }
if (encoder) {
/* get bridge from encoder */
- list_for_each_entry(bridge, &encoder->bridge_chain, chain_node)
- if (bridge->encoder == encoder)
+ list_for_each_entry(br_iter, &encoder->bridge_chain, chain_node)
+ if (br_iter->encoder == encoder) {
+ bridge = br_iter;
break;
+ }
/* Get the connector from encoder */
drm_connector_list_iter_begin(ddev, &iter);
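The ltdc change fixes a classic iterator misuse: after drm_for_each_encoder() or list_for_each_entry() the loop variable is never NULL, even when nothing matched. The fix searches with a scratch iterator and records the hit separately, as in this fragment:

struct drm_encoder *encoder = NULL, *iter;

drm_for_each_encoder(iter, ddev)
	if (iter->crtc == crtc) {
		encoder = iter;		/* remember the match */
		break;
	}

if (!encoder)
	return;				/* no encoder drives this CRTC */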
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c49996cf25d0..75d308ec173d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -223,7 +223,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
struct dma_resv_iter cursor;
struct dma_fence *fence;
- dma_resv_iter_begin(&cursor, resv, true);
+ dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (!fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
@@ -252,7 +252,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
struct dma_resv *resv = &bo->base._resv;
int ret;
- if (dma_resv_test_signaled(resv, true))
+ if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
ret = 0;
else
ret = -EBUSY;
@@ -264,7 +264,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
dma_resv_unlock(bo->base.resv);
spin_unlock(&bo->bdev->lru_lock);
- lret = dma_resv_wait_timeout(resv, true, interruptible,
+ lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
+ interruptible,
30 * HZ);
if (lret < 0)
@@ -367,7 +368,8 @@ static void ttm_bo_release(struct kref *kref)
/* Last resort, if we fail to allocate memory for the
* fences block for the BO to become idle
*/
- dma_resv_wait_timeout(bo->base.resv, true, false,
+ dma_resv_wait_timeout(bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP, false,
30 * HZ);
}
@@ -378,7 +380,7 @@ static void ttm_bo_release(struct kref *kref)
ttm_mem_io_free(bdev, bo->resource);
}
- if (!dma_resv_test_signaled(bo->base.resv, true) ||
+ if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) ||
!dma_resv_trylock(bo->base.resv)) {
/* The BO is not idle, resurrect it for delayed destroy */
ttm_bo_flush_all_fences(bo);
@@ -416,7 +418,6 @@ static void ttm_bo_release(struct kref *kref)
dma_resv_unlock(bo->base.resv);
atomic_dec(&ttm_glob.bo_count);
- dma_fence_put(bo->moving);
bo->destroy(bo);
}
@@ -712,9 +713,8 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo)
EXPORT_SYMBOL(ttm_bo_unpin);
/*
- * Add the last move fence to the BO and reserve a new shared slot. We only use
- * a shared slot to avoid unecessary sync and rely on the subsequent bo move to
- * either stall or use an exclusive fence respectively set bo->moving.
+ * Add the last move fence to the BO as kernel dependency and reserve a new
+ * fence slot.
*/
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man,
@@ -737,17 +737,11 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
return ret;
}
- dma_resv_add_shared_fence(bo->base.resv, fence);
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
ret = dma_resv_reserve_fences(bo->base.resv, 1);
- if (unlikely(ret)) {
- dma_fence_put(fence);
- return ret;
- }
-
- dma_fence_put(bo->moving);
- bo->moving = fence;
- return 0;
+ dma_fence_put(fence);
+ return ret;
}
/*
@@ -949,7 +943,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
bo->bdev = bdev;
bo->type = type;
bo->page_alignment = page_alignment;
- bo->moving = NULL;
bo->pin_count = 0;
bo->sg = sg;
bo->bulk_move = NULL;
@@ -1044,14 +1037,14 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
long timeout = 15 * HZ;
if (no_wait) {
- if (dma_resv_test_signaled(bo->base.resv, true))
+ if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
return 0;
else
return -EBUSY;
}
- timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible,
- timeout);
+ timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ interruptible, timeout);
if (timeout < 0)
return timeout;
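The move-fence path now publishes the resource manager's last move as a DMA_RESV_USAGE_KERNEL fence and immediately re-reserves a slot for the caller, instead of stashing the fence in bo->moving. Sketched in isolation (a slot for the add itself is assumed to have been reserved earlier, as dma_resv_add_fence() requires):

static int add_move_fence_sketch(struct dma_resv *resv,
				 struct dma_fence *move_fence)
{
	int ret;

	dma_resv_add_fence(resv, move_fence, DMA_RESV_USAGE_KERNEL);
	ret = dma_resv_reserve_fences(resv, 1);	/* slot for the next user */
	dma_fence_put(move_fence);

	return ret;
}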
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1b96b91bf81b..1cbfb00c1d65 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -228,7 +228,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
atomic_inc(&ttm_glob.bo_count);
INIT_LIST_HEAD(&fbo->base.ddestroy);
- fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
kref_init(&fbo->base.kref);
@@ -500,14 +499,12 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
* operation has completed.
*/
- dma_fence_put(bo->moving);
- bo->moving = dma_fence_get(fence);
-
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
return ret;
- dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
+ dma_resv_add_fence(&ghost_obj->base._resv, fence,
+ DMA_RESV_USAGE_KERNEL);
/**
* If we're not moving to fixed memory, the TTM object
@@ -545,9 +542,6 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
spin_unlock(&from->move_lock);
ttm_resource_free(bo, &bo->resource);
-
- dma_fence_put(bo->moving);
- bo->moving = dma_fence_get(fence);
}
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
@@ -561,7 +555,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
int ret = 0;
- dma_resv_add_excl_fence(bo->base.resv, fence);
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
if (!evict)
ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
else if (!from->use_tt && pipeline)
@@ -578,6 +572,21 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+{
+ struct ttm_device *bdev = bo->bdev;
+ struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+ int ret;
+
+ ret = ttm_bo_wait_free_node(bo, man->use_tt);
+ if (WARN_ON(ret))
+ return;
+
+ ttm_bo_assign_mem(bo, new_mem);
+}
+EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);
+
/**
* ttm_bo_pipeline_gutting - purge the contents of a bo
* @bo: The buffer object
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 08ba083a80d2..5b324f245265 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -46,17 +46,13 @@
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
{
- vm_fault_t ret = 0;
- int err = 0;
-
- if (likely(!bo->moving))
- goto out_unlock;
+ long err = 0;
/*
* Quick non-stalling check for idle.
*/
- if (dma_fence_is_signaled(bo->moving))
- goto out_clear;
+ if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
+ return 0;
/*
* If possible, avoid waiting for GPU with mmap_lock
@@ -64,34 +60,30 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
* is the first attempt.
*/
if (fault_flag_allow_retry_first(vmf->flags)) {
- ret = VM_FAULT_RETRY;
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
- goto out_unlock;
+ return VM_FAULT_RETRY;
ttm_bo_get(bo);
mmap_read_unlock(vmf->vma->vm_mm);
- (void) dma_fence_wait(bo->moving, true);
+ (void)dma_resv_wait_timeout(bo->base.resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
- goto out_unlock;
+ return VM_FAULT_RETRY;
}
/*
* Ordinary wait.
*/
- err = dma_fence_wait(bo->moving, true);
- if (unlikely(err != 0)) {
- ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+ err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (unlikely(err < 0)) {
+ return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
VM_FAULT_NOPAGE;
- goto out_unlock;
}
-out_clear:
- dma_fence_put(bo->moving);
- bo->moving = NULL;
-
-out_unlock:
- return ret;
+ return 0;
}
static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
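The fault path keeps its three tiers through the conversion, a lockless test, an mmap_lock-dropping retry and a blocking wait, only now against the kernel fences in the resv instead of bo->moving. Fast path plus blocking tail, sketched with the retry plumbing elided:

static vm_fault_t fault_idle_sketch(struct dma_resv *resv)
{
	long err;

	/* quick non-stalling check for idle */
	if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_KERNEL))
		return 0;

	err = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL, true,
				    MAX_SCHEDULE_TIMEOUT);
	if (err < 0)
		return err == -ERESTARTSYS ? VM_FAULT_NOPAGE :
					     VM_FAULT_SIGBUS;

	return 0;
}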
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 789c645f004e..dbee34a058df 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -101,7 +101,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
continue;
}
- num_fences = min(entry->num_shared, 1u);
+ num_fences = max(entry->num_shared, 1u);
if (!ret) {
ret = dma_resv_reserve_fences(bo->base.resv,
num_fences);
@@ -154,10 +154,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- if (entry->num_shared)
- dma_resv_add_shared_fence(bo->base.resv, fence);
- else
- dma_resv_add_excl_fence(bo->base.resv, fence);
+ dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?
+ DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
ttm_bo_move_to_lru_tail_unlocked(bo);
dma_resv_unlock(bo->base.resv);
}
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 961812d33827..2352e9640922 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -550,8 +550,8 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
for (i = 0; i < job->bo_count; i++) {
/* XXX: Use shared fences for read-only objects. */
- dma_resv_add_excl_fence(job->bo[i]->resv,
- job->done_fence);
+ dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
+ DMA_RESV_USAGE_WRITE);
}
drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 594bd6bb00d2..9eaf304fc20d 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -546,7 +546,8 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
bo = to_vc4_bo(&exec->bo[i]->base);
bo->seqno = seqno;
- dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
+ dma_resv_add_fence(bo->base.base.resv, exec->fence,
+ DMA_RESV_USAGE_READ);
}
list_for_each_entry(bo, &exec->unref_list, unref_head) {
@@ -557,7 +558,8 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
bo->write_seqno = seqno;
- dma_resv_add_excl_fence(bo->base.base.resv, exec->fence);
+ dma_resv_add_fence(bo->base.base.resv, exec->fence,
+ DMA_RESV_USAGE_WRITE);
}
}
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 9194cb52e706..2a58fc421cf6 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -611,6 +611,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
struct vc4_hvs *hvs = NULL;
int ret;
u32 dispctrl;
+ u32 reg;
hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL);
if (!hvs)
@@ -682,6 +683,26 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
vc4->hvs = hvs;
+ reg = HVS_READ(SCALER_DISPECTRL);
+ reg &= ~SCALER_DISPECTRL_DSP2_MUX_MASK;
+ HVS_WRITE(SCALER_DISPECTRL,
+ reg | VC4_SET_FIELD(0, SCALER_DISPECTRL_DSP2_MUX));
+
+ reg = HVS_READ(SCALER_DISPCTRL);
+ reg &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
+ HVS_WRITE(SCALER_DISPCTRL,
+ reg | VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX));
+
+ reg = HVS_READ(SCALER_DISPEOLN);
+ reg &= ~SCALER_DISPEOLN_DSP4_MUX_MASK;
+ HVS_WRITE(SCALER_DISPEOLN,
+ reg | VC4_SET_FIELD(3, SCALER_DISPEOLN_DSP4_MUX));
+
+ reg = HVS_READ(SCALER_DISPDITHER);
+ reg &= ~SCALER_DISPDITHER_DSP5_MUX_MASK;
+ HVS_WRITE(SCALER_DISPDITHER,
+ reg | VC4_SET_FIELD(3, SCALER_DISPDITHER_DSP5_MUX));
+
dispctrl = HVS_READ(SCALER_DISPCTRL);
dispctrl |= SCALER_DISPCTRL_ENABLE;
@@ -689,10 +710,6 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
SCALER_DISPCTRL_DISPEIRQ(1) |
SCALER_DISPCTRL_DISPEIRQ(2);
- /* Set DSP3 (PV1) to use HVS channel 2, which would otherwise
- * be unused.
- */
- dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
SCALER_DISPCTRL_SLVWREIRQ |
SCALER_DISPCTRL_SLVRDEIRQ |
@@ -706,7 +723,6 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
SCALER_DISPCTRL_DSPEISLUR(1) |
SCALER_DISPCTRL_DSPEISLUR(2) |
SCALER_DISPCTRL_SCLEIRQ);
- dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
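
The mux setup added above repeats the standard read-modify-write idiom for
register fields: read the register, clear the field with its mask, then OR in
the new value packed by VC4_SET_FIELD(). A generic sketch of the idiom; the
helper name is illustrative, since the driver open-codes it per register.

#include <linux/types.h>

/* Illustrative helper: replace one masked field of a register value. */
static inline u32 example_update_field(u32 reg, u32 mask, u32 field_val)
{
        return (reg & ~mask) | (field_val & mask);
}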
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 747a2d199eca..c169bd72e53b 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -283,13 +283,18 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ unsigned int channel = vc4_state->assigned_channel;
if (!vc4_state->update_muxing)
continue;
switch (vc4_crtc->data->hvs_output) {
case 2:
- mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
+ drm_WARN_ON(&vc4->base,
+ VC4_GET_FIELD(HVS_READ(SCALER_DISPCTRL),
+ SCALER_DISPCTRL_DSP3_MUX) == channel);
+
+ mux = (channel == 2) ? 0 : 1;
reg = HVS_READ(SCALER_DISPECTRL);
HVS_WRITE(SCALER_DISPECTRL,
(reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
@@ -297,10 +302,10 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
break;
case 3:
- if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+ if (channel == VC4_HVS_CHANNEL_DISABLED)
mux = 3;
else
- mux = vc4_state->assigned_channel;
+ mux = channel;
reg = HVS_READ(SCALER_DISPCTRL);
HVS_WRITE(SCALER_DISPCTRL,
@@ -309,10 +314,10 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
break;
case 4:
- if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+ if (channel == VC4_HVS_CHANNEL_DISABLED)
mux = 3;
else
- mux = vc4_state->assigned_channel;
+ mux = channel;
reg = HVS_READ(SCALER_DISPEOLN);
HVS_WRITE(SCALER_DISPEOLN,
@@ -322,10 +327,10 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
break;
case 5:
- if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+ if (channel == VC4_HVS_CHANNEL_DISABLED)
mux = 3;
else
- mux = vc4_state->assigned_channel;
+ mux = channel;
reg = HVS_READ(SCALER_DISPDITHER);
HVS_WRITE(SCALER_DISPDITHER,
@@ -434,6 +439,9 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
* requirements.
*/
clk_set_min_rate(hvs->core_clk, new_hvs_state->core_clock_rate);
+
+ drm_dbg(dev, "Core clock actual rate: %lu Hz\n",
+ clk_get_rate(hvs->core_clk));
}
}
@@ -817,9 +825,18 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
unsigned int matching_channels;
unsigned int channel;
+ drm_dbg(dev, "%s: Trying to find a channel.\n", crtc->name);
+
/* Nothing to do here, let's skip it */
- if (old_crtc_state->enable == new_crtc_state->enable)
+ if (old_crtc_state->enable == new_crtc_state->enable) {
+ if (new_crtc_state->enable)
+ drm_dbg(dev, "%s: Already enabled, reusing channel %d.\n",
+ crtc->name, new_vc4_crtc_state->assigned_channel);
+ else
+ drm_dbg(dev, "%s: Disabled, ignoring.\n", crtc->name);
+
continue;
+ }
/* Muxing will need to be modified, mark it as such */
new_vc4_crtc_state->update_muxing = true;
@@ -827,6 +844,10 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
/* If we're disabling our CRTC, we put back our channel */
if (!new_crtc_state->enable) {
channel = old_vc4_crtc_state->assigned_channel;
+
+ drm_dbg(dev, "%s: Disabling, Freeing channel %d\n",
+ crtc->name, channel);
+
hvs_new_state->fifo_state[channel].in_use = false;
new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
continue;
@@ -861,6 +882,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
return -EINVAL;
channel = ffs(matching_channels) - 1;
+
+ drm_dbg(dev, "Assigned HVS channel %d to CRTC %s\n", channel, crtc->name);
new_vc4_crtc_state->assigned_channel = channel;
unassigned_channels &= ~BIT(channel);
hvs_new_state->fifo_state[channel].in_use = true;
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 9809ca3e2945..82beb8c159f2 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -298,12 +298,18 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
if (WARN_ON(i == ARRAY_SIZE(drm_fmts)))
return;
- ctrl = TXP_GO | TXP_VSTART_AT_EOF | TXP_EI |
+ ctrl = TXP_GO | TXP_EI |
VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) |
VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT);
if (fb->format->has_alpha)
ctrl |= TXP_ALPHA_ENABLE;
+ else
+ /*
+ * If TXP_ALPHA_ENABLE isn't set and TXP_ALPHA_INVERT is, the
+ * hardware will force the output padding to be 0xff.
+ */
+ ctrl |= TXP_ALPHA_INVERT;
gem = drm_fb_cma_get_gem_obj(fb, 0);
TXP_WRITE(TXP_DST_PTR, gem->paddr + fb->offsets[0]);
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 2ddbebca87d9..c2a879734d40 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -130,6 +130,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
struct vgem_file *vfile = file->driver_priv;
struct dma_resv *resv;
struct drm_gem_object *obj;
+ enum dma_resv_usage usage;
struct dma_fence *fence;
int ret;
@@ -151,7 +152,8 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
/* Check for a conflicting fence */
resv = obj->resv;
- if (!dma_resv_test_signaled(resv, arg->flags & VGEM_FENCE_WRITE)) {
+ usage = dma_resv_usage_rw(arg->flags & VGEM_FENCE_WRITE);
+ if (!dma_resv_test_signaled(resv, usage)) {
ret = -EBUSY;
goto err_fence;
}
@@ -159,12 +161,9 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
/* Expose the fence via the dma-buf */
dma_resv_lock(resv, NULL);
ret = dma_resv_reserve_fences(resv, 1);
- if (!ret) {
- if (arg->flags & VGEM_FENCE_WRITE)
- dma_resv_add_excl_fence(resv, fence);
- else
- dma_resv_add_shared_fence(resv, fence);
- }
+ if (!ret)
+ dma_resv_add_fence(resv, fence, arg->flags & VGEM_FENCE_WRITE ?
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
dma_resv_unlock(resv);
/* Record the fence in our idr for later signaling */
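
The vgem change above is the canonical use of dma_resv_usage_rw(): a new
write must wait for existing readers and writers, while a new read only needs
to wait for writers. A sketch of the same busy check in isolation; the helper
name is hypothetical.

#include <linux/dma-resv.h>

/* Hypothetical helper mirroring the conflict check above. */
static bool example_is_busy(struct dma_resv *resv, bool write)
{
        /*
         * dma_resv_usage_rw(true) is DMA_RESV_USAGE_READ, so a new writer
         * waits for readers and writers; for a new reader it is
         * DMA_RESV_USAGE_WRITE.
         */
        return !dma_resv_test_signaled(resv, dma_resv_usage_rw(write));
}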
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 1820ca6cf673..580a78809836 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -250,7 +250,8 @@ void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
int i;
for (i = 0; i < objs->nents; i++)
- dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
+ dma_resv_add_fence(objs->objs[i]->resv, fence,
+ DMA_RESV_USAGE_WRITE);
}
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 77743fd2c61a..f8d83358d2a0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -518,9 +518,10 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
if (args->flags & VIRTGPU_WAIT_NOWAIT) {
- ret = dma_resv_test_signaled(obj->resv, true);
+ ret = dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
} else {
- ret = dma_resv_wait_timeout(obj->resv, true, true, timeout);
+ ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ,
+ true, timeout);
}
if (ret == 0)
ret = -EBUSY;
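
Note the timeout convention the caller relies on here: dma_resv_wait_timeout()
returns 0 when it times out and the remaining jiffies otherwise, which is why
0 is turned into -EBUSY. A sketch with a hypothetical wrapper name:

#include <linux/dma-resv.h>

/* Hypothetical wrapper showing the timeout convention. */
static int example_wait_idle(struct dma_resv *resv, unsigned long timeout)
{
        long ret;

        /* Interruptible wait for READ usage and everything below it. */
        ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ, true,
                                    timeout);
        if (ret == 0)
                return -EBUSY;  /* timed out */
        return ret < 0 ? ret : 0;
}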
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index fe13aa8b4a64..408ede1f967f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -528,8 +528,8 @@ static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo,
if (flags & drm_vmw_synccpu_allow_cs) {
long lret;
- lret = dma_resv_wait_timeout(bo->base.resv, true, true,
- nonblock ? 0 :
+ lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
+ true, nonblock ? 0 :
MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
@@ -758,7 +758,8 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (!ret)
- dma_resv_add_excl_fence(bo->base.resv, &fence->base);
+ dma_resv_add_fence(bo->base.resv, &fence->base,
+ DMA_RESV_USAGE_KERNEL);
else
/* Last resort fallback when we are OOM */
dma_fence_wait(&fence->base, false);
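
vmwgfx attaches its internal fence with DMA_RESV_USAGE_KERNEL: kernel fences
sort below WRITE and READ in the usage order, so every query sees them and all
later users must wait, including ones that opt out of implicit sync. A sketch
of the pattern with the same OOM fallback; the names are illustrative.

#include <linux/dma-resv.h>

/* Illustrative; mirrors vmw_bo_fence_single() above. */
static void example_fence_kernel_op(struct dma_resv *resv,
                                    struct dma_fence *fence)
{
        if (dma_resv_reserve_fences(resv, 1))
                /* Last-resort fallback when we are OOM. */
                dma_fence_wait(fence, false);
        else
                dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_KERNEL);
}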
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 626067104751..a7d62a4eb47b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1161,11 +1161,6 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
PAGE_SIZE);
vmw_bo_fence_single(bo, NULL);
- if (bo->moving)
- dma_fence_put(bo->moving);
-
- return dma_resv_get_singleton(bo->base.resv, false,
- &bo->moving);
}
return 0;
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index d32cd7538835..fce80a4a5147 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -67,7 +67,8 @@ wait_fence:
* may not be up-to-date. Wait for the exporter to finish
* the migration.
*/
- return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv, false,
+ return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL,
false, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
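
On the importer side, the RDMA code above only has to wait for KERNEL-usage
fences, i.e. a migration the exporter may still have in flight, before the
page contents can be trusted. A sketch with a hypothetical wrapper name:

#include <linux/dma-resv.h>

/* Hypothetical wrapper: uninterruptible wait with no deadline. */
static long example_wait_for_migration(struct dma_resv *resv)
{
        return dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL, false,
                                     MAX_SCHEDULE_TIMEOUT);
}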
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 2fc1b80a26ad..c4e91715ef00 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -86,10 +86,6 @@
* - fbcon state itself is protected by the console_lock, and the code does a
* pretty good job at making sure that lock is held everywhere it's needed.
*
- * - access to the registered_fb array is entirely unprotected. This should use
- * proper object lifetime handling, i.e. get/put_fb_info. This also means
- * switching from indices to proper pointers for fb_info everywhere.
- *
* - fbcon doesn't bother with fb_lock/unlock at all. This is buggy, since it
* means concurrent access to the same fbdev from both fbcon and userspace
* will blow up. To fix this all fbcon calls from fbmem.c need to be moved out
@@ -107,9 +103,23 @@ enum {
static struct fbcon_display fb_display[MAX_NR_CONSOLES];
+struct fb_info *fbcon_registered_fb[FB_MAX];
+int fbcon_num_registered_fb;
+
+#define fbcon_for_each_registered_fb(i) \
+ for (i = 0; WARN_CONSOLE_UNLOCKED(), i < FB_MAX; i++) \
+ if (!fbcon_registered_fb[i]) {} else
+
static signed char con2fb_map[MAX_NR_CONSOLES];
static signed char con2fb_map_boot[MAX_NR_CONSOLES];
+static struct fb_info *fbcon_info_from_console(int console)
+{
+ WARN_CONSOLE_UNLOCKED();
+
+ return fbcon_registered_fb[con2fb_map[console]];
+}
+
static int logo_lines;
/* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO
enums. */
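
The iterator added above hides a comma expression in the for-loop condition:
WARN_CONSOLE_UNLOCKED() is evaluated on every iteration while i < FB_MAX
remains the actual termination test, so any caller that walks the array
without holding the console lock warns. A usage sketch; the function is
illustrative only.

/* Illustrative caller; the console lock must be held while iterating. */
static void example_dump_registered_fbs(void)
{
        int i;

        console_lock();
        fbcon_for_each_registered_fb(i)
                pr_info("fb%d is registered\n",
                        fbcon_registered_fb[i]->node);
        console_unlock();
}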
@@ -163,39 +173,19 @@ static int fbcon_cursor_noblink;
* Interface used by the world
*/
-static const char *fbcon_startup(void);
-static void fbcon_init(struct vc_data *vc, int init);
-static void fbcon_deinit(struct vc_data *vc);
-static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
- int width);
-static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos);
-static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
- int count, int ypos, int xpos);
static void fbcon_clear_margins(struct vc_data *vc, int bottom_only);
-static void fbcon_cursor(struct vc_data *vc, int mode);
-static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
- int height, int width);
-static int fbcon_switch(struct vc_data *vc);
-static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch);
static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
/*
* Internal routines
*/
-static __inline__ void ywrap_up(struct vc_data *vc, int count);
-static __inline__ void ywrap_down(struct vc_data *vc, int count);
-static __inline__ void ypan_up(struct vc_data *vc, int count);
-static __inline__ void ypan_down(struct vc_data *vc, int count);
-static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
- int dy, int dx, int height, int width, u_int y_break);
static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
int unit);
static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
int line, int count, int dy);
static void fbcon_modechanged(struct fb_info *info);
static void fbcon_set_all_vcs(struct fb_info *info);
-static void fbcon_start(void);
-static void fbcon_exit(void);
+
static struct device *fbcon_device;
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_ROTATION
@@ -218,7 +208,7 @@ static void fbcon_rotate(struct fb_info *info, u32 rotate)
if (!ops || ops->currcon == -1)
return;
- fb_info = registered_fb[con2fb_map[ops->currcon]];
+ fb_info = fbcon_info_from_console(ops->currcon);
if (info == fb_info) {
struct fbcon_display *p = &fb_display[ops->currcon];
@@ -245,7 +235,7 @@ static void fbcon_rotate_all(struct fb_info *info, u32 rotate)
for (i = first_fb_vc; i <= last_fb_vc; i++) {
vc = vc_cons[i].d;
if (!vc || vc->vc_mode != KD_TEXT ||
- registered_fb[con2fb_map[i]] != info)
+ fbcon_info_from_console(i) != info)
continue;
p = &fb_display[vc->vc_num];
@@ -357,8 +347,8 @@ static int get_color(struct vc_data *vc, struct fb_info *info,
static void fb_flashcursor(struct work_struct *work)
{
- struct fb_info *info = container_of(work, struct fb_info, queue);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_ops *ops = container_of(work, struct fbcon_ops, cursor_work.work);
+ struct fb_info *info;
struct vc_data *vc = NULL;
int c;
int mode;
@@ -371,11 +361,14 @@ static void fb_flashcursor(struct work_struct *work)
if (ret == 0)
return;
- if (ops && ops->currcon != -1)
+ /* protected by console_lock */
+ info = ops->info;
+
+ if (ops->currcon != -1)
vc = vc_cons[ops->currcon].d;
if (!vc || !con_is_visible(vc) ||
- registered_fb[con2fb_map[vc->vc_num]] != info ||
+ fbcon_info_from_console(vc->vc_num) != info ||
vc->vc_deccm != 1) {
console_unlock();
return;
@@ -387,42 +380,25 @@ static void fb_flashcursor(struct work_struct *work)
ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
get_color(vc, info, c, 0));
console_unlock();
-}
-static void cursor_timer_handler(struct timer_list *t)
-{
- struct fbcon_ops *ops = from_timer(ops, t, cursor_timer);
- struct fb_info *info = ops->info;
-
- queue_work(system_power_efficient_wq, &info->queue);
- mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies);
+ queue_delayed_work(system_power_efficient_wq, &ops->cursor_work,
+ ops->cur_blink_jiffies);
}
-static void fbcon_add_cursor_timer(struct fb_info *info)
+static void fbcon_add_cursor_work(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
- if ((!info->queue.func || info->queue.func == fb_flashcursor) &&
- !(ops->flags & FBCON_FLAGS_CURSOR_TIMER) &&
- !fbcon_cursor_noblink) {
- if (!info->queue.func)
- INIT_WORK(&info->queue, fb_flashcursor);
-
- timer_setup(&ops->cursor_timer, cursor_timer_handler, 0);
- mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies);
- ops->flags |= FBCON_FLAGS_CURSOR_TIMER;
- }
+ if (!fbcon_cursor_noblink)
+ queue_delayed_work(system_power_efficient_wq, &ops->cursor_work,
+ ops->cur_blink_jiffies);
}
-static void fbcon_del_cursor_timer(struct fb_info *info)
+static void fbcon_del_cursor_work(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
- if (info->queue.func == fb_flashcursor &&
- ops->flags & FBCON_FLAGS_CURSOR_TIMER) {
- del_timer_sync(&ops->cursor_timer);
- ops->flags &= ~FBCON_FLAGS_CURSOR_TIMER;
- }
+ cancel_delayed_work_sync(&ops->cursor_work);
}
#ifndef MODULE
@@ -540,7 +516,7 @@ static int do_fbcon_takeover(int show_logo)
{
int err, i;
- if (!num_registered_fb)
+ if (!fbcon_num_registered_fb)
return -ENODEV;
if (!show_logo)
@@ -602,7 +578,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
save = kmalloc(array3_size(logo_lines, new_cols, 2),
GFP_KERNEL);
if (save) {
- int i = cols < new_cols ? cols : new_cols;
+ int i = min(cols, new_cols);
scr_memsetw(save, erase, array3_size(logo_lines, new_cols, 2));
r = q - step;
for (cnt = 0; cnt < logo_lines; cnt++, r += i)
@@ -703,87 +679,95 @@ static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount)
#endif /* CONFIG_MISC_TILEBLITTING */
-
-static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
- int unit, int oldidx)
+static void fbcon_release(struct fb_info *info)
{
- struct fbcon_ops *ops = NULL;
- int err = 0;
+ lock_fb_info(info);
+ if (info->fbops->fb_release)
+ info->fbops->fb_release(info, 0);
+ unlock_fb_info(info);
- if (!try_module_get(info->fbops->owner))
- err = -ENODEV;
+ module_put(info->fbops->owner);
- if (!err && info->fbops->fb_open &&
- info->fbops->fb_open(info, 0))
- err = -ENODEV;
+ if (info->fbcon_par) {
+ struct fbcon_ops *ops = info->fbcon_par;
- if (!err) {
- ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
- if (!ops)
- err = -ENOMEM;
+ fbcon_del_cursor_work(info);
+ kfree(ops->cursor_state.mask);
+ kfree(ops->cursor_data);
+ kfree(ops->cursor_src);
+ kfree(ops->fontbuffer);
+ kfree(info->fbcon_par);
+ info->fbcon_par = NULL;
}
+}
- if (!err) {
- ops->cur_blink_jiffies = HZ / 5;
- ops->info = info;
- info->fbcon_par = ops;
+static int fbcon_open(struct fb_info *info)
+{
+ struct fbcon_ops *ops;
- if (vc)
- set_blitting_type(vc, info);
- }
+ if (!try_module_get(info->fbops->owner))
+ return -ENODEV;
- if (err) {
- con2fb_map[unit] = oldidx;
+ lock_fb_info(info);
+ if (info->fbops->fb_open &&
+ info->fbops->fb_open(info, 0)) {
+ unlock_fb_info(info);
module_put(info->fbops->owner);
+ return -ENODEV;
+ }
+ unlock_fb_info(info);
+
+ ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
+ if (!ops) {
+ fbcon_release(info);
+ return -ENOMEM;
}
+ INIT_DELAYED_WORK(&ops->cursor_work, fb_flashcursor);
+ ops->info = info;
+ info->fbcon_par = ops;
+ ops->cur_blink_jiffies = HZ / 5;
+
+ return 0;
+}
+
+static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
+ int unit)
+{
+ int err;
+
+ err = fbcon_open(info);
+ if (err)
+ return err;
+
+ if (vc)
+ set_blitting_type(vc, info);
+
return err;
}
-static int con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo,
- struct fb_info *newinfo, int unit,
- int oldidx, int found)
+static void con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo,
+ struct fb_info *newinfo)
{
- struct fbcon_ops *ops = oldinfo->fbcon_par;
- int err = 0, ret;
+ int ret;
- if (oldinfo->fbops->fb_release &&
- oldinfo->fbops->fb_release(oldinfo, 0)) {
- con2fb_map[unit] = oldidx;
- if (!found && newinfo->fbops->fb_release)
- newinfo->fbops->fb_release(newinfo, 0);
- if (!found)
- module_put(newinfo->fbops->owner);
- err = -ENODEV;
- }
+ fbcon_release(oldinfo);
- if (!err) {
- fbcon_del_cursor_timer(oldinfo);
- kfree(ops->cursor_state.mask);
- kfree(ops->cursor_data);
- kfree(ops->cursor_src);
- kfree(ops->fontbuffer);
- kfree(oldinfo->fbcon_par);
- oldinfo->fbcon_par = NULL;
- module_put(oldinfo->fbops->owner);
- /*
- If oldinfo and newinfo are driving the same hardware,
- the fb_release() method of oldinfo may attempt to
- restore the hardware state. This will leave the
- newinfo in an undefined state. Thus, a call to
- fb_set_par() may be needed for the newinfo.
- */
- if (newinfo && newinfo->fbops->fb_set_par) {
- ret = newinfo->fbops->fb_set_par(newinfo);
+ /*
+ If oldinfo and newinfo are driving the same hardware,
+ the fb_release() method of oldinfo may attempt to
+ restore the hardware state. This will leave the
+ newinfo in an undefined state. Thus, a call to
+ fb_set_par() may be needed for the newinfo.
+ */
+ if (newinfo && newinfo->fbops->fb_set_par) {
+ ret = newinfo->fbops->fb_set_par(newinfo);
- if (ret)
- printk(KERN_ERR "con2fb_release_oldinfo: "
- "detected unhandled fb_set_par error, "
- "error code %d\n", ret);
- }
+ if (ret)
+ printk(KERN_ERR "con2fb_release_oldinfo: "
+ "detected unhandled fb_set_par error, "
+ "error code %d\n", ret);
}
-
- return err;
}
static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
@@ -794,7 +778,7 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
ops->currcon = fg_console;
- if (info->fbops->fb_set_par && !(ops->flags & FBCON_FLAGS_INIT)) {
+ if (info->fbops->fb_set_par && !ops->initialized) {
ret = info->fbops->fb_set_par(info);
if (ret)
@@ -803,14 +787,14 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
"error code %d\n", ret);
}
- ops->flags |= FBCON_FLAGS_INIT;
+ ops->initialized = true;
ops->graphics = 0;
fbcon_set_disp(info, &info->var, unit);
if (show_logo) {
struct vc_data *fg_vc = vc_cons[fg_console].d;
struct fb_info *fg_info =
- registered_fb[con2fb_map[fg_console]];
+ fbcon_info_from_console(fg_console);
fbcon_prepare_logo(fg_vc, fg_info, fg_vc->vc_cols,
fg_vc->vc_rows, fg_vc->vc_cols,
@@ -835,9 +819,9 @@ static int set_con2fb_map(int unit, int newidx, int user)
{
struct vc_data *vc = vc_cons[unit].d;
int oldidx = con2fb_map[unit];
- struct fb_info *info = registered_fb[newidx];
+ struct fb_info *info = fbcon_registered_fb[newidx];
struct fb_info *oldinfo = NULL;
- int found, err = 0;
+ int found, err = 0, show_logo;
WARN_CONSOLE_UNLOCKED();
@@ -853,31 +837,30 @@ static int set_con2fb_map(int unit, int newidx, int user)
}
if (oldidx != -1)
- oldinfo = registered_fb[oldidx];
+ oldinfo = fbcon_registered_fb[oldidx];
found = search_fb_in_map(newidx);
- con2fb_map[unit] = newidx;
- if (!err && !found)
- err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
+ if (!err && !found) {
+ err = con2fb_acquire_newinfo(vc, info, unit);
+ if (!err)
+ con2fb_map[unit] = newidx;
+ }
/*
* If old fb is not mapped to any of the consoles,
* fbcon should release it.
*/
if (!err && oldinfo && !search_fb_in_map(oldidx))
- err = con2fb_release_oldinfo(vc, oldinfo, info, unit, oldidx,
- found);
+ con2fb_release_oldinfo(vc, oldinfo, info);
- if (!err) {
- int show_logo = (fg_console == 0 && !user &&
- logo_shown != FBCON_LOGO_DONTSHOW);
+ show_logo = (fg_console == 0 && !user &&
+ logo_shown != FBCON_LOGO_DONTSHOW);
- if (!found)
- fbcon_add_cursor_timer(info);
- con2fb_map_boot[unit] = newidx;
- con2fb_init_display(vc, info, unit, show_logo);
- }
+ if (!found)
+ fbcon_add_cursor_work(info);
+ con2fb_map_boot[unit] = newidx;
+ con2fb_init_display(vc, info, unit, show_logo);
if (!search_fb_in_map(info_idx))
info_idx = newidx;
@@ -938,7 +921,6 @@ static const char *fbcon_startup(void)
struct fbcon_display *p = &fb_display[fg_console];
struct vc_data *vc = vc_cons[fg_console].d;
const struct font_desc *font = NULL;
- struct module *owner;
struct fb_info *info = NULL;
struct fbcon_ops *ops;
int rows, cols;
@@ -947,36 +929,23 @@ static const char *fbcon_startup(void)
* If num_registered_fb is zero, this is a call for the dummy part.
* The frame buffer devices weren't initialized yet.
*/
- if (!num_registered_fb || info_idx == -1)
+ if (!fbcon_num_registered_fb || info_idx == -1)
return display_desc;
/*
* Instead of blindly using registered_fb[0], we use info_idx, set by
- * fb_console_init();
+ * fbcon_fb_registered();
*/
- info = registered_fb[info_idx];
+ info = fbcon_registered_fb[info_idx];
if (!info)
return NULL;
- owner = info->fbops->owner;
- if (!try_module_get(owner))
- return NULL;
- if (info->fbops->fb_open && info->fbops->fb_open(info, 0)) {
- module_put(owner);
+ if (fbcon_open(info))
return NULL;
- }
-
- ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
- if (!ops) {
- module_put(owner);
- return NULL;
- }
+ ops = info->fbcon_par;
ops->currcon = -1;
ops->graphics = 1;
ops->cur_rotate = -1;
- ops->cur_blink_jiffies = HZ / 5;
- ops->info = info;
- info->fbcon_par = ops;
p->con_rotate = initial_rotation;
if (p->con_rotate == -1)
@@ -1013,7 +982,7 @@ static const char *fbcon_startup(void)
info->var.yres,
info->var.bits_per_pixel);
- fbcon_add_cursor_timer(info);
+ fbcon_add_cursor_work(info);
return display_desc;
}
@@ -1033,7 +1002,7 @@ static void fbcon_init(struct vc_data *vc, int init)
if (con2fb_map[vc->vc_num] == -1)
con2fb_map[vc->vc_num] = info_idx;
- info = registered_fb[con2fb_map[vc->vc_num]];
+ info = fbcon_info_from_console(vc->vc_num);
if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET)
logo_shown = FBCON_LOGO_DONTSHOW;
@@ -1046,7 +1015,7 @@ static void fbcon_init(struct vc_data *vc, int init)
return;
if (!info->fbcon_par)
- con2fb_acquire_newinfo(vc, info, vc->vc_num, -1);
+ con2fb_acquire_newinfo(vc, info, vc->vc_num);
/* If we are not the first console on this
fb, copy the font from that console */
@@ -1120,8 +1089,7 @@ static void fbcon_init(struct vc_data *vc, int init)
* We need to do it in fbcon_init() to prevent screen corruption.
*/
if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
- if (info->fbops->fb_set_par &&
- !(ops->flags & FBCON_FLAGS_INIT)) {
+ if (info->fbops->fb_set_par && !ops->initialized) {
ret = info->fbops->fb_set_par(info);
if (ret)
@@ -1130,7 +1098,7 @@ static void fbcon_init(struct vc_data *vc, int init)
"error code %d\n", ret);
}
- ops->flags |= FBCON_FLAGS_INIT;
+ ops->initialized = true;
}
ops->graphics = 0;
@@ -1175,6 +1143,27 @@ static void fbcon_free_font(struct fbcon_display *p, bool freefont)
static void set_vc_hi_font(struct vc_data *vc, bool set);
+static void fbcon_release_all(void)
+{
+ struct fb_info *info;
+ int i, j, mapped;
+
+ fbcon_for_each_registered_fb(i) {
+ mapped = 0;
+ info = fbcon_registered_fb[i];
+
+ for (j = first_fb_vc; j <= last_fb_vc; j++) {
+ if (con2fb_map[j] == i) {
+ mapped = 1;
+ con2fb_map[j] = -1;
+ }
+ }
+
+ if (mapped)
+ fbcon_release(info);
+ }
+}
+
static void fbcon_deinit(struct vc_data *vc)
{
struct fbcon_display *p = &fb_display[vc->vc_num];
@@ -1188,7 +1177,7 @@ static void fbcon_deinit(struct vc_data *vc)
if (idx == -1)
goto finished;
- info = registered_fb[idx];
+ info = fbcon_registered_fb[idx];
if (!info)
goto finished;
@@ -1201,9 +1190,9 @@ static void fbcon_deinit(struct vc_data *vc)
goto finished;
if (con_is_visible(vc))
- fbcon_del_cursor_timer(info);
+ fbcon_del_cursor_work(info);
- ops->flags &= ~FBCON_FLAGS_INIT;
+ ops->initialized = false;
finished:
fbcon_free_font(p, free_font);
@@ -1214,7 +1203,7 @@ finished:
set_vc_hi_font(vc, false);
if (!con_is_bound(&fb_con))
- fbcon_exit();
+ fbcon_release_all();
if (vc->vc_num == logo_shown)
logo_shown = FBCON_LOGO_CANSHOW;
@@ -1250,7 +1239,7 @@ finished:
static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
int width)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
@@ -1288,7 +1277,7 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
int count, int ypos, int xpos)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_display *p = &fb_display[vc->vc_num];
struct fbcon_ops *ops = info->fbcon_par;
@@ -1308,7 +1297,7 @@ static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
if (!fbcon_is_inactive(vc, info))
@@ -1317,7 +1306,7 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
static void fbcon_cursor(struct vc_data *vc, int mode)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
int c = scr_readw((u16 *) vc->vc_pos);
@@ -1327,9 +1316,9 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
return;
if (vc->vc_cursor_type & CUR_SW)
- fbcon_del_cursor_timer(info);
+ fbcon_del_cursor_work(info);
else
- fbcon_add_cursor_timer(info);
+ fbcon_add_cursor_work(info);
ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
@@ -1411,7 +1400,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
static __inline__ void ywrap_up(struct vc_data *vc, int count)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
@@ -1430,7 +1419,7 @@ static __inline__ void ywrap_up(struct vc_data *vc, int count)
static __inline__ void ywrap_down(struct vc_data *vc, int count)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
@@ -1449,7 +1438,7 @@ static __inline__ void ywrap_down(struct vc_data *vc, int count)
static __inline__ void ypan_up(struct vc_data *vc, int count)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_display *p = &fb_display[vc->vc_num];
struct fbcon_ops *ops = info->fbcon_par;
@@ -1473,7 +1462,7 @@ static __inline__ void ypan_up(struct vc_data *vc, int count)
static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
@@ -1497,7 +1486,7 @@ static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
static __inline__ void ypan_down(struct vc_data *vc, int count)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_display *p = &fb_display[vc->vc_num];
struct fbcon_ops *ops = info->fbcon_par;
@@ -1521,7 +1510,7 @@ static __inline__ void ypan_down(struct vc_data *vc, int count)
static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
@@ -1682,10 +1671,75 @@ static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p,
}
}
+static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
+ int dy, int dx, int height, int width, u_int y_break)
+{
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
+ struct fbcon_ops *ops = info->fbcon_par;
+ u_int b;
+
+ if (sy < y_break && sy + height > y_break) {
+ b = y_break - sy;
+ if (dy < sy) { /* Avoid trashing self */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ } else {
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ }
+ return;
+ }
+
+ if (dy < y_break && dy + height > y_break) {
+ b = y_break - dy;
+ if (dy < sy) { /* Avoid trashing self */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ } else {
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ }
+ return;
+ }
+ ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ height, width);
+}
+
+static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
+ int height, int width)
+{
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ if (fbcon_is_inactive(vc, info))
+ return;
+
+ if (!width || !height)
+ return;
+
+ /* Split blits that cross physical y_wrap case.
+ * Pathological case involves 4 blits, better to use recursive
+ * code rather than unrolled case
+ *
+ * Recursive invocations don't need to erase the cursor over and
+ * over again, so we use fbcon_bmove_rec()
+ */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width,
+ p->vrows - p->yscroll);
+}
+
static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
enum con_scroll dir, unsigned int count)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_display *p = &fb_display[vc->vc_num];
int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
@@ -1882,71 +1936,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
}
-static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
- int height, int width)
-{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct fbcon_display *p = &fb_display[vc->vc_num];
-
- if (fbcon_is_inactive(vc, info))
- return;
-
- if (!width || !height)
- return;
-
- /* Split blits that cross physical y_wrap case.
- * Pathological case involves 4 blits, better to use recursive
- * code rather than unrolled case
- *
- * Recursive invocations don't need to erase the cursor over and
- * over again, so we use fbcon_bmove_rec()
- */
- fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width,
- p->vrows - p->yscroll);
-}
-
-static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
- int dy, int dx, int height, int width, u_int y_break)
-{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct fbcon_ops *ops = info->fbcon_par;
- u_int b;
-
- if (sy < y_break && sy + height > y_break) {
- b = y_break - sy;
- if (dy < sy) { /* Avoid trashing self */
- fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
- y_break);
- fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
- height - b, width, y_break);
- } else {
- fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
- height - b, width, y_break);
- fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
- y_break);
- }
- return;
- }
-
- if (dy < y_break && dy + height > y_break) {
- b = y_break - dy;
- if (dy < sy) { /* Avoid trashing self */
- fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
- y_break);
- fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
- height - b, width, y_break);
- } else {
- fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
- height - b, width, y_break);
- fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
- y_break);
- }
- return;
- }
- ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
- height, width);
-}
-
static void updatescrollmode_accel(struct fbcon_display *p,
struct fb_info *info,
struct vc_data *vc)
@@ -2015,7 +2004,7 @@ static void updatescrollmode(struct fbcon_display *p,
static int fbcon_resize(struct vc_data *vc, unsigned int width,
unsigned int height, unsigned int user)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
struct fb_var_screeninfo var = info->var;
@@ -2084,7 +2073,7 @@ static int fbcon_switch(struct vc_data *vc)
struct fb_var_screeninfo var;
int i, ret, prev_console;
- info = registered_fb[con2fb_map[vc->vc_num]];
+ info = fbcon_info_from_console(vc->vc_num);
ops = info->fbcon_par;
if (logo_shown >= 0) {
@@ -2098,7 +2087,7 @@ static int fbcon_switch(struct vc_data *vc)
prev_console = ops->currcon;
if (prev_console != -1)
- old_info = registered_fb[con2fb_map[prev_console]];
+ old_info = fbcon_info_from_console(prev_console);
/*
* FIXME: If we have multiple fbdev's loaded, we need to
* update all info->currcon. Perhaps, we can place this
@@ -2107,9 +2096,9 @@ static int fbcon_switch(struct vc_data *vc)
*
* info->currcon = vc->vc_num;
*/
- for_each_registered_fb(i) {
- if (registered_fb[i]->fbcon_par) {
- struct fbcon_ops *o = registered_fb[i]->fbcon_par;
+ fbcon_for_each_registered_fb(i) {
+ if (fbcon_registered_fb[i]->fbcon_par) {
+ struct fbcon_ops *o = fbcon_registered_fb[i]->fbcon_par;
o->currcon = vc->vc_num;
}
@@ -2139,14 +2128,14 @@ static int fbcon_switch(struct vc_data *vc)
}
if (old_info != info)
- fbcon_del_cursor_timer(old_info);
+ fbcon_del_cursor_work(old_info);
}
if (fbcon_is_inactive(vc, info) ||
ops->blank_state != FB_BLANK_UNBLANK)
- fbcon_del_cursor_timer(info);
+ fbcon_del_cursor_work(info);
else
- fbcon_add_cursor_timer(info);
+ fbcon_add_cursor_work(info);
set_blitting_type(vc, info);
ops->cursor_reset = 1;
@@ -2221,7 +2210,7 @@ static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info,
static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
if (mode_switch) {
@@ -2254,16 +2243,16 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
if (mode_switch || fbcon_is_inactive(vc, info) ||
ops->blank_state != FB_BLANK_UNBLANK)
- fbcon_del_cursor_timer(info);
+ fbcon_del_cursor_work(info);
else
- fbcon_add_cursor_timer(info);
+ fbcon_add_cursor_work(info);
return 0;
}
static int fbcon_debug_enter(struct vc_data *vc)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
ops->save_graphics = ops->graphics;
@@ -2276,7 +2265,7 @@ static int fbcon_debug_enter(struct vc_data *vc)
static int fbcon_debug_leave(struct vc_data *vc)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
ops->graphics = ops->save_graphics;
@@ -2412,7 +2401,7 @@ static void set_vc_hi_font(struct vc_data *vc, bool set)
static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
const u8 * data, int userfont)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
int resize;
@@ -2466,7 +2455,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
unsigned int flags)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
unsigned charcount = font->charcount;
int w = font->width;
int h = font->height;
@@ -2530,7 +2519,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
static int fbcon_set_def_font(struct vc_data *vc, struct console_font *font, char *name)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
const struct font_desc *f;
if (!name)
@@ -2554,7 +2543,7 @@ static struct fb_cmap palette_cmap = {
static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
int i, j, k, depth;
u8 val;
@@ -2670,7 +2659,7 @@ static void fbcon_modechanged(struct fb_info *info)
return;
vc = vc_cons[ops->currcon].d;
if (vc->vc_mode != KD_TEXT ||
- registered_fb[con2fb_map[ops->currcon]] != info)
+ fbcon_info_from_console(ops->currcon) != info)
return;
p = &fb_display[vc->vc_num];
@@ -2710,7 +2699,7 @@ static void fbcon_set_all_vcs(struct fb_info *info)
for (i = first_fb_vc; i <= last_fb_vc; i++) {
vc = vc_cons[i].d;
if (!vc || vc->vc_mode != KD_TEXT ||
- registered_fb[con2fb_map[i]] != info)
+ fbcon_info_from_console(i) != info)
continue;
if (con_is_visible(vc)) {
@@ -2754,7 +2743,7 @@ int fbcon_mode_deleted(struct fb_info *info,
j = con2fb_map[i];
if (j == -1)
continue;
- fb_info = registered_fb[j];
+ fb_info = fbcon_registered_fb[j];
if (fb_info != info)
continue;
p = &fb_display[i];
@@ -2783,16 +2772,17 @@ static void fbcon_unbind(void)
static inline void fbcon_unbind(void) {}
#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
-/* called with console_lock held */
void fbcon_fb_unbind(struct fb_info *info)
{
- int i, new_idx = -1, ret = 0;
+ int i, new_idx = -1;
int idx = info->node;
- WARN_CONSOLE_UNLOCKED();
+ console_lock();
- if (!fbcon_has_console_bind)
+ if (!fbcon_has_console_bind) {
+ console_unlock();
return;
+ }
for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map[i] != idx &&
@@ -2808,7 +2798,7 @@ void fbcon_fb_unbind(struct fb_info *info)
set_con2fb_map(i, new_idx, 0);
}
} else {
- struct fb_info *info = registered_fb[idx];
+ struct fb_info *info = fbcon_registered_fb[idx];
/* This is sort of like set_con2fb_map, except it maps
* the consoles to no device and then releases the
@@ -2820,29 +2810,30 @@ void fbcon_fb_unbind(struct fb_info *info)
if (con2fb_map[i] == idx) {
con2fb_map[i] = -1;
if (!search_fb_in_map(idx)) {
- ret = con2fb_release_oldinfo(vc_cons[i].d,
- info, NULL, i,
- idx, 0);
- if (ret) {
- con2fb_map[i] = idx;
- return;
- }
+ con2fb_release_oldinfo(vc_cons[i].d,
+ info, NULL);
}
}
}
fbcon_unbind();
}
+
+ console_unlock();
}
-/* called with console_lock held */
void fbcon_fb_unregistered(struct fb_info *info)
{
int i, idx;
- WARN_CONSOLE_UNLOCKED();
+ console_lock();
- if (deferred_takeover)
+ fbcon_registered_fb[info->node] = NULL;
+ fbcon_num_registered_fb--;
+
+ if (deferred_takeover) {
+ console_unlock();
return;
+ }
idx = info->node;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
@@ -2853,7 +2844,7 @@ void fbcon_fb_unregistered(struct fb_info *info)
if (idx == info_idx) {
info_idx = -1;
- for_each_registered_fb(i) {
+ fbcon_for_each_registered_fb(i) {
info_idx = i;
break;
}
@@ -2869,8 +2860,9 @@ void fbcon_fb_unregistered(struct fb_info *info)
if (primary_device == idx)
primary_device = -1;
- if (!num_registered_fb)
+ if (!fbcon_num_registered_fb)
do_unregister_con_driver(&fb_con);
+ console_unlock();
}
void fbcon_remap_all(struct fb_info *info)
@@ -2928,13 +2920,21 @@ static inline void fbcon_select_primary(struct fb_info *info)
}
#endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */
+static bool lockless_register_fb;
+module_param_named_unsafe(lockless_register_fb, lockless_register_fb, bool, 0400);
+MODULE_PARM_DESC(lockless_register_fb,
+ "Lockless framebuffer registration for debugging [default=off]");
+
/* called with console_lock held */
-int fbcon_fb_registered(struct fb_info *info)
+static int do_fb_registered(struct fb_info *info)
{
int ret = 0, i, idx;
WARN_CONSOLE_UNLOCKED();
+ fbcon_registered_fb[info->node] = info;
+ fbcon_num_registered_fb++;
+
idx = info->node;
fbcon_select_primary(info);
@@ -2963,6 +2963,25 @@ int fbcon_fb_registered(struct fb_info *info)
return ret;
}
+int fbcon_fb_registered(struct fb_info *info)
+{
+ int ret;
+
+ if (!lockless_register_fb)
+ console_lock();
+ else
+ atomic_inc(&ignore_console_lock_warning);
+
+ ret = do_fb_registered(info);
+
+ if (!lockless_register_fb)
+ console_unlock();
+ else
+ atomic_dec(&ignore_console_lock_warning);
+
+ return ret;
+}
+
void fbcon_fb_blanked(struct fb_info *info, int blank)
{
struct fbcon_ops *ops = info->fbcon_par;
@@ -2973,7 +2992,7 @@ void fbcon_fb_blanked(struct fb_info *info, int blank)
vc = vc_cons[ops->currcon].d;
if (vc->vc_mode != KD_TEXT ||
- registered_fb[con2fb_map[ops->currcon]] != info)
+ fbcon_info_from_console(ops->currcon) != info)
return;
if (con_is_visible(vc)) {
@@ -2993,7 +3012,7 @@ void fbcon_new_modelist(struct fb_info *info)
const struct fb_videomode *mode;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
- if (registered_fb[con2fb_map[i]] != info)
+ if (fbcon_info_from_console(i) != info)
continue;
if (!fb_display[i].mode)
continue;
@@ -3048,9 +3067,9 @@ int fbcon_set_con2fb_map_ioctl(void __user *argp)
return -EINVAL;
if (con2fb.framebuffer >= FB_MAX)
return -EINVAL;
- if (!registered_fb[con2fb.framebuffer])
+ if (!fbcon_registered_fb[con2fb.framebuffer])
request_module("fb%d", con2fb.framebuffer);
- if (!registered_fb[con2fb.framebuffer]) {
+ if (!fbcon_registered_fb[con2fb.framebuffer]) {
return -EINVAL;
}
@@ -3117,10 +3136,10 @@ static ssize_t store_rotate(struct device *device,
console_lock();
idx = con2fb_map[fg_console];
- if (idx == -1 || registered_fb[idx] == NULL)
+ if (idx == -1 || fbcon_registered_fb[idx] == NULL)
goto err;
- info = registered_fb[idx];
+ info = fbcon_registered_fb[idx];
rotate = simple_strtoul(buf, last, 0);
fbcon_rotate(info, rotate);
err:
@@ -3139,10 +3158,10 @@ static ssize_t store_rotate_all(struct device *device,
console_lock();
idx = con2fb_map[fg_console];
- if (idx == -1 || registered_fb[idx] == NULL)
+ if (idx == -1 || fbcon_registered_fb[idx] == NULL)
goto err;
- info = registered_fb[idx];
+ info = fbcon_registered_fb[idx];
rotate = simple_strtoul(buf, last, 0);
fbcon_rotate_all(info, rotate);
err:
@@ -3159,14 +3178,14 @@ static ssize_t show_rotate(struct device *device,
console_lock();
idx = con2fb_map[fg_console];
- if (idx == -1 || registered_fb[idx] == NULL)
+ if (idx == -1 || fbcon_registered_fb[idx] == NULL)
goto err;
- info = registered_fb[idx];
+ info = fbcon_registered_fb[idx];
rotate = fbcon_get_rotate(info);
err:
console_unlock();
- return snprintf(buf, PAGE_SIZE, "%d\n", rotate);
+ return sysfs_emit(buf, "%d\n", rotate);
}
static ssize_t show_cursor_blink(struct device *device,
@@ -3179,19 +3198,19 @@ static ssize_t show_cursor_blink(struct device *device,
console_lock();
idx = con2fb_map[fg_console];
- if (idx == -1 || registered_fb[idx] == NULL)
+ if (idx == -1 || fbcon_registered_fb[idx] == NULL)
goto err;
- info = registered_fb[idx];
+ info = fbcon_registered_fb[idx];
ops = info->fbcon_par;
if (!ops)
goto err;
- blink = (ops->flags & FBCON_FLAGS_CURSOR_TIMER) ? 1 : 0;
+ blink = delayed_work_pending(&ops->cursor_work);
err:
console_unlock();
- return snprintf(buf, PAGE_SIZE, "%d\n", blink);
+ return sysfs_emit(buf, "%d\n", blink);
}
static ssize_t store_cursor_blink(struct device *device,
@@ -3205,10 +3224,10 @@ static ssize_t store_cursor_blink(struct device *device,
console_lock();
idx = con2fb_map[fg_console];
- if (idx == -1 || registered_fb[idx] == NULL)
+ if (idx == -1 || fbcon_registered_fb[idx] == NULL)
goto err;
- info = registered_fb[idx];
+ info = fbcon_registered_fb[idx];
if (!info->fbcon_par)
goto err;
@@ -3217,10 +3236,10 @@ static ssize_t store_cursor_blink(struct device *device,
if (blink) {
fbcon_cursor_noblink = 0;
- fbcon_add_cursor_timer(info);
+ fbcon_add_cursor_work(info);
} else {
fbcon_cursor_noblink = 1;
- fbcon_del_cursor_timer(info);
+ fbcon_del_cursor_work(info);
}
err:
@@ -3265,8 +3284,11 @@ static void fbcon_register_existing_fbs(struct work_struct *work)
console_lock();
- for_each_registered_fb(i)
- fbcon_fb_registered(registered_fb[i]);
+ deferred_takeover = false;
+ logo_shown = FBCON_LOGO_DONTSHOW;
+
+ fbcon_for_each_registered_fb(i)
+ do_fb_registered(fbcon_registered_fb[i]);
console_unlock();
}
@@ -3282,8 +3304,6 @@ static int fbcon_output_notifier(struct notifier_block *nb,
pr_info("fbcon: Taking over console\n");
dummycon_unregister_output_notifier(&fbcon_output_nb);
- deferred_takeover = false;
- logo_shown = FBCON_LOGO_DONTSHOW;
/* We may get called in atomic context */
schedule_work(&fbcon_deferred_takeover_work);
@@ -3306,67 +3326,6 @@ static void fbcon_start(void)
return;
}
#endif
-
- if (num_registered_fb) {
- int i;
-
- for_each_registered_fb(i) {
- info_idx = i;
- break;
- }
-
- do_fbcon_takeover(0);
- }
-}
-
-static void fbcon_exit(void)
-{
- struct fb_info *info;
- int i, j, mapped;
-
-#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
- if (deferred_takeover) {
- dummycon_unregister_output_notifier(&fbcon_output_nb);
- deferred_takeover = false;
- }
-#endif
-
- for_each_registered_fb(i) {
- int pending = 0;
-
- mapped = 0;
- info = registered_fb[i];
-
- if (info->queue.func)
- pending = cancel_work_sync(&info->queue);
- pr_debug("fbcon: %s pending work\n", (pending ? "canceled" : "no"));
-
- for (j = first_fb_vc; j <= last_fb_vc; j++) {
- if (con2fb_map[j] == i) {
- mapped = 1;
- con2fb_map[j] = -1;
- }
- }
-
- if (mapped) {
- if (info->fbops->fb_release)
- info->fbops->fb_release(info, 0);
- module_put(info->fbops->owner);
-
- if (info->fbcon_par) {
- struct fbcon_ops *ops = info->fbcon_par;
-
- fbcon_del_cursor_timer(info);
- kfree(ops->cursor_src);
- kfree(ops->cursor_state.mask);
- kfree(info->fbcon_par);
- info->fbcon_par = NULL;
- }
-
- if (info->queue.func == fb_flashcursor)
- info->queue.func = NULL;
- }
- }
}
void __init fb_console_init(void)
@@ -3408,10 +3367,19 @@ static void __exit fbcon_deinit_device(void)
void __exit fb_console_exit(void)
{
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+ console_lock();
+ if (deferred_takeover)
+ dummycon_unregister_output_notifier(&fbcon_output_nb);
+ console_unlock();
+
+ cancel_work_sync(&fbcon_deferred_takeover_work);
+#endif
+
console_lock();
fbcon_deinit_device();
device_destroy(fb_class, MKDEV(0, 0));
- fbcon_exit();
+
do_unregister_con_driver(&fb_con);
console_unlock();
}
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index 969d41ecede5..0eaf54a21151 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -14,12 +14,10 @@
#include <linux/types.h>
#include <linux/vt_buffer.h>
#include <linux/vt_kern.h>
+#include <linux/workqueue.h>
#include <asm/io.h>
-#define FBCON_FLAGS_INIT 1
-#define FBCON_FLAGS_CURSOR_TIMER 2
-
/*
* This is the interface between the low-level console driver and the
* low-level frame buffer device
@@ -68,7 +66,7 @@ struct fbcon_ops {
int (*update_start)(struct fb_info *info);
int (*rotate_font)(struct fb_info *info, struct vc_data *vc);
struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */
- struct timer_list cursor_timer; /* Cursor timer */
+ struct delayed_work cursor_work; /* Cursor timer */
struct fb_cursor cursor_state;
struct fbcon_display *p;
struct fb_info *info;
@@ -79,7 +77,7 @@ struct fbcon_ops {
int blank_state;
int graphics;
int save_graphics; /* for debug enter/leave */
- int flags;
+ bool initialized;
int rotate;
int cur_rotate;
char *cursor_data;
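
The struct change above completes the conversion from a timer that queued a
work item to a single delayed work: one object handles both scheduling and
execution, the worker re-arms itself, and teardown shrinks to one
cancel_delayed_work_sync() call. A self-contained sketch of the self-rearming
pattern; all names are illustrative.

#include <linux/workqueue.h>

struct example_blink {
        struct delayed_work work;
        unsigned long interval;         /* re-arm period in jiffies */
};

static void example_blink_fn(struct work_struct *work)
{
        struct example_blink *b =
                container_of(work, struct example_blink, work.work);

        /* ... toggle the cursor here ... */

        /* Re-arm ourselves for the next blink. */
        queue_delayed_work(system_power_efficient_wq, &b->work,
                           b->interval);
}

static void example_blink_start(struct example_blink *b)
{
        INIT_DELAYED_WORK(&b->work, example_blink_fn);
        b->interval = HZ / 5;
        queue_delayed_work(system_power_efficient_wq, &b->work,
                           b->interval);
}

static void example_blink_stop(struct example_blink *b)
{
        /* Waits for a running worker and stops further re-arming. */
        cancel_delayed_work_sync(&b->work);
}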
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index a6bb0e438216..bc6ed750e915 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1597,14 +1597,9 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
}
}
-static bool lockless_register_fb;
-module_param_named_unsafe(lockless_register_fb, lockless_register_fb, bool, 0400);
-MODULE_PARM_DESC(lockless_register_fb,
- "Lockless framebuffer registration for debugging [default=off]");
-
static int do_register_framebuffer(struct fb_info *fb_info)
{
- int i, ret;
+ int i;
struct fb_videomode mode;
if (fb_check_foreignness(fb_info))
@@ -1673,19 +1668,7 @@ static int do_register_framebuffer(struct fb_info *fb_info)
}
#endif
- if (!lockless_register_fb)
- console_lock();
- else
- atomic_inc(&ignore_console_lock_warning);
- lock_fb_info(fb_info);
- ret = fbcon_fb_registered(fb_info);
- unlock_fb_info(fb_info);
-
- if (!lockless_register_fb)
- console_unlock();
- else
- atomic_dec(&ignore_console_lock_warning);
- return ret;
+ return fbcon_fb_registered(fb_info);
}
static void unbind_console(struct fb_info *fb_info)
@@ -1695,11 +1678,7 @@ static void unbind_console(struct fb_info *fb_info)
if (WARN_ON(i < 0 || i >= FB_MAX || registered_fb[i] != fb_info))
return;
- console_lock();
- lock_fb_info(fb_info);
fbcon_fb_unbind(fb_info);
- unlock_fb_info(fb_info);
- console_unlock();
}
static void unlink_framebuffer(struct fb_info *fb_info)
@@ -1742,9 +1721,7 @@ static void do_unregister_framebuffer(struct fb_info *fb_info)
fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
}
#endif
- console_lock();
fbcon_fb_unregistered(fb_info);
- console_unlock();
/* this may free fb info */
put_fb_info(fb_info);
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index 26892940c213..8c1ee9ecec3d 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -91,9 +91,11 @@ static int activate(struct fb_info *fb_info, struct fb_var_screeninfo *var)
var->activate |= FB_ACTIVATE_FORCE;
console_lock();
+ lock_fb_info(fb_info);
err = fb_set_var(fb_info, var);
if (!err)
fbcon_update_vcs(fb_info, var->activate & FB_ACTIVATE_ALL);
+ unlock_fb_info(fb_info);
console_unlock();
if (err)
return err;
diff --git a/include/drm/dp/drm_dp_helper.h b/include/drm/dp/drm_dp_helper.h
index 1eccd9741943..91af98e6617c 100644
--- a/include/drm/dp/drm_dp_helper.h
+++ b/include/drm/dp/drm_dp_helper.h
@@ -2053,6 +2053,7 @@ struct drm_dp_aux {
bool is_remote;
};
+int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset);
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size);
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index c76932b68a33..2d524f8b0802 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -94,7 +94,6 @@ struct ttm_tt;
* @deleted: True if the object is only a zombie and already deleted.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
- * @moving: Fence set when BO is moving
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
* @cur_placement: Hint of current placement.
@@ -147,7 +146,6 @@ struct ttm_buffer_object {
* Members protected by a bo reservation.
*/
- struct dma_fence *moving;
unsigned priority;
unsigned pin_count;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 059a595e14e5..897b88f0bd59 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -245,7 +245,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem);
/**
- * ttm_bo_move_accel_cleanup.
+ * ttm_bo_move_sync_cleanup.
*
* @bo: A pointer to a struct ttm_buffer_object.
* @new_mem: struct ttm_resource indicating where to move.
@@ -253,13 +253,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
* Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
* by the caller to be idle. Typically used after memcpy buffer moves.
*/
-static inline void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
- struct ttm_resource *new_mem)
-{
- int ret = ttm_bo_move_accel_cleanup(bo, NULL, true, false, new_mem);
-
- WARN_ON(ret);
-}
+void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem);
/**
* ttm_bo_pipeline_gutting.
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 314f73de4280..f82fee18c07f 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -215,8 +215,7 @@ struct ttm_lru_bulk_move_pos {
/**
* struct ttm_lru_bulk_move
*
- * @tt: first/last lru entry for resources in the TT domain
- * @vram: first/last lru entry for resources in the VRAM domain
+ * @pos: first/last lru entry for resources in each domain/priority
*
* Container for the current bulk move state. Should be used with
* ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move().
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 6fb91956ab8d..71731796c8c3 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -393,27 +393,30 @@ struct dma_buf {
* e.g. exposed in `Implicit Fence Poll Support`_ must follow the
* below rules.
*
- * - Drivers must add a shared fence through dma_resv_add_shared_fence()
- * for anything the userspace API considers a read access. This highly
- * depends upon the API and window system.
+ * - Drivers must add a read fence through dma_resv_add_fence() with the
+ * DMA_RESV_USAGE_READ flag for anything the userspace API considers a
+ * read access. This highly depends upon the API and window system.
*
- * - Similarly drivers must set the exclusive fence through
- * dma_resv_add_excl_fence() for anything the userspace API considers
- * write access.
+ * - Similarly drivers must add a write fence through
+ * dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for
+ * anything the userspace API considers write access.
*
- * - Drivers may just always set the exclusive fence, since that only
+ * - Drivers may just always add a write fence, since that only
+ * causes unnecessary synchronization, but no correctness issues.
*
* - Some drivers only expose a synchronous userspace API with no
* pipelining across drivers. These do not set any fences for their
* access. An example here is v4l.
*
+ * - Drivers should use dma_resv_usage_rw() when retrieving fences as a
+ * dependency for implicit synchronization.
+ *
* DYNAMIC IMPORTER RULES:
*
* Dynamic importers, see dma_buf_attachment_is_dynamic(), have
* additional constraints on how they set up fences:
*
- * - Dynamic importers must obey the exclusive fence and wait for it to
+ * - Dynamic importers must obey the write fences and wait for them to
* signal before allowing access to the buffer's underlying storage
* through the device.
*
@@ -423,8 +426,9 @@ struct dma_buf {
*
* IMPORTANT:
*
- * All drivers must obey the struct dma_resv rules, specifically the
- * rules for updating and obeying fences.
+ * All drivers and memory management related functions must obey the
+ * struct dma_resv rules, specifically the rules for updating and
+ * obeying fences. See enum dma_resv_usage for further descriptions.
*/
struct dma_resv *resv;
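
Taken together, the rules above boil down to a submission pattern along these lines. A hedged sketch; only the dma_resv calls are from this series, the job fence plumbing is a placeholder:

    static int my_submit(struct dma_buf *buf, struct dma_fence *job_fence,
                         bool write)
    {
            struct dma_resv *resv = buf->resv;
            int ret;

            ret = dma_resv_lock(resv, NULL);
            if (ret)
                    return ret;

            /* Reserve the slot first, adding the fence cannot fail. */
            ret = dma_resv_reserve_fences(resv, 1);
            if (ret)
                    goto unlock;

            /* ... make job_fence wait for the dma_resv_usage_rw(write)
             * fences of resv before submitting ...
             */

            dma_resv_add_fence(resv, job_fence, write ?
                               DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
    unlock:
            dma_resv_unlock(resv);
            return ret;
    }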
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 5fa04d0fccad..c8ccbc94d5d2 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -50,10 +50,86 @@ extern struct ww_class reservation_ww_class;
struct dma_resv_list;
/**
+ * enum dma_resv_usage - how the fences from a dma_resv obj are used
+ *
+ * This enum describes the different use cases for a dma_resv object and
+ * controls which fences are returned when queried.
+ *
+ * An important fact is that the usages are ordered KERNEL < WRITE < READ <
+ * BOOKKEEP: when the dma_resv object is asked for fences of one use case,
+ * the fences of all lower use cases are returned as well.
+ *
+ * For example, when asking for WRITE fences the KERNEL fences are returned
+ * as well. Similarly, when asked for READ fences both the WRITE and KERNEL
+ * fences are returned as well.
+ */
+enum dma_resv_usage {
+ /**
+ * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only.
+ *
+ * This should only be used for things like copying or clearing memory
+ * with a DMA hardware engine for the purpose of kernel memory
+ * management.
+ *
+ * Drivers must *always* wait for these fences before accessing the
+ * resource protected by the dma_resv object. The only exception is
+ * when the resource is known to be locked down in place because it
+ * was pinned previously.
+ */
+ DMA_RESV_USAGE_KERNEL,
+
+ /**
+ * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
+ *
+ * This should only be used for userspace command submissions which add
+ * an implicit write dependency.
+ */
+ DMA_RESV_USAGE_WRITE,
+
+ /**
+ * @DMA_RESV_USAGE_READ: Implicit read synchronization.
+ *
+ * This should only be used for userspace command submissions which add
+ * an implicit read dependency.
+ */
+ DMA_RESV_USAGE_READ,
+
+ /**
+ * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
+ *
+ * This should be used by submissions which don't want to participate in
+ * implicit synchronization.
+ *
+ * The most common cases are preemption fences as well as page table
+ * updates and their TLB flushes.
+ */
+ DMA_RESV_USAGE_BOOKKEEP
+};
+
+/**
+ * dma_resv_usage_rw - helper for implicit sync
+ * @write: true if we create a new implicit sync write
+ *
+ * This returns the implicit synchronization usage for write or read accesses,
+ * see enum dma_resv_usage and &dma_buf.resv.
+ */
+static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
+{
+ /* This looks confusing at first sight, but is indeed correct.
+ *
+ * The rationale is that new write operations need to wait for the
+ * existing read and write operations to finish.
+ * But a new read operation only needs to wait for the existing write
+ * operations to finish.
+ */
+ return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
+}
+
+/**
* struct dma_resv - a reservation object manages fences for a buffer
*
- * There are multiple uses for this, with sometimes slightly different rules in
- * how the fence slots are used.
+ * This is a container for dma_fence objects which needs to handle multiple use
+ * cases.
*
* One use is to synchronize cross-driver access to a struct dma_buf, either for
* dynamic buffer management or just to handle implicit synchronization between
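
The ordering introduced in the hunk above is inclusive: asking for one usage also returns every lower one. A hedged illustration using the query functions declared later in this header; resv stands for some dma_resv object:

    /* Waits for KERNEL and WRITE fences only. */
    long t1 = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_WRITE,
                                    true, MAX_SCHEDULE_TIMEOUT);

    /* Waits for KERNEL, WRITE and READ fences. */
    long t2 = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ,
                                    true, MAX_SCHEDULE_TIMEOUT);

    /* A new reader only waits for writers ... */
    enum dma_resv_usage r = dma_resv_usage_rw(false); /* DMA_RESV_USAGE_WRITE */
    /* ... while a new writer waits for readers and writers. */
    enum dma_resv_usage w = dma_resv_usage_rw(true);  /* DMA_RESV_USAGE_READ */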
@@ -80,50 +156,16 @@ struct dma_resv {
struct ww_mutex lock;
/**
- * @seq:
- *
- * Sequence count for managing RCU read-side synchronization, allows
- * read-only access to @fence_excl and @fence while ensuring we take a
- * consistent snapshot.
- */
- seqcount_ww_mutex_t seq;
-
- /**
- * @fence_excl:
+ * @fences:
*
- * The exclusive fence, if there is one currently.
+ * Array of fences which were added to the dma_resv object.
*
- * To guarantee that no fences are lost, this new fence must signal
- * only after the previous exclusive fence has signalled. If
- * semantically only a new access is added without actually treating the
- * previous one as a dependency the exclusive fences can be strung
- * together using struct dma_fence_chain.
- *
- * Note that actual semantics of what an exclusive or shared fence mean
- * is defined by the user, for reservation objects shared across drivers
- * see &dma_buf.resv.
- */
- struct dma_fence __rcu *fence_excl;
-
- /**
- * @fence:
- *
- * List of current shared fences.
- *
- * There are no ordering constraints of shared fences against the
- * exclusive fence slot. If a waiter needs to wait for all access, it
- * has to wait for both sets of fences to signal.
- *
- * A new fence is added by calling dma_resv_add_shared_fence(). Since
- * this often needs to be done past the point of no return in command
+ * A new fence is added by calling dma_resv_add_fence(). Since this
+ * often needs to be done past the point of no return in command
* submission it cannot fail, and therefore sufficient slots need to be
* reserved by calling dma_resv_reserve_fences().
- *
- * Note that actual semantics of what an exclusive or shared fence mean
- * is defined by the user, for reservation objects shared across drivers
- * see &dma_buf.resv.
*/
- struct dma_resv_list __rcu *fence;
+ struct dma_resv_list __rcu *fences;
};
/**
@@ -142,14 +184,14 @@ struct dma_resv_iter {
/** @obj: The dma_resv object we iterate over */
struct dma_resv *obj;
- /** @all_fences: If all fences should be returned */
- bool all_fences;
+ /** @usage: Return fences with this usage or lower. */
+ enum dma_resv_usage usage;
/** @fence: the currently handled fence */
struct dma_fence *fence;
- /** @seq: sequence number to check for modifications */
- unsigned int seq;
+ /** @fence_usage: the usage of the current fence */
+ enum dma_resv_usage fence_usage;
/** @index: index into the shared fences */
unsigned int index;
@@ -157,8 +199,8 @@ struct dma_resv_iter {
/** @fences: the shared fences; private, *MUST* not dereference */
struct dma_resv_list *fences;
- /** @shared_count: number of shared fences */
- unsigned int shared_count;
+ /** @num_fences: number of fences */
+ unsigned int num_fences;
/** @is_restarted: true if this is the first returned fence */
bool is_restarted;
@@ -173,14 +215,14 @@ struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
* dma_resv_iter_begin - initialize a dma_resv_iter object
* @cursor: The dma_resv_iter object to initialize
* @obj: The dma_resv object which we want to iterate over
- * @all_fences: If all fences should be returned or just the exclusive one
+ * @usage: controls which fences to include, see enum dma_resv_usage.
*/
static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
struct dma_resv *obj,
- bool all_fences)
+ enum dma_resv_usage usage)
{
cursor->obj = obj;
- cursor->all_fences = all_fences;
+ cursor->usage = usage;
cursor->fence = NULL;
}
@@ -197,14 +239,15 @@ static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
}
/**
- * dma_resv_iter_is_exclusive - test if the current fence is the exclusive one
+ * dma_resv_iter_usage - Return the usage of the current fence
* @cursor: the cursor of the current position
*
- * Returns true if the currently returned fence is the exclusive one.
+ * Returns the usage of the currently processed fence.
*/
-static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor)
+static inline enum dma_resv_usage
+dma_resv_iter_usage(struct dma_resv_iter *cursor)
{
- return cursor->index == 0;
+ return cursor->fence_usage;
}
/**
@@ -241,7 +284,7 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
* dma_resv_for_each_fence - fence iterator
* @cursor: a struct dma_resv_iter pointer
* @obj: a dma_resv object pointer
- * @all_fences: true if all fences should be returned
+ * @usage: controls which fences to return
* @fence: the current fence
*
* Iterate over the fences in a struct dma_resv object while holding the
@@ -250,8 +293,8 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
* valid as long as the lock is held and so no extra reference to the fence is
* taken.
*/
-#define dma_resv_for_each_fence(cursor, obj, all_fences, fence) \
- for (dma_resv_iter_begin(cursor, obj, all_fences), \
+#define dma_resv_for_each_fence(cursor, obj, usage, fence) \
+ for (dma_resv_iter_begin(cursor, obj, usage), \
fence = dma_resv_iter_first(cursor); fence; \
fence = dma_resv_iter_next(cursor))
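
With the usage parameter, a locked iteration that collects dependencies then looks roughly like this (hedged sketch; the dependency bookkeeping is a placeholder):

    struct dma_resv_iter cursor;
    struct dma_fence *fence;

    dma_resv_assert_held(resv);
    dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, fence) {
            /* dma_resv_iter_usage() reports how this fence was added. */
            if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_WRITE)
                    pr_debug("write fence from context %llu\n",
                             fence->context);
            /* ... record fence as a job dependency ... */
    }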
@@ -259,9 +302,9 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
#ifdef CONFIG_DEBUG_MUTEXES
-void dma_resv_reset_shared_max(struct dma_resv *obj);
+void dma_resv_reset_max_fences(struct dma_resv *obj);
#else
-static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {}
+static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
#endif
/**
@@ -407,25 +450,26 @@ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
*/
static inline void dma_resv_unlock(struct dma_resv *obj)
{
- dma_resv_reset_shared_max(obj);
+ dma_resv_reset_max_fences(obj);
ww_mutex_unlock(&obj->lock);
}
void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+ enum dma_resv_usage usage);
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
- struct dma_fence *fence);
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-int dma_resv_get_fences(struct dma_resv *obj, bool write,
+ struct dma_fence *fence,
+ enum dma_resv_usage usage);
+int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
unsigned int *num_fences, struct dma_fence ***fences);
-int dma_resv_get_singleton(struct dma_resv *obj, bool write,
+int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
struct dma_fence **fence);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
- unsigned long timeout);
-bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+ bool intr, unsigned long timeout);
+bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
#endif /* _LINUX_RESERVATION_H */
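
As a rough conversion guide implied by the changed declarations (hedged; the usage actually chosen per call site is a driver decision and individual call sites in this series may differ):

    /*
     *   dma_resv_add_excl_fence(obj, f)
     *       -> dma_resv_add_fence(obj, f, DMA_RESV_USAGE_WRITE)
     *   dma_resv_add_shared_fence(obj, f)
     *       -> dma_resv_add_fence(obj, f, DMA_RESV_USAGE_READ)
     *   dma_resv_wait_timeout(obj, wait_all == true, intr, timeout)
     *       -> dma_resv_wait_timeout(obj, DMA_RESV_USAGE_READ, intr, timeout)
     *   dma_resv_test_signaled(obj, test_all == false)
     *       -> dma_resv_test_signaled(obj, DMA_RESV_USAGE_WRITE)
     */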
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 9a77ab615c36..f95da1af9ff6 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -450,7 +450,6 @@ struct fb_info {
struct fb_var_screeninfo var; /* Current var */
struct fb_fix_screeninfo fix; /* Current fix */
struct fb_monspecs monspecs; /* Current Monitor specs */
- struct work_struct queue; /* Framebuffer event queue */
struct fb_pixmap pixmap; /* Image hardware mapper */
struct fb_pixmap sprite; /* Cursor hardware mapper */
struct fb_cmap cmap; /* Current cmap */
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 37ded6b8fee6..3926e9027947 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -17,7 +17,6 @@
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
-#include <linux/ww_mutex.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
@@ -164,7 +163,7 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
* static initializer or init function. This enables lockdep to validate
* that the write side critical section is properly serialized.
*
- * LOCKNAME: raw_spinlock, spinlock, rwlock, mutex, or ww_mutex.
+ * LOCKNAME: raw_spinlock, spinlock, rwlock or mutex
*/
/*
@@ -184,7 +183,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
#define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock)
#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock)
#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex)
-#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex)
/*
* SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
@@ -277,7 +275,6 @@ SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_s
SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock))
SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock))
SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock))
-SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mutex, ww_mutex_lock(s->lock, NULL))
/*
* SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
@@ -304,8 +301,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
__seqprop_case((s), raw_spinlock, prop), \
__seqprop_case((s), spinlock, prop), \
__seqprop_case((s), rwlock, prop), \
- __seqprop_case((s), mutex, prop), \
- __seqprop_case((s), ww_mutex, prop))
+ __seqprop_case((s), mutex, prop))
#define seqprop_ptr(s) __seqprop(s, ptr)
#define seqprop_sequence(s) __seqprop(s, sequence)
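
With the ww_mutex variant removed (dma-resv no longer needs it), writers that want a sleepable lock use one of the remaining variants, e.g. seqcount_mutex_t. A hedged sketch with placeholder names:

    static DEFINE_MUTEX(my_lock);
    static seqcount_mutex_t my_seq;
    static u64 my_value;

    static void my_init(void)
    {
            seqcount_mutex_init(&my_seq, &my_lock);
    }

    static void my_write(u64 v)
    {
            mutex_lock(&my_lock);
            write_seqcount_begin(&my_seq);
            my_value = v;
            write_seqcount_end(&my_seq);
            mutex_unlock(&my_lock);
    }

    static u64 my_read(void)
    {
            unsigned int seq;
            u64 v;

            do {
                    seq = read_seqcount_begin(&my_seq);
                    v = my_value;
            } while (read_seqcount_retry(&my_seq, seq));
            return v;
    }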
diff --git a/kernel/futex/futex.h b/kernel/futex/futex.h
index c264cbeab71c..b5379c0e6d6d 100644
--- a/kernel/futex/futex.h
+++ b/kernel/futex/futex.h
@@ -3,6 +3,7 @@
#define _FUTEX_H
#include <linux/futex.h>
+#include <linux/rtmutex.h>
#include <linux/sched/wake_q.h>
#ifdef CONFIG_PREEMPT_RT