From 8ff5446a7ca47cffec206af679763a2e4cb1199a Mon Sep 17 00:00:00 2001
From: Thomas Zimmermann
Date: Thu, 28 Jan 2021 14:31:23 +0100
Subject: drm/i915: Remove references to struct drm_device.pdev

Using struct drm_device.pdev is deprecated. Convert i915 to struct
drm_device.dev. No functional changes.

v6:
	* also remove assignment in selftests/ in a later patch (Chris)
v5:
	* remove assignment in later patch (Chris)
v3:
	* rebased
v2:
	* move gt/ and gvt/ changes into separate patches

Signed-off-by: Thomas Zimmermann
Reviewed-by: Chris Wilson
Cc: Jani Nikula
Cc: Joonas Lahtinen
Cc: Rodrigo Vivi
Signed-off-by: Jani Nikula
Link: https://patchwork.freedesktop.org/patch/msgid/20210128133127.2311-2-tzimmermann@suse.de
---
 drivers/gpu/drm/i915/selftests/mock_gtt.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/i915/selftests')

diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 7270fc8ca801..5c7ae40bba63 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -74,7 +74,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
 	ppgtt->vm.i915 = i915;
 	ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
 	ppgtt->vm.file = ERR_PTR(-ENODEV);
-	ppgtt->vm.dma = &i915->drm.pdev->dev;
+	ppgtt->vm.dma = i915->drm.dev;

 	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
--
cgit v1.2.3


From c471748dc742c207a5461be924538c286d66be3e Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst
Date: Tue, 23 Mar 2021 16:49:56 +0100
Subject: drm/i915: Move HAS_STRUCT_PAGE to obj->flags
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We want to remove the changing of ops structure for attaching phys
pages, so we need to kill off HAS_STRUCT_PAGE from ops->flags, and
put it in the bo.

This will remove a potential race of dereferencing the wrong obj->ops
without ww mutex held.
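For illustration, the shape of the change is easiest to see in the
i915_gem_object_has_struct_page() helper; the lines below are taken from the
i915_gem_object.h hunk further down and shown here only as a before/after
summary:

	/* before: the bit lived in the shared, constant ops->flags */
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);

	/* after: the bit lives in the object itself, passed at init time */
	return obj->flags & I915_BO_ALLOC_STRUCT_PAGE;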
Signed-off-by: Maarten Lankhorst Reviewed-by: Thomas Hellström [danvet: apply with wiggle] Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-8-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_internal.c | 6 +++--- drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 4 ++-- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 7 +++---- drivers/gpu/drm/i915/gem/i915_gem_object.c | 4 +++- drivers/gpu/drm/i915/gem/i915_gem_object.h | 5 +++-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 10 ++++++---- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 5 ++--- drivers/gpu/drm/i915/gem/i915_gem_phys.c | 2 ++ drivers/gpu/drm/i915/gem/i915_gem_region.c | 4 +--- drivers/gpu/drm/i915/gem/i915_gem_region.h | 3 +-- drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 8 ++++---- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 4 ++-- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 6 +++--- drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c | 4 ++-- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 10 +++++----- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 11 ++++------- drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c | 12 ++++++++++++ drivers/gpu/drm/i915/gvt/dmabuf.c | 2 +- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 2 +- drivers/gpu/drm/i915/selftests/mock_region.c | 4 ++-- 21 files changed, 63 insertions(+), 52 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 04e9c04545ad..36e3c2765f4c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -258,7 +258,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, } drm_gem_private_object_init(dev, &obj->base, dma_buf->size); - i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class); + i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class, 0); obj->base.import_attach = attach; obj->base.resv = dma_buf->resv; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index ad22f42541bd..21cc40897ca8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -138,8 +138,7 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj, static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = { .name = "i915_gem_object_internal", - .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | - I915_GEM_OBJECT_IS_SHRINKABLE, + .flags = I915_GEM_OBJECT_IS_SHRINKABLE, .get_pages = i915_gem_object_get_pages_internal, .put_pages = i915_gem_object_put_pages_internal, }; @@ -178,7 +177,8 @@ i915_gem_object_create_internal(struct drm_i915_private *i915, return ERR_PTR(-ENOMEM); drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class); + i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class, + I915_BO_ALLOC_STRUCT_PAGE); /* * Mark the object as volatile, such that the pages are marked as diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c index 194f35342710..ce1c83c13d05 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c @@ -40,13 +40,13 @@ int __i915_gem_lmem_object_init(struct intel_memory_region *mem, struct drm_i915_private *i915 = mem->i915; 
drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class); + i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class, flags); obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT; i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); - i915_gem_object_init_memory_region(obj, mem, flags); + i915_gem_object_init_memory_region(obj, mem); return 0; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index ec28a6cde49b..c0034d811e50 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -251,7 +251,7 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) goto out; iomap = -1; - if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) { + if (!i915_gem_object_has_struct_page(obj)) { iomap = obj->mm.region->iomap.base; iomap -= obj->mm.region->region.start; } @@ -653,9 +653,8 @@ __assign_mmap_offset(struct drm_file *file, } if (mmap_type != I915_MMAP_TYPE_GTT && - !i915_gem_object_type_has(obj, - I915_GEM_OBJECT_HAS_STRUCT_PAGE | - I915_GEM_OBJECT_HAS_IOMEM)) { + !i915_gem_object_has_struct_page(obj) && + !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM)) { err = -ENODEV; goto out; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 70f798405f7f..b7fe7e4f69e4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -60,7 +60,7 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj) void i915_gem_object_init(struct drm_i915_gem_object *obj, const struct drm_i915_gem_object_ops *ops, - struct lock_class_key *key) + struct lock_class_key *key, unsigned flags) { __mutex_init(&obj->mm.lock, ops->name ?: "obj->mm.lock", key); @@ -78,6 +78,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, init_rcu_head(&obj->rcu); obj->ops = ops; + GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS); + obj->flags = flags; obj->mm.madv = I915_MADV_WILLNEED; INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 7e726405df97..25c785e388fc 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -23,7 +23,8 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj); void i915_gem_object_init(struct drm_i915_gem_object *obj, const struct drm_i915_gem_object_ops *ops, - struct lock_class_key *key); + struct lock_class_key *key, + unsigned alloc_flags); struct drm_i915_gem_object * i915_gem_object_create_shmem(struct drm_i915_private *i915, resource_size_t size); @@ -215,7 +216,7 @@ i915_gem_object_type_has(const struct drm_i915_gem_object *obj, static inline bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) { - return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE); + return obj->flags & I915_BO_ALLOC_STRUCT_PAGE; } static inline bool diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 0438e00d4ca7..33b54727e306 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -30,7 +30,6 @@ struct i915_lut_handle { struct drm_i915_gem_object_ops { unsigned int flags; -#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0) #define I915_GEM_OBJECT_HAS_IOMEM BIT(1) #define 
I915_GEM_OBJECT_IS_SHRINKABLE BIT(2) #define I915_GEM_OBJECT_IS_PROXY BIT(3) @@ -171,9 +170,12 @@ struct drm_i915_gem_object { unsigned long flags; #define I915_BO_ALLOC_CONTIGUOUS BIT(0) #define I915_BO_ALLOC_VOLATILE BIT(1) -#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE) -#define I915_BO_READONLY BIT(2) -#define I915_TILING_QUIRK_BIT 3 /* unknown swizzling; do not release! */ +#define I915_BO_ALLOC_STRUCT_PAGE BIT(2) +#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \ + I915_BO_ALLOC_VOLATILE | \ + I915_BO_ALLOC_STRUCT_PAGE) +#define I915_BO_READONLY BIT(3) +#define I915_TILING_QUIRK_BIT 4 /* unknown swizzling; do not release! */ /* * Is the object to be mapped as read-only to the GPU diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index d44b72dd13fe..bf61b88a2113 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -333,13 +333,12 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, enum i915_map_type type) { enum i915_map_type has_type; - unsigned int flags; bool pinned; void *ptr; int err; - flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM; - if (!i915_gem_object_type_has(obj, flags)) + if (!i915_gem_object_has_struct_page(obj) && + !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM)) return ERR_PTR(-ENXIO); err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index 01fe89afe8c0..d1bf543d111a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -240,6 +240,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) pages = __i915_gem_object_unset_pages(obj); obj->ops = &i915_gem_phys_ops; + obj->flags &= ~I915_BO_ALLOC_STRUCT_PAGE; err = ____i915_gem_object_get_pages(obj); if (err) @@ -258,6 +259,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) err_xfer: obj->ops = &i915_gem_shmem_ops; + obj->flags |= I915_BO_ALLOC_STRUCT_PAGE; if (!IS_ERR_OR_NULL(pages)) { unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c index 3e3dad22a683..5e107322a299 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_region.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c @@ -106,13 +106,11 @@ err_free_sg: } void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj, - struct intel_memory_region *mem, - unsigned long flags) + struct intel_memory_region *mem) { INIT_LIST_HEAD(&obj->mm.blocks); obj->mm.region = intel_memory_region_get(mem); - obj->flags |= flags; if (obj->base.size <= mem->min_page_size) obj->flags |= I915_BO_ALLOC_CONTIGUOUS; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h index f2ff6f8bff74..ebddc86d78f7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_region.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h @@ -17,8 +17,7 @@ void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj, struct sg_table *pages); void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj, - struct intel_memory_region *mem, - unsigned long flags); + struct intel_memory_region *mem); void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj); struct drm_i915_gem_object * diff --git 
a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 680b370a8ef3..bb82b3bc8830 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -430,8 +430,7 @@ static void shmem_release(struct drm_i915_gem_object *obj) const struct drm_i915_gem_object_ops i915_gem_shmem_ops = { .name = "i915_gem_object_shmem", - .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | - I915_GEM_OBJECT_IS_SHRINKABLE, + .flags = I915_GEM_OBJECT_IS_SHRINKABLE, .get_pages = shmem_get_pages, .put_pages = shmem_put_pages, @@ -491,7 +490,8 @@ static int shmem_object_init(struct intel_memory_region *mem, mapping_set_gfp_mask(mapping, mask); GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); - i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class); + i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, + I915_BO_ALLOC_STRUCT_PAGE); obj->write_domain = I915_GEM_DOMAIN_CPU; obj->read_domains = I915_GEM_DOMAIN_CPU; @@ -515,7 +515,7 @@ static int shmem_object_init(struct intel_memory_region *mem, i915_gem_object_set_cache_coherency(obj, cache_level); - i915_gem_object_init_memory_region(obj, mem, 0); + i915_gem_object_init_memory_region(obj, mem); return 0; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index a1e197a6e999..51c2ef6bd8ac 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -630,7 +630,7 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem, int err; drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size); - i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class); + i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, 0); obj->stolen = stolen; obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; @@ -641,7 +641,7 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem, if (err) return err; - i915_gem_object_init_memory_region(obj, mem, 0); + i915_gem_object_init_memory_region(obj, mem); return 0; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index f2eaed6aca3d..30edc5a0a54e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -702,8 +702,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj) static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { .name = "i915_gem_object_userptr", - .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | - I915_GEM_OBJECT_IS_SHRINKABLE | + .flags = I915_GEM_OBJECT_IS_SHRINKABLE | I915_GEM_OBJECT_NO_MMAP | I915_GEM_OBJECT_ASYNC_CANCEL, .get_pages = i915_gem_userptr_get_pages, @@ -810,7 +809,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, return -ENOMEM; drm_gem_private_object_init(dev, &obj->base, args->user_size); - i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class); + i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class, + I915_BO_ALLOC_STRUCT_PAGE); obj->read_domains = I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c index 2fb501a78a85..0c8ecfdf5405 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c @@ -89,7 +89,6 @@ static void huge_put_pages(struct 
drm_i915_gem_object *obj, static const struct drm_i915_gem_object_ops huge_ops = { .name = "huge-gem", - .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE, .get_pages = huge_get_pages, .put_pages = huge_put_pages, }; @@ -115,7 +114,8 @@ huge_gem_object(struct drm_i915_private *i915, return ERR_PTR(-ENOMEM); drm_gem_private_object_init(&i915->drm, &obj->base, dma_size); - i915_gem_object_init(obj, &huge_ops, &lock_class); + i915_gem_object_init(obj, &huge_ops, &lock_class, + I915_BO_ALLOC_STRUCT_PAGE); obj->read_domains = I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index aacf4856ccb4..6c2241b7387b 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -140,8 +140,7 @@ static void put_huge_pages(struct drm_i915_gem_object *obj, static const struct drm_i915_gem_object_ops huge_page_ops = { .name = "huge-gem", - .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | - I915_GEM_OBJECT_IS_SHRINKABLE, + .flags = I915_GEM_OBJECT_IS_SHRINKABLE, .get_pages = get_huge_pages, .put_pages = put_huge_pages, }; @@ -168,7 +167,8 @@ huge_pages_object(struct drm_i915_private *i915, return ERR_PTR(-ENOMEM); drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &huge_page_ops, &lock_class); + i915_gem_object_init(obj, &huge_page_ops, &lock_class, + I915_BO_ALLOC_STRUCT_PAGE); i915_gem_object_set_volatile(obj); @@ -319,9 +319,9 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single) drm_gem_private_object_init(&i915->drm, &obj->base, size); if (single) - i915_gem_object_init(obj, &fake_ops_single, &lock_class); + i915_gem_object_init(obj, &fake_ops_single, &lock_class, 0); else - i915_gem_object_init(obj, &fake_ops, &lock_class); + i915_gem_object_init(obj, &fake_ops, &lock_class, 0); i915_gem_object_set_volatile(obj); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index d429c7643ff2..44908c68e331 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -835,9 +835,8 @@ static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type) return false; if (type != I915_MMAP_TYPE_GTT && - !i915_gem_object_type_has(obj, - I915_GEM_OBJECT_HAS_STRUCT_PAGE | - I915_GEM_OBJECT_HAS_IOMEM)) + !i915_gem_object_has_struct_page(obj) && + !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM)) return false; return true; @@ -977,10 +976,8 @@ static const char *repr_mmap_type(enum i915_mmap_type type) static bool can_access(const struct drm_i915_gem_object *obj) { - unsigned int flags = - I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM; - - return i915_gem_object_type_has(obj, flags); + return i915_gem_object_has_struct_page(obj) || + i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM); } static int __igt_mmap_access(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c index 8cee68c6a6dc..fb6a17701310 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c @@ -25,12 +25,24 @@ static int mock_phys_object(void *arg) goto out; } + if (!i915_gem_object_has_struct_page(obj)) { + err = -EINVAL; + pr_err("shmem has no struct page\n"); + goto out_obj; + } + err = 
i915_gem_object_attach_phys(obj, PAGE_SIZE); if (err) { pr_err("i915_gem_object_attach_phys failed, err=%d\n", err); goto out_obj; } + if (i915_gem_object_has_struct_page(obj)) { + err = -EINVAL; + pr_err("shmem has a struct page\n"); + goto out_obj; + } + if (obj->ops != &i915_gem_phys_ops) { pr_err("i915_gem_object_attach_phys did not create a phys object\n"); err = -EINVAL; diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index c3eb3838fe88..d4f883f35b95 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -218,7 +218,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, drm_gem_private_object_init(dev, &obj->base, roundup(info->size, PAGE_SIZE)); - i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class); + i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class, 0); i915_gem_object_set_readonly(obj); obj->read_domains = I915_GEM_DOMAIN_GTT; diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index c1adea8765a9..5be6dcf4357e 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -121,7 +121,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size) goto err; drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &fake_ops, &lock_class); + i915_gem_object_init(obj, &fake_ops, &lock_class, 0); i915_gem_object_set_volatile(obj); diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c index 3c6021415274..5d2d010a1e22 100644 --- a/drivers/gpu/drm/i915/selftests/mock_region.c +++ b/drivers/gpu/drm/i915/selftests/mock_region.c @@ -27,13 +27,13 @@ static int mock_object_init(struct intel_memory_region *mem, return -E2BIG; drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class); + i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class, flags); obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); - i915_gem_object_init_memory_region(obj, mem, flags); + i915_gem_object_init_memory_region(obj, mem); return 0; } -- cgit v1.2.3 From c05258889ed4900440de1e283df1d16f91adb3d8 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Tue, 23 Mar 2021 16:50:22 +0100 Subject: drm/i915: Add igt_spinner_pin() to allow for ww locking around spinner. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit By default, we assume that it's called inside igt_create_request to keep existing selftests working, but allow for manual pinning when passing a ww context. 
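For callers that do hold a ww context, the expected shape is the usual
backoff/retry loop. This is only a sketch (spin and ce stand for the
igt_spinner and intel_context used by the test); the implicit path remains
igt_spinner_pin(spin, ce, NULL) from inside igt_spinner_create_request():

	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = igt_spinner_pin(&spin, ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);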
Signed-off-by: Maarten Lankhorst Reviewed-by: Thomas Hellström Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-34-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/i915/selftests/igt_spinner.c | 136 ++++++++++++++++++--------- drivers/gpu/drm/i915/selftests/igt_spinner.h | 5 + 2 files changed, 95 insertions(+), 46 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests') diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 83f6e5f31fb3..cfbbe415b57c 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -12,8 +12,6 @@ int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt) { - unsigned int mode; - void *vaddr; int err; memset(spin, 0, sizeof(*spin)); @@ -24,6 +22,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt) err = PTR_ERR(spin->hws); goto err; } + i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC); spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); if (IS_ERR(spin->obj)) { @@ -31,34 +30,83 @@ int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt) goto err_hws; } - i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC); - vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto err_obj; - } - spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); - - mode = i915_coherent_map_type(gt->i915); - vaddr = i915_gem_object_pin_map(spin->obj, mode); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto err_unpin_hws; - } - spin->batch = vaddr; - return 0; -err_unpin_hws: - i915_gem_object_unpin_map(spin->hws); -err_obj: - i915_gem_object_put(spin->obj); err_hws: i915_gem_object_put(spin->hws); err: return err; } +static void *igt_spinner_pin_obj(struct intel_context *ce, + struct i915_gem_ww_ctx *ww, + struct drm_i915_gem_object *obj, + unsigned int mode, struct i915_vma **vma) +{ + void *vaddr; + int ret; + + *vma = i915_vma_instance(obj, ce->vm, NULL); + if (IS_ERR(*vma)) + return ERR_CAST(*vma); + + ret = i915_gem_object_lock(obj, ww); + if (ret) + return ERR_PTR(ret); + + vaddr = i915_gem_object_pin_map(obj, mode); + + if (!ww) + i915_gem_object_unlock(obj); + + if (IS_ERR(vaddr)) + return vaddr; + + if (ww) + ret = i915_vma_pin_ww(*vma, ww, 0, 0, PIN_USER); + else + ret = i915_vma_pin(*vma, 0, 0, PIN_USER); + + if (ret) { + i915_gem_object_unpin_map(obj); + return ERR_PTR(ret); + } + + return vaddr; +} + +int igt_spinner_pin(struct igt_spinner *spin, + struct intel_context *ce, + struct i915_gem_ww_ctx *ww) +{ + void *vaddr; + + if (spin->ce && WARN_ON(spin->ce != ce)) + return -ENODEV; + spin->ce = ce; + + if (!spin->seqno) { + vaddr = igt_spinner_pin_obj(ce, ww, spin->hws, I915_MAP_WB, &spin->hws_vma); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + + spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); + } + + if (!spin->batch) { + unsigned int mode = + i915_coherent_map_type(spin->gt->i915); + + vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + + spin->batch = vaddr; + } + + return 0; +} + static unsigned int seqno_offset(u64 fence) { return offset_in_page(sizeof(u32) * fence); @@ -103,27 +151,18 @@ igt_spinner_create_request(struct igt_spinner *spin, if (!intel_engine_can_store_dword(ce->engine)) return ERR_PTR(-ENODEV); - vma = i915_vma_instance(spin->obj, ce->vm, NULL); - if (IS_ERR(vma)) - return ERR_CAST(vma); - - hws = 
i915_vma_instance(spin->hws, ce->vm, NULL); - if (IS_ERR(hws)) - return ERR_CAST(hws); + if (!spin->batch) { + err = igt_spinner_pin(spin, ce, NULL); + if (err) + return ERR_PTR(err); + } - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - return ERR_PTR(err); - - err = i915_vma_pin(hws, 0, 0, PIN_USER); - if (err) - goto unpin_vma; + hws = spin->hws_vma; + vma = spin->batch_vma; rq = intel_context_create_request(ce); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto unpin_hws; - } + if (IS_ERR(rq)) + return ERR_CAST(rq); err = move_to_active(vma, rq, 0); if (err) @@ -186,10 +225,6 @@ cancel_rq: i915_request_set_error_once(rq, err); i915_request_add(rq); } -unpin_hws: - i915_vma_unpin(hws); -unpin_vma: - i915_vma_unpin(vma); return err ? ERR_PTR(err) : rq; } @@ -203,6 +238,9 @@ hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq) void igt_spinner_end(struct igt_spinner *spin) { + if (!spin->batch) + return; + *spin->batch = MI_BATCH_BUFFER_END; intel_gt_chipset_flush(spin->gt); } @@ -211,10 +249,16 @@ void igt_spinner_fini(struct igt_spinner *spin) { igt_spinner_end(spin); - i915_gem_object_unpin_map(spin->obj); + if (spin->batch) { + i915_vma_unpin(spin->batch_vma); + i915_gem_object_unpin_map(spin->obj); + } i915_gem_object_put(spin->obj); - i915_gem_object_unpin_map(spin->hws); + if (spin->seqno) { + i915_vma_unpin(spin->hws_vma); + i915_gem_object_unpin_map(spin->hws); + } i915_gem_object_put(spin->hws); } diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h index ec62c9ef320b..fbe5b1625b05 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.h +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h @@ -20,11 +20,16 @@ struct igt_spinner { struct intel_gt *gt; struct drm_i915_gem_object *hws; struct drm_i915_gem_object *obj; + struct intel_context *ce; + struct i915_vma *hws_vma, *batch_vma; u32 *batch; void *seqno; }; int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt); +int igt_spinner_pin(struct igt_spinner *spin, + struct intel_context *ce, + struct i915_gem_ww_ctx *ww); void igt_spinner_fini(struct igt_spinner *spin); struct i915_request * -- cgit v1.2.3 From aa8b70be896e114168c0d5084633193959a1b195 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Tue, 23 Mar 2021 16:50:46 +0100 Subject: drm/i915/selftests: Prepare i915_request tests for obj->mm.lock removal MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Straightforward conversion by using unlocked versions. 
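The conversion replaces pin_map()/pin_pages() calls with their _unlocked
counterparts, as in the hunks below. As a rough sketch of what an _unlocked
wrapper amounts to (an illustration, not the exact i915 implementation), it
simply takes the single-object lock around the locked call, so tests that are
not part of a ww transaction keep working once the locked variants require
the object lock to be held:

	void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
					       enum i915_map_type type)
	{
		void *ptr;

		i915_gem_object_lock(obj, NULL);
		ptr = i915_gem_object_pin_map(obj, type);
		i915_gem_object_unlock(obj);

		return ptr;
	}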
Signed-off-by: Maarten Lankhorst Reviewed-by: Thomas Hellström Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-58-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/i915/selftests/i915_request.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests') diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index d2a678a2497e..9a9e92a775c8 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -620,7 +620,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915) if (IS_ERR(obj)) return ERR_CAST(obj); - cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); + cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); goto err; @@ -782,7 +782,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915) if (err) goto err; - cmd = i915_gem_object_pin_map(obj, I915_MAP_WC); + cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); goto err; @@ -817,7 +817,7 @@ static int recursive_batch_resolve(struct i915_vma *batch) { u32 *cmd; - cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC); + cmd = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC); if (IS_ERR(cmd)) return PTR_ERR(cmd); @@ -1070,8 +1070,8 @@ out_request: if (!request[idx]) break; - cmd = i915_gem_object_pin_map(request[idx]->batch->obj, - I915_MAP_WC); + cmd = i915_gem_object_pin_map_unlocked(request[idx]->batch->obj, + I915_MAP_WC); if (!IS_ERR(cmd)) { *cmd = MI_BATCH_BUFFER_END; -- cgit v1.2.3 From e70a27d7612596d6e8bade18a1094a3213cd012e Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Tue, 23 Mar 2021 16:50:47 +0100 Subject: drm/i915/selftests: Prepare memory region tests for obj->mm.lock removal MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the unlocked variants for pin_map and pin_pages, and add lock around unpinning/putting pages. 
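After this change the teardown paths in these tests all follow one pattern,
shown here as a summary of the close_objects()/igt_object_release() hunks
below:

	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_pages(obj);
	__i915_gem_object_put_pages(obj);
	i915_gem_object_unlock(obj);

	list_del(&obj->st_link);
	i915_gem_object_put(obj);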
Signed-off-by: Maarten Lankhorst Reviewed-by: Thomas Hellström Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-59-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/i915/selftests/intel_memory_region.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests') diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index ce7adfa3bca0..15ccd28f938e 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -31,10 +31,12 @@ static void close_objects(struct intel_memory_region *mem, struct drm_i915_gem_object *obj, *on; list_for_each_entry_safe(obj, on, objects, st_link) { + i915_gem_object_lock(obj, NULL); if (i915_gem_object_has_pinned_pages(obj)) i915_gem_object_unpin_pages(obj); /* No polluting the memory region between tests */ __i915_gem_object_put_pages(obj); + i915_gem_object_unlock(obj); list_del(&obj->st_link); i915_gem_object_put(obj); } @@ -69,7 +71,7 @@ static int igt_mock_fill(void *arg) break; } - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages_unlocked(obj); if (err) { i915_gem_object_put(obj); break; @@ -109,7 +111,7 @@ igt_object_create(struct intel_memory_region *mem, if (IS_ERR(obj)) return obj; - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages_unlocked(obj); if (err) goto put; @@ -123,8 +125,10 @@ put: static void igt_object_release(struct drm_i915_gem_object *obj) { + i915_gem_object_lock(obj, NULL); i915_gem_object_unpin_pages(obj); __i915_gem_object_put_pages(obj); + i915_gem_object_unlock(obj); list_del(&obj->st_link); i915_gem_object_put(obj); } @@ -433,7 +437,7 @@ static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) if (err) return err; - ptr = i915_gem_object_pin_map(obj, I915_MAP_WC); + ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); if (IS_ERR(ptr)) return PTR_ERR(ptr); @@ -538,7 +542,7 @@ static int igt_lmem_create(void *arg) if (IS_ERR(obj)) return PTR_ERR(obj); - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages_unlocked(obj); if (err) goto out_put; @@ -577,7 +581,7 @@ static int igt_lmem_write_gpu(void *arg) goto out_file; } - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages_unlocked(obj); if (err) goto out_put; @@ -649,7 +653,7 @@ static int igt_lmem_write_cpu(void *arg) if (IS_ERR(obj)) return PTR_ERR(obj); - vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC); + vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); if (IS_ERR(vaddr)) { err = PTR_ERR(vaddr); goto out_put; @@ -753,7 +757,7 @@ create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type, return obj; } - addr = i915_gem_object_pin_map(obj, type); + addr = i915_gem_object_pin_map_unlocked(obj, type); if (IS_ERR(addr)) { i915_gem_object_put(obj); if (PTR_ERR(addr) == -ENXIO) -- cgit v1.2.3 From 480ae79537b28f30ef6e07b7de69a9ae2599daa7 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Tue, 23 Mar 2021 16:50:49 +0100 Subject: drm/i915/selftests: Prepare gtt tests for obj->mm.lock removal MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to lock the global gtt dma_resv, use i915_vm_lock_objects to handle this correctly. Add ww handling for this where required. 
Add the object lock around unpin/put pages, and use the unlocked versions of pin_pages and pin_map where required. Signed-off-by: Maarten Lankhorst Reviewed-by: Thomas Hellström Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-61-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 92 +++++++++++++++++++-------- 1 file changed, 67 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests') diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 5be6dcf4357e..2e4f06eaacc1 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -130,7 +130,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size) obj->cache_level = I915_CACHE_NONE; /* Preallocate the "backing storage" */ - if (i915_gem_object_pin_pages(obj)) + if (i915_gem_object_pin_pages_unlocked(obj)) goto err_obj; i915_gem_object_unpin_pages(obj); @@ -146,6 +146,7 @@ static int igt_ppgtt_alloc(void *arg) { struct drm_i915_private *dev_priv = arg; struct i915_ppgtt *ppgtt; + struct i915_gem_ww_ctx ww; u64 size, last, limit; int err = 0; @@ -171,6 +172,12 @@ static int igt_ppgtt_alloc(void *arg) limit = totalram_pages() << PAGE_SHIFT; limit = min(ppgtt->vm.total, limit); + i915_gem_ww_ctx_init(&ww, false); +retry: + err = i915_vm_lock_objects(&ppgtt->vm, &ww); + if (err) + goto err_ppgtt_cleanup; + /* Check we can allocate the entire range */ for (size = 4096; size <= limit; size <<= 2) { struct i915_vm_pt_stash stash = {}; @@ -215,6 +222,13 @@ static int igt_ppgtt_alloc(void *arg) } err_ppgtt_cleanup: + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); + i915_vm_put(&ppgtt->vm); return err; } @@ -276,7 +290,7 @@ static int lowlevel_hole(struct i915_address_space *vm, GEM_BUG_ON(obj->base.size != BIT_ULL(size)); - if (i915_gem_object_pin_pages(obj)) { + if (i915_gem_object_pin_pages_unlocked(obj)) { i915_gem_object_put(obj); kfree(order); break; @@ -297,20 +311,36 @@ static int lowlevel_hole(struct i915_address_space *vm, if (vm->allocate_va_range) { struct i915_vm_pt_stash stash = {}; + struct i915_gem_ww_ctx ww; + int err; + + i915_gem_ww_ctx_init(&ww, false); +retry: + err = i915_vm_lock_objects(vm, &ww); + if (err) + goto alloc_vm_end; + err = -ENOMEM; if (i915_vm_alloc_pt_stash(vm, &stash, BIT_ULL(size))) - break; - - if (i915_vm_pin_pt_stash(vm, &stash)) { - i915_vm_free_pt_stash(vm, &stash); - break; - } + goto alloc_vm_end; - vm->allocate_va_range(vm, &stash, - addr, BIT_ULL(size)); + err = i915_vm_pin_pt_stash(vm, &stash); + if (!err) + vm->allocate_va_range(vm, &stash, + addr, BIT_ULL(size)); i915_vm_free_pt_stash(vm, &stash); +alloc_vm_end: + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); + + if (err) + break; } mock_vma->pages = obj->mm.pages; @@ -1166,7 +1196,7 @@ static int igt_ggtt_page(void *arg) if (IS_ERR(obj)) return PTR_ERR(obj); - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages_unlocked(obj); if (err) goto out_free; @@ -1333,7 +1363,7 @@ static int igt_gtt_reserve(void *arg) goto out; } - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages_unlocked(obj); if (err) { i915_gem_object_put(obj); goto out; @@ -1385,7 +1415,7 @@ static int igt_gtt_reserve(void *arg) goto out; } - err = i915_gem_object_pin_pages(obj); + err 
= i915_gem_object_pin_pages_unlocked(obj); if (err) { i915_gem_object_put(obj); goto out; @@ -1549,7 +1579,7 @@ static int igt_gtt_insert(void *arg) goto out; } - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages_unlocked(obj); if (err) { i915_gem_object_put(obj); goto out; @@ -1658,7 +1688,7 @@ static int igt_gtt_insert(void *arg) goto out; } - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages_unlocked(obj); if (err) { i915_gem_object_put(obj); goto out; @@ -1829,7 +1859,7 @@ static int igt_cs_tlb(void *arg) goto out_vm; } - batch = i915_gem_object_pin_map(bbe, I915_MAP_WC); + batch = i915_gem_object_pin_map_unlocked(bbe, I915_MAP_WC); if (IS_ERR(batch)) { err = PTR_ERR(batch); goto out_put_bbe; @@ -1845,7 +1875,7 @@ static int igt_cs_tlb(void *arg) } /* Track the execution of each request by writing into different slot */ - batch = i915_gem_object_pin_map(act, I915_MAP_WC); + batch = i915_gem_object_pin_map_unlocked(act, I915_MAP_WC); if (IS_ERR(batch)) { err = PTR_ERR(batch); goto out_put_act; @@ -1892,7 +1922,7 @@ static int igt_cs_tlb(void *arg) goto out_put_out; GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE); - result = i915_gem_object_pin_map(out, I915_MAP_WB); + result = i915_gem_object_pin_map_unlocked(out, I915_MAP_WB); if (IS_ERR(result)) { err = PTR_ERR(result); goto out_put_out; @@ -1908,6 +1938,7 @@ static int igt_cs_tlb(void *arg) while (!__igt_timeout(end_time, NULL)) { struct i915_vm_pt_stash stash = {}; struct i915_request *rq; + struct i915_gem_ww_ctx ww; u64 offset; offset = igt_random_offset(&prng, @@ -1926,19 +1957,30 @@ static int igt_cs_tlb(void *arg) if (err) goto end; + i915_gem_ww_ctx_init(&ww, false); +retry: + err = i915_vm_lock_objects(vm, &ww); + if (err) + goto end_ww; + err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size); if (err) - goto end; + goto end_ww; err = i915_vm_pin_pt_stash(vm, &stash); - if (err) { - i915_vm_free_pt_stash(vm, &stash); - goto end; - } - - vm->allocate_va_range(vm, &stash, offset, chunk_size); + if (!err) + vm->allocate_va_range(vm, &stash, offset, chunk_size); i915_vm_free_pt_stash(vm, &stash); +end_ww: + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); + if (err) + goto end; /* Prime the TLB with the dummy pages */ for (i = 0; i < count; i++) { -- cgit v1.2.3 From a29a22917d4cf14ad4e7a8c4c503629d5a771f06 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Tue, 26 Jan 2021 10:30:19 +0000 Subject: drm/i915/buddy: document the unused header bits The largest possible order is (63-PAGE_SHIFT), given that our min chunk size is PAGE_SIZE. With that we should only need at most 6 bits to represent all possible orders, giving us back 4 bits for other potential uses. Include a simple selftest to verify this. 
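As a worked example: with 4 KiB pages PAGE_SHIFT is 12, so the largest order
is 63 - 12 = 51, and in general (63 - PAGE_SHIFT) can never exceed 63. Orders
therefore fit in the 6-bit GENMASK_ULL(5, 0) field (values 0..63), which
frees up header bits 9:6, named I915_BUDDY_HEADER_UNUSED in the hunk below.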
Signed-off-by: Matthew Auld
Reviewed-by: Chris Wilson
Signed-off-by: Chris Wilson
Link: https://patchwork.freedesktop.org/patch/msgid/20210126103019.177622-1-matthew.auld@intel.com
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/i915/i915_buddy.c           |  3 ++
 drivers/gpu/drm/i915/i915_buddy.h           |  7 +++--
 drivers/gpu/drm/i915/selftests/i915_buddy.c | 48 +++++++++++++++++++++++++++++
 3 files changed, 56 insertions(+), 2 deletions(-)

(limited to 'drivers/gpu/drm/i915/selftests')

diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index 20babbdb297d..3a2f6eecb2fc 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -48,6 +48,8 @@ static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_block *parent
 {
 	struct i915_buddy_block *block;

+	GEM_BUG_ON(order > I915_BUDDY_MAX_ORDER);
+
 	block = kmem_cache_zalloc(global.slab_blocks, GFP_KERNEL);
 	if (!block)
 		return NULL;
@@ -56,6 +58,7 @@ static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_block *parent
 	block->header |= order;
 	block->parent = parent;

+	GEM_BUG_ON(block->header & I915_BUDDY_HEADER_UNUSED);
 	return block;
 }

diff --git a/drivers/gpu/drm/i915/i915_buddy.h b/drivers/gpu/drm/i915/i915_buddy.h
index ed41f3507cdc..9ce5200f4001 100644
--- a/drivers/gpu/drm/i915/i915_buddy.h
+++ b/drivers/gpu/drm/i915/i915_buddy.h
@@ -15,7 +15,9 @@ struct i915_buddy_block {
 #define I915_BUDDY_ALLOCATED	(1 << 10)
 #define I915_BUDDY_FREE		(2 << 10)
 #define I915_BUDDY_SPLIT	(3 << 10)
-#define I915_BUDDY_HEADER_ORDER  GENMASK_ULL(9, 0)
+/* Free to be used, if needed in the future */
+#define I915_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6)
+#define I915_BUDDY_HEADER_ORDER  GENMASK_ULL(5, 0)
 	u64 header;

 	struct i915_buddy_block *left;
@@ -34,7 +36,8 @@ struct i915_buddy_block {
 	struct list_head tmp_link;
 };

-#define I915_BUDDY_MAX_ORDER  I915_BUDDY_HEADER_ORDER
+/* Order-zero must be at least PAGE_SIZE */
+#define I915_BUDDY_MAX_ORDER  (63 - PAGE_SHIFT)

 /*
  * Binary Buddy System.
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
index 632b912b0bc9..f0f5c4df8dbc 100644
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -727,6 +727,53 @@ err_fini:
 	return err;
 }

+static int igt_buddy_alloc_limit(void *arg)
+{
+	struct i915_buddy_block *block;
+	struct i915_buddy_mm mm;
+	const u64 size = U64_MAX;
+	int err;
+
+	err = i915_buddy_init(&mm, size, PAGE_SIZE);
+	if (err)
+		return err;
+
+	if (mm.max_order != I915_BUDDY_MAX_ORDER) {
+		pr_err("mm.max_order(%d) != %d\n",
+		       mm.max_order, I915_BUDDY_MAX_ORDER);
+		err = -EINVAL;
+		goto out_fini;
+	}
+
+	block = i915_buddy_alloc(&mm, mm.max_order);
+	if (IS_ERR(block)) {
+		err = PTR_ERR(block);
+		goto out_fini;
+	}
+
+	if (i915_buddy_block_order(block) != mm.max_order) {
+		pr_err("block order(%d) != %d\n",
+		       i915_buddy_block_order(block), mm.max_order);
+		err = -EINVAL;
+		goto out_free;
+	}
+
+	if (i915_buddy_block_size(&mm, block) !=
+	    BIT_ULL(mm.max_order) * PAGE_SIZE) {
+		pr_err("block size(%llu) != %llu\n",
+		       i915_buddy_block_size(&mm, block),
+		       BIT_ULL(mm.max_order) * PAGE_SIZE);
+		err = -EINVAL;
+		goto out_free;
+	}
+
+out_free:
+	i915_buddy_free(&mm, block);
+out_fini:
+	i915_buddy_fini(&mm);
+	return err;
+}
+
 int i915_buddy_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -735,6 +782,7 @@ int i915_buddy_mock_selftests(void)
 		SUBTEST(igt_buddy_alloc_pathological),
 		SUBTEST(igt_buddy_alloc_smoke),
 		SUBTEST(igt_buddy_alloc_range),
+		SUBTEST(igt_buddy_alloc_limit),
 	};

 	return i915_subtests(tests, NULL);
--
cgit v1.2.3


From adeca641bcb64f9e4fd477c0d1fe482f18934e90 Mon Sep 17 00:00:00 2001
From: Abdiel Janulgue
Date: Wed, 27 Jan 2021 13:14:13 +0000
Subject: drm/i915: introduce mem->reserved

In the following patch we need to reserve regions inaccessible to the
driver during initialization, so add mem->reserved for collecting such
regions.
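A minimal sketch of the intended use (illustrative only; offset and size are
placeholders here, the real caller arrives in the following patch, and the
mock selftest below exercises the helper with random ranges):

	/* carve a range out of the region so the buddy allocator never hands it out */
	err = intel_memory_region_reserve(mem, offset, size);
	if (err)	/* e.g. the range overlaps space already in use */
		return err;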
v2: turn into an actual intel_memory_region_reserve api Cc: Imre Deak Signed-off-by: Abdiel Janulgue Signed-off-by: Matthew Auld Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20210127131417.393872-4-matthew.auld@intel.com Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_memory_region.c | 14 ++++ drivers/gpu/drm/i915/intel_memory_region.h | 5 ++ .../gpu/drm/i915/selftests/intel_memory_region.c | 77 ++++++++++++++++++++++ 3 files changed, 96 insertions(+) (limited to 'drivers/gpu/drm/i915/selftests') diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index b1b610bfff09..49d306b5532f 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -156,9 +156,22 @@ int intel_memory_region_init_buddy(struct intel_memory_region *mem) void intel_memory_region_release_buddy(struct intel_memory_region *mem) { + i915_buddy_free_list(&mem->mm, &mem->reserved); i915_buddy_fini(&mem->mm); } +int intel_memory_region_reserve(struct intel_memory_region *mem, + u64 offset, u64 size) +{ + int ret; + + mutex_lock(&mem->mm_lock); + ret = i915_buddy_alloc_range(&mem->mm, &mem->reserved, offset, size); + mutex_unlock(&mem->mm_lock); + + return ret; +} + struct intel_memory_region * intel_memory_region_create(struct drm_i915_private *i915, resource_size_t start, @@ -185,6 +198,7 @@ intel_memory_region_create(struct drm_i915_private *i915, mutex_init(&mem->objects.lock); INIT_LIST_HEAD(&mem->objects.list); INIT_LIST_HEAD(&mem->objects.purgeable); + INIT_LIST_HEAD(&mem->reserved); mutex_init(&mem->mm_lock); diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index 6ffc0673f005..d17e4fe3123c 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -89,6 +89,8 @@ struct intel_memory_region { unsigned int id; char name[8]; + struct list_head reserved; + dma_addr_t remap_addr; struct { @@ -113,6 +115,9 @@ void __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem, struct list_head *blocks); void __intel_memory_region_put_block_buddy(struct i915_buddy_block *block); +int intel_memory_region_reserve(struct intel_memory_region *mem, + u64 offset, u64 size); + struct intel_memory_region * intel_memory_region_create(struct drm_i915_private *i915, resource_size_t start, diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index ce7adfa3bca0..64348528e1d5 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -144,6 +144,82 @@ static bool is_contiguous(struct drm_i915_gem_object *obj) return true; } +static int igt_mock_reserve(void *arg) +{ + struct intel_memory_region *mem = arg; + resource_size_t avail = resource_size(&mem->region); + struct drm_i915_gem_object *obj; + const u32 chunk_size = SZ_32M; + u32 i, offset, count, *order; + u64 allocated, cur_avail; + I915_RND_STATE(prng); + LIST_HEAD(objects); + int err = 0; + + if (!list_empty(&mem->reserved)) { + pr_err("%s region reserved list is not empty\n", __func__); + return -EINVAL; + } + + count = avail / chunk_size; + order = i915_random_order(count, &prng); + if (!order) + return 0; + + /* Reserve a bunch of ranges within the region */ + for (i = 0; i < count; ++i) { + u64 start = order[i] * chunk_size; + u64 size = 
i915_prandom_u32_max_state(chunk_size, &prng); + + /* Allow for some really big holes */ + if (!size) + continue; + + size = round_up(size, PAGE_SIZE); + offset = igt_random_offset(&prng, 0, chunk_size, size, + PAGE_SIZE); + + err = intel_memory_region_reserve(mem, start + offset, size); + if (err) { + pr_err("%s failed to reserve range", __func__); + goto out_close; + } + + /* XXX: maybe sanity check the block range here? */ + avail -= size; + } + + /* Try to see if we can allocate from the remaining space */ + allocated = 0; + cur_avail = avail; + do { + u32 size = i915_prandom_u32_max_state(cur_avail, &prng); + + size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE); + obj = igt_object_create(mem, &objects, size, 0); + if (IS_ERR(obj)) { + if (PTR_ERR(obj) == -ENXIO) + break; + + err = PTR_ERR(obj); + goto out_close; + } + cur_avail -= size; + allocated += size; + } while (1); + + if (allocated != avail) { + pr_err("%s mismatch between allocation and free space", __func__); + err = -EINVAL; + } + +out_close: + kfree(order); + close_objects(mem, &objects); + i915_buddy_free_list(&mem->mm, &mem->reserved); + return err; +} + static int igt_mock_contiguous(void *arg) { struct intel_memory_region *mem = arg; @@ -930,6 +1006,7 @@ static int perf_memcpy(void *arg) int intel_memory_region_mock_selftests(void) { static const struct i915_subtest tests[] = { + SUBTEST(igt_mock_reserve), SUBTEST(igt_mock_fill), SUBTEST(igt_mock_contiguous), SUBTEST(igt_mock_splintered_region), -- cgit v1.2.3 From 38b237eab2bc7feac87a4c9d870368e935a0091b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 24 Mar 2021 12:13:30 +0000 Subject: drm/i915: Individual request cancellation Currently, we cancel outstanding requests within a context when the context is closed. We may also want to cancel individual requests using the same graceful preemption mechanism. v2 (Tvrtko): * Cancel waiters carefully considering no timeline lock and RCU. * Fixed selftests. v3 (Tvrtko): * Remove error propagation to waiters for now. v4 (Tvrtko): * Rebase for extracted i915_request_active_engine. 
(Matt) Signed-off-by: Chris Wilson Signed-off-by: Tvrtko Ursulin Reviewed-by: Matthew Auld [danvet: Resolve conflict because intel_engine_flush_scheduler is still called intel_engine_flush_submission] Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210324121335.2307063-3-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 1 + .../gpu/drm/i915/gt/intel_execlists_submission.c | 9 +- drivers/gpu/drm/i915/i915_request.c | 33 +++- drivers/gpu/drm/i915/i915_request.h | 4 +- drivers/gpu/drm/i915/selftests/i915_request.c | 201 +++++++++++++++++++++ 5 files changed, 242 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c index d7be2b9339f9..876394cce276 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c @@ -279,6 +279,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine) mutex_unlock(&ce->timeline->mutex); } + intel_engine_flush_submission(engine); intel_engine_pm_put(engine); return err; } diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index 77edca578e76..4ba6b8674012 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -470,6 +470,11 @@ static void reset_active(struct i915_request *rq, ce->lrc.lrca = lrc_update_regs(ce, engine, head); } +static bool bad_request(const struct i915_request *rq) +{ + return rq->fence.error && i915_request_started(rq); +} + static struct intel_engine_cs * __execlists_schedule_in(struct i915_request *rq) { @@ -482,7 +487,7 @@ __execlists_schedule_in(struct i915_request *rq) !intel_engine_has_heartbeat(engine))) intel_context_set_banned(ce); - if (unlikely(intel_context_is_banned(ce))) + if (unlikely(intel_context_is_banned(ce) || bad_request(rq))) reset_active(rq, engine); if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) @@ -1208,7 +1213,7 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine, return 0; /* Force a fast reset for terminated contexts (ignoring sysfs!) 
*/ - if (unlikely(intel_context_is_banned(rq->context))) + if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq))) return 1; return READ_ONCE(engine->props.preempt_timeout_ms); diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index d23186016fc6..a031b86f8508 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -33,7 +33,10 @@ #include "gem/i915_gem_context.h" #include "gt/intel_breadcrumbs.h" #include "gt/intel_context.h" +#include "gt/intel_engine.h" +#include "gt/intel_engine_heartbeat.h" #include "gt/intel_gpu_commands.h" +#include "gt/intel_reset.h" #include "gt/intel_ring.h" #include "gt/intel_rps.h" @@ -542,20 +545,22 @@ void __i915_request_skip(struct i915_request *rq) rq->infix = rq->postfix; } -void i915_request_set_error_once(struct i915_request *rq, int error) +bool i915_request_set_error_once(struct i915_request *rq, int error) { int old; GEM_BUG_ON(!IS_ERR_VALUE((long)error)); if (i915_request_signaled(rq)) - return; + return false; old = READ_ONCE(rq->fence.error); do { if (fatal_error(old)) - return; + return false; } while (!try_cmpxchg(&rq->fence.error, &old, error)); + + return true; } void i915_request_mark_eio(struct i915_request *rq) @@ -722,6 +727,28 @@ void i915_request_unsubmit(struct i915_request *request) spin_unlock_irqrestore(&engine->active.lock, flags); } +static void __cancel_request(struct i915_request *rq) +{ + struct intel_engine_cs *engine = NULL; + + i915_request_active_engine(rq, &engine); + + if (engine && intel_engine_pulse(engine)) + intel_gt_handle_error(engine->gt, engine->mask, 0, + "request cancellation by %s", + current->comm); +} + +void i915_request_cancel(struct i915_request *rq, int error) +{ + if (!i915_request_set_error_once(rq, error)) + return; + + set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags); + + __cancel_request(rq); +} + static int __i915_sw_fence_call submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index cf4bd07f749e..e4d190ab76b2 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -300,7 +300,7 @@ struct i915_request * __must_check i915_request_create(struct intel_context *ce); void __i915_request_skip(struct i915_request *rq); -void i915_request_set_error_once(struct i915_request *rq, int error); +bool i915_request_set_error_once(struct i915_request *rq, int error); void i915_request_mark_eio(struct i915_request *rq); struct i915_request *__i915_request_commit(struct i915_request *request); @@ -356,6 +356,8 @@ void i915_request_submit(struct i915_request *request); void __i915_request_unsubmit(struct i915_request *request); void i915_request_unsubmit(struct i915_request *request); +void i915_request_cancel(struct i915_request *rq, int error); + long i915_request_wait(struct i915_request *rq, unsigned int flags, long timeout) diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 9a9e92a775c8..ee8e753d98ce 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -609,6 +609,206 @@ static int live_nop_request(void *arg) return err; } +static int __cancel_inactive(struct intel_engine_cs *engine) +{ + struct intel_context *ce; + struct igt_spinner spin; + struct i915_request *rq; + int err = 0; + + if (igt_spinner_init(&spin, engine->gt)) + return -ENOMEM; + + ce = 
intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto out_spin; + } + + rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_ce; + } + + pr_debug("%s: Cancelling inactive request\n", engine->name); + i915_request_cancel(rq, -EINTR); + i915_request_get(rq); + i915_request_add(rq); + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + struct drm_printer p = drm_info_printer(engine->i915->drm.dev); + + pr_err("%s: Failed to cancel inactive request\n", engine->name); + intel_engine_dump(engine, &p, "%s\n", engine->name); + err = -ETIME; + goto out_rq; + } + + if (rq->fence.error != -EINTR) { + pr_err("%s: fence not cancelled (%u)\n", + engine->name, rq->fence.error); + err = -EINVAL; + } + +out_rq: + i915_request_put(rq); +out_ce: + intel_context_put(ce); +out_spin: + igt_spinner_fini(&spin); + if (err) + pr_err("%s: %s error %d\n", __func__, engine->name, err); + return err; +} + +static int __cancel_active(struct intel_engine_cs *engine) +{ + struct intel_context *ce; + struct igt_spinner spin; + struct i915_request *rq; + int err = 0; + + if (igt_spinner_init(&spin, engine->gt)) + return -ENOMEM; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto out_spin; + } + + rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_ce; + } + + pr_debug("%s: Cancelling active request\n", engine->name); + i915_request_get(rq); + i915_request_add(rq); + if (!igt_wait_for_spinner(&spin, rq)) { + struct drm_printer p = drm_info_printer(engine->i915->drm.dev); + + pr_err("Failed to start spinner on %s\n", engine->name); + intel_engine_dump(engine, &p, "%s\n", engine->name); + err = -ETIME; + goto out_rq; + } + i915_request_cancel(rq, -EINTR); + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + struct drm_printer p = drm_info_printer(engine->i915->drm.dev); + + pr_err("%s: Failed to cancel active request\n", engine->name); + intel_engine_dump(engine, &p, "%s\n", engine->name); + err = -ETIME; + goto out_rq; + } + + if (rq->fence.error != -EINTR) { + pr_err("%s: fence not cancelled (%u)\n", + engine->name, rq->fence.error); + err = -EINVAL; + } + +out_rq: + i915_request_put(rq); +out_ce: + intel_context_put(ce); +out_spin: + igt_spinner_fini(&spin); + if (err) + pr_err("%s: %s error %d\n", __func__, engine->name, err); + return err; +} + +static int __cancel_completed(struct intel_engine_cs *engine) +{ + struct intel_context *ce; + struct igt_spinner spin; + struct i915_request *rq; + int err = 0; + + if (igt_spinner_init(&spin, engine->gt)) + return -ENOMEM; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto out_spin; + } + + rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_ce; + } + igt_spinner_end(&spin); + i915_request_get(rq); + i915_request_add(rq); + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + err = -ETIME; + goto out_rq; + } + + pr_debug("%s: Cancelling completed request\n", engine->name); + i915_request_cancel(rq, -EINTR); + if (rq->fence.error) { + pr_err("%s: fence not cancelled (%u)\n", + engine->name, rq->fence.error); + err = -EINVAL; + } + +out_rq: + i915_request_put(rq); +out_ce: + intel_context_put(ce); +out_spin: + igt_spinner_fini(&spin); + if (err) + pr_err("%s: %s error %d\n", __func__, engine->name, err); + return err; +} + +static int live_cancel_request(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct 
intel_engine_cs *engine; + + /* + * Check cancellation of requests. We expect to be able to immediately + * cancel active requests, even if they are currently on the GPU. + */ + + for_each_uabi_engine(engine, i915) { + struct igt_live_test t; + int err, err2; + + if (!intel_engine_has_preemption(engine)) + continue; + + err = igt_live_test_begin(&t, i915, __func__, engine->name); + if (err) + return err; + + err = __cancel_inactive(engine); + if (err == 0) + err = __cancel_active(engine); + if (err == 0) + err = __cancel_completed(engine); + + err2 = igt_live_test_end(&t); + if (err) + return err; + if (err2) + return err2; + } + + return 0; +} + static struct i915_vma *empty_batch(struct drm_i915_private *i915) { struct drm_i915_gem_object *obj; @@ -1486,6 +1686,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915) SUBTEST(live_sequential_engines), SUBTEST(live_parallel_engines), SUBTEST(live_empty_request), + SUBTEST(live_cancel_request), SUBTEST(live_breadcrumbs_smoketest), }; -- cgit v1.2.3 From 69e331b44c9c45510f45f8a99c467eaa158f455d Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 25 Mar 2021 23:47:45 +0200 Subject: drm/i915/selftest: Fix error handling in igt_vma_remapped_gtt() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit An inner scope version of err shadows the variable in the outer scope, and err doesn't get set after a failure, fix these. Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20210325214808.2071517-3-imre.deak@intel.com --- drivers/gpu/drm/i915/selftests/i915_vma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/selftests') diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 1b6125e4c1ac..35481cfbb635 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -890,7 +890,6 @@ static int igt_vma_remapped_gtt(void *arg) struct i915_vma *vma; u32 __iomem *map; unsigned int x, y; - int err; i915_gem_object_lock(obj, NULL); err = i915_gem_object_set_to_gtt_domain(obj, true); @@ -962,6 +961,7 @@ static int igt_vma_remapped_gtt(void *arg) *t == I915_GGTT_VIEW_ROTATED ? "Rotated" : "Remapped", val, exp); i915_vma_unpin_iomap(vma); + err = -EINVAL; goto out; } } -- cgit v1.2.3 From 911e03327d4482c4c1f41164bfed9634c8ee598a Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 25 Mar 2021 23:47:46 +0200 Subject: drm/i915/selftest: Fix debug message in igt_vma_remapped_gtt() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The expected/found values were swapped in a debug message, fix this up. Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20210325214808.2071517-4-imre.deak@intel.com --- drivers/gpu/drm/i915/selftests/i915_vma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/selftests') diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 35481cfbb635..23f6a212a391 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -959,7 +959,7 @@ static int igt_vma_remapped_gtt(void *arg) if (val != exp) { pr_err("%s VMA write test failed, expected 0x%x, found 0x%x\n", *t == I915_GGTT_VIEW_ROTATED ? 
"Rotated" : "Remapped", - val, exp); + exp, val); i915_vma_unpin_iomap(vma); err = -EINVAL; goto out; -- cgit v1.2.3 From 54c2921f7e9eeb7bb17e7174fb83c833503277f5 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 25 Mar 2021 23:47:48 +0200 Subject: drm/i915/selftest: Make sure to init i915_ggtt_view in igt_vma_rotate_remap() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This probably doesn't cause an issue, since the code checks the view type dependent size of the views before comparing them, but let's follow the practice to bzero the whole struct when initializing it. v2: Use {} instead of { } struct intializer. (Ville) Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20210325214808.2071517-6-imre.deak@intel.com --- drivers/gpu/drm/i915/selftests/i915_vma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/selftests') diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 23f6a212a391..9dd6799105e6 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -515,7 +515,7 @@ static int igt_vma_rotate_remap(void *arg) for (t = types; *t; t++) { for (a = planes; a->width; a++) { for (b = planes + ARRAY_SIZE(planes); b-- != planes; ) { - struct i915_ggtt_view view; + struct i915_ggtt_view view = {}; unsigned int n, max_offset; max_offset = max(a->stride * a->height, -- cgit v1.2.3 From b05787aeed320f872bda8d7f0f0a2c7b1593c7e1 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 25 Mar 2021 23:48:04 +0200 Subject: drm/i915/selftest: Unify use of intel_remapped_plane_info in igt_vma_rotate_remap() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Always use the modified copy of the intel_remapped_plane_info variables. An upcoming patch updates the dst_stride field in these copies after which we can't use the original versions. v2: Init view in igt_vma_rotate_remap() when declaring it. 
(Ville) Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20210325214808.2071517-22-imre.deak@intel.com --- drivers/gpu/drm/i915/selftests/i915_vma.c | 63 +++++++++++++++++-------------- 1 file changed, 34 insertions(+), 29 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests') diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 9dd6799105e6..95dd6712ac90 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -515,22 +515,24 @@ static int igt_vma_rotate_remap(void *arg) for (t = types; *t; t++) { for (a = planes; a->width; a++) { for (b = planes + ARRAY_SIZE(planes); b-- != planes; ) { - struct i915_ggtt_view view = {}; + struct i915_ggtt_view view = { + .type = *t, + .remapped.plane[0] = *a, + .remapped.plane[1] = *b, + }; + struct intel_remapped_plane_info *plane_info = view.remapped.plane; unsigned int n, max_offset; - max_offset = max(a->stride * a->height, - b->stride * b->height); + max_offset = max(plane_info[0].stride * plane_info[0].height, + plane_info[1].stride * plane_info[1].height); GEM_BUG_ON(max_offset > max_pages); max_offset = max_pages - max_offset; - view.type = *t; - view.rotated.plane[0] = *a; - view.rotated.plane[1] = *b; - - for_each_prime_number_from(view.rotated.plane[0].offset, 0, max_offset) { - for_each_prime_number_from(view.rotated.plane[1].offset, 0, max_offset) { + for_each_prime_number_from(plane_info[0].offset, 0, max_offset) { + for_each_prime_number_from(plane_info[1].offset, 0, max_offset) { struct scatterlist *sg; struct i915_vma *vma; + unsigned int expected_pages; vma = checked_vma_instance(obj, vm, &view); if (IS_ERR(vma)) { @@ -544,25 +546,27 @@ static int igt_vma_rotate_remap(void *arg) goto out_object; } + expected_pages = rotated_size(&plane_info[0], &plane_info[1]); + if (view.type == I915_GGTT_VIEW_ROTATED && - vma->size != rotated_size(a, b) * PAGE_SIZE) { + vma->size != expected_pages * PAGE_SIZE) { pr_err("VMA is wrong size, expected %lu, found %llu\n", - PAGE_SIZE * rotated_size(a, b), vma->size); + PAGE_SIZE * expected_pages, vma->size); err = -EINVAL; goto out_object; } if (view.type == I915_GGTT_VIEW_REMAPPED && - vma->size > rotated_size(a, b) * PAGE_SIZE) { + vma->size > expected_pages * PAGE_SIZE) { pr_err("VMA is wrong size, expected %lu, found %llu\n", - PAGE_SIZE * rotated_size(a, b), vma->size); + PAGE_SIZE * expected_pages, vma->size); err = -EINVAL; goto out_object; } - if (vma->pages->nents > rotated_size(a, b)) { + if (vma->pages->nents > expected_pages) { pr_err("sg table is wrong sizeo, expected %u, found %u nents\n", - rotated_size(a, b), vma->pages->nents); + expected_pages, vma->pages->nents); err = -EINVAL; goto out_object; } @@ -590,14 +594,14 @@ static int igt_vma_rotate_remap(void *arg) pr_err("Inconsistent %s VMA pages for plane %d: [(%d, %d, %d, %d), (%d, %d, %d, %d)]\n", view.type == I915_GGTT_VIEW_ROTATED ? 
"rotated" : "remapped", n, - view.rotated.plane[0].width, - view.rotated.plane[0].height, - view.rotated.plane[0].stride, - view.rotated.plane[0].offset, - view.rotated.plane[1].width, - view.rotated.plane[1].height, - view.rotated.plane[1].stride, - view.rotated.plane[1].offset); + plane_info[0].width, + plane_info[0].height, + plane_info[0].stride, + plane_info[0].offset, + plane_info[1].width, + plane_info[1].height, + plane_info[1].stride, + plane_info[1].offset); err = -EINVAL; goto out_object; } @@ -887,6 +891,7 @@ static int igt_vma_remapped_gtt(void *arg) .type = *t, .rotated.plane[0] = *p, }; + struct intel_remapped_plane_info *plane_info = view.rotated.plane; struct i915_vma *vma; u32 __iomem *map; unsigned int x, y; @@ -912,15 +917,15 @@ static int igt_vma_remapped_gtt(void *arg) goto out; } - for (y = 0 ; y < p->height; y++) { - for (x = 0 ; x < p->width; x++) { + for (y = 0 ; y < plane_info[0].height; y++) { + for (x = 0 ; x < plane_info[0].width; x++) { unsigned int offset; u32 val = y << 16 | x; if (*t == I915_GGTT_VIEW_ROTATED) - offset = (x * p->height + y) * PAGE_SIZE; + offset = (x * plane_info[0].height + y) * PAGE_SIZE; else - offset = (y * p->width + x) * PAGE_SIZE; + offset = (y * plane_info[0].width + x) * PAGE_SIZE; iowrite32(val, &map[offset / sizeof(*map)]); } @@ -943,8 +948,8 @@ static int igt_vma_remapped_gtt(void *arg) goto out; } - for (y = 0 ; y < p->height; y++) { - for (x = 0 ; x < p->width; x++) { + for (y = 0 ; y < plane_info[0].height; y++) { + for (x = 0 ; x < plane_info[0].width; x++) { unsigned int offset, src_idx; u32 exp = y << 16 | x; u32 val; -- cgit v1.2.3 From 6d80f4308605e1b572777dc19f0297657ec0c206 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 25 Mar 2021 23:48:05 +0200 Subject: drm/i915: s/stride/src_stride/ in the intel_remapped_plane_info struct MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit An upcoming patch adds a new dst_stride field to the intel_remapped_plane_info struct, so for clarity rename the current stride field to src_stride. 
Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20210325214808.2071517-23-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_fb.c | 8 ++--- drivers/gpu/drm/i915/gt/intel_ggtt.c | 4 +-- drivers/gpu/drm/i915/i915_debugfs.c | 8 ++--- drivers/gpu/drm/i915/i915_vma_types.h | 2 +- drivers/gpu/drm/i915/selftests/i915_vma.c | 60 +++++++++++++++---------------- 5 files changed, 41 insertions(+), 41 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests') diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index 048567e81cf9..41ba835fabfb 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -590,8 +590,8 @@ static void init_plane_view_dims(const struct intel_framebuffer *fb, int color_p } static unsigned int -plane_view_stride_tiles(const struct intel_framebuffer *fb, int color_plane, - const struct fb_plane_view_dims *dims) +plane_view_src_stride_tiles(const struct intel_framebuffer *fb, int color_plane, + const struct fb_plane_view_dims *dims) { return DIV_ROUND_UP(fb->base.pitches[color_plane], dims->tile_width * fb->base.format->cpp[color_plane]); @@ -633,7 +633,7 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p struct drm_rect r; assign_chk_ovf(i915, remap_info->offset, obj_offset); - assign_chk_ovf(i915, remap_info->stride, plane_view_stride_tiles(fb, color_plane, dims)); + assign_chk_ovf(i915, remap_info->src_stride, plane_view_src_stride_tiles(fb, color_plane, dims)); assign_chk_ovf(i915, remap_info->width, plane_view_width_tiles(fb, color_plane, dims, x)); assign_chk_ovf(i915, remap_info->height, plane_view_height_tiles(fb, color_plane, dims, y)); @@ -699,7 +699,7 @@ calc_plane_normal_size(const struct intel_framebuffer *fb, int color_plane, x * fb->base.format->cpp[color_plane]; tiles = DIV_ROUND_UP(size, intel_tile_size(i915)); } else { - tiles = plane_view_stride_tiles(fb, color_plane, dims) * + tiles = plane_view_src_stride_tiles(fb, color_plane, dims) * plane_view_height_tiles(fb, color_plane, dims, y); /* * If the plane isn't horizontally tile aligned, diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index ec2bf963ced9..fff750451fef 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -1300,7 +1300,7 @@ intel_rotate_pages(struct intel_rotation_info *rot_info, for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { sg = rotate_pages(obj, rot_info->plane[i].offset, rot_info->plane[i].width, rot_info->plane[i].height, - rot_info->plane[i].stride, st, sg); + rot_info->plane[i].src_stride, st, sg); } return st; @@ -1384,7 +1384,7 @@ intel_remap_pages(struct intel_remapped_info *rem_info, for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { sg = remap_pages(obj, rem_info->plane[i].offset, rem_info->plane[i].width, rem_info->plane[i].height, - rem_info->plane[i].stride, st, sg); + rem_info->plane[i].src_stride, st, sg); } i915_sg_trim(st); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 51133b8fabb4..48032c0288ee 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -176,11 +176,11 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]", vma->ggtt_view.rotated.plane[0].width, vma->ggtt_view.rotated.plane[0].height, - 
vma->ggtt_view.rotated.plane[0].stride, + vma->ggtt_view.rotated.plane[0].src_stride, vma->ggtt_view.rotated.plane[0].offset, vma->ggtt_view.rotated.plane[1].width, vma->ggtt_view.rotated.plane[1].height, - vma->ggtt_view.rotated.plane[1].stride, + vma->ggtt_view.rotated.plane[1].src_stride, vma->ggtt_view.rotated.plane[1].offset); break; @@ -188,11 +188,11 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]", vma->ggtt_view.remapped.plane[0].width, vma->ggtt_view.remapped.plane[0].height, - vma->ggtt_view.remapped.plane[0].stride, + vma->ggtt_view.remapped.plane[0].src_stride, vma->ggtt_view.remapped.plane[0].offset, vma->ggtt_view.remapped.plane[1].width, vma->ggtt_view.remapped.plane[1].height, - vma->ggtt_view.remapped.plane[1].stride, + vma->ggtt_view.remapped.plane[1].src_stride, vma->ggtt_view.remapped.plane[1].offset); break; diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h index 358b4306fc00..f7f2aa168c9e 100644 --- a/drivers/gpu/drm/i915/i915_vma_types.h +++ b/drivers/gpu/drm/i915/i915_vma_types.h @@ -100,7 +100,7 @@ struct intel_remapped_plane_info { u32 offset; u16 width; u16 height; - u16 stride; + u16 src_stride; u16 unused_mbz; } __packed; diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 95dd6712ac90..9aaf7201e242 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -361,7 +361,7 @@ static unsigned long rotated_index(const struct intel_rotation_info *r, unsigned int x, unsigned int y) { - return (r->plane[n].stride * (r->plane[n].height - y - 1) + + return (r->plane[n].src_stride * (r->plane[n].height - y - 1) + r->plane[n].offset + x); } @@ -411,7 +411,7 @@ static unsigned long remapped_index(const struct intel_remapped_info *r, unsigned int x, unsigned int y) { - return (r->plane[n].stride * y + + return (r->plane[n].src_stride * y + r->plane[n].offset + x); } @@ -479,21 +479,21 @@ static int igt_vma_rotate_remap(void *arg) struct i915_address_space *vm = &ggtt->vm; struct drm_i915_gem_object *obj; const struct intel_remapped_plane_info planes[] = { - { .width = 1, .height = 1, .stride = 1 }, - { .width = 2, .height = 2, .stride = 2 }, - { .width = 4, .height = 4, .stride = 4 }, - { .width = 8, .height = 8, .stride = 8 }, + { .width = 1, .height = 1, .src_stride = 1 }, + { .width = 2, .height = 2, .src_stride = 2 }, + { .width = 4, .height = 4, .src_stride = 4 }, + { .width = 8, .height = 8, .src_stride = 8 }, - { .width = 3, .height = 5, .stride = 3 }, - { .width = 3, .height = 5, .stride = 4 }, - { .width = 3, .height = 5, .stride = 5 }, + { .width = 3, .height = 5, .src_stride = 3 }, + { .width = 3, .height = 5, .src_stride = 4 }, + { .width = 3, .height = 5, .src_stride = 5 }, - { .width = 5, .height = 3, .stride = 5 }, - { .width = 5, .height = 3, .stride = 7 }, - { .width = 5, .height = 3, .stride = 9 }, + { .width = 5, .height = 3, .src_stride = 5 }, + { .width = 5, .height = 3, .src_stride = 7 }, + { .width = 5, .height = 3, .src_stride = 9 }, - { .width = 4, .height = 6, .stride = 6 }, - { .width = 6, .height = 4, .stride = 6 }, + { .width = 4, .height = 6, .src_stride = 6 }, + { .width = 6, .height = 4, .src_stride = 6 }, { } }, *a, *b; enum i915_ggtt_view_type types[] = { @@ -523,8 +523,8 @@ static int igt_vma_rotate_remap(void *arg) struct intel_remapped_plane_info *plane_info = 
view.remapped.plane; unsigned int n, max_offset; - max_offset = max(plane_info[0].stride * plane_info[0].height, - plane_info[1].stride * plane_info[1].height); + max_offset = max(plane_info[0].src_stride * plane_info[0].height, + plane_info[1].src_stride * plane_info[1].height); GEM_BUG_ON(max_offset > max_pages); max_offset = max_pages - max_offset; @@ -596,11 +596,11 @@ static int igt_vma_rotate_remap(void *arg) "rotated" : "remapped", n, plane_info[0].width, plane_info[0].height, - plane_info[0].stride, + plane_info[0].src_stride, plane_info[0].offset, plane_info[1].width, plane_info[1].height, - plane_info[1].stride, + plane_info[1].src_stride, plane_info[1].offset); err = -EINVAL; goto out_object; @@ -853,21 +853,21 @@ static int igt_vma_remapped_gtt(void *arg) { struct drm_i915_private *i915 = arg; const struct intel_remapped_plane_info planes[] = { - { .width = 1, .height = 1, .stride = 1 }, - { .width = 2, .height = 2, .stride = 2 }, - { .width = 4, .height = 4, .stride = 4 }, - { .width = 8, .height = 8, .stride = 8 }, + { .width = 1, .height = 1, .src_stride = 1 }, + { .width = 2, .height = 2, .src_stride = 2 }, + { .width = 4, .height = 4, .src_stride = 4 }, + { .width = 8, .height = 8, .src_stride = 8 }, - { .width = 3, .height = 5, .stride = 3 }, - { .width = 3, .height = 5, .stride = 4 }, - { .width = 3, .height = 5, .stride = 5 }, + { .width = 3, .height = 5, .src_stride = 3 }, + { .width = 3, .height = 5, .src_stride = 4 }, + { .width = 3, .height = 5, .src_stride = 5 }, - { .width = 5, .height = 3, .stride = 5 }, - { .width = 5, .height = 3, .stride = 7 }, - { .width = 5, .height = 3, .stride = 9 }, + { .width = 5, .height = 3, .src_stride = 5 }, + { .width = 5, .height = 3, .src_stride = 7 }, + { .width = 5, .height = 3, .src_stride = 9 }, - { .width = 4, .height = 6, .stride = 6 }, - { .width = 6, .height = 4, .stride = 6 }, + { .width = 4, .height = 6, .src_stride = 6 }, + { .width = 6, .height = 4, .src_stride = 6 }, { } }, *p; enum i915_ggtt_view_type types[] = { -- cgit v1.2.3 From a4606d4595fd1cb662e4f7a63ac0c04396a0f124 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 25 Mar 2021 23:48:06 +0200 Subject: drm/i915: Add support for FBs requiring a POT stride alignment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit An upcoming platform has a restriction that the FB stride must be power-of-two aligned. To support framebuffer layouts that are not in this layout add a logic that pads the tile rows to the POT aligned size. The HW won't read the padding PTEs, so these don't have to point to an allocated address, or even have their valid flag set. So use a NULL PTE instead for instance the scratch page, which is simple and keeps the SG table compact. v2: - Simplify plane_view_dst_stride(). (Ville) - Pass pitch_tiles as unsigned int. v3: - Drop unintentional s/plane_state->rotation/plane_config->rotation/ change. 
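To make the padding concrete: on a platform that needs the POT alignment, each remapped row of width tiles is followed by (dst_stride - width) padding PTEs that point at nothing. A rough user-space model of that layout, assuming one page per tile; pot_dst_stride() merely stands in for the kernel's roundup_pow_of_two():

    #include <stdio.h>

    static unsigned int pot_dst_stride(unsigned int pitch_tiles)
    {
            unsigned int s = 1;

            while (s < pitch_tiles) /* stand-in for roundup_pow_of_two() */
                    s <<= 1;
            return s;
    }

    int main(void)
    {
            /* cf. the 5x6, src_stride=7 selftest plane added later in the series */
            const unsigned int width = 5, height = 6;       /* tiles */
            unsigned int dst_stride = pot_dst_stride(width);
            unsigned int pad = dst_stride - width;          /* padding PTEs per row */

            printf("remapped view: %u rows of %u real + %u padding pages, %u pages total\n",
                   height, width, pad, dst_stride * height);
            return 0;
    }
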
Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20210325214808.2071517-24-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 4 +- drivers/gpu/drm/i915/display/intel_display_types.h | 1 + drivers/gpu/drm/i915/display/intel_fb.c | 50 +++++++++++++++---- drivers/gpu/drm/i915/gt/intel_ggtt.c | 56 ++++++++++++++++++---- drivers/gpu/drm/i915/i915_debugfs.c | 8 +++- drivers/gpu/drm/i915/i915_vma_types.h | 2 +- drivers/gpu/drm/i915/selftests/i915_vma.c | 13 +++++ 7 files changed, 112 insertions(+), 22 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 30a2b9510184..bdb2adb4d748 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -933,7 +933,7 @@ unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info int i; for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) - size += rot_info->plane[i].width * rot_info->plane[i].height; + size += rot_info->plane[i].dst_stride * rot_info->plane[i].width; return size; } @@ -944,7 +944,7 @@ unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info int i; for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) - size += rem_info->plane[i].width * rem_info->plane[i].height; + size += rem_info->plane[i].dst_stride * rem_info->plane[i].height; return size; } diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 8008fd6f489e..e2e707c4dff5 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -128,6 +128,7 @@ struct intel_framebuffer { /* Params to remap the FB pages and program the plane registers in each view. 
*/ struct intel_fb_view normal_view; struct intel_fb_view rotated_view; + struct intel_fb_view remapped_view; }; struct intel_fbdev { diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index 41ba835fabfb..fca41ac5b8e1 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -486,10 +486,17 @@ static bool intel_plane_can_remap(const struct intel_plane_state *plane_state) return true; } +static bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb) +{ + return false; +} + static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation) { if (drm_rotation_90_or_270(rotation)) return fb->rotated_view.color_plane[color_plane].stride; + else if (intel_fb_needs_pot_stride_remap(fb)) + return fb->remapped_view.color_plane[color_plane].stride; else return fb->normal_view.color_plane[color_plane].stride; } @@ -597,6 +604,16 @@ plane_view_src_stride_tiles(const struct intel_framebuffer *fb, int color_plane, dims->tile_width * fb->base.format->cpp[color_plane]); } +static unsigned int +plane_view_dst_stride_tiles(const struct intel_framebuffer *fb, int color_plane, + unsigned int pitch_tiles) +{ + if (intel_fb_needs_pot_stride_remap(fb)) + return roundup_pow_of_two(pitch_tiles); + else + return pitch_tiles; +} + static unsigned int plane_view_width_tiles(const struct intel_framebuffer *fb, int color_plane, const struct fb_plane_view_dims *dims, @@ -629,8 +646,8 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p unsigned int tile_width = dims->tile_width; unsigned int tile_height = dims->tile_height; unsigned int tile_size = intel_tile_size(i915); - unsigned int pitch_tiles; struct drm_rect r; + u32 size; assign_chk_ovf(i915, remap_info->offset, obj_offset); assign_chk_ovf(i915, remap_info->src_stride, plane_view_src_stride_tiles(fb, color_plane, dims)); @@ -640,6 +657,9 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p if (view->gtt.type == I915_GGTT_VIEW_ROTATED) { check_array_bounds(i915, view->gtt.rotated.plane, color_plane); + assign_chk_ovf(i915, remap_info->dst_stride, + plane_view_dst_stride_tiles(fb, color_plane, remap_info->height)); + /* rotate the x/y offsets to match the GTT view */ drm_rect_init(&r, x, y, dims->width, dims->height); drm_rect_rotate(&r, @@ -650,8 +670,9 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p color_plane_info->x = r.x1; color_plane_info->y = r.y1; - pitch_tiles = remap_info->height; - color_plane_info->stride = pitch_tiles * tile_height; + color_plane_info->stride = remap_info->dst_stride * tile_height; + + size = remap_info->dst_stride * remap_info->width; /* rotate the tile dimensions to match the GTT view */ swap(tile_width, tile_height); @@ -660,12 +681,16 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p check_array_bounds(i915, view->gtt.remapped.plane, color_plane); + assign_chk_ovf(i915, remap_info->dst_stride, + plane_view_dst_stride_tiles(fb, color_plane, remap_info->width)); + color_plane_info->x = x; color_plane_info->y = y; - pitch_tiles = remap_info->width; - color_plane_info->stride = pitch_tiles * tile_width * - fb->base.format->cpp[color_plane]; + color_plane_info->stride = remap_info->dst_stride * tile_width * + fb->base.format->cpp[color_plane]; + + size = remap_info->dst_stride * remap_info->height; } /* @@ -675,10 +700,10 @@ static u32 calc_plane_remap_info(const 
struct intel_framebuffer *fb, int color_p */ intel_adjust_tile_offset(&color_plane_info->x, &color_plane_info->y, tile_width, tile_height, - tile_size, pitch_tiles, + tile_size, remap_info->dst_stride, gtt_offset * tile_size, 0); - return remap_info->width * remap_info->height; + return size; } #undef assign_chk_ovf @@ -723,12 +748,14 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct drm_framebuffer *fb struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct drm_i915_gem_object *obj = intel_fb_obj(fb); u32 gtt_offset_rotated = 0; + u32 gtt_offset_remapped = 0; unsigned int max_size = 0; int i, num_planes = fb->format->num_planes; unsigned int tile_size = intel_tile_size(i915); intel_fb_view_init(&intel_fb->normal_view, I915_GGTT_VIEW_NORMAL); intel_fb_view_init(&intel_fb->rotated_view, I915_GGTT_VIEW_ROTATED); + intel_fb_view_init(&intel_fb->remapped_view, I915_GGTT_VIEW_REMAPPED); for (i = 0; i < num_planes; i++) { struct fb_plane_view_dims view_dims; @@ -776,6 +803,11 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct drm_framebuffer *fb offset, gtt_offset_rotated, x, y, &intel_fb->rotated_view); + if (intel_fb_needs_pot_stride_remap(intel_fb)) + gtt_offset_remapped += calc_plane_remap_info(intel_fb, i, &view_dims, + offset, gtt_offset_remapped, x, y, + &intel_fb->remapped_view); + size = calc_plane_normal_size(intel_fb, i, &view_dims, x, y); /* how many tiles in total needed in the bo */ max_size = max(max_size, offset + size); @@ -859,6 +891,8 @@ void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotatio { if (drm_rotation_90_or_270(rotation)) *view = fb->rotated_view; + else if (intel_fb_needs_pot_stride_remap(fb)) + *view = fb->remapped_view; else *view = fb->normal_view; } diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index fff750451fef..8a4fa7aef71d 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -1247,14 +1247,16 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt) static struct scatterlist * rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset, unsigned int width, unsigned int height, - unsigned int stride, + unsigned int src_stride, unsigned int dst_stride, struct sg_table *st, struct scatterlist *sg) { unsigned int column, row; unsigned int src_idx; for (column = 0; column < width; column++) { - src_idx = stride * (height - 1) + column + offset; + unsigned int left; + + src_idx = src_stride * (height - 1) + column + offset; for (row = 0; row < height; row++) { st->nents++; /* @@ -1267,8 +1269,25 @@ rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset, i915_gem_object_get_dma_address(obj, src_idx); sg_dma_len(sg) = I915_GTT_PAGE_SIZE; sg = sg_next(sg); - src_idx -= stride; + src_idx -= src_stride; } + + left = (dst_stride - height) * I915_GTT_PAGE_SIZE; + + if (!left) + continue; + + st->nents++; + + /* + * The DE ignores the PTEs for the padding tiles, the sg entry + * here is just a conenience to indicate how many padding PTEs + * to insert at this spot. 
+ */ + sg_set_page(sg, NULL, left, 0); + sg_dma_address(sg) = 0; + sg_dma_len(sg) = left; + sg = sg_next(sg); } return sg; @@ -1297,11 +1316,12 @@ intel_rotate_pages(struct intel_rotation_info *rot_info, st->nents = 0; sg = st->sgl; - for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { + for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) sg = rotate_pages(obj, rot_info->plane[i].offset, rot_info->plane[i].width, rot_info->plane[i].height, - rot_info->plane[i].src_stride, st, sg); - } + rot_info->plane[i].src_stride, + rot_info->plane[i].dst_stride, + st, sg); return st; @@ -1319,7 +1339,7 @@ err_st_alloc: static struct scatterlist * remap_pages(struct drm_i915_gem_object *obj, unsigned int offset, unsigned int width, unsigned int height, - unsigned int stride, + unsigned int src_stride, unsigned int dst_stride, struct sg_table *st, struct scatterlist *sg) { unsigned int row; @@ -1352,7 +1372,24 @@ remap_pages(struct drm_i915_gem_object *obj, unsigned int offset, left -= length; } - offset += stride - width; + offset += src_stride - width; + + left = (dst_stride - width) * I915_GTT_PAGE_SIZE; + + if (!left) + continue; + + st->nents++; + + /* + * The DE ignores the PTEs for the padding tiles, the sg entry + * here is just a conenience to indicate how many padding PTEs + * to insert at this spot. + */ + sg_set_page(sg, NULL, left, 0); + sg_dma_address(sg) = 0; + sg_dma_len(sg) = left; + sg = sg_next(sg); } return sg; @@ -1384,7 +1421,8 @@ intel_remap_pages(struct intel_remapped_info *rem_info, for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { sg = remap_pages(obj, rem_info->plane[i].offset, rem_info->plane[i].width, rem_info->plane[i].height, - rem_info->plane[i].src_stride, st, sg); + rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride, + st, sg); } i915_sg_trim(st); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 48032c0288ee..4cf975b7504f 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -173,26 +173,30 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) break; case I915_GGTT_VIEW_ROTATED: - seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]", + seq_printf(m, ", rotated [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]", vma->ggtt_view.rotated.plane[0].width, vma->ggtt_view.rotated.plane[0].height, vma->ggtt_view.rotated.plane[0].src_stride, + vma->ggtt_view.rotated.plane[0].dst_stride, vma->ggtt_view.rotated.plane[0].offset, vma->ggtt_view.rotated.plane[1].width, vma->ggtt_view.rotated.plane[1].height, vma->ggtt_view.rotated.plane[1].src_stride, + vma->ggtt_view.rotated.plane[1].dst_stride, vma->ggtt_view.rotated.plane[1].offset); break; case I915_GGTT_VIEW_REMAPPED: - seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]", + seq_printf(m, ", remapped [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]", vma->ggtt_view.remapped.plane[0].width, vma->ggtt_view.remapped.plane[0].height, vma->ggtt_view.remapped.plane[0].src_stride, + vma->ggtt_view.remapped.plane[0].dst_stride, vma->ggtt_view.remapped.plane[0].offset, vma->ggtt_view.remapped.plane[1].width, vma->ggtt_view.remapped.plane[1].height, vma->ggtt_view.remapped.plane[1].src_stride, + vma->ggtt_view.remapped.plane[1].dst_stride, vma->ggtt_view.remapped.plane[1].offset); break; diff --git a/drivers/gpu/drm/i915/i915_vma_types.h 
b/drivers/gpu/drm/i915/i915_vma_types.h index f7f2aa168c9e..6b1bfa230b82 100644 --- a/drivers/gpu/drm/i915/i915_vma_types.h +++ b/drivers/gpu/drm/i915/i915_vma_types.h @@ -101,7 +101,7 @@ struct intel_remapped_plane_info { u16 width; u16 height; u16 src_stride; - u16 unused_mbz; + u16 dst_stride; } __packed; struct intel_remapped_info { diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 9aaf7201e242..6aadcd31d75a 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -528,6 +528,15 @@ static int igt_vma_rotate_remap(void *arg) GEM_BUG_ON(max_offset > max_pages); max_offset = max_pages - max_offset; + if (!plane_info[0].dst_stride) + plane_info[0].dst_stride = view.type == I915_GGTT_VIEW_ROTATED ? + plane_info[0].height : + plane_info[0].width; + if (!plane_info[1].dst_stride) + plane_info[1].dst_stride = view.type == I915_GGTT_VIEW_ROTATED ? + plane_info[1].height : + plane_info[1].width; + for_each_prime_number_from(plane_info[0].offset, 0, max_offset) { for_each_prime_number_from(plane_info[1].offset, 0, max_offset) { struct scatterlist *sg; @@ -902,6 +911,10 @@ static int igt_vma_remapped_gtt(void *arg) if (err) goto out; + if (!plane_info[0].dst_stride) + plane_info[0].dst_stride = *t == I915_GGTT_VIEW_ROTATED ? + p->height : p->width; + vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); if (IS_ERR(vma)) { err = PTR_ERR(vma); -- cgit v1.2.3 From 25926cd856bea1e25f87f35c6fd935b4161b869b Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 25 Mar 2021 23:48:07 +0200 Subject: drm/i915/selftest: Add remap/rotate vma subtests when dst_stride!=width/height MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add selftests to test the POT stride padding functionality added in the previous patch. 
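The key expectation the new subtests encode is that the view size now follows dst_stride rather than width/height alone: dst_stride * width pages per plane for rotated views (padding per column) and dst_stride * height for remapped views (padding per row). A small stand-alone restatement of that check, assuming one page per tile; the names mirror remapped_size() in the hunk below:

    #include <stdbool.h>
    #include <stdio.h>

    struct plane { unsigned int width, height, dst_stride; };

    static unsigned int expected_pages(bool rotated,
                                       const struct plane *a, const struct plane *b)
    {
            if (rotated)    /* columns become rows, so pad per column */
                    return a->dst_stride * a->width + b->dst_stride * b->width;
            else            /* remapped: pad per row */
                    return a->dst_stride * a->height + b->dst_stride * b->height;
    }

    int main(void)
    {
            const struct plane a = { .width = 3, .height = 3, .dst_stride = 4 };
            const struct plane b = { .width = 5, .height = 6, .dst_stride = 8 };

            printf("rotated: %u pages, remapped: %u pages\n",
                   expected_pages(true, &a, &b), expected_pages(false, &a, &b));
            return 0;
    }
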
Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20210325214808.2071517-25-imre.deak@intel.com --- drivers/gpu/drm/i915/selftests/i915_vma.c | 93 ++++++++++++++++++++++++++++--- 1 file changed, 86 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests') diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 6aadcd31d75a..5fe7b80ca0bd 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -373,6 +373,8 @@ assert_rotated(struct drm_i915_gem_object *obj, unsigned int x, y; for (x = 0; x < r->plane[n].width; x++) { + unsigned int left; + for (y = 0; y < r->plane[n].height; y++) { unsigned long src_idx; dma_addr_t src; @@ -401,6 +403,31 @@ assert_rotated(struct drm_i915_gem_object *obj, sg = sg_next(sg); } + + left = (r->plane[n].dst_stride - y) * PAGE_SIZE; + + if (!left) + continue; + + if (!sg) { + pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n", + n, x, y); + return ERR_PTR(-EINVAL); + } + + if (sg_dma_len(sg) != left) { + pr_err("Invalid sg.length, found %d, expected %u for rotated page (%d, %d)\n", + sg_dma_len(sg), left, x, y); + return ERR_PTR(-EINVAL); + } + + if (sg_dma_address(sg) != 0) { + pr_err("Invalid address, found %pad, expected 0 for remapped page (%d, %d)\n", + &sg_dma_address(sg), x, y); + return ERR_PTR(-EINVAL); + } + + sg = sg_next(sg); } return sg; @@ -462,15 +489,55 @@ assert_remapped(struct drm_i915_gem_object *obj, if (!left) sg = sg_next(sg); } + + if (left) { + pr_err("Unexpected sg tail with %d size for remapped page (%d, %d)\n", + left, + x, y); + return ERR_PTR(-EINVAL); + } + + left = (r->plane[n].dst_stride - r->plane[n].width) * PAGE_SIZE; + + if (!left) + continue; + + if (!sg) { + pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n", + n, x, y); + return ERR_PTR(-EINVAL); + } + + if (sg_dma_len(sg) != left) { + pr_err("Invalid sg.length, found %u, expected %u for remapped page (%d, %d)\n", + sg_dma_len(sg), left, + x, y); + return ERR_PTR(-EINVAL); + } + + if (sg_dma_address(sg) != 0) { + pr_err("Invalid address, found %pad, expected 0 for remapped page (%d, %d)\n", + &sg_dma_address(sg), + x, y); + return ERR_PTR(-EINVAL); + } + + sg = sg_next(sg); + left = 0; } return sg; } -static unsigned int rotated_size(const struct intel_remapped_plane_info *a, - const struct intel_remapped_plane_info *b) +static unsigned int remapped_size(enum i915_ggtt_view_type view_type, + const struct intel_remapped_plane_info *a, + const struct intel_remapped_plane_info *b) { - return a->width * a->height + b->width * b->height; + + if (view_type == I915_GGTT_VIEW_ROTATED) + return a->dst_stride * a->width + b->dst_stride * b->width; + else + return a->dst_stride * a->height + b->dst_stride * b->height; } static int igt_vma_rotate_remap(void *arg) @@ -494,6 +561,11 @@ static int igt_vma_rotate_remap(void *arg) { .width = 4, .height = 6, .src_stride = 6 }, { .width = 6, .height = 4, .src_stride = 6 }, + + { .width = 2, .height = 2, .src_stride = 2, .dst_stride = 2 }, + { .width = 3, .height = 3, .src_stride = 3, .dst_stride = 4 }, + { .width = 5, .height = 6, .src_stride = 7, .dst_stride = 8 }, + { } }, *a, *b; enum i915_ggtt_view_type types[] = { @@ -555,7 +627,7 @@ static int igt_vma_rotate_remap(void *arg) goto out_object; } - expected_pages = rotated_size(&plane_info[0], &plane_info[1]); + expected_pages = remapped_size(view.type, &plane_info[0], &plane_info[1]); if (view.type == 
I915_GGTT_VIEW_ROTATED && vma->size != expected_pages * PAGE_SIZE) { @@ -600,16 +672,18 @@ static int igt_vma_rotate_remap(void *arg) else sg = assert_remapped(obj, &view.remapped, n, sg); if (IS_ERR(sg)) { - pr_err("Inconsistent %s VMA pages for plane %d: [(%d, %d, %d, %d), (%d, %d, %d, %d)]\n", + pr_err("Inconsistent %s VMA pages for plane %d: [(%d, %d, %d, %d, %d), (%d, %d, %d, %d, %d)]\n", view.type == I915_GGTT_VIEW_ROTATED ? "rotated" : "remapped", n, plane_info[0].width, plane_info[0].height, plane_info[0].src_stride, + plane_info[0].dst_stride, plane_info[0].offset, plane_info[1].width, plane_info[1].height, plane_info[1].src_stride, + plane_info[1].dst_stride, plane_info[1].offset); err = -EINVAL; goto out_object; @@ -877,6 +951,11 @@ static int igt_vma_remapped_gtt(void *arg) { .width = 4, .height = 6, .src_stride = 6 }, { .width = 6, .height = 4, .src_stride = 6 }, + + { .width = 2, .height = 2, .src_stride = 2, .dst_stride = 2 }, + { .width = 3, .height = 3, .src_stride = 3, .dst_stride = 4 }, + { .width = 5, .height = 6, .src_stride = 7, .dst_stride = 8 }, + { } }, *p; enum i915_ggtt_view_type types[] = { @@ -936,9 +1015,9 @@ static int igt_vma_remapped_gtt(void *arg) u32 val = y << 16 | x; if (*t == I915_GGTT_VIEW_ROTATED) - offset = (x * plane_info[0].height + y) * PAGE_SIZE; + offset = (x * plane_info[0].dst_stride + y) * PAGE_SIZE; else - offset = (y * plane_info[0].width + x) * PAGE_SIZE; + offset = (y * plane_info[0].dst_stride + x) * PAGE_SIZE; iowrite32(val, &map[offset / sizeof(*map)]); } -- cgit v1.2.3
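Taken together, the final hunk above means igt_vma_remapped_gtt() now addresses the view through dst_stride when converting an (x, y) tile coordinate into a byte offset. A stand-alone restatement of that math, assuming 4 KiB pages; view_offset() is an illustrative helper, not a driver function:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static unsigned long view_offset(bool rotated, unsigned int dst_stride,
                                     unsigned int x, unsigned int y)
    {
            if (rotated)
                    return (unsigned long)(x * dst_stride + y) * PAGE_SIZE;
            else    /* remapped */
                    return (unsigned long)(y * dst_stride + x) * PAGE_SIZE;
    }

    int main(void)
    {
            /* e.g. the 3x3 plane with dst_stride = 4 from the selftest table */
            printf("rotated  (x=2, y=1): 0x%lx\n", view_offset(true, 4, 2, 1));
            printf("remapped (x=2, y=1): 0x%lx\n", view_offset(false, 4, 2, 1));
            return 0;
    }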