author     Chris Wilson <chris@chris-wilson.co.uk>  2020-12-02 20:34:44 +0300
committer  Chris Wilson <chris@chris-wilson.co.uk>  2020-12-02 23:59:29 +0300
commit     7d1a31e128d3cb939cd70c95f898c13f85155571 (patch)
tree       5cdffe81cc1fdb8d5a58437c9fa99061ef048fab /drivers/gpu/drm/i915/intel_memory_region.c
parent     a2843b3bd17e5a1c6b270709dc5bb0091eba1074 (diff)
download   linux-7d1a31e128d3cb939cd70c95f898c13f85155571.tar.xz
Revert "drm/i915/lmem: Limit block size to 4G"
Mixing I915_ALLOC_CONTIGUOUS and I915_ALLOC_MAX_SEGMENT_SIZE fared badly.
The two directives conflict, with the contiguous request setting the
min_order to the full size of the object, and the max-segment-size setting
the max_order to the limit of the DMA mapper. This results in a situation
where max_order < min_order, causing our sanity checks to fail.

Instead of limiting the buddy block size, in the previous patch we split
the oversized buddy into multiple scatterlist elements.

Fixes: d2cf0125d4a1 ("drm/i915/lmem: Limit block size to 4G")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201202173444.14903-2-chris@chris-wilson.co.uk
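
To make the conflict concrete: a contiguous request needs the whole object to fit
in a single block, so min_order follows the object size, while the segment-size
directive capped max_order at the DMA segment limit. The standalone C sketch below
only illustrates that arithmetic; order_for() and the 8G object / 4G segment values
are invented for the example and are not the driver's code.

/*
 * Illustrative only: shows how a contiguous request can collide with a
 * segment-size cap. The 8G object and 4G segment limit are example values.
 */
#include <stdio.h>

static unsigned int order_for(unsigned long long bytes, unsigned long long chunk)
{
        unsigned int order = 0;

        /* smallest order such that one block of (chunk << order) covers 'bytes' */
        while ((chunk << order) < bytes)
                order++;
        return order;
}

int main(void)
{
        unsigned long long chunk = 4096;              /* stands in for mem->mm.chunk_size */
        unsigned long long obj_size = 8ULL << 30;     /* contiguous 8G object (example) */
        unsigned long long max_segment = 4ULL << 30;  /* DMA segment limit (example) */

        unsigned int min_order = order_for(obj_size, chunk);    /* 21: whole object in one block */
        unsigned int max_order = order_for(max_segment, chunk); /* 20: capped by the segment size */

        printf("min_order=%u max_order=%u -> %s\n", min_order, max_order,
               min_order > max_order ? "allocation cannot succeed" : "ok");
        return 0;
}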
Diffstat (limited to 'drivers/gpu/drm/i915/intel_memory_region.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.c  18
1 file changed, 1 insertion, 17 deletions
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index ae36e2f6d6e3..b326993a1026 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -72,7 +72,6 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
                                        struct list_head *blocks)
 {
         unsigned int min_order = 0;
-        unsigned int max_order;
         unsigned long n_pages;
 
         GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
@@ -93,28 +92,13 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
 
         n_pages = size >> ilog2(mem->mm.chunk_size);
 
-        /*
-         * If we going to feed this into an sg list we should limit the block
-         * sizes such that we don't exceed the i915_sg_segment_size().
-         */
-        if (flags & I915_ALLOC_MAX_SEGMENT_SIZE) {
-                unsigned int max_segment = i915_sg_segment_size();
-
-                if (GEM_WARN_ON(max_segment < mem->mm.chunk_size))
-                        max_order = 0;
-                else
-                        max_order = ilog2(max_segment) - ilog2(mem->mm.chunk_size);
-        } else {
-                max_order = mem->mm.max_order;
-        }
-
         mutex_lock(&mem->mm_lock);
 
         do {
                 struct i915_buddy_block *block;
                 unsigned int order;
 
-                order = min_t(u32, fls(n_pages) - 1, max_order);
+                order = fls(n_pages) - 1;
                 GEM_BUG_ON(order > mem->mm.max_order);
                 GEM_BUG_ON(order < min_order);
 
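
With the cap removed, a single buddy block can now be larger than the DMA segment
limit; per the commit message, the companion patch handles this when building the
scatterlist by splitting an oversized block across several entries. The fragment
below is only a hypothetical sketch of such a split loop; emit_segment() and
add_block_to_sg() are invented names, not the driver's API.

#include <stdio.h>

/*
 * Hypothetical sketch only: emit_segment() and add_block_to_sg() stand in
 * for the real scatterlist bookkeeping in the driver.
 */
static void emit_segment(unsigned long long offset, unsigned long long len)
{
        printf("sg entry: offset=%llu len=%llu\n", offset, len);
}

static void add_block_to_sg(unsigned long long offset,
                            unsigned long long size,
                            unsigned long long max_segment)
{
        /* chop one oversized block into pieces no larger than max_segment */
        while (size) {
                unsigned long long len = size < max_segment ? size : max_segment;

                emit_segment(offset, len);
                offset += len;
                size -= len;
        }
}

int main(void)
{
        /* a 6G block against a 4G segment cap yields two entries: 4G + 2G */
        add_block_to_sg(0, 6ULL << 30, 4ULL << 30);
        return 0;
}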