Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c')
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 789
1 file changed, 296 insertions, 493 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 7643a30ba4cd..36d069504836 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -10,7 +10,6 @@
 #include <linux/uaccess.h>
 
 #include <drm/drm_syncobj.h>
-#include <drm/i915_drm.h>
 
 #include "display/intel_frontbuffer.h"
 
@@ -28,6 +27,19 @@
 #include "i915_sw_fence_work.h"
 #include "i915_trace.h"
 
+struct eb_vma {
+	struct i915_vma *vma;
+	unsigned int flags;
+
+	/** This vma's place in the execbuf reservation list */
+	struct drm_i915_gem_exec_object2 *exec;
+	struct list_head bind_link;
+	struct list_head reloc_link;
+
+	struct hlist_node node;
+	u32 handle;
+};
+
 enum {
 	FORCE_CPU_RELOC = 1,
 	FORCE_GTT_RELOC,
@@ -35,17 +47,15 @@ enum {
 #define DBG_FORCE_RELOC 0 /* choose one of the above! */
 };
 
-#define __EXEC_OBJECT_HAS_REF		BIT(31)
-#define __EXEC_OBJECT_HAS_PIN		BIT(30)
-#define __EXEC_OBJECT_HAS_FENCE		BIT(29)
-#define __EXEC_OBJECT_NEEDS_MAP		BIT(28)
-#define __EXEC_OBJECT_NEEDS_BIAS	BIT(27)
-#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 27) /* all of the above */
+#define __EXEC_OBJECT_HAS_PIN		BIT(31)
+#define __EXEC_OBJECT_HAS_FENCE		BIT(30)
+#define __EXEC_OBJECT_NEEDS_MAP		BIT(29)
+#define __EXEC_OBJECT_NEEDS_BIAS	BIT(28)
+#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 28) /* all of the above */
 #define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
 
 #define __EXEC_HAS_RELOC	BIT(31)
-#define __EXEC_VALIDATED	BIT(30)
-#define __EXEC_INTERNAL_FLAGS	(~0u << 30)
+#define __EXEC_INTERNAL_FLAGS	(~0u << 31)
 #define UPDATE			PIN_OFFSET_FIXED
 
 #define BATCH_OFFSET_BIAS (256*1024)
@@ -220,15 +230,14 @@ struct i915_execbuffer {
 	struct drm_file *file; /** per-file lookup tables and limits */
 	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
 	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
-	struct i915_vma **vma;
-	unsigned int *flags;
+	struct eb_vma *vma;
 
 	struct intel_engine_cs *engine; /** engine to queue the request to */
 	struct intel_context *context; /* logical state for the request */
 	struct i915_gem_context *gem_context; /** caller's context */
 
 	struct i915_request *request; /** our request to build */
-	struct i915_vma *batch; /** identity of the batch obj/vma */
+	struct eb_vma *batch; /** identity of the batch obj/vma */
 	struct i915_vma *trampoline; /** trampoline used for chaining */
 
 	/** actual size of execobj[] as we may extend it for the cmdparser */
@@ -276,8 +285,6 @@ struct i915_execbuffer {
 	struct hlist_head *buckets; /** ht for relocation handles */
 };
 
-#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])
-
 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
 {
 	return intel_engine_requires_cmd_parser(eb->engine) ||
@@ -364,9 +371,9 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
 static inline bool
 eb_pin_vma(struct i915_execbuffer *eb,
 	   const struct drm_i915_gem_exec_object2 *entry,
-	   struct i915_vma *vma)
+	   struct eb_vma *ev)
 {
-	unsigned int exec_flags = *vma->exec_flags;
+	struct i915_vma *vma = ev->vma;
 	u64 pin_flags;
 
 	if (vma->node.size)
@@ -375,24 +382,24 @@ eb_pin_vma(struct i915_execbuffer *eb,
 		pin_flags = entry->offset & PIN_OFFSET_MASK;
 
 	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
-	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
+	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
 		pin_flags |= PIN_GLOBAL;
 
 	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
 		return false;
 
-	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
+	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
 		if (unlikely(i915_vma_pin_fence(vma))) {
 			i915_vma_unpin(vma);
 			return false;
 		}
 
 		if (vma->fence)
-			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
+			ev->flags |= __EXEC_OBJECT_HAS_FENCE;
 	}
 
-	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
-	return !eb_vma_misplaced(entry, vma, exec_flags);
+	ev->flags |= __EXEC_OBJECT_HAS_PIN;
+	return !eb_vma_misplaced(entry, vma, ev->flags);
 }
 
 static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
@@ -406,13 +413,13 @@ static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
 }
 
 static inline void
-eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
+eb_unreserve_vma(struct eb_vma *ev)
 {
-	if (!(*flags & __EXEC_OBJECT_HAS_PIN))
+	if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
 		return;
 
-	__eb_unreserve_vma(vma, *flags);
-	*flags &= ~__EXEC_OBJECT_RESERVED;
+	__eb_unreserve_vma(ev->vma, ev->flags);
+	ev->flags &= ~__EXEC_OBJECT_RESERVED;
 }
 
 static int
@@ -442,13 +449,6 @@ eb_validate_vma(struct i915_execbuffer *eb,
 	} else {
 		entry->pad_to_size = 0;
 	}
-
-	if (unlikely(vma->exec_flags)) {
-		DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
-			  entry->handle, (int)(entry - eb->exec));
-		return -EINVAL;
-	}
-
 	/*
	 * From drm_mm perspective address space is continuous,
	 * so from this point we're always using non-canonical
@@ -471,41 +471,29 @@ eb_validate_vma(struct i915_execbuffer *eb,
 	return 0;
 }
 
-static int
+static void
 eb_add_vma(struct i915_execbuffer *eb,
 	   unsigned int i, unsigned batch_idx,
 	   struct i915_vma *vma)
 {
 	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
-	int err;
+	struct eb_vma *ev = &eb->vma[i];
 
 	GEM_BUG_ON(i915_vma_is_closed(vma));
 
-	if (!(eb->args->flags & __EXEC_VALIDATED)) {
-		err = eb_validate_vma(eb, entry, vma);
-		if (unlikely(err))
-			return err;
-	}
+	ev->vma = i915_vma_get(vma);
+	ev->exec = entry;
+	ev->flags = entry->flags;
 
 	if (eb->lut_size > 0) {
-		vma->exec_handle = entry->handle;
-		hlist_add_head(&vma->exec_node,
+		ev->handle = entry->handle;
+		hlist_add_head(&ev->node,
 			       &eb->buckets[hash_32(entry->handle,
 						    eb->lut_size)]);
 	}
 
 	if (entry->relocation_count)
-		list_add_tail(&vma->reloc_link, &eb->relocs);
-
-	/*
-	 * Stash a pointer from the vma to execobj, so we can query its flags,
-	 * size, alignment etc as provided by the user. Also we stash a pointer
-	 * to the vma inside the execobj so that we can use a direct lookup
-	 * to find the right target VMA when doing relocations.
-	 */
-	eb->vma[i] = vma;
-	eb->flags[i] = entry->flags;
-	vma->exec_flags = &eb->flags[i];
+		list_add_tail(&ev->reloc_link, &eb->relocs);
 
 	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
@@ -518,30 +506,23 @@
	 */
 	if (i == batch_idx) {
 		if (entry->relocation_count &&
-		    !(eb->flags[i] & EXEC_OBJECT_PINNED))
-			eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
+		    !(ev->flags & EXEC_OBJECT_PINNED))
+			ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
 		if (eb->reloc_cache.has_fence)
-			eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
+			ev->flags |= EXEC_OBJECT_NEEDS_FENCE;
 
-		eb->batch = vma;
+		eb->batch = ev;
 	}
 
-	err = 0;
-	if (eb_pin_vma(eb, entry, vma)) {
+	if (eb_pin_vma(eb, entry, ev)) {
 		if (entry->offset != vma->node.start) {
 			entry->offset = vma->node.start | UPDATE;
 			eb->args->flags |= __EXEC_HAS_RELOC;
 		}
 	} else {
-		eb_unreserve_vma(vma, vma->exec_flags);
-
-		list_add_tail(&vma->exec_link, &eb->unbound);
-		if (drm_mm_node_allocated(&vma->node))
-			err = i915_vma_unbind(vma);
-		if (unlikely(err))
-			vma->exec_flags = NULL;
+		eb_unreserve_vma(ev);
+		list_add_tail(&ev->bind_link, &eb->unbound);
 	}
-	return err;
 }
 
 static inline int use_cpu_reloc(const struct reloc_cache *cache,
@@ -562,14 +543,14 @@ static inline int use_cpu_reloc(const struct reloc_cache *cache,
 }
 
 static int eb_reserve_vma(const struct i915_execbuffer *eb,
-			  struct i915_vma *vma)
+			  struct eb_vma *ev,
+			  u64 pin_flags)
 {
-	struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
-	unsigned int exec_flags = *vma->exec_flags;
-	u64 pin_flags;
+	struct drm_i915_gem_exec_object2 *entry = ev->exec;
+	unsigned int exec_flags = ev->flags;
+	struct i915_vma *vma = ev->vma;
 	int err;
 
-	pin_flags = PIN_USER | PIN_NONBLOCK;
 	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
 		pin_flags |= PIN_GLOBAL;
 
@@ -583,11 +564,16 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
 		pin_flags |= PIN_MAPPABLE;
 
-	if (exec_flags & EXEC_OBJECT_PINNED) {
+	if (exec_flags & EXEC_OBJECT_PINNED)
 		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
-		pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
-	} else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
+	else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
 		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+
+	if (drm_mm_node_allocated(&vma->node) &&
+	    eb_vma_misplaced(entry, vma, ev->flags)) {
+		err = i915_vma_unbind(vma);
+		if (err)
+			return err;
 	}
 
 	err = i915_vma_pin(vma,
@@ -612,8 +598,8 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
 	}
 
-	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
-	GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));
+	ev->flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
+	GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
 
 	return 0;
 }
@@ -621,10 +607,11 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 static int eb_reserve(struct i915_execbuffer *eb)
 {
 	const unsigned int count = eb->buffer_count;
+	unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
 	struct list_head last;
-	struct i915_vma *vma;
+	struct eb_vma *ev;
 	unsigned int i, pass;
-	int err;
+	int err = 0;
 
 	/*
	 * Attempt to pin all of the buffers into the GTT.
@@ -640,44 +627,54 @@ static int eb_reserve(struct i915_execbuffer *eb)
	 * room for the earlier objects *unless* we need to defragment.
	 */
+	if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex))
+		return -EINTR;
+
 	pass = 0;
-	err = 0;
 	do {
-		list_for_each_entry(vma, &eb->unbound, exec_link) {
-			err = eb_reserve_vma(eb, vma);
+		list_for_each_entry(ev, &eb->unbound, bind_link) {
+			err = eb_reserve_vma(eb, ev, pin_flags);
 			if (err)
 				break;
 		}
-		if (err != -ENOSPC)
-			return err;
+		if (!(err == -ENOSPC || err == -EAGAIN))
+			break;
 
 		/* Resort *all* the objects into priority order */
 		INIT_LIST_HEAD(&eb->unbound);
 		INIT_LIST_HEAD(&last);
 		for (i = 0; i < count; i++) {
-			unsigned int flags = eb->flags[i];
-			struct i915_vma *vma = eb->vma[i];
+			unsigned int flags;
 
+			ev = &eb->vma[i];
+			flags = ev->flags;
 			if (flags & EXEC_OBJECT_PINNED &&
 			    flags & __EXEC_OBJECT_HAS_PIN)
 				continue;
 
-			eb_unreserve_vma(vma, &eb->flags[i]);
+			eb_unreserve_vma(ev);
 
 			if (flags & EXEC_OBJECT_PINNED)
 				/* Pinned must have their slot */
-				list_add(&vma->exec_link, &eb->unbound);
+				list_add(&ev->bind_link, &eb->unbound);
 			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
 				/* Map require the lowest 256MiB (aperture) */
-				list_add_tail(&vma->exec_link, &eb->unbound);
+				list_add_tail(&ev->bind_link, &eb->unbound);
 			else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
 				/* Prioritise 4GiB region for restricted bo */
-				list_add(&vma->exec_link, &last);
+				list_add(&ev->bind_link, &last);
 			else
-				list_add_tail(&vma->exec_link, &last);
+				list_add_tail(&ev->bind_link, &last);
 		}
 		list_splice_tail(&last, &eb->unbound);
 
+		if (err == -EAGAIN) {
+			mutex_unlock(&eb->i915->drm.struct_mutex);
+			flush_workqueue(eb->i915->mm.userptr_wq);
+			mutex_lock(&eb->i915->drm.struct_mutex);
+			continue;
+		}
+
 		switch (pass++) {
 		case 0:
 			break;
@@ -688,13 +685,20 @@ static int eb_reserve(struct i915_execbuffer *eb)
 			err = i915_gem_evict_vm(eb->context->vm);
 			mutex_unlock(&eb->context->vm->mutex);
 			if (err)
-				return err;
+				goto unlock;
 			break;
 
 		default:
-			return -ENOSPC;
+			err = -ENOSPC;
+			goto unlock;
 		}
+
+		pin_flags = PIN_USER;
 	} while (1);
+
+unlock:
+	mutex_unlock(&eb->i915->drm.struct_mutex);
+	return err;
 }
 
 static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
@@ -731,17 +735,14 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 	unsigned int i, batch;
 	int err;
 
+	if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
+		return -ENOENT;
+
 	INIT_LIST_HEAD(&eb->relocs);
 	INIT_LIST_HEAD(&eb->unbound);
 
 	batch = eb_batch_index(eb);
 
-	mutex_lock(&eb->gem_context->mutex);
-	if (unlikely(i915_gem_context_is_closed(eb->gem_context))) {
-		err = -ENOENT;
-		goto err_ctx;
-	}
-
 	for (i = 0; i < eb->buffer_count; i++) {
 		u32 handle = eb->exec[i].handle;
 		struct i915_lut_handle *lut;
@@ -786,45 +787,37 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 		i915_gem_object_unlock(obj);
 
 add_vma:
-		err = eb_add_vma(eb, i, batch, vma);
+		err = eb_validate_vma(eb, &eb->exec[i], vma);
 		if (unlikely(err))
 			goto err_vma;
 
-		GEM_BUG_ON(vma != eb->vma[i]);
-		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
-		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
-			   eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
+		eb_add_vma(eb, i, batch, vma);
 	}
 
-	mutex_unlock(&eb->gem_context->mutex);
-
-	eb->args->flags |= __EXEC_VALIDATED;
-	return eb_reserve(eb);
+	return 0;
 
err_obj:
 	i915_gem_object_put(obj);
err_vma:
-	eb->vma[i] = NULL;
-err_ctx:
-	mutex_unlock(&eb->gem_context->mutex);
+	eb->vma[i].vma = NULL;
 	return err;
 }
 
-static struct i915_vma *
+static struct eb_vma *
 eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
 {
 	if (eb->lut_size < 0) {
 		if (handle >= -eb->lut_size)
 			return NULL;
-		return eb->vma[handle];
+		return &eb->vma[handle];
 	} else {
 		struct hlist_head *head;
-		struct i915_vma *vma;
+		struct eb_vma *ev;
 
 		head = &eb->buckets[hash_32(handle, eb->lut_size)];
-		hlist_for_each_entry(vma, head, exec_node) {
-			if (vma->exec_handle == handle)
-				return vma;
+		hlist_for_each_entry(ev, head, node) {
+			if (ev->handle == handle)
+				return ev;
 		}
 		return NULL;
 	}
@@ -836,32 +829,21 @@ static void eb_release_vmas(const struct i915_execbuffer *eb)
 	unsigned int i;
 
 	for (i = 0; i < count; i++) {
-		struct i915_vma *vma = eb->vma[i];
-		unsigned int flags = eb->flags[i];
+		struct eb_vma *ev = &eb->vma[i];
+		struct i915_vma *vma = ev->vma;
 
 		if (!vma)
 			break;
 
-		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
-		vma->exec_flags = NULL;
-		eb->vma[i] = NULL;
+		eb->vma[i].vma = NULL;
 
-		if (flags & __EXEC_OBJECT_HAS_PIN)
-			__eb_unreserve_vma(vma, flags);
+		if (ev->flags & __EXEC_OBJECT_HAS_PIN)
+			__eb_unreserve_vma(vma, ev->flags);
 
-		if (flags & __EXEC_OBJECT_HAS_REF)
-			i915_vma_put(vma);
+		i915_vma_put(vma);
 	}
 }
 
-static void eb_reset_vmas(const struct i915_execbuffer *eb)
-{
-	eb_release_vmas(eb);
-	if (eb->lut_size > 0)
-		memset(eb->buckets, 0,
-		       sizeof(struct hlist_head) << eb->lut_size);
-}
-
 static void eb_destroy(const struct i915_execbuffer *eb)
 {
 	GEM_BUG_ON(eb->reloc_cache.rq);
@@ -1197,7 +1179,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	goto out_pool;
 
 skip_request:
-	i915_request_skip(rq, err);
+	i915_request_set_error_once(rq, err);
 err_request:
 	i915_request_add(rq);
 err_unpin:
@@ -1328,10 +1310,11 @@ out:
 
 static u64
 eb_relocate_entry(struct i915_execbuffer *eb,
-		  struct i915_vma *vma,
+		  struct eb_vma *ev,
		  const struct drm_i915_gem_relocation_entry *reloc)
 {
-	struct i915_vma *target;
+	struct drm_i915_private *i915 = eb->i915;
+	struct eb_vma *target;
 	int err;
 
 	/* we've already hold a reference to all valid objects */
@@ -1341,7 +1324,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
 
 	/* Validate that the target is in a valid r/w GPU domain */
 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
-		DRM_DEBUG("reloc with multiple write domains: "
+		drm_dbg(&i915->drm, "reloc with multiple write domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
@@ -1352,7 +1335,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
 	}
 	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
-		DRM_DEBUG("reloc with read/write non-GPU domains: "
+		drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
@@ -1363,7 +1346,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
 	}
 
 	if (reloc->write_domain) {
-		*target->exec_flags |= EXEC_OBJECT_WRITE;
+		target->flags |= EXEC_OBJECT_WRITE;
 
 		/*
		 * Sandybridge PPGTT errata: We need a global gtt mapping
@@ -1373,7 +1356,8 @@ eb_relocate_entry(struct i915_execbuffer *eb,
		 */
 		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
		    IS_GEN(eb->i915, 6)) {
-			err = i915_vma_bind(target, target->obj->cache_level,
+			err = i915_vma_bind(target->vma,
+					    target->vma->obj->cache_level,
					    PIN_GLOBAL, NULL);
			if (WARN_ONCE(err,
				      "Unexpected failure to bind target VMA!"))
@@ -1386,21 +1370,21 @@ eb_relocate_entry(struct i915_execbuffer *eb,
	 * more work needs to be done.
	 */
 	if (!DBG_FORCE_RELOC &&
-	    gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
+	    gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
 		return 0;
 
 	/* Check that the relocation address is valid... */
 	if (unlikely(reloc->offset >
-		     vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
-		DRM_DEBUG("Relocation beyond object bounds: "
+		     ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
+		drm_dbg(&i915->drm, "Relocation beyond object bounds: "
			  "target %d offset %d size %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset,
-			  (int)vma->size);
+			  (int)ev->vma->size);
 		return -EINVAL;
 	}
 	if (unlikely(reloc->offset & 3)) {
-		DRM_DEBUG("Relocation not 4-byte aligned: "
+		drm_dbg(&i915->drm, "Relocation not 4-byte aligned: "
			  "target %d offset %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset);
@@ -1415,18 +1399,18 @@ eb_relocate_entry(struct i915_execbuffer *eb,
	 * do relocations we are already stalling, disable the user's opt
	 * out of our synchronisation.
	 */
-	*vma->exec_flags &= ~EXEC_OBJECT_ASYNC;
+	ev->flags &= ~EXEC_OBJECT_ASYNC;
 
 	/* and update the user's relocation entry */
-	return relocate_entry(vma, reloc, eb, target);
+	return relocate_entry(ev->vma, reloc, eb, target->vma);
 }
 
-static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
+static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
 {
 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
 	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
 	struct drm_i915_gem_relocation_entry __user *urelocs;
-	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
+	const struct drm_i915_gem_exec_object2 *entry = ev->exec;
 	unsigned int remain;
 
 	urelocs = u64_to_user_ptr(entry->relocs_ptr);
@@ -1456,9 +1440,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
		 * we would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
		 */
-		pagefault_disable();
-		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
-		pagefault_enable();
+		copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
 		if (unlikely(copied)) {
 			remain = -EFAULT;
 			goto out;
@@ -1466,7 +1448,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
 
 		remain -= count;
 		do {
-			u64 offset = eb_relocate_entry(eb, vma, r);
+			u64 offset = eb_relocate_entry(eb, ev, r);
 
 			if (likely(offset == 0)) {
 			} else if ((s64)offset < 0) {
@@ -1508,281 +1490,34 @@ out:
 	return remain;
 }
 
-static int
-eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
-{
-	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
-	struct drm_i915_gem_relocation_entry *relocs =
-		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
-	unsigned int i;
-	int err;
-
-	for (i = 0; i < entry->relocation_count; i++) {
-		u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);
-
-		if ((s64)offset < 0) {
-			err = (int)offset;
-			goto err;
-		}
-	}
-	err = 0;
-err:
-	reloc_cache_reset(&eb->reloc_cache);
-	return err;
-}
-
-static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
-{
-	const char __user *addr, *end;
-	unsigned long size;
-	char __maybe_unused c;
-
-	size = entry->relocation_count;
-	if (size == 0)
-		return 0;
-
-	if (size > N_RELOC(ULONG_MAX))
-		return -EINVAL;
-
-	addr = u64_to_user_ptr(entry->relocs_ptr);
-	size *= sizeof(struct drm_i915_gem_relocation_entry);
-	if (!access_ok(addr, size))
-		return -EFAULT;
-
-	end = addr + size;
-	for (; addr < end; addr += PAGE_SIZE) {
-		int err = __get_user(c, addr);
-		if (err)
-			return err;
-	}
-	return __get_user(c, end - 1);
-}
-
-static int eb_copy_relocations(const struct i915_execbuffer *eb)
+static int eb_relocate(struct i915_execbuffer *eb)
 {
-	struct drm_i915_gem_relocation_entry *relocs;
-	const unsigned int count = eb->buffer_count;
-	unsigned int i;
 	int err;
 
-	for (i = 0; i < count; i++) {
-		const unsigned int nreloc = eb->exec[i].relocation_count;
-		struct drm_i915_gem_relocation_entry __user *urelocs;
-		unsigned long size;
-		unsigned long copied;
-
-		if (nreloc == 0)
-			continue;
-
-		err = check_relocations(&eb->exec[i]);
-		if (err)
-			goto err;
-
-		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
-		size = nreloc * sizeof(*relocs);
-
-		relocs = kvmalloc_array(size, 1, GFP_KERNEL);
-		if (!relocs) {
-			err = -ENOMEM;
-			goto err;
-		}
-
-		/* copy_from_user is limited to < 4GiB */
-		copied = 0;
-		do {
-			unsigned int len =
-				min_t(u64, BIT_ULL(31), size - copied);
-
-			if (__copy_from_user((char *)relocs + copied,
-					     (char __user *)urelocs + copied,
-					     len))
-				goto end;
-
-			copied += len;
-		} while (copied < size);
-
-		/*
-		 * As we do not update the known relocation offsets after
-		 * relocating (due to the complexities in lock handling),
-		 * we need to mark them as invalid now so that we force the
-		 * relocation processing next time. Just in case the target
-		 * object is evicted and then rebound into its old
-		 * presumed_offset before the next execbuffer - if that
-		 * happened we would make the mistake of assuming that the
-		 * relocations were valid.
-		 */
-		if (!user_access_begin(urelocs, size))
-			goto end;
-
-		for (copied = 0; copied < nreloc; copied++)
-			unsafe_put_user(-1,
-					&urelocs[copied].presumed_offset,
-					end_user);
-		user_access_end();
-
-		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
-	}
-
-	return 0;
-
-end_user:
-	user_access_end();
-end:
-	kvfree(relocs);
-	err = -EFAULT;
-err:
-	while (i--) {
-		relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
-		if (eb->exec[i].relocation_count)
-			kvfree(relocs);
-	}
-	return err;
-}
-
-static int eb_prefault_relocations(const struct i915_execbuffer *eb)
-{
-	const unsigned int count = eb->buffer_count;
-	unsigned int i;
-
-	if (unlikely(i915_modparams.prefault_disable))
-		return 0;
-
-	for (i = 0; i < count; i++) {
-		int err;
-
-		err = check_relocations(&eb->exec[i]);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
-{
-	struct drm_device *dev = &eb->i915->drm;
-	bool have_copy = false;
-	struct i915_vma *vma;
-	int err = 0;
-
-repeat:
-	if (signal_pending(current)) {
-		err = -ERESTARTSYS;
-		goto out;
-	}
-
-	/* We may process another execbuffer during the unlock... */
-	eb_reset_vmas(eb);
-	mutex_unlock(&dev->struct_mutex);
-
-	/*
-	 * We take 3 passes through the slowpatch.
-	 *
-	 * 1 - we try to just prefault all the user relocation entries and
-	 * then attempt to reuse the atomic pagefault disabled fast path again.
-	 *
-	 * 2 - we copy the user entries to a local buffer here outside of the
-	 * local and allow ourselves to wait upon any rendering before
-	 * relocations
-	 *
-	 * 3 - we already have a local copy of the relocation entries, but
-	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
-	 */
-	if (!err) {
-		err = eb_prefault_relocations(eb);
-	} else if (!have_copy) {
-		err = eb_copy_relocations(eb);
-		have_copy = err == 0;
-	} else {
-		cond_resched();
-		err = 0;
-	}
-	if (err) {
-		mutex_lock(&dev->struct_mutex);
-		goto out;
-	}
-
-	/* A frequent cause for EAGAIN are currently unavailable client pages */
-	flush_workqueue(eb->i915->mm.userptr_wq);
-
-	err = i915_mutex_lock_interruptible(dev);
-	if (err) {
-		mutex_lock(&dev->struct_mutex);
-		goto out;
-	}
-
-	/* reacquire the objects */
+	mutex_lock(&eb->gem_context->mutex);
 	err = eb_lookup_vmas(eb);
+	mutex_unlock(&eb->gem_context->mutex);
 	if (err)
-		goto err;
-
-	GEM_BUG_ON(!eb->batch);
-
-	list_for_each_entry(vma, &eb->relocs, reloc_link) {
-		if (!have_copy) {
-			pagefault_disable();
-			err = eb_relocate_vma(eb, vma);
-			pagefault_enable();
-			if (err)
-				goto repeat;
-		} else {
-			err = eb_relocate_vma_slow(eb, vma);
-			if (err)
-				goto err;
-		}
-	}
-
-	/*
-	 * Leave the user relocations as are, this is the painfully slow path,
-	 * and we want to avoid the complication of dropping the lock whilst
-	 * having buffers reserved in the aperture and so causing spurious
-	 * ENOSPC for random operations.
-	 */
-
-err:
-	if (err == -EAGAIN)
-		goto repeat;
-
-out:
-	if (have_copy) {
-		const unsigned int count = eb->buffer_count;
-		unsigned int i;
-
-		for (i = 0; i < count; i++) {
-			const struct drm_i915_gem_exec_object2 *entry =
-				&eb->exec[i];
-			struct drm_i915_gem_relocation_entry *relocs;
-
-			if (!entry->relocation_count)
-				continue;
+		return err;
 
-			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
-			kvfree(relocs);
-		}
+	if (!list_empty(&eb->unbound)) {
+		err = eb_reserve(eb);
+		if (err)
+			return err;
 	}
 
-	return err;
-}
-
-static int eb_relocate(struct i915_execbuffer *eb)
-{
-	if (eb_lookup_vmas(eb))
-		goto slow;
-
 	/* The objects are in their final locations, apply the relocations. */
 	if (eb->args->flags & __EXEC_HAS_RELOC) {
-		struct i915_vma *vma;
+		struct eb_vma *ev;
 
-		list_for_each_entry(vma, &eb->relocs, reloc_link) {
-			if (eb_relocate_vma(eb, vma))
-				goto slow;
+		list_for_each_entry(ev, &eb->relocs, reloc_link) {
+			err = eb_relocate_vma(eb, ev);
+			if (err)
+				return err;
 		}
 	}
 
 	return 0;
-
-slow:
-	return eb_relocate_slow(eb);
 }
 
 static int eb_move_to_gpu(struct i915_execbuffer *eb)
@@ -1795,27 +1530,19 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 	ww_acquire_init(&acquire, &reservation_ww_class);
 
 	for (i = 0; i < count; i++) {
-		struct i915_vma *vma = eb->vma[i];
+		struct eb_vma *ev = &eb->vma[i];
+		struct i915_vma *vma = ev->vma;
 
 		err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
-		if (!err)
-			continue;
-
-		GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */
-
 		if (err == -EDEADLK) {
 			GEM_BUG_ON(i == 0);
 			do {
 				int j = i - 1;
 
-				ww_mutex_unlock(&eb->vma[j]->resv->lock);
+				ww_mutex_unlock(&eb->vma[j].vma->resv->lock);
 
-				swap(eb->flags[i], eb->flags[j]);
 				swap(eb->vma[i],  eb->vma[j]);
-				eb->vma[i]->exec_flags = &eb->flags[i];
 			} while (--i);
-			GEM_BUG_ON(vma != eb->vma[0]);
-			vma->exec_flags = &eb->flags[0];
 
 			err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
							       &acquire);
@@ -1826,8 +1553,9 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 	ww_acquire_done(&acquire);
 
 	while (i--) {
-		unsigned int flags = eb->flags[i];
-		struct i915_vma *vma = eb->vma[i];
+		struct eb_vma *ev = &eb->vma[i];
+		struct i915_vma *vma = ev->vma;
+		unsigned int flags = ev->flags;
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		assert_vma_held(vma);
@@ -1871,10 +1599,9 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 			i915_vma_unlock(vma);
 
 		__eb_unreserve_vma(vma, flags);
-		vma->exec_flags = NULL;
+		i915_vma_put(vma);
 
-		if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
-			i915_vma_put(vma);
+		ev->vma = NULL;
 	}
 	ww_acquire_fini(&acquire);
 
@@ -1888,7 +1615,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 	return 0;
 
 err_skip:
-	i915_request_skip(eb->request, err);
+	i915_request_set_error_once(eb->request, err);
 	return err;
 }
 
@@ -1922,7 +1649,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 	int i;
 
 	if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
-		DRM_DEBUG("sol reset is gen7/rcs only\n");
+		drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
 
@@ -2009,7 +1736,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
 	if (!pw)
 		return -ENOMEM;
 
-	err = i915_active_acquire(&eb->batch->active);
+	err = i915_active_acquire(&eb->batch->vma->active);
 	if (err)
 		goto err_free;
 
@@ -2026,7 +1753,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
 	dma_fence_work_init(&pw->base, &eb_parse_ops);
 
 	pw->engine = eb->engine;
-	pw->batch = eb->batch;
+	pw->batch = eb->batch->vma;
 	pw->batch_offset = eb->batch_start_offset;
 	pw->batch_length = eb->batch_len;
 	pw->shadow = shadow;
@@ -2068,7 +1795,7 @@ err_trampoline:
 err_shadow:
 	i915_active_release(&shadow->active);
 err_batch:
-	i915_active_release(&eb->batch->active);
+	i915_active_release(&eb->batch->vma->active);
 err_free:
 	kfree(pw);
 	return err;
@@ -2076,6 +1803,7 @@ err_free:
 
 static int eb_parse(struct i915_execbuffer *eb)
 {
+	struct drm_i915_private *i915 = eb->i915;
 	struct intel_engine_pool_node *pool;
 	struct i915_vma *shadow, *trampoline;
 	unsigned int len;
@@ -2091,7 +1819,8 @@ static int eb_parse(struct i915_execbuffer *eb)
		 * post-scan tampering
		 */
 		if (!eb->context->vm->has_read_only) {
-			DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+			drm_dbg(&i915->drm,
+				"Cannot prevent post-scan tampering without RO capable vm\n");
 			return -EINVAL;
 		}
 	} else {
@@ -2129,15 +1858,12 @@ static int eb_parse(struct i915_execbuffer *eb)
 	if (err)
 		goto err_trampoline;
 
-	eb->vma[eb->buffer_count] = i915_vma_get(shadow);
-	eb->flags[eb->buffer_count] =
-		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
-	shadow->exec_flags = &eb->flags[eb->buffer_count];
-	eb->buffer_count++;
+	eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
+	eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
+	eb->batch = &eb->vma[eb->buffer_count++];
 
 	eb->trampoline = trampoline;
 	eb->batch_start_offset = 0;
-	eb->batch = shadow;
 
 	shadow->private = pool;
 	return 0;
@@ -2164,7 +1890,7 @@ add_to_client(struct i915_request *rq, struct drm_file *file)
 	spin_unlock(&file_priv->mm.lock);
 }
 
-static int eb_submit(struct i915_execbuffer *eb)
+static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
 {
 	int err;
 
@@ -2191,7 +1917,7 @@ static int eb_submit(struct i915_execbuffer *eb)
 	}
 
 	err = eb->engine->emit_bb_start(eb->request,
-					eb->batch->node.start +
+					batch->node.start +
					eb->batch_start_offset,
					eb->batch_len,
					eb->batch_flags);
@@ -2326,15 +2052,22 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
 	intel_context_timeline_unlock(tl);
 
 	if (rq) {
-		if (i915_request_wait(rq,
-				      I915_WAIT_INTERRUPTIBLE,
-				      MAX_SCHEDULE_TIMEOUT) < 0) {
-			i915_request_put(rq);
-			err = -EINTR;
-			goto err_exit;
-		}
+		bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
+		long timeout;
 
+		timeout = MAX_SCHEDULE_TIMEOUT;
+		if (nonblock)
+			timeout = 0;
+
+		timeout = i915_request_wait(rq,
+					    I915_WAIT_INTERRUPTIBLE,
+					    timeout);
 		i915_request_put(rq);
+
+		if (timeout < 0) {
+			err = nonblock ? -EWOULDBLOCK : timeout;
+			goto err_exit;
+		}
 	}
 
 	eb->engine = ce->engine;
@@ -2372,8 +2105,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
 
 	if (user_ring_id != I915_EXEC_BSD &&
	    (args->flags & I915_EXEC_BSD_MASK)) {
-		DRM_DEBUG("execbuf with non bsd ring but with invalid "
-			  "bsd dispatch flags: %d\n", (int)(args->flags));
+		drm_dbg(&i915->drm,
+			"execbuf with non bsd ring but with invalid "
+			"bsd dispatch flags: %d\n", (int)(args->flags));
 		return -1;
 	}
 
@@ -2387,8 +2121,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
 			bsd_idx >>= I915_EXEC_BSD_SHIFT;
 			bsd_idx--;
 		} else {
-			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
-				  bsd_idx);
+			drm_dbg(&i915->drm,
+				"execbuf with unknown bsd ring: %u\n",
+				bsd_idx);
 			return -1;
 		}
 
@@ -2396,7 +2131,8 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
 	}
 
 	if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
-		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
+		drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n",
+			user_ring_id);
 		return -1;
 	}
 
@@ -2556,6 +2292,73 @@ signal_fence_array(struct i915_execbuffer *eb,
 	}
 }
 
+static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
+{
+	struct i915_request *rq, *rn;
+
+	list_for_each_entry_safe(rq, rn, &tl->requests, link)
+		if (rq == end || !i915_request_retire(rq))
+			break;
+}
+
+static void eb_request_add(struct i915_execbuffer *eb)
+{
+	struct i915_request *rq = eb->request;
+	struct intel_timeline * const tl = i915_request_timeline(rq);
+	struct i915_sched_attr attr = {};
+	struct i915_request *prev;
+
+	lockdep_assert_held(&tl->mutex);
+	lockdep_unpin_lock(&tl->mutex, rq->cookie);
+
+	trace_i915_request_add(rq);
+
+	prev = __i915_request_commit(rq);
+
+	/* Check that the context wasn't destroyed before submission */
+	if (likely(!intel_context_is_closed(eb->context))) {
+		attr = eb->gem_context->sched;
+
+		/*
+		 * Boost actual workloads past semaphores!
+		 *
+		 * With semaphores we spin on one engine waiting for another,
+		 * simply to reduce the latency of starting our work when
+		 * the signaler completes. However, if there is any other
+		 * work that we could be doing on this engine instead, that
+		 * is better utilisation and will reduce the overall duration
+		 * of the current work. To avoid PI boosting a semaphore
+		 * far in the distance past over useful work, we keep a history
+		 * of any semaphore use along our dependency chain.
+		 */
+		if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
+			attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+
+		/*
+		 * Boost priorities to new clients (new request flows).
+		 *
+		 * Allow interactive/synchronous clients to jump ahead of
+		 * the bulk clients. (FQ_CODEL)
+		 */
+		if (list_empty(&rq->sched.signalers_list))
+			attr.priority |= I915_PRIORITY_WAIT;
+	} else {
+		/* Serialise with context_close via the add_to_timeline */
+		i915_request_set_error_once(rq, -ENOENT);
+		__i915_request_skip(rq);
+	}
+
+	local_bh_disable();
+	__i915_request_queue(rq, &attr);
+	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+
+	/* Try to clean up the client's timeline after submitting the request */
+	if (prev)
+		retire_requests(tl, prev);
+
+	mutex_unlock(&tl->mutex);
+}
+
 static int
 i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
@@ -2568,6 +2371,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	struct dma_fence *in_fence = NULL;
 	struct dma_fence *exec_fence = NULL;
 	struct sync_file *out_fence = NULL;
+	struct i915_vma *batch;
 	int out_fence_fd = -1;
 	int err;
 
@@ -2582,9 +2386,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		args->flags |= __EXEC_HAS_RELOC;
 
 	eb.exec = exec;
-	eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
-	eb.vma[0] = NULL;
-	eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);
+	eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
+	eb.vma[0].vma = NULL;
 
 	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
 	reloc_cache_init(&eb.reloc_cache, eb.i915);
@@ -2652,10 +2455,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	if (unlikely(err))
 		goto err_context;
 
-	err = i915_mutex_lock_interruptible(dev);
-	if (err)
-		goto err_engine;
-
 	err = eb_relocate(&eb);
 	if (err) {
 		/*
@@ -2669,20 +2468,23 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		goto err_vma;
 	}
 
-	if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
-		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
+	if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) {
+		drm_dbg(&i915->drm,
+			"Attempting to use self-modifying batch buffer\n");
 		err = -EINVAL;
 		goto err_vma;
 	}
-	if (eb.batch_start_offset > eb.batch->size ||
-	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
-		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
+
+	if (range_overflows_t(u64,
			      eb.batch_start_offset, eb.batch_len,
			      eb.batch->vma->size)) {
+		drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
 		err = -EINVAL;
 		goto err_vma;
 	}
 
 	if (eb.batch_len == 0)
-		eb.batch_len = eb.batch->size - eb.batch_start_offset;
+		eb.batch_len = eb.batch->vma->size - eb.batch_start_offset;
 
 	err = eb_parse(&eb);
 	if (err)
@@ -2692,6 +2494,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
+	batch = eb.batch->vma;
 	if (eb.batch_flags & I915_DISPATCH_SECURE) {
 		struct i915_vma *vma;
 
@@ -2705,13 +2508,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
-		vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
+		vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
-			goto err_vma;
+			goto err_parse;
 		}
 
-		eb.batch = vma;
+		batch = vma;
 	}
 
 	/* All GPU relocation batches must be submitted prior to the user rq */
@@ -2758,16 +2561,16 @@ i915_gem_do_execbuffer(struct drm_device *dev,
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
-	eb.request->batch = eb.batch;
-	if (eb.batch->private)
-		intel_engine_pool_mark_active(eb.batch->private, eb.request);
+	eb.request->batch = batch;
+	if (batch->private)
+		intel_engine_pool_mark_active(batch->private, eb.request);
 
 	trace_i915_request_queue(eb.request, eb.batch_flags);
-	err = eb_submit(&eb);
+	err = eb_submit(&eb, batch);
 err_request:
 	add_to_client(eb.request, file);
 	i915_request_get(eb.request);
-	i915_request_add(eb.request);
+	eb_request_add(&eb);
 
 	if (fences)
 		signal_fence_array(&eb, fences);
@@ -2786,16 +2589,15 @@ err_request:
 
 err_batch_unpin:
 	if (eb.batch_flags & I915_DISPATCH_SECURE)
-		i915_vma_unpin(eb.batch);
-	if (eb.batch->private)
-		intel_engine_pool_put(eb.batch->private);
+		i915_vma_unpin(batch);
+err_parse:
+	if (batch->private)
+		intel_engine_pool_put(batch->private);
 err_vma:
 	if (eb.exec)
 		eb_release_vmas(&eb);
 	if (eb.trampoline)
 		i915_vma_unpin(eb.trampoline);
-	mutex_unlock(&dev->struct_mutex);
-err_engine:
 	eb_unpin_engine(&eb);
 err_context:
 	i915_gem_context_put(eb.gem_context);
@@ -2813,9 +2615,7 @@ err_in_fence:
 
 static size_t eb_element_size(void)
 {
-	return (sizeof(struct drm_i915_gem_exec_object2) +
-		sizeof(struct i915_vma *) +
-		sizeof(unsigned int));
+	return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
 }
 
 static bool check_buffer_count(size_t count)
@@ -2839,6 +2639,7 @@ int
 i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
 {
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_gem_execbuffer *args = data;
 	struct drm_i915_gem_execbuffer2 exec2;
 	struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -2848,7 +2649,7 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
 	int err;
 
 	if (!check_buffer_count(count)) {
-		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
+		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
 		return -EINVAL;
 	}
 
@@ -2873,8 +2674,9 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
 	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
 	if (exec_list == NULL || exec2_list == NULL) {
-		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
-			  args->buffer_count);
+		drm_dbg(&i915->drm,
+			"Failed to allocate exec list for %d buffers\n",
+			args->buffer_count);
 		kvfree(exec_list);
 		kvfree(exec2_list);
 		return -ENOMEM;
@@ -2883,8 +2685,8 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec_list) * count);
 	if (err) {
-		DRM_DEBUG("copy %d exec entries failed %d\n",
-			  args->buffer_count, err);
+		drm_dbg(&i915->drm, "copy %d exec entries failed %d\n",
+			args->buffer_count, err);
 		kvfree(exec_list);
 		kvfree(exec2_list);
 		return -EFAULT;
@@ -2931,6 +2733,7 @@ int
 i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
 {
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_gem_execbuffer2 *args = data;
 	struct drm_i915_gem_exec_object2 *exec2_list;
 	struct drm_syncobj **fences = NULL;
@@ -2938,7 +2741,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 	int err;
 
 	if (!check_buffer_count(count)) {
-		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
+		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
 		return -EINVAL;
 	}
 
@@ -2950,14 +2753,14 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
 	if (exec2_list == NULL) {
-		DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
-			  count);
+		drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
+			count);
 		return -ENOMEM;
 	}
 	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec2_list) * count)) {
-		DRM_DEBUG("copy %zd exec entries failed\n", count);
+		drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count);
 		kvfree(exec2_list);
 		return -EFAULT;
 	}