author		Dave Airlie <airlied@redhat.com>	2023-08-24 23:28:15 +0300
committer	Dave Airlie <airlied@redhat.com>	2023-08-24 23:28:33 +0300
commit		bc609f4867f6a14db0efda55a7adef4dca16762e (patch)
tree		9a27798954a5b42881bf001b906a72a41e6406b8 /drivers/gpu/drm/nouveau
parent		38f88732b2928a831d794737b499c6db8da9b9ac (diff)
parent		cdf4100eaa1f4107fcf7c95b5eccca96cca6c777 (diff)
download	linux-bc609f4867f6a14db0efda55a7adef4dca16762e.tar.xz
Merge tag 'drm-misc-next-fixes-2023-08-24' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
Short summary of fixes pull:

 * gpuva: Cleanups

 * kunit: Documentation fixes

 * nouveau:
   * UAPI: Avoid implicit NO_PREFETCH flag
   * Scheduler fixes
   * Fix remap

 * ttm: Fix type conversion in tests

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20230824181241.GA6386@linux-uq9g.hotspot.internet-for-guests.com
Diffstat (limited to 'drivers/gpu/drm/nouveau')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_dma.c	7
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_dma.h	8
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_exec.c	19
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_gem.c	6
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_sched.c	22
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_uvmm.c	1
6 files changed, 54 insertions, 9 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index b90cac6d5772..b01c029f3a90 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -69,16 +69,19 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
 }

 void
-nv50_dma_push(struct nouveau_channel *chan, u64 offset, int length)
+nv50_dma_push(struct nouveau_channel *chan, u64 offset, u32 length,
+	      bool no_prefetch)
 {
 	struct nvif_user *user = &chan->drm->client.device.user;
 	struct nouveau_bo *pb = chan->push.buffer;
 	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;

 	BUG_ON(chan->dma.ib_free < 1);
+	WARN_ON(length > NV50_DMA_PUSH_MAX_LENGTH);

 	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
-	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
+	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8 |
+			(no_prefetch ? (1 << 31) : 0));

 	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
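The two words written above form one indirect-buffer (IB) entry: the first carries the low 32 bits of the pushbuf address, the second packs the high address bits, the submission length in bits 8..30, and, with this change, the NO_PREFETCH flag in bit 31. Since the length field is only 23 bits wide, lengths above 0x7fffff cannot be encoded, which is what the new WARN_ON and the NV50_DMA_PUSH_MAX_LENGTH limit (defined in nouveau_dma.h below) guard against. A standalone sketch of the same encoding, for illustration only (the helper name and fixed-width types are ours, not from the patch):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative restatement of one NV50 IB entry as built above. */
static void ib_entry_encode(uint64_t offset, uint32_t length,
			    bool no_prefetch, uint32_t entry[2])
{
	entry[0] = (uint32_t)offset;			/* lower_32_bits() */
	entry[1] = (uint32_t)(offset >> 32) |		/* upper_32_bits() */
		   (length << 8) |			/* bits 8..30      */
		   (no_prefetch ? (1u << 31) : 0);	/* bit 31          */
}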
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 035a709c7be1..1744d95b233e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -31,7 +31,8 @@
 #include "nouveau_chan.h"

 int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
-void nv50_dma_push(struct nouveau_channel *, u64 addr, int length);
+void nv50_dma_push(struct nouveau_channel *, u64 addr, u32 length,
+		   bool no_prefetch);

 /*
  * There's a hw race condition where you can't jump to your PUT offset,
@@ -45,6 +46,9 @@ void nv50_dma_push(struct nouveau_channel *, u64 addr, int length);
  */
 #define NOUVEAU_DMA_SKIPS (128 / 4)

+/* Maximum push buffer size. */
+#define NV50_DMA_PUSH_MAX_LENGTH 0x7fffff
+
 /* Object handles - for stuff that's doesn't use handle == oclass. */
 enum {
 	NvDmaFB = 0x80000002,
@@ -89,7 +93,7 @@ FIRE_RING(struct nouveau_channel *chan)

 	if (chan->dma.ib_max) {
 		nv50_dma_push(chan, chan->push.addr + (chan->dma.put << 2),
-			      (chan->dma.cur - chan->dma.put) << 2);
+			      (chan->dma.cur - chan->dma.put) << 2, false);
 	} else {
 		WRITE_PUT(chan->dma.cur);
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index 0f927adda4ed..a90c4cd8cbb2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -164,8 +164,10 @@ nouveau_exec_job_run(struct nouveau_job *job)
 	}

 	for (i = 0; i < exec_job->push.count; i++) {
-		nv50_dma_push(chan, exec_job->push.s[i].va,
-			      exec_job->push.s[i].va_len);
+		struct drm_nouveau_exec_push *p = &exec_job->push.s[i];
+		bool no_prefetch = p->flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH;
+
+		nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
 	}

 	ret = nouveau_fence_emit(fence, chan);
@@ -223,7 +225,18 @@ nouveau_exec_job_init(struct nouveau_exec_job **pjob,
 {
 	struct nouveau_exec_job *job;
 	struct nouveau_job_args args = {};
-	int ret;
+	int i, ret;
+
+	for (i = 0; i < __args->push.count; i++) {
+		struct drm_nouveau_exec_push *p = &__args->push.s[i];
+
+		if (unlikely(p->va_len > NV50_DMA_PUSH_MAX_LENGTH)) {
+			NV_PRINTK(err, nouveau_cli(__args->file_priv),
+				  "pushbuf size exceeds limit: 0x%x max 0x%x\n",
+				  p->va_len, NV50_DMA_PUSH_MAX_LENGTH);
+			return -EINVAL;
+		}
+	}

 	job = *pjob = kzalloc(sizeof(*job), GFP_KERNEL);
 	if (!job)
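The flag consulted in the run path above arrives from userspace through the exec UAPI: each push entry now carries explicit flags instead of the kernel choosing prefetch behaviour implicitly. A hedged userspace-side sketch (the struct and flag names match the diff and the corresponding nouveau UAPI header update; the address and length are placeholders):

#include <drm/nouveau_drm.h>

/* Hypothetical entry: submit a 4 KiB push buffer whose contents are
 * patched after submission, so engine prefetching must be disabled. */
struct drm_nouveau_exec_push push = {
	.va     = 0x100000,	/* placeholder GPU virtual address */
	.va_len = 0x1000,	/* must not exceed 0x7fffff */
	.flags  = DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH,
};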
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index f39360870c70..c0b10d8d3d03 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -856,9 +856,11 @@ revalidate:
 		for (i = 0; i < req->nr_push; i++) {
 			struct nouveau_vma *vma = (void *)(unsigned long)
 				bo[push[i].bo_index].user_priv;
+			u64 addr = vma->addr + push[i].offset;
+			u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
+			bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;

-			nv50_dma_push(chan, vma->addr + push[i].offset,
-				      push[i].length);
+			nv50_dma_push(chan, addr, length, no_prefetch);
 		}
 	} else
 	if (drm->client.device.info.chipset >= 0x25) {
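The legacy pushbuf ioctl has no flags field, so the matching UAPI change carries the flag inside the length value itself: since valid lengths are capped at 0x7fffff (23 bits), a higher bit of push[i].length is free to hold NOUVEAU_GEM_PUSHBUF_NO_PREFETCH, which the code above masks off before programming the ring. A hedged sketch of the userspace side, assuming the define from the corresponding nouveau UAPI header change (placeholder values throughout):

/* Hypothetical legacy submission: the NO_PREFETCH bit rides in
 * .length; the low 23 bits remain the actual push length. */
struct drm_nouveau_gem_pushbuf_push push = {
	.bo_index = 0,
	.offset   = 0,
	.length   = 0x1000 | NOUVEAU_GEM_PUSHBUF_NO_PREFETCH,
};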
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index 3424a1bf6af3..88217185e0f3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -292,6 +292,28 @@ nouveau_job_submit(struct nouveau_job *job)
 	if (job->sync)
 		done_fence = dma_fence_get(job->done_fence);

+	/* If a sched job depends on a dma-fence from a job from the same GPU
+	 * scheduler instance, but a different scheduler entity, the GPU
+	 * scheduler does only wait for the particular job to be scheduled,
+	 * rather than for the job to fully complete. This is due to the GPU
+	 * scheduler assuming that there is a scheduler instance per ring.
+	 * However, the current implementation, in order to avoid arbitrary
+	 * amounts of kthreads, has a single scheduler instance while scheduler
+	 * entities represent rings.
+	 *
+	 * As a workaround, set the DRM_SCHED_FENCE_DONT_PIPELINE for all
+	 * out-fences in order to force the scheduler to wait for full job
+	 * completion for dependent jobs from different entities and same
+	 * scheduler instance.
+	 *
+	 * There is some work in progress [1] to address the issues of firmware
+	 * schedulers; once it is in-tree the scheduler topology in Nouveau
+	 * should be re-worked accordingly.
+	 *
+	 * [1] https://lore.kernel.org/dri-devel/20230801205103.627779-1-matthew.brost@intel.com/
+	 */
+	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &job->done_fence->flags);
+
 	if (job->ops->armed_submit)
 		job->ops->armed_submit(job);
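In short: drm_sched normally optimizes a dependency on a fence from its own scheduler instance into a wait for that job to be merely *scheduled*, which is wrong here because Nouveau's entities are rings sharing one scheduler. A minimal hedged sketch of what the flag buys a dependent job; drm_sched_job_add_dependency() is the real drm_sched helper, while the wrapper and its name are ours:

#include <drm/gpu_scheduler.h>

/* Hypothetical: make job_b wait for job_a's full completion even
 * though both entities feed the same drm_gpu_scheduler instance. */
static int depend_on_completion(struct drm_sched_job *job_b,
				struct dma_fence *job_a_done)
{
	/* Without this bit, drm_sched would swap the dependency for
	 * job_a's *scheduled* fence and job_b could start too early. */
	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &job_a_done->flags);
	return drm_sched_job_add_dependency(job_b, dma_fence_get(job_a_done));
}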
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 3a1e8538f205..aae780e4a4aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -639,6 +639,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
 			struct drm_gpuva *va = r->unmap->va;
 			struct uvmm_map_args remap_args = {
 				.kind = uvma_from_va(va)->kind,
+				.region = uvma_from_va(va)->region,
 			};
 			u64 ustart = va->va.addr;
 			u64 urange = va->va.range;