Diffstat (limited to 'drivers/gpu/drm/nouveau')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c |  3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c    | 44
2 files changed, 22 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 0fbc9c841666..b3f0e10f83fe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -277,6 +277,9 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 		getparam->value = (u64)ttm_resource_manager_usage(vram_mgr) << PAGE_SHIFT;
 		break;
 	}
+	case NOUVEAU_GETPARAM_HAS_VMA_TILEMODE:
+		getparam->value = 1;
+		break;
 	default:
 		NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
 		return -EINVAL;
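
For context, a minimal userspace sketch of how a client could probe the new parameter. It assumes libdrm's drmIoctl() and the matching uapi header update that defines NOUVEAU_GETPARAM_HAS_VMA_TILEMODE (that header change is outside the drivers/gpu/drm/nouveau diffstat shown here); it is not part of this patch. Kernels without this change fall through to the default case above and return -EINVAL:

#include <stdbool.h>
#include <xf86drm.h>
#include <drm/nouveau_drm.h>

/* Returns true if the kernel honours tile_mode/PTE kind on VM_BIND BOs. */
static bool nouveau_has_vma_tilemode(int fd)
{
	struct drm_nouveau_getparam gp = {
		.param = NOUVEAU_GETPARAM_HAS_VMA_TILEMODE,
	};

	/* Older kernels reject the unknown parameter with -EINVAL. */
	if (drmIoctl(fd, DRM_IOCTL_NOUVEAU_GETPARAM, &gp) != 0)
		return false;

	return gp.value == 1;
}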
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 254d6c9ef202..3a7f4ce34aa3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -234,28 +234,28 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
 	}
 	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
-	if (!nouveau_cli_uvmm(cli) || internal) {
-		/* for BO noVM allocs, don't assign kinds */
-		if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
-			nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
-			if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
-				kfree(nvbo);
-				return ERR_PTR(-EINVAL);
-			}
-			nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
-		} else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-			nvbo->kind = (tile_flags & 0x00007f00) >> 8;
-			nvbo->comp = (tile_flags & 0x00030000) >> 16;
-			if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
-				kfree(nvbo);
-				return ERR_PTR(-EINVAL);
-			}
-		} else {
-			nvbo->zeta = (tile_flags & 0x00000007);
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+			kfree(nvbo);
+			return ERR_PTR(-EINVAL);
+		}
+
+		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+	} else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+		nvbo->comp = (tile_flags & 0x00030000) >> 16;
+		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+			kfree(nvbo);
+			return ERR_PTR(-EINVAL);
 		}
-		nvbo->mode = tile_mode;
+	} else {
+		nvbo->zeta = (tile_flags & 0x00000007);
+	}
+	nvbo->mode = tile_mode;
+	if (!nouveau_cli_uvmm(cli) || internal) {
 		/* Determine the desirable target GPU page size for the buffer. */
 		for (i = 0; i < vmm->page_nr; i++) {
 			/* Because we cannot currently allow VMM maps to fail
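
As an aside, the relocated block above now decodes tiling state for every allocation, uvmm or not. Below is a standalone sketch of the tile_flags bit layout it parses; the enum and struct names are invented for illustration, and the Fermi path's comp lookup through the MMU kind table is omitted:

#include <stdint.h>

/* Illustrative names only, not kernel API. */
enum chip_family { FAMILY_PRE_TESLA, FAMILY_TESLA, FAMILY_FERMI };

struct bo_tiling {
	uint8_t kind;	/* PTE kind: bits 8..15 (Fermi+), bits 8..14 (Tesla) */
	uint8_t comp;	/* compression: bits 16..17 (Tesla path only here) */
	uint8_t zeta;	/* zeta format: bits 0..2 (pre-Tesla) */
};

static struct bo_tiling decode_tile_flags(enum chip_family family,
					  uint32_t tile_flags)
{
	struct bo_tiling t = {0};

	if (family >= FAMILY_FERMI) {
		t.kind = (tile_flags & 0x0000ff00) >> 8;
		/* the kernel additionally derives comp from mmu->kind[kind] */
	} else if (family >= FAMILY_TESLA) {
		t.kind = (tile_flags & 0x00007f00) >> 8;
		t.comp = (tile_flags & 0x00030000) >> 16;
	} else {
		t.zeta = tile_flags & 0x00000007;
	}

	return t;
}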
@@ -297,12 +297,6 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
 		}
 		nvbo->page = vmm->page[pi].shift;
 	} else {
-		/* reject other tile flags when in VM mode. */
-		if (tile_mode)
-			return ERR_PTR(-EINVAL);
-		if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
-			return ERR_PTR(-EINVAL);
-
 		/* Determine the desirable target GPU page size for the buffer. */
 		for (i = 0; i < vmm->page_nr; i++) {
 			/* Because we cannot currently allow VMM maps to fail
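
With the uvmm-mode rejection above removed, a VM_BIND client can request a kinded, tiled BO at allocation time instead of being limited to NOUVEAU_GEM_TILE_NONCONTIG. A hypothetical sketch follows, using the existing drm_nouveau_gem_new uapi and libdrm's drmIoctl(); the helper name is made up, the placement of the PTE kind in bits 8..15 of tile_flags mirrors the Fermi path above, and a real client would first check NOUVEAU_GETPARAM_HAS_VMA_TILEMODE as shown earlier:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/nouveau_drm.h>

/* Allocate a VRAM BO that carries a tile_mode and a PTE kind. */
static int alloc_tiled_bo(int fd, uint64_t size, uint32_t tile_mode,
			  uint8_t kind, uint32_t *handle)
{
	struct drm_nouveau_gem_new req;

	memset(&req, 0, sizeof(req));
	req.info.domain = NOUVEAU_GEM_DOMAIN_VRAM;
	req.info.size = size;
	req.info.tile_mode = tile_mode;
	req.info.tile_flags = (uint32_t)kind << 8;	/* kind in bits 8..15 */

	if (drmIoctl(fd, DRM_IOCTL_NOUVEAU_GEM_NEW, &req) != 0)
		return -1;

	*handle = req.info.handle;
	return 0;
}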