Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 394
 1 file changed, 204 insertions(+), 190 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index dd0994d9ebfc..cd7feb1b25f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -37,32 +37,27 @@
* to use more "relaxed" allocation parameters when using the DMA API, since we
* never need a kernel mapping.
*/
+#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
+#include "priv.h"
-#include <subdev/fb.h>
+#include <core/memory.h>
#include <core/mm.h>
-#include <core/device.h>
+#include <core/tegra.h>
+#include <subdev/fb.h>
-#ifdef __KERNEL__
-#include <linux/dma-attrs.h>
-#include <linux/iommu.h>
-#include <nouveau_platform.h>
-#endif
+#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
-#include "priv.h"
-
-struct gk20a_instobj_priv {
- struct nvkm_instobj base;
- /* Must be second member here - see nouveau_gpuobj_map_vm() */
- struct nvkm_mem *mem;
- /* Pointed by mem */
- struct nvkm_mem _mem;
+struct gk20a_instobj {
+ struct nvkm_memory memory;
+ struct gk20a_instmem *imem;
+ struct nvkm_mem mem;
};
/*
* Used for objects allocated using the DMA API
*/
struct gk20a_instobj_dma {
- struct gk20a_instobj_priv base;
+ struct gk20a_instobj base;
void *cpuaddr;
dma_addr_t handle;
@@ -73,14 +68,15 @@ struct gk20a_instobj_dma {
* Used for objects flattened using the IOMMU API
*/
struct gk20a_instobj_iommu {
- struct gk20a_instobj_priv base;
+ struct gk20a_instobj base;
/* array of base.mem->size pages */
struct page *pages[];
};
-struct gk20a_instmem_priv {
+struct gk20a_instmem {
struct nvkm_instmem base;
+ unsigned long lock_flags;
spinlock_t lock;
u64 addr;
@@ -94,6 +90,42 @@ struct gk20a_instmem_priv {
struct dma_attrs attrs;
};
+static enum nvkm_memory_target
+gk20a_instobj_target(struct nvkm_memory *memory)
+{
+ return NVKM_MEM_TARGET_HOST;
+}
+
+static u64
+gk20a_instobj_addr(struct nvkm_memory *memory)
+{
+ return gk20a_instobj(memory)->mem.offset;
+}
+
+static u64
+gk20a_instobj_size(struct nvkm_memory *memory)
+{
+ return (u64)gk20a_instobj(memory)->mem.size << 12;
+}
+
+static void __iomem *
+gk20a_instobj_acquire(struct nvkm_memory *memory)
+{
+ struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;
+ unsigned long flags;
+ spin_lock_irqsave(&imem->lock, flags);
+ imem->lock_flags = flags;
+ return NULL;
+}
+
+static void
+gk20a_instobj_release(struct nvkm_memory *memory)
+{
+ struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;
+ spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
+}
+
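A usage sketch for the two hooks above (assuming the nvkm_kmap()/nvkm_done()
macros from core/memory.h, which wrap the acquire()/release() hooks):

    nvkm_kmap(memory);                   /* gk20a_instobj_acquire(): takes imem->lock */
    nvkm_wo32(memory, 0x00, 0x00000000); /* routed through gk20a_instobj_wr32() below */
    nvkm_done(memory);                   /* gk20a_instobj_release(): drops imem->lock */

acquire() returning NULL means these objects never expose a direct CPU
mapping, so every access funnels through the rd32()/wr32() hooks. Note that
stashing the IRQ flags in imem->lock_flags is only safe because the spinlock
itself remains held between acquire() and release().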
/*
* Use PRAMIN to read/write data and avoid coherency issues.
* PRAMIN uses the GPU path and ensures data will always be coherent.
@@ -104,160 +136,170 @@ struct gk20a_instmem_priv {
*/
static u32
-gk20a_instobj_rd32(struct nvkm_object *object, u64 offset)
+gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
- struct gk20a_instobj_priv *node = (void *)object;
- unsigned long flags;
- u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
- u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+ struct gk20a_instobj *node = gk20a_instobj(memory);
+ struct gk20a_instmem *imem = node->imem;
+ struct nvkm_device *device = imem->base.subdev.device;
+ u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
+ u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
u32 data;
- spin_lock_irqsave(&priv->lock, flags);
- if (unlikely(priv->addr != base)) {
- nv_wr32(priv, 0x001700, base >> 16);
- priv->addr = base;
+ if (unlikely(imem->addr != base)) {
+ nvkm_wr32(device, 0x001700, base >> 16);
+ imem->addr = base;
}
- data = nv_rd32(priv, 0x700000 + addr);
- spin_unlock_irqrestore(&priv->lock, flags);
+ data = nvkm_rd32(device, 0x700000 + addr);
return data;
}
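A worked example of the window split above, taking node->mem.offset + offset
to be 0x12345678 (illustrative value):

    base = 0x12345678 & 0xffffff00000ULL;      /* 0x12300000: 1 MiB window base */
    addr = 0x12345678 & 0x000000fffffULL;      /* 0x45678: offset inside window */
    nvkm_wr32(device, 0x001700, base >> 16);   /* selects window 0x1230 */
    data = nvkm_rd32(device, 0x700000 + addr); /* read through the PRAMIN aperture */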
static void
-gk20a_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
+gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
- struct gk20a_instobj_priv *node = (void *)object;
- unsigned long flags;
- u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
- u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (unlikely(priv->addr != base)) {
- nv_wr32(priv, 0x001700, base >> 16);
- priv->addr = base;
+ struct gk20a_instobj *node = gk20a_instobj(memory);
+ struct gk20a_instmem *imem = node->imem;
+ struct nvkm_device *device = imem->base.subdev.device;
+ u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
+ u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
+
+ if (unlikely(imem->addr != base)) {
+ nvkm_wr32(device, 0x001700, base >> 16);
+ imem->addr = base;
}
- nv_wr32(priv, 0x700000 + addr, data);
- spin_unlock_irqrestore(&priv->lock, flags);
+ nvkm_wr32(device, 0x700000 + addr, data);
+}
+
+static void
+gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+{
+ struct gk20a_instobj *node = gk20a_instobj(memory);
+ nvkm_vm_map_at(vma, offset, &node->mem);
}
static void
-gk20a_instobj_dtor_dma(struct gk20a_instobj_priv *_node)
+gk20a_instobj_dtor_dma(struct gk20a_instobj *_node)
{
struct gk20a_instobj_dma *node = (void *)_node;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
- struct device *dev = nv_device_base(nv_device(priv));
+ struct gk20a_instmem *imem = _node->imem;
+ struct device *dev = imem->base.subdev.device->dev;
if (unlikely(!node->cpuaddr))
return;
- dma_free_attrs(dev, _node->mem->size << PAGE_SHIFT, node->cpuaddr,
- node->handle, &priv->attrs);
+ dma_free_attrs(dev, _node->mem.size << PAGE_SHIFT, node->cpuaddr,
+ node->handle, &imem->attrs);
}
static void
-gk20a_instobj_dtor_iommu(struct gk20a_instobj_priv *_node)
+gk20a_instobj_dtor_iommu(struct gk20a_instobj *_node)
{
struct gk20a_instobj_iommu *node = (void *)_node;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+ struct gk20a_instmem *imem = _node->imem;
struct nvkm_mm_node *r;
int i;
- if (unlikely(list_empty(&_node->mem->regions)))
+ if (unlikely(list_empty(&_node->mem.regions)))
return;
- r = list_first_entry(&_node->mem->regions, struct nvkm_mm_node,
+ r = list_first_entry(&_node->mem.regions, struct nvkm_mm_node,
rl_entry);
/* clear bit 34 to unmap pages */
- r->offset &= ~BIT(34 - priv->iommu_pgshift);
+ r->offset &= ~BIT(34 - imem->iommu_pgshift);
/* Unmap pages from GPU address space and free them */
- for (i = 0; i < _node->mem->size; i++) {
- iommu_unmap(priv->domain,
- (r->offset + i) << priv->iommu_pgshift, PAGE_SIZE);
+ for (i = 0; i < _node->mem.size; i++) {
+ iommu_unmap(imem->domain,
+ (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
__free_page(node->pages[i]);
}
/* Release area from GPU address space */
- mutex_lock(priv->mm_mutex);
- nvkm_mm_free(priv->mm, &r);
- mutex_unlock(priv->mm_mutex);
+ mutex_lock(imem->mm_mutex);
+ nvkm_mm_free(imem->mm, &r);
+ mutex_unlock(imem->mm_mutex);
}
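A note on the bit-34 tag (set in the IOMMU constructor below, cleared here):
r->offset is a page index, i.e. a GPU address shifted right by iommu_pgshift,
so bit (34 - iommu_pgshift) of the index corresponds to bit 34 of the byte
address. A sketch, assuming iommu_pgshift == 12:

    r->offset |=  BIT(34 - 12);  /* map:   address gains bit 34, IOMMU path */
    r->offset &= ~BIT(34 - 12);  /* unmap: strip the tag, recover raw index */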
-static void
-gk20a_instobj_dtor(struct nvkm_object *object)
+static void *
+gk20a_instobj_dtor(struct nvkm_memory *memory)
{
- struct gk20a_instobj_priv *node = (void *)object;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+ struct gk20a_instobj *node = gk20a_instobj(memory);
+ struct gk20a_instmem *imem = node->imem;
- if (priv->domain)
+ if (imem->domain)
gk20a_instobj_dtor_iommu(node);
else
gk20a_instobj_dtor_dma(node);
- nvkm_instobj_destroy(&node->base);
+ return node;
}
+static const struct nvkm_memory_func
+gk20a_instobj_func = {
+ .dtor = gk20a_instobj_dtor,
+ .target = gk20a_instobj_target,
+ .addr = gk20a_instobj_addr,
+ .size = gk20a_instobj_size,
+ .acquire = gk20a_instobj_acquire,
+ .release = gk20a_instobj_release,
+ .rd32 = gk20a_instobj_rd32,
+ .wr32 = gk20a_instobj_wr32,
+ .map = gk20a_instobj_map,
+};
+
static int
-gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, u32 npages, u32 align,
- struct gk20a_instobj_priv **_node)
+gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
+ struct gk20a_instobj **_node)
{
struct gk20a_instobj_dma *node;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
- struct device *dev = nv_device_base(nv_device(parent));
- int ret;
+ struct nvkm_subdev *subdev = &imem->base.subdev;
+ struct device *dev = subdev->device->dev;
- ret = nvkm_instobj_create_(parent, engine, oclass, sizeof(*node),
- (void **)&node);
+ if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
+ return -ENOMEM;
*_node = &node->base;
- if (ret)
- return ret;
node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
&node->handle, GFP_KERNEL,
- &priv->attrs);
+ &imem->attrs);
if (!node->cpuaddr) {
- nv_error(priv, "cannot allocate DMA memory\n");
+ nvkm_error(subdev, "cannot allocate DMA memory\n");
return -ENOMEM;
}
/* alignment check */
if (unlikely(node->handle & (align - 1)))
- nv_warn(priv, "memory not aligned as requested: %pad (0x%x)\n",
- &node->handle, align);
+ nvkm_warn(subdev,
+ "memory not aligned as requested: %pad (0x%x)\n",
+ &node->handle, align);
/* present memory for being mapped using small pages */
node->r.type = 12;
node->r.offset = node->handle >> 12;
node->r.length = (npages << PAGE_SHIFT) >> 12;
- node->base._mem.offset = node->handle;
+ node->base.mem.offset = node->handle;
- INIT_LIST_HEAD(&node->base._mem.regions);
- list_add_tail(&node->r.rl_entry, &node->base._mem.regions);
+ INIT_LIST_HEAD(&node->base.mem.regions);
+ list_add_tail(&node->r.rl_entry, &node->base.mem.regions);
return 0;
}
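The nvkm_mm_node filled in above describes the DMA buffer in 4 KiB units so
it can be mapped with small pages: r.type holds the page shift, while offset
and length are the bus address and size scaled down by it. With hypothetical
values npages == 4 and handle == 0x80004000 (and PAGE_SHIFT == 12):

    node->r.type   = 12;                       /* small (4 KiB) pages */
    node->r.offset = 0x80004000 >> 12;         /* 0x80004 */
    node->r.length = (4 << PAGE_SHIFT) >> 12;  /* 4 */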
static int
-gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, u32 npages, u32 align,
- struct gk20a_instobj_priv **_node)
+gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
+ struct gk20a_instobj **_node)
{
struct gk20a_instobj_iommu *node;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
+ struct nvkm_subdev *subdev = &imem->base.subdev;
struct nvkm_mm_node *r;
int ret;
int i;
- ret = nvkm_instobj_create_(parent, engine, oclass,
- sizeof(*node) + sizeof(node->pages[0]) * npages,
- (void **)&node);
+ if (!(node = kzalloc(sizeof(*node) +
+ sizeof(node->pages[0]) * npages, GFP_KERNEL)))
+ return -ENOMEM;
*_node = &node->base;
- if (ret)
- return ret;
/* Allocate backing memory */
for (i = 0; i < npages; i++) {
@@ -270,48 +312,48 @@ gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
node->pages[i] = p;
}
- mutex_lock(priv->mm_mutex);
+ mutex_lock(imem->mm_mutex);
/* Reserve area from GPU address space */
- ret = nvkm_mm_head(priv->mm, 0, 1, npages, npages,
- align >> priv->iommu_pgshift, &r);
- mutex_unlock(priv->mm_mutex);
+ ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
+ align >> imem->iommu_pgshift, &r);
+ mutex_unlock(imem->mm_mutex);
if (ret) {
- nv_error(priv, "virtual space is full!\n");
+ nvkm_error(subdev, "virtual space is full!\n");
goto free_pages;
}
/* Map into GPU address space */
for (i = 0; i < npages; i++) {
struct page *p = node->pages[i];
- u32 offset = (r->offset + i) << priv->iommu_pgshift;
+ u32 offset = (r->offset + i) << imem->iommu_pgshift;
- ret = iommu_map(priv->domain, offset, page_to_phys(p),
+ ret = iommu_map(imem->domain, offset, page_to_phys(p),
PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
if (ret < 0) {
- nv_error(priv, "IOMMU mapping failure: %d\n", ret);
+ nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
while (i-- > 0) {
offset -= PAGE_SIZE;
- iommu_unmap(priv->domain, offset, PAGE_SIZE);
+ iommu_unmap(imem->domain, offset, PAGE_SIZE);
}
goto release_area;
}
}
/* Bit 34 tells that an address is to be resolved through the IOMMU */
- r->offset |= BIT(34 - priv->iommu_pgshift);
+ r->offset |= BIT(34 - imem->iommu_pgshift);
- node->base._mem.offset = ((u64)r->offset) << priv->iommu_pgshift;
+ node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
- INIT_LIST_HEAD(&node->base._mem.regions);
- list_add_tail(&r->rl_entry, &node->base._mem.regions);
+ INIT_LIST_HEAD(&node->base.mem.regions);
+ list_add_tail(&r->rl_entry, &node->base.mem.regions);
return 0;
release_area:
- mutex_lock(priv->mm_mutex);
- nvkm_mm_free(priv->mm, &r);
- mutex_unlock(priv->mm_mutex);
+ mutex_lock(imem->mm_mutex);
+ nvkm_mm_free(imem->mm, &r);
+ mutex_unlock(imem->mm_mutex);
free_pages:
for (i = 0; i < npages && node->pages[i] != NULL; i++)
@@ -321,120 +363,92 @@ free_pages:
}
static int
-gk20a_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 _size,
- struct nvkm_object **pobject)
+gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+ struct nvkm_memory **pmemory)
{
- struct nvkm_instobj_args *args = data;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
- struct gk20a_instobj_priv *node;
- u32 size, align;
+ struct gk20a_instmem *imem = gk20a_instmem(base);
+ struct gk20a_instobj *node = NULL;
+ struct nvkm_subdev *subdev = &imem->base.subdev;
int ret;
- nv_debug(parent, "%s (%s): size: %x align: %x\n", __func__,
- priv->domain ? "IOMMU" : "DMA", args->size, args->align);
+ nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
+ imem->domain ? "IOMMU" : "DMA", size, align);
/* Round size and align to page bounds */
- size = max(roundup(args->size, PAGE_SIZE), PAGE_SIZE);
- align = max(roundup(args->align, PAGE_SIZE), PAGE_SIZE);
+ size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
+ align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);
- if (priv->domain)
- ret = gk20a_instobj_ctor_iommu(parent, engine, oclass,
- size >> PAGE_SHIFT, align, &node);
+ if (imem->domain)
+ ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
+ align, &node);
else
- ret = gk20a_instobj_ctor_dma(parent, engine, oclass,
- size >> PAGE_SHIFT, align, &node);
- *pobject = nv_object(node);
+ ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
+ align, &node);
+ *pmemory = node ? &node->memory : NULL;
if (ret)
return ret;
- node->mem = &node->_mem;
+ nvkm_memory_ctor(&gk20a_instobj_func, &node->memory);
+ node->imem = imem;
/* present memory for being mapped using small pages */
- node->mem->size = size >> 12;
- node->mem->memtype = 0;
- node->mem->page_shift = 12;
-
- node->base.addr = node->mem->offset;
- node->base.size = size;
+ node->mem.size = size >> 12;
+ node->mem.memtype = 0;
+ node->mem.page_shift = 12;
- nv_debug(parent, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
- size, align, node->mem->offset);
+ nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
+ size, align, node->mem.offset);
return 0;
}
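The rounding above guarantees page-sized, page-aligned requests even for zero
or sub-page inputs, e.g. with PAGE_SIZE == 0x1000:

    size  = max(roundup(0x1234, PAGE_SIZE), PAGE_SIZE);  /* -> 0x2000 */
    align = max(roundup(0x0,    PAGE_SIZE), PAGE_SIZE);  /* -> 0x1000 */

node->mem.size is then kept in 4 KiB units (size >> 12), matching the << 12
in gk20a_instobj_size() above.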
-static struct nvkm_instobj_impl
-gk20a_instobj_oclass = {
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk20a_instobj_ctor,
- .dtor = gk20a_instobj_dtor,
- .init = _nvkm_instobj_init,
- .fini = _nvkm_instobj_fini,
- .rd32 = gk20a_instobj_rd32,
- .wr32 = gk20a_instobj_wr32,
- },
-};
-
-
-
-static int
-gk20a_instmem_fini(struct nvkm_object *object, bool suspend)
+static void
+gk20a_instmem_fini(struct nvkm_instmem *base)
{
- struct gk20a_instmem_priv *priv = (void *)object;
- priv->addr = ~0ULL;
- return nvkm_instmem_fini(&priv->base, suspend);
+ gk20a_instmem(base)->addr = ~0ULL;
}
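Resetting addr to ~0ULL invalidates the cached PRAMIN window base, so the
first rd32()/wr32() after a suspend/resume cycle takes the unlikely() branch
and reprograms 0x001700 rather than trusting stale hardware state.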
-static int
-gk20a_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gk20a_instmem_priv *priv;
- struct nouveau_platform_device *plat;
- int ret;
+static const struct nvkm_instmem_func
+gk20a_instmem = {
+ .fini = gk20a_instmem_fini,
+ .memory_new = gk20a_instobj_new,
+ .persistent = true,
+ .zero = false,
+};
- ret = nvkm_instmem_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+int
+gk20a_instmem_new(struct nvkm_device *device, int index,
+ struct nvkm_instmem **pimem)
+{
+ struct nvkm_device_tegra *tdev = device->func->tegra(device);
+ struct gk20a_instmem *imem;
- spin_lock_init(&priv->lock);
+ if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
+ spin_lock_init(&imem->lock);
+ *pimem = &imem->base;
- plat = nv_device_to_platform(nv_device(parent));
- if (plat->gpu->iommu.domain) {
- priv->domain = plat->gpu->iommu.domain;
- priv->mm = plat->gpu->iommu.mm;
- priv->iommu_pgshift = plat->gpu->iommu.pgshift;
- priv->mm_mutex = &plat->gpu->iommu.mutex;
+ if (tdev->iommu.domain) {
+ imem->domain = tdev->iommu.domain;
+ imem->mm = &tdev->iommu.mm;
+ imem->iommu_pgshift = tdev->iommu.pgshift;
+ imem->mm_mutex = &tdev->iommu.mutex;
- nv_info(priv, "using IOMMU\n");
+ nvkm_info(&imem->base.subdev, "using IOMMU\n");
} else {
- init_dma_attrs(&priv->attrs);
+ init_dma_attrs(&imem->attrs);
/*
* We will access instmem through PRAMIN and thus do not need a
* consistent CPU pointer or kernel mapping
*/
- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &priv->attrs);
- dma_set_attr(DMA_ATTR_WEAK_ORDERING, &priv->attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &priv->attrs);
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &priv->attrs);
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
+ dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
- nv_info(priv, "using DMA API\n");
+ nvkm_info(&imem->base.subdev, "using DMA API\n");
}
return 0;
}
-
-struct nvkm_oclass *
-gk20a_instmem_oclass = &(struct nvkm_instmem_impl) {
- .base.handle = NV_SUBDEV(INSTMEM, 0xea),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk20a_instmem_ctor,
- .dtor = _nvkm_instmem_dtor,
- .init = _nvkm_instmem_init,
- .fini = gk20a_instmem_fini,
- },
- .instobj = &gk20a_instobj_oclass.base,
-}.base;
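One property of the DMA path worth spelling out (an observation on the kernel
DMA API, not part of the patch): with DMA_ATTR_NO_KERNEL_MAPPING set, the
buffer returned by dma_alloc_attrs() may be an opaque cookie rather than a
usable kernel virtual address, so cpuaddr must only ever be handed back to
dma_free_attrs(), which is exactly how gk20a_instobj_dtor_dma() uses it; all
CPU access goes through PRAMIN instead:

    dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
    /* cpuaddr from dma_alloc_attrs() is now a cookie: pass it back to
     * dma_free_attrs(), never dereference it. */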