Diffstat (limited to 'drivers/gpu/drm/ast/ast_mode.c')
 -rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 132
 1 file changed, 77 insertions(+), 55 deletions(-)
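
The hunks below convert ast_mode.c from the driver's private ast_bo reserve/pin/kmap helpers to the generic VRAM helpers (drm_gem_vram_*). As a rough sketch of the pin/offset/kmap pattern the patch adopts, built only from the calls visible in the hunks below; example_pin_and_map() is illustrative and not part of the patch:

	/*
	 * Sketch only: pin a GEM VRAM object, query its VRAM offset and
	 * map it, unwinding on error. All drm_gem_vram_* calls appear in
	 * the diff; the surrounding function is hypothetical.
	 */
	static int example_pin_and_map(struct drm_gem_object *obj)
	{
		struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(obj);
		s64 gpu_addr;
		void *base;
		int ret;

		ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
		if (ret)
			return ret;

		gpu_addr = drm_gem_vram_offset(gbo); /* negative errno on failure */
		if (gpu_addr < 0) {
			ret = (int)gpu_addr;
			goto err_unpin;
		}

		base = drm_gem_vram_kmap(gbo, true, NULL);
		if (IS_ERR(base)) {
			ret = PTR_ERR(base);
			goto err_unpin;
		}

		/* ... program scanout with gpu_addr, write through base ... */

		drm_gem_vram_kunmap(gbo);
		drm_gem_vram_unpin(gbo);
		return 0;

	err_unpin:
		drm_gem_vram_unpin(gbo);
		return ret;
	}
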
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 97fed0627d1c..fb700d620b64 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -521,7 +521,6 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
-/* ast is different - we will force move buffers out of VRAM */
static int ast_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, int atomic)
@@ -529,50 +528,54 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
struct ast_private *ast = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct ast_framebuffer *ast_fb;
- struct ast_bo *bo;
+ struct drm_gem_vram_object *gbo;
int ret;
- u64 gpu_addr;
+ s64 gpu_addr;
+ void *base;
- /* push the previous fb to system ram */
if (!atomic && fb) {
ast_fb = to_ast_framebuffer(fb);
obj = ast_fb->obj;
- bo = gem_to_ast_bo(obj);
- ret = ast_bo_reserve(bo, false);
- if (ret)
- return ret;
- ast_bo_push_sysram(bo);
- ast_bo_unreserve(bo);
+ gbo = drm_gem_vram_of_gem(obj);
+
+ /* unmap if console */
+ if (&ast->fbdev->afb == ast_fb)
+ drm_gem_vram_kunmap(gbo);
+ drm_gem_vram_unpin(gbo);
}
ast_fb = to_ast_framebuffer(crtc->primary->fb);
obj = ast_fb->obj;
- bo = gem_to_ast_bo(obj);
+ gbo = drm_gem_vram_of_gem(obj);
- ret = ast_bo_reserve(bo, false);
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
return ret;
-
- ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
- if (ret) {
- ast_bo_unreserve(bo);
- return ret;
+ gpu_addr = drm_gem_vram_offset(gbo);
+ if (gpu_addr < 0) {
+ ret = (int)gpu_addr;
+ goto err_drm_gem_vram_unpin;
}
if (&ast->fbdev->afb == ast_fb) {
/* if pushing console in kmap it */
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
- if (ret)
+ base = drm_gem_vram_kmap(gbo, true, NULL);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
DRM_ERROR("failed to kmap fbcon\n");
- else
+ } else {
ast_fbdev_set_base(ast, gpu_addr);
+ }
}
- ast_bo_unreserve(bo);
ast_set_offset_reg(crtc);
ast_set_start_address_crt1(crtc, (u32)gpu_addr);
return 0;
+
+err_drm_gem_vram_unpin:
+ drm_gem_vram_unpin(gbo);
+ return ret;
}
static int ast_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
@@ -618,21 +621,18 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc,
static void ast_crtc_disable(struct drm_crtc *crtc)
{
- int ret;
-
DRM_DEBUG_KMS("\n");
ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
+ struct ast_private *ast = crtc->dev->dev_private;
struct ast_framebuffer *ast_fb = to_ast_framebuffer(crtc->primary->fb);
struct drm_gem_object *obj = ast_fb->obj;
- struct ast_bo *bo = gem_to_ast_bo(obj);
-
- ret = ast_bo_reserve(bo, false);
- if (ret)
- return;
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(obj);
- ast_bo_push_sysram(bo);
- ast_bo_unreserve(bo);
+ /* unmap if console */
+ if (&ast->fbdev->afb == ast_fb)
+ drm_gem_vram_kunmap(gbo);
+ drm_gem_vram_unpin(gbo);
}
crtc->primary->fb = NULL;
}
@@ -918,28 +918,32 @@ static int ast_cursor_init(struct drm_device *dev)
int size;
int ret;
struct drm_gem_object *obj;
- struct ast_bo *bo;
- uint64_t gpu_addr;
+ struct drm_gem_vram_object *gbo;
+ s64 gpu_addr;
+ void *base;
size = (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE) * AST_DEFAULT_HWC_NUM;
ret = ast_gem_create(dev, size, true, &obj);
if (ret)
return ret;
- bo = gem_to_ast_bo(obj);
- ret = ast_bo_reserve(bo, false);
- if (unlikely(ret != 0))
- goto fail;
-
- ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
- ast_bo_unreserve(bo);
+ gbo = drm_gem_vram_of_gem(obj);
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
goto fail;
+ gpu_addr = drm_gem_vram_offset(gbo);
+ if (gpu_addr < 0) {
+ drm_gem_vram_unpin(gbo);
+ ret = (int)gpu_addr;
+ goto fail;
+ }
/* kmap the object */
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &ast->cache_kmap);
- if (ret)
+ base = drm_gem_vram_kmap_at(gbo, true, NULL, &ast->cache_kmap);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
goto fail;
+ }
ast->cursor_cache = obj;
ast->cursor_cache_gpu_addr = gpu_addr;
@@ -952,7 +956,9 @@ fail:
static void ast_cursor_fini(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
- ttm_bo_kunmap(&ast->cache_kmap);
+ struct drm_gem_vram_object *gbo =
+ drm_gem_vram_of_gem(ast->cursor_cache);
+ drm_gem_vram_kunmap_at(gbo, &ast->cache_kmap);
drm_gem_object_put_unlocked(ast->cursor_cache);
}
@@ -1173,8 +1179,8 @@ static int ast_cursor_set(struct drm_crtc *crtc,
struct ast_private *ast = crtc->dev->dev_private;
struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
struct drm_gem_object *obj;
- struct ast_bo *bo;
- uint64_t gpu_addr;
+ struct drm_gem_vram_object *gbo;
+ s64 gpu_addr;
u32 csum;
int ret;
struct ttm_bo_kmap_obj uobj_map;
@@ -1193,19 +1199,27 @@ static int ast_cursor_set(struct drm_crtc *crtc,
DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
return -ENOENT;
}
- bo = gem_to_ast_bo(obj);
+ gbo = drm_gem_vram_of_gem(obj);
- ret = ast_bo_reserve(bo, false);
+ ret = drm_gem_vram_lock(gbo, false);
if (ret)
goto fail;
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
-
- src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
- dst = ttm_kmap_obj_virtual(&ast->cache_kmap, &dst_isiomem);
-
+ memset(&uobj_map, 0, sizeof(uobj_map));
+ src = drm_gem_vram_kmap_at(gbo, true, &src_isiomem, &uobj_map);
+ if (IS_ERR(src)) {
+ ret = PTR_ERR(src);
+ goto fail_unlock;
+ }
if (src_isiomem == true)
DRM_ERROR("src cursor bo should be in main memory\n");
+
+ dst = drm_gem_vram_kmap_at(drm_gem_vram_of_gem(ast->cursor_cache),
+ false, &dst_isiomem, &ast->cache_kmap);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto fail_unlock;
+ }
if (dst_isiomem == false)
DRM_ERROR("dst bo should be in VRAM\n");
@@ -1214,11 +1228,14 @@ static int ast_cursor_set(struct drm_crtc *crtc,
/* do data transfer to cursor cache */
csum = copy_cursor_image(src, dst, width, height);
+ drm_gem_vram_kunmap_at(gbo, &uobj_map);
+ drm_gem_vram_unlock(gbo);
+
/* write checksum + signature */
- ttm_bo_kunmap(&uobj_map);
- ast_bo_unreserve(bo);
{
- u8 *dst = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ u8 *dst = drm_gem_vram_kmap_at(drm_gem_vram_of_gem(ast->cursor_cache),
+ false, NULL, &ast->cache_kmap);
+ dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
writel(csum, dst);
writel(width, dst + AST_HWC_SIGNATURE_SizeX);
writel(height, dst + AST_HWC_SIGNATURE_SizeY);
@@ -1244,6 +1261,9 @@ static int ast_cursor_set(struct drm_crtc *crtc,
drm_gem_object_put_unlocked(obj);
return 0;
+
+fail_unlock:
+ drm_gem_vram_unlock(gbo);
fail:
drm_gem_object_put_unlocked(obj);
return ret;
@@ -1257,7 +1277,9 @@ static int ast_cursor_move(struct drm_crtc *crtc,
int x_offset, y_offset;
u8 *sig;
- sig = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ sig = drm_gem_vram_kmap_at(drm_gem_vram_of_gem(ast->cursor_cache),
+ false, NULL, &ast->cache_kmap);
+ sig += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
writel(x, sig + AST_HWC_SIGNATURE_X);
writel(y, sig + AST_HWC_SIGNATURE_Y);