Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 220 |
1 file changed, 112 insertions, 108 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6780a36e6171..54e942df3b8e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -40,10 +40,10 @@
  */
 void vmw_resource_mob_attach(struct vmw_resource *res)
 {
-	struct vmw_bo *backup = res->backup;
-	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
+	struct vmw_bo *gbo = res->guest_memory_bo;
+	struct rb_node **new = &gbo->res_tree.rb_node, *parent = NULL;
 
-	dma_resv_assert_held(res->backup->base.base.resv);
+	dma_resv_assert_held(gbo->tbo.base.resv);
 	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
 		res->func->prio;
@@ -52,14 +52,14 @@ void vmw_resource_mob_attach(struct vmw_resource *res)
 			container_of(*new, struct vmw_resource, mob_node);
 
 		parent = *new;
-		new = (res->backup_offset < this->backup_offset) ?
+		new = (res->guest_memory_offset < this->guest_memory_offset) ?
 			&((*new)->rb_left) : &((*new)->rb_right);
 	}
 
 	rb_link_node(&res->mob_node, parent, new);
-	rb_insert_color(&res->mob_node, &backup->res_tree);
+	rb_insert_color(&res->mob_node, &gbo->res_tree);
 
-	vmw_bo_prio_add(backup, res->used_prio);
+	vmw_bo_prio_add(gbo, res->used_prio);
 }
 
 /**
@@ -68,13 +68,13 @@ void vmw_resource_mob_attach(struct vmw_resource *res)
  */
 void vmw_resource_mob_detach(struct vmw_resource *res)
 {
-	struct vmw_bo *backup = res->backup;
+	struct vmw_bo *gbo = res->guest_memory_bo;
 
-	dma_resv_assert_held(backup->base.base.resv);
+	dma_resv_assert_held(gbo->tbo.base.resv);
 	if (vmw_resource_mob_attached(res)) {
-		rb_erase(&res->mob_node, &backup->res_tree);
+		rb_erase(&res->mob_node, &gbo->res_tree);
 		RB_CLEAR_NODE(&res->mob_node);
-		vmw_bo_prio_del(backup, res->used_prio);
+		vmw_bo_prio_del(gbo, res->used_prio);
 	}
 }
@@ -121,8 +121,8 @@
 	spin_lock(&dev_priv->resource_lock);
 	list_del_init(&res->lru_head);
 	spin_unlock(&dev_priv->resource_lock);
-	if (res->backup) {
-		struct ttm_buffer_object *bo = &res->backup->base;
+	if (res->guest_memory_bo) {
+		struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
 
 		ret = ttm_bo_reserve(bo, false, false, NULL);
 		BUG_ON(ret);
@@ -134,14 +134,14 @@ static void vmw_resource_release(struct kref *kref)
 			val_buf.num_shared = 0;
 			res->func->unbind(res, false, &val_buf);
 		}
-		res->backup_dirty = false;
+		res->guest_memory_dirty = false;
 		vmw_resource_mob_detach(res);
 		if (res->dirty)
 			res->func->dirty_free(res);
 		if (res->coherent)
-			vmw_bo_dirty_release(res->backup);
+			vmw_bo_dirty_release(res->guest_memory_bo);
 		ttm_bo_unreserve(bo);
-		vmw_bo_unreference(&res->backup);
+		vmw_bo_unreference(&res->guest_memory_bo);
 	}
 
 	if (likely(res->hw_destroy != NULL)) {
@@ -224,9 +224,9 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 	INIT_LIST_HEAD(&res->lru_head);
 	INIT_LIST_HEAD(&res->binding_head);
 	res->id = -1;
-	res->backup = NULL;
-	res->backup_offset = 0;
-	res->backup_dirty = false;
+	res->guest_memory_bo = NULL;
+	res->guest_memory_offset = 0;
+	res->guest_memory_dirty = false;
 	res->res_dirty = false;
 	res->coherent = false;
 	res->used_prio = 3;
@@ -264,7 +264,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 	int ret = -EINVAL;
 
 	base = ttm_base_object_lookup(tfile, handle);
-	if (unlikely(base == NULL))
+	if (unlikely(!base))
 		return -EINVAL;
 
 	if (unlikely(ttm_base_object_type(base) != converter->object_type))
@@ -313,31 +313,36 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 }
 
 /**
- * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
+ * vmw_resource_buf_alloc - Allocate a guest memory buffer for a resource.
  *
- * @res: The resource for which to allocate a backup buffer.
+ * @res: The resource for which to allocate a gbo buffer.
  * @interruptible: Whether any sleeps during allocation should be
  * performed while interruptible.
  */
 static int vmw_resource_buf_alloc(struct vmw_resource *res,
 				  bool interruptible)
 {
-	unsigned long size = PFN_ALIGN(res->backup_size);
-	struct vmw_bo *backup;
+	unsigned long size = PFN_ALIGN(res->guest_memory_size);
+	struct vmw_bo *gbo;
+	struct vmw_bo_params bo_params = {
+		.domain = res->func->domain,
+		.busy_domain = res->func->busy_domain,
+		.bo_type = ttm_bo_type_device,
+		.size = res->guest_memory_size,
+		.pin = false
+	};
 	int ret;
 
-	if (likely(res->backup)) {
-		BUG_ON(res->backup->base.base.size < size);
+	if (likely(res->guest_memory_bo)) {
+		BUG_ON(res->guest_memory_bo->tbo.base.size < size);
 		return 0;
 	}
 
-	ret = vmw_bo_create(res->dev_priv, res->backup_size,
-			    res->func->domain, res->func->busy_domain,
-			    interruptible, false, &backup);
+	ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
 	if (unlikely(ret != 0))
 		goto out_no_bo;
 
-	res->backup = backup;
+	res->guest_memory_bo = gbo;
 
 out_no_bo:
 	return ret;
@@ -369,13 +374,13 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
 	}
 
 	if (func->bind &&
-	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
-	      val_buf->bo != NULL) ||
-	     (!func->needs_backup && val_buf->bo != NULL))) {
+	    ((func->needs_guest_memory && !vmw_resource_mob_attached(res) &&
+	      val_buf->bo) ||
+	     (!func->needs_guest_memory && val_buf->bo))) {
 		ret = func->bind(res, val_buf);
 		if (unlikely(ret != 0))
 			goto out_bind_failed;
-		if (func->needs_backup)
+		if (func->needs_guest_memory)
 			vmw_resource_mob_attach(res);
 	}
@@ -385,11 +390,11 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
 	 */
 	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
 	    !res->coherent) {
-		if (res->backup->dirty && !res->dirty) {
+		if (res->guest_memory_bo->dirty && !res->dirty) {
 			ret = func->dirty_alloc(res);
 			if (ret)
 				return ret;
-		} else if (!res->backup->dirty && res->dirty) {
+		} else if (!res->guest_memory_bo->dirty && res->dirty) {
 			func->dirty_free(res);
 		}
 	}
@@ -400,12 +405,12 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
 	 */
 	if (res->dirty) {
 		if (dirtying && !res->res_dirty) {
-			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
+			pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT;
 			pgoff_t end = __KERNEL_DIV_ROUND_UP
-				(res->backup_offset + res->backup_size,
+				(res->guest_memory_offset + res->guest_memory_size,
 				 PAGE_SIZE);
 
-			vmw_bo_dirty_unmap(res->backup, start, end);
+			vmw_bo_dirty_unmap(res->guest_memory_bo, start, end);
 		}
 
 		vmw_bo_dirty_transfer_to_res(res);
@@ -427,10 +432,10 @@ out_bind_failed:
  * @res: Pointer to the struct vmw_resource to unreserve.
  * @dirty_set: Change dirty status of the resource.
  * @dirty: When changing dirty status indicates the new status.
- * @switch_backup: Backup buffer has been switched.
- * @new_backup: Pointer to new backup buffer if command submission
+ * @switch_guest_memory: Guest memory buffer has been switched.
+ * @new_guest_memory_bo: Pointer to new guest memory buffer if command submission
  * switched. May be NULL.
- * @new_backup_offset: New backup offset if @switch_backup is true.
+ * @new_guest_memory_offset: New gbo offset if @switch_guest_memory is true.
  *
  * Currently unreserving a resource means putting it back on the device's
  * resource lru list, so that it can be evicted if necessary.
@@ -438,42 +443,42 @@ out_bind_failed:
 void vmw_resource_unreserve(struct vmw_resource *res,
 			    bool dirty_set,
 			    bool dirty,
-			    bool switch_backup,
-			    struct vmw_bo *new_backup,
-			    unsigned long new_backup_offset)
+			    bool switch_guest_memory,
+			    struct vmw_bo *new_guest_memory_bo,
+			    unsigned long new_guest_memory_offset)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
 
 	if (!list_empty(&res->lru_head))
 		return;
 
-	if (switch_backup && new_backup != res->backup) {
-		if (res->backup) {
+	if (switch_guest_memory && new_guest_memory_bo != res->guest_memory_bo) {
+		if (res->guest_memory_bo) {
 			vmw_resource_mob_detach(res);
 			if (res->coherent)
-				vmw_bo_dirty_release(res->backup);
-			vmw_bo_unreference(&res->backup);
+				vmw_bo_dirty_release(res->guest_memory_bo);
+			vmw_bo_unreference(&res->guest_memory_bo);
 		}
 
-		if (new_backup) {
-			res->backup = vmw_bo_reference(new_backup);
+		if (new_guest_memory_bo) {
+			res->guest_memory_bo = vmw_bo_reference(new_guest_memory_bo);
 
 			/*
 			 * The validation code should already have added a
 			 * dirty tracker here.
 			 */
-			WARN_ON(res->coherent && !new_backup->dirty);
+			WARN_ON(res->coherent && !new_guest_memory_bo->dirty);
 
 			vmw_resource_mob_attach(res);
 		} else {
-			res->backup = NULL;
+			res->guest_memory_bo = NULL;
 		}
-	} else if (switch_backup && res->coherent) {
-		vmw_bo_dirty_release(res->backup);
+	} else if (switch_guest_memory && res->coherent) {
+		vmw_bo_dirty_release(res->guest_memory_bo);
 	}
 
-	if (switch_backup)
-		res->backup_offset = new_backup_offset;
+	if (switch_guest_memory)
+		res->guest_memory_offset = new_guest_memory_offset;
 
 	if (dirty_set)
 		res->res_dirty = dirty;
@@ -507,32 +512,32 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 {
 	struct ttm_operation_ctx ctx = { true, false };
 	struct list_head val_list;
-	bool backup_dirty = false;
+	bool guest_memory_dirty = false;
 	int ret;
 
-	if (unlikely(res->backup == NULL)) {
+	if (unlikely(!res->guest_memory_bo)) {
 		ret = vmw_resource_buf_alloc(res, interruptible);
 		if (unlikely(ret != 0))
 			return ret;
 	}
 
 	INIT_LIST_HEAD(&val_list);
-	ttm_bo_get(&res->backup->base);
-	val_buf->bo = &res->backup->base;
+	ttm_bo_get(&res->guest_memory_bo->tbo);
+	val_buf->bo = &res->guest_memory_bo->tbo;
 	val_buf->num_shared = 0;
 	list_add_tail(&val_buf->head, &val_list);
 	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
 
-	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
+	if (res->func->needs_guest_memory && !vmw_resource_mob_attached(res))
 		return 0;
 
-	backup_dirty = res->backup_dirty;
-	vmw_bo_placement_set(res->backup, res->func->domain,
+	guest_memory_dirty = res->guest_memory_dirty;
+	vmw_bo_placement_set(res->guest_memory_bo, res->func->domain,
 			     res->func->busy_domain);
-	ret = ttm_bo_validate(&res->backup->base,
-			      &res->backup->placement,
+	ret = ttm_bo_validate(&res->guest_memory_bo->tbo,
+			      &res->guest_memory_bo->placement,
 			      &ctx);
 
 	if (unlikely(ret != 0))
@@ -545,8 +550,8 @@ out_no_validate:
 out_no_reserve:
 	ttm_bo_put(val_buf->bo);
 	val_buf->bo = NULL;
-	if (backup_dirty)
-		vmw_bo_unreference(&res->backup);
+	if (guest_memory_dirty)
+		vmw_bo_unreference(&res->guest_memory_bo);
 
 	return ret;
 }
@@ -557,12 +562,13 @@ out_no_reserve:
  * @res: The resource to reserve.
  *
  * This function takes the resource off the LRU list and make sure
- * a backup buffer is present for guest-backed resources. However,
- * the buffer may not be bound to the resource at this point.
+ * a guest memory buffer is present for guest-backed resources.
+ * However, the buffer may not be bound to the resource at this
+ * point.
  *
  */
 int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
-			 bool no_backup)
+			 bool no_guest_memory)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
 	int ret;
@@ -571,13 +577,13 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
 	list_del_init(&res->lru_head);
 	spin_unlock(&dev_priv->resource_lock);
 
-	if (res->func->needs_backup && res->backup == NULL &&
-	    !no_backup) {
+	if (res->func->needs_guest_memory && !res->guest_memory_bo &&
+	    !no_guest_memory) {
 		ret = vmw_resource_buf_alloc(res, interruptible);
 		if (unlikely(ret != 0)) {
-			DRM_ERROR("Failed to allocate a backup buffer "
+			DRM_ERROR("Failed to allocate a guest memory buffer "
 				  "of size %lu. bytes\n",
-				  (unsigned long) res->backup_size);
+				  (unsigned long) res->guest_memory_size);
 			return ret;
 		}
 	}
@@ -587,10 +593,10 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
 
 /**
  * vmw_resource_backoff_reservation - Unreserve and unreference a
- * backup buffer
+ * guest memory buffer
 *.
  * @ticket: The ww acquire ctx used for reservation.
- * @val_buf: Backup buffer information.
+ * @val_buf: Guest memory buffer information.
  */
 static void
 vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
@@ -632,14 +638,14 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
 		return ret;
 
 	if (unlikely(func->unbind != NULL &&
-		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
+		     (!func->needs_guest_memory || vmw_resource_mob_attached(res)))) {
 		ret = func->unbind(res, res->res_dirty, &val_buf);
 		if (unlikely(ret != 0))
 			goto out_no_unbind;
 		vmw_resource_mob_detach(res);
 	}
 	ret = func->destroy(res);
-	res->backup_dirty = true;
+	res->guest_memory_dirty = true;
 	res->res_dirty = false;
 out_no_unbind:
 	vmw_resource_backoff_reservation(ticket, &val_buf);
@@ -678,8 +684,8 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr,
 	val_buf.bo = NULL;
 	val_buf.num_shared = 0;
-	if (res->backup)
-		val_buf.bo = &res->backup->base;
+	if (res->guest_memory_bo)
+		val_buf.bo = &res->guest_memory_bo->tbo;
 	do {
 		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
 		if (likely(ret != -EBUSY))
@@ -719,9 +725,9 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr,
 	if (unlikely(ret != 0))
 		goto out_no_validate;
-	else if (!res->func->needs_backup && res->backup) {
+	else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
 		WARN_ON_ONCE(vmw_resource_mob_attached(res));
-		vmw_bo_unreference(&res->backup);
+		vmw_bo_unreference(&res->guest_memory_bo);
 	}
 
 	return 0;
@@ -745,11 +751,11 @@ out_no_validate:
 void vmw_resource_unbind_list(struct vmw_bo *vbo)
 {
 	struct ttm_validate_buffer val_buf = {
-		.bo = &vbo->base,
+		.bo = &vbo->tbo,
 		.num_shared = 0
 	};
 
-	dma_resv_assert_held(vbo->base.base.resv);
+	dma_resv_assert_held(vbo->tbo.base.resv);
 	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
 		struct rb_node *node = vbo->res_tree.rb_node;
 		struct vmw_resource *res =
@@ -758,12 +764,12 @@ void vmw_resource_unbind_list(struct vmw_bo *vbo)
 		if (!WARN_ON_ONCE(!res->func->unbind))
 			(void) res->func->unbind(res, res->res_dirty, &val_buf);
 
-		res->backup_dirty = true;
+		res->guest_memory_dirty = true;
 		res->res_dirty = false;
 		vmw_resource_mob_detach(res);
 	}
 
-	(void) ttm_bo_wait(&vbo->base, false, false);
+	(void) ttm_bo_wait(&vbo->tbo, false, false);
 }
 
@@ -826,9 +832,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
 {
 	struct vmw_bo *dx_query_mob;
 	struct ttm_device *bdev = bo->bdev;
-	struct vmw_private *dev_priv;
-
-	dev_priv = container_of(bdev, struct vmw_private, bdev);
+	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
 
 	mutex_lock(&dev_priv->binding_mutex);
 
@@ -837,7 +841,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
 	    old_mem->mem_type == VMW_PL_MOB) {
 		struct vmw_fence_obj *fence;
 
-		dx_query_mob = container_of(bo, struct vmw_bo, base);
+		dx_query_mob = to_vmw_bo(&bo->base);
 		if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
 			mutex_unlock(&dev_priv->binding_mutex);
 			return;
@@ -865,7 +869,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
  */
 bool vmw_resource_needs_backup(const struct vmw_resource *res)
 {
-	return res->func->needs_backup;
+	return res->func->needs_guest_memory;
 }
 
 /**
@@ -963,22 +967,22 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 	if (res->pin_count == 0) {
 		struct vmw_bo *vbo = NULL;
 
-		if (res->backup) {
-			vbo = res->backup;
+		if (res->guest_memory_bo) {
+			vbo = res->guest_memory_bo;
 
-			ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
+			ret = ttm_bo_reserve(&vbo->tbo, interruptible, false, NULL);
 			if (ret)
 				goto out_no_validate;
-			if (!vbo->base.pin_count) {
+			if (!vbo->tbo.pin_count) {
 				vmw_bo_placement_set(vbo, res->func->domain,
 						     res->func->busy_domain);
 				ret = ttm_bo_validate
-					(&vbo->base,
+					(&vbo->tbo,
 					 &vbo->placement,
 					 &ctx);
 				if (ret) {
-					ttm_bo_unreserve(&vbo->base);
+					ttm_bo_unreserve(&vbo->tbo);
 					goto out_no_validate;
 				}
 			}
@@ -988,7 +992,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 		}
 		ret = vmw_resource_validate(res, interruptible, true);
 		if (vbo)
-			ttm_bo_unreserve(&vbo->base);
+			ttm_bo_unreserve(&vbo->tbo);
 		if (ret)
 			goto out_no_validate;
 	}
@@ -1021,12 +1025,12 @@ void vmw_resource_unpin(struct vmw_resource *res)
 	WARN_ON(ret);
 
 	WARN_ON(res->pin_count == 0);
-	if (--res->pin_count == 0 && res->backup) {
-		struct vmw_bo *vbo = res->backup;
+	if (--res->pin_count == 0 && res->guest_memory_bo) {
+		struct vmw_bo *vbo = res->guest_memory_bo;
 
-		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
+		(void) ttm_bo_reserve(&vbo->tbo, false, false, NULL);
 		vmw_bo_pin_reserved(vbo, false);
-		ttm_bo_unreserve(&vbo->base);
+		ttm_bo_unreserve(&vbo->tbo);
 	}
 
 	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
@@ -1084,9 +1088,9 @@ int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
 		struct vmw_resource *cur_res =
 			container_of(cur, struct vmw_resource, mob_node);
 
-		if (cur_res->backup_offset >= res_end) {
+		if (cur_res->guest_memory_offset >= res_end) {
 			cur = cur->rb_left;
-		} else if (cur_res->backup_offset + cur_res->backup_size <=
+		} else if (cur_res->guest_memory_offset + cur_res->guest_memory_size <=
 			   res_start) {
 			cur = cur->rb_right;
 		} else {
@@ -1097,7 +1101,7 @@ int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
 	}
 
 	/*
-	 * In order of increasing backup_offset, clean dirty resources
+	 * In order of increasing guest_memory_offset, clean dirty resources
 	 * intersecting the range.
 	 */
 	while (found) {
@@ -1113,13 +1117,13 @@ int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
 			found->res_dirty = false;
 		}
 
-		last_cleaned = found->backup_offset + found->backup_size;
+		last_cleaned = found->guest_memory_offset + found->guest_memory_size;
 		cur = rb_next(&found->mob_node);
 		if (!cur)
 			break;
 
 		found = container_of(cur, struct vmw_resource, mob_node);
-		if (found->backup_offset >= res_end)
+		if (found->guest_memory_offset >= res_end)
 			break;
 	}
 
@@ -1128,7 +1132,7 @@ int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
 	 */
 	*num_prefault = 1;
 	if (last_cleaned > res_start) {
-		struct ttm_buffer_object *bo = &vbo->base;
+		struct ttm_buffer_object *bo = &vbo->tbo;
 
 		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
 						      PAGE_SIZE);
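
Note on the allocation change: besides the backup -> guest_memory rename, the vmw_resource_buf_alloc() hunk above switches vmw_bo_create() from a long positional argument list to a struct vmw_bo_params argument. The sketch below condenses that new calling convention into one place. Every identifier in it (vmw_bo_params, vmw_bo_create, ttm_bo_type_device, guest_memory_size, the func->domain fields) is taken from the hunks above; the wrapper function itself is illustrative only and not part of the patch.

/*
 * Illustrative sketch, not part of the patch: allocate a guest memory BO
 * for a resource using the parameter-struct form of vmw_bo_create()
 * introduced by this change.
 */
static int example_alloc_guest_memory(struct vmw_resource *res,
				      struct vmw_bo **out_bo)
{
	struct vmw_bo_params bo_params = {
		.domain      = res->func->domain,      /* preferred placement */
		.busy_domain = res->func->busy_domain, /* fallback when busy */
		.bo_type     = ttm_bo_type_device,
		.size        = res->guest_memory_size, /* was backup_size */
		.pin         = false,
	};

	/* One params struct now replaces the old positional argument list. */
	return vmw_bo_create(res->dev_priv, &bo_params, out_bo);
}

Grouping the arguments this way keeps call sites short and means a later addition to the creation parameters does not have to touch every caller.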
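The vmw_query_move_notify() hunks also replace open-coded container_of() upcasts with the vmw_priv_from_ttm() and to_vmw_bo() helpers. Their definitions are not part of this diff; assuming they live in the driver headers, they are presumably thin container_of() wrappers along the lines shown below, included here only to make the new call sites readable. Member names follow the base -> tbo rename visible in the hunks, but the exact definitions may differ.

/* Assumed shape of the helpers used above; the real definitions live in
 * the vmwgfx headers and may differ in detail. */
static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev)
{
	return container_of(bdev, struct vmw_private, bdev);
}

static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
{
	/* bo->base is the GEM object embedded at vmw_bo.tbo.base */
	return container_of(gobj, struct vmw_bo, tbo.base);
}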