Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c'):

 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 1935
 1 file changed, 1734 insertions(+), 201 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 97ad3bcb99a7..b56565457c96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,6 +29,8 @@
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
#define VMW_RES_HT_ORDER 12
@@ -59,8 +61,11 @@ struct vmw_resource_relocation {
* @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
* @first_usage: Set to true the first time the resource is referenced in
* the command stream.
- * @no_buffer_needed: Resources do not need to allocate buffer backup on
- * reservation. The command stream will provide one.
+ * @switching_backup: The command stream provides a new backup buffer for a
+ * resource.
+ * @no_buffer_needed: This means @switching_backup is true on first buffer
+ * reference, so resource reservation does not need to allocate a backup
+ * buffer for the resource.
*/
struct vmw_resource_val_node {
struct list_head head;
@@ -69,8 +74,9 @@ struct vmw_resource_val_node {
struct vmw_dma_buffer *new_backup;
struct vmw_ctx_binding_state *staged_bindings;
unsigned long new_backup_offset;
- bool first_usage;
- bool no_buffer_needed;
+ u32 first_usage : 1;
+ u32 switching_backup : 1;
+ u32 no_buffer_needed : 1;
};
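The hunk above converts three bools to single-bit fields, packing all the
per-node flags into one 32-bit word. A minimal standalone sketch of the idiom
(illustration only; struct and function names are hypothetical):

	struct flags_demo {
		u32 first_usage : 1;
		u32 switching_backup : 1;
		u32 no_buffer_needed : 1;
	};

	static void flags_demo_use(void)
	{
		struct flags_demo f = { .first_usage = 1 };

		/* Bitfields still read and assign like booleans... */
		if (f.first_usage && !f.switching_backup)
			f.no_buffer_needed = 1;
		/* ...but all three flags share a single u32. */
	}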
/**
@@ -92,22 +98,40 @@ struct vmw_cmd_entry {
[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
(_gb_disable), (_gb_enable)}
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ struct vmw_resource *ctx);
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGAMobId *id,
+ struct vmw_dma_buffer **vmw_bo_p);
+static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
+ struct vmw_dma_buffer *vbo,
+ bool validate_as_mob,
+ uint32_t *p_val_node);
+
+
/**
- * vmw_resource_unreserve - unreserve resources previously reserved for
+ * vmw_resources_unreserve - unreserve resources previously reserved for
* command submission.
*
- * @list_head: list of resources to unreserve.
+ * @sw_context: pointer to the software context
* @backoff: Whether command submission failed.
*/
-static void vmw_resource_list_unreserve(struct list_head *list,
- bool backoff)
+static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
+ bool backoff)
{
struct vmw_resource_val_node *val;
+ struct list_head *list = &sw_context->resource_list;
+
+ if (sw_context->dx_query_mob && !backoff)
+ vmw_context_bind_dx_query(sw_context->dx_query_ctx,
+ sw_context->dx_query_mob);
list_for_each_entry(val, list, head) {
struct vmw_resource *res = val->res;
- struct vmw_dma_buffer *new_backup =
- backoff ? NULL : val->new_backup;
+ bool switch_backup =
+ (backoff) ? false : val->switching_backup;
/*
* Transfer staged context bindings to the
@@ -115,18 +139,71 @@ static void vmw_resource_list_unreserve(struct list_head *list,
*/
if (unlikely(val->staged_bindings)) {
if (!backoff) {
- vmw_context_binding_state_transfer
- (val->res, val->staged_bindings);
+ vmw_binding_state_commit
+ (vmw_context_binding_state(val->res),
+ val->staged_bindings);
}
- kfree(val->staged_bindings);
+
+ if (val->staged_bindings != sw_context->staged_bindings)
+ vmw_binding_state_free(val->staged_bindings);
+ else
+ sw_context->staged_bindings_inuse = false;
val->staged_bindings = NULL;
}
- vmw_resource_unreserve(res, new_backup,
- val->new_backup_offset);
+ vmw_resource_unreserve(res, switch_backup, val->new_backup,
+ val->new_backup_offset);
vmw_dmabuf_unreference(&val->new_backup);
}
}
+/**
+ * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
+ * added to the validate list.
+ *
+ * @dev_priv: Pointer to the device private.
+ * @sw_context: The validation context.
+ * @node: The validation node holding this context.
+ */
+static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ struct vmw_resource_val_node *node)
+{
+ int ret;
+
+ ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ if (!sw_context->staged_bindings) {
+ sw_context->staged_bindings =
+ vmw_binding_state_alloc(dev_priv);
+ if (IS_ERR(sw_context->staged_bindings)) {
+ DRM_ERROR("Failed to allocate context binding "
+ "information.\n");
+ ret = PTR_ERR(sw_context->staged_bindings);
+ sw_context->staged_bindings = NULL;
+ goto out_err;
+ }
+ }
+
+ if (sw_context->staged_bindings_inuse) {
+ node->staged_bindings = vmw_binding_state_alloc(dev_priv);
+ if (IS_ERR(node->staged_bindings)) {
+ DRM_ERROR("Failed to allocate context binding "
+ "information.\n");
+ ret = PTR_ERR(node->staged_bindings);
+ node->staged_bindings = NULL;
+ goto out_err;
+ }
+ } else {
+ node->staged_bindings = sw_context->staged_bindings;
+ sw_context->staged_bindings_inuse = true;
+ }
+
+ return 0;
+out_err:
+ return ret;
+}
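The allocation logic above follows a borrow-then-allocate rule for binding
state; a descriptive summary of the ownership model implied by this hunk and
the vmw_resources_unreserve() hunk above (note only, not part of the patch):

	/*
	 * Staged-bindings ownership, as implemented above:
	 *
	 *   first context in a batch -> borrows sw_context->staged_bindings
	 *                               and sets staged_bindings_inuse
	 *   later contexts           -> get a private vmw_binding_state_alloc()
	 *
	 * vmw_resources_unreserve() frees only the private states and just
	 * clears staged_bindings_inuse for the borrowed one, so the
	 * sw_context allocation is reused across submissions.
	 */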
/**
* vmw_resource_val_add - Add a resource to the software context's
@@ -141,6 +218,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
struct vmw_resource *res,
struct vmw_resource_val_node **p_node)
{
+ struct vmw_private *dev_priv = res->dev_priv;
struct vmw_resource_val_node *node;
struct drm_hash_item *hash;
int ret;
@@ -169,14 +247,90 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
kfree(node);
return ret;
}
- list_add_tail(&node->head, &sw_context->resource_list);
node->res = vmw_resource_reference(res);
node->first_usage = true;
-
if (unlikely(p_node != NULL))
*p_node = node;
- return 0;
+ if (!dev_priv->has_mob) {
+ list_add_tail(&node->head, &sw_context->resource_list);
+ return 0;
+ }
+
+ switch (vmw_res_type(res)) {
+ case vmw_res_context:
+ case vmw_res_dx_context:
+ list_add(&node->head, &sw_context->ctx_resource_list);
+ ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
+ break;
+ case vmw_res_cotable:
+ list_add_tail(&node->head, &sw_context->ctx_resource_list);
+ break;
+ default:
+ list_add_tail(&node->head, &sw_context->resource_list);
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * vmw_view_res_val_add - Add a view and the surface it points to,
+ * to the validation list.
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view: Pointer to the view resource.
+ *
+ * Returns 0 if success, negative error code otherwise.
+ */
+static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
+ struct vmw_resource *view)
+{
+ int ret;
+
+ /*
+ * First add the resource the view is pointing to, otherwise
+ * it may be swapped out when the view is validated.
+ */
+ ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
+ if (ret)
+ return ret;
+
+ return vmw_resource_val_add(sw_context, view, NULL);
+}
+
+/**
+ * vmw_view_id_val_add - Look up a view and add it, and the surface it
+ * points to, to the validation list.
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view_type: The view type to look up.
+ * @id: view id of the view.
+ *
+ * The view is represented by a view id and the DX context it's created on,
+ * or scheduled for creation on. If there is no DX context set, the function
+ * returns -EINVAL. Otherwise it returns 0 on success and a negative error
+ * code on failure.
+ */
+static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
+ enum vmw_view_type view_type, u32 id)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *view;
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ view = vmw_view_lookup(sw_context->man, view_type, id);
+ if (IS_ERR(view))
+ return PTR_ERR(view);
+
+ ret = vmw_view_res_val_add(sw_context, view);
+ vmw_resource_unreference(&view);
+
+ return ret;
}
/**
@@ -195,24 +349,56 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
struct vmw_resource *ctx)
{
struct list_head *binding_list;
- struct vmw_ctx_binding *entry;
+ struct vmw_ctx_bindinfo *entry;
int ret = 0;
struct vmw_resource *res;
+ u32 i;
+ /* Add all cotables to the validation list. */
+ if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+ for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ res = vmw_context_cotable(ctx, i);
+ if (IS_ERR(res))
+ continue;
+
+ ret = vmw_resource_val_add(sw_context, res, NULL);
+ vmw_resource_unreference(&res);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ }
+
+
+ /* Add all resources bound to the context to the validation list */
mutex_lock(&dev_priv->binding_mutex);
binding_list = vmw_context_binding_list(ctx);
list_for_each_entry(entry, binding_list, ctx_list) {
- res = vmw_resource_reference_unless_doomed(entry->bi.res);
+ /* entry->res is not refcounted */
+ res = vmw_resource_reference_unless_doomed(entry->res);
if (unlikely(res == NULL))
continue;
- ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+ if (vmw_res_type(entry->res) == vmw_res_view)
+ ret = vmw_view_res_val_add(sw_context, entry->res);
+ else
+ ret = vmw_resource_val_add(sw_context, entry->res,
+ NULL);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
break;
}
+ if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+ struct vmw_dma_buffer *dx_query_mob;
+
+ dx_query_mob = vmw_context_get_dx_query_mob(ctx);
+ if (dx_query_mob)
+ ret = vmw_bo_to_validate_list(sw_context,
+ dx_query_mob,
+ true, NULL);
+ }
+
mutex_unlock(&dev_priv->binding_mutex);
return ret;
}
@@ -308,7 +494,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
* submission is reached.
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
- struct ttm_buffer_object *bo,
+ struct vmw_dma_buffer *vbo,
bool validate_as_mob,
uint32_t *p_val_node)
{
@@ -318,7 +504,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct drm_hash_item *hash;
int ret;
- if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+ if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
&hash) == 0)) {
vval_buf = container_of(hash, struct vmw_validate_buffer,
hash);
@@ -336,7 +522,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
return -EINVAL;
}
vval_buf = &sw_context->val_bufs[val_node];
- vval_buf->hash.key = (unsigned long) bo;
+ vval_buf->hash.key = (unsigned long) vbo;
ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to initialize a buffer validation "
@@ -345,7 +531,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
}
++sw_context->cur_val_buf;
val_buf = &vval_buf->base;
- val_buf->bo = ttm_bo_reference(bo);
+ val_buf->bo = ttm_bo_reference(&vbo->base);
val_buf->shared = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
vval_buf->validate_as_mob = validate_as_mob;
@@ -370,27 +556,39 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
- int ret;
+ int ret = 0;
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
- ret = vmw_resource_reserve(res, val->no_buffer_needed);
+ ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
if (unlikely(ret != 0))
return ret;
if (res->backup) {
- struct ttm_buffer_object *bo = &res->backup->base;
+ struct vmw_dma_buffer *vbo = res->backup;
ret = vmw_bo_to_validate_list
- (sw_context, bo,
+ (sw_context, vbo,
vmw_resource_needs_backup(res), NULL);
if (unlikely(ret != 0))
return ret;
}
}
- return 0;
+
+ if (sw_context->dx_query_mob) {
+ struct vmw_dma_buffer *expected_dx_query_mob;
+
+ expected_dx_query_mob =
+ vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
+ if (expected_dx_query_mob &&
+ expected_dx_query_mob != sw_context->dx_query_mob) {
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
}
/**
@@ -409,6 +607,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
+ struct vmw_dma_buffer *backup = res->backup;
ret = vmw_resource_validate(res);
if (unlikely(ret != 0)) {
@@ -416,18 +615,29 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
DRM_ERROR("Failed to validate resource.\n");
return ret;
}
+
+ /* Check if the resource switched backup buffer */
+ if (backup && res->backup && (backup != res->backup)) {
+ struct vmw_dma_buffer *vbo = res->backup;
+
+ ret = vmw_bo_to_validate_list
+ (sw_context, vbo,
+ vmw_resource_needs_backup(res), NULL);
+ if (ret) {
+ ttm_bo_unreserve(&vbo->base);
+ return ret;
+ }
+ }
}
return 0;
}
-
/**
* vmw_cmd_res_reloc_add - Add a resource to a software context's
* relocation- and validation lists.
*
* @dev_priv: Pointer to a struct vmw_private identifying the device.
* @sw_context: Pointer to the software context.
- * @res_type: Resource type.
* @id_loc: Pointer to where the id that needs translation is located.
* @res: Valid pointer to a struct vmw_resource.
* @p_val: If non null, a pointer to the struct vmw_resource_validate_node
@@ -435,7 +645,6 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
*/
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
uint32_t *id_loc,
struct vmw_resource *res,
struct vmw_resource_val_node **p_val)
@@ -454,29 +663,6 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- if (res_type == vmw_res_context && dev_priv->has_mob &&
- node->first_usage) {
-
- /*
- * Put contexts first on the list to be able to exit
- * list traversal for contexts early.
- */
- list_del(&node->head);
- list_add(&node->head, &sw_context->resource_list);
-
- ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
- if (unlikely(ret != 0))
- return ret;
- node->staged_bindings =
- kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
- if (node->staged_bindings == NULL) {
- DRM_ERROR("Failed to allocate context binding "
- "information.\n");
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&node->staged_bindings->list);
- }
-
if (p_val)
*p_val = node;
@@ -554,7 +740,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
rcache->res = res;
rcache->handle = *id_loc;
- ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
+ ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
res, &node);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -573,6 +759,46 @@ out_no_reloc:
}
/**
+ * vmw_rebind_all_dx_query - Rebind all DX queries associated with the context
+ *
+ * @ctx_res: context the query belongs to
+ *
+ * This function assumes binding_mutex is held.
+ */
+static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
+{
+ struct vmw_private *dev_priv = ctx_res->dev_priv;
+ struct vmw_dma_buffer *dx_query_mob;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindAllQuery body;
+ } *cmd;
+
+
+ dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
+
+ if (!dx_query_mob || dx_query_mob->dx_query_ctx)
+ return 0;
+
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
+
+ if (cmd == NULL) {
+ DRM_ERROR("Failed to rebind queries.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = ctx_res->id;
+ cmd->body.mobid = dx_query_mob->base.mem.start;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ vmw_context_bind_dx_query(ctx_res, dx_query_mob);
+
+ return 0;
+}
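Per the kerneldoc above, the function assumes binding_mutex is held, so a
hypothetical caller would bracket it like this (sketch only, not from the
patch):

	mutex_lock(&dev_priv->binding_mutex);
	ret = vmw_rebind_all_dx_query(ctx_res);
	mutex_unlock(&dev_priv->binding_mutex);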
+
+/**
* vmw_rebind_contexts - Rebind all resources previously bound to
* referenced contexts.
*
@@ -589,12 +815,80 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
if (unlikely(!val->staged_bindings))
break;
- ret = vmw_context_rebind_all(val->res);
+ ret = vmw_binding_rebind_all
+ (vmw_context_binding_state(val->res));
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to rebind context.\n");
return ret;
}
+
+ ret = vmw_rebind_all_dx_query(val->res);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_view_bindings_add - Add an array of view bindings to a context
+ * binding state tracker.
+ *
+ * @sw_context: The execbuf state used for this command.
+ * @view_type: View type for the bindings.
+ * @binding_type: Binding type for the bindings.
+ * @shader_slot: The shader slot to use for the bindings.
+ * @view_ids: Array of view ids to be bound.
+ * @num_views: Number of view ids in @view_ids.
+ * @first_slot: The binding slot to be used for the first view id in @view_ids.
+ */
+static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
+ enum vmw_view_type view_type,
+ enum vmw_ctx_binding_type binding_type,
+ uint32 shader_slot,
+ uint32 view_ids[], u32 num_views,
+ u32 first_slot)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_cmdbuf_res_manager *man;
+ u32 i;
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ man = sw_context->man;
+ for (i = 0; i < num_views; ++i) {
+ struct vmw_ctx_bindinfo_view binding;
+ struct vmw_resource *view = NULL;
+
+ if (view_ids[i] != SVGA3D_INVALID_ID) {
+ view = vmw_view_lookup(man, view_type, view_ids[i]);
+ if (IS_ERR(view)) {
+ DRM_ERROR("View not found.\n");
+ return PTR_ERR(view);
+ }
+
+ ret = vmw_view_res_val_add(sw_context, view);
+ if (ret) {
+ DRM_ERROR("Could not add view to "
+ "validation list.\n");
+ vmw_resource_unreference(&view);
+ return ret;
+ }
+ }
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = view;
+ binding.bi.bt = binding_type;
+ binding.shader_slot = shader_slot;
+ binding.slot = first_slot + i;
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ shader_slot, binding.slot);
+ if (view)
+ vmw_resource_unreference(&view);
}
return 0;
@@ -638,6 +932,12 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
cmd = container_of(header, struct vmw_sid_cmd, header);
+ if (cmd->body.type >= SVGA3D_RT_MAX) {
+ DRM_ERROR("Illegal render target type %u.\n",
+ (unsigned) cmd->body.type);
+ return -EINVAL;
+ }
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
&ctx_node);
@@ -651,13 +951,14 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
return ret;
if (dev_priv->has_mob) {
- struct vmw_ctx_bindinfo bi;
-
- bi.ctx = ctx_node->res;
- bi.res = res_node ? res_node->res : NULL;
- bi.bt = vmw_ctx_binding_rt;
- bi.i1.rt_type = cmd->body.type;
- return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+ struct vmw_ctx_bindinfo_view binding;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = res_node ? res_node->res : NULL;
+ binding.bi.bt = vmw_ctx_binding_rt;
+ binding.slot = cmd->body.type;
+ vmw_binding_add(ctx_node->staged_bindings,
+ &binding.bi, 0, binding.slot);
}
return 0;
@@ -674,16 +975,62 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.src.sid, NULL);
- if (unlikely(ret != 0))
+ if (ret)
return ret;
+
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.dest.sid, NULL);
}
+static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBufferCopy body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.src, NULL);
+ if (ret != 0)
+ return ret;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.dest, NULL);
+}
+
+static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXPredCopyRegion body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.srcSid, NULL);
+ if (ret != 0)
+ return ret;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.dstSid, NULL);
+}
+
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -752,7 +1099,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
- struct ttm_buffer_object *new_query_bo,
+ struct vmw_dma_buffer *new_query_bo,
struct vmw_sw_context *sw_context)
{
struct vmw_res_cache_entry *ctx_entry =
@@ -764,7 +1111,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
- if (unlikely(new_query_bo->num_pages > 4)) {
+ if (unlikely(new_query_bo->base.num_pages > 4)) {
DRM_ERROR("Query buffer too large.\n");
return -EINVAL;
}
@@ -833,12 +1180,12 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
if (dev_priv->pinned_bo) {
- vmw_bo_pin(dev_priv->pinned_bo, false);
- ttm_bo_unref(&dev_priv->pinned_bo);
+ vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+ vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}
if (!sw_context->needs_post_query_barrier) {
- vmw_bo_pin(sw_context->cur_query_bo, true);
+ vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
/*
* We pin also the dummy_query_bo buffer so that we
@@ -846,14 +1193,17 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
* dummy queries in context destroy paths.
*/
- vmw_bo_pin(dev_priv->dummy_query_bo, true);
- dev_priv->dummy_query_bo_pinned = true;
+ if (!dev_priv->dummy_query_bo_pinned) {
+ vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
+ true);
+ dev_priv->dummy_query_bo_pinned = true;
+ }
BUG_ON(sw_context->last_query_ctx == NULL);
dev_priv->query_cid = sw_context->last_query_ctx->id;
dev_priv->query_cid_valid = true;
dev_priv->pinned_bo =
- ttm_bo_reference(sw_context->cur_query_bo);
+ vmw_dmabuf_reference(sw_context->cur_query_bo);
}
}
}
@@ -882,7 +1232,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_dma_buffer **vmw_bo_p)
{
struct vmw_dma_buffer *vmw_bo = NULL;
- struct ttm_buffer_object *bo;
uint32_t handle = *id;
struct vmw_relocation *reloc;
int ret;
@@ -893,7 +1242,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
ret = -EINVAL;
goto out_no_reloc;
}
- bo = &vmw_bo->base;
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
DRM_ERROR("Max number relocations per submission"
@@ -906,7 +1254,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
reloc->mob_loc = id;
reloc->location = NULL;
- ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
+ ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -944,7 +1292,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_dma_buffer **vmw_bo_p)
{
struct vmw_dma_buffer *vmw_bo = NULL;
- struct ttm_buffer_object *bo;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
int ret;
@@ -955,7 +1302,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
ret = -EINVAL;
goto out_no_reloc;
}
- bo = &vmw_bo->base;
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
DRM_ERROR("Max number relocations per submission"
@@ -967,7 +1313,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
- ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
+ ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -980,6 +1326,98 @@ out_no_reloc:
return ret;
}
+
+
+/**
+ * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * This function adds the new query into the query COTABLE.
+ */
+static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dx_define_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDefineQuery q;
+ } *cmd;
+
+ int ret;
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *cotable_res;
+
+
+ if (ctx_node == NULL) {
+ DRM_ERROR("DX Context not set for query.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
+
+ if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
+ cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
+ return -EINVAL;
+
+ cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
+ ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
+ vmw_resource_unreference(&cotable_res);
+
+ return ret;
+}
+
+
+
+/**
+ * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * The query bind operation will eventually associate the query ID
+ * with its backing MOB. In this function, we take the user mode
+ * MOB ID and use vmw_translate_mob_ptr() to translate it to its
+ * kernel mode equivalent.
+ */
+static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dx_bind_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindQuery q;
+ } *cmd;
+
+ struct vmw_dma_buffer *vmw_bo;
+ int ret;
+
+
+ cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
+
+ /*
+ * Look up the buffer pointed to by q.mobid, put it on the relocation
+ * list so its kernel mode MOB ID can be filled in later.
+ */
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
+ &vmw_bo);
+
+ if (ret != 0)
+ return ret;
+
+ sw_context->dx_query_mob = vmw_bo;
+ sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
+
+ vmw_dmabuf_unreference(&vmw_bo);
+
+ return ret;
+}
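The translation deferred here is resolved by the relocation pass after buffer
validation; roughly (sketch based on the mob_loc relocation that
vmw_translate_mob_ptr() records, with validated_bo a hypothetical name for the
buffer object after validation):

	if (reloc->mob_loc)
		/* Patch the user handle with the kernel mode MOB id. */
		*reloc->mob_loc = validated_bo->mem.start;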
+
+
+
/**
* vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
*
@@ -1074,7 +1512,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+ ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
vmw_dmabuf_unreference(&vmw_bo);
return ret;
@@ -1128,7 +1566,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+ ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
vmw_dmabuf_unreference(&vmw_bo);
return ret;
@@ -1363,6 +1801,12 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
continue;
+ if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
+ DRM_ERROR("Illegal texture/sampler unit %u.\n",
+ (unsigned) cur_state->stage);
+ return -EINVAL;
+ }
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cur_state->value, &res_node);
@@ -1370,14 +1814,14 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
return ret;
if (dev_priv->has_mob) {
- struct vmw_ctx_bindinfo bi;
-
- bi.ctx = ctx_node->res;
- bi.res = res_node ? res_node->res : NULL;
- bi.bt = vmw_ctx_binding_tex;
- bi.i1.texture_stage = cur_state->stage;
- vmw_context_binding_add(ctx_node->staged_bindings,
- &bi);
+ struct vmw_ctx_bindinfo_tex binding;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = res_node ? res_node->res : NULL;
+ binding.bi.bt = vmw_ctx_binding_tex;
+ binding.texture_stage = cur_state->stage;
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ 0, binding.texture_stage);
}
}
@@ -1407,6 +1851,47 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
return ret;
}
+
+/**
+ * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
+ * switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @val_node: The validation node representing the resource.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function prepares for registering a switch of backup buffers
+ * in the resource metadata just prior to unreserving.
+ */
+static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ struct vmw_resource_val_node *val_node,
+ uint32_t *buf_id,
+ unsigned long backup_offset)
+{
+ struct vmw_dma_buffer *dma_buf;
+ int ret;
+
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+ if (ret)
+ return ret;
+
+ val_node->switching_backup = true;
+ if (val_node->first_usage)
+ val_node->no_buffer_needed = true;
+
+ vmw_dmabuf_unreference(&val_node->new_backup);
+ val_node->new_backup = dma_buf;
+ val_node->new_backup_offset = backup_offset;
+
+ return 0;
+}
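The switch registered here is consumed at unreserve time; compare the
vmw_resources_unreserve() hunk near the top of this diff (note only, not part
of the patch):

	/*
	 * Consumed later in vmw_resources_unreserve():
	 *
	 *	vmw_resource_unreserve(res, switch_backup, val->new_backup,
	 *			       val->new_backup_offset);
	 *	vmw_dmabuf_unreference(&val->new_backup);
	 */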
+
+
/**
* vmw_cmd_switch_backup - Utility function to handle backup buffer switching
*
@@ -1420,7 +1905,8 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
* @backup_offset: Offset of backup into MOB.
*
* This function prepares for registering a switch of backup buffers
- * in the resource metadata just prior to unreserving.
+ * in the resource metadata just prior to unreserving. It's basically a wrapper
+ * around vmw_cmd_res_switch_backup with a different interface.
*/
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -1431,27 +1917,16 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
uint32_t *buf_id,
unsigned long backup_offset)
{
- int ret;
- struct vmw_dma_buffer *dma_buf;
struct vmw_resource_val_node *val_node;
+ int ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
converter, res_id, &val_node);
- if (unlikely(ret != 0))
- return ret;
-
- ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
- if (unlikely(ret != 0))
+ if (ret)
return ret;
- if (val_node->first_usage)
- val_node->no_buffer_needed = true;
-
- vmw_dmabuf_unreference(&val_node->new_backup);
- val_node->new_backup = dma_buf;
- val_node->new_backup_offset = backup_offset;
-
- return 0;
+ return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
+ buf_id, backup_offset);
}
/**
@@ -1703,10 +2178,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
if (unlikely(!dev_priv->has_mob))
return 0;
- ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
- cmd->body.shid,
- cmd->body.type,
- &sw_context->staged_cmd_res);
+ ret = vmw_shader_remove(vmw_context_res_man(val->res),
+ cmd->body.shid,
+ cmd->body.type,
+ &sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
@@ -1734,13 +2209,19 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdSetShader body;
} *cmd;
struct vmw_resource_val_node *ctx_node, *res_node = NULL;
- struct vmw_ctx_bindinfo bi;
+ struct vmw_ctx_bindinfo_shader binding;
struct vmw_resource *res = NULL;
int ret;
cmd = container_of(header, struct vmw_set_shader_cmd,
header);
+ if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
+ DRM_ERROR("Illegal shader type %u.\n",
+ (unsigned) cmd->body.type);
+ return -EINVAL;
+ }
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
&ctx_node);
@@ -1751,14 +2232,12 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
return 0;
if (cmd->body.shid != SVGA3D_INVALID_ID) {
- res = vmw_compat_shader_lookup
- (vmw_context_res_man(ctx_node->res),
- cmd->body.shid,
- cmd->body.type);
+ res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+ cmd->body.shid,
+ cmd->body.type);
if (!IS_ERR(res)) {
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
- vmw_res_shader,
&cmd->body.shid, res,
&res_node);
vmw_resource_unreference(&res);
@@ -1776,11 +2255,13 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
return ret;
}
- bi.ctx = ctx_node->res;
- bi.res = res_node ? res_node->res : NULL;
- bi.bt = vmw_ctx_binding_shader;
- bi.i1.shader_type = cmd->body.type;
- return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = res_node ? res_node->res : NULL;
+ binding.bi.bt = vmw_ctx_binding_shader;
+ binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ binding.shader_slot, 0);
+ return 0;
}
/**
@@ -1842,6 +2323,690 @@ static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
cmd->body.offsetInBytes);
}
+/**
+ * vmw_cmd_dx_set_single_constant_buffer - Validate an
+ * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int
+vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetSingleConstantBuffer body;
+ } *cmd;
+ struct vmw_resource_val_node *res_node = NULL;
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_bindinfo_cb binding;
+ int ret;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = res_node ? res_node->res : NULL;
+ binding.bi.bt = vmw_ctx_binding_cb;
+ binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+ binding.offset = cmd->body.offsetInBytes;
+ binding.size = cmd->body.sizeInBytes;
+ binding.slot = cmd->body.slot;
+
+ if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
+ binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+ DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
+ (unsigned) cmd->body.type,
+ (unsigned) binding.slot);
+ return -EINVAL;
+ }
+
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ binding.shader_slot, binding.slot);
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_shader_res - Validate an
+ * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetShaderResources body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
+ sizeof(SVGA3dShaderResourceViewId);
+
+ if ((u64) cmd->body.startView + (u64) num_sr_view >
+ (u64) SVGA3D_DX_MAX_SRVIEWS ||
+ cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+ DRM_ERROR("Invalid shader binding.\n");
+ return -EINVAL;
+ }
+
+ return vmw_view_bindings_add(sw_context, vmw_view_sr,
+ vmw_ctx_binding_sr,
+ cmd->body.type - SVGA3D_SHADERTYPE_MIN,
+ (void *) &cmd[1], num_sr_view,
+ cmd->body.startView);
+}
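The cast to u64 in the range check above keeps the addition from wrapping when
startView is near UINT32_MAX; the same idiom as a standalone helper
(illustration only, helper name hypothetical):

	/* With plain u32 arithmetic, start + count could wrap to a small
	 * value and slip past the limit; widening first prevents that. */
	static bool range_ok(u32 start, u32 count, u32 max)
	{
		return (u64)start + (u64)count <= (u64)max;
	}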
+
+/**
+ * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetShader body;
+ } *cmd;
+ struct vmw_resource *res = NULL;
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_bindinfo_shader binding;
+ int ret = 0;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, typeof(*cmd), header);
+
+ if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+ DRM_ERROR("Illegal shader type %u.\n",
+ (unsigned) cmd->body.type);
+ return -EINVAL;
+ }
+
+ if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
+ res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
+ if (IS_ERR(res)) {
+ DRM_ERROR("Could not find shader for binding.\n");
+ return PTR_ERR(res);
+ }
+
+ ret = vmw_resource_val_add(sw_context, res, NULL);
+ if (ret)
+ goto out_unref;
+ }
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = res;
+ binding.bi.bt = vmw_ctx_binding_dx_shader;
+ binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ binding.shader_slot, 0);
+out_unref:
+ if (res)
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
+
+/**
+ * vmw_cmd_dx_set_vertex_buffers - Validate an
+ * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_bindinfo_vb binding;
+ struct vmw_resource_val_node *res_node;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetVertexBuffers body;
+ SVGA3dVertexBuffer buf[];
+ } *cmd;
+ int i, ret, num;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, typeof(*cmd), header);
+ num = (cmd->header.size - sizeof(cmd->body)) /
+ sizeof(SVGA3dVertexBuffer);
+ if ((u64)num + (u64)cmd->body.startBuffer >
+ (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
+ DRM_ERROR("Invalid number of vertex buffers.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num; i++) {
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->buf[i].sid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.bt = vmw_ctx_binding_vb;
+ binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.offset = cmd->buf[i].offset;
+ binding.stride = cmd->buf[i].stride;
+ binding.slot = i + cmd->body.startBuffer;
+
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ 0, binding.slot);
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_index_buffer - Validate an
+ * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_bindinfo_ib binding;
+ struct vmw_resource_val_node *res_node;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetIndexBuffer body;
+ } *cmd;
+ int ret;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.bi.bt = vmw_ctx_binding_ib;
+ binding.offset = cmd->body.offset;
+ binding.format = cmd->body.format;
+
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_rendertargets - Validate an
+ * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetRenderTargets body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+ u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
+ sizeof(SVGA3dRenderTargetViewId);
+
+ if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
+ DRM_ERROR("Invalid DX Rendertarget binding.\n");
+ return -EINVAL;
+ }
+
+ ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
+ vmw_ctx_binding_ds, 0,
+ &cmd->body.depthStencilViewId, 1, 0);
+ if (ret)
+ return ret;
+
+ return vmw_view_bindings_add(sw_context, vmw_view_rt,
+ vmw_ctx_binding_dx_rt, 0,
+ (void *)&cmd[1], num_rt_view, 0);
+}
+
+/**
+ * vmw_cmd_dx_clear_rendertarget_view - Validate an
+ * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXClearRenderTargetView body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ return vmw_view_id_val_add(sw_context, vmw_view_rt,
+ cmd->body.renderTargetViewId);
+}
+
+/**
+ * vmw_cmd_dx_clear_depthstencil_view - Validate an
+ * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXClearDepthStencilView body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ return vmw_view_id_val_add(sw_context, vmw_view_ds,
+ cmd->body.depthStencilViewId);
+}
+
+static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource_val_node *srf_node;
+ struct vmw_resource *res;
+ enum vmw_view_type view_type;
+ int ret;
+ /*
+ * This is based on the fact that all affected define commands have
+ * the same initial command body layout.
+ */
+ struct {
+ SVGA3dCmdHeader header;
+ uint32 defined_id;
+ uint32 sid;
+ } *cmd;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ view_type = vmw_view_cmd_to_type(header->id);
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->sid, &srf_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
+ ret = vmw_cotable_notify(res, cmd->defined_id);
+ vmw_resource_unreference(&res);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return vmw_view_add(sw_context->man,
+ ctx_node->res,
+ srf_node->res,
+ view_type,
+ cmd->defined_id,
+ header,
+ header->size + sizeof(*header),
+ &sw_context->staged_cmd_res);
+}
+
+/**
+ * vmw_cmd_dx_set_so_targets - Validate an
+ * SVGA_3D_CMD_DX_SET_SOTARGETS command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_bindinfo_so binding;
+ struct vmw_resource_val_node *res_node;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetSOTargets body;
+ SVGA3dSoTarget targets[];
+ } *cmd;
+ int i, ret, num;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, typeof(*cmd), header);
+ num = (cmd->header.size - sizeof(cmd->body)) /
+ sizeof(SVGA3dSoTarget);
+
+ if (num > SVGA3D_DX_MAX_SOTARGETS) {
+ DRM_ERROR("Invalid DX SO binding.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num; i++) {
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->targets[i].sid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.bi.bt = vmw_ctx_binding_so;
+ binding.offset = cmd->targets[i].offset;
+ binding.size = cmd->targets[i].sizeInBytes;
+ binding.slot = i;
+
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ 0, binding.slot);
+ }
+
+ return 0;
+}
+
+static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ /*
+ * This is based on the fact that all affected define commands have
+ * the same initial command body layout.
+ */
+ struct {
+ SVGA3dCmdHeader header;
+ uint32 defined_id;
+ } *cmd;
+ enum vmw_so_type so_type;
+ int ret;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ so_type = vmw_so_cmd_to_type(header->id);
+ res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cotable_notify(res, cmd->defined_id);
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
+
+/**
+ * vmw_cmd_dx_check_subresource - Validate an
+ * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ union {
+ SVGA3dCmdDXReadbackSubResource r_body;
+ SVGA3dCmdDXInvalidateSubResource i_body;
+ SVGA3dCmdDXUpdateSubResource u_body;
+ SVGA3dSurfaceId sid;
+ };
+ } *cmd;
+
+ BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
+ offsetof(typeof(*cmd), sid));
+ BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
+ offsetof(typeof(*cmd), sid));
+ BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
+ offsetof(typeof(*cmd), sid));
+
+ cmd = container_of(header, typeof(*cmd), header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->sid, NULL);
+}
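The anonymous union above lets one handler serve the readback, invalidate and
update variants, and the BUILD_BUG_ON()s prove at compile time that sid sits
at the same offset in every body. A reduced sketch of the construct (names
hypothetical):

	struct demo_cmd {
		u32 header;
		union {
			struct { u32 sid; u32 mip; } r_body;
			u32 sid;
		};
	};

	static void demo_check(void)
	{
		/* Fails the build if the alias ever goes out of sync. */
		BUILD_BUG_ON(offsetof(struct demo_cmd, r_body.sid) !=
			     offsetof(struct demo_cmd, sid));
	}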
+
+static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_dx_view_remove - validate a view remove command and
+ * schedule the view resource for removal.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * Check that the view exists, and if it was not created using this
+ * command batch, make sure it's validated (present in the device) so that
+ * the remove command will not confuse the device.
+ */
+static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct {
+ SVGA3dCmdHeader header;
+ union vmw_view_destroy body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
+ struct vmw_resource *view;
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ ret = vmw_view_remove(sw_context->man,
+ cmd->body.view_id, view_type,
+ &sw_context->staged_cmd_res,
+ &view);
+ if (ret || !view)
+ return ret;
+
+ /*
+ * Add view to the validate list iff it was not created using this
+ * command batch.
+ */
+ return vmw_view_res_val_add(sw_context, view);
+}
+
+/**
+ * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDefineShader body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
+ ret = vmw_cotable_notify(res, cmd->body.shaderId);
+ vmw_resource_unreference(&res);
+ if (ret)
+ return ret;
+
+ return vmw_dx_shader_add(sw_context->man, ctx_node->res,
+ cmd->body.shaderId, cmd->body.type,
+ &sw_context->staged_cmd_res);
+}
+
+/**
+ * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDestroyShader body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
+ &sw_context->staged_cmd_res);
+ if (ret)
+ DRM_ERROR("Could not find shader to remove.\n");
+
+ return ret;
+}
+
+/**
+ * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node;
+ struct vmw_resource_val_node *res_node;
+ struct vmw_resource *res;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindShader body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (cmd->body.cid != SVGA3D_INVALID_ID) {
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter,
+ &cmd->body.cid, &ctx_node);
+ if (ret)
+ return ret;
+ } else {
+ ctx_node = sw_context->dx_ctx_node;
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+ }
+
+ res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+ cmd->body.shid, 0);
+ if (IS_ERR(res)) {
+ DRM_ERROR("Could not find shader to bind.\n");
+ return PTR_ERR(res);
+ }
+
+ ret = vmw_resource_val_add(sw_context, res, &res_node);
+ if (ret) {
+ DRM_ERROR("Error creating resource validation node.\n");
+ goto out_unref;
+ }
+
+
+ ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
+ &cmd->body.mobid,
+ cmd->body.offsetInBytes);
+out_unref:
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -1849,7 +3014,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
uint32_t size_remaining = *size;
uint32_t cmd_id;
- cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+ cmd_id = ((uint32_t *)buf)[0];
switch (cmd_id) {
case SVGA_CMD_UPDATE:
*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
@@ -1980,7 +3145,7 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
false, false, true),
@@ -2051,7 +3216,147 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
- true, false, true)
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
+ false, false, true),
+
+ /*
+ * DX commands
+ */
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
+ &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
+ &vmw_cmd_dx_set_shader_res, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
+ &vmw_cmd_dx_set_vertex_buffers, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
+ &vmw_cmd_dx_set_index_buffer, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
+ &vmw_cmd_dx_set_rendertargets, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
+ &vmw_cmd_ok, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
+ &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
+ &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
+ &vmw_cmd_dx_check_subresource, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
+ &vmw_cmd_dx_check_subresource, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
+ &vmw_cmd_dx_check_subresource, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
+ &vmw_cmd_dx_view_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
+ &vmw_cmd_dx_view_remove, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
+ &vmw_cmd_dx_view_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
+ &vmw_cmd_dx_view_remove, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
+ &vmw_cmd_dx_view_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+ &vmw_cmd_dx_view_remove, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
+ &vmw_cmd_dx_define_shader, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
+ &vmw_cmd_dx_destroy_shader, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
+ &vmw_cmd_dx_bind_shader, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
+ &vmw_cmd_dx_set_so_targets, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
+ &vmw_cmd_buffer_copy_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
+ &vmw_cmd_pred_copy_check, true, false, true),
};
static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -2065,14 +3370,14 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
const struct vmw_cmd_entry *entry;
bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
- cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+ cmd_id = ((uint32_t *)buf)[0];
/* Handle any non-3D commands */
if (unlikely(cmd_id < SVGA_CMD_MAX))
return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
- cmd_id = le32_to_cpu(header->id);
- *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
+ cmd_id = header->id;
+ *size = header->size + sizeof(SVGA3dCmdHeader);
cmd_id -= SVGA_3D_CMD_BASE;
if (unlikely(*size > size_remaining))
@@ -2184,7 +3489,8 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
*
+ * @sw_context: Pointer to the software context.
* @list: The resource list.
*/
-static void vmw_resource_list_unreference(struct list_head *list)
+static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
+ struct list_head *list)
{
struct vmw_resource_val_node *val, *val_next;
@@ -2195,8 +3501,15 @@ static void vmw_resource_list_unreference(struct list_head *list)
list_for_each_entry_safe(val, val_next, list, head) {
list_del_init(&val->head);
vmw_resource_unreference(&val->res);
- if (unlikely(val->staged_bindings))
- kfree(val->staged_bindings);
+
+ if (val->staged_bindings) {
+ if (val->staged_bindings != sw_context->staged_bindings)
+ vmw_binding_state_free(val->staged_bindings);
+ else
+ sw_context->staged_bindings_inuse = false;
+ val->staged_bindings = NULL;
+ }
+
kfree(val);
}
}
@@ -2222,24 +3535,21 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
-static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo,
- bool validate_as_mob)
+int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+ struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool validate_as_mob)
{
+ struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
+ base);
int ret;
-
- /*
- * Don't validate pinned buffers.
- */
-
- if (bo == dev_priv->pinned_bo ||
- (bo == dev_priv->dummy_query_bo &&
- dev_priv->dummy_query_bo_pinned))
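+ /* Don't move pinned buffers; skip validating them. */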
+ if (vbo->pin_count > 0)
return 0;
if (validate_as_mob)
- return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
+ return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
+ false);
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
@@ -2248,7 +3558,8 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* used as a GMR, this will return -ENOMEM.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
+ false);
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;
@@ -2257,8 +3568,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* previous contents.
*/
- DRM_INFO("Falling through to VRAM.\n");
- ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
return ret;
}
@@ -2270,6 +3580,7 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
+ true,
entry->validate_as_mob);
if (unlikely(ret != 0))
return ret;
@@ -2417,7 +3728,164 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
}
}
+/**
+ * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
+ * the fifo.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @kernel_commands: Pointer to the unpatched command batch.
+ * @command_size: Size of the unpatched command batch.
+ * @sw_context: Structure holding the relocation lists.
+ *
+ * Side effects: If this function returns 0, then the command batch
+ * pointed to by @kernel_commands will have been modified.
+ */
+static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
+ void *kernel_commands,
+ u32 command_size,
+ struct vmw_sw_context *sw_context)
+{
+ void *cmd;
+ if (sw_context->dx_ctx_node)
+ cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
+ sw_context->dx_ctx_node->res->id);
+ else
+ cmd = vmw_fifo_reserve(dev_priv, command_size);
+ if (!cmd) {
+ DRM_ERROR("Failed reserving fifo space for commands.\n");
+ return -ENOMEM;
+ }
+
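+ /*
+ * Patch buffer object relocations in the batch, copy it into the fifo,
+ * then patch resource ids directly in fifo memory.
+ */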
+ vmw_apply_relocations(sw_context);
+ memcpy(cmd, kernel_commands, command_size);
+ vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+ vmw_fifo_commit(dev_priv, command_size);
+
+ return 0;
+}
+
+/**
+ * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
+ * the command buffer manager.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @header: Opaque handle to the command buffer allocation.
+ * @command_size: Size of the unpatched command batch.
+ * @sw_context: Structure holding the relocation lists.
+ *
+ * Side effects: If this function returns 0, then the command buffer
+ * represented by @header will have been modified.
+ */
+static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
+ struct vmw_cmdbuf_header *header,
+ u32 command_size,
+ struct vmw_sw_context *sw_context)
+{
+ u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
+ SVGA3D_INVALID_ID);
+ void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
+ id, false, header);
+
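+ /*
+ * Reserving space against a header preallocated by vmw_cmdbuf_alloc()
+ * is assumed not to fail here, hence @cmd is not error-checked.
+ */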
+ vmw_apply_relocations(sw_context);
+ vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+ vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
+
+ return 0;
+}
+
+/**
+ * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
+ * submission using a command buffer.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @user_commands: User-space pointer to the commands to be submitted.
+ * @kernel_commands: Pointer to an in-kernel copy of the batch, or NULL.
+ * @command_size: Size of the unpatched command batch.
+ * @header: Out parameter returning the opaque pointer to the command buffer.
+ *
+ * This function checks whether we can use the command buffer manager for
+ * submission and if so, creates a command buffer of suitable size and
+ * copies the user data into that buffer.
+ *
+ * On successful return, the function returns a pointer to the data in the
+ * command buffer and *@header is set to non-NULL.
+ * If command buffers could not be used, the function returns the value of
+ * @kernel_commands as passed in, which may be NULL. In that case *@header
+ * is set to NULL.
+ * If an error is encountered, the function returns a pointer error value.
+ * If the function is interrupted by a signal while sleeping, it returns
+ * -ERESTARTSYS cast to a pointer error value.
+ */
+static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
+ void __user *user_commands,
+ void *kernel_commands,
+ u32 command_size,
+ struct vmw_cmdbuf_header **header)
+{
+ size_t cmdbuf_size;
+ int ret;
+
+ *header = NULL;
+ if (!dev_priv->cman || kernel_commands)
+ return kernel_commands;
+
+ if (command_size > SVGA_CB_MAX_SIZE) {
+ DRM_ERROR("Command buffer is too large.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* If possible, add a little space for fencing. */
+ cmdbuf_size = command_size + 512;
+ cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
+ kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
+ true, header);
+ if (IS_ERR(kernel_commands))
+ return kernel_commands;
+
+ ret = copy_from_user(kernel_commands, user_commands,
+ command_size);
+ if (ret) {
+ DRM_ERROR("Failed copying commands.\n");
+ vmw_cmdbuf_header_free(*header);
+ *header = NULL;
+ return ERR_PTR(-EFAULT);
+ }
+
+ return kernel_commands;
+}
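+
+/*
+ * A minimal, hypothetical caller-side sketch of the contract above
+ * (mirroring the use in vmw_execbuf_process() below):
+ *
+ *	cmds = vmw_execbuf_cmdbuf(dev_priv, user_commands, kernel_commands,
+ *				  command_size, &header);
+ *	if (IS_ERR(cmds))
+ *		return PTR_ERR(cmds);	(e.g. -EFAULT or -ERESTARTSYS)
+ *
+ *	header != NULL: cmds points into a command buffer; submit it with
+ *	vmw_execbuf_submit_cmdbuf(). header == NULL: cmds is kernel_commands
+ *	unchanged (possibly NULL); use the fifo path instead.
+ */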
+
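+/**
+ * vmw_execbuf_tie_context - Set up the DX context for a command submission.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID for none.
+ *
+ * Looks up the context resource, adds it to the validation list and makes
+ * it the DX context of the command batch.
+ */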
+static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ uint32_t handle)
+{
+ struct vmw_resource_val_node *ctx_node;
+ struct vmw_resource *res;
+ int ret;
+
+ if (handle == SVGA3D_INVALID_ID)
+ return 0;
+
+ ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
+ handle, user_context_converter,
+ &res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not find or user DX context 0x%08x.\n",
+ (unsigned) handle);
+ return ret;
+ }
+
+ ret = vmw_resource_val_add(sw_context, res, &ctx_node);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ sw_context->dx_ctx_node = ctx_node;
+ sw_context->man = vmw_context_res_man(res);
+out_err:
+ vmw_resource_unreference(&res);
+ return ret;
+}
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
@@ -2425,6 +3893,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
void *kernel_commands,
uint32_t command_size,
uint64_t throttle_us,
+ uint32_t dx_context_handle,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj **out_fence)
{
@@ -2432,18 +3901,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_fence_obj *fence = NULL;
struct vmw_resource *error_resource;
struct list_head resource_list;
+ struct vmw_cmdbuf_header *header;
struct ww_acquire_ctx ticket;
uint32_t handle;
- void *cmd;
int ret;
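+ /* Throttle early, before any locks or command buffer setup. */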
+ if (throttle_us) {
+ ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
+ throttle_us);
+
+ if (ret)
+ return ret;
+ }
+
+ kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
+ kernel_commands, command_size,
+ &header);
+ if (IS_ERR(kernel_commands))
+ return PTR_ERR(kernel_commands);
+
ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
- if (unlikely(ret != 0))
- return -ERESTARTSYS;
+ if (ret) {
+ ret = -ERESTARTSYS;
+ goto out_free_header;
+ }
+ sw_context->kernel = false;
if (kernel_commands == NULL) {
- sw_context->kernel = false;
-
ret = vmw_resize_cmd_bounce(sw_context, command_size);
if (unlikely(ret != 0))
goto out_unlock;
@@ -2458,19 +3942,26 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock;
}
kernel_commands = sw_context->cmd_bounce;
- } else
+ } else if (!header)
sw_context->kernel = true;
sw_context->fp = vmw_fpriv(file_priv);
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
INIT_LIST_HEAD(&sw_context->resource_list);
+ INIT_LIST_HEAD(&sw_context->ctx_resource_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
sw_context->last_query_ctx = NULL;
sw_context->needs_post_query_barrier = false;
+ sw_context->dx_ctx_node = NULL;
+ sw_context->dx_query_mob = NULL;
+ sw_context->dx_query_ctx = NULL;
memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->validate_nodes);
INIT_LIST_HEAD(&sw_context->res_relocations);
+ if (sw_context->staged_bindings)
+ vmw_binding_state_reset(sw_context->staged_bindings);
+
if (!sw_context->res_ht_initialized) {
ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
if (unlikely(ret != 0))
@@ -2478,10 +3969,24 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->res_ht_initialized = true;
}
INIT_LIST_HEAD(&sw_context->staged_cmd_res);
-
INIT_LIST_HEAD(&resource_list);
+ ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
+ if (unlikely(ret != 0)) {
+ list_splice_init(&sw_context->ctx_resource_list,
+ &sw_context->resource_list);
+ goto out_err_nores;
+ }
+
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
+ /*
+ * Merge the resource lists before checking the return status
+ * from vmw_cmd_check_all so that all the open hashtabs will
+ * be handled properly even if vmw_cmd_check_all fails.
+ */
+ list_splice_init(&sw_context->ctx_resource_list,
+ &sw_context->resource_list);
+
if (unlikely(ret != 0))
goto out_err_nores;
@@ -2502,14 +4007,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_err;
- if (throttle_us) {
- ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
- throttle_us);
-
- if (unlikely(ret != 0))
- goto out_err;
- }
-
ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
if (unlikely(ret != 0)) {
ret = -ERESTARTSYS;
@@ -2522,21 +4019,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock_binding;
}
- cmd = vmw_fifo_reserve(dev_priv, command_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving fifo space for commands.\n");
- ret = -ENOMEM;
- goto out_unlock_binding;
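+ /*
+ * A non-NULL header means the batch already lives in a command buffer;
+ * otherwise fall back to the fifo.
+ */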
+ if (!header) {
+ ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
+ command_size, sw_context);
+ } else {
+ ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
+ sw_context);
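+ /*
+ * The cmdbuf manager now owns the header; clear it so the
+ * error path won't free it again.
+ */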
+ header = NULL;
}
-
- vmw_apply_relocations(sw_context);
- memcpy(cmd, kernel_commands, command_size);
-
- vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
- vmw_resource_relocations_free(&sw_context->res_relocations);
-
- vmw_fifo_commit(dev_priv, command_size);
mutex_unlock(&dev_priv->binding_mutex);
+ if (ret)
+ goto out_err;
vmw_query_bo_switch_commit(dev_priv, sw_context);
ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +4044,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
- vmw_resource_list_unreserve(&sw_context->resource_list, false);
+ vmw_resources_unreserve(sw_context, false);
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);
@@ -2580,7 +4073,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
- vmw_resource_list_unreference(&resource_list);
+ vmw_resource_list_unreference(sw_context, &resource_list);
return 0;
@@ -2589,7 +4082,7 @@ out_unlock_binding:
out_err:
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
- vmw_resource_list_unreserve(&sw_context->resource_list, true);
+ vmw_resources_unreserve(sw_context, true);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
vmw_clear_validations(sw_context);
@@ -2607,9 +4100,12 @@ out_unlock:
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
- vmw_resource_list_unreference(&resource_list);
+ vmw_resource_list_unreference(sw_context, &resource_list);
if (unlikely(error_resource != NULL))
vmw_resource_unreference(&error_resource);
+out_free_header:
+ if (header)
+ vmw_cmdbuf_header_free(header);
return ret;
}
@@ -2628,9 +4124,11 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
- vmw_bo_pin(dev_priv->pinned_bo, false);
- vmw_bo_pin(dev_priv->dummy_query_bo, false);
- dev_priv->dummy_query_bo_pinned = false;
+ vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+ if (dev_priv->dummy_query_bo_pinned) {
+ vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
+ dev_priv->dummy_query_bo_pinned = false;
+ }
}
@@ -2672,11 +4170,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
INIT_LIST_HEAD(&validate_list);
- pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+ pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
pinned_val.shared = false;
list_add_tail(&pinned_val.head, &validate_list);
- query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+ query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
query_val.shared = false;
list_add_tail(&query_val.head, &validate_list);
@@ -2697,10 +4195,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
dev_priv->query_cid_valid = false;
}
- vmw_bo_pin(dev_priv->pinned_bo, false);
- vmw_bo_pin(dev_priv->dummy_query_bo, false);
- dev_priv->dummy_query_bo_pinned = false;
-
+ vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+ if (dev_priv->dummy_query_bo_pinned) {
+ vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
+ dev_priv->dummy_query_bo_pinned = false;
+ }
if (fence == NULL) {
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
NULL);
@@ -2712,7 +4211,9 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
- ttm_bo_unref(&dev_priv->pinned_bo);
+ vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+ DRM_INFO("Dummy query bo pin count: %d\n",
+ dev_priv->dummy_query_bo->pin_count);
out_unlock:
return;
@@ -2722,7 +4223,7 @@ out_no_emit:
out_no_reserve:
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
- ttm_bo_unref(&dev_priv->pinned_bo);
+ vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}
/**
@@ -2751,36 +4252,68 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
-
-int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
+ struct drm_file *file_priv, size_t size)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+ struct drm_vmw_execbuf_arg arg;
int ret;
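+ /*
+ * Version 1 of the argument ends where context_handle starts; later
+ * versions append fields. Copy the common head first, then the
+ * versioned tail separately below.
+ */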
+ static const size_t copy_offset[] = {
+ offsetof(struct drm_vmw_execbuf_arg, context_handle),
+ sizeof(struct drm_vmw_execbuf_arg)};
+
+ if (unlikely(size < copy_offset[0])) {
+ DRM_ERROR("Invalid command size, ioctl %d\n",
+ DRM_VMW_EXECBUF);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
+ return -EFAULT;
/*
- * This will allow us to extend the ioctl argument while
+ * The ioctl argument can be extended while
* maintaining backwards compatibility:
* We take different code paths depending on the value of
- * arg->version.
+ * arg.version.
*/
- if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
+ if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
+ arg.version == 0)) {
DRM_ERROR("Incorrect execbuf version.\n");
- DRM_ERROR("You're running outdated experimental "
- "vmwgfx user-space drivers.");
return -EINVAL;
}
+ if (arg.version > 1 &&
+ copy_from_user(&arg.context_handle,
+ (void __user *) (data + copy_offset[0]),
+ copy_offset[arg.version - 1] -
+ copy_offset[0]) != 0)
+ return -EFAULT;
+
+ switch (arg.version) {
+ case 1:
+ arg.context_handle = (uint32_t) -1;
+ break;
+ case 2:
+ if (arg.pad64 != 0) {
+ DRM_ERROR("Unused IOCTL data not set to zero.\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
ret = vmw_execbuf_process(file_priv, dev_priv,
- (void __user *)(unsigned long)arg->commands,
- NULL, arg->command_size, arg->throttle_us,
- (void __user *)(unsigned long)arg->fence_rep,
+ (void __user *)(unsigned long)arg.commands,
+ NULL, arg.command_size, arg.throttle_us,
+ arg.context_handle,
+ (void __user *)(unsigned long)arg.fence_rep,
NULL);
ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0))