// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_mksstat.h"

#define VMW_RES_HT_ORDER 12

/*
 * Helper macro to get dx_ctx_node if available, otherwise print an error
 * message. This is for use in command verifier functions where, if dx_ctx_node
 * is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context) \
({ \
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
		__sw_context->dx_ctx_node; \
	}); \
})
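
/*
 * Usage sketch (see vmw_view_bindings_add() below): callers do
 *
 *	struct vmw_ctx_validation_info *ctx_node =
 *		VMW_GET_CTX_NODE(sw_context);
 *
 *	if (!ctx_node)
 *		return -EINVAL;
 *
 * so the debug message is logged exactly where the missing DX context
 * was detected.
 */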

#define VMW_DECLARE_CMD_VAR(__var, __type) \
	struct { \
		SVGA3dCmdHeader header; \
		__type body; \
	} __var
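
/*
 * Example, as used by vmw_rebind_all_dx_query() below:
 *
 *	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
 *
 * declares @cmd as a pointer to an anonymous struct holding an
 * SVGA3dCmdHeader immediately followed by the command body.
 */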
57
Deepak Rawat680360a2019-02-13 13:20:42 -080058/**
Thomas Hellstromfc18afc2018-09-26 15:36:52 +020059 * struct vmw_relocation - Buffer object relocation
60 *
61 * @head: List head for the command submission context's relocation list
Thomas Hellstromcc1e3b72018-09-26 15:38:13 +020062 * @vbo: Non ref-counted pointer to buffer object
Thomas Hellstromfc18afc2018-09-26 15:36:52 +020063 * @mob_loc: Pointer to location for mob id to be modified
64 * @location: Pointer to location for guest pointer to be modified
Thomas Hellstromfc18afc2018-09-26 15:36:52 +020065 */
66struct vmw_relocation {
67 struct list_head head;
Thomas Hellstromfc18afc2018-09-26 15:36:52 +020068 struct vmw_buffer_object *vbo;
Thomas Hellstromcc1e3b72018-09-26 15:38:13 +020069 union {
70 SVGAMobId *mob_loc;
71 SVGAGuestPtr *location;
72 };
Thomas Hellstromfc18afc2018-09-26 15:36:52 +020073};
74
Thomas Hellstromc0951b72012-11-20 12:19:35 +000075/**
Thomas Hellstroma1944032016-10-10 11:06:45 -070076 * enum vmw_resource_relocation_type - Relocation type for resources
77 *
78 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
79 * command stream is replaced with the actual id after validation.
80 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
81 * with a NOP.
Deepak Rawat680360a2019-02-13 13:20:42 -080082 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
83 * validation is -1, the command is replaced with a NOP. Otherwise no action.
Lee Jones7450bf72021-01-15 18:12:36 +000084 * @vmw_res_rel_max: Last value in the enum - used for error checking
85*/
Thomas Hellstroma1944032016-10-10 11:06:45 -070086enum vmw_resource_relocation_type {
87 vmw_res_rel_normal,
88 vmw_res_rel_nop,
89 vmw_res_rel_cond_nop,
90 vmw_res_rel_max
91};
92
93/**
Thomas Hellstromc0951b72012-11-20 12:19:35 +000094 * struct vmw_resource_relocation - Relocation info for resources
95 *
96 * @head: List head for the software context's relocation list.
97 * @res: Non-ref-counted pointer to the resource.
Deepak Rawat680360a2019-02-13 13:20:42 -080098 * @offset: Offset of single byte entries into the command buffer where the id
99 * that needs fixup is located.
Thomas Hellstroma1944032016-10-10 11:06:45 -0700100 * @rel_type: Type of relocation.
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000101 */
102struct vmw_resource_relocation {
103 struct list_head head;
104 const struct vmw_resource *res;
Thomas Hellstroma1944032016-10-10 11:06:45 -0700105 u32 offset:29;
106 enum vmw_resource_relocation_type rel_type:3;
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000107};
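
/*
 * Note: @offset:29 and @rel_type:3 pack into a single 32-bit word; the
 * BUILD_BUG_ON()s in vmw_resource_relocations_apply() verify that
 * SVGA_CB_MAX_SIZE and vmw_res_rel_max fit within those bit widths.
 */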

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable), #_cmd}
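
/*
 * Illustrative entry (the command dispatch table built with this macro
 * lives further down in this file); an entry might look like:
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false)
 *
 * mapping the command id to its verifier together with the user_allow /
 * gb_disable / gb_enable flags and a printable command name.
 */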

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
Deepak Rawat680360a2019-02-13 13:20:42 -0800256 * vmw_execbuf_res_size - calculate extra size fore the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node. Typically the binding manager
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
 * rcu-protected pointer to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		vmw_user_resource_noref_release();
		return 0;
	}

	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
					  dirty, (void **)&ctx_info,
					  &first_usage);
	vmw_user_resource_noref_release();
	if (ret)
		return ret;

	if (priv_size && first_usage) {
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
					      ctx_info);
		if (ret) {
			VMW_DEBUG_USER("Failed first usage context setup.\n");
			return ret;
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_res_cache_entry *rcache;
	enum vmw_res_type res_type = vmw_res_type(res);
	void *ptr;
	int ret;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
					  &ptr, NULL);
	if (ret)
		return ret;

	vmw_execbuf_rcache_update(rcache, res, ptr);

	return 0;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
					    vmw_view_dirtying(view));
	if (ret)
		return ret;

	return vmw_execbuf_res_noctx_val_add(sw_context, view,
					     VMW_RES_DIRTY_NONE);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
 * to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
Thomas Hellstrom30f82d812014-02-05 08:13:56 +0100457 */
458static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
459 struct vmw_sw_context *sw_context,
460 struct vmw_resource *ctx)
461{
462 struct list_head *binding_list;
Thomas Hellstromd80efd52015-08-10 10:39:35 -0700463 struct vmw_ctx_bindinfo *entry;
Thomas Hellstrom30f82d812014-02-05 08:13:56 +0100464 int ret = 0;
465 struct vmw_resource *res;
Thomas Hellstromd80efd52015-08-10 10:39:35 -0700466 u32 i;
Deepak Rawat5e8ec0d2018-12-13 13:51:08 -0800467 u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
468 SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
Thomas Hellstrom30f82d812014-02-05 08:13:56 +0100469
Thomas Hellstromd80efd52015-08-10 10:39:35 -0700470 /* Add all cotables to the validation list. */
Deepak Rawat878c6ec2018-12-13 11:44:42 -0800471 if (has_sm4_context(dev_priv) &&
472 vmw_res_type(ctx) == vmw_res_dx_context) {
Deepak Rawat5e8ec0d2018-12-13 13:51:08 -0800473 for (i = 0; i < cotable_max; ++i) {
Thomas Hellstromd80efd52015-08-10 10:39:35 -0700474 res = vmw_context_cotable(ctx, i);
475 if (IS_ERR(res))
476 continue;
477
Thomas Hellstroma9f58c42019-02-20 08:21:26 +0100478 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
479 VMW_RES_DIRTY_SET);
Thomas Hellstromd80efd52015-08-10 10:39:35 -0700480 if (unlikely(ret != 0))
481 return ret;
482 }
483 }
484
Thomas Hellstromd80efd52015-08-10 10:39:35 -0700485 /* Add all resources bound to the context to the validation list */
Thomas Hellstrom30f82d812014-02-05 08:13:56 +0100486 mutex_lock(&dev_priv->binding_mutex);
487 binding_list = vmw_context_binding_list(ctx);
488
489 list_for_each_entry(entry, binding_list, ctx_list) {
Thomas Hellstromd80efd52015-08-10 10:39:35 -0700490 if (vmw_res_type(entry->res) == vmw_res_view)
491 ret = vmw_view_res_val_add(sw_context, entry->res);
492 else
Thomas Hellstroma9f58c42019-02-20 08:21:26 +0100493 ret = vmw_execbuf_res_noctx_val_add
494 (sw_context, entry->res,
495 vmw_binding_dirtying(entry->bt));
Thomas Hellstrom30f82d812014-02-05 08:13:56 +0100496 if (unlikely(ret != 0))
497 break;
498 }
499
Deepak Rawat878c6ec2018-12-13 11:44:42 -0800500 if (has_sm4_context(dev_priv) &&
501 vmw_res_type(ctx) == vmw_res_dx_context) {
Thomas Hellstromf1d34bf2018-06-19 15:02:16 +0200502 struct vmw_buffer_object *dx_query_mob;
Sinclair Yehfd11a3c2015-08-10 10:56:15 -0700503
504 dx_query_mob = vmw_context_get_dx_query_mob(ctx);
505 if (dx_query_mob)
Thomas Hellstrom9c079b82018-09-26 15:28:55 +0200506 ret = vmw_validation_add_bo(sw_context->ctx,
507 dx_query_mob, true, false);
Sinclair Yehfd11a3c2015-08-10 10:56:15 -0700508 }
509
Thomas Hellstrom30f82d812014-02-05 08:13:56 +0100510 mutex_unlock(&dev_priv->binding_mutex);
511 return ret;
512}
513
514/**
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000515 * vmw_resource_relocation_add - Add a relocation to the relocation list
516 *
Lee Jones7450bf72021-01-15 18:12:36 +0000517 * @sw_context: Pointer to the software context.
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000518 * @res: The resource.
Deepak Rawat680360a2019-02-13 13:20:42 -0800519 * @offset: Offset into the command buffer currently being parsed where the id
520 * that needs fixup is located. Granularity is one byte.
Thomas Hellstroma1944032016-10-10 11:06:45 -0700521 * @rel_type: Relocation type.
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000522 */
Thomas Hellstromfc18afc2018-09-26 15:36:52 +0200523static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000524 const struct vmw_resource *res,
Thomas Hellstroma1944032016-10-10 11:06:45 -0700525 unsigned long offset,
526 enum vmw_resource_relocation_type
527 rel_type)
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000528{
529 struct vmw_resource_relocation *rel;
530
Thomas Hellstromfc18afc2018-09-26 15:36:52 +0200531 rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
Ravikant B Sharma1a4adb02016-11-08 17:30:31 +0530532 if (unlikely(!rel)) {
Deepak Rawat5724f892019-02-11 11:46:27 -0800533 VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000534 return -ENOMEM;
535 }
536
537 rel->res = res;
538 rel->offset = offset;
Thomas Hellstroma1944032016-10-10 11:06:45 -0700539 rel->rel_type = rel_type;
Thomas Hellstromfc18afc2018-09-26 15:36:52 +0200540 list_add_tail(&rel->head, &sw_context->res_relocations);
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000541
542 return 0;
543}
544
545/**
546 * vmw_resource_relocations_free - Free all relocations on a list
547 *
Thomas Hellstromfc18afc2018-09-26 15:36:52 +0200548 * @list: Pointer to the head of the relocation list
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000549 */
550static void vmw_resource_relocations_free(struct list_head *list)
551{
Thomas Hellstromfc18afc2018-09-26 15:36:52 +0200552 /* Memory is validation context memory, so no need to free it */
Thomas Hellstromfc18afc2018-09-26 15:36:52 +0200553 INIT_LIST_HEAD(list);
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000554}
555
556/**
557 * vmw_resource_relocations_apply - Apply all relocations on a list
558 *
Deepak Rawat680360a2019-02-13 13:20:42 -0800559 * @cb: Pointer to the start of the command buffer bein patch. This need not be
 * be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
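		/*
		 * The remaining type, vmw_res_rel_cond_nop, replaces the
		 * command with a NOP only if the resource id is still
		 * invalid (-1) after validation.
		 */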
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated on
 * exit.
Thomas Hellstromd5bde952014-01-31 10:12:10 +0100649 */
650static int
651vmw_cmd_res_check(struct vmw_private *dev_priv,
652 struct vmw_sw_context *sw_context,
653 enum vmw_res_type res_type,
Thomas Hellstroma9f58c42019-02-20 08:21:26 +0100654 u32 dirty,
Thomas Hellstromd5bde952014-01-31 10:12:10 +0100655 const struct vmw_user_resource_conv *converter,
656 uint32_t *id_loc,
Thomas Hellstrom9c079b82018-09-26 15:28:55 +0200657 struct vmw_resource **p_res)
Thomas Hellstromd5bde952014-01-31 10:12:10 +0100658{
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +0200659 struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
Thomas Hellstrom18e4a462014-06-09 12:39:22 +0200660 struct vmw_resource *res;
Thomas Hellstrom18e4a462014-06-09 12:39:22 +0200661 int ret;
662
Thomas Hellstrom9c079b82018-09-26 15:28:55 +0200663 if (p_res)
664 *p_res = NULL;
665
Thomas Hellstrom18e4a462014-06-09 12:39:22 +0200666 if (*id_loc == SVGA3D_INVALID_ID) {
Thomas Hellstrom18e4a462014-06-09 12:39:22 +0200667 if (res_type == vmw_res_context) {
Deepak Rawat5724f892019-02-11 11:46:27 -0800668 VMW_DEBUG_USER("Illegal context invalid id.\n");
Thomas Hellstrom18e4a462014-06-09 12:39:22 +0200669 return -EINVAL;
670 }
671 return 0;
672 }
673
Thomas Hellstrom9c079b82018-09-26 15:28:55 +0200674 if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +0200675 res = rcache->res;
Thomas Hellstroma9f58c42019-02-20 08:21:26 +0100676 if (dirty)
677 vmw_validation_res_set_dirty(sw_context->ctx,
678 rcache->private, dirty);
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +0200679 } else {
680 unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
Thomas Hellstrom18e4a462014-06-09 12:39:22 +0200681
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +0200682 ret = vmw_validation_preload_res(sw_context->ctx, size);
683 if (ret)
684 return ret;
Thomas Hellstrom18e4a462014-06-09 12:39:22 +0200685
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +0200686 res = vmw_user_resource_noref_lookup_handle
687 (dev_priv, sw_context->fp->tfile, *id_loc, converter);
Chengguang Xu4efa6662019-03-01 10:14:06 -0800688 if (IS_ERR(res)) {
Deepak Rawat5724f892019-02-11 11:46:27 -0800689 VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
690 (unsigned int) *id_loc);
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +0200691 return PTR_ERR(res);
692 }
693
Thomas Hellstroma9f58c42019-02-20 08:21:26 +0100694 ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +0200695 if (unlikely(ret != 0))
696 return ret;
697
698 if (rcache->valid && rcache->res == res) {
699 rcache->valid_handle = true;
700 rcache->handle = *id_loc;
701 }
Thomas Hellstrom18e4a462014-06-09 12:39:22 +0200702 }
703
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +0200704 ret = vmw_resource_relocation_add(sw_context, res,
705 vmw_ptr_diff(sw_context->buf_start,
706 id_loc),
707 vmw_res_rel_normal);
Thomas Hellstrom9c079b82018-09-26 15:28:55 +0200708 if (p_res)
709 *p_res = res;
710
Thomas Hellstrom18e4a462014-06-09 12:39:22 +0200711 return 0;
Thomas Hellstromd5bde952014-01-31 10:12:10 +0100712}
713
714/**
Zack Rusin2cd80db2021-05-05 15:10:07 -0400715 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
Sinclair Yehfd11a3c2015-08-10 10:56:15 -0700716 *
717 * @ctx_res: context the query belongs to
718 *
719 * This function assumes binding_mutex is held.
720 */
721static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
722{
723 struct vmw_private *dev_priv = ctx_res->dev_priv;
Thomas Hellstromf1d34bf2018-06-19 15:02:16 +0200724 struct vmw_buffer_object *dx_query_mob;
Deepak Rawatd01316d2019-02-08 15:50:40 -0800725 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
Sinclair Yehfd11a3c2015-08-10 10:56:15 -0700726
727 dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
728
729 if (!dx_query_mob || dx_query_mob->dx_query_ctx)
730 return 0;
731
Zack Rusin8426ed92020-11-18 12:54:19 -0500732 cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
Deepak Rawatb2898402019-02-11 14:59:57 -0800733 if (cmd == NULL)
Sinclair Yehfd11a3c2015-08-10 10:56:15 -0700734 return -ENOMEM;
Sinclair Yehfd11a3c2015-08-10 10:56:15 -0700735
736 cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
737 cmd->header.size = sizeof(cmd->body);
738 cmd->body.cid = ctx_res->id;
Christian Königd3116752021-04-12 15:11:47 +0200739 cmd->body.mobid = dx_query_mob->base.resource->start;
Zack Rusin8426ed92020-11-18 12:54:19 -0500740 vmw_cmd_commit(dev_priv, sizeof(*cmd));
Sinclair Yehfd11a3c2015-08-10 10:56:15 -0700741
742 vmw_context_bind_dx_query(ctx_res, dx_query_mob);
743
744 return 0;
745}
746
747/**
Deepak Rawat680360a2019-02-13 13:20:42 -0800748 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
749 * contexts.
Thomas Hellstrom30f82d812014-02-05 08:13:56 +0100750 *
751 * @sw_context: Pointer to the software context.
752 *
753 * Rebind context binding points that have been scrubbed because of eviction.
754 */
755static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
756{
Thomas Hellstrom9c079b82018-09-26 15:28:55 +0200757 struct vmw_ctx_validation_info *val;
Thomas Hellstrom30f82d812014-02-05 08:13:56 +0100758 int ret;
759
Thomas Hellstrom9c079b82018-09-26 15:28:55 +0200760 list_for_each_entry(val, &sw_context->ctx_list, head) {
761 ret = vmw_binding_rebind_all(val->cur);
Thomas Hellstrom30f82d812014-02-05 08:13:56 +0100762 if (unlikely(ret != 0)) {
763 if (ret != -ERESTARTSYS)
Deepak Rawat5724f892019-02-11 11:46:27 -0800764 VMW_DEBUG_USER("Failed to rebind context.\n");
Thomas Hellstrom30f82d812014-02-05 08:13:56 +0100765 return ret;
766 }
Sinclair Yehfd11a3c2015-08-10 10:56:15 -0700767
Thomas Hellstrom9c079b82018-09-26 15:28:55 +0200768 ret = vmw_rebind_all_dx_query(val->ctx);
Deepak Rawatb2898402019-02-11 14:59:57 -0800769 if (ret != 0) {
770 VMW_DEBUG_USER("Failed to rebind queries.\n");
Sinclair Yehfd11a3c2015-08-10 10:56:15 -0700771 return ret;
Deepak Rawatb2898402019-02-11 14:59:57 -0800772 }
Thomas Hellstrom30f82d812014-02-05 08:13:56 +0100773 }
774
775 return 0;
776}
777
778/**
Deepak Rawat680360a2019-02-13 13:20:42 -0800779 * vmw_view_bindings_add - Add an array of view bindings to a context binding
780 * state tracker.
Thomas Hellstromd80efd52015-08-10 10:39:35 -0700781 *
782 * @sw_context: The execbuf state used for this command.
783 * @view_type: View type for the bindings.
784 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command submission
 * context's resource cache and hence the last resource of that type to be
 * processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If so,
 * the function prepares the state of @sw_context for switching pinned buffers
 * after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.resource->num_pages > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all preceding
 * queries have finished, and the old query buffer can be unpinned. However,
 * since both the new query buffer and the old one are fenced with that fence,
 * we can do an asynchronous unpin now, and be sure that the old query buffer
 * won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
1090 * using a sequence emitted *after* calling this function.
1091 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We also pin the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy
			 * queries in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
	if (IS_ERR_OR_NULL(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
		return PTR_ERR(vmw_bo);
	}
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	ttm_bo_put(&vmw_bo->base);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
	if (IS_ERR_OR_NULL(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use GMR region.\n");
		return PTR_ERR(vmw_bo);
	}
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	ttm_bo_put(&vmw_bo->base);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later.
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

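/**
 * vmw_cmd_dma - validate SVGA_3D_CMD_SURFACE_DMA command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Translates the guest pointer to the source/destination buffer object,
 * verifies the DMA suffix and clamps the transfer so it cannot cross the
 * buffer object boundary, and validates the host surface, marking it dirty
 * when the transfer writes to host VRAM.
 */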
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.base.size;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);

	return 0;
}

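/**
 * vmw_cmd_draw - validate SVGA_3D_CMD_DRAW_PRIMITIVES command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Bounds-checks the vertex declaration and primitive range arrays against
 * the command size, and validates each vertex array and index array surface
 * referenced by the draw command.
 */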
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

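/**
 * vmw_cmd_tex_state - validate SVGA_3D_CMD_SETTEXTURESTATE command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Validates the context and each surface bound through an
 * SVGA3D_TS_BIND_TEXTURE texture state, and, when the device has MOB
 * support, stages the texture binding in the context's binding state.
 */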
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}

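/**
 * vmw_cmd_check_define_gmrfb - validate SVGA_CMD_DEFINE_GMRFB command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 *
 * Translates the guest pointer backing the GMRFB so that the referenced
 * buffer object is added to the validation list.
 */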
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_buffer_object *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: Pointer to the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It translates the user-space
 * buffer handle in @buf_id and registers the switch with
 * vmw_validation_res_switch_backup().
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res, uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_buffer_object *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper around
 * vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter, &cmd->body.sid,
				     &cmd->body.mobid, 0);
}

/**
 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1, cmd->body.type,
				    size, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
				cmd->body.type, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		/*
		 * This is the compat shader path - per-device guest-backed
		 * shaders, but user-space thinks they are per-context
		 * host-backed shaders.
		 */
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid, cmd->body.type);
		if (!IS_ERR(res)) {
			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_NONE);
			if (unlikely(ret != 0))
				return ret;

			ret = vmw_resource_relocation_add
				(sw_context, res,
				 vmw_ptr_diff(sw_context->buf_start,
					      &cmd->body.shid),
				 vmw_res_rel_normal);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					VMW_RES_DIRTY_NONE,
					user_shader_converter, &cmd->body.shid,
					&res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter, &cmd->body.shid,
				     &cmd->body.mobid, cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
	SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
		SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;

	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= max_shader_num ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
			       (unsigned int) cmd->body.type,
			       (unsigned int) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
			binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_constant_buffer_offset - Validate
 * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);

	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 shader_slot;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
			       (unsigned int) cmd->body.slot);
		return -EINVAL;
	}

	shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
	vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
				     cmd->body.slot, cmd->body.offsetInBytes);

	return 0;
}

2204/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002205 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2206 * command
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002207 *
2208 * @dev_priv: Pointer to a device private struct.
2209 * @sw_context: The software context being used for this batch.
2210 * @header: Pointer to the command header in the command stream.
2211 */
2212static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2213 struct vmw_sw_context *sw_context,
2214 SVGA3dCmdHeader *header)
2215{
Deepak Rawatd01316d2019-02-08 15:50:40 -08002216 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2217 container_of(header, typeof(*cmd), header);
Deepak Rawatd2e90ab2018-12-13 13:43:20 -08002218 SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
2219 SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
2220
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002221 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2222 sizeof(SVGA3dShaderResourceViewId);
2223
2224 if ((u64) cmd->body.startView + (u64) num_sr_view >
2225 (u64) SVGA3D_DX_MAX_SRVIEWS ||
Deepak Rawatd2e90ab2018-12-13 13:43:20 -08002226 cmd->body.type >= max_allowed) {
Deepak Rawat5724f892019-02-11 11:46:27 -08002227 VMW_DEBUG_USER("Invalid shader binding.\n");
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002228 return -EINVAL;
2229 }
2230
2231 return vmw_view_bindings_add(sw_context, vmw_view_sr,
2232 vmw_ctx_binding_sr,
2233 cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2234 (void *) &cmd[1], num_sr_view,
2235 cmd->body.startView);
2236}
2237
2238/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002239 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002240 *
2241 * @dev_priv: Pointer to a device private struct.
2242 * @sw_context: The software context being used for this batch.
2243 * @header: Pointer to the command header in the command stream.
2244 */
2245static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2246 struct vmw_sw_context *sw_context,
2247 SVGA3dCmdHeader *header)
2248{
Deepak Rawatd01316d2019-02-08 15:50:40 -08002249 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
Deepak Rawatd2e90ab2018-12-13 13:43:20 -08002250 SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
2251 SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002252 struct vmw_resource *res = NULL;
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002253 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002254 struct vmw_ctx_bindinfo_shader binding;
2255 int ret = 0;
2256
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002257 if (!ctx_node)
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002258 return -EINVAL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002259
2260 cmd = container_of(header, typeof(*cmd), header);
2261
Deepak Rawatd2e90ab2018-12-13 13:43:20 -08002262 if (cmd->body.type >= max_allowed ||
Murray McAllister5ed7f4b2019-05-20 21:57:34 +12002263 cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
Deepak Rawat5724f892019-02-11 11:46:27 -08002264 VMW_DEBUG_USER("Illegal shader type %u.\n",
2265 (unsigned int) cmd->body.type);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002266 return -EINVAL;
2267 }
2268
2269 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2270 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2271 if (IS_ERR(res)) {
Deepak Rawat5724f892019-02-11 11:46:27 -08002272 VMW_DEBUG_USER("Could not find shader for binding.\n");
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002273 return PTR_ERR(res);
2274 }
2275
Thomas Hellstroma9f58c42019-02-20 08:21:26 +01002276 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2277 VMW_RES_DIRTY_NONE);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002278 if (ret)
Thomas Hellstrom508108e2018-09-26 16:28:45 +02002279 return ret;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002280 }
2281
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002282 binding.bi.ctx = ctx_node->ctx;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002283 binding.bi.res = res;
2284 binding.bi.bt = vmw_ctx_binding_dx_shader;
2285 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2286
Deepak Rawat680360a2019-02-13 13:20:42 -08002287 vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002288
Thomas Hellstrom508108e2018-09-26 16:28:45 +02002289 return 0;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002290}
2291
2292/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002293 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2294 * command
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002295 *
2296 * @dev_priv: Pointer to a device private struct.
2297 * @sw_context: The software context being used for this batch.
2298 * @header: Pointer to the command header in the command stream.
2299 */
2300static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2301 struct vmw_sw_context *sw_context,
2302 SVGA3dCmdHeader *header)
2303{
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002304 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002305 struct vmw_ctx_bindinfo_vb binding;
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002306 struct vmw_resource *res;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002307 struct {
2308 SVGA3dCmdHeader header;
2309 SVGA3dCmdDXSetVertexBuffers body;
2310 SVGA3dVertexBuffer buf[];
2311 } *cmd;
2312 int i, ret, num;
2313
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002314 if (!ctx_node)
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002315 return -EINVAL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002316
2317 cmd = container_of(header, typeof(*cmd), header);
2318 num = (cmd->header.size - sizeof(cmd->body)) /
2319 sizeof(SVGA3dVertexBuffer);
2320 if ((u64)num + (u64)cmd->body.startBuffer >
2321 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
Deepak Rawat5724f892019-02-11 11:46:27 -08002322 VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002323 return -EINVAL;
2324 }
2325
2326 for (i = 0; i < num; i++) {
2327 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
Thomas Hellstroma9f58c42019-02-20 08:21:26 +01002328 VMW_RES_DIRTY_NONE,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002329 user_surface_converter,
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002330 &cmd->buf[i].sid, &res);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002331 if (unlikely(ret != 0))
2332 return ret;
2333
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002334 binding.bi.ctx = ctx_node->ctx;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002335 binding.bi.bt = vmw_ctx_binding_vb;
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002336 binding.bi.res = res;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002337 binding.offset = cmd->buf[i].offset;
2338 binding.stride = cmd->buf[i].stride;
2339 binding.slot = i + cmd->body.startBuffer;
2340
Deepak Rawat680360a2019-02-13 13:20:42 -08002341 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002342 }
2343
2344 return 0;
2345}
2346
/**
 * vmw_cmd_dx_set_index_buffer - Validate
 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
		container_of(header, typeof(*cmd), header);
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);
	int ret;

	if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) {
		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
				    0, &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
				     num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
				  cmd->body.renderTargetViewId);

	return PTR_ERR_OR_ZERO(ret);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
				  cmd->body.depthStencilViewId);

	return PTR_ERR_OR_ZERO(ret);
}

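/**
 * vmw_cmd_dx_view_define - Validate the DX view define commands, which all
 * share the same initial command body layout (a defined id followed by a
 * surface id).
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */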
static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *srf;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have the
	 * same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (!ctx_node)
		return -EINVAL;

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
		VMW_DEBUG_USER("Invalid surface id.\n");
		return -EINVAL;
	}
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->sid, &srf);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
			    cmd->defined_id, header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_so_target binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		VMW_DEBUG_USER("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_SET,
					user_surface_converter,
					&cmd->targets[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_so_target;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

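/**
 * vmw_cmd_dx_so_define - Validate the DX state object define commands, which
 * all share the same initial command body layout (a defined id), by notifying
 * the cotable for the corresponding state object type.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */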
static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
	if (IS_ERR(res))
		return PTR_ERR(res);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->sid, NULL);
}

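/**
 * vmw_cmd_dx_cid_check - Validate a command that requires nothing more than
 * a DX context to be set on the software context.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */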
static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);

	if (!ctx_node)
		return -EINVAL;

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
 * resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this command
 * batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res, &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(sw_context, view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}

/**
 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					VMW_RES_DIRTY_SET,
					user_context_converter, &cmd->body.cid,
					&ctx);
		if (ret)
			return ret;
	} else {
		struct vmw_ctx_validation_info *ctx_node =
			VMW_GET_CTX_NODE(sw_context);

		if (!ctx_node)
			return -EINVAL;

		ctx = ctx_node->ctx;
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
					    VMW_RES_DIRTY_NONE);
	if (ret) {
		VMW_DEBUG_USER("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *view;
	struct vmw_res_cache_entry *rcache;

	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
				   cmd->body.shaderResourceViewId);
	if (IS_ERR(view))
		return PTR_ERR(view);

	/*
	 * Normally the shader-resource view is not gpu-dirtying, but for
	 * this particular command it is. So mark the last looked-up
	 * surface, which is the surface the view points to, gpu-dirty.
	 */
	rcache = &sw_context->res_cache[vmw_res_surface];
	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
				     VMW_RES_DIRTY_SET);
	return 0;
}

/**
 * vmw_cmd_dx_transfer_from_buffer - Validate
 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
		container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.destSid, NULL);
}

/**
 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
		container_of(header, typeof(*cmd), header);

	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.surface.sid, NULL);
}

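/**
 * vmw_cmd_sm5 - Validate a command that requires an SM5-capable device but
 * needs no further checking.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */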
static int vmw_cmd_sm5(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return 0;
}

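/**
 * vmw_cmd_sm5_view_define - Validate a view define command that requires an
 * SM5-capable device.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */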
static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
}

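/**
 * vmw_cmd_sm5_view_remove - Validate a view remove command that requires an
 * SM5-capable device.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */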
static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
}

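/**
 * vmw_cmd_clear_uav_uint - Validate a uint clear command on an unordered
 * access view (SVGA3dCmdDXClearUAViewUint body).
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */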
static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearUAViewUint body;
	} *cmd = container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
				  cmd->body.uaViewId);

	return PTR_ERR_OR_ZERO(ret);
}

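/**
 * vmw_cmd_clear_uav_float - Validate a float clear command on an unordered
 * access view (SVGA3dCmdDXClearUAViewFloat body).
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */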
static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearUAViewFloat body;
	} *cmd = container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
				  cmd->body.uaViewId);

	return PTR_ERR_OR_ZERO(ret);
}

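/**
 * vmw_cmd_set_uav - Validate a set-unordered-access-views command
 * (SVGA3dCmdDXSetUAViews body): check the view count against the device
 * limit and stage the view bindings.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */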
static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetUAViews body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dUAViewId);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (num_uav > vmw_max_num_uavs(dev_priv)) {
		VMW_DEBUG_USER("Invalid UAV binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
				    num_uav, 0);
	if (ret)
		return ret;

	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
				  cmd->body.uavSpliceIndex);

	return ret;
}

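/**
 * vmw_cmd_set_cs_uav - Validate a compute-shader set-unordered-access-views
 * command (SVGA3dCmdDXSetCSUAViews body).
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */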
static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCSUAViews body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dUAViewId);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (num_uav > vmw_max_num_uavs(dev_priv)) {
		VMW_DEBUG_USER("Invalid UAV binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
				    num_uav, 0);
	if (ret)
		return ret;

	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
				  cmd->body.startIndex);

	return ret;
}

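/**
 * vmw_cmd_dx_define_streamoutput - Validate a streamoutput define command
 * (SVGA3dCmdDXDefineStreamOutputWithMob body) by notifying the streamoutput
 * cotable and staging the new streamoutput resource.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */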
static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
					  struct vmw_sw_context *sw_context,
					  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineStreamOutputWithMob body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
	ret = vmw_cotable_notify(res, cmd->body.soid);
	if (ret)
		return ret;

	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
				       cmd->body.soid,
				       &sw_context->staged_cmd_res);
}

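/**
 * vmw_cmd_dx_destroy_streamoutput - Validate a streamoutput destroy command
 * (SVGA3dCmdDXDestroyStreamOutput body) and schedule the streamoutput
 * resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */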
static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	/*
	 * When the device does not support SM5, the streamoutput-with-mob
	 * commands are not available to user-space. Simply return in that
	 * case.
	 */
	if (!has_sm5_context(dev_priv))
		return 0;

	/*
	 * On an SM5-capable device, if the lookup fails then user-space
	 * probably used the old streamoutput define command. Return without
	 * an error.
	 */
	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res))
		return 0;

	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
					  &sw_context->staged_cmd_res);
}

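/**
 * vmw_cmd_dx_bind_streamoutput - Validate a streamoutput bind command
 * (SVGA3dCmdDXBindStreamOutput body) and switch the resource's backing mob.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */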
static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find streamoutput to bind.\n");
		return PTR_ERR(res);
	}

	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);

	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
					    VMW_RES_DIRTY_NONE);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}

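/**
 * vmw_cmd_dx_set_streamoutput - Validate a streamoutput set command
 * (SVGA3dCmdDXSetStreamOutput body) and stage the streamoutput binding.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */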
static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct vmw_ctx_bindinfo_so binding;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	if (cmd->body.soid == SVGA3D_INVALID_ID)
		return 0;

	/*
	 * When the device does not support SM5, the streamoutput-with-mob
	 * commands are not available to user-space. Simply return in that
	 * case.
	 */
	if (!has_sm5_context(dev_priv))
		return 0;

	/*
	 * On an SM5-capable device, if the lookup fails then user-space
	 * probably used the old streamoutput define command. Return without
	 * an error.
	 */
	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res))
		return 0;

	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
					    VMW_RES_DIRTY_NONE);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_so;
	binding.slot = 0; /* Only one SO is set on a context at a time. */

	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
			binding.slot);

	return ret;
}

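/**
 * vmw_cmd_indexed_instanced_indirect - Validate an indexed, instanced,
 * indirect draw command (SVGA3dCmdDXDrawIndexedInstancedIndirect body) by
 * checking the argument buffer surface. Requires an SM5-capable device.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */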
static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct vmw_draw_indexed_instanced_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

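/**
 * vmw_cmd_instanced_indirect - Validate an instanced, indirect draw command
 * (SVGA3dCmdDXDrawInstancedIndirect body) by checking the argument buffer
 * surface. Requires an SM5-capable device.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */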
static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_draw_instanced_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDrawInstancedIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

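/**
 * vmw_cmd_dispatch_indirect - Validate an indirect dispatch command
 * (SVGA3dCmdDXDispatchIndirect body) by checking the argument buffer
 * surface. Requires an SM5-capable device.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */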
static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_dispatch_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDispatchIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

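/**
 * vmw_cmd_check_not_3d - Validate a 2D (SVGA FIFO) command: verify its size
 * against the remaining command buffer and restrict these commands to
 * kernel-mode submissions.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: number of bytes remaining in the stream. Out: the size of this
 * command.
 */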
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
			       cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/* SM commands */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003487 true, false, true),
3488 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3489 true, false, true),
Charmaine Lee18835982016-04-12 08:14:23 -07003490 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003491 true, false, true),
3492 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3493 true, false, true),
3494 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3495 true, false, true),
3496 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3497 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3498 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3499 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003500 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3501 true, false, true),
Charmaine Leef3b335502016-02-12 08:11:56 +01003502 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003503 true, false, true),
3504 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3505 &vmw_cmd_dx_check_subresource, true, false, true),
3506 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3507 &vmw_cmd_dx_check_subresource, true, false, true),
3508 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3509 &vmw_cmd_dx_check_subresource, true, false, true),
3510 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3511 &vmw_cmd_dx_view_define, true, false, true),
3512 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3513 &vmw_cmd_dx_view_remove, true, false, true),
3514 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3515 &vmw_cmd_dx_view_define, true, false, true),
3516 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3517 &vmw_cmd_dx_view_remove, true, false, true),
3518 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3519 &vmw_cmd_dx_view_define, true, false, true),
3520 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3521 &vmw_cmd_dx_view_remove, true, false, true),
3522 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3523 &vmw_cmd_dx_so_define, true, false, true),
3524 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3525 &vmw_cmd_dx_cid_check, true, false, true),
3526 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3527 &vmw_cmd_dx_so_define, true, false, true),
3528 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3529 &vmw_cmd_dx_cid_check, true, false, true),
3530 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3531 &vmw_cmd_dx_so_define, true, false, true),
3532 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3533 &vmw_cmd_dx_cid_check, true, false, true),
3534 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3535 &vmw_cmd_dx_so_define, true, false, true),
3536 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3537 &vmw_cmd_dx_cid_check, true, false, true),
3538 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3539 &vmw_cmd_dx_so_define, true, false, true),
3540 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3541 &vmw_cmd_dx_cid_check, true, false, true),
3542 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3543 &vmw_cmd_dx_define_shader, true, false, true),
3544 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3545 &vmw_cmd_dx_destroy_shader, true, false, true),
3546 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3547 &vmw_cmd_dx_bind_shader, true, false, true),
3548 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3549 &vmw_cmd_dx_so_define, true, false, true),
3550 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
Deepak Rawate8bead92018-12-13 14:04:31 -08003551 &vmw_cmd_dx_destroy_streamoutput, true, false, true),
3552 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
3553 &vmw_cmd_dx_set_streamoutput, true, false, true),
Charmaine Lee2f633e52015-08-10 10:45:11 -07003554 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3555 &vmw_cmd_dx_set_so_targets, true, false, true),
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003556 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3557 &vmw_cmd_dx_cid_check, true, false, true),
3558 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3559 &vmw_cmd_dx_cid_check, true, false, true),
Neha Bhende0fca749e2015-08-10 10:51:07 -07003560 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3561 &vmw_cmd_buffer_copy_check, true, false, true),
3562 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3563 &vmw_cmd_pred_copy_check, true, false, true),
Charmaine Lee1f982e42016-10-10 10:37:03 -07003564 VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3565 &vmw_cmd_dx_transfer_from_buffer,
3566 true, false, true),
Roland Scheideggerbf625872021-12-06 12:26:18 -05003567 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
3568 &vmw_cmd_dx_set_constant_buffer_offset,
3569 true, false, true),
3570 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
3571 &vmw_cmd_dx_set_constant_buffer_offset,
3572 true, false, true),
3573 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
3574 &vmw_cmd_dx_set_constant_buffer_offset,
3575 true, false, true),
3576 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
3577 &vmw_cmd_dx_set_constant_buffer_offset,
3578 true, false, true),
3579 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
3580 &vmw_cmd_dx_set_constant_buffer_offset,
3581 true, false, true),
3582 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
3583 &vmw_cmd_dx_set_constant_buffer_offset,
3584 true, false, true),
Neha Bhende0d81d342018-06-18 17:14:56 -07003585 VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3586 true, false, true),
Deepak Rawat5e8ec0d2018-12-13 13:51:08 -08003587
3588 /*
3589 * SM5 commands
3590 */
3591 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
3592 true, false, true),
3593 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
3594 true, false, true),
3595 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
3596 true, false, true),
3597 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
3598 &vmw_cmd_clear_uav_float, true, false, true),
3599 VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
3600 false, true),
3601 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
3602 true),
Deepak Rawatb6fad732018-12-13 14:00:18 -08003603 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
3604 &vmw_cmd_indexed_instanced_indirect, true, false, true),
3605 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
3606 &vmw_cmd_instanced_indirect, true, false, true),
3607 VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
3608 VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
3609 &vmw_cmd_dispatch_indirect, true, false, true),
Deepak Rawat5e8ec0d2018-12-13 13:51:08 -08003610 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
3611 false, true),
Deepak Rawatb6fad732018-12-13 14:00:18 -08003612 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
3613 &vmw_cmd_sm5_view_define, true, false, true),
Deepak Rawate8bead92018-12-13 14:04:31 -08003614 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
3615 &vmw_cmd_dx_define_streamoutput, true, false, true),
3616 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
3617 &vmw_cmd_dx_bind_streamoutput, true, false, true),
Roland Scheidegger853369d2021-12-06 12:26:15 -05003618 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
3619 &vmw_cmd_dx_so_define, true, false, true),
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003620};
3621
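/*
 * Illustrative note, not driver code: vmw_cmd_entries[] above is indexed
 * by (cmd_id - SVGA_3D_CMD_BASE), and each entry carries the verifier
 * callback plus the user_allow, gb_disable and gb_enable flags checked in
 * vmw_cmd_check() below. A dispatch therefore boils down to roughly:
 *
 *	entry = &vmw_cmd_entries[header->id - SVGA_3D_CMD_BASE];
 *	ret = entry->func(dev_priv, sw_context, header);
 */
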
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}

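/*
 * Illustrative sketch, not driver code: a debug helper could walk a
 * submitted stream with vmw_cmd_describe() to log each command. The
 * "buf" and "bytes_left" names here are hypothetical:
 *
 *	const char *name;
 *	u32 size;
 *
 *	while (bytes_left >= sizeof(u32)) {
 *		if (!vmw_cmd_describe(buf, &size, &name) || size == 0 ||
 *		    size > bytes_left)
 *			break;
 *		pr_info("vmwgfx cmd: %s (%u bytes)\n", name, size);
 *		buf += size;
 *		bytes_left -= size;
 *	}
 */
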
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context, void *buf,
			 uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
			       cmd_id + SVGA_3D_CMD_BASE, ret);
		return ret;
	}

	return 0;
out_invalid:
	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context, void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		VMW_DEBUG_USER("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->base;
		switch (bo->resource->mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->resource->start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->resource->start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

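/*
 * Illustrative note, not driver code: the loop above grows the bounce
 * buffer geometrically (roughly 1.5x per round, page-aligned), so reaching
 * a large batch size takes a logarithmic number of reallocations. E.g.,
 * with 4 KiB pages, a current size of 64 KiB and a request for 200 KiB:
 *
 *	64 KiB -> PAGE_ALIGN(96 KiB)  = 96 KiB
 *	       -> PAGE_ALIGN(144 KiB) = 144 KiB
 *	       -> PAGE_ALIGN(216 KiB) = 216 KiB >= 200 KiB, done.
 */
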
/*
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates a
 * userspace handle if @p_handle is not NULL, otherwise not.
 */

int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_cmd_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
					 false, VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
 * the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: exported file descriptor for the fence. -1 if not used
 *
 * This function copies fence information to user-space. If copying fails, the
 * user-space struct drm_vmw_fence_rep::error member is hopefully left
 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
 * will hopefully be detected.
 *
 * Also if copying fails, user-space will be unable to signal the fence object
 * so we wait for it immediately, and then unreference the user-space reference.
 */
int
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp, int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence, uint32_t fence_handle,
			    int32_t out_fence_fd)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return 0;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not seeing
	 * fence_rep::error filled in. Typically user-space would have pre-set
	 * that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync and unreference the
	 * handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}

	return ret ? -EFAULT : 0;
}

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch pointed to
 * by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands, u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
					  sw_context->dx_ctx_node->ctx->id);
	else
		cmd = VMW_CMD_RESERVE(dev_priv, command_size);

	if (!cmd)
		return -ENOMEM;

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmd_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
 * command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer represented
 * by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
				       header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: If command buffers could not be used, the function will
 * return the value of @kernel_commands on function call. That value may be
 * NULL. In that case, the value of *@header will be set to NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and copies
 * the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 *
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands, u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		VMW_DEBUG_USER("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
					   header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands, command_size);
	if (ret) {
		VMW_DEBUG_USER("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource *res;
	int ret;
	unsigned int size;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
	ret = vmw_validation_preload_res(sw_context->ctx, size);
	if (ret)
		return ret;

	res = vmw_user_resource_noref_lookup_handle
		(dev_priv, sw_context->fp->tfile, handle,
		 user_context_converter);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
			       (unsigned int) handle);
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
	if (unlikely(ret != 0))
		return ret;

	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
	sw_context->man = vmw_context_res_man(res);

	return 0;
}

int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands, void *kernel_commands,
			uint32_t command_size, uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence, uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_cmdbuf_header *header;
	uint32_t handle = 0;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);

	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			VMW_DEBUG_USER("Failed to get a fence fd.\n");
			return out_fence_fd;
		}
	}

	if (throttle_us) {
		VMW_DEBUG_USER("Throttling is no longer supported.\n");
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands)) {
		ret = PTR_ERR(kernel_commands);
		goto out_free_fence_fd;
	}

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
				     command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			VMW_DEBUG_USER("Failed copying commands.\n");
			goto out_unlock;
		}

		kernel_commands = sw_context->cmd_bounce;
	} else if (!header) {
		sw_context->kernel = true;
	}

	sw_context->filp = file_priv;
	sw_context->fp = vmw_fpriv(file_priv);
	INIT_LIST_HEAD(&sw_context->ctx_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->res_relocations);
	INIT_LIST_HEAD(&sw_context->bo_relocations);

	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = vmwgfx_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;

		sw_context->res_ht_initialized = true;
	}

	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	sw_context->ctx = &val_ctx;
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_reserve(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validation_res_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_validation_drop_ht(&val_ctx);

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_cmd_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */
	if (ret != 0)
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");

	vmw_execbuf_bindings_commit(sw_context, false);
	vmw_bind_dx_query_mob(sw_context);
	vmw_validation_res_unreserve(&val_ctx, false);

	vmw_validation_bo_fence(sw_context->ctx, fence);

	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	/*
	 * If anything fails here, give up trying to export the fence and do a
	 * sync since the user mode will not be able to sync the fence itself.
	 * This ensures we are still functionally correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			VMW_DEBUG_USER("Sync file create failed for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		}
	}

	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
					  user_fence_rep, fence, handle,
					  out_fence_fd);

	if (sync_file) {
		if (ret) {
			/* usercopy of fence failed, put the file object */
			fput(sync_file->file);
			put_unused_fd(out_fence_fd);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);

	return ret;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
	vmw_execbuf_bindings_commit(sw_context, true);
	vmw_validation_res_unreserve(&val_ctx, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	vmw_validation_drop_ht(&val_ctx);
	WARN_ON(!list_empty(&sw_context->ctx_list));
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

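/*
 * Illustrative summary, not driver code: a successful vmw_execbuf_process()
 * call above runs through the following stages:
 *
 *	copy or reserve commands -> vmw_cmd_check_all() (per-command
 *	verifiers) -> reserve and validate buffers and resources -> patch
 *	relocations and submit (fifo or command buffer manager) -> fence the
 *	submission -> copy fence info to user-space -> unreference/cleanup.
 */
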
/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer if the
 * normal way to do this hits an error, which should typically be extremely
 * rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
 * bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
 * query barrier that flushes all queries touching the current buffer pointed to
 * by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries).
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
 * calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct vmw_fence_obj *lfence = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_no_reserve;

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (ret)
			goto out_no_emit;
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	vmw_validation_bo_fence(&val_ctx, fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	vmw_validation_unref_lists(&val_ctx);
	vmw_bo_unreference(&dev_priv->pinned_bo);

out_unlock:
	return;
out_no_emit:
	vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
	vmw_validation_unref_lists(&val_ctx);
	vmw_execbuf_unpin_panic(dev_priv);
	vmw_bo_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries).
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

Emil Velikovcbfbe472019-05-22 17:41:17 +01004459int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4460 struct drm_file *file_priv)
Thomas Hellstrom922ade02011-10-04 20:13:17 +02004461{
4462 struct vmw_private *dev_priv = vmw_priv(dev);
Emil Velikovcbfbe472019-05-22 17:41:17 +01004463 struct drm_vmw_execbuf_arg *arg = data;
Thomas Hellstrom922ade02011-10-04 20:13:17 +02004464 int ret;
Sinclair Yeh585851162017-07-05 01:45:40 -07004465 struct dma_fence *in_fence = NULL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07004466
Martin Krastev7a7a9332021-06-09 13:23:00 -04004467 MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
4468 MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);
4469
	/*
	 * Extend the ioctl argument while maintaining backwards compatibility:
	 * we take different code paths depending on the value of arg->version.
	 *
	 * Note: The ioctl argument is extended and zero-padded by core DRM.
	 */
	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
		     arg->version == 0)) {
		VMW_DEBUG_USER("Incorrect execbuf version.\n");
		ret = -EINVAL;
		goto mksstats_out;
	}

	switch (arg->version) {
	case 1:
		/* For v1, core DRM has extended + zero-padded the data. */
		arg->context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		/* For v2 and later, core DRM has copied the data correctly. */
		break;
	}

	/* If a fence FD was imported from elsewhere, wait on it. */
	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg->imported_fence_fd);

		if (!in_fence) {
			VMW_DEBUG_USER("Cannot get imported fence\n");
			ret = -EINVAL;
			goto mksstats_out;
		}

		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
		if (ret)
			goto out;
	}

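	/* Hand the command stream over to the main execbuf processor. */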
	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  arg->context_handle,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL, arg->flags);

	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);

mksstats_out:
	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
	return ret;
}
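
/*
 * Userspace-side sketch (illustrative only; the exact libdrm helper and the
 * field usage are assumptions based on the vmwgfx uapi header): a v2
 * submission through this ioctl might look roughly like this:
 *
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (uintptr_t)cmd_buf,
 *		.command_size = cmd_size,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *		.context_handle = ctx_handle,
 *		.fence_rep = (uintptr_t)&fence_rep,
 *	};
 *
 *	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *
 * cmd_buf, cmd_size, ctx_handle and fence_rep are caller-provided; setting
 * DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD in arg.flags together with a valid
 * arg.imported_fence_fd makes the kernel wait on the imported fence first.
 */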