// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/*
 * Helper macro to get dx_ctx_node if available, otherwise print an error
 * message. This is for use in command verifier functions where, if dx_ctx_node
 * is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context)					\
({									\
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({	\
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
		__sw_context->dx_ctx_node;				\
	});								\
})
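
/*
 * Illustrative use of VMW_GET_CTX_NODE() (this exact pattern appears in
 * command verifiers further down, e.g. vmw_view_bindings_add()): grab the
 * DX context node and reject the command when none is set:
 *
 *	struct vmw_ctx_validation_info *ctx_node =
 *		VMW_GET_CTX_NODE(sw_context);
 *
 *	if (!ctx_node)
 *		return -EINVAL;
 */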

#define VMW_DECLARE_CMD_VAR(__var, __type)				\
	struct {							\
		SVGA3dCmdHeader header;					\
		__type body;						\
	} __var
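
/*
 * Example expansion of VMW_DECLARE_CMD_VAR(), matching its use in
 * vmw_rebind_all_dx_query() below:
 *
 *	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
 *
 * declares:
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		SVGA3dCmdDXBindAllQuery body;
 *	} *cmd;
 */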

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),		\
				       (_gb_disable), (_gb_enable), #_cmd}
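
/*
 * Illustrative sketch (assumed shape of the command dispatch table, which
 * is defined later in this file): VMW_CMD_DEF() produces a designated
 * initializer indexed by command id relative to SVGA_3D_CMD_BASE:
 *
 *	static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
 *			    false, false, false),
 *		...
 *	};
 */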

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the context resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node. Typically the binding manager
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
 * rcu-protected pointer to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		vmw_user_resource_noref_release();
		return 0;
	}

	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
					  dirty, (void **)&ctx_info,
					  &first_usage);
	vmw_user_resource_noref_release();
	if (ret)
		return ret;

	if (priv_size && first_usage) {
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
					      ctx_info);
		if (ret) {
			VMW_DEBUG_USER("Failed first usage context setup.\n");
			return ret;
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_res_cache_entry *rcache;
	enum vmw_res_type res_type = vmw_res_type(res);
	void *ptr;
	int ret;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
					  &ptr, NULL);
	if (ret)
		return ret;

	vmw_execbuf_rcache_update(rcache, res, ptr);

	return 0;
}

/**
 * vmw_view_res_val_add - Add a view, and the surface it's pointing to, to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
					    vmw_view_dirtying(view));
	if (ret)
		return ret;

	return vmw_execbuf_res_noctx_val_add(sw_context, view,
					     VMW_RES_DIRTY_NONE);
}

/**
 * vmw_view_id_val_add - Look up a view and add it, and the surface it's
 * pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_SET);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_noctx_val_add
				(sw_context, entry->res,
				 vmw_binding_dirtying(entry->bt));
		if (unlikely(ret != 0))
			break;
	}

	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need not
 * be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}
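
/*
 * Illustrative walk-through of the relocation flow (a sketch; see
 * vmw_cmd_res_check() below for the real call site): while parsing, a
 * verifier records where in the command buffer a resource id lives:
 *
 *	ret = vmw_resource_relocation_add(sw_context, res,
 *					  vmw_ptr_diff(sw_context->buf_start,
 *						       id_loc),
 *					  vmw_res_rel_normal);
 *
 * Once validation has assigned device ids, vmw_resource_relocations_apply()
 * patches each recorded location with rel->res->id, or with SVGA_3D_CMD_NOP
 * for the NOP relocation types.
 */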

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +0200595/**
Deepak Rawat680360a2019-02-13 13:20:42 -0800596 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
597 * list.
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000598 *
599 * @sw_context: Pointer to the software context.
600 *
Deepak Rawat680360a2019-02-13 13:20:42 -0800601 * Note that since vmware's command submission currently is protected by the
602 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
603 * only a single thread at once will attempt this.
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000604 */
605static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
606{
Thomas Hellstrom9c079b82018-09-26 15:28:55 +0200607 int ret;
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000608
Thomas Hellstrom9c079b82018-09-26 15:28:55 +0200609 ret = vmw_validation_res_reserve(sw_context->ctx, true);
610 if (ret)
611 return ret;
Charmaine Lee2f633e52015-08-10 10:45:11 -0700612
Sinclair Yehfd11a3c2015-08-10 10:56:15 -0700613 if (sw_context->dx_query_mob) {
Thomas Hellstromf1d34bf2018-06-19 15:02:16 +0200614 struct vmw_buffer_object *expected_dx_query_mob;
Sinclair Yehfd11a3c2015-08-10 10:56:15 -0700615
616 expected_dx_query_mob =
617 vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
618 if (expected_dx_query_mob &&
619 expected_dx_query_mob != sw_context->dx_query_mob) {
620 ret = -EINVAL;
621 }
622 }
623
624 return ret;
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000625}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to the resource. Populated on exit. May be NULL.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		res = vmw_user_resource_noref_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
		if (unlikely(ret != 0))
			return ret;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

	return 0;
}
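
/*
 * Typical verifier usage of vmw_cmd_res_check(), as in vmw_cmd_cid_check()
 * below: validate the user-space context handle embedded in the command
 * body:
 *
 *	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 *				 VMW_RES_DIRTY_SET, user_context_converter,
 *				 &cmd->body, NULL);
 */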

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}
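
/*
 * Illustrative caller sketch (an assumption; the real callers are the DX
 * set-shader-resource verifiers later in this file): binding num_views
 * shader-resource views starting at a command-supplied slot:
 *
 *	ret = vmw_view_bindings_add(sw_context, vmw_view_sr,
 *				    vmw_ctx_binding_sr, shader_slot,
 *				    view_ids, num_views, first_slot);
 */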

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command submission
 * context's resource cache and hence the last resource of that type to be
 * processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If so,
 * the function prepares the state of @sw_context for switching pinned buffers
 * after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
1065
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001066/**
1067 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1068 *
1069 * @dev_priv: The device private structure.
1070 * @sw_context: The software context used for this command submission batch.
1071 *
1072 * This function will check if we're switching query buffers, and will then,
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001073 * issue a dummy occlusion query wait used as a query barrier. When the fence
Deepak Rawat680360a2019-02-13 13:20:42 -08001074 * object following that query wait has signaled, we are sure that all preceding
1075 * queries have finished, and the old query buffer can be unpinned. However,
1076 * since both the new query buffer and the old one are fenced with that fence,
1077 * we can do an asynchronus unpin now, and be sure that the old query buffer
1078 * won't be moved until the fence has signaled.
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001079 *
1080 * As mentioned above, both the new - and old query buffers need to be fenced
1081 * using a sequence emitted *after* calling this function.
1082 */
1083static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1084 struct vmw_sw_context *sw_context)
1085{
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001086 /*
1087 * The validate list should still hold references to all
1088 * contexts here.
1089 */
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001090 if (sw_context->needs_post_query_barrier) {
1091 struct vmw_res_cache_entry *ctx_entry =
1092 &sw_context->res_cache[vmw_res_context];
1093 struct vmw_resource *ctx;
1094 int ret;
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001095
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001096 BUG_ON(!ctx_entry->valid);
1097 ctx = ctx_entry->res;
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001098
1099 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
1100
1101 if (unlikely(ret != 0))
Deepak Rawat5724f892019-02-11 11:46:27 -08001102 VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001103 }
1104
1105 if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1106 if (dev_priv->pinned_bo) {
Thomas Hellstrom459d0fa2015-06-26 00:25:37 -07001107 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
Thomas Hellstromf1d34bf2018-06-19 15:02:16 +02001108 vmw_bo_unreference(&dev_priv->pinned_bo);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001109 }
1110
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001111 if (!sw_context->needs_post_query_barrier) {
Thomas Hellstrom459d0fa2015-06-26 00:25:37 -07001112 vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001113
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001114 /*
1115 * We pin also the dummy_query_bo buffer so that we
Deepak Rawat680360a2019-02-13 13:20:42 -08001116 * don't need to validate it when emitting dummy queries
1117 * in context destroy paths.
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001118 */
Thomas Hellstrom459d0fa2015-06-26 00:25:37 -07001119 if (!dev_priv->dummy_query_bo_pinned) {
1120 vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1121 true);
1122 dev_priv->dummy_query_bo_pinned = true;
1123 }
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001124
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001125 BUG_ON(sw_context->last_query_ctx == NULL);
1126 dev_priv->query_cid = sw_context->last_query_ctx->id;
1127 dev_priv->query_cid_valid = true;
1128 dev_priv->pinned_bo =
Thomas Hellstromf1d34bf2018-06-19 15:02:16 +02001129 vmw_bo_reference(sw_context->cur_query_bo);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001130 }
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001131 }
1132}
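
/*
 * Illustrative sketch, not part of the original driver: the intended use
 * of the prepare/commit pair, as seen from the execbuf path. The helper
 * name example_query_switch_flow() and its reduced argument list are
 * assumptions made for brevity; the called functions are the driver's own.
 */
static void example_query_switch_flow(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context)
{
	struct vmw_fence_obj *fence = NULL;

	/* Decide whether to pin the new query buffer / unpin the old one. */
	vmw_query_bo_switch_commit(dev_priv, sw_context);

	/* Emit the fence *after* the commit, as the comment above requires. */
	if (vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL) != 0)
		return;

	/*
	 * Fence all validated buffers; this covers both cur_query_bo and,
	 * when it was just unpinned, the old pinned_bo.
	 */
	vmw_validation_bo_fence(sw_context->ctx, fence);
	vmw_fence_obj_unreference(&fence);
}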

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate. The
 * former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
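
/*
 * Condensed, illustration-only sketch of what vmw_apply_relocations()
 * later does with the entry queued above; the real routine lives further
 * down in this file and also handles the VRAM and GMR placements.
 */
static void example_apply_mob_relocs(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		struct ttm_buffer_object *bo = &reloc->vbo->base;

		/* For MOB-placed buffers, the id is the BO's start page. */
		if (bo->mem.mem_type == VMW_PL_MOB)
			*reloc->mob_loc = (SVGAMobId) bo->mem.start;
	}
}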

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the DMA buffer identified by the user-space
 * handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use GMR region.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
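
/*
 * Companion sketch to the MOB example above, again for illustration only:
 * guest pointers are patched according to where the buffer ended up after
 * validation, mirroring the VRAM/GMR cases of the real
 * vmw_apply_relocations().
 */
static void example_apply_guest_ptr_reloc(struct vmw_relocation *reloc)
{
	struct ttm_buffer_object *bo = &reloc->vbo->base;

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		/* VRAM buffers are addressed through the framebuffer GMR. */
		reloc->location->offset += bo->offset;
		reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
		break;
	case VMW_PL_GMR:
		reloc->location->gmrId = bo->mem.start;
		break;
	default:
		break;
	}
}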

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
	return 0;
}
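
/*
 * Illustrative sketch, assuming the vmw_context_bind_dx_query() helper
 * from the context code: once the batch has been submitted successfully,
 * execbuf commits the association recorded above (the real call site is
 * in vmw_execbuf_process(), later in this file).
 */
static int example_commit_dx_query_bind(struct vmw_sw_context *sw_context)
{
	if (!sw_context->dx_query_mob)
		return 0;

	return vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					 sw_context->dx_query_mob);
}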

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}
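
/*
 * Illustrative note: the in-place promotion above (and in the END/WAIT
 * variants below) rewrites a legacy query command into its guest-backed
 * counterpart, which is only safe because the two fixed-size commands
 * have identical sizes; the BUG_ON() verifies that at run time. A
 * compile-time variant of the same check (an assumption, not present in
 * the original) could read:
 */
static inline void example_check_query_cmd_sizes(void)
{
	BUILD_BUG_ON(sizeof(SVGA3dCmdBeginQuery) !=
		     sizeof(SVGA3dCmdBeginGBQuery));
}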

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);

	return 0;
}
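
/*
 * Worked example for the clamping above (illustrative numbers): with a
 * 16-page BO, bo_size starts at 65536; a guest.ptr.offset of 4096 leaves
 * bo_size = 61440, so any suffix->maximumOffset beyond 61440 is clamped
 * to 61440 and the device can never be asked to DMA past the end of the
 * buffer object.
 */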

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
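
/*
 * Illustrative layout of the variable-size draw command validated above;
 * header->size covers everything after the SVGA3dCmdHeader, which is why
 * the two maxnum bounds are computed the way they are:
 *
 *   SVGA3dCmdHeader
 *   SVGA3dCmdDrawPrimitives              (cmd->body)
 *   SVGA3dVertexDecl     x numVertexDecls
 *   SVGA3dPrimitiveRange x numRanges
 */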

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_buffer_object *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: Pointer to the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. The switch itself is
 * registered with the validation code via vmw_validation_res_switch_backup().
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res, uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_buffer_object *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper around
 * vmw_cmd_res_switch_backup() with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter, &cmd->body.sid,
				     &cmd->body.mobid, 0);
}

/**
 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1, cmd->body.type,
				    size, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;
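
	/*
	 * The shader has been absorbed into the kernel-side compat shader
	 * manager above, so have relocation processing turn the
	 * device-visible command into a NOP; the device never sees the
	 * legacy define.
	 */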
	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
				cmd->body.type, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;
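
	/* As in vmw_cmd_shader_define() above: NOP out the legacy command. */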
	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid, cmd->body.type);
		if (!IS_ERR(res)) {
			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_NONE);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					VMW_RES_DIRTY_NONE,
					user_shader_converter, &cmd->body.shid,
					&res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter, &cmd->body.shid,
				     &cmd->body.mobid, cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
			       (unsigned int) cmd->body.type,
			       (unsigned int) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
			binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
		container_of(header, typeof(*cmd), header);
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		VMW_DEBUG_USER("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}
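
/*
 * Note on the bounds check above (illustrative): startView and
 * num_sr_view are both 32-bit, so their sum is evaluated in u64 to rule
 * out wrap-around before comparing against SVGA3D_DX_MAX_SRVIEWS. The
 * view ids themselves trail the fixed-size body as
 * num_sr_view * sizeof(SVGA3dShaderResourceViewId) bytes, hence &cmd[1].
 */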
2173
2174/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002175 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002176 *
2177 * @dev_priv: Pointer to a device private struct.
2178 * @sw_context: The software context being used for this batch.
2179 * @header: Pointer to the command header in the command stream.
2180 */
2181static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2182 struct vmw_sw_context *sw_context,
2183 SVGA3dCmdHeader *header)
2184{
Deepak Rawatd01316d2019-02-08 15:50:40 -08002185 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002186 struct vmw_resource *res = NULL;
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002187 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002188 struct vmw_ctx_bindinfo_shader binding;
2189 int ret = 0;
2190
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002191 if (!ctx_node)
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002192 return -EINVAL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002193
2194 cmd = container_of(header, typeof(*cmd), header);
2195
2196 if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
Deepak Rawat5724f892019-02-11 11:46:27 -08002197 VMW_DEBUG_USER("Illegal shader type %u.\n",
2198 (unsigned int) cmd->body.type);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002199 return -EINVAL;
2200 }
2201
2202 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2203 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2204 if (IS_ERR(res)) {
Deepak Rawat5724f892019-02-11 11:46:27 -08002205 VMW_DEBUG_USER("Could not find shader for binding.\n");
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002206 return PTR_ERR(res);
2207 }
2208
Thomas Hellstroma9f58c42019-02-20 08:21:26 +01002209 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2210 VMW_RES_DIRTY_NONE);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002211 if (ret)
Thomas Hellstrom508108e2018-09-26 16:28:45 +02002212 return ret;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002213 }
2214
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002215 binding.bi.ctx = ctx_node->ctx;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002216 binding.bi.res = res;
2217 binding.bi.bt = vmw_ctx_binding_dx_shader;
2218 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2219
Deepak Rawat680360a2019-02-13 13:20:42 -08002220 vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002221
Thomas Hellstrom508108e2018-09-26 16:28:45 +02002222 return 0;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002223}
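
/*
 * Note on the NULL-resource path above: staging a binding with
 * res == NULL is how an explicit unbind (shaderId == SVGA3D_INVALID_ID)
 * is tracked. A hypothetical user-space sequence illustrating both
 * directions (vs_shader_id is an assumed user-space handle, not a name
 * from this file):
 *
 *	cmd->body.type = SVGA3D_SHADERTYPE_VS;
 *	cmd->body.shaderId = vs_shader_id;		// bind
 *	...
 *	cmd->body.shaderId = SVGA3D_INVALID_ID;		// later: unbind
 */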
2224
2225/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002226 * vmw_cmd_dx_set_vertex_buffers - Validate SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2227 * command
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002228 *
2229 * @dev_priv: Pointer to a device private struct.
2230 * @sw_context: The software context being used for this batch.
2231 * @header: Pointer to the command header in the command stream.
2232 */
2233static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2234 struct vmw_sw_context *sw_context,
2235 SVGA3dCmdHeader *header)
2236{
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002237 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002238 struct vmw_ctx_bindinfo_vb binding;
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002239 struct vmw_resource *res;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002240 struct {
2241 SVGA3dCmdHeader header;
2242 SVGA3dCmdDXSetVertexBuffers body;
2243 SVGA3dVertexBuffer buf[];
2244 } *cmd;
2245 int i, ret, num;
2246
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002247 if (!ctx_node)
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002248 return -EINVAL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002249
2250 cmd = container_of(header, typeof(*cmd), header);
2251 num = (cmd->header.size - sizeof(cmd->body)) /
2252 sizeof(SVGA3dVertexBuffer);
2253 if ((u64)num + (u64)cmd->body.startBuffer >
2254 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
Deepak Rawat5724f892019-02-11 11:46:27 -08002255 VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002256 return -EINVAL;
2257 }
2258
2259 for (i = 0; i < num; i++) {
2260 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
Thomas Hellstroma9f58c42019-02-20 08:21:26 +01002261 VMW_RES_DIRTY_NONE,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002262 user_surface_converter,
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002263 &cmd->buf[i].sid, &res);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002264 if (unlikely(ret != 0))
2265 return ret;
2266
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002267 binding.bi.ctx = ctx_node->ctx;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002268 binding.bi.bt = vmw_ctx_binding_vb;
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002269 binding.bi.res = res;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002270 binding.offset = cmd->buf[i].offset;
2271 binding.stride = cmd->buf[i].stride;
2272 binding.slot = i + cmd->body.startBuffer;
2273
Deepak Rawat680360a2019-02-13 13:20:42 -08002274 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002275 }
2276
2277 return 0;
2278}
2279
2280/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002281 * vmw_cmd_dx_set_index_buffer - Validate
Brian Paul8bd62872017-07-17 07:36:10 -07002282 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002283 *
2284 * @dev_priv: Pointer to a device private struct.
2285 * @sw_context: The software context being used for this batch.
2286 * @header: Pointer to the command header in the command stream.
2287 */
2288static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2289 struct vmw_sw_context *sw_context,
2290 SVGA3dCmdHeader *header)
2291{
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002292 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002293 struct vmw_ctx_bindinfo_ib binding;
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002294 struct vmw_resource *res;
Deepak Rawatd01316d2019-02-08 15:50:40 -08002295 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002296 int ret;
2297
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002298 if (!ctx_node)
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002299 return -EINVAL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002300
2301 cmd = container_of(header, typeof(*cmd), header);
2302 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
Thomas Hellstroma9f58c42019-02-20 08:21:26 +01002303 VMW_RES_DIRTY_NONE, user_surface_converter,
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002304 &cmd->body.sid, &res);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002305 if (unlikely(ret != 0))
2306 return ret;
2307
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002308 binding.bi.ctx = ctx_node->ctx;
2309 binding.bi.res = res;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002310 binding.bi.bt = vmw_ctx_binding_ib;
2311 binding.offset = cmd->body.offset;
2312 binding.format = cmd->body.format;
2313
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002314 vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002315
2316 return 0;
2317}
2318
2319/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002320 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2321 * command
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002322 *
2323 * @dev_priv: Pointer to a device private struct.
2324 * @sw_context: The software context being used for this batch.
2325 * @header: Pointer to the command header in the command stream.
2326 */
2327static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2328 struct vmw_sw_context *sw_context,
2329 SVGA3dCmdHeader *header)
2330{
Deepak Rawatd01316d2019-02-08 15:50:40 -08002331 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2332 container_of(header, typeof(*cmd), header);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002333 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2334 sizeof(SVGA3dRenderTargetViewId);
Deepak Rawatd01316d2019-02-08 15:50:40 -08002335 int ret;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002336
2337 if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
Deepak Rawat5724f892019-02-11 11:46:27 -08002338 VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002339 return -EINVAL;
2340 }
2341
Deepak Rawat680360a2019-02-13 13:20:42 -08002342 ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2343 0, &cmd->body.depthStencilViewId, 1, 0);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002344 if (ret)
2345 return ret;
2346
2347 return vmw_view_bindings_add(sw_context, vmw_view_rt,
Deepak Rawat680360a2019-02-13 13:20:42 -08002348 vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2349 num_rt_view, 0);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002350}
2351
2352/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002353 * vmw_cmd_dx_clear_rendertarget_view - Validate
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002354 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2355 *
2356 * @dev_priv: Pointer to a device private struct.
2357 * @sw_context: The software context being used for this batch.
2358 * @header: Pointer to the command header in the command stream.
2359 */
2360static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2361 struct vmw_sw_context *sw_context,
2362 SVGA3dCmdHeader *header)
2363{
Deepak Rawatd01316d2019-02-08 15:50:40 -08002364 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2365 container_of(header, typeof(*cmd), header);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002366
Thomas Hellstrom508108e2018-09-26 16:28:45 +02002367	return PTR_ERR_OR_ZERO(vmw_view_id_val_add(sw_context, vmw_view_rt,
2368 cmd->body.renderTargetViewId));
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002369}
2370
2371/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002372 * vmw_cmd_dx_clear_depthstencil_view - Validate
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002373 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2374 *
2375 * @dev_priv: Pointer to a device private struct.
2376 * @sw_context: The software context being used for this batch.
2377 * @header: Pointer to the command header in the command stream.
2378 */
2379static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2380 struct vmw_sw_context *sw_context,
2381 SVGA3dCmdHeader *header)
2382{
Deepak Rawatd01316d2019-02-08 15:50:40 -08002383 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2384 container_of(header, typeof(*cmd), header);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002385
Thomas Hellstrom508108e2018-09-26 16:28:45 +02002386	return PTR_ERR_OR_ZERO(vmw_view_id_val_add(sw_context, vmw_view_ds,
2387 cmd->body.depthStencilViewId));
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002388}
2389
2390static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2391 struct vmw_sw_context *sw_context,
2392 SVGA3dCmdHeader *header)
2393{
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002394 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002395 struct vmw_resource *srf;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002396 struct vmw_resource *res;
2397 enum vmw_view_type view_type;
2398 int ret;
2399 /*
Deepak Rawat680360a2019-02-13 13:20:42 -08002400 * This is based on the fact that all affected define commands have the
2401 * same initial command body layout.
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002402 */
2403 struct {
2404 SVGA3dCmdHeader header;
2405 uint32 defined_id;
2406 uint32 sid;
2407 } *cmd;
2408
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002409 if (!ctx_node)
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002410 return -EINVAL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002411
2412 view_type = vmw_view_cmd_to_type(header->id);
Dan Carpenter0d9cac02018-01-10 12:40:04 +03002413 if (view_type == vmw_view_max)
2414 return -EINVAL;
Deepak Rawat680360a2019-02-13 13:20:42 -08002415
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002416 cmd = container_of(header, typeof(*cmd), header);
2417 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
Thomas Hellstroma9f58c42019-02-20 08:21:26 +01002418 VMW_RES_DIRTY_NONE, user_surface_converter,
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002419 &cmd->sid, &srf);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002420 if (unlikely(ret != 0))
2421 return ret;
2422
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002423 res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002424 ret = vmw_cotable_notify(res, cmd->defined_id);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002425 if (unlikely(ret != 0))
2426 return ret;
2427
Deepak Rawat680360a2019-02-13 13:20:42 -08002428 return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2429 cmd->defined_id, header,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002430 header->size + sizeof(*header),
2431 &sw_context->staged_cmd_res);
2432}
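
/*
 * Example of the shared-prefix assumption documented above: every
 * define-view command body starts with the view id followed by the
 * surface id, e.g. (sketched; see the SVGA device headers for the
 * authoritative layouts):
 *
 *	struct SVGA3dCmdDXDefineRenderTargetView {
 *		SVGA3dRenderTargetViewId renderTargetViewId; // defined_id
 *		SVGA3dSurfaceId sid;                         // sid
 *		...
 *	};
 *
 * which is why the anonymous { defined_id, sid } struct above can alias
 * all of them.
 */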
2433
Charmaine Lee2f633e52015-08-10 10:45:11 -07002434/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002435 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
Charmaine Lee2f633e52015-08-10 10:45:11 -07002436 *
2437 * @dev_priv: Pointer to a device private struct.
2438 * @sw_context: The software context being used for this batch.
2439 * @header: Pointer to the command header in the command stream.
2440 */
2441static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2442 struct vmw_sw_context *sw_context,
2443 SVGA3dCmdHeader *header)
2444{
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002445 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
Charmaine Lee2f633e52015-08-10 10:45:11 -07002446 struct vmw_ctx_bindinfo_so binding;
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002447 struct vmw_resource *res;
Charmaine Lee2f633e52015-08-10 10:45:11 -07002448 struct {
2449 SVGA3dCmdHeader header;
2450 SVGA3dCmdDXSetSOTargets body;
2451 SVGA3dSoTarget targets[];
2452 } *cmd;
2453 int i, ret, num;
2454
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002455 if (!ctx_node)
Charmaine Lee2f633e52015-08-10 10:45:11 -07002456 return -EINVAL;
Charmaine Lee2f633e52015-08-10 10:45:11 -07002457
2458 cmd = container_of(header, typeof(*cmd), header);
Deepak Rawat680360a2019-02-13 13:20:42 -08002459 num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
Charmaine Lee2f633e52015-08-10 10:45:11 -07002460
2461 if (num > SVGA3D_DX_MAX_SOTARGETS) {
Deepak Rawat5724f892019-02-11 11:46:27 -08002462 VMW_DEBUG_USER("Invalid DX SO binding.\n");
Charmaine Lee2f633e52015-08-10 10:45:11 -07002463 return -EINVAL;
2464 }
2465
2466 for (i = 0; i < num; i++) {
2467 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
Thomas Hellstroma9f58c42019-02-20 08:21:26 +01002468 VMW_RES_DIRTY_SET,
Charmaine Lee2f633e52015-08-10 10:45:11 -07002469 user_surface_converter,
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002470 &cmd->targets[i].sid, &res);
Charmaine Lee2f633e52015-08-10 10:45:11 -07002471 if (unlikely(ret != 0))
2472 return ret;
2473
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002474 binding.bi.ctx = ctx_node->ctx;
2475 binding.bi.res = res;
Charmaine Lee2f633e52015-08-10 10:45:11 -07002476		binding.bi.bt = vmw_ctx_binding_so;
2477 binding.offset = cmd->targets[i].offset;
2478 binding.size = cmd->targets[i].sizeInBytes;
2479 binding.slot = i;
2480
Deepak Rawat680360a2019-02-13 13:20:42 -08002481 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
Charmaine Lee2f633e52015-08-10 10:45:11 -07002482 }
2483
2484 return 0;
2485}
2486
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002487static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2488 struct vmw_sw_context *sw_context,
2489 SVGA3dCmdHeader *header)
2490{
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002491 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002492 struct vmw_resource *res;
2493 /*
2494 * This is based on the fact that all affected define commands have
2495 * the same initial command body layout.
2496 */
2497 struct {
2498 SVGA3dCmdHeader header;
2499 uint32 defined_id;
2500 } *cmd;
2501 enum vmw_so_type so_type;
2502 int ret;
2503
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002504 if (!ctx_node)
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002505 return -EINVAL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002506
2507 so_type = vmw_so_cmd_to_type(header->id);
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002508 res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002509 cmd = container_of(header, typeof(*cmd), header);
2510 ret = vmw_cotable_notify(res, cmd->defined_id);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002511
2512 return ret;
2513}
2514
2515/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002516 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2517 * command
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002518 *
2519 * @dev_priv: Pointer to a device private struct.
2520 * @sw_context: The software context being used for this batch.
2521 * @header: Pointer to the command header in the command stream.
2522 */
2523static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2524 struct vmw_sw_context *sw_context,
2525 SVGA3dCmdHeader *header)
2526{
2527 struct {
2528 SVGA3dCmdHeader header;
2529 union {
2530 SVGA3dCmdDXReadbackSubResource r_body;
2531 SVGA3dCmdDXInvalidateSubResource i_body;
2532 SVGA3dCmdDXUpdateSubResource u_body;
2533 SVGA3dSurfaceId sid;
2534 };
2535 } *cmd;
2536
2537 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2538 offsetof(typeof(*cmd), sid));
2539 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2540 offsetof(typeof(*cmd), sid));
2541 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2542 offsetof(typeof(*cmd), sid));
2543
2544 cmd = container_of(header, typeof(*cmd), header);
2545
2546 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
Thomas Hellstroma9f58c42019-02-20 08:21:26 +01002547 VMW_RES_DIRTY_NONE, user_surface_converter,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002548 &cmd->sid, NULL);
2549}
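
/*
 * The union above relies on all three subresource command bodies
 * placing their surface id at the same offset; the BUILD_BUG_ON()s turn
 * any future layout drift into a compile-time error rather than a
 * silent misparse. A minimal standalone sketch of the same idiom:
 *
 *	union u { struct { int a; } x; struct { int b; } y; int id; };
 *	BUILD_BUG_ON(offsetof(union u, x.a) != offsetof(union u, id));
 */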
2550
2551static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2552 struct vmw_sw_context *sw_context,
2553 SVGA3dCmdHeader *header)
2554{
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002555 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002556
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002557 if (!ctx_node)
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002558 return -EINVAL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002559
2560 return 0;
2561}
2562
2563/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002564 * vmw_cmd_dx_view_remove - Validate a view remove command and schedule the view
2565 * resource for removal.
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002566 *
2567 * @dev_priv: Pointer to a device private struct.
2568 * @sw_context: The software context being used for this batch.
2569 * @header: Pointer to the command header in the command stream.
2570 *
Deepak Rawat680360a2019-02-13 13:20:42 -08002571 * Check that the view exists, and if it was not created using this command
2572 * batch, conditionally make this command a NOP.
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002573 */
2574static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2575 struct vmw_sw_context *sw_context,
2576 SVGA3dCmdHeader *header)
2577{
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002578 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002579 struct {
2580 SVGA3dCmdHeader header;
2581 union vmw_view_destroy body;
2582 } *cmd = container_of(header, typeof(*cmd), header);
2583 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2584 struct vmw_resource *view;
2585 int ret;
2586
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002587 if (!ctx_node)
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002588 return -EINVAL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002589
Deepak Rawat680360a2019-02-13 13:20:42 -08002590 ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2591 &sw_context->staged_cmd_res, &view);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002592 if (ret || !view)
2593 return ret;
2594
2595 /*
Thomas Hellstroma1944032016-10-10 11:06:45 -07002596 * If the view wasn't created during this command batch, it might
2597 * have been removed due to a context swapout, so add a
2598 * relocation to conditionally make this command a NOP to avoid
2599 * device errors.
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002600 */
Deepak Rawat680360a2019-02-13 13:20:42 -08002601 return vmw_resource_relocation_add(sw_context, view,
Thomas Hellstroma1944032016-10-10 11:06:45 -07002602 vmw_ptr_diff(sw_context->buf_start,
2603 &cmd->header.id),
2604 vmw_res_rel_cond_nop);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002605}
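
/*
 * Sketch of the conditional-NOP mechanism used above: the relocation
 * records the byte offset of the command id relative to the start of
 * the command buffer. At fixup time, if the view turned out not to be
 * backed by a resource created in this batch, the id at that offset is
 * rewritten so the device sees a harmless no-op instead of a destroy of
 * a non-existent view:
 *
 *	offset = vmw_ptr_diff(sw_context->buf_start, &cmd->header.id);
 *	// later, conditionally: rewrite the u32 at that offset to a NOP id
 *
 * The second line is an assumption-laden paraphrase of the relocation
 * fixup code, not its text.
 */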
2606
2607/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002608 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002609 *
2610 * @dev_priv: Pointer to a device private struct.
2611 * @sw_context: The software context being used for this batch.
2612 * @header: Pointer to the command header in the command stream.
2613 */
2614static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2615 struct vmw_sw_context *sw_context,
2616 SVGA3dCmdHeader *header)
2617{
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002618 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002619 struct vmw_resource *res;
Deepak Rawatd01316d2019-02-08 15:50:40 -08002620 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2621 container_of(header, typeof(*cmd), header);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002622 int ret;
2623
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002624 if (!ctx_node)
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002625 return -EINVAL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002626
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002627 res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002628 ret = vmw_cotable_notify(res, cmd->body.shaderId);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002629 if (ret)
2630 return ret;
2631
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002632 return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002633 cmd->body.shaderId, cmd->body.type,
2634 &sw_context->staged_cmd_res);
2635}
2636
2637/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002638 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002639 *
2640 * @dev_priv: Pointer to a device private struct.
2641 * @sw_context: The software context being used for this batch.
2642 * @header: Pointer to the command header in the command stream.
2643 */
2644static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2645 struct vmw_sw_context *sw_context,
2646 SVGA3dCmdHeader *header)
2647{
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002648 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
Deepak Rawatd01316d2019-02-08 15:50:40 -08002649 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2650 container_of(header, typeof(*cmd), header);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002651 int ret;
2652
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002653 if (!ctx_node)
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002654 return -EINVAL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002655
2656 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2657 &sw_context->staged_cmd_res);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002658
2659 return ret;
2660}
2661
2662/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002663 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002664 *
2665 * @dev_priv: Pointer to a device private struct.
2666 * @sw_context: The software context being used for this batch.
2667 * @header: Pointer to the command header in the command stream.
2668 */
2669static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2670 struct vmw_sw_context *sw_context,
2671 SVGA3dCmdHeader *header)
2672{
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002673 struct vmw_resource *ctx;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002674 struct vmw_resource *res;
Deepak Rawatd01316d2019-02-08 15:50:40 -08002675 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2676 container_of(header, typeof(*cmd), header);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002677 int ret;
2678
2679 if (cmd->body.cid != SVGA3D_INVALID_ID) {
2680 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
Thomas Hellstroma9f58c42019-02-20 08:21:26 +01002681 VMW_RES_DIRTY_SET,
2682 user_context_converter, &cmd->body.cid,
2683 &ctx);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002684 if (ret)
2685 return ret;
2686 } else {
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002687 struct vmw_ctx_validation_info *ctx_node =
2688 VMW_GET_CTX_NODE(sw_context);
2689
2690 if (!ctx_node)
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002691 return -EINVAL;
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002692
2693 ctx = ctx_node->ctx;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002694 }
2695
Deepak Rawat680360a2019-02-13 13:20:42 -08002696 res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002697 if (IS_ERR(res)) {
Deepak Rawat5724f892019-02-11 11:46:27 -08002698 VMW_DEBUG_USER("Could not find shader to bind.\n");
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002699 return PTR_ERR(res);
2700 }
2701
Thomas Hellstroma9f58c42019-02-20 08:21:26 +01002702 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2703 VMW_RES_DIRTY_NONE);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002704 if (ret) {
Deepak Rawat5724f892019-02-11 11:46:27 -08002705 VMW_DEBUG_USER("Error creating resource validation node.\n");
Thomas Hellstrom508108e2018-09-26 16:28:45 +02002706 return ret;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002707 }
2708
Thomas Hellstrom508108e2018-09-26 16:28:45 +02002709 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2710 &cmd->body.mobid,
2711 cmd->body.offsetInBytes);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002712}
2713
Charmaine Leef3b335502016-02-12 08:11:56 +01002714/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002715 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
Charmaine Leef3b335502016-02-12 08:11:56 +01002716 *
2717 * @dev_priv: Pointer to a device private struct.
2718 * @sw_context: The software context being used for this batch.
2719 * @header: Pointer to the command header in the command stream.
2720 */
2721static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2722 struct vmw_sw_context *sw_context,
2723 SVGA3dCmdHeader *header)
2724{
Deepak Rawatd01316d2019-02-08 15:50:40 -08002725 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2726 container_of(header, typeof(*cmd), header);
Charmaine Leef3b335502016-02-12 08:11:56 +01002727
Thomas Hellstrom508108e2018-09-26 16:28:45 +02002728	return PTR_ERR_OR_ZERO(vmw_view_id_val_add(sw_context, vmw_view_sr,
2729 cmd->body.shaderResourceViewId));
Charmaine Leef3b335502016-02-12 08:11:56 +01002730}
2731
Charmaine Lee1f982e42016-10-10 10:37:03 -07002732/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002733 * vmw_cmd_dx_transfer_from_buffer - Validate
2734 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
Charmaine Lee1f982e42016-10-10 10:37:03 -07002735 *
2736 * @dev_priv: Pointer to a device private struct.
2737 * @sw_context: The software context being used for this batch.
2738 * @header: Pointer to the command header in the command stream.
2739 */
2740static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2741 struct vmw_sw_context *sw_context,
2742 SVGA3dCmdHeader *header)
2743{
Deepak Rawatd01316d2019-02-08 15:50:40 -08002744 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2745 container_of(header, typeof(*cmd), header);
Charmaine Lee1f982e42016-10-10 10:37:03 -07002746 int ret;
2747
2748 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
Thomas Hellstroma9f58c42019-02-20 08:21:26 +01002749 VMW_RES_DIRTY_NONE, user_surface_converter,
Charmaine Lee1f982e42016-10-10 10:37:03 -07002750 &cmd->body.srcSid, NULL);
2751 if (ret != 0)
2752 return ret;
2753
2754 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
Thomas Hellstroma9f58c42019-02-20 08:21:26 +01002755 VMW_RES_DIRTY_SET, user_surface_converter,
Charmaine Lee1f982e42016-10-10 10:37:03 -07002756 &cmd->body.destSid, NULL);
2757}
2758
Neha Bhende0d81d342018-06-18 17:14:56 -07002759/**
Deepak Rawat680360a2019-02-13 13:20:42 -08002760 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
Neha Bhende0d81d342018-06-18 17:14:56 -07002761 *
2762 * @dev_priv: Pointer to a device private struct.
2763 * @sw_context: The software context being used for this batch.
2764 * @header: Pointer to the command header in the command stream.
2765 */
2766static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2767 struct vmw_sw_context *sw_context,
2768 SVGA3dCmdHeader *header)
2769{
Deepak Rawatd01316d2019-02-08 15:50:40 -08002770 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2771 container_of(header, typeof(*cmd), header);
Neha Bhende0d81d342018-06-18 17:14:56 -07002772
2773 if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2774 return -EINVAL;
2775
2776 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
Thomas Hellstroma9f58c42019-02-20 08:21:26 +01002777 VMW_RES_DIRTY_SET, user_surface_converter,
2778 &cmd->body.surface.sid, NULL);
Neha Bhende0d81d342018-06-18 17:14:56 -07002779}
2780
Jakob Bornecrantz4084fb82011-10-04 20:13:19 +02002781static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
2782 struct vmw_sw_context *sw_context,
2783 void *buf, uint32_t *size)
2784{
2785 uint32_t size_remaining = *size;
Jakob Bornecrantz4084fb82011-10-04 20:13:19 +02002786 uint32_t cmd_id;
2787
Thomas Hellstromb9eb1a62015-04-02 02:39:45 -07002788 cmd_id = ((uint32_t *)buf)[0];
Jakob Bornecrantz4084fb82011-10-04 20:13:19 +02002789 switch (cmd_id) {
2790 case SVGA_CMD_UPDATE:
2791 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
Jakob Bornecrantz4084fb82011-10-04 20:13:19 +02002792 break;
2793 case SVGA_CMD_DEFINE_GMRFB:
2794 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
2795 break;
2796 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
2797 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
2798 break;
2799 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
2800 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
2801 break;
2802 default:
Deepak Rawat5724f892019-02-11 11:46:27 -08002803 VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
Jakob Bornecrantz4084fb82011-10-04 20:13:19 +02002804 return -EINVAL;
2805 }
2806
2807 if (*size > size_remaining) {
Deepak Rawat5724f892019-02-11 11:46:27 -08002808 VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
2809 cmd_id);
Jakob Bornecrantz4084fb82011-10-04 20:13:19 +02002810 return -EINVAL;
2811 }
2812
Jakob Bornecrantz0cff60c2011-10-04 20:13:27 +02002813 if (unlikely(!sw_context->kernel)) {
Deepak Rawat5724f892019-02-11 11:46:27 -08002814 VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
Jakob Bornecrantz4084fb82011-10-04 20:13:19 +02002815 return -EPERM;
2816 }
2817
2818 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
2819 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
2820
2821 return 0;
2822}
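
/*
 * Example of the 2D (non-3D) command layout handled above, as a minimal
 * sketch: each command is a bare 32-bit id immediately followed by a
 * fixed-size body, so for SVGA_CMD_UPDATE the stream looks like
 *
 *	u32 cmd_id;               // SVGA_CMD_UPDATE
 *	SVGAFifoCmdUpdate body;   // x, y, width, height
 *
 * and *size is simply sizeof(u32) + sizeof(body). Unlike 3D commands
 * there is no self-describing header, which is why the size must be
 * derived from the id here.
 */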
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00002823
Thomas Hellstrom4fbd9d22014-02-12 12:37:01 +01002824static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01002825 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
2826 false, false, false),
2827 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
2828 false, false, false),
2829 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
2830 true, false, false),
2831 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
2832 true, false, false),
2833 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
2834 true, false, false),
2835 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
2836 false, false, false),
2837 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
2838 false, false, false),
2839 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
2840 true, false, false),
2841 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
2842 true, false, false),
2843 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
2844 true, false, false),
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00002845 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01002846 &vmw_cmd_set_render_target_check, true, false, false),
2847 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
2848 true, false, false),
2849 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
2850 true, false, false),
2851 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
2852 true, false, false),
2853 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
2854 true, false, false),
2855 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
2856 true, false, false),
2857 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
2858 true, false, false),
2859 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
2860 true, false, false),
2861 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
2862 false, false, false),
Thomas Hellstromd5bde952014-01-31 10:12:10 +01002863 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
2864 true, false, false),
2865 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
2866 true, false, false),
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01002867 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
2868 true, false, false),
Thomas Hellstrom0ccbbae2014-01-30 11:13:43 +01002869 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
2870 true, false, false),
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01002871 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
2872 true, false, false),
2873 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
2874 true, false, false),
2875 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
2876 true, false, false),
2877 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
2878 true, false, false),
2879 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
2880 true, false, false),
2881 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
2882 true, false, false),
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00002883 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01002884 &vmw_cmd_blt_surf_screen_check, false, false, false),
2885 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
2886 false, false, false),
2887 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
2888 false, false, false),
2889 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
2890 false, false, false),
2891 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
2892 false, false, false),
2893 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
2894 false, false, false),
Deepak Rawatdc75e732018-06-13 13:53:28 -07002895 VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01002896 false, false, false),
Deepak Rawatdc75e732018-06-13 13:53:28 -07002897 VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01002898 false, false, false),
2899 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
2900 false, false, false),
2901 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
2902 false, false, false),
2903 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
2904 false, false, false),
2905 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
2906 false, false, false),
2907 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
2908 false, false, false),
2909 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
2910 false, false, false),
2911 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
2912 false, false, true),
2913 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
2914 false, false, true),
2915 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
2916 false, false, true),
2917 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
2918 false, false, true),
Sinclair Yehfd11a3c2015-08-10 10:56:15 -07002919 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
2920 false, false, true),
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01002921 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
2922 false, false, true),
2923 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
2924 false, false, true),
2925 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
2926 false, false, true),
2927 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
2928 true, false, true),
2929 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
2930 false, false, true),
2931 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
2932 true, false, true),
Thomas Hellstroma97e2192012-11-21 11:45:13 +01002933 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01002934 &vmw_cmd_update_gb_surface, true, false, true),
Thomas Hellstroma97e2192012-11-21 11:45:13 +01002935 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01002936 &vmw_cmd_readback_gb_image, true, false, true),
Thomas Hellstroma97e2192012-11-21 11:45:13 +01002937 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01002938 &vmw_cmd_readback_gb_surface, true, false, true),
Thomas Hellstroma97e2192012-11-21 11:45:13 +01002939 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01002940 &vmw_cmd_invalidate_gb_image, true, false, true),
Thomas Hellstroma97e2192012-11-21 11:45:13 +01002941 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01002942 &vmw_cmd_invalidate_gb_surface, true, false, true),
2943 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
2944 false, false, true),
2945 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
2946 false, false, true),
2947 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
2948 false, false, true),
2949 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
2950 false, false, true),
2951 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
2952 false, false, true),
2953 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
2954 false, false, true),
2955 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
2956 true, false, true),
2957 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
2958 false, false, true),
Thomas Hellstromf2a0dcb2014-01-15 10:04:07 +01002959 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
Thomas Hellstrom8ba07312013-10-08 02:25:35 -07002960 false, false, false),
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01002961 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
2962 true, false, true),
2963 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2964 true, false, true),
2965 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2966 true, false, true),
2967 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2968 true, false, true),
Thomas Hellstrom5f55be5f2017-08-24 08:06:30 +02002969 VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
2970 true, false, true),
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01002971 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
2972 false, false, true),
2973 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
2974 false, false, true),
2975 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
2976 false, false, true),
2977 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
2978 false, false, true),
2979 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
2980 false, false, true),
2981 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
2982 false, false, true),
2983 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
2984 false, false, true),
2985 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
2986 false, false, true),
2987 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2988 false, false, true),
2989 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2990 false, false, true),
2991 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002992 true, false, true),
2993 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
2994 false, false, true),
2995 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
2996 false, false, true),
2997 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
2998 false, false, true),
2999 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3000 false, false, true),
3001
Deepak Rawat680360a2019-02-13 13:20:42 -08003002 /* SM commands */
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003003 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3004 false, false, true),
3005 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3006 false, false, true),
3007 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3008 false, false, true),
3009 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3010 false, false, true),
3011 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3012 false, false, true),
3013 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3014 &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3015 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3016 &vmw_cmd_dx_set_shader_res, true, false, true),
3017 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3018 true, false, true),
Charmaine Lee2f633e52015-08-10 10:45:11 -07003019 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003020 true, false, true),
Charmaine Lee2f633e52015-08-10 10:45:11 -07003021 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003022 true, false, true),
Charmaine Lee2f633e52015-08-10 10:45:11 -07003023 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3024 true, false, true),
3025 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3026 true, false, true),
3027 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3028 &vmw_cmd_dx_cid_check, true, false, true),
3029 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003030 true, false, true),
3031 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3032 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3033 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3034 &vmw_cmd_dx_set_index_buffer, true, false, true),
3035 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3036 &vmw_cmd_dx_set_rendertargets, true, false, true),
3037 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3038 true, false, true),
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003039 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
Charmaine Lee2f633e52015-08-10 10:45:11 -07003040 &vmw_cmd_dx_cid_check, true, false, true),
3041 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3042 &vmw_cmd_dx_cid_check, true, false, true),
Sinclair Yehfd11a3c2015-08-10 10:56:15 -07003043 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003044 true, false, true),
Charmaine Leee02e5882016-04-12 08:19:08 -07003045 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003046 true, false, true),
Sinclair Yehfd11a3c2015-08-10 10:56:15 -07003047 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003048 true, false, true),
Sinclair Yehfd11a3c2015-08-10 10:56:15 -07003049 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
Charmaine Leee02e5882016-04-12 08:19:08 -07003050 &vmw_cmd_dx_cid_check, true, false, true),
3051 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003052 true, false, true),
Charmaine Leee02e5882016-04-12 08:19:08 -07003053 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003054 true, false, true),
3055 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3056 true, false, true),
Charmaine Lee18835982016-04-12 08:14:23 -07003057 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003058 true, false, true),
3059 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3060 true, false, true),
3061 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3062 true, false, true),
3063 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3064 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3065 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3066 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003067 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3068 true, false, true),
Charmaine Leef3b335502016-02-12 08:11:56 +01003069 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003070 true, false, true),
3071 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3072 &vmw_cmd_dx_check_subresource, true, false, true),
3073 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3074 &vmw_cmd_dx_check_subresource, true, false, true),
3075 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3076 &vmw_cmd_dx_check_subresource, true, false, true),
3077 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3078 &vmw_cmd_dx_view_define, true, false, true),
3079 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3080 &vmw_cmd_dx_view_remove, true, false, true),
3081 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3082 &vmw_cmd_dx_view_define, true, false, true),
3083 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3084 &vmw_cmd_dx_view_remove, true, false, true),
3085 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3086 &vmw_cmd_dx_view_define, true, false, true),
3087 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3088 &vmw_cmd_dx_view_remove, true, false, true),
3089 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3090 &vmw_cmd_dx_so_define, true, false, true),
3091 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3092 &vmw_cmd_dx_cid_check, true, false, true),
3093 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3094 &vmw_cmd_dx_so_define, true, false, true),
3095 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3096 &vmw_cmd_dx_cid_check, true, false, true),
3097 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3098 &vmw_cmd_dx_so_define, true, false, true),
3099 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3100 &vmw_cmd_dx_cid_check, true, false, true),
3101 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3102 &vmw_cmd_dx_so_define, true, false, true),
3103 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3104 &vmw_cmd_dx_cid_check, true, false, true),
3105 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3106 &vmw_cmd_dx_so_define, true, false, true),
3107 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3108 &vmw_cmd_dx_cid_check, true, false, true),
3109 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3110 &vmw_cmd_dx_define_shader, true, false, true),
3111 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3112 &vmw_cmd_dx_destroy_shader, true, false, true),
3113 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3114 &vmw_cmd_dx_bind_shader, true, false, true),
3115 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3116 &vmw_cmd_dx_so_define, true, false, true),
3117 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3118 &vmw_cmd_dx_cid_check, true, false, true),
Charmaine Lee2f633e52015-08-10 10:45:11 -07003119 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003120 true, false, true),
Charmaine Lee2f633e52015-08-10 10:45:11 -07003121 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3122 &vmw_cmd_dx_set_so_targets, true, false, true),
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003123 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3124 &vmw_cmd_dx_cid_check, true, false, true),
3125 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3126 &vmw_cmd_dx_cid_check, true, false, true),
Neha Bhende0fca749e2015-08-10 10:51:07 -07003127 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3128 &vmw_cmd_buffer_copy_check, true, false, true),
3129 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3130 &vmw_cmd_pred_copy_check, true, false, true),
Charmaine Lee1f982e42016-10-10 10:37:03 -07003131 VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3132 &vmw_cmd_dx_transfer_from_buffer,
3133 true, false, true),
Neha Bhende0d81d342018-06-18 17:14:56 -07003134 VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3135 true, false, true),
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003136};
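
/*
 * The three booleans in each VMW_CMD_DEF() entry above are, in order,
 * user_allow, gb_disable and gb_enable; vmw_cmd_check() consults them
 * to reject privileged commands from user space and to gate commands on
 * whether the device exposes guest-backed objects. A hypothetical new
 * entry for a user-allowed, GB-only command would therefore read:
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SOMETHING, &vmw_cmd_something_check,
 *		    true, false, true),
 *
 * where both names are placeholders, not identifiers from this driver.
 */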
3137
Thomas Hellstrom65b97a22017-08-24 08:06:29 +02003138bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3139{
3140 u32 cmd_id = ((u32 *) buf)[0];
3141
3142 if (cmd_id >= SVGA_CMD_MAX) {
3143 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3144 const struct vmw_cmd_entry *entry;
3145
3146 *size = header->size + sizeof(SVGA3dCmdHeader);
3147 cmd_id = header->id;
3148 if (cmd_id >= SVGA_3D_CMD_MAX)
3149 return false;
3150
3151 cmd_id -= SVGA_3D_CMD_BASE;
3152 entry = &vmw_cmd_entries[cmd_id];
3153 *cmd = entry->cmd_name;
3154 return true;
3155 }
3156
3157 switch (cmd_id) {
3158 case SVGA_CMD_UPDATE:
3159 *cmd = "SVGA_CMD_UPDATE";
3160 *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3161 break;
3162 case SVGA_CMD_DEFINE_GMRFB:
3163 *cmd = "SVGA_CMD_DEFINE_GMRFB";
3164 *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3165 break;
3166 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3167 *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3168 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3169 break;
3170 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3171 *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3172 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3173 break;
3174 default:
3175 *cmd = "UNKNOWN";
3176 *size = 0;
3177 return false;
3178 }
3179
3180 return true;
3181}
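
/*
 * Usage sketch for vmw_cmd_describe(), e.g. from a debugging path that
 * walks a raw command buffer (a hypothetical caller, not code from this
 * file):
 *
 *	const char *name;
 *	const u8 *p = buf;
 *	u32 size;
 *
 *	while (len > 0 && vmw_cmd_describe(p, &size, &name)) {
 *		pr_info("cmd: %s (%u bytes)\n", name, size);
 *		p += size;
 *		len -= size;
 *	}
 */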
3182
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003183static int vmw_cmd_check(struct vmw_private *dev_priv,
Deepak Rawat680360a2019-02-13 13:20:42 -08003184 struct vmw_sw_context *sw_context, void *buf,
3185 uint32_t *size)
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003186{
3187 uint32_t cmd_id;
Thomas Hellstrom7a73ba72009-12-22 16:53:41 +01003188 uint32_t size_remaining = *size;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003189 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3190 int ret;
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01003191 const struct vmw_cmd_entry *entry;
3192 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003193
Thomas Hellstromb9eb1a62015-04-02 02:39:45 -07003194 cmd_id = ((uint32_t *)buf)[0];
Jakob Bornecrantz4084fb82011-10-04 20:13:19 +02003195	/* Handle any non-3D commands */
3196 if (unlikely(cmd_id < SVGA_CMD_MAX))
3197 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3198
Thomas Hellstromb9eb1a62015-04-02 02:39:45 -07003200 cmd_id = header->id;
3201 *size = header->size + sizeof(SVGA3dCmdHeader);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003202
3203 cmd_id -= SVGA_3D_CMD_BASE;
Thomas Hellstrom7a73ba72009-12-22 16:53:41 +01003204 if (unlikely(*size > size_remaining))
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01003205 goto out_invalid;
Thomas Hellstrom7a73ba72009-12-22 16:53:41 +01003206
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003207 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01003208 goto out_invalid;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003209
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01003210 entry = &vmw_cmd_entries[cmd_id];
Thomas Hellstrom36e952c2014-02-12 13:19:36 +01003211 if (unlikely(!entry->func))
3212 goto out_invalid;
3213
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01003214 if (unlikely(!entry->user_allow && !sw_context->kernel))
3215 goto out_privileged;
3216
3217 if (unlikely(entry->gb_disable && gb))
3218 goto out_old;
3219
3220 if (unlikely(entry->gb_enable && !gb))
3221 goto out_new;
3222
3223 ret = entry->func(dev_priv, sw_context, header);
Deepak Rawat45399b12019-02-11 12:57:38 -08003224 if (unlikely(ret != 0)) {
3225 VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3226 cmd_id + SVGA_3D_CMD_BASE, ret);
3227 return ret;
3228 }
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003229
3230 return 0;
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01003231out_invalid:
Deepak Rawat5724f892019-02-11 11:46:27 -08003232 VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3233 cmd_id + SVGA_3D_CMD_BASE);
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01003234 return -EINVAL;
3235out_privileged:
Deepak Rawat5724f892019-02-11 11:46:27 -08003236 VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3237 cmd_id + SVGA_3D_CMD_BASE);
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01003238 return -EPERM;
3239out_old:
Deepak Rawat5724f892019-02-11 11:46:27 -08003240 VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3241 cmd_id + SVGA_3D_CMD_BASE);
Thomas Hellstromc373d4e2012-11-21 12:22:35 +01003242 return -EINVAL;
3243out_new:
Deepak Rawat5724f892019-02-11 11:46:27 -08003244 VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3245 cmd_id + SVGA_3D_CMD_BASE);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003246 return -EINVAL;
3247}
3248
3249static int vmw_cmd_check_all(struct vmw_private *dev_priv,
Deepak Rawat680360a2019-02-13 13:20:42 -08003250 struct vmw_sw_context *sw_context, void *buf,
Thomas Hellstrombe38ab62011-08-31 07:42:54 +00003251 uint32_t size)
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003252{
3253 int32_t cur_size = size;
3254 int ret;
3255
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003256 sw_context->buf_start = buf;
3257
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003258 while (cur_size > 0) {
Thomas Hellstrom7a73ba72009-12-22 16:53:41 +01003259 size = cur_size;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003260 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3261 if (unlikely(ret != 0))
3262 return ret;
3263 buf = (void *)((unsigned long) buf + size);
3264 cur_size -= size;
3265 }
3266
3267 if (unlikely(cur_size != 0)) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003268 VMW_DEBUG_USER("Command verifier out of sync.\n");
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003269 return -EINVAL;
3270 }
3271
3272 return 0;
3273}
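
/*
 * Worked example of the stream walk above: for a buffer holding two 3D
 * commands of 24 and 40 bytes (header included), the loop runs with
 *
 *	cur_size: 64 -> 40 -> 0
 *
 * each vmw_cmd_check() call writing the true per-command size back
 * through *size. The final cur_size check is a belt-and-braces guard
 * against the verifier and the stream disagreeing about sizes; if it
 * fires, the whole submission is rejected rather than partially
 * committed.
 */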
3274
3275static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3276{
Thomas Hellstromfc18afc2018-09-26 15:36:52 +02003277 /* Memory is validation context memory, so no need to free it */
Thomas Hellstromfc18afc2018-09-26 15:36:52 +02003278 INIT_LIST_HEAD(&sw_context->bo_relocations);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003279}
3280
3281static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3282{
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003283 struct vmw_relocation *reloc;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003284 struct ttm_buffer_object *bo;
3285
Thomas Hellstromfc18afc2018-09-26 15:36:52 +02003286 list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003287 bo = &reloc->vbo->base;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003288 switch (bo->mem.mem_type) {
3289 case TTM_PL_VRAM:
Thomas Hellstrom135cba02010-10-26 21:21:47 +02003290 reloc->location->offset += bo->offset;
3291 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003292 break;
3293 case VMW_PL_GMR:
Thomas Hellstrom135cba02010-10-26 21:21:47 +02003294 reloc->location->gmrId = bo->mem.start;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003295 break;
Thomas Hellstromddcda242012-11-21 11:26:55 +01003296 case VMW_PL_MOB:
3297 *reloc->mob_loc = bo->mem.start;
3298 break;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003299 default:
3300 BUG();
3301 }
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003302 }
3303 vmw_free_relocations(sw_context);
3304}
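
/*
 * Example of what a single relocation fixup above does, sketched for
 * the VRAM case: user space referenced a buffer by handle, the verifier
 * recorded &reloc->location inside the command stream, and once the
 * buffer is validated into VRAM the stale guest pointer is patched in
 * place:
 *
 *	reloc->location->offset += bo->offset;	// now a real VRAM offset
 *	reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
 *
 * GMR and MOB placements instead need only the id, taken from
 * bo->mem.start.
 */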
3305
Thomas Hellstrombe38ab62011-08-31 07:42:54 +00003306static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3307 uint32_t size)
3308{
3309 if (likely(sw_context->cmd_bounce_size >= size))
3310 return 0;
3311
3312 if (sw_context->cmd_bounce_size == 0)
3313 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3314
3315 while (sw_context->cmd_bounce_size < size) {
3316 sw_context->cmd_bounce_size =
3317 PAGE_ALIGN(sw_context->cmd_bounce_size +
3318 (sw_context->cmd_bounce_size >> 1));
3319 }
3320
Markus Elfring0bc32992016-07-22 13:31:00 +02003321 vfree(sw_context->cmd_bounce);
Thomas Hellstrombe38ab62011-08-31 07:42:54 +00003322 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3323
3324 if (sw_context->cmd_bounce == NULL) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003325 VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
Thomas Hellstrombe38ab62011-08-31 07:42:54 +00003326 sw_context->cmd_bounce_size = 0;
3327 return -ENOMEM;
3328 }
3329
3330 return 0;
3331}
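
/*
 * Illustrative sketch (not part of the driver; the helper name is made up):
 * the resize policy above grows the bounce buffer geometrically, adding half
 * of the current size and page-aligning the result, so a long run of growing
 * submissions causes only O(log n) reallocations.
 */
static inline uint32_t vmw_cmd_bounce_grown_size_example(uint32_t cur,
							 uint32_t size)
{
	if (cur == 0)
		cur = VMWGFX_CMD_BOUNCE_INIT_SIZE;
	while (cur < size)
		cur = PAGE_ALIGN(cur + (cur >> 1));	/* grow by ~1.5x */
	return cur;
}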
3332
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003333/**
3334 * vmw_execbuf_fence_commands - create and submit a command stream fence
3335 *
3336 * Creates a fence object and submits a command stream marker.
3337 * If this fails for some reason, we sync the fifo and return NULL.
3338 * It is then safe to fence buffers with a NULL pointer.
Jakob Bornecrantz6070e9f2011-10-04 20:13:16 +02003339 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003340 * If @p_handle is not NULL, @file_priv must also not be NULL. Creates a
3341 * user-space handle if @p_handle is not NULL, otherwise not.
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003342 */
3344int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3345 struct vmw_private *dev_priv,
3346 struct vmw_fence_obj **p_fence,
3347 uint32_t *p_handle)
3348{
3349 uint32_t sequence;
3350 int ret;
3351 bool synced = false;
3352
Jakob Bornecrantz6070e9f2011-10-04 20:13:16 +02003353 /* p_handle implies file_priv. */
3354 BUG_ON(p_handle != NULL && file_priv == NULL);
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003355
3356 ret = vmw_fifo_send_fence(dev_priv, &sequence);
3357 if (unlikely(ret != 0)) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003358 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003359 synced = true;
3360 }
3361
3362 if (p_handle != NULL)
3363 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
Maarten Lankhorstc060a4e2014-03-26 13:06:24 +01003364 sequence, p_fence, p_handle);
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003365 else
Maarten Lankhorstc060a4e2014-03-26 13:06:24 +01003366 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003367
3368 if (unlikely(ret != 0 && !synced)) {
Deepak Rawat680360a2019-02-13 13:20:42 -08003369 (void) vmw_fallback_wait(dev_priv, false, false, sequence,
3370 false, VMW_FENCE_WAIT_TIMEOUT);
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003371 *p_fence = NULL;
3372 }
3373
Thomas Hellstrom728354c2019-01-31 10:55:37 +01003374 return ret;
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003375}
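
/*
 * Illustrative sketch (not part of the driver; the function name is made
 * up): a kernel-internal caller that needs no user-space handle passes NULL
 * for both @file_priv and @p_handle. If fencing failed, *p_fence is NULL and
 * the fifo has already been synced, so fencing with the NULL pointer is safe.
 */
static void vmw_fence_commands_example(struct vmw_private *dev_priv)
{
	struct vmw_fence_obj *fence = NULL;

	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	if (fence != NULL)
		vmw_fence_obj_unreference(&fence);
}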
3376
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003377/**
Deepak Rawat680360a2019-02-13 13:20:42 -08003378 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003379 *
3380 * @dev_priv: Pointer to a vmw_private struct.
3381 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3382 * @ret: Return value from fence object creation.
Deepak Rawat680360a2019-02-13 13:20:42 -08003383 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3384 * the information should be copied.
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003385 * @fence: Pointer to the fence object.
3386 * @fence_handle: User-space fence handle.
Sinclair Yehc906965d2017-07-05 01:49:32 -07003387 * @out_fence_fd: Exported file descriptor for the fence, or -1 if not used.
3388 * @sync_file: Only used to clean up in case of an error in this function.
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003389 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003390 * This function copies fence information to user-space. If copying fails, the
3391 * user-space struct drm_vmw_fence_rep::error member is hopefully left
3392 * untouched; if user-space has preloaded it with -EFAULT, the error will
3393 * then be detected.
3394 *
3395 * Also if copying fails, user-space will be unable to signal the fence object
3396 * so we wait for it immediately, and then unreference the user-space reference.
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003397 */
Thomas Hellstrom57c5ee72011-10-10 12:23:26 +02003398void
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003399vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
Deepak Rawat680360a2019-02-13 13:20:42 -08003400 struct vmw_fpriv *vmw_fp, int ret,
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003401 struct drm_vmw_fence_rep __user *user_fence_rep,
Deepak Rawat680360a2019-02-13 13:20:42 -08003402 struct vmw_fence_obj *fence, uint32_t fence_handle,
3403 int32_t out_fence_fd, struct sync_file *sync_file)
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003404{
3405 struct drm_vmw_fence_rep fence_rep;
3406
3407 if (user_fence_rep == NULL)
3408 return;
3409
Dan Carpenter80d9b242011-10-18 09:10:12 +03003410 memset(&fence_rep, 0, sizeof(fence_rep));
3411
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003412 fence_rep.error = ret;
Sinclair Yehc906965d2017-07-05 01:49:32 -07003413 fence_rep.fd = out_fence_fd;
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003414 if (ret == 0) {
3415 BUG_ON(fence == NULL);
3416
3417 fence_rep.handle = fence_handle;
Maarten Lankhorst2298e802014-03-26 14:07:44 +01003418 fence_rep.seqno = fence->base.seqno;
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003419 vmw_update_seqno(dev_priv, &dev_priv->fifo);
3420 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3421 }
3422
3423 /*
Deepak Rawat680360a2019-02-13 13:20:42 -08003424 * copy_to_user errors will be detected by user space not seeing
3425 * fence_rep::error filled in. Typically user-space would have pre-set
3426 * that member to -EFAULT.
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003427 */
3428 ret = copy_to_user(user_fence_rep, &fence_rep,
3429 sizeof(fence_rep));
3430
3431 /*
Deepak Rawat680360a2019-02-13 13:20:42 -08003432 * User-space lost the fence object. We need to sync and unreference the
3433 * handle.
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003434 */
3435 if (unlikely(ret != 0) && (fence_rep.error == 0)) {
Sinclair Yehc906965d2017-07-05 01:49:32 -07003436 if (sync_file)
3437 fput(sync_file->file);
3438
3439 if (fence_rep.fd != -1) {
3440 put_unused_fd(fence_rep.fd);
3441 fence_rep.fd = -1;
3442 }
3443
Deepak Rawat680360a2019-02-13 13:20:42 -08003444 ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
3445 TTM_REF_USAGE);
Deepak Rawat5724f892019-02-11 11:46:27 -08003446 VMW_DEBUG_USER("Fence copy error. Syncing.\n");
Maarten Lankhorstc060a4e2014-03-26 13:06:24 +01003447 (void) vmw_fence_obj_wait(fence, false, false,
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003448 VMW_FENCE_WAIT_TIMEOUT);
3449 }
3450}
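
/*
 * Illustrative sketch (not part of the driver): the user-space half of the
 * error protocol described above. The caller pre-sets fence_rep.error to
 * -EFAULT before submitting; if the kernel's copy_to_user() fails, the field
 * keeps that value and the failure is detectable. The ioctl macro and
 * argument setup are assumed from libdrm and may differ.
 */
#if 0	/* user-space example, not built as part of the kernel */
	struct drm_vmw_fence_rep fence_rep = { .error = -EFAULT };
	struct drm_vmw_execbuf_arg arg = { /* commands, sizes, ... */ };

	arg.fence_rep = (unsigned long) &fence_rep;
	if (drmIoctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg) == 0 &&
	    fence_rep.error == 0) {
		/* fence_rep.handle and fence_rep.seqno are now valid. */
	}
#endif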
3451
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003452/**
Deepak Rawat680360a2019-02-13 13:20:42 -08003453 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003454 *
3455 * @dev_priv: Pointer to a device private structure.
3456 * @kernel_commands: Pointer to the unpatched command batch.
3457 * @command_size: Size of the unpatched command batch.
3458 * @sw_context: Structure holding the relocation lists.
3459 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003460 * Side effects: If this function returns 0, then the command batch pointed to
3461 * by @kernel_commands will have been modified.
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003462 */
3463static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
Deepak Rawat680360a2019-02-13 13:20:42 -08003464 void *kernel_commands, u32 command_size,
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003465 struct vmw_sw_context *sw_context)
3466{
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003467 void *cmd;
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02003468
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003469 if (sw_context->dx_ctx_node)
Deepak Rawat11c45412019-02-14 16:15:39 -08003470 cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003471 sw_context->dx_ctx_node->ctx->id);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003472 else
Deepak Rawat11c45412019-02-14 16:15:39 -08003473 cmd = VMW_FIFO_RESERVE(dev_priv, command_size);
3474
3475 if (!cmd)
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003476 return -ENOMEM;
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003477
3478 vmw_apply_relocations(sw_context);
3479 memcpy(cmd, kernel_commands, command_size);
3480 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3481 vmw_resource_relocations_free(&sw_context->res_relocations);
3482 vmw_fifo_commit(dev_priv, command_size);
3483
3484 return 0;
3485}
3486
3487/**
Deepak Rawat680360a2019-02-13 13:20:42 -08003488 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3489 * command buffer manager.
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003490 *
3491 * @dev_priv: Pointer to a device private structure.
3492 * @header: Opaque handle to the command buffer allocation.
3493 * @command_size: Size of the unpatched command batch.
3494 * @sw_context: Structure holding the relocation lists.
3495 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003496 * Side effects: If this function returns 0, then the command buffer represented
3497 * by @header will have been modified.
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003498 */
3499static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3500 struct vmw_cmdbuf_header *header,
3501 u32 command_size,
3502 struct vmw_sw_context *sw_context)
3503{
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003504 u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003505 SVGA3D_INVALID_ID);
Deepak Rawat680360a2019-02-13 13:20:42 -08003506 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3507 header);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003508
3509 vmw_apply_relocations(sw_context);
3510 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3511 vmw_resource_relocations_free(&sw_context->res_relocations);
3512 vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3513
3514 return 0;
3515}
3516
3517/**
3518 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3519 * submission using a command buffer.
3520 *
3521 * @dev_priv: Pointer to a device private structure.
3522 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to an already kernel-resident command batch, or
 * NULL if the batch is to be copied from @user_commands.
3523 * @command_size: Size of the unpatched command batch.
3524 * @header: Out parameter returning the opaque pointer to the command buffer.
3525 *
3526 * This function checks whether we can use the command buffer manager for
Deepak Rawat680360a2019-02-13 13:20:42 -08003527 * submission and if so, creates a command buffer of suitable size and copies
3528 * the user data into that buffer.
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003529 *
3530 * On successful return, the function returns a pointer to the data in the
3531 * command buffer and *@header is set to non-NULL.
Deepak Rawat680360a2019-02-13 13:20:42 -08003532 *
3533 * If command buffers could not be used, the function will return the value of
3534 * @kernel_commands on function call. That value may be NULL. In that case, the
3535 * value of *@header will be set to NULL.
3536 *
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003537 * If an error is encountered, the function will return a pointer error value.
3538 * If the function is interrupted by a signal while sleeping, it will return
3539 * -ERESTARTSYS cast to a pointer error value.
3540 */
Thomas Hellstromb9eb1a62015-04-02 02:39:45 -07003541static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3542 void __user *user_commands,
Deepak Rawat680360a2019-02-13 13:20:42 -08003543 void *kernel_commands, u32 command_size,
Thomas Hellstromb9eb1a62015-04-02 02:39:45 -07003544 struct vmw_cmdbuf_header **header)
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003545{
3546 size_t cmdbuf_size;
3547 int ret;
3548
3549 *header = NULL;
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003550 if (command_size > SVGA_CB_MAX_SIZE) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003551 VMW_DEBUG_USER("Command buffer is too large.\n");
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003552 return ERR_PTR(-EINVAL);
3553 }
3554
Thomas Hellstrom51ab70b2016-10-10 10:51:24 -07003555 if (!dev_priv->cman || kernel_commands)
3556 return kernel_commands;
3557
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003558 /* If possible, add a little space for fencing. */
3559 cmdbuf_size = command_size + 512;
3560 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
Deepak Rawat680360a2019-02-13 13:20:42 -08003561 kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
3562 header);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003563 if (IS_ERR(kernel_commands))
3564 return kernel_commands;
3565
Deepak Rawat680360a2019-02-13 13:20:42 -08003566 ret = copy_from_user(kernel_commands, user_commands, command_size);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003567 if (ret) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003568 VMW_DEBUG_USER("Failed copying commands.\n");
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003569 vmw_cmdbuf_header_free(*header);
3570 *header = NULL;
3571 return ERR_PTR(-EFAULT);
3572 }
3573
3574 return kernel_commands;
3575}
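
/*
 * Illustrative sketch (not part of the driver; the helper name is made up):
 * the reservation size chosen above. A little headroom is added so a fence
 * command can later be appended to the same command buffer, and the result
 * is clamped to the device's maximum command buffer size.
 */
static inline size_t vmw_cmdbuf_alloc_size_example(size_t command_size)
{
	return min_t(size_t, command_size + 512, SVGA_CB_MAX_SIZE);
}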
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02003576
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003577static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3578 struct vmw_sw_context *sw_context,
3579 uint32_t handle)
3580{
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003581 struct vmw_resource *res;
3582 int ret;
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +02003583 unsigned int size;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003584
3585 if (handle == SVGA3D_INVALID_ID)
3586 return 0;
3587
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +02003588 size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
3589 ret = vmw_validation_preload_res(sw_context->ctx, size);
3590 if (ret)
3591 return ret;
3592
3593 res = vmw_user_resource_noref_lookup_handle
3594 (dev_priv, sw_context->fp->tfile, handle,
3595 user_context_converter);
Chengguang Xu4efa6662019-03-01 10:14:06 -08003596 if (IS_ERR(res)) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003597 VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
3598 (unsigned int) handle);
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +02003599 return PTR_ERR(res);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003600 }
3601
Thomas Hellstroma9f58c42019-02-20 08:21:26 +01003602 ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003603 if (unlikely(ret != 0))
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +02003604 return ret;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003605
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003606 sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003607 sw_context->man = vmw_context_res_man(res);
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +02003608
3609 return 0;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003610}
3611
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003612int vmw_execbuf_process(struct drm_file *file_priv,
3613 struct vmw_private *dev_priv,
Deepak Rawat680360a2019-02-13 13:20:42 -08003614 void __user *user_commands, void *kernel_commands,
3615 uint32_t command_size, uint64_t throttle_us,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003616 uint32_t dx_context_handle,
Jakob Bornecrantzbb1bd2f2012-02-09 16:56:43 +01003617 struct drm_vmw_fence_rep __user *user_fence_rep,
Deepak Rawat680360a2019-02-13 13:20:42 -08003618 struct vmw_fence_obj **out_fence, uint32_t flags)
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003619{
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003620 struct vmw_sw_context *sw_context = &dev_priv->ctx;
Jakob Bornecrantzbb1bd2f2012-02-09 16:56:43 +01003621 struct vmw_fence_obj *fence = NULL;
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003622 struct vmw_cmdbuf_header *header;
Nathan Chancellora5020f42019-03-11 20:24:46 -07003623 uint32_t handle = 0;
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003624 int ret;
Sinclair Yehc906965d2017-07-05 01:49:32 -07003625 int32_t out_fence_fd = -1;
3626 struct sync_file *sync_file = NULL;
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003627 DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
Sinclair Yehc906965d2017-07-05 01:49:32 -07003628
Thomas Hellstromfd567462018-12-12 11:52:08 +01003629 vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
3630
Sinclair Yehc906965d2017-07-05 01:49:32 -07003631 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3632 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
3633 if (out_fence_fd < 0) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003634 VMW_DEBUG_USER("Failed to get a fence fd.\n");
Sinclair Yehc906965d2017-07-05 01:49:32 -07003635 return out_fence_fd;
3636 }
3637 }
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003638
Charmaine Lee2f633e52015-08-10 10:45:11 -07003639 if (throttle_us) {
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003640 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3641 throttle_us);
Charmaine Lee2f633e52015-08-10 10:45:11 -07003642
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003643 if (ret)
Sinclair Yehc906965d2017-07-05 01:49:32 -07003644 goto out_free_fence_fd;
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003645 }
Charmaine Lee2f633e52015-08-10 10:45:11 -07003646
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003647 kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3648 kernel_commands, command_size,
3649 &header);
Sinclair Yehc906965d2017-07-05 01:49:32 -07003650 if (IS_ERR(kernel_commands)) {
3651 ret = PTR_ERR(kernel_commands);
3652 goto out_free_fence_fd;
3653 }
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003654
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003655 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003656 if (ret) {
3657 ret = -ERESTARTSYS;
3658 goto out_free_header;
3659 }
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003660
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003661 sw_context->kernel = false;
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003662 if (kernel_commands == NULL) {
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003663 ret = vmw_resize_cmd_bounce(sw_context, command_size);
3664 if (unlikely(ret != 0))
3665 goto out_unlock;
3666
Deepak Rawat680360a2019-02-13 13:20:42 -08003667 ret = copy_from_user(sw_context->cmd_bounce, user_commands,
3668 command_size);
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003669 if (unlikely(ret != 0)) {
3670 ret = -EFAULT;
Deepak Rawat5724f892019-02-11 11:46:27 -08003671 VMW_DEBUG_USER("Failed copying commands.\n");
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003672 goto out_unlock;
3673 }
Deepak Rawat680360a2019-02-13 13:20:42 -08003674
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003675 kernel_commands = sw_context->cmd_bounce;
Deepak Rawat680360a2019-02-13 13:20:42 -08003676 } else if (!header) {
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003677 sw_context->kernel = true;
Deepak Rawat680360a2019-02-13 13:20:42 -08003678 }
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003679
Thomas Hellstromd5bde952014-01-31 10:12:10 +01003680 sw_context->fp = vmw_fpriv(file_priv);
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003681 INIT_LIST_HEAD(&sw_context->ctx_list);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003682 sw_context->cur_query_bo = dev_priv->pinned_bo;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003683 sw_context->last_query_ctx = NULL;
3684 sw_context->needs_post_query_barrier = false;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003685 sw_context->dx_ctx_node = NULL;
Sinclair Yehfd11a3c2015-08-10 10:56:15 -07003686 sw_context->dx_query_mob = NULL;
3687 sw_context->dx_query_ctx = NULL;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003688 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003689 INIT_LIST_HEAD(&sw_context->res_relocations);
Thomas Hellstromfc18afc2018-09-26 15:36:52 +02003690 INIT_LIST_HEAD(&sw_context->bo_relocations);
Deepak Rawat680360a2019-02-13 13:20:42 -08003691
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003692 if (sw_context->staged_bindings)
3693 vmw_binding_state_reset(sw_context->staged_bindings);
3694
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003695 if (!sw_context->res_ht_initialized) {
3696 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
3697 if (unlikely(ret != 0))
3698 goto out_unlock;
Deepak Rawat680360a2019-02-13 13:20:42 -08003699
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003700 sw_context->res_ht_initialized = true;
3701 }
Deepak Rawat680360a2019-02-13 13:20:42 -08003702
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02003703 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003704 sw_context->ctx = &val_ctx;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003705 ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003706 if (unlikely(ret != 0))
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003707 goto out_err_nores;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003708
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003709 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
3710 command_size);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003711 if (unlikely(ret != 0))
Thomas Hellstromcf5e3412014-01-30 10:58:19 +01003712 goto out_err_nores;
Thomas Hellstrombe38ab62011-08-31 07:42:54 +00003713
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003714 ret = vmw_resources_reserve(sw_context);
3715 if (unlikely(ret != 0))
Thomas Hellstromcf5e3412014-01-30 10:58:19 +01003716 goto out_err_nores;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003717
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003718 ret = vmw_validation_bo_reserve(&val_ctx, true);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003719 if (unlikely(ret != 0))
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003720 goto out_err_nores;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003721
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003722 ret = vmw_validation_bo_validate(&val_ctx, true);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003723 if (unlikely(ret != 0))
3724 goto out_err;
3725
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003726 ret = vmw_validation_res_validate(&val_ctx, true);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003727 if (unlikely(ret != 0))
3728 goto out_err;
Deepak Rawat680360a2019-02-13 13:20:42 -08003729
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003730 vmw_validation_drop_ht(&val_ctx);
Thomas Hellstrom1925d452010-05-28 11:21:57 +02003731
Thomas Hellstrom173fb7d2013-10-08 02:32:36 -07003732 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
3733 if (unlikely(ret != 0)) {
3734 ret = -ERESTARTSYS;
3735 goto out_err;
3736 }
3737
Thomas Hellstrom30f82d812014-02-05 08:13:56 +01003738 if (dev_priv->has_mob) {
3739 ret = vmw_rebind_contexts(sw_context);
3740 if (unlikely(ret != 0))
Dan Carpenterb2ad9882014-02-11 19:03:47 +03003741 goto out_unlock_binding;
Thomas Hellstrom30f82d812014-02-05 08:13:56 +01003742 }
3743
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003744 if (!header) {
3745 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
3746 command_size, sw_context);
3747 } else {
3748 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
3749 sw_context);
3750 header = NULL;
Thomas Hellstrombe38ab62011-08-31 07:42:54 +00003751 }
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003752 mutex_unlock(&dev_priv->binding_mutex);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003753 if (ret)
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003754 goto out_err;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003755
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003756 vmw_query_bo_switch_commit(dev_priv, sw_context);
Deepak Rawat680360a2019-02-13 13:20:42 -08003757 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003758 (user_fence_rep) ? &handle : NULL);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003759 /*
3760 * This error is harmless, because if fence submission fails,
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003761 * vmw_fifo_send_fence will sync. The error will be propagated to
3762 * user-space in @fence_rep.
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003763 */
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003764 if (ret != 0)
Deepak Rawat5724f892019-02-11 11:46:27 -08003765 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003766
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003767 vmw_execbuf_bindings_commit(sw_context, false);
3768 vmw_bind_dx_query_mob(sw_context);
3769 vmw_validation_res_unreserve(&val_ctx, false);
Thomas Hellstrom173fb7d2013-10-08 02:32:36 -07003770
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003771 vmw_validation_bo_fence(sw_context->ctx, fence);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003772
Deepak Rawat680360a2019-02-13 13:20:42 -08003773 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003774 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
3775
Sinclair Yehc906965d2017-07-05 01:49:32 -07003776 /*
Deepak Rawat680360a2019-02-13 13:20:42 -08003777 * If anything fails here, give up trying to export the fence and do a
3778 * sync since the user mode will not be able to sync the fence itself.
3779 * This ensures we are still functionally correct.
Sinclair Yehc906965d2017-07-05 01:49:32 -07003780 */
3781 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3783 sync_file = sync_file_create(&fence->base);
3784 if (!sync_file) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003785 VMW_DEBUG_USER("Sync file create failed for fence\n");
Sinclair Yehc906965d2017-07-05 01:49:32 -07003786 put_unused_fd(out_fence_fd);
3787 out_fence_fd = -1;
3788
3789 (void) vmw_fence_obj_wait(fence, false, false,
3790 VMW_FENCE_WAIT_TIMEOUT);
3791 } else {
3792 /* Link the fence with the FD created earlier */
3793 fd_install(out_fence_fd, sync_file->file);
3794 }
3795 }
3796
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003797 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
Deepak Rawat680360a2019-02-13 13:20:42 -08003798 user_fence_rep, fence, handle, out_fence_fd,
3799 sync_file);
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003800
Jakob Bornecrantzbb1bd2f2012-02-09 16:56:43 +01003801 /* Don't unreference when handing fence out */
3802 if (unlikely(out_fence != NULL)) {
3803 *out_fence = fence;
3804 fence = NULL;
3805 } else if (likely(fence != NULL)) {
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003806 vmw_fence_obj_unreference(&fence);
Jakob Bornecrantzbb1bd2f2012-02-09 16:56:43 +01003807 }
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003808
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02003809 vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003810 mutex_unlock(&dev_priv->cmdbuf_mutex);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003811
3812 /*
Deepak Rawat680360a2019-02-13 13:20:42 -08003813 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
3814 * in resource destruction paths.
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003815 */
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003816 vmw_validation_unref_lists(&val_ctx);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003817
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003818 return 0;
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003819
Thomas Hellstrom173fb7d2013-10-08 02:32:36 -07003820out_unlock_binding:
3821 mutex_unlock(&dev_priv->binding_mutex);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003822out_err:
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003823 vmw_validation_bo_backoff(&val_ctx);
Thomas Hellstromcf5e3412014-01-30 10:58:19 +01003824out_err_nores:
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003825 vmw_execbuf_bindings_commit(sw_context, true);
3826 vmw_validation_res_unreserve(&val_ctx, true);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003827 vmw_resource_relocations_free(&sw_context->res_relocations);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003828 vmw_free_relocations(sw_context);
Deepak Rawat680360a2019-02-13 13:20:42 -08003829 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003830 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003831out_unlock:
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02003832 vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003833 vmw_validation_drop_ht(&val_ctx);
3834 WARN_ON(!list_empty(&sw_context->ctx_list));
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003835 mutex_unlock(&dev_priv->cmdbuf_mutex);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003836
3837 /*
Deepak Rawat680360a2019-02-13 13:20:42 -08003838 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
3839 * in resource destruction paths.
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003840 */
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003841 vmw_validation_unref_lists(&val_ctx);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003842out_free_header:
3843 if (header)
3844 vmw_cmdbuf_header_free(header);
Sinclair Yehc906965d2017-07-05 01:49:32 -07003845out_free_fence_fd:
3846 if (out_fence_fd >= 0)
3847 put_unused_fd(out_fence_fd);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003848
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003849 return ret;
3850}
3851
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003852/**
3853 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
3854 *
3855 * @dev_priv: The device private structure.
3856 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003857 * This function is called to idle the fifo and unpin the query buffer if the
3858 * normal way to do this hits an error, which should typically be extremely
3859 * rare.
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003860 */
3861static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
3862{
Deepak Rawat5724f892019-02-11 11:46:27 -08003863 VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003864
3865 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
Thomas Hellstrom459d0fa2015-06-26 00:25:37 -07003866 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
3867 if (dev_priv->dummy_query_bo_pinned) {
3868 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
3869 dev_priv->dummy_query_bo_pinned = false;
3870 }
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003871}
3872
3874/**
Deepak Rawat680360a2019-02-13 13:20:42 -08003875 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
3876 * bo.
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003877 *
3878 * @dev_priv: The device private structure.
Deepak Rawat680360a2019-02-13 13:20:42 -08003879 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
3880 * query barrier that flushes all queries touching the current buffer pointed to
3881 * by @dev_priv->pinned_bo.
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003882 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003883 * This function should be used to unpin the pinned query bo, or as a query
3884 * barrier when we need to make sure that all queries have finished before the
3885 * next fifo command. (For example on hardware context destructions where the
3886 * hardware may otherwise leak unfinished queries).
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003887 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003888 * This function does not return any failure codes, but makes attempts to do safe
3889 * unpinning in case of errors.
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003890 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003891 * The function will synchronize on the previous query barrier, and will thus
3892 * not finish until that barrier has executed.
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003893 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003894 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
3895 * calling this function.
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003896 */
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003897void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
3898 struct vmw_fence_obj *fence)
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003899{
3900 int ret = 0;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003901 struct vmw_fence_obj *lfence = NULL;
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003902 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003903
3904 if (dev_priv->pinned_bo == NULL)
3905 goto out_unlock;
3906
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003907 ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
3908 false);
3909 if (ret)
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003910 goto out_no_reserve;
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003911
3912 ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
3913 false);
3914 if (ret)
3915 goto out_no_reserve;
3916
3917 ret = vmw_validation_bo_reserve(&val_ctx, false);
3918 if (ret)
3919 goto out_no_reserve;
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003920
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003921 if (dev_priv->query_cid_valid) {
3922 BUG_ON(fence != NULL);
3923 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003924 if (ret)
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003925 goto out_no_emit;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003926 dev_priv->query_cid_valid = false;
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003927 }
3928
Thomas Hellstrom459d0fa2015-06-26 00:25:37 -07003929 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
3930 if (dev_priv->dummy_query_bo_pinned) {
3931 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
3932 dev_priv->dummy_query_bo_pinned = false;
3933 }
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003934 if (fence == NULL) {
3935 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
3936 NULL);
3937 fence = lfence;
3938 }
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003939 vmw_validation_bo_fence(&val_ctx, fence);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003940 if (lfence != NULL)
3941 vmw_fence_obj_unreference(&lfence);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003942
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003943 vmw_validation_unref_lists(&val_ctx);
Thomas Hellstromf1d34bf2018-06-19 15:02:16 +02003944 vmw_bo_unreference(&dev_priv->pinned_bo);
Deepak Rawat680360a2019-02-13 13:20:42 -08003945
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003946out_unlock:
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003947 return;
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003948out_no_emit:
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003949 vmw_validation_bo_backoff(&val_ctx);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003950out_no_reserve:
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003951 vmw_validation_unref_lists(&val_ctx);
3952 vmw_execbuf_unpin_panic(dev_priv);
Thomas Hellstromf1d34bf2018-06-19 15:02:16 +02003953 vmw_bo_unreference(&dev_priv->pinned_bo);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003954}
3955
3956/**
Deepak Rawat680360a2019-02-13 13:20:42 -08003957 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003958 *
3959 * @dev_priv: The device private structure.
3960 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003961 * This function should be used to unpin the pinned query bo, or as a query
3962 * barrier when we need to make sure that all queries have finished before the
3963 * next fifo command. (For example on hardware context destructions where the
3964 * hardware may otherwise leak unfinished queries).
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003965 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003966 * This function does not return any failure codes, but makes attempts to do safe
3967 * unpinning in case of errors.
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003968 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003969 * The function will synchronize on the previous query barrier, and will thus
3970 * not finish until that barrier has executed.
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003971 */
3972void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
3973{
3974 mutex_lock(&dev_priv->cmdbuf_mutex);
3975 if (dev_priv->query_cid_valid)
3976 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003977 mutex_unlock(&dev_priv->cmdbuf_mutex);
3978}
3979
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003980int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
3981 struct drm_file *file_priv, size_t size)
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003982{
3983 struct vmw_private *dev_priv = vmw_priv(dev);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003984 struct drm_vmw_execbuf_arg arg;
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003985 int ret;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003986 static const size_t copy_offset[] = {
3987 offsetof(struct drm_vmw_execbuf_arg, context_handle),
3988 sizeof(struct drm_vmw_execbuf_arg)};
Sinclair Yeh585851162017-07-05 01:45:40 -07003989 struct dma_fence *in_fence = NULL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003990
3991 if (unlikely(size < copy_offset[0])) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003992 VMW_DEBUG_USER("Invalid command size, ioctl %d\n",
3993 DRM_VMW_EXECBUF);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003994 return -EINVAL;
3995 }
3996
3997 if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
3998 return -EFAULT;
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003999
4000 /*
Deepak Rawat680360a2019-02-13 13:20:42 -08004001 * Extend the ioctl argument while maintaining backwards compatibility:
4002 * We take different code paths depending on the value of arg.version.
Thomas Hellstrom922ade02011-10-04 20:13:17 +02004003 */
Thomas Hellstromd80efd52015-08-10 10:39:35 -07004004 if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4005 arg.version == 0)) {
Deepak Rawat5724f892019-02-11 11:46:27 -08004006 VMW_DEBUG_USER("Incorrect execbuf version.\n");
Thomas Hellstrom922ade02011-10-04 20:13:17 +02004007 return -EINVAL;
4008 }
4009
Thomas Hellstromd80efd52015-08-10 10:39:35 -07004010 if (arg.version > 1 &&
4011 copy_from_user(&arg.context_handle,
4012 (void __user *) (data + copy_offset[0]),
Deepak Rawat680360a2019-02-13 13:20:42 -08004013 copy_offset[arg.version - 1] - copy_offset[0]) != 0)
Thomas Hellstromd80efd52015-08-10 10:39:35 -07004014 return -EFAULT;
4015
4016 switch (arg.version) {
4017 case 1:
4018 arg.context_handle = (uint32_t) -1;
4019 break;
4020 case 2:
Thomas Hellstromd80efd52015-08-10 10:39:35 -07004021 default:
4022 break;
4023 }
4024
Sinclair Yeh585851162017-07-05 01:45:40 -07004025 /* If a fence FD was imported from elsewhere, wait on it */
4026 if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4027 in_fence = sync_file_get_fence(arg.imported_fence_fd);
4028
4029 if (!in_fence) {
Deepak Rawat5724f892019-02-11 11:46:27 -08004030 VMW_DEBUG_USER("Cannot get imported fence\n");
Sinclair Yeh585851162017-07-05 01:45:40 -07004031 return -EINVAL;
4032 }
4033
4034 ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
4035 if (ret)
4036 goto out;
4037 }
4038
Thomas Hellstrom294adf72014-02-27 12:34:51 +01004039 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
Thomas Hellstrom922ade02011-10-04 20:13:17 +02004040 if (unlikely(ret != 0))
4041 return ret;
4042
4043 ret = vmw_execbuf_process(file_priv, dev_priv,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07004044 (void __user *)(unsigned long)arg.commands,
4045 NULL, arg.command_size, arg.throttle_us,
4046 arg.context_handle,
4047 (void __user *)(unsigned long)arg.fence_rep,
Deepak Rawat680360a2019-02-13 13:20:42 -08004048 NULL, arg.flags);
4049
Thomas Hellstrom5151adb2015-03-09 01:56:21 -07004050 ttm_read_unlock(&dev_priv->reservation_sem);
Thomas Hellstrom922ade02011-10-04 20:13:17 +02004051 if (unlikely(ret != 0))
Sinclair Yeh585851162017-07-05 01:45:40 -07004052 goto out;
Thomas Hellstrom922ade02011-10-04 20:13:17 +02004053
4054 vmw_kms_cursor_post_execbuf(dev_priv);
4055
Sinclair Yeh585851162017-07-05 01:45:40 -07004056out:
4057 if (in_fence)
4058 dma_fence_put(in_fence);
4059 return ret;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00004060}
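
/*
 * Illustrative sketch (not part of the driver; the helper name is made up):
 * the versioned argument copy performed by vmw_execbuf_ioctl() above. The
 * version 1 head of struct drm_vmw_execbuf_arg ends at context_handle;
 * later versions append members, so only the tail needs a second
 * copy_from_user().
 */
static int vmw_execbuf_copy_arg_example(struct drm_vmw_execbuf_arg *arg,
					void __user *data, u32 version)
{
	const size_t head = offsetof(struct drm_vmw_execbuf_arg,
				     context_handle);
	const size_t full = sizeof(struct drm_vmw_execbuf_arg);

	if (copy_from_user(arg, data, head) != 0)
		return -EFAULT;

	if (version > 1 &&
	    copy_from_user(&arg->context_handle,
			   (u8 __user *) data + head, full - head) != 0)
		return -EFAULT;

	return 0;
}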