// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/*
 * Helper macro to get dx_ctx_node if available; otherwise print an error
 * message. This is for use in command verifier functions where, if dx_ctx_node
 * is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context)					\
({									\
        __sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({	\
                VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
                __sw_context->dx_ctx_node;				\
        });								\
})
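
/*
 * Hedged usage sketch (not part of the original file): a typical DX command
 * verifier fetches the context node first and bails out when no DX context
 * has been set for this submission, e.g.:
 *
 *	struct vmw_ctx_validation_info *ctx_node =
 *		VMW_GET_CTX_NODE(sw_context);
 *
 *	if (!ctx_node)
 *		return -EINVAL;
 */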

#define VMW_DECLARE_CMD_VAR(__var, __type)	\
        struct {				\
                SVGA3dCmdHeader header;		\
                __type body;			\
        } __var
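
/*
 * Hedged usage sketch: VMW_DECLARE_CMD_VAR() lays out a full SVGA3D command,
 * header plus typed body. The command checkers below use it like:
 *
 *	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
 *
 *	cmd = container_of(header, typeof(*cmd), header);
 */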

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
        struct list_head head;
        struct vmw_buffer_object *vbo;
        union {
                SVGAMobId *mob_loc;
                SVGAGuestPtr *location;
        };
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking.
 */
enum vmw_resource_relocation_type {
        vmw_res_rel_normal,
        vmw_res_rel_nop,
        vmw_res_rel_cond_nop,
        vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Byte offset into the command buffer where the id that needs fixup
 * is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
        u32 offset:29;
        enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
        struct list_head head;
        struct vmw_resource *ctx;
        struct vmw_ctx_binding_state *cur;
        struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
        int (*func) (struct vmw_private *, struct vmw_sw_context *,
                     SVGA3dCmdHeader *);
        bool user_allow;
        bool gb_disable;
        bool gb_enable;
        const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
        [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),		\
                                       (_gb_disable), (_gb_enable), #_cmd}
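
/*
 * Hedged usage sketch: VMW_CMD_DEF() is meant to populate a command-entry
 * table indexed by SVGA3D command id; an illustrative (assumed) entry:
 *
 *	static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY,
 *			    &vmw_cmd_surface_copy_check, true, false, false),
 *	};
 */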

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_buffer_object **vmw_bo_p);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
        return (unsigned long) b - (unsigned long) a;
}
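
/*
 * Hedged usage note: vmw_cmd_res_check() below uses this helper to compute
 * relocation offsets relative to the start of the command buffer, e.g.
 *
 *	vmw_ptr_diff(sw_context->buf_start, id_loc)
 *
 * which yields the byte offset of the resource id that needs fixup.
 */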

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
                                        bool backoff)
{
        struct vmw_ctx_validation_info *entry;

        list_for_each_entry(entry, &sw_context->ctx_list, head) {
                if (!backoff)
                        vmw_binding_state_commit(entry->cur, entry->staged);

                if (entry->staged != sw_context->staged_bindings)
                        vmw_binding_state_free(entry->staged);
                else
                        sw_context->staged_bindings_inuse = false;
        }

        /* List entries are freed with the validation context */
        INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
        if (sw_context->dx_query_mob)
                vmw_context_bind_dx_query(sw_context->dx_query_ctx,
                                          sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context.
 * @res: Pointer to the context resource.
 * @node: The validation node holding the context resource metadata.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   struct vmw_resource *res,
                                   struct vmw_ctx_validation_info *node)
{
        int ret;

        ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
        if (unlikely(ret != 0))
                goto out_err;

        if (!sw_context->staged_bindings) {
                sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(sw_context->staged_bindings)) {
                        ret = PTR_ERR(sw_context->staged_bindings);
                        sw_context->staged_bindings = NULL;
                        goto out_err;
                }
        }

        if (sw_context->staged_bindings_inuse) {
                node->staged = vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(node->staged)) {
                        ret = PTR_ERR(node->staged);
                        node->staged = NULL;
                        goto out_err;
                }
        } else {
                node->staged = sw_context->staged_bindings;
                sw_context->staged_bindings_inuse = true;
        }

        node->ctx = res;
        node->cur = vmw_context_binding_state(res);
        list_add_tail(&node->head, &sw_context->ctx_list);

        return 0;

out_err:
        return ret;
}

/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node. Typically the binding manager
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
                                         enum vmw_res_type res_type)
{
        return (res_type == vmw_res_dx_context ||
                (res_type == vmw_res_context && dev_priv->has_mob)) ?
                sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
                                      struct vmw_resource *res,
                                      void *private)
{
        rcache->res = res;
        rcache->private = private;
        rcache->valid = 1;
        rcache->valid_handle = 0;
}

/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
 * rcu-protected pointer to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
                                         struct vmw_resource *res,
                                         u32 dirty)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        enum vmw_res_type res_type = vmw_res_type(res);
        struct vmw_res_cache_entry *rcache;
        struct vmw_ctx_validation_info *ctx_info;
        bool first_usage;
        unsigned int priv_size;

        rcache = &sw_context->res_cache[res_type];
        if (likely(rcache->valid && rcache->res == res)) {
                if (dirty)
                        vmw_validation_res_set_dirty(sw_context->ctx,
                                                     rcache->private, dirty);
                vmw_user_resource_noref_release();
                return 0;
        }

        priv_size = vmw_execbuf_res_size(dev_priv, res_type);
        ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
                                          dirty, (void **)&ctx_info,
                                          &first_usage);
        vmw_user_resource_noref_release();
        if (ret)
                return ret;

        if (priv_size && first_usage) {
                ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
                                              ctx_info);
                if (ret) {
                        VMW_DEBUG_USER("Failed first usage context setup.\n");
                        return ret;
                }
        }

        vmw_execbuf_rcache_update(rcache, res, ctx_info);
        return 0;
}

/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
                                         struct vmw_resource *res,
                                         u32 dirty)
{
        struct vmw_res_cache_entry *rcache;
        enum vmw_res_type res_type = vmw_res_type(res);
        void *ptr;
        int ret;

        rcache = &sw_context->res_cache[res_type];
        if (likely(rcache->valid && rcache->res == res)) {
                if (dirty)
                        vmw_validation_res_set_dirty(sw_context->ctx,
                                                     rcache->private, dirty);
                return 0;
        }

        ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
                                          &ptr, NULL);
        if (ret)
                return ret;

        vmw_execbuf_rcache_update(rcache, res, ptr);

        return 0;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *view)
{
        int ret;

        /*
         * First add the resource the view is pointing to, otherwise it may be
         * swapped out when the view is validated.
         */
        ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
                                            vmw_view_dirtying(view));
        if (ret)
                return ret;

        return vmw_execbuf_res_noctx_val_add(sw_context, view,
                                             VMW_RES_DIRTY_NONE);
}

/**
 * vmw_view_id_val_add - Look up a view and add it, together with the surface
 * it points to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
                    enum vmw_view_type view_type, u32 id)
{
        struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *view;
        int ret;

        if (!ctx_node)
                return ERR_PTR(-EINVAL);

        view = vmw_view_lookup(sw_context->man, view_type, id);
        if (IS_ERR(view))
                return view;

        ret = vmw_view_res_val_add(sw_context, view);
        if (ret)
                return ERR_PTR(ret);

        return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx)
{
        struct list_head *binding_list;
        struct vmw_ctx_bindinfo *entry;
        int ret = 0;
        struct vmw_resource *res;
        u32 i;

        /* Add all cotables to the validation list. */
        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                        res = vmw_context_cotable(ctx, i);
                        if (IS_ERR(res))
                                continue;

                        ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
                                                            VMW_RES_DIRTY_SET);
                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        /* Add all resources bound to the context to the validation list */
        mutex_lock(&dev_priv->binding_mutex);
        binding_list = vmw_context_binding_list(ctx);

        list_for_each_entry(entry, binding_list, ctx_list) {
                if (vmw_res_type(entry->res) == vmw_res_view)
                        ret = vmw_view_res_val_add(sw_context, entry->res);
                else
                        ret = vmw_execbuf_res_noctx_val_add
                                (sw_context, entry->res,
                                 vmw_binding_dirtying(entry->bt));
                if (unlikely(ret != 0))
                        break;
        }

        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                struct vmw_buffer_object *dx_query_mob;

                dx_query_mob = vmw_context_get_dx_query_mob(ctx);
                if (dx_query_mob)
                        ret = vmw_validation_add_bo(sw_context->ctx,
                                                    dx_query_mob, true, false);
        }

        mutex_unlock(&dev_priv->binding_mutex);
        return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
                                       const struct vmw_resource *res,
                                       unsigned long offset,
                                       enum vmw_resource_relocation_type
                                       rel_type)
{
        struct vmw_resource_relocation *rel;

        rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
        if (unlikely(!rel)) {
                VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
        }

        rel->res = res;
        rel->offset = offset;
        rel->rel_type = rel_type;
        list_add_tail(&rel->head, &sw_context->res_relocations);

        return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
        /* Memory is validation context memory, so no need to free it */
        INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need not
 * be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
                                           struct list_head *list)
{
        struct vmw_resource_relocation *rel;

        /* Validate the struct vmw_resource_relocation member size */
        BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
        BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

        list_for_each_entry(rel, list, head) {
                u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
                switch (rel->rel_type) {
                case vmw_res_rel_normal:
                        *addr = rel->res->id;
                        break;
                case vmw_res_rel_nop:
                        *addr = SVGA_3D_CMD_NOP;
                        break;
                default:
                        if (rel->res->id == -1)
                                *addr = SVGA_3D_CMD_NOP;
                        break;
                }
        }
}
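
/*
 * Hedged sketch of the relocation flow, based on the callers in this file:
 * while parsing, a verifier records where a resource id lives in the command
 * buffer; after validation, all recorded spots are patched in one pass:
 *
 *	ret = vmw_resource_relocation_add(sw_context, res,
 *					  vmw_ptr_diff(sw_context->buf_start,
 *						       id_loc),
 *					  vmw_res_rel_normal);
 *	...
 *	vmw_resource_relocations_apply(cb, &sw_context->res_relocations);
 */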

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
        int ret;

        ret = vmw_validation_res_reserve(sw_context->ctx, true);
        if (ret)
                return ret;

        if (sw_context->dx_query_mob) {
                struct vmw_buffer_object *expected_dx_query_mob;

                expected_dx_query_mob =
                        vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
                if (expected_dx_query_mob &&
                    expected_dx_query_mob != sw_context->dx_query_mob) {
                        ret = -EINVAL;
                }
        }

        return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
                  struct vmw_sw_context *sw_context,
                  enum vmw_res_type res_type,
                  u32 dirty,
                  const struct vmw_user_resource_conv *converter,
                  uint32_t *id_loc,
                  struct vmw_resource **p_res)
{
        struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
        struct vmw_resource *res;
        int ret;

        if (p_res)
                *p_res = NULL;

        if (*id_loc == SVGA3D_INVALID_ID) {
                if (res_type == vmw_res_context) {
                        VMW_DEBUG_USER("Illegal context invalid id.\n");
                        return -EINVAL;
                }
                return 0;
        }

        if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
                res = rcache->res;
                if (dirty)
                        vmw_validation_res_set_dirty(sw_context->ctx,
                                                     rcache->private, dirty);
        } else {
                unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

                ret = vmw_validation_preload_res(sw_context->ctx, size);
                if (ret)
                        return ret;

                res = vmw_user_resource_noref_lookup_handle
                        (dev_priv, sw_context->fp->tfile, *id_loc, converter);
                if (IS_ERR(res)) {
                        VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
                                       (unsigned int) *id_loc);
                        return PTR_ERR(res);
                }

                ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
                if (unlikely(ret != 0))
                        return ret;

                if (rcache->valid && rcache->res == res) {
                        rcache->valid_handle = true;
                        rcache->handle = *id_loc;
                }
        }

        ret = vmw_resource_relocation_add(sw_context, res,
                                          vmw_ptr_diff(sw_context->buf_start,
                                                       id_loc),
                                          vmw_res_rel_normal);
        if (unlikely(ret != 0))
                return ret;

        if (p_res)
                *p_res = res;

        return 0;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
        struct vmw_private *dev_priv = ctx_res->dev_priv;
        struct vmw_buffer_object *dx_query_mob;
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

        dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

        if (!dx_query_mob || dx_query_mob->dx_query_ctx)
                return 0;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
        if (cmd == NULL)
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = ctx_res->id;
        cmd->body.mobid = dx_query_mob->base.mem.start;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        vmw_context_bind_dx_query(ctx_res, dx_query_mob);

        return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
        struct vmw_ctx_validation_info *val;
        int ret;

        list_for_each_entry(val, &sw_context->ctx_list, head) {
                ret = vmw_binding_rebind_all(val->cur);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                VMW_DEBUG_USER("Failed to rebind context.\n");
                        return ret;
                }

                ret = vmw_rebind_all_dx_query(val->ctx);
                if (ret != 0) {
                        VMW_DEBUG_USER("Failed to rebind queries.\n");
                        return ret;
                }
        }

        return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
                                 enum vmw_view_type view_type,
                                 enum vmw_ctx_binding_type binding_type,
                                 uint32 shader_slot,
                                 uint32 view_ids[], u32 num_views,
                                 u32 first_slot)
{
        struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
        u32 i;

        if (!ctx_node)
                return -EINVAL;

        for (i = 0; i < num_views; ++i) {
                struct vmw_ctx_bindinfo_view binding;
                struct vmw_resource *view = NULL;

                if (view_ids[i] != SVGA3D_INVALID_ID) {
                        view = vmw_view_id_val_add(sw_context, view_type,
                                                   view_ids[i]);
                        if (IS_ERR(view)) {
                                VMW_DEBUG_USER("View not found.\n");
                                return PTR_ERR(view);
                        }
                }
                binding.bi.ctx = ctx_node->ctx;
                binding.bi.res = view;
                binding.bi.bt = binding_type;
                binding.shader_slot = shader_slot;
                binding.slot = first_slot + i;
                vmw_binding_add(ctx_node->staged, &binding.bi,
                                shader_slot, binding.slot);
        }

        return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
                container_of(header, typeof(*cmd), header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 VMW_RES_DIRTY_SET, user_context_converter,
                                 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command submission
 * context's resource cache and hence the last resource of that type to be
 * processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
                          struct vmw_resource *res)
{
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[vmw_res_type(res)];

        if (rcache->valid && rcache->res == res)
                return rcache->private;

        WARN_ON_ONCE(true);
        return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
        struct vmw_resource *ctx;
        struct vmw_resource *res;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);

        if (cmd->body.type >= SVGA3D_RT_MAX) {
                VMW_DEBUG_USER("Illegal render target type %u.\n",
                               (unsigned int) cmd->body.type);
                return -EINVAL;
        }

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                VMW_RES_DIRTY_SET, user_context_converter,
                                &cmd->body.cid, &ctx);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                VMW_RES_DIRTY_SET, user_surface_converter,
                                &cmd->body.target.sid, &res);
        if (unlikely(ret))
                return ret;

        if (dev_priv->has_mob) {
                struct vmw_ctx_bindinfo_view binding;
                struct vmw_ctx_validation_info *node;

                node = vmw_execbuf_info_from_res(sw_context, ctx);
                if (!node)
                        return -EINVAL;

                binding.bi.ctx = ctx;
                binding.bi.res = res;
                binding.bi.bt = vmw_ctx_binding_rt;
                binding.slot = cmd->body.type;
                vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
        }

        return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
        int ret;

        cmd = container_of(header, typeof(*cmd), header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                VMW_RES_DIRTY_NONE, user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (ret)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_SET, user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                VMW_RES_DIRTY_NONE, user_surface_converter,
                                &cmd->body.src, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_SET, user_surface_converter,
                                 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                VMW_RES_DIRTY_NONE, user_surface_converter,
                                &cmd->body.srcSid, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_SET, user_surface_converter,
                                 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                VMW_RES_DIRTY_NONE, user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_SET, user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
                container_of(header, typeof(*cmd), header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_NONE, user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
                container_of(header, typeof(*cmd), header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_NONE, user_surface_converter,
                                 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If so,
 * the function prepares the state of @sw_context for switching pinned buffers
 * after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       struct vmw_buffer_object *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_res_cache_entry *ctx_entry =
                &sw_context->res_cache[vmw_res_context];
        int ret;

        BUG_ON(!ctx_entry->valid);
        sw_context->last_query_ctx = ctx_entry->res;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
                if (unlikely(new_query_bo->base.num_pages > 4)) {
                        VMW_DEBUG_USER("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
                        ret = vmw_validation_add_bo(sw_context->ctx,
                                                    sw_context->cur_query_bo,
                                                    dev_priv->has_mob, false);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

                ret = vmw_validation_add_bo(sw_context->ctx,
                                            dev_priv->dummy_query_bo,
                                            dev_priv->has_mob, false);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all preceding
 * queries have finished, and the old query buffer can be unpinned. However,
 * since both the new query buffer and the old one are fenced with that fence,
 * we can do an asynchronous unpin now, and be sure that the old query buffer
 * won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
1084static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1085 struct vmw_sw_context *sw_context)
1086{
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001087 /*
1088 * The validate list should still hold references to all
1089 * contexts here.
1090 */
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001091 if (sw_context->needs_post_query_barrier) {
1092 struct vmw_res_cache_entry *ctx_entry =
1093 &sw_context->res_cache[vmw_res_context];
1094 struct vmw_resource *ctx;
1095 int ret;
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001096
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001097 BUG_ON(!ctx_entry->valid);
1098 ctx = ctx_entry->res;
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001099
1100 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
1101
1102 if (unlikely(ret != 0))
Deepak Rawat5724f892019-02-11 11:46:27 -08001103 VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001104 }
1105
1106 if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1107 if (dev_priv->pinned_bo) {
Thomas Hellstrom459d0fa2015-06-26 00:25:37 -07001108 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
Thomas Hellstromf1d34bf2018-06-19 15:02:16 +02001109 vmw_bo_unreference(&dev_priv->pinned_bo);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001110 }
1111
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001112 if (!sw_context->needs_post_query_barrier) {
Thomas Hellstrom459d0fa2015-06-26 00:25:37 -07001113 vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001114
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001115 /*
1116 * We pin also the dummy_query_bo buffer so that we
Deepak Rawat680360a2019-02-13 13:20:42 -08001117 * don't need to validate it when emitting dummy queries
1118 * in context destroy paths.
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001119 */
Thomas Hellstrom459d0fa2015-06-26 00:25:37 -07001120 if (!dev_priv->dummy_query_bo_pinned) {
1121 vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1122 true);
1123 dev_priv->dummy_query_bo_pinned = true;
1124 }
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001125
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001126 BUG_ON(sw_context->last_query_ctx == NULL);
1127 dev_priv->query_cid = sw_context->last_query_ctx->id;
1128 dev_priv->query_cid_valid = true;
1129 dev_priv->pinned_bo =
Thomas Hellstromf1d34bf2018-06-19 15:02:16 +02001130 vmw_bo_reference(sw_context->cur_query_bo);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001131 }
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001132 }
1133}
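
/*
 * Illustrative sketch (not part of the driver): the caller pattern that the
 * comment above implies. The real submission path lives in
 * vmw_execbuf_ioctl() and its helpers; the exact call sequence below is an
 * assumption for illustration only.
 *
 *	struct vmw_fence_obj *fence = NULL;
 *
 *	vmw_query_bo_switch_commit(dev_priv, sw_context);
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *	// Both cur_query_bo and the previously pinned query buffer are now
 *	// fenced with "fence", so the asynchronous unpin above is safe:
 *	// neither buffer moves before the fence signals.
 */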

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
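
/*
 * Minimal sketch (an assumption, not the driver's code) of what applying a
 * MOB relocation amounts to once the buffer has been validated and placed:
 * the user-space handle stored at @mob_loc is overwritten with the kernel
 * mode MOB id. The use of bo->mem.start as the device-visible MOB id is an
 * assumption for illustration; the authoritative logic is in
 * vmw_apply_relocations().
 *
 *	static void example_apply_mob_reloc(struct vmw_relocation *reloc)
 *	{
 *		struct ttm_buffer_object *bo = &reloc->vbo->base;
 *
 *		// Patch the command stream in place with the validated
 *		// buffer's MOB id.
 *		*reloc->mob_loc = bo->mem.start;
 *	}
 */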

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use GMR region.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
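
/*
 * Usage sketch (illustration only): a command verifier translating a guest
 * pointer embedded in a fifo command. This mirrors the real callers below,
 * e.g. vmw_cmd_end_query(); "cmd" stands for a hypothetical command struct
 * with an SVGAGuestPtr member.
 *
 *	struct vmw_buffer_object *vmw_bo;
 *	int ret;
 *
 *	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 *				      &cmd->body.guestResult, &vmw_bo);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	// vmw_bo is not ref-counted; it remains valid for the duration of
 *	// the command submission via the validation context.
 */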

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;

	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}
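
/*
 * A note on the pattern above (also used by the end-query and wait-query
 * verifiers below): on guest-backed-object capable devices, a legacy query
 * command is converted in place to its GB equivalent. This is only safe
 * because the legacy and GB command structures are declared with identical
 * sizes (the BUG_ON asserts that invariant), so the rebuilt command can
 * simply be memcpy'd over the original and re-dispatched to the GB verifier
 * with the same header pointer.
 */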

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);

	return 0;
}
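
/*
 * Illustrative layout note (a sketch of the SVGA protocol as used above,
 * not additional driver code): a SURFACE_DMA command is trailed by a suffix
 * structure, which is why the suffix pointer in vmw_cmd_dma() is computed
 * backwards from the end of the command:
 *
 *	SVGA3dCmdHeader           header;    // id, size
 *	SVGA3dCmdSurfaceDMA       body;      // guest ptr, host sid, transfer
 *	SVGA3dCopyBox             boxes[n];  // variable-length copy list
 *	SVGA3dCmdSurfaceDMASuffix suffix;    // suffixSize, maximumOffset, ...
 *
 * header->size covers everything after the header, so
 * (char *)&cmd->body + header->size - sizeof(suffix) lands on the suffix.
 */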

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
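
/*
 * Illustrative layout note (derived from the verifier above, not additional
 * driver code): DRAW_PRIMITIVES is another variable-length command, which
 * vmw_cmd_draw() walks in two passes:
 *
 *	SVGA3dCmdHeader         header;
 *	SVGA3dCmdDrawPrimitives body;      // numVertexDecls, numRanges
 *	SVGA3dVertexDecl        decl[numVertexDecls];
 *	SVGA3dPrimitiveRange    range[numRanges];
 *
 * The two "maxnum" computations bound each array against header->size
 * before the loops dereference surface ids out of them, so a malicious
 * count cannot make the verifier read past the submitted command.
 */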

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_buffer_object *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: Pointer to the resource whose backup buffer is being switched.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res, uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_buffer_object *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter, &cmd->body.sid,
				     &cmd->body.mobid, 0);
}

/**
 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1, cmd->body.type,
				    size, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
				cmd->body.type, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid, cmd->body.type);
		if (!IS_ERR(res)) {
			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_NONE);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					VMW_RES_DIRTY_NONE,
					user_shader_converter, &cmd->body.shid,
					&res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter, &cmd->body.shid,
				     &cmd->body.mobid, cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
			       (unsigned int) cmd->body.type,
			       (unsigned int) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
			binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
		container_of(header, typeof(*cmd), header);
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		VMW_DEBUG_USER("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

/**
 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
						    VMW_RES_DIRTY_NONE);
		if (ret)
			return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}
2225
/**
 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cmd->buf[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = res;
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_dx_set_index_buffer - Validate
 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
		container_of(header, typeof(*cmd), header);
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);
	int ret;

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
				    0, &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
				     num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
		container_of(header, typeof(*cmd), header);

	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
					   cmd->body.renderTargetViewId));
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
		container_of(header, typeof(*cmd), header);

	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
					   cmd->body.depthStencilViewId));
}

static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *srf;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have the
	 * same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (!ctx_node)
		return -EINVAL;

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->sid, &srf);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
			    cmd->defined_id, header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

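/*
 * Informal summary of the pattern above: view "define" commands first call
 * vmw_cotable_notify() so the context's cotable can be grown to hold the
 * new id before the command reaches the device, and only then register the
 * view as a tracked resource of this command submission.
 */
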
/**
 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_so binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		VMW_DEBUG_USER("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_SET,
					user_surface_converter,
					&cmd->targets[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_so;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->sid, NULL);
}

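/*
 * The anonymous union above lets one validator serve three subresource
 * commands: the BUILD_BUG_ON()s prove at compile time that the surface id
 * sits at the same offset in each command body, so checking cmd->sid is
 * equally valid for readback, invalidate and update.
 */
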
static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);

	if (!ctx_node)
		return -EINVAL;

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - Validate a view remove command and schedule the
 * view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this command
 * batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res, &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(sw_context, view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}

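/*
 * Rough sketch of what the conditional-NOP relocation achieves at fence
 * submission time (inferred from the relocation type name and the comment
 * above, not a verbatim description): the relocation records the offset of
 * the command id within the batch, and if the view turns out to no longer
 * exist on the device when the batch is committed, the id at that offset
 * is patched to a NOP so the device never sees a destroy for an unknown
 * view.
 */
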
/**
 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					VMW_RES_DIRTY_SET,
					user_context_converter, &cmd->body.cid,
					&ctx);
		if (ret)
			return ret;
	} else {
		struct vmw_ctx_validation_info *ctx_node =
			VMW_GET_CTX_NODE(sw_context);

		if (!ctx_node)
			return -EINVAL;

		ctx = ctx_node->ctx;
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
					    VMW_RES_DIRTY_NONE);
	if (ret) {
		VMW_DEBUG_USER("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
		container_of(header, typeof(*cmd), header);

	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
					   cmd->body.shaderResourceViewId));
}

/**
 * vmw_cmd_dx_transfer_from_buffer - Validate
 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
		container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.destSid, NULL);
}

/**
 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
		container_of(header, typeof(*cmd), header);

	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.surface.sid, NULL);
}

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
			       cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

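/*
 * Informal note: legacy (non-3D) SVGA commands carry no size field, so the
 * switch above derives each command's size from its fixed struct layout,
 * e.g. sizeof(uint32_t) for the command id plus sizeof(SVGAFifoCmdUpdate)
 * for SVGA_CMD_UPDATE. SVGA_CMD_BLIT_SCREEN_TO_GMRFB reuses
 * sizeof(SVGAFifoCmdBlitGMRFBToScreen); presumably the two blit command
 * structs share a layout, so the computed size is the same either way.
 */
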
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/* SM commands */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
		    &vmw_cmd_dx_transfer_from_buffer,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
		    true, false, true),
};

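/*
 * Informal reading of the VMW_CMD_DEF() boolean triple, inferred from how
 * vmw_cmd_check() consumes the entries above: (user_allow, gb_disable,
 * gb_enable) - whether unprivileged submitters may issue the command,
 * whether it is disallowed once guest-backed objects are enabled, and
 * whether it requires guest-backed objects, respectively. For example:
 *
 *	// kernel-only command that requires guest-backed objects:
 *	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
 *		    false, false, true),
 */
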
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}

static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context, void *buf,
			 uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
			       cmd_id + SVGA_3D_CMD_BASE, ret);
		return ret;
	}

	return 0;
out_invalid:
	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context, void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		VMW_DEBUG_USER("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->base;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

Thomas Hellstrombe38ab62011-08-31 07:42:54 +00003307static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3308 uint32_t size)
3309{
3310 if (likely(sw_context->cmd_bounce_size >= size))
3311 return 0;
3312
3313 if (sw_context->cmd_bounce_size == 0)
3314 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3315
3316 while (sw_context->cmd_bounce_size < size) {
3317 sw_context->cmd_bounce_size =
3318 PAGE_ALIGN(sw_context->cmd_bounce_size +
3319 (sw_context->cmd_bounce_size >> 1));
3320 }
3321
Markus Elfring0bc32992016-07-22 13:31:00 +02003322 vfree(sw_context->cmd_bounce);
Thomas Hellstrombe38ab62011-08-31 07:42:54 +00003323 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3324
3325 if (sw_context->cmd_bounce == NULL) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003326 VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
Thomas Hellstrombe38ab62011-08-31 07:42:54 +00003327 sw_context->cmd_bounce_size = 0;
3328 return -ENOMEM;
3329 }
3330
3331 return 0;
3332}
3333
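/*
 * Worked example of the growth policy above (illustrative values only,
 * assuming VMWGFX_CMD_BOUNCE_INIT_SIZE is 32 KiB and PAGE_SIZE is 4 KiB):
 * a request for 60 KiB grows the bounce buffer
 *
 *	32768 -> PAGE_ALIGN(32768 + 16384) = 49152
 *	      -> PAGE_ALIGN(49152 + 24576) = 73728	(>= 60 KiB, done)
 *
 * i.e. roughly 1.5x per step, page aligned. Note that the old contents are
 * not preserved across the vfree()/vmalloc() pair; callers copy the whole
 * batch in afterwards.
 */
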
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003334/**
3335 * vmw_execbuf_fence_commands - create and submit a command stream fence
3336 *
3337 * Creates a fence object and submits a command stream marker.
3338 * If this fails for some reason, we sync the fifo and return NULL.
3339 * It is then safe to fence buffers with a NULL pointer.
Jakob Bornecrantz6070e9f2011-10-04 20:13:16 +02003340 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003341 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3342 * userspace handle is created for the fence; otherwise no handle is created.
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003343 */
3345int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3346 struct vmw_private *dev_priv,
3347 struct vmw_fence_obj **p_fence,
3348 uint32_t *p_handle)
3349{
3350 uint32_t sequence;
3351 int ret;
3352 bool synced = false;
3353
Jakob Bornecrantz6070e9f2011-10-04 20:13:16 +02003354 /* p_handle implies file_priv. */
3355 BUG_ON(p_handle != NULL && file_priv == NULL);
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003356
3357 ret = vmw_fifo_send_fence(dev_priv, &sequence);
3358 if (unlikely(ret != 0)) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003359 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003360 synced = true;
3361 }
3362
3363 if (p_handle != NULL)
3364 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
Maarten Lankhorstc060a4e2014-03-26 13:06:24 +01003365 sequence, p_fence, p_handle);
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003366 else
Maarten Lankhorstc060a4e2014-03-26 13:06:24 +01003367 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003368
3369 if (unlikely(ret != 0 && !synced)) {
Deepak Rawat680360a2019-02-13 13:20:42 -08003370 (void) vmw_fallback_wait(dev_priv, false, false, sequence,
3371 false, VMW_FENCE_WAIT_TIMEOUT);
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003372 *p_fence = NULL;
3373 }
3374
Thomas Hellstrom728354c2019-01-31 10:55:37 +01003375 return ret;
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003376}
3377
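/*
 * Minimal usage sketch (hypothetical kernel-internal caller, so no
 * userspace handle is requested; this mirrors the call in
 * __vmw_execbuf_release_pinned_bo() below):
 *
 *	struct vmw_fence_obj *fence = NULL;
 *	int ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *
 *	// On failure the fifo has been synced and fence may be NULL, which
 *	// is then safe to pass to the buffer-fencing paths.
 *	if (fence)
 *		vmw_fence_obj_unreference(&fence);
 */
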
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003378/**
Deepak Rawat680360a2019-02-13 13:20:42 -08003379 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003380 *
3381 * @dev_priv: Pointer to a vmw_private struct.
3382 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3383 * @ret: Return value from fence object creation.
Deepak Rawat680360a2019-02-13 13:20:42 -08003384 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3385 * the information should be copied.
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003386 * @fence: Pointer to the fence object.
3387 * @fence_handle: User-space fence handle.
Sinclair Yehc906965d2017-07-05 01:49:32 -07003388 * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
3389 * @sync_file: Only used to clean up in case of an error in this function.
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003390 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003391 * This function copies fence information to user-space. If copying fails, the
3392 * user-space struct drm_vmw_fence_rep::error member is expected to be left
3393 * untouched; if user-space has preloaded it with -EFAULT, the failure will
3394 * then be detected.
3395 *
3396 * Also if copying fails, user-space will be unable to signal the fence object
3397 * so we wait for it immediately, and then unreference the user-space reference.
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003398 */
Thomas Hellstrom57c5ee72011-10-10 12:23:26 +02003399void
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003400vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
Deepak Rawat680360a2019-02-13 13:20:42 -08003401 struct vmw_fpriv *vmw_fp, int ret,
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003402 struct drm_vmw_fence_rep __user *user_fence_rep,
Deepak Rawat680360a2019-02-13 13:20:42 -08003403 struct vmw_fence_obj *fence, uint32_t fence_handle,
3404 int32_t out_fence_fd, struct sync_file *sync_file)
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003405{
3406 struct drm_vmw_fence_rep fence_rep;
3407
3408 if (user_fence_rep == NULL)
3409 return;
3410
Dan Carpenter80d9b242011-10-18 09:10:12 +03003411 memset(&fence_rep, 0, sizeof(fence_rep));
3412
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003413 fence_rep.error = ret;
Sinclair Yehc906965d2017-07-05 01:49:32 -07003414 fence_rep.fd = out_fence_fd;
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003415 if (ret == 0) {
3416 BUG_ON(fence == NULL);
3417
3418 fence_rep.handle = fence_handle;
Maarten Lankhorst2298e802014-03-26 14:07:44 +01003419 fence_rep.seqno = fence->base.seqno;
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003420 vmw_update_seqno(dev_priv, &dev_priv->fifo);
3421 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3422 }
3423
3424 /*
Deepak Rawat680360a2019-02-13 13:20:42 -08003425 * copy_to_user errors will be detected by user space not seeing
3426 * fence_rep::error filled in. Typically user-space would have pre-set
3427 * that member to -EFAULT.
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003428 */
3429 ret = copy_to_user(user_fence_rep, &fence_rep,
3430 sizeof(fence_rep));
3431
3432 /*
Deepak Rawat680360a2019-02-13 13:20:42 -08003433 * User-space lost the fence object. We need to sync and unreference the
3434 * handle.
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003435 */
3436 if (unlikely(ret != 0) && (fence_rep.error == 0)) {
Sinclair Yehc906965d2017-07-05 01:49:32 -07003437 if (sync_file)
3438 fput(sync_file->file);
3439
3440 if (fence_rep.fd != -1) {
3441 put_unused_fd(fence_rep.fd);
3442 fence_rep.fd = -1;
3443 }
3444
Deepak Rawat680360a2019-02-13 13:20:42 -08003445 ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
3446 TTM_REF_USAGE);
Deepak Rawat5724f892019-02-11 11:46:27 -08003447 VMW_DEBUG_USER("Fence copy error. Syncing.\n");
Maarten Lankhorstc060a4e2014-03-26 13:06:24 +01003448 (void) vmw_fence_obj_wait(fence, false, false,
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003449 VMW_FENCE_WAIT_TIMEOUT);
3450 }
3451}
3452
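/*
 * Hedged userspace-side sketch of the -EFAULT convention documented above
 * (variable names are illustrative, not from this file):
 *
 *	struct drm_vmw_fence_rep rep;
 *
 *	rep.error = -EFAULT;	// Pre-seed: stays put if the kernel's
 *				// copy_to_user() fails.
 *	// ... submit the execbuf ioctl with fence_rep pointing at &rep ...
 *	if (rep.error != 0)
 *		return;		// Fence info never arrived; rep.handle is
 *				// not valid and must not be used.
 */
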
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003453/**
Deepak Rawat680360a2019-02-13 13:20:42 -08003454 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003455 *
3456 * @dev_priv: Pointer to a device private structure.
3457 * @kernel_commands: Pointer to the unpatched command batch.
3458 * @command_size: Size of the unpatched command batch.
3459 * @sw_context: Structure holding the relocation lists.
3460 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003461 * Side effects: If this function returns 0, then the command batch pointed to
3462 * by @kernel_commands will have been modified.
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003463 */
3464static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
Deepak Rawat680360a2019-02-13 13:20:42 -08003465 void *kernel_commands, u32 command_size,
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003466 struct vmw_sw_context *sw_context)
3467{
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003468 void *cmd;
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02003469
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003470 if (sw_context->dx_ctx_node)
3471 cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003472 sw_context->dx_ctx_node->ctx->id);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003473 else
3474 cmd = vmw_fifo_reserve(dev_priv, command_size);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003475 if (!cmd) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003476 VMW_DEBUG_USER("Failed reserving fifo space for commands.\n");
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003477 return -ENOMEM;
3478 }
3479
3480 vmw_apply_relocations(sw_context);
3481 memcpy(cmd, kernel_commands, command_size);
3482 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3483 vmw_resource_relocations_free(&sw_context->res_relocations);
3484 vmw_fifo_commit(dev_priv, command_size);
3485
3486 return 0;
3487}
3488
3489/**
Deepak Rawat680360a2019-02-13 13:20:42 -08003490 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3491 * command buffer manager.
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003492 *
3493 * @dev_priv: Pointer to a device private structure.
3494 * @header: Opaque handle to the command buffer allocation.
3495 * @command_size: Size of the unpatched command batch.
3496 * @sw_context: Structure holding the relocation lists.
3497 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003498 * Side effects: If this function returns 0, then the command buffer represented
3499 * by @header will have been modified.
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003500 */
3501static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3502 struct vmw_cmdbuf_header *header,
3503 u32 command_size,
3504 struct vmw_sw_context *sw_context)
3505{
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003506 u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003507 SVGA3D_INVALID_ID);
Deepak Rawat680360a2019-02-13 13:20:42 -08003508 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3509 header);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003510
3511 vmw_apply_relocations(sw_context);
3512 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3513 vmw_resource_relocations_free(&sw_context->res_relocations);
3514 vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3515
3516 return 0;
3517}
3518
3519/**
3520 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3521 * submission using a command buffer.
3522 *
3523 * @dev_priv: Pointer to a device private structure.
3524 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to a kernel-space copy of the commands, or NULL.
3525 * @command_size: Size of the unpatched command batch.
3526 * @header: Out parameter returning the opaque pointer to the command buffer.
3527 *
3528 * This function checks whether we can use the command buffer manager for
Deepak Rawat680360a2019-02-13 13:20:42 -08003529 * submission and if so, creates a command buffer of suitable size and copies
3530 * the user data into that buffer.
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003531 *
3532 * On successful return, the function returns a pointer to the data in the
3533 * command buffer and *@header is set to non-NULL.
Deepak Rawat680360a2019-02-13 13:20:42 -08003534 *
3535 * If command buffers could not be used, the function will return the value of
3536 * @kernel_commands on function call. That value may be NULL. In that case, the
3537 * value of *@header will be set to NULL.
3538 *
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003539 * If an error is encountered, the function will return a pointer error value.
3540 * If the function is interrupted by a signal while sleeping, it will return
3541 * -ERESTARTSYS cast to a pointer error value.
3542 */
Thomas Hellstromb9eb1a62015-04-02 02:39:45 -07003543static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3544 void __user *user_commands,
Deepak Rawat680360a2019-02-13 13:20:42 -08003545 void *kernel_commands, u32 command_size,
Thomas Hellstromb9eb1a62015-04-02 02:39:45 -07003546 struct vmw_cmdbuf_header **header)
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003547{
3548 size_t cmdbuf_size;
3549 int ret;
3550
3551 *header = NULL;
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003552 if (command_size > SVGA_CB_MAX_SIZE) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003553 VMW_DEBUG_USER("Command buffer is too large.\n");
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003554 return ERR_PTR(-EINVAL);
3555 }
3556
Thomas Hellstrom51ab70b2016-10-10 10:51:24 -07003557 if (!dev_priv->cman || kernel_commands)
3558 return kernel_commands;
3559
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003560 /* If possible, add a little space for fencing. */
3561 cmdbuf_size = command_size + 512;
3562 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
Deepak Rawat680360a2019-02-13 13:20:42 -08003563 kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
3564 header);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003565 if (IS_ERR(kernel_commands))
3566 return kernel_commands;
3567
Deepak Rawat680360a2019-02-13 13:20:42 -08003568 ret = copy_from_user(kernel_commands, user_commands, command_size);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003569 if (ret) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003570 VMW_DEBUG_USER("Failed copying commands.\n");
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003571 vmw_cmdbuf_header_free(*header);
3572 *header = NULL;
3573 return ERR_PTR(-EFAULT);
3574 }
3575
3576 return kernel_commands;
3577}
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02003578
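/*
 * Sketch of the contract callers rely on (mirrors the use in
 * vmw_execbuf_process() below):
 *
 *	cmd = vmw_execbuf_cmdbuf(dev_priv, user_commands, kernel_commands,
 *				 command_size, &header);
 *	if (IS_ERR(cmd))	// too large, allocation or copy failure
 *		return PTR_ERR(cmd);
 *	if (header)		// command buffer manager path
 *		vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
 *					  sw_context);
 *	else			// fifo path; cmd == kernel_commands as passed
 *		vmw_execbuf_submit_fifo(dev_priv, cmd, command_size,
 *					sw_context);
 */
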
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003579static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3580 struct vmw_sw_context *sw_context,
3581 uint32_t handle)
3582{
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003583 struct vmw_resource *res;
3584 int ret;
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +02003585 unsigned int size;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003586
3587 if (handle == SVGA3D_INVALID_ID)
3588 return 0;
3589
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +02003590 size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
3591 ret = vmw_validation_preload_res(sw_context->ctx, size);
3592 if (ret)
3593 return ret;
3594
3595 res = vmw_user_resource_noref_lookup_handle
3596 (dev_priv, sw_context->fp->tfile, handle,
3597 user_context_converter);
Chengguang Xu4efa6662019-03-01 10:14:06 -08003598 if (IS_ERR(res)) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003599 VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n",
3600 (unsigned int) handle);
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +02003601 return PTR_ERR(res);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003602 }
3603
Thomas Hellstroma9f58c42019-02-20 08:21:26 +01003604 ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003605 if (unlikely(ret != 0))
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +02003606 return ret;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003607
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003608 sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003609 sw_context->man = vmw_context_res_man(res);
Thomas Hellstrome8c66ef2018-09-26 16:32:40 +02003610
3611 return 0;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003612}
3613
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003614int vmw_execbuf_process(struct drm_file *file_priv,
3615 struct vmw_private *dev_priv,
Deepak Rawat680360a2019-02-13 13:20:42 -08003616 void __user *user_commands, void *kernel_commands,
3617 uint32_t command_size, uint64_t throttle_us,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003618 uint32_t dx_context_handle,
Jakob Bornecrantzbb1bd2f2012-02-09 16:56:43 +01003619 struct drm_vmw_fence_rep __user *user_fence_rep,
Deepak Rawat680360a2019-02-13 13:20:42 -08003620 struct vmw_fence_obj **out_fence, uint32_t flags)
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003621{
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003622 struct vmw_sw_context *sw_context = &dev_priv->ctx;
Jakob Bornecrantzbb1bd2f2012-02-09 16:56:43 +01003623 struct vmw_fence_obj *fence = NULL;
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003624 struct vmw_cmdbuf_header *header;
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003625 uint32_t handle;
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003626 int ret;
Sinclair Yehc906965d2017-07-05 01:49:32 -07003627 int32_t out_fence_fd = -1;
3628 struct sync_file *sync_file = NULL;
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003629 DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
Sinclair Yehc906965d2017-07-05 01:49:32 -07003630
Thomas Hellstromfd567462018-12-12 11:52:08 +01003631 vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
3632
Sinclair Yehc906965d2017-07-05 01:49:32 -07003633 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3634 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
3635 if (out_fence_fd < 0) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003636 VMW_DEBUG_USER("Failed to get a fence fd.\n");
Sinclair Yehc906965d2017-07-05 01:49:32 -07003637 return out_fence_fd;
3638 }
3639 }
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003640
Charmaine Lee2f633e52015-08-10 10:45:11 -07003641 if (throttle_us) {
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003642 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3643 throttle_us);
Charmaine Lee2f633e52015-08-10 10:45:11 -07003644
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003645 if (ret)
Sinclair Yehc906965d2017-07-05 01:49:32 -07003646 goto out_free_fence_fd;
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003647 }
Charmaine Lee2f633e52015-08-10 10:45:11 -07003648
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003649 kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3650 kernel_commands, command_size,
3651 &header);
Sinclair Yehc906965d2017-07-05 01:49:32 -07003652 if (IS_ERR(kernel_commands)) {
3653 ret = PTR_ERR(kernel_commands);
3654 goto out_free_fence_fd;
3655 }
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003656
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003657 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003658 if (ret) {
3659 ret = -ERESTARTSYS;
3660 goto out_free_header;
3661 }
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003662
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003663 sw_context->kernel = false;
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003664 if (kernel_commands == NULL) {
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003665 ret = vmw_resize_cmd_bounce(sw_context, command_size);
3666 if (unlikely(ret != 0))
3667 goto out_unlock;
3668
Deepak Rawat680360a2019-02-13 13:20:42 -08003669 ret = copy_from_user(sw_context->cmd_bounce, user_commands,
3670 command_size);
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003671 if (unlikely(ret != 0)) {
3672 ret = -EFAULT;
Deepak Rawat5724f892019-02-11 11:46:27 -08003673 VMW_DEBUG_USER("Failed copying commands.\n");
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003674 goto out_unlock;
3675 }
Deepak Rawat680360a2019-02-13 13:20:42 -08003676
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003677 kernel_commands = sw_context->cmd_bounce;
Deepak Rawat680360a2019-02-13 13:20:42 -08003678 } else if (!header) {
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003679 sw_context->kernel = true;
Deepak Rawat680360a2019-02-13 13:20:42 -08003680 }
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003681
Thomas Hellstromd5bde952014-01-31 10:12:10 +01003682 sw_context->fp = vmw_fpriv(file_priv);
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003683 INIT_LIST_HEAD(&sw_context->ctx_list);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003684 sw_context->cur_query_bo = dev_priv->pinned_bo;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003685 sw_context->last_query_ctx = NULL;
3686 sw_context->needs_post_query_barrier = false;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003687 sw_context->dx_ctx_node = NULL;
Sinclair Yehfd11a3c2015-08-10 10:56:15 -07003688 sw_context->dx_query_mob = NULL;
3689 sw_context->dx_query_ctx = NULL;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003690 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003691 INIT_LIST_HEAD(&sw_context->res_relocations);
Thomas Hellstromfc18afc2018-09-26 15:36:52 +02003692 INIT_LIST_HEAD(&sw_context->bo_relocations);
Deepak Rawat680360a2019-02-13 13:20:42 -08003693
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003694 if (sw_context->staged_bindings)
3695 vmw_binding_state_reset(sw_context->staged_bindings);
3696
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003697 if (!sw_context->res_ht_initialized) {
3698 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
3699 if (unlikely(ret != 0))
3700 goto out_unlock;
Deepak Rawat680360a2019-02-13 13:20:42 -08003701
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003702 sw_context->res_ht_initialized = true;
3703 }
Deepak Rawat680360a2019-02-13 13:20:42 -08003704
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02003705 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003706 sw_context->ctx = &val_ctx;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003707 ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003708 if (unlikely(ret != 0))
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003709 goto out_err_nores;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003710
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003711 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
3712 command_size);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003713 if (unlikely(ret != 0))
Thomas Hellstromcf5e3412014-01-30 10:58:19 +01003714 goto out_err_nores;
Thomas Hellstrombe38ab62011-08-31 07:42:54 +00003715
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003716 ret = vmw_resources_reserve(sw_context);
3717 if (unlikely(ret != 0))
Thomas Hellstromcf5e3412014-01-30 10:58:19 +01003718 goto out_err_nores;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003719
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003720 ret = vmw_validation_bo_reserve(&val_ctx, true);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003721 if (unlikely(ret != 0))
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003722 goto out_err_nores;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003723
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003724 ret = vmw_validation_bo_validate(&val_ctx, true);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003725 if (unlikely(ret != 0))
3726 goto out_err;
3727
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003728 ret = vmw_validation_res_validate(&val_ctx, true);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003729 if (unlikely(ret != 0))
3730 goto out_err;
Deepak Rawat680360a2019-02-13 13:20:42 -08003731
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003732 vmw_validation_drop_ht(&val_ctx);
Thomas Hellstrom1925d452010-05-28 11:21:57 +02003733
Thomas Hellstrom173fb7d2013-10-08 02:32:36 -07003734 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
3735 if (unlikely(ret != 0)) {
3736 ret = -ERESTARTSYS;
3737 goto out_err;
3738 }
3739
Thomas Hellstrom30f82d812014-02-05 08:13:56 +01003740 if (dev_priv->has_mob) {
3741 ret = vmw_rebind_contexts(sw_context);
3742 if (unlikely(ret != 0))
Dan Carpenterb2ad9882014-02-11 19:03:47 +03003743 goto out_unlock_binding;
Thomas Hellstrom30f82d812014-02-05 08:13:56 +01003744 }
3745
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003746 if (!header) {
3747 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
3748 command_size, sw_context);
3749 } else {
3750 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
3751 sw_context);
3752 header = NULL;
Thomas Hellstrombe38ab62011-08-31 07:42:54 +00003753 }
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003754 mutex_unlock(&dev_priv->binding_mutex);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003755 if (ret)
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003756 goto out_err;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003757
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003758 vmw_query_bo_switch_commit(dev_priv, sw_context);
Deepak Rawat680360a2019-02-13 13:20:42 -08003759 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003760 (user_fence_rep) ? &handle : NULL);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003761 /*
3762 * This error is harmless, because if fence submission fails,
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003763 * vmw_fifo_send_fence will sync. The error will be propagated to
3764 * user-space in @fence_rep.
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003765 */
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003766 if (ret != 0)
Deepak Rawat5724f892019-02-11 11:46:27 -08003767 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003768
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003769 vmw_execbuf_bindings_commit(sw_context, false);
3770 vmw_bind_dx_query_mob(sw_context);
3771 vmw_validation_res_unreserve(&val_ctx, false);
Thomas Hellstrom173fb7d2013-10-08 02:32:36 -07003772
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003773 vmw_validation_bo_fence(sw_context->ctx, fence);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003774
Deepak Rawat680360a2019-02-13 13:20:42 -08003775 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003776 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
3777
Sinclair Yehc906965d2017-07-05 01:49:32 -07003778 /*
Deepak Rawat680360a2019-02-13 13:20:42 -08003779 * If anything fails here, give up trying to export the fence and do a
3780 * sync since the user mode will not be able to sync the fence itself.
3781 * This ensures we are still functionally correct.
Sinclair Yehc906965d2017-07-05 01:49:32 -07003782 */
3783 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3785 sync_file = sync_file_create(&fence->base);
3786 if (!sync_file) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003787 VMW_DEBUG_USER("Sync file create failed for fence\n");
Sinclair Yehc906965d2017-07-05 01:49:32 -07003788 put_unused_fd(out_fence_fd);
3789 out_fence_fd = -1;
3790
3791 (void) vmw_fence_obj_wait(fence, false, false,
3792 VMW_FENCE_WAIT_TIMEOUT);
3793 } else {
3794 /* Link the fence with the FD created earlier */
3795 fd_install(out_fence_fd, sync_file->file);
3796 }
3797 }
3798
Thomas Hellstrom8bf445c2011-10-10 12:23:25 +02003799 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
Deepak Rawat680360a2019-02-13 13:20:42 -08003800 user_fence_rep, fence, handle, out_fence_fd,
3801 sync_file);
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003802
Jakob Bornecrantzbb1bd2f2012-02-09 16:56:43 +01003803 /* Don't unreference when handing fence out */
3804 if (unlikely(out_fence != NULL)) {
3805 *out_fence = fence;
3806 fence = NULL;
3807 } else if (likely(fence != NULL)) {
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003808 vmw_fence_obj_unreference(&fence);
Jakob Bornecrantzbb1bd2f2012-02-09 16:56:43 +01003809 }
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003810
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02003811 vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003812 mutex_unlock(&dev_priv->cmdbuf_mutex);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003813
3814 /*
Deepak Rawat680360a2019-02-13 13:20:42 -08003815 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
3816 * in resource destruction paths.
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003817 */
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003818 vmw_validation_unref_lists(&val_ctx);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003819
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003820 return 0;
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003821
Thomas Hellstrom173fb7d2013-10-08 02:32:36 -07003822out_unlock_binding:
3823 mutex_unlock(&dev_priv->binding_mutex);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003824out_err:
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003825 vmw_validation_bo_backoff(&val_ctx);
Thomas Hellstromcf5e3412014-01-30 10:58:19 +01003826out_err_nores:
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003827 vmw_execbuf_bindings_commit(sw_context, true);
3828 vmw_validation_res_unreserve(&val_ctx, true);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003829 vmw_resource_relocations_free(&sw_context->res_relocations);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003830 vmw_free_relocations(sw_context);
Deepak Rawat680360a2019-02-13 13:20:42 -08003831 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003832 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003833out_unlock:
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02003834 vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003835 vmw_validation_drop_ht(&val_ctx);
3836 WARN_ON(!list_empty(&sw_context->ctx_list));
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003837 mutex_unlock(&dev_priv->cmdbuf_mutex);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003838
3839 /*
Deepak Rawat680360a2019-02-13 13:20:42 -08003840 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
3841 * in resource destruction paths.
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003842 */
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003843 vmw_validation_unref_lists(&val_ctx);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003844out_free_header:
3845 if (header)
3846 vmw_cmdbuf_header_free(header);
Sinclair Yehc906965d2017-07-05 01:49:32 -07003847out_free_fence_fd:
3848 if (out_fence_fd >= 0)
3849 put_unused_fd(out_fence_fd);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003850
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003851 return ret;
3852}
3853
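/*
 * Hypothetical caller sketch (parameter choices are illustrative): a
 * kernel-generated batch skips the bounce-buffer copy by passing
 * kernel_commands, requests no userspace fence_rep, and takes the fence
 * directly via out_fence:
 *
 *	struct vmw_fence_obj *fence = NULL;
 *	int ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmds,
 *				      size, 0, SVGA3D_INVALID_ID, NULL,
 *				      &fence, 0);
 *	if (ret == 0 && fence)
 *		vmw_fence_obj_unreference(&fence);
 */
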
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003854/**
3855 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
3856 *
3857 * @dev_priv: The device private structure.
3858 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003859 * This function is called to idle the fifo and unpin the query buffer if the
3860 * normal way to do this hits an error, which should typically be extremely
3861 * rare.
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003862 */
3863static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
3864{
Deepak Rawat5724f892019-02-11 11:46:27 -08003865 VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003866
3867 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
Thomas Hellstrom459d0fa2015-06-26 00:25:37 -07003868 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
3869 if (dev_priv->dummy_query_bo_pinned) {
3870 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
3871 dev_priv->dummy_query_bo_pinned = false;
3872 }
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003873}
3874
3876/**
Deepak Rawat680360a2019-02-13 13:20:42 -08003877 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
3878 * bo.
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003879 *
3880 * @dev_priv: The device private structure.
Deepak Rawat680360a2019-02-13 13:20:42 -08003881 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
3882 * query barrier that flushes all queries touching the current buffer pointed to
3883 * by @dev_priv->pinned_bo.
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003884 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003885 * This function should be used to unpin the pinned query bo, or as a query
3886 * barrier when we need to make sure that all queries have finished before the
3887 * next fifo command. (For example on hardware context destructions where the
3888 * hardware may otherwise leak unfinished queries).
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003889 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003890 * This function does not return any failure codes, but makes attempts to do
3891 * safe unpinning in case of errors.
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003892 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003893 * The function will synchronize on the previous query barrier, and will thus
3894 * not finish until that barrier has executed.
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003895 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003896 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
3897 * calling this function.
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003898 */
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003899void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
3900 struct vmw_fence_obj *fence)
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003901{
3902 int ret = 0;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003903 struct vmw_fence_obj *lfence = NULL;
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003904 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003905
3906 if (dev_priv->pinned_bo == NULL)
3907 goto out_unlock;
3908
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003909 ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
3910 false);
3911 if (ret)
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003912 goto out_no_reserve;
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003913
3914 ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
3915 false);
3916 if (ret)
3917 goto out_no_reserve;
3918
3919 ret = vmw_validation_bo_reserve(&val_ctx, false);
3920 if (ret)
3921 goto out_no_reserve;
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003922
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003923 if (dev_priv->query_cid_valid) {
3924 BUG_ON(fence != NULL);
3925 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003926 if (ret)
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003927 goto out_no_emit;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003928 dev_priv->query_cid_valid = false;
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003929 }
3930
Thomas Hellstrom459d0fa2015-06-26 00:25:37 -07003931 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
3932 if (dev_priv->dummy_query_bo_pinned) {
3933 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
3934 dev_priv->dummy_query_bo_pinned = false;
3935 }
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003936 if (fence == NULL) {
3937 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
3938 NULL);
3939 fence = lfence;
3940 }
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003941 vmw_validation_bo_fence(&val_ctx, fence);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003942 if (lfence != NULL)
3943 vmw_fence_obj_unreference(&lfence);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003944
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003945 vmw_validation_unref_lists(&val_ctx);
Thomas Hellstromf1d34bf2018-06-19 15:02:16 +02003946 vmw_bo_unreference(&dev_priv->pinned_bo);
Deepak Rawat680360a2019-02-13 13:20:42 -08003947
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003948out_unlock:
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003949 return;
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003950out_no_emit:
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003951 vmw_validation_bo_backoff(&val_ctx);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003952out_no_reserve:
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003953 vmw_validation_unref_lists(&val_ctx);
3954 vmw_execbuf_unpin_panic(dev_priv);
Thomas Hellstromf1d34bf2018-06-19 15:02:16 +02003955 vmw_bo_unreference(&dev_priv->pinned_bo);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003956}
3957
3958/**
Deepak Rawat680360a2019-02-13 13:20:42 -08003959 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003960 *
3961 * @dev_priv: The device private structure.
3962 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003963 * This function should be used to unpin the pinned query bo, or as a query
3964 * barrier when we need to make sure that all queries have finished before the
3965 * next fifo command. (For example on hardware context destructions where the
3966 * hardware may otherwise leak unfinished queries).
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003967 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003968 * This function does not return any failure codes, but makes attempts to do
3969 * safe unpinning in case of errors.
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003970 *
Deepak Rawat680360a2019-02-13 13:20:42 -08003971 * The function will synchronize on the previous query barrier, and will thus
3972 * not finish until that barrier has executed.
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003973 */
3974void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
3975{
3976 mutex_lock(&dev_priv->cmdbuf_mutex);
3977 if (dev_priv->query_cid_valid)
3978 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003979 mutex_unlock(&dev_priv->cmdbuf_mutex);
3980}
3981
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003982int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
3983 struct drm_file *file_priv, size_t size)
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003984{
3985 struct vmw_private *dev_priv = vmw_priv(dev);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003986 struct drm_vmw_execbuf_arg arg;
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003987 int ret;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003988 static const size_t copy_offset[] = {
3989 offsetof(struct drm_vmw_execbuf_arg, context_handle),
3990 sizeof(struct drm_vmw_execbuf_arg)};
Sinclair Yeh585851162017-07-05 01:45:40 -07003991 struct dma_fence *in_fence = NULL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003992
3993 if (unlikely(size < copy_offset[0])) {
Deepak Rawat5724f892019-02-11 11:46:27 -08003994 VMW_DEBUG_USER("Invalid command size, ioctl %d\n",
3995 DRM_VMW_EXECBUF);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003996 return -EINVAL;
3997 }
3998
3999 if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
4000 return -EFAULT;
Thomas Hellstrom922ade02011-10-04 20:13:17 +02004001
4002 /*
Deepak Rawat680360a2019-02-13 13:20:42 -08004003 * Extend the ioctl argument while maintaining backwards compatibility:
4004 * We take different code paths depending on the value of arg.version.
Thomas Hellstrom922ade02011-10-04 20:13:17 +02004005 */
Thomas Hellstromd80efd52015-08-10 10:39:35 -07004006 if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4007 arg.version == 0)) {
Deepak Rawat5724f892019-02-11 11:46:27 -08004008 VMW_DEBUG_USER("Incorrect execbuf version.\n");
Thomas Hellstrom922ade02011-10-04 20:13:17 +02004009 return -EINVAL;
4010 }
4011
Thomas Hellstromd80efd52015-08-10 10:39:35 -07004012 if (arg.version > 1 &&
4013 copy_from_user(&arg.context_handle,
4014 (void __user *) (data + copy_offset[0]),
Deepak Rawat680360a2019-02-13 13:20:42 -08004015 copy_offset[arg.version - 1] - copy_offset[0]) != 0)
Thomas Hellstromd80efd52015-08-10 10:39:35 -07004016 return -EFAULT;
4017
4018 switch (arg.version) {
4019 case 1:
4020 arg.context_handle = (uint32_t) -1;
4021 break;
4022 case 2:
Thomas Hellstromd80efd52015-08-10 10:39:35 -07004023 default:
4024 break;
4025 }
4026
Sinclair Yeh585851162017-07-05 01:45:40 -07004027 /* If we imported a fence FD from elsewhere, wait on it */
4028 if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4029 in_fence = sync_file_get_fence(arg.imported_fence_fd);
4030
4031 if (!in_fence) {
Deepak Rawat5724f892019-02-11 11:46:27 -08004032 VMW_DEBUG_USER("Cannot get imported fence\n");
Sinclair Yeh585851162017-07-05 01:45:40 -07004033 return -EINVAL;
4034 }
4035
4036 ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
4037 if (ret)
4038 goto out;
4039 }
4040
Thomas Hellstrom294adf72014-02-27 12:34:51 +01004041 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
Thomas Hellstrom922ade02011-10-04 20:13:17 +02004042 if (unlikely(ret != 0))
4043 return ret;
4044
4045 ret = vmw_execbuf_process(file_priv, dev_priv,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07004046 (void __user *)(unsigned long)arg.commands,
4047 NULL, arg.command_size, arg.throttle_us,
4048 arg.context_handle,
4049 (void __user *)(unsigned long)arg.fence_rep,
Deepak Rawat680360a2019-02-13 13:20:42 -08004050 NULL, arg.flags);
4051
Thomas Hellstrom5151adb2015-03-09 01:56:21 -07004052 ttm_read_unlock(&dev_priv->reservation_sem);
Thomas Hellstrom922ade02011-10-04 20:13:17 +02004053 if (unlikely(ret != 0))
Sinclair Yeh585851162017-07-05 01:45:40 -07004054 goto out;
Thomas Hellstrom922ade02011-10-04 20:13:17 +02004055
4056 vmw_kms_cursor_post_execbuf(dev_priv);
4057
Sinclair Yeh585851162017-07-05 01:45:40 -07004058out:
4059 if (in_fence)
4060 dma_fence_put(in_fence);
4061 return ret;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00004062}
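
/*
 * Hedged userspace sketch of driving this ioctl (struct and macro names
 * are assumed from vmwgfx_drm.h and libdrm, not defined in this file):
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (uintptr_t)cmd_buf,
 *		.command_size = cmd_len,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *		.fence_rep = (uintptr_t)&rep,
 *		.context_handle = SVGA3D_INVALID_ID,	// no DX context
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg) == 0 &&
 *	    rep.error == 0)
 *		;	// rep.handle / rep.seqno are now valid
 */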