// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/*
 * Helper macro to get dx_ctx_node if available; otherwise print an error
 * message. This is for use in command verifier functions, where the command
 * is invalid if dx_ctx_node is not set.
 */
#define VMW_GET_CTX_NODE(__sw_context) \
({ \
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
		__sw_context->dx_ctx_node; \
	}); \
})

#define VMW_DECLARE_CMD_VAR(__var, __type) \
	struct { \
		SVGA3dCmdHeader header; \
		__type body; \
	} __var

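/*
 * Illustrative sketch of how the two helper macros above combine in a
 * typical command verifier. "SVGA3dCmdDXExample" and the function name are
 * hypothetical; real verifiers following this pattern appear later in this
 * file.
 */
#if 0 /* example only, not compiled */
static int vmw_cmd_dx_example(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	/* Expands to: struct { SVGA3dCmdHeader header; SVGA3dCmdDXExample body; } *cmd */
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXExample) =
		container_of(header, typeof(*cmd), header);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);

	/* VMW_GET_CTX_NODE() has already logged an error if no DX context is set. */
	if (!ctx_node)
		return -EINVAL;

	/* ... validate cmd->body against ctx_node here ... */
	return 0;
}
#endif
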
/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non-ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

Thomas Hellstromc0951b72012-11-20 12:19:35 +000074/**
Thomas Hellstroma1944032016-10-10 11:06:45 -070075 * enum vmw_resource_relocation_type - Relocation type for resources
76 *
77 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
78 * command stream is replaced with the actual id after validation.
79 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
80 * with a NOP.
Deepak Rawat680360a2019-02-13 13:20:42 -080081 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
82 * validation is -1, the command is replaced with a NOP. Otherwise no action.
Thomas Hellstroma1944032016-10-10 11:06:45 -070083 */
84enum vmw_resource_relocation_type {
85 vmw_res_rel_normal,
86 vmw_res_rel_nop,
87 vmw_res_rel_cond_nop,
88 vmw_res_rel_max
89};
90
/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to the command verifier function.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command, used for debug output.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable), #_cmd}

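/*
 * Illustrative sketch (the real command table lives outside this hunk, so
 * its exact contents here are an assumption): VMW_CMD_DEF() builds
 * designated initializers indexed by (_cmd - SVGA_3D_CMD_BASE), and
 * stringifies the command id to fill @cmd_name.
 */
#if 0 /* example only, not compiled */
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	/* ... one entry per SVGA 3D command ... */
};
#endif
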
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: The context resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - Calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node. Typically the binding manager
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
 * rcu-protected pointer to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		vmw_user_resource_noref_release();
		return 0;
	}

	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
					  dirty, (void **)&ctx_info,
					  &first_usage);
	vmw_user_resource_noref_release();
	if (ret)
		return ret;

	if (priv_size && first_usage) {
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
					      ctx_info);
		if (ret) {
			VMW_DEBUG_USER("Failed first usage context setup.\n");
			return ret;
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_res_cache_entry *rcache;
	enum vmw_res_type res_type = vmw_res_type(res);
	void *ptr;
	int ret;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
					  &ptr, NULL);
	if (ret)
		return ret;

	vmw_execbuf_rcache_update(rcache, res, ptr);

	return 0;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
					    vmw_view_dirtying(view));
	if (ret)
		return ret;

	return vmw_execbuf_res_noctx_val_add(sw_context, view,
					     VMW_RES_DIRTY_NONE);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
 * to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	/* Add all cotables to the validation list. */
	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < cotable_max; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_SET);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_noctx_val_add
				(sw_context, entry->res,
				 vmw_binding_dirtying(entry->bt));
		if (unlikely(ret != 0))
			break;
	}

	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context holding the relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

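/*
 * Illustrative sketch of registering a conditional NOP relocation from a
 * command verifier; the exact call site is an assumption modeled on the
 * view handling elsewhere in this file. If @view still has id -1 after
 * validation, vmw_resource_relocations_apply() below turns the whole
 * command into a NOP.
 */
#if 0 /* example only, not compiled */
	ret = vmw_resource_relocation_add(sw_context, view,
					  vmw_ptr_diff(sw_context->buf_start,
						       &cmd->header.id),
					  vmw_res_rel_cond_nop);
#endif
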
/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need not
 * be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource. Optionally populated on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		res = vmw_user_resource_noref_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
		if (unlikely(ret != 0))
			return ret;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

	return 0;
}

/**
 * vmw_rebind_all_dx_query - Rebind the DX query MOB associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}

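/*
 * Illustrative sketch of a caller (modeled on the DX set-shader-resources
 * handling found later in the full file; the surrounding declarations and
 * exact parameters here are assumptions): the view ids trail the fixed
 * command body, so &cmd[1] is passed as the id array.
 */
#if 0 /* example only, not compiled */
	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view, 0);
#endif
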
/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command submission
 * context's resource cache and hence the last resource of that type to be
 * processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If so,
 * the function prepares the state of @sw_context for switching pinned buffers
 * after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001070/**
1071 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1072 *
1073 * @dev_priv: The device private structure.
1074 * @sw_context: The software context used for this command submission batch.
1075 *
1076 * This function will check if we're switching query buffers, and will then,
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001077 * issue a dummy occlusion query wait used as a query barrier. When the fence
Deepak Rawat680360a2019-02-13 13:20:42 -08001078 * object following that query wait has signaled, we are sure that all preceding
1079 * queries have finished, and the old query buffer can be unpinned. However,
1080 * since both the new query buffer and the old one are fenced with that fence,
1081 * we can do an asynchronus unpin now, and be sure that the old query buffer
1082 * won't be moved until the fence has signaled.
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001083 *
1084 * As mentioned above, both the new - and old query buffers need to be fenced
1085 * using a sequence emitted *after* calling this function.
1086 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We also pin the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy queries
			 * in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}
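
/*
 * Illustrative only: a minimal sketch of how the prepare/commit pair above
 * is meant to pair up during a command submission. The steps in between are
 * simplified placeholders, not the driver's actual submission path.
 *
 *	ret = vmw_query_bo_switch_prepare(dev_priv, new_query_bo, sw_context);
 *	// ... validate buffers and submit the command batch ...
 *	vmw_query_bo_switch_commit(dev_priv, sw_context);
 *	// ... emit a fence covering both the old and new query buffers ...
 */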

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate. The
 * former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
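
/*
 * Typical use from a command validator (a sketch; &cmd->body.mobid stands
 * in for whichever MOB id field the command being verified carries):
 *
 *	struct vmw_buffer_object *vmw_bo;
 *
 *	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
 *				    &vmw_bo);
 *	if (unlikely(ret != 0))
 *		return ret;
 */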

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the DMA buffer identified by the user-space
 * handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use GMR region.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
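
/*
 * For orientation: after the referenced buffers have been validated and
 * placed, the translations recorded by the two helpers above are applied in
 * one pass. A rough, assumption-based outline of that pass follows; the
 * authoritative version is vmw_apply_relocations(), and mob_id_of() /
 * fill_guest_ptr() are placeholders rather than real helpers in this driver.
 *
 *	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
 *		if (reloc->mob_loc)
 *			*reloc->mob_loc = mob_id_of(reloc->vbo);
 *		else
 *			fill_guest_ptr(reloc->vbo, reloc->location);
 *	}
 */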

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by cmd->body.mobid and put it on the
	 * relocation list so its kernel mode MOB ID can be filled in later.
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		/*
		 * On guest-backed devices, rewrite the legacy command in
		 * place as its guest-backed equivalent and validate that
		 * instead.
		 */
		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

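/**
 * vmw_cmd_dma - Validate SVGA_3D_CMD_SURFACE_DMA command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Verifies the DMA suffix, makes sure the transfer does not cross the
 * boundaries of the guest buffer object, clamps the maximum offset to the
 * buffer size, validates the destination surface and hands the command to
 * the cursor snooping code.
 */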
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);

	return 0;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

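/*
 * For reference, the variable-length layout vmw_cmd_draw() walks, as
 * implied by the size checks above (a sketch, not a normative definition
 * of the SVGA3D protocol):
 *
 *	SVGA3dCmdHeader		header
 *	SVGA3dCmdDrawPrimitives	body
 *	SVGA3dVertexDecl	decl[body.numVertexDecls]
 *	SVGA3dPrimitiveRange	range[body.numRanges]
 */
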
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_buffer_object *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: The resource whose backup buffer is to be switched.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res, uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_buffer_object *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper around
 * vmw_cmd_res_switch_backup() with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter, &cmd->body.sid,
				     &cmd->body.mobid, 0);
}

/**
 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

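/*
 * A note on the dirty argument used by the helpers above, as inferred from
 * its use in this file rather than from a formal definition:
 * VMW_RES_DIRTY_SET marks the resource as about to be modified by the
 * device, VMW_RES_DIRTY_CLEAR indicates the command brings the resource and
 * its backup buffer back in sync (update, readback, invalidate), and
 * VMW_RES_DIRTY_NONE leaves the dirty state untouched.
 */
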
/**
 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1, cmd->body.type,
				    size, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	/*
	 * The compat shader is managed by the kernel; replace the device
	 * command with a NOP when relocations are applied.
	 */
	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
				cmd->body.type, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		/*
		 * This is the compat shader path - per-device guest-backed
		 * shaders, but user-space thinks they are per-context
		 * host-backed shaders.
		 */
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid, cmd->body.type);
		if (!IS_ERR(res)) {
			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_NONE);
			if (unlikely(ret != 0))
				return ret;

			ret = vmw_resource_relocation_add
				(sw_context, res,
				 vmw_ptr_diff(sw_context->buf_start,
					      &cmd->body.shid),
				 vmw_res_rel_normal);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					VMW_RES_DIRTY_NONE,
					user_shader_converter, &cmd->body.shid,
					&res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter, &cmd->body.shid,
				     &cmd->body.mobid, cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
	SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
		SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;

	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= max_shader_num ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
			       (unsigned int) cmd->body.type,
			       (unsigned int) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
			binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
		container_of(header, typeof(*cmd), header);
	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;

	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= max_allowed) {
		VMW_DEBUG_USER("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

/**
 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= max_allowed ||
	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
						    VMW_RES_DIRTY_NONE);
		if (ret)
			return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
2261 SVGA3dCmdHeader *header)
2262{
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002263 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002264 struct vmw_ctx_bindinfo_vb binding;
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002265 struct vmw_resource *res;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002266 struct {
2267 SVGA3dCmdHeader header;
2268 SVGA3dCmdDXSetVertexBuffers body;
2269 SVGA3dVertexBuffer buf[];
2270 } *cmd;
2271 int i, ret, num;
2272
Deepak Rawat6f74fd92019-02-08 12:53:57 -08002273 if (!ctx_node)
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002274 return -EINVAL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002275
2276 cmd = container_of(header, typeof(*cmd), header);
2277 num = (cmd->header.size - sizeof(cmd->body)) /
2278 sizeof(SVGA3dVertexBuffer);
2279 if ((u64)num + (u64)cmd->body.startBuffer >
2280 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
Deepak Rawat5724f892019-02-11 11:46:27 -08002281 VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002282 return -EINVAL;
2283 }
2284
2285 for (i = 0; i < num; i++) {
2286 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
Thomas Hellstroma9f58c42019-02-20 08:21:26 +01002287 VMW_RES_DIRTY_NONE,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002288 user_surface_converter,
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002289 &cmd->buf[i].sid, &res);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002290 if (unlikely(ret != 0))
2291 return ret;
2292
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002293 binding.bi.ctx = ctx_node->ctx;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002294 binding.bi.bt = vmw_ctx_binding_vb;
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02002295 binding.bi.res = res;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002296 binding.offset = cmd->buf[i].offset;
2297 binding.stride = cmd->buf[i].stride;
2298 binding.slot = i + cmd->body.startBuffer;
2299
Deepak Rawat680360a2019-02-13 13:20:42 -08002300 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002301 }
2302
2303 return 0;
2304}
2305
/**
 * vmw_cmd_dx_set_index_buffer - Validate SVGA_3D_CMD_DX_SET_INDEX_BUFFER
 * command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
		container_of(header, typeof(*cmd), header);
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);
	int ret;

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
				    0, &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
				     num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
				  cmd->body.renderTargetViewId);

	return PTR_ERR_OR_ZERO(ret);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
				  cmd->body.depthStencilViewId);

	return PTR_ERR_OR_ZERO(ret);
}

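/**
 * vmw_cmd_dx_view_define - Validate SVGA_3D_CMD_DX_DEFINE_*_VIEW commands
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */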
static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *srf;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have the
	 * same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (!ctx_node)
		return -EINVAL;

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
		VMW_DEBUG_USER("Invalid surface id.\n");
		return -EINVAL;
	}
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->sid, &srf);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
			    cmd->defined_id, header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_so_target binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		VMW_DEBUG_USER("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_SET,
					user_surface_converter,
					&cmd->targets[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_so_target;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

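/**
 * vmw_cmd_dx_so_define - Validate SVGA_3D_CMD_DX_DEFINE_* state object
 * commands
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */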
static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->sid, NULL);
}

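/**
 * vmw_cmd_dx_cid_check - Validate a command that only requires a DX context
 * to be set
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */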
static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);

	if (!ctx_node)
		return -EINVAL;

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - Validate a view remove command and schedule the
 * view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this command
 * batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res, &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(sw_context, view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}

/**
 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					VMW_RES_DIRTY_SET,
					user_context_converter, &cmd->body.cid,
					&ctx);
		if (ret)
			return ret;
	} else {
		struct vmw_ctx_validation_info *ctx_node =
			VMW_GET_CTX_NODE(sw_context);

		if (!ctx_node)
			return -EINVAL;

		ctx = ctx_node->ctx;
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
					    VMW_RES_DIRTY_NONE);
	if (ret) {
		VMW_DEBUG_USER("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_sr,
				  cmd->body.shaderResourceViewId);

	return PTR_ERR_OR_ZERO(ret);
}

/**
 * vmw_cmd_dx_transfer_from_buffer - Validate
 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
		container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.destSid, NULL);
}

/**
 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
		container_of(header, typeof(*cmd), header);

	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.surface.sid, NULL);
}

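/**
 * vmw_cmd_sm5 - Validate a command that is only allowed on SM5 capable
 * devices
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */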
static int vmw_cmd_sm5(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return 0;
}

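/**
 * vmw_cmd_sm5_view_define - Validate a view define command on SM5 capable
 * devices
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */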
static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
}

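/**
 * vmw_cmd_sm5_view_remove - Validate a view remove command on SM5 capable
 * devices
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */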
static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
}

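/**
 * vmw_cmd_clear_uav_uint - Validate the SVGA3dCmdDXClearUAViewUint command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */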
static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearUAViewUint body;
	} *cmd = container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
				  cmd->body.uaViewId);

	return PTR_ERR_OR_ZERO(ret);
}

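/**
 * vmw_cmd_clear_uav_float - Validate the SVGA3dCmdDXClearUAViewFloat command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */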
static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearUAViewFloat body;
	} *cmd = container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
				  cmd->body.uaViewId);

	return PTR_ERR_OR_ZERO(ret);
}

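/**
 * vmw_cmd_set_uav - Validate the SVGA3dCmdDXSetUAViews command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */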
static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetUAViews body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dUAViewId);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (num_uav > SVGA3D_MAX_UAVIEWS) {
		VMW_DEBUG_USER("Invalid UAV binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
				    num_uav, 0);
	if (ret)
		return ret;

	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
				  cmd->body.uavSpliceIndex);

	return ret;
}

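/**
 * vmw_cmd_set_cs_uav - Validate the SVGA3dCmdDXSetCSUAViews command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */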
static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCSUAViews body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dUAViewId);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (num_uav > SVGA3D_MAX_UAVIEWS) {
		VMW_DEBUG_USER("Invalid UAV binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
				    num_uav, 0);
	if (ret)
		return ret;

	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
				  cmd->body.startIndex);

	return ret;
}

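/**
 * vmw_cmd_dx_define_streamoutput - Validate the
 * SVGA3dCmdDXDefineStreamOutputWithMob command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */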
static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
					  struct vmw_sw_context *sw_context,
					  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineStreamOutputWithMob body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
	ret = vmw_cotable_notify(res, cmd->body.soid);
	if (ret)
		return ret;

	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
				       cmd->body.soid,
				       &sw_context->staged_cmd_res);
}

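/**
 * vmw_cmd_dx_destroy_streamoutput - Validate the
 * SVGA3dCmdDXDestroyStreamOutput command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */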
static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	/*
	 * When the device does not support SM5, the streamoutput-with-mob
	 * commands are not available to user-space. Simply return in this
	 * case.
	 */
	if (!has_sm5_context(dev_priv))
		return 0;

	/*
	 * On an SM5 capable device, if the lookup fails then user-space
	 * probably used the old streamoutput define command. Return without
	 * an error.
	 */
	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res))
		return 0;

	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
					  &sw_context->staged_cmd_res);
}

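/**
 * vmw_cmd_dx_bind_streamoutput - Validate the SVGA3dCmdDXBindStreamOutput
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */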
static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find streamoutput to bind.\n");
		return PTR_ERR(res);
	}

	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);

	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
					    VMW_RES_DIRTY_NONE);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}

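/**
 * vmw_cmd_dx_set_streamoutput - Validate the SVGA3dCmdDXSetStreamOutput
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */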
static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct vmw_ctx_bindinfo_so binding;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	if (cmd->body.soid == SVGA3D_INVALID_ID)
		return 0;

	/*
	 * When the device does not support SM5, the streamoutput-with-mob
	 * commands are not available to user-space. Simply return in this
	 * case.
	 */
	if (!has_sm5_context(dev_priv))
		return 0;

	/*
	 * On an SM5 capable device, if the lookup fails then user-space
	 * probably used the old streamoutput define command. Return without
	 * an error.
	 */
	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res))
		return 0;

	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
					    VMW_RES_DIRTY_NONE);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_so;
	binding.slot = 0; /* Only one SO set to context at a time. */

	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
			binding.slot);

	return ret;
}

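/**
 * vmw_cmd_indexed_instanced_indirect - Validate the
 * SVGA3dCmdDXDrawIndexedInstancedIndirect command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */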
static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct vmw_draw_indexed_instanced_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

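/**
 * vmw_cmd_instanced_indirect - Validate the SVGA3dCmdDXDrawInstancedIndirect
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */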
static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_draw_instanced_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDrawInstancedIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

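/**
 * vmw_cmd_dispatch_indirect - Validate the SVGA3dCmdDXDispatchIndirect
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */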
static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_dispatch_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDispatchIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

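/**
 * vmw_cmd_check_not_3d - Validate a legacy (non-3D) SVGA command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: remaining size of the command stream. Out: size of this command.
 */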
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
			       cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/* SM commands */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
3498 &vmw_cmd_dx_set_streamoutput, true, false, true),
Charmaine Lee2f633e52015-08-10 10:45:11 -07003499 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3500 &vmw_cmd_dx_set_so_targets, true, false, true),
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003501 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3502 &vmw_cmd_dx_cid_check, true, false, true),
3503 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3504 &vmw_cmd_dx_cid_check, true, false, true),
Neha Bhende0fca749e2015-08-10 10:51:07 -07003505 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3506 &vmw_cmd_buffer_copy_check, true, false, true),
3507 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3508 &vmw_cmd_pred_copy_check, true, false, true),
Charmaine Lee1f982e42016-10-10 10:37:03 -07003509 VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3510 &vmw_cmd_dx_transfer_from_buffer,
3511 true, false, true),
Neha Bhende0d81d342018-06-18 17:14:56 -07003512 VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3513 true, false, true),
Deepak Rawat5e8ec0d2018-12-13 13:51:08 -08003514
3515 /*
3516 * SM5 commands
3517 */
3518 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
3519 true, false, true),
3520 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
3521 true, false, true),
3522 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
3523 true, false, true),
3524 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
3525 &vmw_cmd_clear_uav_float, true, false, true),
3526 VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
3527 false, true),
3528 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
3529 true),
Deepak Rawatb6fad732018-12-13 14:00:18 -08003530 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
3531 &vmw_cmd_indexed_instanced_indirect, true, false, true),
3532 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
3533 &vmw_cmd_instanced_indirect, true, false, true),
3534 VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
3535 VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
3536 &vmw_cmd_dispatch_indirect, true, false, true),
Deepak Rawat5e8ec0d2018-12-13 13:51:08 -08003537 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
3538 false, true),
Deepak Rawatb6fad732018-12-13 14:00:18 -08003539 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
3540 &vmw_cmd_sm5_view_define, true, false, true),
Deepak Rawate8bead92018-12-13 14:04:31 -08003541 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
3542 &vmw_cmd_dx_define_streamoutput, true, false, true),
3543 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
3544 &vmw_cmd_dx_bind_streamoutput, true, false, true),
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003545};
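
/*
 * Each entry above is indexed directly by (cmd_id - SVGA_3D_CMD_BASE), and
 * the three booleans in VMW_CMD_DEF() are the entry's user_allow, gb_disable
 * and gb_enable flags checked by vmw_cmd_check() below. A minimal,
 * illustrative sketch of the lookup:
 *
 *	if (cmd_id - SVGA_3D_CMD_BASE < SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)
 *		entry = &vmw_cmd_entries[cmd_id - SVGA_3D_CMD_BASE];
 */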

bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}
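
/*
 * A minimal sketch of how a command-buffer dumper might walk a batch with
 * vmw_cmd_describe(); everything here except vmw_cmd_describe() itself is
 * hypothetical:
 *
 *	const u8 *cur = buf;
 *	u32 bytes_left = buf_size, size;
 *	char const *name;
 *
 *	while (bytes_left > 0 && vmw_cmd_describe(cur, &size, &name) &&
 *	       size > 0 && size <= bytes_left) {
 *		pr_info("cmd %s (%u bytes)\n", name, size);
 *		cur += size;
 *		bytes_left -= size;
 *	}
 */

/**
 * vmw_cmd_check - Verify a single command in a submitted batch.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The command submission context.
 * @buf: Pointer to the current command in the bounce buffer.
 * @size: On input, the number of bytes remaining in the batch. On output,
 * the size of this command including its header.
 *
 * Looks the command up in @vmw_cmd_entries, rejects commands that are
 * unknown, truncated, privileged or mismatched with the device's
 * guest-backed-objects capability, and finally calls the per-command
 * verifier function.
 */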
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context, void *buf,
			 uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
			       cmd_id + SVGA_3D_CMD_BASE, ret);
		return ret;
	}

	return 0;
out_invalid:
	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}
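
/**
 * vmw_cmd_check_all - Verify an entire command batch.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The command submission context.
 * @buf: Pointer to the start of the (bounced) command batch.
 * @size: Batch size in bytes.
 *
 * Walks the batch one command at a time, handing each command to
 * vmw_cmd_check(). The batch must end exactly on a command boundary,
 * otherwise the verifier is considered out of sync and the batch is
 * rejected.
 */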
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context, void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		VMW_DEBUG_USER("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}
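
/**
 * vmw_free_relocations - Reset the buffer object relocation list.
 *
 * @sw_context: The command submission context.
 *
 * The relocations themselves live in validation context memory, so only the
 * list head needs to be reinitialized here.
 */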
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}
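
/**
 * vmw_apply_relocations - Patch the command stream with final buffer object
 * placements.
 *
 * @sw_context: The command submission context holding the relocation list.
 *
 * For each relocation recorded during command checking, write the validated
 * placement back into the command stream: a VRAM offset within the
 * framebuffer GMR, a GMR id, or a mob id, depending on where the buffer
 * object ended up.
 */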
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->base;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}
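
/**
 * vmw_resize_cmd_bounce - Make sure the command bounce buffer can hold a
 * batch of the given size.
 *
 * @sw_context: The command submission context owning the bounce buffer.
 * @size: Required size in bytes.
 *
 * The buffer grows geometrically (by roughly 3/2, page-aligned) to avoid
 * reallocating on every slightly larger submission. As a worked example,
 * assuming the usual 32K VMWGFX_CMD_BOUNCE_INIT_SIZE, a 100K request grows
 * the buffer through 48K and 72K to 108K before allocating. Old contents
 * are not preserved across a resize.
 */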
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * @file_priv: Pointer to the struct drm_file of the calling process; must be
 * non-NULL whenever @p_handle is non-NULL.
 * @dev_priv: Pointer to a device private struct.
 * @p_fence: Location in which to return the created fence object.
 * @p_handle: If non-NULL, location in which to return a user-space handle to
 * the fence.
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * Creates a user-space handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
					 false, VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
 * the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
 * @sync_file: Only used to clean up in case of an error in this function.
 *
 * This function copies fence information to user-space. If copying fails, the
 * user-space struct drm_vmw_fence_rep::error member is hopefully left
 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
 * will hopefully be detected.
 *
 * Also if copying fails, user-space will be unable to signal the fence object
 * so we wait for it immediately, and then unreference the user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp, int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence, uint32_t fence_handle,
			    int32_t out_fence_fd, struct sync_file *sync_file)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not seeing
	 * fence_rep::error filled in. Typically user-space would have pre-set
	 * that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync and unreference the
	 * handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		if (sync_file)
			fput(sync_file->file);

		if (fence_rep.fd != -1) {
			put_unused_fd(fence_rep.fd);
			fence_rep.fd = -1;
		}

		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
					  TTM_REF_USAGE);
		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
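
/*
 * A minimal, hypothetical sketch of the user-space side of the scheme
 * described above: the caller preloads fence_rep.error so that a failed
 * copy_to_user() in the kernel is detectable after the ioctl returns:
 *
 *	struct drm_vmw_fence_rep fence_rep = { .error = -EFAULT };
 *
 *	arg.fence_rep = (unsigned long) &fence_rep;
 *	drmCommandWriteRead(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (fence_rep.error == -EFAULT)
 *		;	// the kernel never filled the struct in
 */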

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch pointed to
 * by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands, u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
					  sw_context->dx_ctx_node->ctx->id);
	else
		cmd = VMW_FIFO_RESERVE(dev_priv, command_size);

	if (!cmd)
		return -ENOMEM;

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
 * command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
				       header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel pointer to the commands, or NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and copies
 * the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 *
 * If command buffers could not be used, the function will return the value of
 * @kernel_commands as passed in. That value may be NULL. In that case, the
 * value of *@header will be set to NULL.
 *
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands, u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		VMW_DEBUG_USER("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
					   header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands, command_size);
	if (ret) {
		VMW_DEBUG_USER("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}
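
/**
 * vmw_execbuf_tie_context - Set up the submission context for a DX context.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The command submission context.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID if the
 * submission is not tied to a DX context.
 *
 * Looks up and validates the DX context resource, and caches its resource
 * validation node and command buffer resource manager in @sw_context for use
 * during command checking.
 */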
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource *res;
	int ret;
	unsigned int size;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
	ret = vmw_validation_preload_res(sw_context->ctx, size);
	if (ret)
		return ret;

	res = vmw_user_resource_noref_lookup_handle
		(dev_priv, sw_context->fp->tfile, handle,
		 user_context_converter);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
			       (unsigned int) handle);
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
	if (unlikely(ret != 0))
		return ret;

	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
	sw_context->man = vmw_context_res_man(res);

	return 0;
}
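
/**
 * vmw_execbuf_process - Execute a user-space command batch.
 *
 * @file_priv: Pointer to the struct drm_file of the caller.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space pointer to the command batch, or NULL if
 * @kernel_commands is already populated.
 * @kernel_commands: Kernel pointer to the command batch, or NULL to copy
 * from @user_commands.
 * @command_size: Size of the command batch in bytes.
 * @throttle_us: Lag threshold to throttle on, or 0 to skip throttling.
 * @dx_context_handle: Handle of the DX context to submit against, or
 * SVGA3D_INVALID_ID.
 * @user_fence_rep: User-space address to receive fence information, or NULL.
 * @out_fence: If non-NULL, the created fence object is returned here instead
 * of being unreferenced.
 * @flags: DRM_VMW_EXECBUF_FLAG_* flags.
 *
 * This is the core of command submission: the batch is (optionally) bounced
 * into kernel memory, verified and patched, its resources and buffer objects
 * reserved and validated, then submitted through either the fifo or the
 * command buffer manager, and finally fenced.
 *
 * Return: 0 on success, a negative error code on failure.
 */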
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands, void *kernel_commands,
			uint32_t command_size, uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence, uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_cmdbuf_header *header;
	uint32_t handle = 0;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);

	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);

	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			VMW_DEBUG_USER("Failed to get a fence fd.\n");
			return out_fence_fd;
		}
	}

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (ret)
			goto out_free_fence_fd;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands)) {
		ret = PTR_ERR(kernel_commands);
		goto out_free_fence_fd;
	}

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
				     command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			VMW_DEBUG_USER("Failed copying commands.\n");
			goto out_unlock;
		}

		kernel_commands = sw_context->cmd_bounce;
	} else if (!header) {
		sw_context->kernel = true;
	}

	sw_context->fp = vmw_fpriv(file_priv);
	INIT_LIST_HEAD(&sw_context->ctx_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->res_relocations);
	INIT_LIST_HEAD(&sw_context->bo_relocations);

	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;

		sw_context->res_ht_initialized = true;
	}

	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	sw_context->ctx = &val_ctx;
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_reserve(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validation_res_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_validation_drop_ht(&val_ctx);

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
	 */
	if (ret != 0)
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");

	vmw_execbuf_bindings_commit(sw_context, false);
	vmw_bind_dx_query_mob(sw_context);
	vmw_validation_res_unreserve(&val_ctx, false);

	vmw_validation_bo_fence(sw_context->ctx, fence);

	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	/*
	 * If anything fails here, give up trying to export the fence and do a
	 * sync since the user mode will not be able to sync the fence itself.
	 * This ensures we are still functionally correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			VMW_DEBUG_USER("Sync file create failed for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle, out_fence_fd,
				    sync_file);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
	vmw_execbuf_bindings_commit(sw_context, true);
	vmw_validation_res_unreserve(&val_ctx, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	vmw_validation_drop_ht(&val_ctx);
	WARN_ON(!list_empty(&sw_context->ctx_list));
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer if the
 * normal way to do this hits an error, which should typically be extremely
 * rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
 * bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
 * query barrier that flushes all queries touching the current buffer pointed to
 * by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
 * calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct vmw_fence_obj *lfence = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_no_reserve;

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (ret)
			goto out_no_emit;
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	vmw_validation_bo_fence(&val_ctx, fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	vmw_validation_unref_lists(&val_ctx);
	vmw_bo_unreference(&dev_priv->pinned_bo);

out_unlock:
	return;
out_no_emit:
	vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
	vmw_validation_unref_lists(&val_ctx);
	vmw_execbuf_unpin_panic(dev_priv);
	vmw_bo_unreference(&dev_priv->pinned_bo);
}
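
/*
 * Note the validation-context pattern used above. A condensed, illustrative
 * sketch of the same sequence, using only the helpers called in this file:
 *
 *	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 *
 *	ret = vmw_validation_add_bo(&val_ctx, bo, false, false);
 *	if (!ret)
 *		ret = vmw_validation_bo_reserve(&val_ctx, false);
 *	if (!ret) {
 *		... emit commands touching the bo ...
 *		vmw_validation_bo_fence(&val_ctx, fence);
 *	}
 *	vmw_validation_unref_lists(&val_ctx);
 */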

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
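
/**
 * vmw_execbuf_ioctl - The DRM_VMW_EXECBUF ioctl entry point.
 *
 * @dev: The drm device.
 * @data: The ioctl argument, a struct drm_vmw_execbuf_arg.
 * @file_priv: The calling file.
 *
 * Validates the argument version, waits on an imported fence fd if one was
 * supplied, and hands the batch to vmw_execbuf_process().
 */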
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = data;
	int ret;
	struct dma_fence *in_fence = NULL;

	/*
	 * Extend the ioctl argument while maintaining backwards compatibility:
	 * we take different code paths depending on the value of arg->version.
	 *
	 * Note: The ioctl argument is extended and zeropadded by core DRM.
	 */
	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
		     arg->version == 0)) {
		VMW_DEBUG_USER("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	switch (arg->version) {
	case 1:
		/* For v1, core DRM has extended + zeropadded the data. */
		arg->context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		/* For v2 and later, core DRM will have correctly copied it. */
		break;
	}

	/* If a fence FD was imported from elsewhere, wait on it. */
	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg->imported_fence_fd);

		if (!in_fence) {
			VMW_DEBUG_USER("Cannot get imported fence\n");
			return -EINVAL;
		}

		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
		if (ret)
			goto out;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  arg->context_handle,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL, arg->flags);

	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);
	return ret;
}