// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/*
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
 * after validation is -1, the command is replaced with a NOP. Otherwise no
 * action.
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Byte offset into the command buffer where the id that needs
 * fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};

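/*
 * Note that @offset (29 bits) and @rel_type (3 bits) are deliberately
 * packed into a single 32-bit word; the BUILD_BUG_ON()s in
 * vmw_resource_relocations_apply() below verify that SVGA_CB_MAX_SIZE
 * and vmw_res_rel_max still fit within those widths.
 */
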
/*
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable), #_cmd}

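/*
 * For illustration only: an entry in the command dispatch table built with
 * the macro above could look like
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false),
 *
 * which expands to a designated initializer indexed by the command id
 * relative to SVGA_3D_CMD_BASE, and records "SVGA_3D_CMD_SURFACE_COPY"
 * as @cmd_name via the stringified #_cmd argument.
 */
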
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}

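/*
 * The byte offsets stored in struct vmw_resource_relocation are computed
 * with vmw_ptr_diff() relative to sw_context->buf_start, so the relocations
 * can later be applied to a different copy of the same command buffer.
 */
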
/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state
 * changes should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);
		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: The context resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings =
			vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;
out_err:
	return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 *
 * Returns: Zero on success, negative error code on failure.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	size_t priv_size;

	/*
	 * If the resource is a context, set up structures to track
	 * context bindings.
	 */
	priv_size = (res_type == vmw_res_dx_context ||
		     (res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(*ctx_info) : 0;

	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
					  (void **)&ctx_info, &first_usage);
	if (ret)
		return ret;

	if (priv_size && first_usage) {
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
					      ctx_info);
		if (ret)
			return ret;
	}

	/* Cache info about the last added resource */
	rcache = &sw_context->res_cache[res_type];
	rcache->res = res;
	rcache->private = ctx_info;
	rcache->valid = 1;
	rcache->valid_handle = 0;

	return ret;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise
	 * it may be swapped out when the view is validated.
	 */
	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view));
	if (ret)
		return ret;

	return vmw_resource_val_add(sw_context, view);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success and a negative error
 * code on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
			       enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return PTR_ERR(view);

	ret = vmw_view_res_val_add(sw_context, view);
	vmw_resource_unreference(&view);

	return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_resource_val_add(sw_context, res);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		/* entry->res is not refcounted */
		res = vmw_resource_reference_unless_doomed(entry->res);
		if (unlikely(res == NULL))
			continue;

		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_resource_val_add(sw_context, entry->res);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}

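/*
 * In outline, the resource relocation machinery above works in two passes:
 * while commands are parsed, vmw_resource_relocation_add() records where in
 * the command buffer a resource id (or a command that may have to be turned
 * into a NOP) lives; once validation has assigned final ids,
 * vmw_resource_relocations_apply() walks the list and patches either the id
 * or SVGA_3D_CMD_NOP into those locations.
 */
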
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

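/*
 * vmw_cmd_invalid() and vmw_cmd_ok() are stub verifier callbacks matching
 * the vmw_cmd_entry function signature: table entries for commands that
 * must be rejected, and for commands that need no extra checking,
 * respectively.
 */
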
/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 *
 * Return: Zero on success, negative error code on error
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 uint32_t *id_loc,
				 struct vmw_resource *res)
{
	int ret;

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_val_add(sw_context, res);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource, populated on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */
	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		struct vmw_resource *res = rcache->res;

		if (p_res)
			*p_res = res;

		return vmw_resource_relocation_add
			(sw_context, res,
			 vmw_ptr_diff(sw_context->buf_start, id_loc),
			 vmw_res_rel_normal);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
		dump_stack();
		return ret;
	}

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc, res);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	if (p_res)
		*p_res = res;

	if (rcache->valid && rcache->res == res) {
		rcache->valid_handle = true;
		rcache->handle = *id_loc;
	}

	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindAllQuery body;
	} *cmd;

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

	if (cmd == NULL) {
		DRM_ERROR("Failed to rebind queries.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_cmdbuf_res_manager *man;
	u32 i;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	man = sw_context->man;
	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_lookup(man, view_type, view_ids[i]);
			if (IS_ERR(view)) {
				DRM_ERROR("View not found.\n");
				return PTR_ERR(view);
			}

			ret = vmw_view_res_val_add(sw_context, view);
			if (ret) {
				DRM_ERROR("Could not add view to "
					  "validation list.\n");
				vmw_resource_unreference(&view);
				return ret;
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
		if (view)
			vmw_resource_unreference(&view);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		uint32_t cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command
 * submission context's resource cache and hence the last resource of that
 * type to be processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL
 * if it wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		DRM_ERROR("Illegal render target type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBufferCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXPredCopyRegion body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
		if (unlikely(new_query_bo->base.num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc) {
		ret = -ENOMEM;
		goto out_no_reloc;
	}

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;

out_no_reloc:
	vmw_bo_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc) {
		ret = -ENOMEM;
		goto out_no_reloc;
	}

	reloc->location = ptr;
	reloc->vbo = vmw_bo;

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;

out_no_reloc:
	vmw_bo_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

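/*
 * Note how the two translate helpers above fill in different members of
 * the union in struct vmw_relocation: vmw_translate_mob_ptr() records a
 * SVGAMobId location via @mob_loc, while vmw_translate_guest_ptr() records
 * a SVGAGuestPtr location via @location. Both kinds of relocation end up
 * on sw_context->bo_relocations.
 */
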
Sinclair Yehfd11a3c2015-08-10 10:56:15 -07001247
1248
/**
 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_dx_define_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineQuery q;
	} *cmd;
	int ret;
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *cotable_res;

	if (ctx_node == NULL) {
		DRM_ERROR("DX Context not set for query.\n");
		return -EINVAL;
	}

	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);

	if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

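	/*
	 * Notify the query cotable of the new query id so it can be
	 * resized to cover it before the command reaches the device.
	 */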
	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
	vmw_resource_unreference(&cotable_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID
 * with its backing MOB.  In this function, we take the user mode
 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
 * kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dx_bind_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindQuery q;
	} *cmd;
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later.
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
				    &vmw_bo);
	if (ret != 0)
		return ret;

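	/*
	 * Stash the MOB and DX context; the actual query binding is
	 * finalized later in the submission path, once the buffer has
	 * been validated.
	 */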
	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;

	vmw_bo_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

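	/*
	 * On guest-backed devices, legacy query commands are rewritten
	 * in place into their guest-backed equivalents. The BUG_ON below
	 * documents that both command layouts have the same size, which
	 * is what makes the in-place rewrite safe.
	 */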
	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

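	/*
	 * Prepare a switch of the device's current query result buffer
	 * if this command uses a new one; the switch itself is committed
	 * later in the submission path.
	 */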
	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_bo_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
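	/*
	 * Same in-place rewrite as in vmw_cmd_begin_query(); here the
	 * guestResult GMR id and offset map directly onto the GB
	 * command's mobid and offset fields.
	 */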
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_bo_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure the device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		vmw_bo_unreference(&vmw_bo);
		return -EINVAL;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

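	/*
	 * Give the kms code a chance to snoop this DMA: cursor surface
	 * contents are tracked this way so that cursor updates performed
	 * through surface DMA are picked up.
	 */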
	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_bo_unreference(&vmw_bo);
	return ret;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
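	/*
	 * The command body is followed by numVertexDecls SVGA3dVertexDecl
	 * entries and then numRanges SVGA3dPrimitiveRange entries; both
	 * counts are clamped against what the declared header size can
	 * actually hold.
	 */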
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

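		/*
		 * For guest-backed contexts, also record the texture
		 * binding in the context's staged binding state.
		 */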
		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_buffer_object *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: Pointer to the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_validation_res_switch_backup with a different interface.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res,
				     uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_buffer_object *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	vmw_bo_unreference(&vbo);

	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 buf_id, backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_shader_define_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineShader body;
	} *cmd;
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, struct vmw_shader_define_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv,
				    vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1,
				    cmd->body.type, size,
				    &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

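	/*
	 * The shader body has been absorbed into a kernel-managed compat
	 * shader resource, so the device must not see the original
	 * command; NOP it out in the command stream.
	 */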
	return vmw_resource_relocation_add(sw_context,
					   NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_shader_destroy_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyShader body;
	} *cmd;
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, struct vmw_shader_destroy_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(ctx),
				cmd->body.shid,
				cmd->body.type,
				&sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

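	/*
	 * As with SHADER_DEFINE, the destroy is handled entirely through
	 * the kernel's compat shader tracking; NOP the command out.
	 */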
	return vmw_resource_relocation_add(sw_context,
					   NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

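	/*
	 * Prefer a compat shader registered on this context by an earlier
	 * SHADER_DEFINE; if none exists, fall back below to looking the
	 * shader up as a regular guest-backed shader resource.
	 */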
	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid,
					cmd->body.type);

		if (!IS_ERR(res)) {
			struct vmw_resource *tmp_res = res;

			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
						    &cmd->body.shid, res);
			vmw_resource_unreference(&tmp_res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context,
					vmw_res_shader,
					user_shader_converter,
					&cmd->body.shid, &res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi,
			binding.shader_slot, 0);
	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_const_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShaderConst body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_const_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

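	/*
	 * On guest-backed devices this command is handled as the GB
	 * inline variant; only the command id needs rewriting, the
	 * payload is reused as-is.
	 */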
	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSingleConstantBuffer body;
	} *cmd;
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
			  (unsigned) cmd->body.type,
			  (unsigned) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged, &binding.bi,
			binding.shader_slot, binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate an
 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShaderResources body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

/**
 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShader body;
	} *cmd;
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			DRM_ERROR("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_resource_val_add(sw_context, res);
		if (ret)
			goto out_unref;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged, &binding.bi,
			binding.shader_slot, 0);
out_unref:
	if (res)
		vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validates an
 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		DRM_ERROR("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->buf[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = res;
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_dx_set_index_buffer - Validate an
 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetIndexBuffer body;
	} *cmd;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate an
 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetRenderTargets body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		DRM_ERROR("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
				    vmw_ctx_binding_ds, 0,
				    &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0,
				     (void *)&cmd[1], num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearRenderTargetView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_rt,
				   cmd->body.renderTargetViewId);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearDepthStencilView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_ds,
				   cmd->body.depthStencilViewId);
}

static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *srf;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->sid, &srf);
	if (unlikely(ret != 0))
		return ret;

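	/*
	 * Make room for the view in the context's view cotable, then
	 * register it with the view tracking code. The define command is
	 * stashed (header plus body) so the view can be recreated if
	 * needed, e.g. after a context swapout.
	 */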
	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man,
			    ctx_node->ctx,
			    srf,
			    view_type,
			    cmd->defined_id,
			    header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_set_so_targets - Validate an
 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_so binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		DRM_ERROR("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->targets[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_so;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate an
 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

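	/*
	 * All three subresource command variants start with the surface
	 * id, so a single check on the overlaid sid member covers each
	 * of them; the BUILD_BUG_ONs enforce that layout assumption.
	 */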
2743 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2744 offsetof(typeof(*cmd), sid));
2745 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2746 offsetof(typeof(*cmd), sid));
2747 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2748 offsetof(typeof(*cmd), sid));
2749
2750 cmd = container_of(header, typeof(*cmd), header);
2751
2752 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2753 user_surface_converter,
2754 &cmd->sid, NULL);
2755}

static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - Validate a view remove command and
 * schedule the view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this
 * command batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_view_remove(sw_context->man,
			      cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res,
			      &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(sw_context,
					   view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}
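
/*
 * At commit time a vmw_res_rel_cond_nop relocation is resolved roughly like
 * this (a simplified sketch; the real patching happens in
 * vmw_resource_relocations_apply()):
 *
 *	if (res->id == -1)	// view was never re-created in this batch
 *		// overwrite the command id at the recorded offset
 *		*(u32 *)((u8 *)batch_start + rel->offset) = SVGA_3D_CMD_NOP;
 *
 * so a destroy command for a view that no longer has a device id becomes a
 * harmless NOP instead of triggering a device error.
 */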

/**
 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	vmw_resource_unreference(&res);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);
	if (ret)
		DRM_ERROR("Could not find shader to remove.\n");

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					user_context_converter,
					&cmd->body.cid, &ctx);
		if (ret)
			return ret;
	} else {
		if (!sw_context->dx_ctx_node) {
			DRM_ERROR("DX Context not set.\n");
			return -EINVAL;
		}
		ctx = sw_context->dx_ctx_node->ctx;
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx),
				cmd->body.shid, 0);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_resource_val_add(sw_context, res);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		goto out_unref;
	}

	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					&cmd->body.mobid,
					cmd->body.offsetInBytes);
out_unref:
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXGenMips body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_sr,
				   cmd->body.shaderResourceViewId);
}

/**
 * vmw_cmd_dx_transfer_from_buffer -
 * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXTransferFromBuffer body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.destSid, NULL);
}

/**
 * vmw_cmd_intra_surface_copy -
 * Validate an SVGA_3D_CMD_INTRA_SURFACE_COPY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdIntraSurfaceCopy body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.surface.sid, NULL);
}
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/*
	 * DX commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
		    &vmw_cmd_dx_transfer_from_buffer,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
		    true, false, true),
};

bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}
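
/*
 * Illustrative use of vmw_cmd_describe() (not taken from the driver):
 * walking a captured batch and printing each command name and size, where
 * buf and buf_size are assumed to describe a well-formed command stream:
 *
 *	const u8 *next = buf;
 *
 *	while (next < (const u8 *)buf + buf_size) {
 *		u32 cmd_size;
 *		const char *name;
 *
 *		if (!vmw_cmd_describe(next, &cmd_size, &name) || !cmd_size)
 *			break;
 *		pr_info("%s: %u bytes\n", name, cmd_size);
 *		next += cmd_size;
 *	}
 */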

static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;
out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */

	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->base;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}
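
/*
 * Patching example (values hypothetical): a relocation whose buffer object
 * was validated into a GMR ends up with
 *
 *	reloc->location->gmrId  = bo->mem.start;   // the GMR id
 *	reloc->location->offset                    // kept as user-space wrote it
 *
 * whereas a VRAM placement keeps gmrId == SVGA_GMR_FRAMEBUFFER and adds the
 * buffer offset instead, and a MOB placement stores only the mob id through
 * reloc->mob_loc.
 */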

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
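
/*
 * Growth sketch, assuming VMWGFX_CMD_BOUNCE_INIT_SIZE is 32768 and a 4 KiB
 * page size: a request for 100000 bytes steps the bounce size
 * 32768 -> 49152 -> 73728 -> 110592, i.e. roughly 1.5x per iteration,
 * page-aligned, followed by a single vmalloc() of the final size. Old
 * contents are discarded; callers copy the complete batch in afterwards.
 */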

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
 * userspace handle is created; otherwise no handle is created.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}
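
/*
 * Sketch of the calling convention (vmw_execbuf_process() below is the real
 * user): failure is signalled through a NULL *p_fence rather than the return
 * value, and a NULL fence is legal to pass on to the fencing helpers:
 *
 *	ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *	if (ret != 0)
 *		DRM_ERROR("Fence submission error. Syncing.\n");
 *	vmw_validation_bo_fence(sw_context->ctx, fence);  // fence may be NULL
 */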

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
 * @sync_file: Only used to clean up in case of an error in this function.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it was preloaded with -EFAULT by user-space,
 * the error will be detected.
 * Also, if copying fails, user-space will be unable to signal the fence
 * object, so we wait for it immediately and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle,
			    int32_t out_fence_fd,
			    struct sync_file *sync_file)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		if (sync_file)
			fput(sync_file->file);

		if (fence_rep.fd != -1) {
			put_unused_fd(fence_rep.fd);
			fence_rep.fd = -1;
		}

		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
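
/*
 * Illustrative user-space counterpart (a sketch, not code from any
 * particular library): pre-loading error with -EFAULT lets the caller
 * detect the case where the kernel could not write the struct back:
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *
 *	arg.fence_rep = (unsigned long)&rep;
 *	drmCommandWriteRead(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (rep.error == 0)
 *		... rep.handle now names a valid fence object ...
 */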

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
 * the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch
 * pointed to by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands,
				   u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
					  sw_context->dx_ctx_node->ctx->id);
	else
		cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (!cmd) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		return -ENOMEM;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
 * the command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
				       id, false, header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to a kernel-space copy of the commands, or
 * NULL if the commands should be copied from @user_commands.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and
 * copies the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 * If command buffers could not be used, the function will return the value
 * of @kernel_commands on function call. That value may be NULL. In that case,
 * the value of *@header will be set to NULL.
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands,
				u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		DRM_ERROR("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
					   true, header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands,
			     command_size);
	if (ret) {
		DRM_ERROR("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}
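
/*
 * How a caller is expected to disambiguate the three outcomes (a sketch;
 * vmw_execbuf_process() below contains the real logic):
 *
 *	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
 *					     kernel_commands, command_size,
 *					     &header);
 *	if (IS_ERR(kernel_commands))
 *		return PTR_ERR(kernel_commands);   // hard error
 *	if (header)
 *		... submit through the command buffer manager ...
 *	else
 *		... fall back to the bounce buffer / fifo path ...
 */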

static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource *res;
	int ret;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
					      handle, user_context_converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
			  (unsigned) handle);
		return ret;
	}

	ret = vmw_resource_val_add(sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
	sw_context->man = vmw_context_res_man(res);
out_err:
	vmw_resource_unreference(&res);
	return ret;
}
3854
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003855int vmw_execbuf_process(struct drm_file *file_priv,
3856 struct vmw_private *dev_priv,
3857 void __user *user_commands,
3858 void *kernel_commands,
3859 uint32_t command_size,
3860 uint64_t throttle_us,
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003861 uint32_t dx_context_handle,
Jakob Bornecrantzbb1bd2f2012-02-09 16:56:43 +01003862 struct drm_vmw_fence_rep __user *user_fence_rep,
Sinclair Yehc906965d2017-07-05 01:49:32 -07003863 struct vmw_fence_obj **out_fence,
3864 uint32_t flags)
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003865{
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003866 struct vmw_sw_context *sw_context = &dev_priv->ctx;
Jakob Bornecrantzbb1bd2f2012-02-09 16:56:43 +01003867 struct vmw_fence_obj *fence = NULL;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003868 struct vmw_resource *error_resource;
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003869 struct vmw_cmdbuf_header *header;
Thomas Hellstromae2a1042011-09-01 20:18:44 +00003870 uint32_t handle;
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003871 int ret;
Sinclair Yehc906965d2017-07-05 01:49:32 -07003872 int32_t out_fence_fd = -1;
3873 struct sync_file *sync_file = NULL;
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003874 DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
Sinclair Yehc906965d2017-07-05 01:49:32 -07003875
3876 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3877 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
3878 if (out_fence_fd < 0) {
3879 DRM_ERROR("Failed to get a fence file descriptor.\n");
3880 return out_fence_fd;
3881 }
3882 }
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003883
Charmaine Lee2f633e52015-08-10 10:45:11 -07003884 if (throttle_us) {
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003885 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3886 throttle_us);
Charmaine Lee2f633e52015-08-10 10:45:11 -07003887
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003888 if (ret)
Sinclair Yehc906965d2017-07-05 01:49:32 -07003889 goto out_free_fence_fd;
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003890 }
Charmaine Lee2f633e52015-08-10 10:45:11 -07003891
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003892 kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3893 kernel_commands, command_size,
3894 &header);
Sinclair Yehc906965d2017-07-05 01:49:32 -07003895 if (IS_ERR(kernel_commands)) {
3896 ret = PTR_ERR(kernel_commands);
3897 goto out_free_fence_fd;
3898 }
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003899
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003900 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003901 if (ret) {
3902 ret = -ERESTARTSYS;
3903 goto out_free_header;
3904 }
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003905
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003906 sw_context->kernel = false;
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003907 if (kernel_commands == NULL) {
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003908 ret = vmw_resize_cmd_bounce(sw_context, command_size);
3909 if (unlikely(ret != 0))
3910 goto out_unlock;
3911
3912
3913 ret = copy_from_user(sw_context->cmd_bounce,
3914 user_commands, command_size);
3915
3916 if (unlikely(ret != 0)) {
3917 ret = -EFAULT;
3918 DRM_ERROR("Failed copying commands.\n");
3919 goto out_unlock;
3920 }
3921 kernel_commands = sw_context->cmd_bounce;
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07003922 } else if (!header)
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003923 sw_context->kernel = true;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003924
Thomas Hellstromd5bde952014-01-31 10:12:10 +01003925 sw_context->fp = vmw_fpriv(file_priv);
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003926 INIT_LIST_HEAD(&sw_context->ctx_list);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02003927 sw_context->cur_query_bo = dev_priv->pinned_bo;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003928 sw_context->last_query_ctx = NULL;
3929 sw_context->needs_post_query_barrier = false;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003930 sw_context->dx_ctx_node = NULL;
Sinclair Yehfd11a3c2015-08-10 10:56:15 -07003931 sw_context->dx_query_mob = NULL;
3932 sw_context->dx_query_ctx = NULL;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003933 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003934 INIT_LIST_HEAD(&sw_context->res_relocations);
Thomas Hellstromfc18afc2018-09-26 15:36:52 +02003935 INIT_LIST_HEAD(&sw_context->bo_relocations);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003936 if (sw_context->staged_bindings)
3937 vmw_binding_state_reset(sw_context->staged_bindings);
3938
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003939 if (!sw_context->res_ht_initialized) {
3940 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
3941 if (unlikely(ret != 0))
3942 goto out_unlock;
3943 sw_context->res_ht_initialized = true;
3944 }
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02003945 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003946 sw_context->ctx = &val_ctx;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003947 ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003948 if (unlikely(ret != 0))
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003949 goto out_err_nores;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003950
Thomas Hellstrom922ade02011-10-04 20:13:17 +02003951 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
3952 command_size);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003953 if (unlikely(ret != 0))
Thomas Hellstromcf5e3412014-01-30 10:58:19 +01003954 goto out_err_nores;
Thomas Hellstrombe38ab62011-08-31 07:42:54 +00003955
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003956 ret = vmw_resources_reserve(sw_context);
3957 if (unlikely(ret != 0))
Thomas Hellstromcf5e3412014-01-30 10:58:19 +01003958 goto out_err_nores;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003959
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003960 ret = vmw_validation_bo_reserve(&val_ctx, true);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003961 if (unlikely(ret != 0))
Thomas Hellstromd80efd52015-08-10 10:39:35 -07003962 goto out_err_nores;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003963
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003964 ret = vmw_validation_bo_validate(&val_ctx, true);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00003965 if (unlikely(ret != 0))
3966 goto out_err;
3967
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003968 ret = vmw_validation_res_validate(&val_ctx, true);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00003969 if (unlikely(ret != 0))
3970 goto out_err;
Thomas Hellstrom9c079b82018-09-26 15:28:55 +02003971 vmw_validation_drop_ht(&val_ctx);
Thomas Hellstrom1925d452010-05-28 11:21:57 +02003972
Thomas Hellstrom173fb7d2013-10-08 02:32:36 -07003973 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
3974 if (unlikely(ret != 0)) {
3975 ret = -ERESTARTSYS;
3976 goto out_err;
3977 }
3978
Thomas Hellstrom30f82d812014-02-05 08:13:56 +01003979 if (dev_priv->has_mob) {
3980 ret = vmw_rebind_contexts(sw_context);
3981 if (unlikely(ret != 0))
Dan Carpenterb2ad9882014-02-11 19:03:47 +03003982 goto out_unlock_binding;
Thomas Hellstrom30f82d812014-02-05 08:13:56 +01003983 }
3984
	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

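	/*
	 * Commit the pending query-bo switch and fence the submission so
	 * that user-space and the validation lists can wait on it.
	 */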
	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep.
	 */
	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_execbuf_bindings_commit(sw_context, false);
	vmw_bind_dx_query_mob(sw_context);
	vmw_validation_res_unreserve(&val_ctx, false);

	vmw_validation_bo_fence(sw_context->ctx, fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	/*
	 * If anything fails here, give up trying to export the fence and do
	 * a sync, since user mode will not be able to sync the fence itself.
	 * This ensures we are still functionally correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			DRM_ERROR("Unable to create sync file for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

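	/*
	 * Copy fence information (and, when exporting, the sync file) back
	 * to user-space; on fence-creation failure this also propagates the
	 * error through @user_fence_rep.
	 */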
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle,
				    out_fence_fd, sync_file);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid
	 * deadlocks in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
	vmw_execbuf_bindings_commit(sw_context, true);
	vmw_validation_res_unreserve(&val_ctx, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	vmw_validation_drop_ht(&val_ctx);
	WARN_ON(!list_empty(&sw_context->ctx_list));
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid
	 * deadlocks in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}
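
/*
 * Note: vmw_execbuf_unpin_panic() is static and, within this file, is
 * reached only from the error paths of __vmw_execbuf_release_pinned_bo()
 * below, when buffer reservation or the dummy-query emission fails.
 */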

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo.
 *
 * This function should be used to unpin the pinned query bo, or as a
 * query barrier when we need to make sure that all queries have finished
 * before the next fifo command. (For example on hardware context
 * destructions, where the hardware may otherwise leak unfinished
 * queries.)
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct vmw_fence_obj *lfence = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_no_reserve;

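	/*
	 * Emitting a dummy query on the current query context acts as a
	 * query barrier: it ensures all queries touching the pinned bo
	 * have finished before the bo is unpinned below.
	 */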
	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (ret)
			goto out_no_emit;
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	vmw_validation_bo_fence(&val_ctx, fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	vmw_validation_unref_lists(&val_ctx);
	vmw_bo_unreference(&dev_priv->pinned_bo);
out_unlock:
	return;

out_no_emit:
	vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
	vmw_validation_unref_lists(&val_ctx);
	vmw_execbuf_unpin_panic(dev_priv);
	vmw_bo_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a
 * query barrier when we need to make sure that all queries have finished
 * before the next fifo command. (For example on hardware context
 * destructions, where the hardware may otherwise leak unfinished
 * queries.)
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

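/*
 * Locking note for the two release functions above: callers that already
 * hold the cmdbuf_mutex (such as the error paths of vmw_execbuf_process())
 * call __vmw_execbuf_release_pinned_bo() directly; all other callers go
 * through the self-locking vmw_execbuf_release_pinned_bo() wrapper.
 */
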
int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};
	struct dma_fence *in_fence = NULL;

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;

	/*
	 * Extend the ioctl argument while maintaining backwards
	 * compatibility: we take different code paths depending on the
	 * value of arg.version.
	 */
	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] -
			   copy_offset[0]) != 0)
		return -EFAULT;

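	/*
	 * The copies above rely on the version-1 argument struct being a
	 * prefix of the current one, ending at @context_handle: the common
	 * head is copied first, and the extension only when user-space
	 * declared a version greater than 1.
	 */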
	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		break;
	}
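	/*
	 * For a version-1 submission, context_handle is set to (uint32_t) -1
	 * above, which the rest of the execbuf path treats as "no DX context
	 * supplied".
	 */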

	/* If imported a fence FD from elsewhere, then wait on it */
	if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg.imported_fence_fd);

		if (!in_fence) {
			DRM_ERROR("Cannot get imported fence\n");
			return -EINVAL;
		}

		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
		if (ret)
			goto out;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out; /* Drop any imported fence reference on error. */

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL,
				  arg.flags);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);
	return ret;
}