/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);

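/*
 * Rough per-object size of a user context. Computed on first use in
 * vmw_context_define_ioctl() and charged against the TTM global memory
 * accounting for every context created.
 */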
static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

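/*
 * Dispatch table, indexed by binding type, used by
 * vmw_context_binding_kill() to emit the FIFO commands that scrub a
 * binding of that type from the device.
 */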
static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
	[vmw_ctx_binding_tex] = vmw_context_scrub_texture
};

/**
 * Context management:
 */

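/**
 * vmw_hw_context_destroy - Destroy a context on the device.
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed contexts, takes the command buffer mutex, destroys the
 * hardware context and releases the pinned query buffer if it is no longer
 * valid. For legacy contexts, emits an SVGA_3D_CMD_CONTEXT_DESTROY command.
 */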
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	if (res->func->destroy == vmw_gb_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		(void) vmw_gb_context_destroy(res);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv, false);
}

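/**
 * vmw_gb_context_init - Initialize a guest-backed context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the context resource to initialize.
 * @res_free: Destructor for the resource, or NULL to use kfree().
 *
 * Sets up @res as a guest-backed context and registers the hardware
 * destructor. On failure the resource is freed using @res_free.
 */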
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       struct vmw_resource *res,
			       void (*res_free) (struct vmw_resource *res))
{
	int ret;

	ret = vmw_resource_init(dev_priv, res, true,
				res_free, &vmw_gb_context_func);
	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;

	if (unlikely(ret != 0)) {
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

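/**
 * vmw_context_init - Initialize a context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the context resource to initialize.
 * @res_free: Destructor for the resource, or NULL to use kfree().
 *
 * Dispatches to vmw_gb_context_init() on guest-backed hardware;
 * otherwise allocates a hardware context id and emits an
 * SVGA_3D_CMD_CONTEXT_DEFINE command to define a legacy context.
 */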
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

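/**
 * vmw_context_alloc - Allocate and initialize a context resource without
 * a user-space handle.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns a pointer to the new context resource, or NULL on failure.
 */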
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);

	return (ret == 0) ? res : NULL;
}

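/**
 * vmw_gb_context_create - Create a guest-backed context on the device.
 *
 * @res: Pointer to the context resource.
 *
 * Allocates a context id and emits an SVGA_3D_CMD_DEFINE_GB_CONTEXT
 * command. A no-op if the context already has an id.
 */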
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

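/**
 * vmw_gb_context_bind - Bind a guest-backed context to its backup buffer.
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer information for the backup MOB.
 *
 * Emits an SVGA_3D_CMD_BIND_GB_CONTEXT command binding the context to the
 * MOB backing it, telling the device whether the backup buffer already
 * holds valid contents.
 */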
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

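/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backup.
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to read the context contents back into the backup
 * buffer before unbinding.
 * @val_buf: Validation buffer information for the backup MOB.
 *
 * Optionally emits an SVGA_3D_CMD_READBACK_GB_CONTEXT command, then binds
 * the context to SVGA3D_INVALID_ID and fences the backup buffer.
 */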
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

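/**
 * vmw_gb_context_destroy - Destroy a guest-backed context on the device.
 *
 * @res: Pointer to the context resource.
 *
 * Emits an SVGA_3D_CMD_DESTROY_GB_CONTEXT command and releases the context
 * id. Also invalidates the cached query context id if it refers to this
 * context. A no-op if the context has no id.
 */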
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}

/**
 * User-space context management:
 */

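/**
 * vmw_user_context_base_to_res - Return the resource embedded in a user
 * context base object.
 *
 * @base: Pointer to the base object.
 */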
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

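/**
 * vmw_user_context_free - Free a user context and update the global
 * memory accounting.
 *
 * @res: Pointer to the embedded context resource.
 */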
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * vmw_user_context_base_release - Release a base object's resource
 * reference.
 *
 * @p_base: Pointer to the base object pointer, which is cleared.
 *
 * Called when user space holds no more references on the base object.
 * Releases the base object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

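/**
 * vmw_context_destroy_ioctl - Ioctl to destroy a user-space context handle.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_context_arg identifying the context.
 * @file_priv: Pointer to the calling file private.
 *
 * Drops the caller's reference on the context base object; the context
 * itself goes away once all references are gone.
 */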
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

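/**
 * vmw_context_define_ioctl - Ioctl to create a user-space context.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_context_arg; receives the new
 * context id.
 * @file_priv: Pointer to the calling file private.
 *
 * Allocates a user context, initializes the hardware context and sets up
 * a base object so that user space can reference the context by handle.
 */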
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

/**
 * vmw_context_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
 */
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.shader_type;
	cmd->body.shid = SVGA3D_INVALID_ID;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_render_target - scrub a render target binding
 * from a context.
 *
 * @bi: single binding information.
 */
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for render target "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.rt_type;
	cmd->body.target.sid = SVGA3D_INVALID_ID;
	cmd->body.target.face = 0;
	cmd->body.target.mipmap = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		struct {
			SVGA3dCmdSetTextureState c;
			SVGA3dTextureState s1;
		} body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for texture "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.c.cid = bi->ctx->id;
	cmd->body.s1.stage = bi->i1.texture_stage;
	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
	cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_binding_drop: Stop tracking a context binding
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Stops tracking a context binding, and re-initializes its storage.
 * Typically used when the context binding is replaced with a binding to
 * another (or the same, for that matter) resource.
 */
static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
{
	list_del(&cb->ctx_list);
	cb->bi.ctx = NULL;
}

/**
 * vmw_context_binding_add: Start tracking a context binding
 *
 * @cbs: Pointer to the context binding state tracker.
 * @bi: Information about the binding to track.
 *
 * Performs basic checks on the binding to make sure arguments are within
 * bounds and then starts tracking the binding in the context binding
 * state structure @cbs.
 */
int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
			    const struct vmw_ctx_bindinfo *bi)
{
	struct vmw_ctx_binding *loc;

	switch (bi->bt) {
	case vmw_ctx_binding_rt:
		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
			DRM_ERROR("Illegal render target type %u.\n",
				  (unsigned) bi->i1.rt_type);
			return -EINVAL;
		}
		loc = &cbs->render_targets[bi->i1.rt_type];
		break;
	case vmw_ctx_binding_tex:
		if (unlikely((unsigned)bi->i1.texture_stage >=
			     SVGA3D_NUM_TEXTURE_UNITS)) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) bi->i1.texture_stage);
			return -EINVAL;
		}
		loc = &cbs->texture_units[bi->i1.texture_stage];
		break;
	case vmw_ctx_binding_shader:
		if (unlikely((unsigned)bi->i1.shader_type >=
			     SVGA3D_SHADERTYPE_MAX)) {
			DRM_ERROR("Illegal shader type %u.\n",
				  (unsigned) bi->i1.shader_type);
			return -EINVAL;
		}
		loc = &cbs->shaders[bi->i1.shader_type];
		break;
	default:
		BUG();
	}

	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	loc->bi = *bi;
	list_add_tail(&loc->ctx_list, &cbs->list);

	return 0;
}

/**
 * vmw_context_binding_kill - Kill a binding on the device
 * and stop tracking it.
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Emits FIFO commands to scrub a binding represented by @cb.
 * Then stops tracking the binding and re-initializes its storage.
 */
void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
	(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
	vmw_context_binding_drop(cb);
}

/**
 * vmw_context_binding_state_kill - Kill all bindings associated with a
 * struct vmw_ctx_binding_state structure, and re-initialize the structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker. Then re-initializes the whole structure.
 */
void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
		vmw_context_binding_kill(entry);
}