/**************************************************************************
 *
 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_api.h"

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
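/*
 * Worked example (illustrative, assuming sizeof(SVGACBHeader) <= 64 and
 * 4096-byte pages): ALIGN(sizeof(SVGACBHeader), 64) is then 64, so
 * VMW_CMDBUF_INLINE_SIZE is 1024 - 64 = 960 bytes. A struct
 * vmw_cmdbuf_dheader (device header plus inline space, defined below) then
 * occupies exactly 1024 bytes, and four of them fit in a page with no
 * waste, which is what the comment above asks for.
 */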
/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 */
struct vmw_cmdbuf_context {
	struct list_head submitted;
	struct list_head hw_submitted;
	struct list_head preempted;
	unsigned num_hw_submitted;
};
/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @tasklet: Tasklet struct for irq processing. Immutable.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 */
struct vmw_cmdbuf_man {
	struct mutex cur_mutex;
	struct mutex space_mutex;
	struct work_struct work;
	struct vmw_private *dev_priv;
	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
	struct list_head error;
	struct drm_mm mm;
	struct ttm_buffer_object *cmd_space;
	struct ttm_bo_kmap_obj map_obj;
	u8 *map;
	struct vmw_cmdbuf_header *cur;
	size_t cur_pos;
	size_t default_size;
	unsigned max_hw_submitted;
	spinlock_t lock;
	struct dma_pool *headers;
	struct dma_pool *dheaders;
	struct tasklet_struct tasklet;
	wait_queue_head_t alloc_queue;
	wait_queue_head_t idle_queue;
	bool irq_on;
	bool using_mob;
	bool has_pool;
	dma_addr_t handle;
	size_t size;
};
/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
	struct vmw_cmdbuf_man *man;
	SVGACBHeader *cb_header;
	SVGACBContext cb_context;
	struct list_head list;
	struct drm_mm_node node;
	dma_addr_t handle;
	u8 *cmd;
	size_t size;
	size_t reserved;
	bool inline_space;
};
/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
	SVGACBHeader cb_header;
	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};
/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
	size_t page_size;
	struct drm_mm_node *node;
	bool done;
};
/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
	     ++(_i), ++(_ctx))
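/*
 * Illustrative use of the macro above (a sketch, not a call site from this
 * file): check whether any context still has buffers queued for submission.
 * Callers in this file perform this kind of scan with @man->lock held.
 *
 *	struct vmw_cmdbuf_context *ctx;
 *	bool busy = false;
 *	int i;
 *
 *	for_each_cmdbuf_ctx(man, i, ctx)
 *		busy |= !list_empty(&ctx->submitted);
 */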
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptibly when locking.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&man->cur_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->cur_mutex);
	}

	return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
	mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called with the manager lock held.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_dheader *dheader;

	if (WARN_ON_ONCE(!header->inline_space))
		return;

	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
			       cb_header);
	dma_pool_free(header->man->dheaders, dheader, header->handle);
	kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	lockdep_assert_held_once(&man->lock);

	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}

	drm_mm_remove_node(&header->node);
	wake_up_all(&man->alloc_queue);
	if (header->cb_header)
		dma_pool_free(man->headers, header->cb_header,
			      header->handle);
	kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	/* Avoid locking if inline_space */
	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}
	spin_lock_bh(&man->lock);
	__vmw_cmdbuf_header_free(header);
	spin_unlock_bh(&man->lock);
}


/**
 * vmw_cmdbuf_header_submit - Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;
	u32 val;

	val = upper_32_bits(header->handle);
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

	val = lower_32_bits(header->handle);
	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

	return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init - Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize.
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
	INIT_LIST_HEAD(&ctx->hw_submitted);
	INIT_LIST_HEAD(&ctx->submitted);
	INIT_LIST_HEAD(&ctx->preempted);
	ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit - Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
				  struct vmw_cmdbuf_context *ctx)
{
	while (ctx->num_hw_submitted < man->max_hw_submitted &&
	       !list_empty(&ctx->submitted)) {
		struct vmw_cmdbuf_header *entry;
		SVGACBStatus status;

		entry = list_first_entry(&ctx->submitted,
					 struct vmw_cmdbuf_header,
					 list);

		status = vmw_cmdbuf_header_submit(entry);

		/* This should never happen */
		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			break;
		}

		list_del(&entry->list);
		list_add_tail(&entry->list, &ctx->hw_submitted);
		ctx->num_hw_submitted++;
	}
}

/**
 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Incremented if the context still has command buffers queued
 * for submission after processing.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers. Typically freeing them, but on preemption or error take
 * appropriate action. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_context *ctx,
				   int *notempty)
{
	struct vmw_cmdbuf_header *entry, *next;

	vmw_cmdbuf_ctx_submit(man, ctx);

	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
		SVGACBStatus status = entry->cb_header->status;

		if (status == SVGA_CB_STATUS_NONE)
			break;

		list_del(&entry->list);
		wake_up_all(&man->idle_queue);
		ctx->num_hw_submitted--;
		switch (status) {
		case SVGA_CB_STATUS_COMPLETED:
			__vmw_cmdbuf_header_free(entry);
			break;
		case SVGA_CB_STATUS_COMMAND_ERROR:
		case SVGA_CB_STATUS_CB_HEADER_ERROR:
			list_add_tail(&entry->list, &man->error);
			schedule_work(&man->work);
			break;
		case SVGA_CB_STATUS_PREEMPTED:
			list_add(&entry->list, &ctx->preempted);
			break;
		default:
			WARN_ONCE(true, "Undefined command buffer status.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		}
	}

	vmw_cmdbuf_ctx_submit(man, ctx);
	if (!list_empty(&ctx->submitted))
		(*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}
/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
			       struct vmw_cmdbuf_header *header,
			       SVGACBContext cb_context)
{
	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
		header->cb_header->dxContext = 0;
	header->cb_context = cb_context;
	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

	vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
 * handler implemented as a tasklet.
 *
 * @data: Tasklet closure. A pointer to the command buffer manager cast to
 * an unsigned long.
 *
 * The bottom half (tasklet) of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
static void vmw_cmdbuf_man_tasklet(unsigned long data)
{
	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;

	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	uint32_t dummy;
	bool restart = false;

	spin_lock_bh(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		restart = true;
		DRM_ERROR("Command buffer error.\n");

		list_del(&entry->list);
		__vmw_cmdbuf_header_free(entry);
		wake_up_all(&man->idle_queue);
	}
	spin_unlock_bh(&man->lock);

	if (restart && vmw_cmdbuf_startstop(man, true))
		DRM_ERROR("Failed restarting command buffer context 0.\n");

	/* Send a new fence in case one was removed */
	vmw_fifo_send_fence(man->dev_priv, &dummy);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
				bool check_preempted)
{
	struct vmw_cmdbuf_context *ctx;
	bool idle = false;
	int i;

	spin_lock_bh(&man->lock);
	vmw_cmdbuf_man_process(man);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!list_empty(&ctx->submitted) ||
		    !list_empty(&ctx->hw_submitted) ||
		    (check_preempted && !list_empty(&ctx->preempted)))
			goto out_unlock;
	}

	idle = list_empty(&man->error);

out_unlock:
	spin_unlock_bh(&man->lock);

	return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	if (!cur)
		return;

	spin_lock_bh(&man->lock);
	if (man->cur_pos == 0) {
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock_bh(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptibly when waiting for the
 * cur_mutex.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
			 bool interruptible)
{
	int ret = vmw_cmdbuf_cur_lock(man, interruptible);

	if (ret)
		return ret;

	__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);

	return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptibly while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		    unsigned long timeout)
{
	int ret;

	ret = vmw_cmdbuf_cur_flush(man, interruptible);
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		ret = wait_event_interruptible_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	} else {
		ret = wait_event_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);
	if (ret == 0) {
		if (!vmw_cmdbuf_man_idle(man, true))
			ret = -EBUSY;
		else
			ret = 0;
	}
	if (ret > 0)
		ret = 0;

	return ret;
}
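/*
 * Illustrative use (a sketch mirroring the calls in
 * vmw_cmdbuf_remove_pool() and vmw_cmdbuf_man_destroy() below, which
 * ignore the return value):
 *
 *	if (vmw_cmdbuf_idle(man, false, 10*HZ))
 *		DRM_ERROR("Command buffer manager failed to idle.\n");
 */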
/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and allocated mm node
 * on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true if the
 * allocation succeeded or had already succeeded.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_alloc_info *info)
{
	int ret;

	if (info->done)
		return true;

	memset(info->node, 0, sizeof(*info->node));
	spin_lock_bh(&man->lock);
	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	if (ret) {
		vmw_cmdbuf_man_process(man);
		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	}

	spin_unlock_bh(&man->lock);
	info->done = !ret;

	return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if there is
 * no space available at the moment, it turns on IRQ handling and sleeps
 * waiting for it to become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
				  struct drm_mm_node *node,
				  size_t size,
				  bool interruptible)
{
	struct vmw_cmdbuf_alloc_info info;

	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
	info.node = node;
	info.done = false;

	/*
	 * To prevent starvation of large requests, only one allocating call
	 * at a time waiting for space.
	 */
	if (interruptible) {
		if (mutex_lock_interruptible(&man->space_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->space_mutex);
	}

	/* Try to allocate space without waiting. */
	if (vmw_cmdbuf_try_alloc(man, &info))
		goto out_unlock;

	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		int ret;

		ret = wait_event_interruptible
			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
		if (ret) {
			vmw_generic_waiter_remove
				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				 &man->dev_priv->cmdbuf_waiters);
			mutex_unlock(&man->space_mutex);
			return ret;
		}
	} else {
		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);

out_unlock:
	mutex_unlock(&man->space_mutex);

	return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_header *header,
				 size_t size,
				 bool interruptible)
{
	SVGACBHeader *cb_hdr;
	size_t offset;
	int ret;

	if (!man->has_pool)
		return -ENOMEM;

	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);

	if (ret)
		return ret;

	header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
					   &header->handle);
	if (!header->cb_header) {
		ret = -ENOMEM;
		goto out_no_cb_header;
	}

	header->size = header->node.size << PAGE_SHIFT;
	cb_hdr = header->cb_header;
	offset = header->node.start << PAGE_SHIFT;
	header->cmd = man->map + offset;
	memset(cb_hdr, 0, sizeof(*cb_hdr));
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
	}

	return 0;

out_no_cb_header:
	spin_lock_bh(&man->lock);
	drm_mm_remove_node(&header->node);
	spin_unlock_bh(&man->lock);

	return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_header *header,
				   int size)
{
	struct vmw_cmdbuf_dheader *dheader;
	SVGACBHeader *cb_hdr;

	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
		return -ENOMEM;

	dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
				 &header->handle);
	if (!dheader)
		return -ENOMEM;

	header->inline_space = true;
	header->size = VMW_CMDBUF_INLINE_SIZE;
	cb_hdr = &dheader->cb_header;
	header->cb_header = cb_hdr;
	header->cmd = dheader->cmd;
	memset(dheader, 0, sizeof(*dheader));
	cb_hdr->status = SVGA_CB_STATUS_NONE;
	cb_hdr->flags = SVGA_CB_FLAG_NONE;
	cb_hdr->ptr.pa = (u64)header->handle +
		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

	return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @p_header: Points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret = 0;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	INIT_LIST_HEAD(&header->list);
	header->cb_header->status = SVGA_CB_STATUS_NONE;
	*p_header = header;

	return header->cmd;
}
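/*
 * Illustrative use (a sketch; the size and command contents are made up).
 * A dedicated header is allocated, reserved, filled and committed:
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *cmd = vmw_cmdbuf_alloc(man, size, true, &header);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID, true, header);
 *	... write size bytes of device commands to cmd ...
 *	vmw_cmdbuf_commit(man, size, header, false);
 */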
/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
				    size_t size,
				    int ctx_id,
				    bool interruptible)
{
	struct vmw_cmdbuf_header *cur;
	void *ret;

	if (vmw_cmdbuf_cur_lock(man, interruptible))
		return ERR_PTR(-ERESTARTSYS);

	cur = man->cur;
	if (cur && (size + man->cur_pos > cur->size ||
		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		     ctx_id != cur->cb_header->dxContext)))
		__vmw_cmdbuf_cur_flush(man);

	if (!man->cur) {
		ret = vmw_cmdbuf_alloc(man,
				       max_t(size_t, size, man->default_size),
				       interruptible, &man->cur);
		if (IS_ERR(ret)) {
			vmw_cmdbuf_cur_unlock(man);
			return ret;
		}

		cur = man->cur;
	}

	if (ctx_id != SVGA3D_INVALID_ID) {
		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		cur->cb_header->dxContext = ctx_id;
	}

	cur->reserved = size;

	return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
				  size_t size, bool flush)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	WARN_ON(size > cur->reserved);
	man->cur_pos += size;
	if (!size)
		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
			 int ctx_id, bool interruptible,
			 struct vmw_cmdbuf_header *header)
{
	if (!header)
		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

	if (size > header->size)
		return ERR_PTR(-EINVAL);

	if (ctx_id != SVGA3D_INVALID_ID) {
		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		header->cb_header->dxContext = ctx_id;
	}

	header->reserved = size;
	return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
		       struct vmw_cmdbuf_header *header, bool flush)
{
	if (!header) {
		vmw_cmdbuf_commit_cur(man, size, flush);
		return;
	}

	(void) vmw_cmdbuf_cur_lock(man, false);
	__vmw_cmdbuf_cur_flush(man);
	WARN_ON(size > header->reserved);
	man->cur = header;
	man->cur_pos = size;
	if (!size)
		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}
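/*
 * Illustrative reserve/commit cycle on the current command buffer (a
 * sketch; SVGA3dCmdFoo is a made-up command type):
 *
 *	SVGA3dCmdFoo *cmd = vmw_cmdbuf_reserve(man, sizeof(*cmd),
 *					       SVGA3D_INVALID_ID, true, NULL);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	... fill in *cmd ...
 *	vmw_cmdbuf_commit(man, sizeof(*cmd), NULL, false);
 */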
/**
 * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
 *
 * @man: The command buffer manager.
 */
void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
{
	if (!man)
		return;

	tasklet_schedule(&man->tasklet);
}

/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
					  const void *command,
					  size_t size)
{
	struct vmw_cmdbuf_header *header;
	int status;
	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, command, size);
	header->cb_header->length = size;
	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
	spin_lock_bh(&man->lock);
	status = vmw_cmdbuf_header_submit(header);
	spin_unlock_bh(&man->lock);
	vmw_cmdbuf_header_free(header);

	if (status != SVGA_CB_STATUS_COMPLETED) {
		DRM_ERROR("Device context command failed with status %d\n",
			  status);
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
				bool enable)
{
	struct {
		uint32 id;
		SVGADCCmdStartStop body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
	cmd.body.enable = (enable) ? 1 : 0;
	cmd.body.context = SVGA_CB_CONTEXT_0;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 * @default_size: The default size of the command buffer for small kernel
 * submissions.
 *
 * Set the size and allocate the main command buffer space pool,
 * as well as the default size of the command buffer for
 * small kernel submissions. If successful, this enables large command
 * submissions. Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
			     size_t size, size_t default_size)
{
	struct vmw_private *dev_priv = man->dev_priv;
	bool dummy;
	int ret;

	if (man->has_pool)
		return -EINVAL;

	/* First, try to allocate a huge chunk of DMA memory */
	size = PAGE_ALIGN(size);
	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
				      &man->handle, GFP_KERNEL);
	if (man->map) {
		man->using_mob = false;
	} else {
		/*
		 * DMA memory failed. If we can have command buffers in a
		 * MOB, try to use that instead. Note that this will
		 * actually call into the already enabled manager, when
		 * binding the MOB.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_DX))
			return -ENOMEM;

		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
				    &vmw_mob_ne_placement, 0, false, NULL,
				    &man->cmd_space);
		if (ret)
			return ret;

		man->using_mob = true;
		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
				  &man->map_obj);
		if (ret)
			goto out_no_map;

		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
	}

	man->size = size;
	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

	man->has_pool = true;

	/*
	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
	 * needs to wait for space and we block on further command
	 * submissions to be able to free up space.
	 */
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	DRM_INFO("Using command buffers with %s pool.\n",
		 (man->using_mob) ? "MOB" : "DMA");

	return 0;

out_no_map:
	if (man->using_mob)
		ttm_bo_unref(&man->cmd_space);

	return ret;
}
/**
 * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success or an error
 * pointer on failure. The command buffer manager will be enabled for
 * submissions of size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	struct vmw_cmdbuf_context *ctx;
	int i;
	int ret;

	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
		return ERR_PTR(-ENOSYS);

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return ERR_PTR(-ENOMEM);

	man->headers = dma_pool_create("vmwgfx cmdbuf",
				       &dev_priv->dev->pdev->dev,
				       sizeof(SVGACBHeader),
				       64, PAGE_SIZE);
	if (!man->headers) {
		ret = -ENOMEM;
		goto out_no_pool;
	}

	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
					&dev_priv->dev->pdev->dev,
					sizeof(struct vmw_cmdbuf_dheader),
					64, PAGE_SIZE);
	if (!man->dheaders) {
		ret = -ENOMEM;
		goto out_no_dpool;
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_init(ctx);

	INIT_LIST_HEAD(&man->error);
	spin_lock_init(&man->lock);
	mutex_init(&man->cur_mutex);
	mutex_init(&man->space_mutex);
	tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
		     (unsigned long) man);
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	init_waitqueue_head(&man->alloc_queue);
	init_waitqueue_head(&man->idle_queue);
	man->dev_priv = dev_priv;
	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
			       &dev_priv->error_waiters);
	ret = vmw_cmdbuf_startstop(man, true);
	if (ret) {
		DRM_ERROR("Failed starting command buffer context 0.\n");
		vmw_cmdbuf_man_destroy(man);
		return ERR_PTR(ret);
	}

	return man;

out_no_dpool:
	dma_pool_destroy(man->headers);
out_no_pool:
	kfree(man);

	return ERR_PTR(ret);
}
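/*
 * Typical manager lifecycle (a sketch; the pool size is made up, and the
 * driver wires these calls into its init and teardown paths):
 *
 *	struct vmw_cmdbuf_man *man = vmw_cmdbuf_man_create(dev_priv);
 *
 *	if (IS_ERR(man))
 *		return PTR_ERR(man);
 *	ret = vmw_cmdbuf_set_pool_size(man, size, VMW_CMDBUF_INLINE_SIZE);
 *	...
 *	vmw_cmdbuf_remove_pool(man);
 *	vmw_cmdbuf_man_destroy(man);
 */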
/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
	if (!man->has_pool)
		return;

	man->has_pool = false;
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (man->using_mob) {
		(void) ttm_bo_kunmap(&man->map_obj);
		ttm_bo_unref(&man->cmd_space);
	} else {
		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
				  man->size, man->map, man->handle);
	}
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
	WARN_ON_ONCE(man->has_pool);
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (vmw_cmdbuf_startstop(man, false))
		DRM_ERROR("Failed stopping command buffer context 0.\n");

	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
				  &man->dev_priv->error_waiters);
	tasklet_kill(&man->tasklet);
	(void) cancel_work_sync(&man->work);
	dma_pool_destroy(man->dheaders);
	dma_pool_destroy(man->headers);
	mutex_destroy(&man->cur_mutex);
	mutex_destroy(&man->space_mutex);
	kfree(man);
}