/**************************************************************************
 *
 * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>


#define vmw_crtc_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.crtc)
#define vmw_encoder_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.encoder)
#define vmw_connector_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.connector)

/**
 * struct vmw_kms_sou_surface_dirty - Closure structure for
 * blit surface to screen command.
 * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
 * @left: Left side of bounding box.
 * @right: Right side of bounding box.
 * @top: Top side of bounding box.
 * @bottom: Bottom side of bounding box.
 * @dst_x: Difference between source clip rects and framebuffer coordinates.
 * @dst_y: Difference between source clip rects and framebuffer coordinates.
 * @sid: Surface id of surface to copy from.
 */
struct vmw_kms_sou_surface_dirty {
	struct vmw_kms_dirty base;
	s32 left, right, top, bottom;
	s32 dst_x, dst_y;
	u32 sid;
};

/*
 * SVGA commands that are used by this code. Please see the device headers
 * for explanation.
 */
struct vmw_kms_sou_readback_blit {
	uint32 header;
	SVGAFifoCmdBlitScreenToGMRFB body;
};

struct vmw_kms_sou_dmabuf_blit {
	uint32 header;
	SVGAFifoCmdBlitGMRFBToScreen body;
};

struct vmw_kms_sou_dirty_cmd {
	SVGA3dCmdHeader header;
	SVGA3dCmdBlitSurfaceToScreen body;
};

/**
 * struct vmw_screen_object_unit - Display unit using screen objects.
 */
struct vmw_screen_object_unit {
	struct vmw_display_unit base;

	unsigned long buffer_size; /**< Size of allocated buffer */
	struct vmw_dma_buffer *buffer; /**< Backing store buffer */

	bool defined;
};

static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
{
	vmw_du_cleanup(&sou->base);
	kfree(sou);
}


/*
 * Screen Object Display Unit CRTC functions
 */

static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
{
	vmw_sou_destroy(vmw_crtc_to_sou(crtc));
}

/**
 * Send the fifo command to create a screen.
 */
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
			       struct vmw_screen_object_unit *sou,
			       uint32_t x, uint32_t y,
			       struct drm_display_mode *mode)
{
	size_t fifo_size;

	struct {
		struct {
			uint32_t cmdType;
		} header;
		SVGAScreenObject obj;
	} *cmd;

	BUG_ON(!sou->buffer);

	fifo_size = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	/* The hardware has hung, nothing we can do about it here. */
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
	cmd->obj.structSize = sizeof(SVGAScreenObject);
	cmd->obj.id = sou->base.unit;
	cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
		(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
	cmd->obj.size.width = mode->hdisplay;
	cmd->obj.size.height = mode->vdisplay;
	if (sou->base.is_implicit) {
		cmd->obj.root.x = x;
		cmd->obj.root.y = y;
	} else {
		cmd->obj.root.x = sou->base.gui_x;
		cmd->obj.root.y = sou->base.gui_y;
	}
	sou->base.set_gui_x = cmd->obj.root.x;
	sou->base.set_gui_y = cmd->obj.root.y;

	/* Ok to assume that buffer is pinned in vram */
	vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
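	/* The backing buffer is allocated at 4 bytes per pixel (see
	 * vmw_sou_primary_plane_prepare_fb), hence the pitch below.
	 */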
	cmd->obj.backingStore.pitch = mode->hdisplay * 4;

	vmw_fifo_commit(dev_priv, fifo_size);

	sou->defined = true;

	return 0;
}

/**
 * Send the fifo command to destroy a screen.
 */
static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
				struct vmw_screen_object_unit *sou)
{
	size_t fifo_size;
	int ret;

	struct {
		struct {
			uint32_t cmdType;
		} header;
		SVGAFifoCmdDestroyScreen body;
	} *cmd;

	/* no need to do anything */
	if (unlikely(!sou->defined))
		return 0;

	fifo_size = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	/* the hardware has hung, nothing we can do about it here */
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
	cmd->body.screenId = sou->base.unit;

	vmw_fifo_commit(dev_priv, fifo_size);

	/* Force sync */
	ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
	if (unlikely(ret != 0))
		DRM_ERROR("Failed to sync with HW\n");
	else
		sou->defined = false;

	return ret;
}

/**
 * vmw_sou_crtc_mode_set_nofb - Create new screen
 *
 * @crtc: CRTC associated with the new screen
 *
 * This function creates/destroys a screen. It has no way of reporting
 * failure, so if something does go wrong we log an error and carry on as
 * best we can.
 */
static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct vmw_private *dev_priv;
	struct vmw_screen_object_unit *sou;
	struct vmw_framebuffer *vfb;
	struct drm_framebuffer *fb;
	struct drm_plane_state *ps;
	struct vmw_plane_state *vps;
	int ret;


	sou = vmw_crtc_to_sou(crtc);
	dev_priv = vmw_priv(crtc->dev);
	ps = crtc->primary->state;
	fb = ps->fb;
	vps = vmw_plane_state_to_vps(ps);

	vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;

	if (sou->defined) {
		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		if (ret) {
			DRM_ERROR("Failed to destroy Screen Object\n");
			return;
		}
	}

	if (vfb) {
		sou->buffer = vps->dmabuf;
		sou->buffer_size = vps->dmabuf_size;

		ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y,
					  &crtc->mode);
		if (ret)
			DRM_ERROR("Failed to define Screen Object %dx%d\n",
				  crtc->x, crtc->y);

		vmw_kms_add_active(dev_priv, &sou->base, vfb);
	} else {
		sou->buffer = NULL;
		sou->buffer_size = 0;

		vmw_kms_del_active(dev_priv, &sou->base);
	}
}

/**
 * vmw_sou_crtc_helper_prepare - Noop
 *
 * @crtc: CRTC associated with the new screen
 *
 * Prepares the CRTC for a mode set, but we don't need to do anything here.
 */
static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc)
{
}

/**
 * vmw_sou_crtc_atomic_enable - Noop
 *
 * @crtc: CRTC associated with the new screen
 *
 * This is called after a mode set has been completed.
 */
static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_crtc_state *old_state)
{
}

/**
 * vmw_sou_crtc_helper_disable - Turns off CRTC
 *
 * @crtc: CRTC to be turned off
 */
static void vmw_sou_crtc_helper_disable(struct drm_crtc *crtc)
{
	struct vmw_private *dev_priv;
	struct vmw_screen_object_unit *sou;
	int ret;


	if (!crtc) {
		DRM_ERROR("CRTC is NULL\n");
		return;
	}

	sou = vmw_crtc_to_sou(crtc);
	dev_priv = vmw_priv(crtc->dev);

	if (sou->defined) {
		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		if (ret)
			DRM_ERROR("Failed to destroy Screen Object\n");
	}
}

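/**
 * vmw_sou_crtc_page_flip - Page-flip callback for screen object CRTCs
 *
 * @crtc: CRTC to flip.
 * @new_fb: Framebuffer to flip to.
 * @event: Optional vblank event to queue once the flip fence signals.
 * @flags: DRM page-flip flags.
 * @ctx: Modeset acquire context.
 *
 * Flips the primary plane through the atomic helper and then issues a
 * full-screen dirty update of the new framebuffer, using the resulting
 * fence to signal @event if one was supplied.
 */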
static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
				  struct drm_framebuffer *new_fb,
				  struct drm_pending_vblank_event *event,
				  uint32_t flags,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
	struct vmw_fence_obj *fence = NULL;
	struct drm_vmw_rect vclips;
	int ret;

	if (!vmw_kms_crtc_flippable(dev_priv, crtc))
		return -EINVAL;

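	/* Async flips are not handled specially here; clear the flag so the
	 * atomic helper performs an ordinary synchronous flip.
	 */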
	flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
	ret = drm_atomic_helper_page_flip(crtc, new_fb, NULL, flags, ctx);
	if (ret) {
		DRM_ERROR("Page flip error %d.\n", ret);
		return ret;
	}

	/* do a full screen dirty update */
	vclips.x = crtc->x;
	vclips.y = crtc->y;
	vclips.w = crtc->mode.hdisplay;
	vclips.h = crtc->mode.vdisplay;

	if (vfb->dmabuf)
		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
						  NULL, &vclips, 1, 1,
						  true, &fence);
	else
		ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
						   NULL, &vclips, NULL,
						   0, 0, 1, 1, &fence);


	if (ret != 0)
		goto out_no_fence;
	if (!fence) {
		ret = -EINVAL;
		goto out_no_fence;
	}

	if (event) {
		struct drm_file *file_priv = event->base.file_priv;

		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   true);
	}

	/*
	 * No need to hold on to this now. The only cleanup
	 * we need to do if we fail is unref the fence.
	 */
	vmw_fence_obj_unreference(&fence);

	if (vmw_crtc_to_du(crtc)->is_implicit)
		vmw_kms_update_implicit_fb(dev_priv, crtc);

	return ret;

out_no_fence:
	drm_atomic_set_fb_for_plane(crtc->primary->state, old_fb);
	return ret;
}

static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
	.gamma_set = vmw_du_crtc_gamma_set,
	.destroy = vmw_sou_crtc_destroy,
	.reset = vmw_du_crtc_reset,
	.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
	.atomic_destroy_state = vmw_du_crtc_destroy_state,
	.set_config = vmw_kms_set_config,
	.page_flip = vmw_sou_crtc_page_flip,
};

/*
 * Screen Object Display Unit encoder functions
 */

static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
{
	vmw_sou_destroy(vmw_encoder_to_sou(encoder));
}

static const struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
	.destroy = vmw_sou_encoder_destroy,
};

/*
 * Screen Object Display Unit connector functions
 */

static void vmw_sou_connector_destroy(struct drm_connector *connector)
{
	vmw_sou_destroy(vmw_connector_to_sou(connector));
}

static const struct drm_connector_funcs vmw_sou_connector_funcs = {
	.dpms = vmw_du_connector_dpms,
	.detect = vmw_du_connector_detect,
	.fill_modes = vmw_du_connector_fill_modes,
	.set_property = vmw_du_connector_set_property,
	.destroy = vmw_sou_connector_destroy,
	.reset = vmw_du_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = vmw_du_connector_atomic_set_property,
	.atomic_get_property = vmw_du_connector_atomic_get_property,
};


static const struct
drm_connector_helper_funcs vmw_sou_connector_helper_funcs = {
	.best_encoder = drm_atomic_helper_best_encoder,
};



/*
 * Screen Object Display Plane Functions
 */

/**
 * vmw_sou_primary_plane_cleanup_fb - Frees sou backing buffer
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unreferences the SOU backing buffer and unpins the display surface.
 */
static void
vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
				 struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_dmabuf_unreference(&vps->dmabuf);
	vps->dmabuf_size = 0;

	vmw_du_plane_cleanup_fb(plane, old_state);
}


/**
 * vmw_sou_primary_plane_prepare_fb - allocate backing buffer
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * The SOU backing buffer is our equivalent of the display plane.
 *
 * Returns 0 on success
 */
static int
vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
				 struct drm_plane_state *new_state)
{
	struct drm_framebuffer *new_fb = new_state->fb;
	struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc;
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct vmw_private *dev_priv;
	size_t size;
	int ret;


	if (!new_fb) {
		vmw_dmabuf_unreference(&vps->dmabuf);
		vps->dmabuf_size = 0;

		return 0;
	}

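	/* The backing store is laid out at 4 bytes per pixel of the CRTC
	 * mode; only reallocate it when that size changes.
	 */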
	size = new_state->crtc_w * new_state->crtc_h * 4;

	if (vps->dmabuf) {
		if (vps->dmabuf_size == size)
			return 0;

		vmw_dmabuf_unreference(&vps->dmabuf);
		vps->dmabuf_size = 0;
	}

	vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL);
	if (!vps->dmabuf)
		return -ENOMEM;

	dev_priv = vmw_priv(crtc->dev);
	vmw_svga_enable(dev_priv);

	/* After we have allocated the backing store, we might not be able
	 * to resume the overlays; this is preferred to failing to allocate
	 * the backing store.
	 */
	vmw_overlay_pause_all(dev_priv);
	ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size,
			      &vmw_vram_ne_placement,
			      false, &vmw_dmabuf_bo_free);
	vmw_overlay_resume_all(dev_priv);

	if (ret != 0)
		vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
	else
		vps->dmabuf_size = size;

	return ret;
}


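/**
 * vmw_sou_primary_plane_atomic_update - Update the primary plane
 *
 * @plane: display plane
 * @old_state: Previous plane state
 *
 * The screen object itself is (re)defined from vmw_sou_crtc_mode_set_nofb,
 * so all that is left to do here is to keep the CRTC's legacy fb pointer
 * in sync with the new plane state.
 */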
static void
vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_crtc *crtc = plane->state->crtc;

	if (crtc)
		crtc->primary->fb = plane->state->fb;
}


static const struct drm_plane_funcs vmw_sou_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vmw_du_primary_plane_destroy,
	.reset = vmw_du_plane_reset,
	.atomic_duplicate_state = vmw_du_plane_duplicate_state,
	.atomic_destroy_state = vmw_du_plane_destroy_state,
};

static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vmw_du_cursor_plane_destroy,
	.reset = vmw_du_plane_reset,
	.atomic_duplicate_state = vmw_du_plane_duplicate_state,
	.atomic_destroy_state = vmw_du_plane_destroy_state,
};

/*
 * Atomic Helpers
 */
static const struct
drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = {
	.atomic_check = vmw_du_cursor_plane_atomic_check,
	.atomic_update = vmw_du_cursor_plane_atomic_update,
	.prepare_fb = vmw_du_cursor_plane_prepare_fb,
	.cleanup_fb = vmw_du_plane_cleanup_fb,
};

static const struct
drm_plane_helper_funcs vmw_sou_primary_plane_helper_funcs = {
	.atomic_check = vmw_du_primary_plane_atomic_check,
	.atomic_update = vmw_sou_primary_plane_atomic_update,
	.prepare_fb = vmw_sou_primary_plane_prepare_fb,
	.cleanup_fb = vmw_sou_primary_plane_cleanup_fb,
};

static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
	.prepare = vmw_sou_crtc_helper_prepare,
	.disable = vmw_sou_crtc_helper_disable,
	.mode_set_nofb = vmw_sou_crtc_mode_set_nofb,
	.atomic_check = vmw_du_crtc_atomic_check,
	.atomic_begin = vmw_du_crtc_atomic_begin,
	.atomic_flush = vmw_du_crtc_atomic_flush,
	.atomic_enable = vmw_sou_crtc_atomic_enable,
};


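/**
 * vmw_sou_init - Create and initialize one screen object display unit
 *
 * @dev_priv: Pointer to the device private structure.
 * @unit: Unit (output) number of the display unit to create.
 *
 * Allocates a struct vmw_screen_object_unit and sets up its primary and
 * cursor planes, connector, encoder and CRTC, attaching the standard
 * vmwgfx display properties to the connector.
 */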
static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
	struct vmw_screen_object_unit *sou;
	struct drm_device *dev = dev_priv->dev;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_plane *primary, *cursor;
	struct drm_crtc *crtc;
	int ret;

	sou = kzalloc(sizeof(*sou), GFP_KERNEL);
	if (!sou)
		return -ENOMEM;

	sou->base.unit = unit;
	crtc = &sou->base.crtc;
	encoder = &sou->base.encoder;
	connector = &sou->base.connector;
	primary = &sou->base.primary;
	cursor = &sou->base.cursor;

	sou->base.active_implicit = false;
	sou->base.pref_active = (unit == 0);
	sou->base.pref_width = dev_priv->initial_width;
	sou->base.pref_height = dev_priv->initial_height;
	sou->base.pref_mode = NULL;

	/*
	 * Remove this after enabling atomic because property values can
	 * only exist in a state object
	 */
	sou->base.is_implicit = false;

	/* Initialize primary plane */
	vmw_du_plane_reset(primary);

	ret = drm_universal_plane_init(dev, &sou->base.primary,
				       0, &vmw_sou_plane_funcs,
				       vmw_primary_plane_formats,
				       ARRAY_SIZE(vmw_primary_plane_formats),
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize primary plane\n");
		goto err_free;
	}

	drm_plane_helper_add(primary, &vmw_sou_primary_plane_helper_funcs);

	/* Initialize cursor plane */
	vmw_du_plane_reset(cursor);

	ret = drm_universal_plane_init(dev, &sou->base.cursor,
			0, &vmw_sou_cursor_funcs,
			vmw_cursor_plane_formats,
			ARRAY_SIZE(vmw_cursor_plane_formats),
			DRM_PLANE_TYPE_CURSOR, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize cursor plane\n");
		drm_plane_cleanup(&sou->base.primary);
		goto err_free;
	}

	drm_plane_helper_add(cursor, &vmw_sou_cursor_plane_helper_funcs);

	vmw_du_connector_reset(connector);
	ret = drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
				 DRM_MODE_CONNECTOR_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to initialize connector\n");
		goto err_free;
	}

	drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
	connector->status = vmw_du_connector_detect(connector, true);
	vmw_connector_state_to_vcs(connector->state)->is_implicit = false;


	ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
			       DRM_MODE_ENCODER_VIRTUAL, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize encoder\n");
		goto err_free_connector;
	}

	(void) drm_mode_connector_attach_encoder(connector, encoder);
	encoder->possible_crtcs = (1 << unit);
	encoder->possible_clones = 0;

	ret = drm_connector_register(connector);
	if (ret) {
		DRM_ERROR("Failed to register connector\n");
		goto err_free_encoder;
	}


	vmw_du_crtc_reset(crtc);
	ret = drm_crtc_init_with_planes(dev, crtc, &sou->base.primary,
					&sou->base.cursor,
					&vmw_screen_object_crtc_funcs, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize CRTC\n");
		goto err_free_unregister;
	}

	drm_crtc_helper_add(crtc, &vmw_sou_crtc_helper_funcs);

	drm_mode_crtc_set_gamma_size(crtc, 256);

	drm_object_attach_property(&connector->base,
				   dev_priv->hotplug_mode_update_property, 1);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_x_property, 0);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_y_property, 0);
	if (dev_priv->implicit_placement_property)
		drm_object_attach_property
			(&connector->base,
			 dev_priv->implicit_placement_property,
			 sou->base.is_implicit);

	return 0;

err_free_unregister:
	drm_connector_unregister(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
err_free_connector:
	drm_connector_cleanup(connector);
err_free:
	kfree(sou);
	return ret;
}

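/**
 * vmw_kms_sou_init_display - Set up the screen object display system
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Verifies that the device exposes SVGA_CAP_SCREEN_OBJECT_2, initializes
 * vblank support and the implicit placement property, and creates one
 * screen object display unit per supported output.
 */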
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int i, ret;

	if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
		DRM_INFO("Not using screen objects,"
			 " missing cap SCREEN_OBJECT_2\n");
		return -ENOSYS;
	}

	ret = -ENOMEM;
	dev_priv->num_implicit = 0;
	dev_priv->implicit_fb = NULL;

	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
	if (unlikely(ret != 0))
		return ret;

	vmw_kms_create_implicit_placement_property(dev_priv, false);

	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
		vmw_sou_init(dev_priv, i);

	dev_priv->active_display_unit = vmw_du_screen_object;

	DRM_INFO("Screen Objects Display Unit initialized\n");

	return 0;
}

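/**
 * do_dmabuf_define_gmrfb - Point the GMRFB at a dma-buffer backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the dma-buffer backed framebuffer.
 *
 * Emits an SVGA_CMD_DEFINE_GMRFB command describing the framebuffer's
 * format, pitch and backing buffer, so that subsequent GMRFB blit
 * commands operate on this buffer.
 */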
static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
				  struct vmw_framebuffer *framebuffer)
{
	struct vmw_dma_buffer *buf =
		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
			     base)->buffer;
	int depth = framebuffer->base.format->depth;
	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd;

	/* Emulate RGBA support: contrary to what svga_reg.h suggests, RGBA
	 * is not supported by hosts. This is only a problem if we are
	 * reading this value back later and expecting what we uploaded.
	 */
	if (depth == 32)
		depth = 24;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (!cmd) {
		DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
		return -ENOMEM;
	}

	cmd->header = SVGA_CMD_DEFINE_GMRFB;
	cmd->body.format.bitsPerPixel = framebuffer->base.format->cpp[0] * 8;
	cmd->body.format.colorDepth = depth;
	cmd->body.format.reserved = 0;
	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
	/* Buffer is reserved in vram or GMR */
	vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
 * blit surface to screen command.
 *
 * @dirty: The closure structure.
 *
 * Fills in the missing fields in the command, and translates the cliprects
 * to match the destination bounding box encoded.
 */
static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_surface_dirty *sdirty =
		container_of(dirty, typeof(*sdirty), base);
	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
	s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
	s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
	size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
	int i;

	if (!dirty->num_hits) {
		vmw_fifo_commit(dirty->dev_priv, 0);
		return;
	}

	cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
	cmd->header.size = sizeof(cmd->body) + region_size;

	/*
	 * Use the destination bounding box to specify destination - and
	 * source bounding regions.
	 */
	cmd->body.destRect.left = sdirty->left;
	cmd->body.destRect.right = sdirty->right;
	cmd->body.destRect.top = sdirty->top;
	cmd->body.destRect.bottom = sdirty->bottom;

	cmd->body.srcRect.left = sdirty->left + trans_x;
	cmd->body.srcRect.right = sdirty->right + trans_x;
	cmd->body.srcRect.top = sdirty->top + trans_y;
	cmd->body.srcRect.bottom = sdirty->bottom + trans_y;

	cmd->body.srcImage.sid = sdirty->sid;
	cmd->body.destScreenId = dirty->unit->unit;

	/* Blits are relative to the destination rect. Translate. */
	for (i = 0; i < dirty->num_hits; ++i, ++blit) {
		blit->left -= sdirty->left;
		blit->right -= sdirty->left;
		blit->top -= sdirty->top;
		blit->bottom -= sdirty->top;
	}

	vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));

	sdirty->left = sdirty->top = S32_MAX;
	sdirty->right = sdirty->bottom = S32_MIN;
}

/**
 * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a SVGASignedRect cliprect and updates the bounding box of the
 * BLIT_SURFACE_TO_SCREEN command.
 */
static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_surface_dirty *sdirty =
		container_of(dirty, typeof(*sdirty), base);
	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];

	/* Destination rect. */
	blit += dirty->num_hits;
	blit->left = dirty->unit_x1;
	blit->top = dirty->unit_y1;
	blit->right = dirty->unit_x2;
	blit->bottom = dirty->unit_y2;

	/* Destination bounding box */
	sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
	sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
	sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
	sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);

	dirty->num_hits++;
}

/**
 * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the surface-buffer backed framebuffer.
 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @srf: Pointer to surface to blit from. If NULL, the surface attached
 * to @framebuffer will be used.
 * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
 * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
 * @num_clips: Number of clip rects in @clips.
 * @inc: Increment to use when looping over @clips.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
				 struct vmw_framebuffer *framebuffer,
				 struct drm_clip_rect *clips,
				 struct drm_vmw_rect *vclips,
				 struct vmw_resource *srf,
				 s32 dest_x,
				 s32 dest_y,
				 unsigned num_clips, int inc,
				 struct vmw_fence_obj **out_fence)
{
	struct vmw_framebuffer_surface *vfbs =
		container_of(framebuffer, typeof(*vfbs), base);
	struct vmw_kms_sou_surface_dirty sdirty;
	int ret;

	if (!srf)
		srf = &vfbs->surface->res;

	ret = vmw_kms_helper_resource_prepare(srf, true);
	if (ret)
		return ret;

	sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
	sdirty.base.clip = vmw_sou_surface_clip;
	sdirty.base.dev_priv = dev_priv;
	sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
		sizeof(SVGASignedRect) * num_clips;

	sdirty.sid = srf->id;
	sdirty.left = sdirty.top = S32_MAX;
	sdirty.right = sdirty.bottom = S32_MIN;
	sdirty.dst_x = dest_x;
	sdirty.dst_y = dest_y;

	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
				   dest_x, dest_y, num_clips, inc,
				   &sdirty.base);
	vmw_kms_helper_resource_finish(srf, out_fence);

	return ret;
}

/**
 * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of dma-buffer to
 * screen blit clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of GMRFB-to-screen blit clips.
 */
static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
{
	if (!dirty->num_hits) {
		vmw_fifo_commit(dirty->dev_priv, 0);
		return;
	}

	vmw_fifo_commit(dirty->dev_priv,
			sizeof(struct vmw_kms_sou_dmabuf_blit) *
			dirty->num_hits);
}

/**
 * vmw_sou_dmabuf_clip - Callback to encode a dma-buffer to screen cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
 */
static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;

	blit += dirty->num_hits;
	blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
	blit->body.destScreenId = dirty->unit->unit;
	blit->body.srcOrigin.x = dirty->fb_x;
	blit->body.srcOrigin.y = dirty->fb_y;
	blit->body.destRect.left = dirty->unit_x1;
	blit->body.destRect.top = dirty->unit_y1;
	blit->body.destRect.right = dirty->unit_x2;
	blit->body.destRect.bottom = dirty->unit_y2;
	dirty->num_hits++;
}

/**
 * vmw_kms_sou_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the dma-buffer backed framebuffer.
 * @clips: Array of clip rects.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @num_clips: Number of clip rects in @clips.
 * @increment: Increment to use when looping over @clips.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
				struct vmw_framebuffer *framebuffer,
				struct drm_clip_rect *clips,
				struct drm_vmw_rect *vclips,
				unsigned num_clips, int increment,
				bool interruptible,
				struct vmw_fence_obj **out_fence)
{
	struct vmw_dma_buffer *buf =
		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
			     base)->buffer;
	struct vmw_kms_dirty dirty;
	int ret;

	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
					    false);
	if (ret)
		return ret;

	ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
	if (unlikely(ret != 0))
		goto out_revert;

	dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
	dirty.clip = vmw_sou_dmabuf_clip;
	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
		num_clips;
	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
				   0, 0, num_clips, increment, &dirty);
	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);

	return ret;

out_revert:
	vmw_kms_helper_buffer_revert(buf);

	return ret;
}


/**
 * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of readback clips.
 */
static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
{
	if (!dirty->num_hits) {
		vmw_fifo_commit(dirty->dev_priv, 0);
		return;
	}

	vmw_fifo_commit(dirty->dev_priv,
			sizeof(struct vmw_kms_sou_readback_blit) *
			dirty->num_hits);
}

/**
 * vmw_sou_readback_clip - Callback to encode a readback cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
 */
static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_readback_blit *blit = dirty->cmd;

	blit += dirty->num_hits;
	blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
	blit->body.srcScreenId = dirty->unit->unit;
	blit->body.destOrigin.x = dirty->fb_x;
	blit->body.destOrigin.y = dirty->fb_y;
	blit->body.srcRect.left = dirty->unit_x1;
	blit->body.srcRect.top = dirty->unit_y1;
	blit->body.srcRect.right = dirty->unit_x2;
	blit->body.srcRect.bottom = dirty->unit_y2;
	dirty->num_hits++;
}

/**
 * vmw_kms_sou_readback - Perform a readback from the screen object system to
 * a dma-buffer backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the dma-buffer backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
			 struct drm_file *file_priv,
			 struct vmw_framebuffer *vfb,
			 struct drm_vmw_fence_rep __user *user_fence_rep,
			 struct drm_vmw_rect *vclips,
			 uint32_t num_clips)
{
	struct vmw_dma_buffer *buf =
		container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
	struct vmw_kms_dirty dirty;
	int ret;

	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
	if (ret)
		return ret;

	ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
	if (unlikely(ret != 0))
		goto out_revert;

	dirty.fifo_commit = vmw_sou_readback_fifo_commit;
	dirty.clip = vmw_sou_readback_clip;
	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
		num_clips;
	ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
				   0, 0, num_clips, 1, &dirty);
	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
				     user_fence_rep);

	return ret;

out_revert:
	vmw_kms_helper_buffer_revert(buf);

	return ret;
}