/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/pci.h>

#include <drm/drm_fourcc.h>
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

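/*
 * Dirty regions are flushed to the device at most once per VMW_DIRTY_DELAY
 * jiffies (roughly 30 times per second); updates arriving in between are
 * coalesced into a single dirty rectangle.
 */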
#define VMW_DIRTY_DELAY (HZ / 30)

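/*
 * vmw_fb_par - per-device fbdev state.
 *
 * Pixels are first drawn into a vmalloc'd shadow buffer and later copied
 * into a kms framebuffer backed by a buffer object, clipped against the
 * coalesced dirty rectangle tracked in @dirty.
 */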
struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct mutex bo_mutex;
	struct vmw_buffer_object *vmw_bo;
	unsigned bo_size;
	struct drm_framebuffer *set_fb;
	struct drm_display_mode *set_mode;
	u32 fb_x;
	u32 fb_y;
	bool bo_iowrite;

	u32 pseudo_palette[17];

	unsigned max_width;
	unsigned max_height;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;

	struct drm_crtc *crtc;
	struct drm_connector *con;
	struct delayed_work local_work;
};

static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->set_fb->format->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue  & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n",
			  par->set_fb->format->depth,
			  par->set_fb->format->cpp[0] * 8);
		return 1;
	}

	return 0;
}

static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					var->xres * var->bits_per_pixel/8,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/**
 * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
 *
 * @work: The struct work_struct associated with this task.
 *
 * This function flushes the dirty regions of the vmalloc framebuffer to the
 * kms framebuffer, and if the kms framebuffer is visible, also updates the
 * corresponding displays. Note that this function runs even if the kms
 * framebuffer is not bound to a crtc and thus not visible, but it is turned
 * off during hibernation using the par->dirty.active bool.
 */
static void vmw_fb_dirty_flush(struct work_struct *work)
{
	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
					      local_work.work);
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	unsigned long irq_flags;
	s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
	u32 cpp, max_x, max_y;
	struct drm_clip_rect clip;
	struct drm_framebuffer *cur_fb;
	u8 *src_ptr, *dst_ptr;
	struct vmw_buffer_object *vbo = par->vmw_bo;
	void *virtual;

	if (!READ_ONCE(par->dirty.active))
		return;

	mutex_lock(&par->bo_mutex);
	cur_fb = par->set_fb;
	if (!cur_fb)
		goto out_unlock;

	(void) ttm_read_lock(&vmw_priv->reservation_sem, false);
	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
	virtual = vmw_bo_map_and_cache(vbo);
	if (!virtual)
		goto out_unreserve;

	spin_lock_irqsave(&par->dirty.lock, irq_flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
		goto out_unreserve;
	}

	/*
	 * Handle panning when copying from vmalloc to framebuffer.
	 * Clip dirty area to framebuffer.
	 */
	cpp = cur_fb->format->cpp[0];
	max_x = par->fb_x + cur_fb->width;
	max_y = par->fb_y + cur_fb->height;

	dst_x1 = par->dirty.x1 - par->fb_x;
	dst_y1 = par->dirty.y1 - par->fb_y;
	dst_x1 = max_t(s32, dst_x1, 0);
	dst_y1 = max_t(s32, dst_y1, 0);

	dst_x2 = par->dirty.x2 - par->fb_x;
	dst_y2 = par->dirty.y2 - par->fb_y;
	dst_x2 = min_t(s32, dst_x2, max_x);
	dst_y2 = min_t(s32, dst_y2, max_y);
	w = dst_x2 - dst_x1;
	h = dst_y2 - dst_y1;
	w = max_t(s32, 0, w);
	h = max_t(s32, 0, h);

	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);

	if (w && h) {
		dst_ptr = (u8 *)virtual +
			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
		src_ptr = (u8 *)par->vmalloc +
			((dst_y1 + par->fb_y) * info->fix.line_length +
			 (dst_x1 + par->fb_x) * cpp);

		while (h-- > 0) {
			memcpy(dst_ptr, src_ptr, w*cpp);
			dst_ptr += par->set_fb->pitches[0];
			src_ptr += info->fix.line_length;
		}

		clip.x1 = dst_x1;
		clip.x2 = dst_x2;
		clip.y1 = dst_y1;
		clip.y2 = dst_y2;
	}

out_unreserve:
	ttm_bo_unreserve(&vbo->base);
	ttm_read_unlock(&vmw_priv->reservation_sem);
	if (w && h) {
		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
						       &clip, 1));
		vmw_fifo_flush(vmw_priv, false);
	}
out_unlock:
	mutex_unlock(&par->bo_mutex);
}

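/*
 * Grow the coalesced dirty rectangle to include the given area. If the
 * rectangle was empty, also kick off the delayed flush work (provided
 * dirty tracking is active).
 */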
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/*
		 * If we are active, start the dirty work; we share
		 * the work with the defio system.
		 */
		if (par->dirty.active)
			schedule_delayed_work(&par->local_work,
					      VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

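/*
 * fb_pan_display hook: record the new panning offset and mark the whole
 * visible framebuffer dirty so the next flush repaints it.
 */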
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;

	if ((var->xoffset + var->xres) > var->xres_virtual ||
	    (var->yoffset + var->yres) > var->yres_virtual) {
		DRM_ERROR("Requested panning cannot fit in framebuffer\n");
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;
	if (par->set_fb)
		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
				  par->set_fb->height);
	mutex_unlock(&par->bo_mutex);

	return 0;
}

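/*
 * Deferred I/O callback: convert the list of touched pages into a span of
 * scanlines, mark it dirty, and flush immediately since the defio
 * machinery has already applied its own delay.
 */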
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);

		/*
		 * Since we've already waited on this work once, try to
		 * execute asap.
		 */
		cancel_delayed_work(&par->local_work);
		schedule_delayed_work(&par->local_work, 0);
	}
}

static struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};

/*
 * Draw code
 */

static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

/*
 * Bring up code
 */

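/*
 * Allocate a vmw_buffer_object in system placement to back the kms
 * framebuffer, holding the reservation semaphore around the init.
 */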
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_buffer_object **out)
{
	struct vmw_buffer_object *vmw_bo;
	int ret;

	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_bo_init(vmw_priv, vmw_bo, size,
			  &vmw_sys_placement,
			  false,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;
	ttm_write_unlock(&vmw_priv->reservation_sem);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->reservation_sem);
	return ret;
}

static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
				int *depth)
{
	switch (var->bits_per_pixel) {
	case 32:
		*depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	return 0;
}

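/*
 * Call the crtc's set_config hook under a local acquire context,
 * backing off and retrying on -EDEADLK as the modeset locking
 * protocol requires.
 */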
static int vmwgfx_set_config_internal(struct drm_mode_set *set)
{
	struct drm_crtc *crtc = set->crtc;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

restart:
	ret = crtc->funcs->set_config(set, &ctx);

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto restart;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

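/*
 * Tear down the fbdev kms state: unset the mode, drop the framebuffer
 * reference, and optionally unreference the backing buffer object.
 */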
static int vmw_fb_kms_detach(struct vmw_fb_par *par,
			     bool detach_bo,
			     bool unref_bo)
{
	struct drm_framebuffer *cur_fb = par->set_fb;
	int ret;

	/* Detach the KMS framebuffer from crtcs */
	if (par->set_mode) {
		struct drm_mode_set set;

		set.crtc = par->crtc;
		set.x = 0;
		set.y = 0;
		set.mode = NULL;
		set.fb = NULL;
		set.num_connectors = 0;
		set.connectors = &par->con;
		ret = vmwgfx_set_config_internal(&set);
		if (ret) {
			DRM_ERROR("Could not unset a mode.\n");
			return ret;
		}
		drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
		par->set_mode = NULL;
	}

	if (cur_fb) {
		drm_framebuffer_put(cur_fb);
		par->set_fb = NULL;
	}

	if (par->vmw_bo && detach_bo && unref_bo)
		vmw_bo_unreference(&par->vmw_bo);

	return 0;
}

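/*
 * (Re)create the kms framebuffer for the current fb_info geometry. An
 * existing framebuffer is reused when it already matches, and the backing
 * buffer object is kept unless it is too small or more than twice the
 * required size.
 */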
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
	struct drm_mode_fb_cmd2 mode_cmd;
	struct vmw_fb_par *par = info->par;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_framebuffer *cur_fb;
	struct vmw_framebuffer *vfb;
	int ret = 0, depth;
	size_t new_bo_size;

	ret = vmw_fb_compute_depth(var, &depth);
	if (ret)
		return ret;

	mode_cmd.width = var->xres;
	mode_cmd.height = var->yres;
	mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
	mode_cmd.pixel_format =
		drm_mode_legacy_fb_format(var->bits_per_pixel, depth);

	cur_fb = par->set_fb;
	if (cur_fb && cur_fb->width == mode_cmd.width &&
	    cur_fb->height == mode_cmd.height &&
	    cur_fb->format->format == mode_cmd.pixel_format &&
	    cur_fb->pitches[0] == mode_cmd.pitches[0])
		return 0;

	/* Need new buffer object ? */
	new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
	ret = vmw_fb_kms_detach(par,
				par->bo_size < new_bo_size ||
				par->bo_size > 2*new_bo_size,
				true);
	if (ret)
		return ret;

	if (!par->vmw_bo) {
		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
				       &par->vmw_bo);
		if (ret) {
			DRM_ERROR("Failed creating a buffer object for "
				  "fbdev.\n");
			return ret;
		}
		par->bo_size = new_bo_size;
	}

	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
				      true, &mode_cmd);
	if (IS_ERR(vfb))
		return PTR_ERR(vfb);

	par->set_fb = &vfb->base;

	return 0;
}

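/*
 * fb_set_par hook: build a display mode matching the requested resolution,
 * rebuild the kms framebuffer if needed, and program it on the crtc.
 */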
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct drm_mode_set set;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
		DRM_MODE_TYPE_DRIVER,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	struct drm_display_mode *mode;
	int ret;

	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
	if (!mode) {
		DRM_ERROR("Could not create new fb mode.\n");
		return -ENOMEM;
	}

	mode->hdisplay = var->xres;
	mode->vdisplay = var->yres;
	vmw_guess_mode_timing(mode);

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					mode->hdisplay *
					DIV_ROUND_UP(var->bits_per_pixel, 8),
					mode->vdisplay)) {
		drm_mode_destroy(vmw_priv->dev, mode);
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	ret = vmw_fb_kms_framebuffer(info);
	if (ret)
		goto out_unlock;

	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;

	set.crtc = par->crtc;
	set.x = 0;
	set.y = 0;
	set.mode = mode;
	set.fb = par->set_fb;
	set.num_connectors = 1;
	set.connectors = &par->con;

	ret = vmwgfx_set_config_internal(&set);
	if (ret)
		goto out_unlock;

	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
			  par->set_fb->width, par->set_fb->height);

	/*
	 * If there already was stuff dirty we won't
	 * schedule a new work, so let's do it now.
	 */
	schedule_delayed_work(&par->local_work, 0);

out_unlock:
	if (par->set_mode)
		drm_mode_destroy(vmw_priv->dev, par->set_mode);
	par->set_mode = mode;

	mutex_unlock(&par->bo_mutex);

	return ret;
}


static const struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};

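/*
 * Allocate and register the fbdev emulation for the device: pick an
 * initial mode, allocate the vmalloc shadow buffer, fill in the fixed
 * and variable screen info, and wire up deferred I/O.
 */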
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned fb_width, fb_height;
	unsigned int fb_bpp, fb_pitch, fb_size;
	struct drm_display_mode *init_mode;
	int ret;

	fb_bpp = 32;

	/* XXX Shouldn't these limits be queried from the device as well? */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	memset(par, 0, sizeof(*par));
	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
	par->vmw_priv = vmw_priv;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
				      par->max_height, &par->con,
				      &par->crtc, &init_mode);
	if (ret)
		goto err_kms;

	info->var.xres = init_mode->hdisplay;
	info->var.yres = init_mode->vdisplay;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vzalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = (char __iomem *)par->vmalloc;
	info->screen_size = fb_size;

	info->fbops = &vmw_fb_ops;

	/* 24-bit depth by default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = fb_bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	mutex_init(&par->bo_mutex);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	vmw_fb_set_par(info);

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
err_free:
	vfree(par->vmalloc);
err_kms:
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;

	/* Teardown order: stop deferred I/O and the flush work before unregistering */
	fb_deferred_io_cleanup(info);
	cancel_delayed_work_sync(&par->local_work);
	unregister_framebuffer(info);

	mutex_lock(&par->bo_mutex);
	(void) vmw_fb_kms_detach(par, true, true);
	mutex_unlock(&par->bo_mutex);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

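/*
 * vmw_fb_off() and vmw_fb_on() bracket suspend/hibernation: they stop and
 * restart the dirty flushing without tearing down the fbdev state.
 */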
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work(&info->deferred_work);
	flush_delayed_work(&par->local_work);

	return 0;
}

int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	/*
	 * Need to reschedule a dirty update, because otherwise that's
	 * only done in dirty_mark() if the previous coalesced
	 * dirty region was empty.
	 */
	schedule_delayed_work(&par->local_work, 0);

	return 0;
}