/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/pci.h>

#include <drm/drm_fourcc.h>
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

#define VMW_DIRTY_DELAY (HZ / 30)

struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct mutex bo_mutex;
	struct vmw_buffer_object *vmw_bo;
	unsigned bo_size;
	struct drm_framebuffer *set_fb;
	struct drm_display_mode *set_mode;
	u32 fb_x;
	u32 fb_y;
	bool bo_iowrite;

	u32 pseudo_palette[17];

	unsigned max_width;
	unsigned max_height;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;

	struct drm_crtc *crtc;
	struct drm_connector *con;
	struct delayed_work local_work;
};

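/**
 * vmw_fb_setcolreg - fbdev .fb_setcolreg callback
 *
 * Stores the requested color in the 16-entry pseudo palette used by the
 * fbcon drawing helpers. Only 24- and 32-bit truecolor framebuffers are
 * supported.
 */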
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->set_fb->format->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n",
			  par->set_fb->format->depth,
			  par->set_fb->format->cpp[0] * 8);
		return 1;
	}

	return 0;
}

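/**
 * vmw_fb_check_var - fbdev .fb_check_var callback
 *
 * Validates the requested bpp and geometry against the fbdev limits and
 * the available VRAM, and fills in the RGBA field layout for the resulting
 * 24- or 32-bit depth.
 */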
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					var->xres * var->bits_per_pixel/8,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/**
 * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
 *
 * @work: The struct work_struct associated with this task.
 *
 * This function flushes the dirty regions of the vmalloc framebuffer to the
 * kms framebuffer, and if the kms framebuffer is visible, also updates the
 * corresponding displays. Note that this function runs even if the kms
 * framebuffer is not bound to a crtc and thus not visible, but it is turned
 * off during hibernation using the par->dirty.active bool.
 */
static void vmw_fb_dirty_flush(struct work_struct *work)
{
	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
					      local_work.work);
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	unsigned long irq_flags;
	s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
	u32 cpp, max_x, max_y;
	struct drm_clip_rect clip;
	struct drm_framebuffer *cur_fb;
	u8 *src_ptr, *dst_ptr;
	struct vmw_buffer_object *vbo = par->vmw_bo;
	void *virtual;

	if (!READ_ONCE(par->dirty.active))
		return;

	mutex_lock(&par->bo_mutex);
	cur_fb = par->set_fb;
	if (!cur_fb)
		goto out_unlock;

	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
	virtual = vmw_bo_map_and_cache(vbo);
	if (!virtual)
		goto out_unreserve;

	spin_lock_irqsave(&par->dirty.lock, irq_flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
		goto out_unreserve;
	}

	/*
	 * Handle panning when copying from vmalloc to framebuffer.
	 * Clip dirty area to framebuffer.
	 */
	cpp = cur_fb->format->cpp[0];
	max_x = par->fb_x + cur_fb->width;
	max_y = par->fb_y + cur_fb->height;

	dst_x1 = par->dirty.x1 - par->fb_x;
	dst_y1 = par->dirty.y1 - par->fb_y;
	dst_x1 = max_t(s32, dst_x1, 0);
	dst_y1 = max_t(s32, dst_y1, 0);

	dst_x2 = par->dirty.x2 - par->fb_x;
	dst_y2 = par->dirty.y2 - par->fb_y;
	dst_x2 = min_t(s32, dst_x2, max_x);
	dst_y2 = min_t(s32, dst_y2, max_y);
	w = dst_x2 - dst_x1;
	h = dst_y2 - dst_y1;
	w = max_t(s32, 0, w);
	h = max_t(s32, 0, h);

	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);

	if (w && h) {
		dst_ptr = (u8 *)virtual +
			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
		src_ptr = (u8 *)par->vmalloc +
			((dst_y1 + par->fb_y) * info->fix.line_length +
			 (dst_x1 + par->fb_x) * cpp);

		while (h-- > 0) {
			memcpy(dst_ptr, src_ptr, w*cpp);
			dst_ptr += par->set_fb->pitches[0];
			src_ptr += info->fix.line_length;
		}

		clip.x1 = dst_x1;
		clip.x2 = dst_x2;
		clip.y1 = dst_y1;
		clip.y2 = dst_y2;
	}

out_unreserve:
	ttm_bo_unreserve(&vbo->base);
	if (w && h) {
		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
						       &clip, 1));
		vmw_cmd_flush(vmw_priv, false);
	}
out_unlock:
	mutex_unlock(&par->bo_mutex);
}

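/**
 * vmw_fb_dirty_mark - extend the coalesced dirty region
 *
 * Grows the dirty rectangle to include the given area and, if this is the
 * first damage since the last flush, schedules the delayed flush work.
 */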
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/*
		 * If we are active, start the dirty work;
		 * we share the work with the defio system.
		 */
		if (par->dirty.active)
			schedule_delayed_work(&par->local_work,
					      VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

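/**
 * vmw_fb_pan_display - fbdev .fb_pan_display callback
 *
 * Records the new panning offset and marks the whole KMS framebuffer
 * dirty so the next flush picks up the new scanout origin.
 */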
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;

	if ((var->xoffset + var->xres) > var->xres_virtual ||
	    (var->yoffset + var->yres) > var->yres_virtual) {
		DRM_ERROR("Requested panning can not fit in framebuffer\n");
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;
	if (par->set_fb)
		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
				  par->set_fb->height);
	mutex_unlock(&par->bo_mutex);

	return 0;
}

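/**
 * vmw_deferred_io - fbdev deferred I/O callback
 *
 * Converts the list of touched pages into a dirty scanline range and
 * schedules an immediate flush of the coalesced dirty region.
 */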
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);

		/*
		 * Since we've already waited on this work once, try to
		 * execute asap.
		 */
		cancel_delayed_work(&par->local_work);
		schedule_delayed_work(&par->local_work, 0);
	}
}

static struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};

/*
 * Draw code
 */

static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

/*
 * Bring up code
 */

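/**
 * vmw_fb_create_bo - create the buffer object backing the fbdev framebuffer
 *
 * Allocates a buffer object of the given size in system memory placement.
 */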
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_buffer_object **out)
{
	struct vmw_buffer_object *vmw_bo;
	int ret;

	ret = vmw_bo_create(vmw_priv, size,
			    &vmw_sys_placement,
			    false, false,
			    &vmw_bo_bo_free, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	*out = vmw_bo;

	return ret;
}

static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
				int *depth)
{
	switch (var->bits_per_pixel) {
	case 32:
		*depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	return 0;
}

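/**
 * vmwgfx_set_config_internal - perform a modeset without the legacy
 * set_config ioctl helper
 *
 * Calls the crtc's set_config hook directly with a local acquire context,
 * retrying on -EDEADLK.
 */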
static int vmwgfx_set_config_internal(struct drm_mode_set *set)
{
	struct drm_crtc *crtc = set->crtc;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

restart:
	ret = crtc->funcs->set_config(set, &ctx);

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto restart;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

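/**
 * vmw_fb_kms_detach - tear down the current fbdev KMS state
 *
 * Unsets the active mode, drops the KMS framebuffer reference and,
 * if requested, unreferences the backing buffer object.
 */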
static int vmw_fb_kms_detach(struct vmw_fb_par *par,
			     bool detach_bo,
			     bool unref_bo)
{
	struct drm_framebuffer *cur_fb = par->set_fb;
	int ret;

	/* Detach the KMS framebuffer from crtcs */
	if (par->set_mode) {
		struct drm_mode_set set;

		set.crtc = par->crtc;
		set.x = 0;
		set.y = 0;
		set.mode = NULL;
		set.fb = NULL;
		set.num_connectors = 0;
		set.connectors = &par->con;
		ret = vmwgfx_set_config_internal(&set);
		if (ret) {
			DRM_ERROR("Could not unset a mode.\n");
			return ret;
		}
		drm_mode_destroy(&par->vmw_priv->drm, par->set_mode);
		par->set_mode = NULL;
	}

	if (cur_fb) {
		drm_framebuffer_put(cur_fb);
		par->set_fb = NULL;
	}

	if (par->vmw_bo && detach_bo && unref_bo)
		vmw_bo_unreference(&par->vmw_bo);

	return 0;
}

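/**
 * vmw_fb_kms_framebuffer - (re)create the KMS framebuffer for fbdev
 *
 * Reuses the current framebuffer if the requested size, format and pitch
 * already match; otherwise detaches the old state, allocates a suitably
 * sized buffer object if needed and wraps it in a new KMS framebuffer.
 */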
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
	struct drm_mode_fb_cmd2 mode_cmd;
	struct vmw_fb_par *par = info->par;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_framebuffer *cur_fb;
	struct vmw_framebuffer *vfb;
	int ret = 0, depth;
	size_t new_bo_size;

	ret = vmw_fb_compute_depth(var, &depth);
	if (ret)
		return ret;

	mode_cmd.width = var->xres;
	mode_cmd.height = var->yres;
	mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
	mode_cmd.pixel_format =
		drm_mode_legacy_fb_format(var->bits_per_pixel, depth);

	cur_fb = par->set_fb;
	if (cur_fb && cur_fb->width == mode_cmd.width &&
	    cur_fb->height == mode_cmd.height &&
	    cur_fb->format->format == mode_cmd.pixel_format &&
	    cur_fb->pitches[0] == mode_cmd.pitches[0])
		return 0;

	/* Need new buffer object ? */
	new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
	ret = vmw_fb_kms_detach(par,
				par->bo_size < new_bo_size ||
				par->bo_size > 2*new_bo_size,
				true);
	if (ret)
		return ret;

	if (!par->vmw_bo) {
		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
				       &par->vmw_bo);
		if (ret) {
			DRM_ERROR("Failed creating a buffer object for "
				  "fbdev.\n");
			return ret;
		}
		par->bo_size = new_bo_size;
	}

	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
				      true, &mode_cmd);
	if (IS_ERR(vfb))
		return PTR_ERR(vfb);

	par->set_fb = &vfb->base;

	return 0;
}

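/**
 * vmw_fb_set_par - fbdev .fb_set_par callback
 *
 * Builds a display mode from the requested var, recreates the KMS
 * framebuffer if necessary, sets the mode on the fbdev crtc/connector
 * and schedules a full dirty flush.
 */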
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct drm_mode_set set;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
		DRM_MODE_TYPE_DRIVER,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	struct drm_display_mode *mode;
	int ret;

	mode = drm_mode_duplicate(&vmw_priv->drm, &new_mode);
	if (!mode) {
		DRM_ERROR("Could not create new fb mode.\n");
		return -ENOMEM;
	}

	mode->hdisplay = var->xres;
	mode->vdisplay = var->yres;
	vmw_guess_mode_timing(mode);

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					mode->hdisplay *
					DIV_ROUND_UP(var->bits_per_pixel, 8),
					mode->vdisplay)) {
		drm_mode_destroy(&vmw_priv->drm, mode);
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	ret = vmw_fb_kms_framebuffer(info);
	if (ret)
		goto out_unlock;

	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;

	set.crtc = par->crtc;
	set.x = 0;
	set.y = 0;
	set.mode = mode;
	set.fb = par->set_fb;
	set.num_connectors = 1;
	set.connectors = &par->con;

	ret = vmwgfx_set_config_internal(&set);
	if (ret)
		goto out_unlock;

	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
			  par->set_fb->width, par->set_fb->height);

	/*
	 * If something was already dirty, dirty_mark() won't schedule new
	 * work, so let's schedule it here.
	 */
	schedule_delayed_work(&par->local_work, 0);

out_unlock:
	if (par->set_mode)
		drm_mode_destroy(&vmw_priv->drm, par->set_mode);
	par->set_mode = mode;

	mutex_unlock(&par->bo_mutex);

	return ret;
}

static const struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};

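/**
 * vmw_fb_init - set up the vmwgfx fbdev emulation
 *
 * Allocates the fb_info and the shadow (vmalloc) framebuffer, initializes
 * deferred I/O and dirty tracking, registers the framebuffer and performs
 * an initial modeset.
 */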
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = vmw_priv->drm.dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned fb_width, fb_height;
	unsigned int fb_bpp, fb_pitch, fb_size;
	struct drm_display_mode *init_mode;
	int ret;

	fb_bpp = 32;

	/* XXX Shouldn't these limits be configurable rather than hardcoded? */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	memset(par, 0, sizeof(*par));
	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
	par->vmw_priv = vmw_priv;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
				      par->max_height, &par->con,
				      &par->crtc, &init_mode);
	if (ret)
		goto err_kms;

	info->var.xres = init_mode->hdisplay;
	info->var.yres = init_mode->vdisplay;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vzalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = (char __iomem *)par->vmalloc;
	info->screen_size = fb_size;

	info->fbops = &vmw_fb_ops;

	/* 24 depth per default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = fb_bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	mutex_init(&par->bo_mutex);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	vmw_fb_set_par(info);

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
err_free:
	vfree(par->vmalloc);
err_kms:
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

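/**
 * vmw_fb_close - tear down the vmwgfx fbdev emulation
 *
 * Reverses vmw_fb_init(): cancels pending flush work, unregisters the
 * framebuffer, detaches the KMS state and frees the shadow framebuffer.
 */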
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;

	/* XXX: Is this the right teardown order? */
	fb_deferred_io_cleanup(info);
	cancel_delayed_work_sync(&par->local_work);
	unregister_framebuffer(info);

	mutex_lock(&par->bo_mutex);
	(void) vmw_fb_kms_detach(par, true, true);
	mutex_unlock(&par->bo_mutex);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

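/**
 * vmw_fb_off - disable fbdev dirty flushing
 *
 * Marks dirty tracking inactive and flushes any pending deferred I/O and
 * local flush work.
 */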
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work(&info->deferred_work);
	flush_delayed_work(&par->local_work);

	return 0;
}

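/**
 * vmw_fb_on - re-enable fbdev dirty flushing
 *
 * Marks dirty tracking active again and schedules an immediate flush to
 * catch up with any damage accumulated while it was off.
 */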
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	/*
	 * Need to reschedule a dirty update, because otherwise that's
	 * only done in dirty_mark() if the previous coalesced
	 * dirty region was empty.
	 */
	schedule_delayed_work(&par->local_work, 0);

	return 0;
}