/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/pci.h>

#include <drm/drm_fourcc.h>
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

#define VMW_DIRTY_DELAY (HZ / 30)

struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct mutex bo_mutex;
	struct vmw_buffer_object *vmw_bo;
	unsigned bo_size;
	struct drm_framebuffer *set_fb;
	struct drm_display_mode *set_mode;
	u32 fb_x;
	u32 fb_y;
	bool bo_iowrite;

	u32 pseudo_palette[17];

	unsigned max_width;
	unsigned max_height;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;

	struct drm_crtc *crtc;
	struct drm_connector *con;
	struct delayed_work local_work;
};

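/*
 * Store a truecolor palette entry in the pseudo palette used by fbcon.
 * Only 24/32 bpp framebuffers are supported.
 */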
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->set_fb->format->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n",
			  par->set_fb->format->depth,
			  par->set_fb->format->cpp[0] * 8);
		return 1;
	}

	return 0;
}

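/*
 * Validate and adjust a requested video mode: only 32 bpp layouts are
 * accepted, and the resulting geometry must fit both the fbdev framebuffer
 * and the available VRAM.
 */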
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					var->xres * var->bits_per_pixel/8,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/**
 * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
 *
 * @work: The struct work_struct associated with this task.
 *
 * This function flushes the dirty regions of the vmalloc framebuffer to the
 * kms framebuffer, and if the kms framebuffer is visible, also updates the
 * corresponding displays. Note that this function runs even if the kms
 * framebuffer is not bound to a crtc and thus not visible, but it's turned
 * off during hibernation using the par->dirty.active bool.
 */
static void vmw_fb_dirty_flush(struct work_struct *work)
{
	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
					      local_work.work);
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	unsigned long irq_flags;
	s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
	u32 cpp, max_x, max_y;
	struct drm_clip_rect clip;
	struct drm_framebuffer *cur_fb;
	u8 *src_ptr, *dst_ptr;
	struct vmw_buffer_object *vbo = par->vmw_bo;
	void *virtual;

	if (!READ_ONCE(par->dirty.active))
		return;

	mutex_lock(&par->bo_mutex);
	cur_fb = par->set_fb;
	if (!cur_fb)
		goto out_unlock;

	(void) ttm_read_lock(&vmw_priv->reservation_sem, false);
	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
	virtual = vmw_bo_map_and_cache(vbo);
	if (!virtual)
		goto out_unreserve;

	spin_lock_irqsave(&par->dirty.lock, irq_flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
		goto out_unreserve;
	}

	/*
	 * Handle panning when copying from vmalloc to framebuffer.
	 * Clip dirty area to framebuffer.
	 */
	cpp = cur_fb->format->cpp[0];
	max_x = par->fb_x + cur_fb->width;
	max_y = par->fb_y + cur_fb->height;

	dst_x1 = par->dirty.x1 - par->fb_x;
	dst_y1 = par->dirty.y1 - par->fb_y;
	dst_x1 = max_t(s32, dst_x1, 0);
	dst_y1 = max_t(s32, dst_y1, 0);

	dst_x2 = par->dirty.x2 - par->fb_x;
	dst_y2 = par->dirty.y2 - par->fb_y;
	dst_x2 = min_t(s32, dst_x2, max_x);
	dst_y2 = min_t(s32, dst_y2, max_y);
	w = dst_x2 - dst_x1;
	h = dst_y2 - dst_y1;
	w = max_t(s32, 0, w);
	h = max_t(s32, 0, h);

	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);

	if (w && h) {
		dst_ptr = (u8 *)virtual +
			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
		src_ptr = (u8 *)par->vmalloc +
			((dst_y1 + par->fb_y) * info->fix.line_length +
			 (dst_x1 + par->fb_x) * cpp);

		while (h-- > 0) {
			memcpy(dst_ptr, src_ptr, w*cpp);
			dst_ptr += par->set_fb->pitches[0];
			src_ptr += info->fix.line_length;
		}

		clip.x1 = dst_x1;
		clip.x2 = dst_x2;
		clip.y1 = dst_y1;
		clip.y2 = dst_y2;
	}

out_unreserve:
	ttm_bo_unreserve(&vbo->base);
	ttm_read_unlock(&vmw_priv->reservation_sem);
	if (w && h) {
		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
						       &clip, 1));
		vmw_fifo_flush(vmw_priv, false);
	}
out_unlock:
	mutex_unlock(&par->bo_mutex);
}

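/*
 * Merge a damaged rectangle into the coalesced dirty region and, if this is
 * the first damage since the last flush, schedule the delayed flush work.
 */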
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* If we are active, start the dirty work;
		 * we share the work with the defio system. */
		if (par->dirty.active)
			schedule_delayed_work(&par->local_work,
					      VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

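/*
 * Update the panning offsets and mark the whole kms framebuffer dirty so the
 * flush work copies from the new origin.
 */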
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;

	if ((var->xoffset + var->xres) > var->xres_virtual ||
	    (var->yoffset + var->yres) > var->yres_virtual) {
		DRM_ERROR("Requested panning can not fit in framebuffer\n");
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;
	if (par->set_fb)
		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
				  par->set_fb->height);
	mutex_unlock(&par->bo_mutex);

	return 0;
}

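/*
 * fb_deferred_io callback: convert the list of touched pages into a dirty
 * scanline range and schedule an immediate flush.
 */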
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);

		/*
		 * Since we've already waited on this work once, try to
		 * execute asap.
		 */
		cancel_delayed_work(&par->local_work);
		schedule_delayed_work(&par->local_work, 0);
	}
};

static struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};

/*
 * Draw code
 */

static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

/*
 * Bring up code
 */

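/*
 * Allocate a system-memory buffer object that backs the kms framebuffer
 * used by the fbdev emulation.
 */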
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_buffer_object **out)
{
	struct vmw_buffer_object *vmw_bo;
	int ret;

	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_bo_init(vmw_priv, vmw_bo, size,
			  &vmw_sys_placement,
			  false,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;
	ttm_write_unlock(&vmw_priv->reservation_sem);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->reservation_sem);
	return ret;
}

static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
				int *depth)
{
	switch (var->bits_per_pixel) {
	case 32:
		*depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	return 0;
}

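/*
 * Call the crtc's set_config hook with a local modeset acquire context,
 * backing off and retrying on -EDEADLK.
 */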
static int vmwgfx_set_config_internal(struct drm_mode_set *set)
{
	struct drm_crtc *crtc = set->crtc;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

restart:
	ret = crtc->funcs->set_config(set, &ctx);

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto restart;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

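/*
 * Unset the current mode, drop the kms framebuffer and, if requested,
 * release the backing buffer object.
 */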
static int vmw_fb_kms_detach(struct vmw_fb_par *par,
			     bool detach_bo,
			     bool unref_bo)
{
	struct drm_framebuffer *cur_fb = par->set_fb;
	int ret;

	/* Detach the KMS framebuffer from crtcs */
	if (par->set_mode) {
		struct drm_mode_set set;

		set.crtc = par->crtc;
		set.x = 0;
		set.y = 0;
		set.mode = NULL;
		set.fb = NULL;
		set.num_connectors = 0;
		set.connectors = &par->con;
		ret = vmwgfx_set_config_internal(&set);
		if (ret) {
			DRM_ERROR("Could not unset a mode.\n");
			return ret;
		}
		drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
		par->set_mode = NULL;
	}

	if (cur_fb) {
		drm_framebuffer_put(cur_fb);
		par->set_fb = NULL;
	}

	if (par->vmw_bo && detach_bo && unref_bo)
		vmw_bo_unreference(&par->vmw_bo);

	return 0;
}

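/*
 * (Re)create the kms framebuffer that fbdev scans out to, keeping the
 * current framebuffer and backing buffer object when they still match the
 * requested geometry and format.
 */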
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
	struct drm_mode_fb_cmd2 mode_cmd;
	struct vmw_fb_par *par = info->par;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_framebuffer *cur_fb;
	struct vmw_framebuffer *vfb;
	int ret = 0, depth;
	size_t new_bo_size;

	ret = vmw_fb_compute_depth(var, &depth);
	if (ret)
		return ret;

	mode_cmd.width = var->xres;
	mode_cmd.height = var->yres;
	mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
	mode_cmd.pixel_format =
		drm_mode_legacy_fb_format(var->bits_per_pixel, depth);

	cur_fb = par->set_fb;
	if (cur_fb && cur_fb->width == mode_cmd.width &&
	    cur_fb->height == mode_cmd.height &&
	    cur_fb->format->format == mode_cmd.pixel_format &&
	    cur_fb->pitches[0] == mode_cmd.pitches[0])
		return 0;

	/* Need new buffer object ? */
	new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
	ret = vmw_fb_kms_detach(par,
				par->bo_size < new_bo_size ||
				par->bo_size > 2*new_bo_size,
				true);
	if (ret)
		return ret;

	if (!par->vmw_bo) {
		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
				       &par->vmw_bo);
		if (ret) {
			DRM_ERROR("Failed creating a buffer object for "
				  "fbdev.\n");
			return ret;
		}
		par->bo_size = new_bo_size;
	}

	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
				      true, &mode_cmd);
	if (IS_ERR(vfb))
		return PTR_ERR(vfb);

	par->set_fb = &vfb->base;

	return 0;
}

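/*
 * Apply the current fb_var_screeninfo: build a display mode, recreate the
 * kms framebuffer if needed, set the mode on the crtc and kick off a full
 * dirty flush.
 */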
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct drm_mode_set set;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
		DRM_MODE_TYPE_DRIVER,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	struct drm_display_mode *mode;
	int ret;

	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
	if (!mode) {
		DRM_ERROR("Could not create new fb mode.\n");
		return -ENOMEM;
	}

	mode->hdisplay = var->xres;
	mode->vdisplay = var->yres;
	vmw_guess_mode_timing(mode);

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					mode->hdisplay *
					DIV_ROUND_UP(var->bits_per_pixel, 8),
					mode->vdisplay)) {
		drm_mode_destroy(vmw_priv->dev, mode);
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	ret = vmw_fb_kms_framebuffer(info);
	if (ret)
		goto out_unlock;

	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;

	set.crtc = par->crtc;
	set.x = 0;
	set.y = 0;
	set.mode = mode;
	set.fb = par->set_fb;
	set.num_connectors = 1;
	set.connectors = &par->con;

	ret = vmwgfx_set_config_internal(&set);
	if (ret)
		goto out_unlock;

	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
			  par->set_fb->width, par->set_fb->height);

	/* If there already was stuff dirty we won't
	 * schedule a new work, so let's do it now */

	schedule_delayed_work(&par->local_work, 0);

out_unlock:
	if (par->set_mode)
		drm_mode_destroy(vmw_priv->dev, par->set_mode);
	par->set_mode = mode;

	mutex_unlock(&par->bo_mutex);

	return ret;
}


static const struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};

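/*
 * Set up the fbdev emulation: allocate the vmalloc shadow framebuffer,
 * fill in the fb_info structure, hook up deferred I/O and register the
 * framebuffer device.
 */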
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned fb_width, fb_height;
	unsigned int fb_bpp, fb_pitch, fb_size;
	struct drm_display_mode *init_mode;
	int ret;

	fb_bpp = 32;

	/* XXX These shouldn't be hardcoded. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	memset(par, 0, sizeof(*par));
	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
	par->vmw_priv = vmw_priv;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
				      par->max_height, &par->con,
				      &par->crtc, &init_mode);
	if (ret)
		goto err_kms;

	info->var.xres = init_mode->hdisplay;
	info->var.yres = init_mode->vdisplay;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vzalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = (char __iomem *)par->vmalloc;
	info->screen_size = fb_size;

	info->fbops = &vmw_fb_ops;

	/* 24 depth by default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = fb_bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	mutex_init(&par->bo_mutex);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	vmw_fb_set_par(info);

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
err_free:
	vfree(par->vmalloc);
err_kms:
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;

	/* ??? order */
	fb_deferred_io_cleanup(info);
	cancel_delayed_work_sync(&par->local_work);
	unregister_framebuffer(info);

	mutex_lock(&par->bo_mutex);
	(void) vmw_fb_kms_detach(par, true, true);
	mutex_unlock(&par->bo_mutex);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

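/*
 * Disable dirty flushing and wait for pending deferred I/O and flush work
 * to finish, quiescing fbdev output (e.g. around hibernation).
 */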
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work(&info->deferred_work);
	flush_delayed_work(&par->local_work);

	return 0;
}

int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	/*
	 * Need to reschedule a dirty update, because otherwise that's
	 * only done in dirty_mark() if the previous coalesced
	 * dirty region was empty.
	 */
	schedule_delayed_work(&par->local_work, 0);

	return 0;
}