/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include "i915_drv.h"
#include "intel_drv.h"

/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.i915_enable_fbc parameter.
 */

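/*
 * i8xx_disable_fbc - stop the compressor on the 8xx-class FBC unit
 *
 * Clears FBC_CTL_EN (if it was set) and then waits up to 10ms for the
 * FBC_STAT_COMPRESSING bit to clear before reporting FBC as disabled.
 */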
void i8xx_disable_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 fbc_ctl;

        /* Disable compression */
        fbc_ctl = I915_READ(FBC_CONTROL);
        if ((fbc_ctl & FBC_CTL_EN) == 0)
                return;

        fbc_ctl &= ~FBC_CTL_EN;
        I915_WRITE(FBC_CONTROL, fbc_ctl);

        /* Wait for compressing bit to clear */
        if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
                DRM_DEBUG_KMS("FBC idle timed out\n");
                return;
        }

        DRM_DEBUG_KMS("disabled FBC\n");
}

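/*
 * i8xx_enable_fbc - program and enable the legacy FBC unit for @crtc
 *
 * Derives the compressed framebuffer pitch from the CFB size (in the 64B
 * units FBC_CTL expects), clears the FBC tag RAM, selects the plane and
 * CPU fence behaviour in FBC_CONTROL2, and finally writes FBC_CONTROL
 * with the stride, the recompression @interval and the enable bit.
 * I945GM additionally gets FBC_CTL_C3_IDLE for its self-refresh handling.
 */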
void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int cfb_pitch;
        int plane, i;
        u32 fbc_ctl, fbc_ctl2;

        cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
        if (fb->pitches[0] < cfb_pitch)
                cfb_pitch = fb->pitches[0];

        /* FBC_CTL wants 64B units */
        cfb_pitch = (cfb_pitch / 64) - 1;
        plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

        /* Clear old tags */
        for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
                I915_WRITE(FBC_TAG + (i * 4), 0);

        /* Set it up... */
        fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
        fbc_ctl2 |= plane;
        I915_WRITE(FBC_CONTROL2, fbc_ctl2);
        I915_WRITE(FBC_FENCE_OFF, crtc->y);

        /* enable it... */
        fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
        if (IS_I945GM(dev))
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
        fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
        fbc_ctl |= obj->fence_reg;
        I915_WRITE(FBC_CONTROL, fbc_ctl);

        DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d\n",
                      cfb_pitch, crtc->y, intel_crtc->plane);
}

bool i8xx_fbc_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

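/*
 * g4x_enable_fbc - enable FBC through the G4x DPFC registers
 *
 * Sets up the recompression stall watermark and timer from @interval
 * and the CPU fence Y offset, then turns compression on by setting
 * DPFC_CTL_EN in DPFC_CONTROL.
 */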
void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
        unsigned long stall_watermark = 200;
        u32 dpfc_ctl;

        dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
        dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
        I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

        I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
                   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
                   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
        I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

        /* enable it... */
        I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

        DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

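/*
 * g4x_disable_fbc - clear DPFC_CTL_EN if compression is currently enabled
 */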
void g4x_disable_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpfc_ctl;

        /* Disable compression */
        dpfc_ctl = I915_READ(DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
                dpfc_ctl &= ~DPFC_CTL_EN;
                I915_WRITE(DPFC_CONTROL, dpfc_ctl);

                DRM_DEBUG_KMS("disabled FBC\n");
        }
}

bool g4x_fbc_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

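/*
 * sandybridge_blit_fbc_update - make the blitter notify FBC of writes
 *
 * Sets GEN6_BLITTER_FBC_NOTIFY in GEN6_BLITTER_ECOSKPD using the
 * register's locked-bit sequence (first write the bit through the lock
 * field, then the bit itself, then release the lock), with a forcewake
 * reference held around the whole sequence.
 */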
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 blt_ecoskpd;

        /* Make sure blitter notifies FBC of writes */
        gen6_gt_force_wake_get(dev_priv);
        blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
                GEN6_BLITTER_LOCK_SHIFT;
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
                         GEN6_BLITTER_LOCK_SHIFT);
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        POSTING_READ(GEN6_BLITTER_ECOSKPD);
        gen6_gt_force_wake_put(dev_priv);
}

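/*
 * ironlake_enable_fbc - enable FBC on ILK/SNB via the ILK_DPFC block
 *
 * Rebuilds ILK_DPFC_CONTROL from scratch (plane, 1x limit, persistent
 * mode for front-buffer rendering, CPU fence), programs the
 * recompression watermark/timer, the fence Y offset and the scanout
 * object's GTT offset in ILK_FBC_RT_BASE, then sets DPFC_CTL_EN.  On
 * GEN6 the CPU fence is also programmed into SNB_DPFC_CTL_SA and the
 * blitter is set up to notify FBC of writes.
 */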
void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
        unsigned long stall_watermark = 200;
        u32 dpfc_ctl;

        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
        dpfc_ctl &= DPFC_RESERVED;
        dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
        /* Set persistent mode for front-buffer rendering, ala X. */
        dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
        dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
        I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

        I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
                   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
                   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
        I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
        I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        if (IS_GEN6(dev)) {
                I915_WRITE(SNB_DPFC_CTL_SA,
                           SNB_CPU_FENCE_ENABLE | obj->fence_reg);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
                sandybridge_blit_fbc_update(dev);
        }

        DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

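/*
 * ironlake_disable_fbc - clear DPFC_CTL_EN in ILK_DPFC_CONTROL if it is set
 */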
void ironlake_disable_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpfc_ctl;

        /* Disable compression */
        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
                dpfc_ctl &= ~DPFC_CTL_EN;
                I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

                DRM_DEBUG_KMS("disabled FBC\n");
        }
}

bool ironlake_fbc_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

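/*
 * intel_fbc_enabled - query the per-platform hook for the current
 * hardware FBC state; returns false when the platform has no FBC
 * support wired up.
 */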
bool intel_fbc_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!dev_priv->display.fbc_enabled)
                return false;

        return dev_priv->display.fbc_enabled(dev);
}

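/*
 * intel_fbc_work_fn - deferred FBC enable
 *
 * Runs from the delayed work queued by intel_enable_fbc().  Under
 * struct_mutex it checks that this work item is still the one the
 * device is waiting for and that the CRTC still scans out the same
 * framebuffer, and only then calls the platform enable_fbc hook and
 * records the plane/fb/y that compression was set up for.
 */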
static void intel_fbc_work_fn(struct work_struct *__work)
{
        struct intel_fbc_work *work =
                container_of(to_delayed_work(__work),
                             struct intel_fbc_work, work);
        struct drm_device *dev = work->crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        mutex_lock(&dev->struct_mutex);
        if (work == dev_priv->fbc_work) {
                /* Double check that we haven't switched fb without cancelling
                 * the prior work.
                 */
                if (work->crtc->fb == work->fb) {
                        dev_priv->display.enable_fbc(work->crtc,
                                                     work->interval);

                        dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
                        dev_priv->cfb_fb = work->crtc->fb->base.id;
                        dev_priv->cfb_y = work->crtc->y;
                }

                dev_priv->fbc_work = NULL;
        }
        mutex_unlock(&dev->struct_mutex);

        kfree(work);
}

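/*
 * intel_cancel_fbc_work - drop any pending deferred FBC enable
 *
 * Frees the work item if it had not started running yet; otherwise
 * clearing dev_priv->fbc_work is enough, since the work function
 * re-checks that pointer under struct_mutex and frees itself.
 */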
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
        if (dev_priv->fbc_work == NULL)
                return;

        DRM_DEBUG_KMS("cancelling pending FBC enable\n");

        /* Synchronisation is provided by struct_mutex and checking of
         * dev_priv->fbc_work, so we can perform the cancellation
         * entirely asynchronously.
         */
        if (cancel_delayed_work(&dev_priv->fbc_work->work))
                /* work item was cancelled before it ran, clean up */
                kfree(dev_priv->fbc_work);

        /* Mark the work as no longer wanted so that if it does
         * wake up (because the work was already running and waiting
         * for our mutex), it will discover that it is no longer
         * necessary to run.
         */
        dev_priv->fbc_work = NULL;
}

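/*
 * intel_enable_fbc - schedule FBC to be enabled on @crtc
 *
 * Cancels any previously scheduled enable and queues a new one to run
 * 50ms later (see the comment below for why the delay exists).  If the
 * work item cannot be allocated, FBC is enabled synchronously instead.
 */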
void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct intel_fbc_work *work;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!dev_priv->display.enable_fbc)
                return;

        intel_cancel_fbc_work(dev_priv);

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (work == NULL) {
                dev_priv->display.enable_fbc(crtc, interval);
                return;
        }

        work->crtc = crtc;
        work->fb = crtc->fb;
        work->interval = interval;
        INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

        dev_priv->fbc_work = work;

        DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

        /* Delay the actual enabling to let pageflipping cease and the
         * display to settle before starting the compression. Note that
         * this delay also serves a second purpose: it allows for a
         * vblank to pass after disabling the FBC before we attempt
         * to modify the control registers.
         *
         * A more complicated solution would involve tracking vblanks
         * following the termination of the page-flipping sequence
         * and indeed performing the enable as a co-routine and not
         * waiting synchronously upon the vblank.
         */
        schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}

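/*
 * intel_disable_fbc - cancel any pending deferred enable and turn FBC
 * off via the platform hook, marking no plane as compressed.
 */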
void intel_disable_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        intel_cancel_fbc_work(dev_priv);

        if (!dev_priv->display.disable_fbc)
                return;

        dev_priv->display.disable_fbc(dev);
        dev_priv->cfb_plane = -1;
}

/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = NULL, *tmp_crtc;
        struct intel_crtc *intel_crtc;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj;
        int enable_fbc;

        DRM_DEBUG_KMS("\n");

        if (!i915_powersave)
                return;

        if (!I915_HAS_FBC(dev))
                return;

        /*
         * If FBC is already on, we just have to verify that we can
         * keep it that way...
         * Need to disable if:
         *   - more than one pipe is active
         *   - changing FBC params (stride, fence, mode)
         *   - new fb is too large to fit in compressed buffer
         *   - going to an unsupported config (interlace, pixel multiply, etc.)
         */
        list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
                if (tmp_crtc->enabled && tmp_crtc->fb) {
                        if (crtc) {
                                DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
                                dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
                                goto out_disable;
                        }
                        crtc = tmp_crtc;
                }
        }

        if (!crtc || crtc->fb == NULL) {
                DRM_DEBUG_KMS("no output, disabling\n");
                dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
                goto out_disable;
        }

        intel_crtc = to_intel_crtc(crtc);
        fb = crtc->fb;
        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;

        enable_fbc = i915_enable_fbc;
        if (enable_fbc < 0) {
                DRM_DEBUG_KMS("fbc set to per-chip default\n");
                enable_fbc = 1;
                if (INTEL_INFO(dev)->gen <= 6)
                        enable_fbc = 0;
        }
        if (!enable_fbc) {
                DRM_DEBUG_KMS("fbc disabled per module param\n");
                dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
                goto out_disable;
        }
        if (intel_fb->obj->base.size > dev_priv->cfb_size) {
                DRM_DEBUG_KMS("framebuffer too large, disabling "
                              "compression\n");
                dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
                goto out_disable;
        }
        if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
            (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
                DRM_DEBUG_KMS("mode incompatible with compression, "
                              "disabling\n");
                dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
                goto out_disable;
        }
        if ((crtc->mode.hdisplay > 2048) ||
            (crtc->mode.vdisplay > 1536)) {
                DRM_DEBUG_KMS("mode too large for compression, disabling\n");
                dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
                goto out_disable;
        }
        if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
                DRM_DEBUG_KMS("plane not 0, disabling compression\n");
                dev_priv->no_fbc_reason = FBC_BAD_PLANE;
                goto out_disable;
        }

        /* The use of a CPU fence is mandatory in order to detect writes
         * by the CPU to the scanout and trigger updates to the FBC.
         */
        if (obj->tiling_mode != I915_TILING_X ||
            obj->fence_reg == I915_FENCE_REG_NONE) {
                DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
                dev_priv->no_fbc_reason = FBC_NOT_TILED;
                goto out_disable;
        }

        /* If the kernel debugger is active, always disable compression */
        if (in_dbg_master())
                goto out_disable;

        /* If the scanout has not changed, don't modify the FBC settings.
         * Note that we make the fundamental assumption that the fb->obj
         * cannot be unpinned (and have its GTT offset and fence revoked)
         * without first being decoupled from the scanout and FBC disabled.
         */
        if (dev_priv->cfb_plane == intel_crtc->plane &&
            dev_priv->cfb_fb == fb->base.id &&
            dev_priv->cfb_y == crtc->y)
                return;

        if (intel_fbc_enabled(dev)) {
                /* We update FBC along two paths, after changing fb/crtc
                 * configuration (modeswitching) and after page-flipping
                 * finishes. For the latter, we know that not only did
                 * we disable the FBC at the start of the page-flip
                 * sequence, but also more than one vblank has passed.
                 *
                 * For the former case of modeswitching, it is possible
                 * to switch between two valid FBC configurations
                 * instantaneously, so we do need to disable the FBC
                 * before we can modify its control registers. We also
                 * have to wait for the next vblank for that to take
                 * effect. However, since we delay enabling FBC we can
                 * assume that a vblank has passed since disabling and
                 * that we can safely alter the registers in the deferred
                 * callback.
                 *
                 * In the scenario that we go from a valid to invalid
                 * and then back to valid FBC configuration we have
                 * no strict enforcement that a vblank occurred since
                 * disabling the FBC. However, along all current pipe
                 * disabling paths we do need to wait for a vblank at
                 * some point. And we wait before enabling FBC anyway.
                 */
                DRM_DEBUG_KMS("disabling active FBC for update\n");
                intel_disable_fbc(dev);
        }

        intel_enable_fbc(crtc, 500);
        return;

out_disable:
        /* Multiple disables should be harmless */
        if (intel_fbc_enabled(dev)) {
                DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
                intel_disable_fbc(dev);
        }
}