/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"

/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing the power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.i915_enable_fbc parameter.
 */
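
/*
 * For example (illustrative usage, not part of this file), FBC can be
 * controlled at boot time from the kernel command line:
 *
 *   i915.i915_enable_fbc=1	force-enable FBC
 *   i915.i915_enable_fbc=0	force-disable FBC
 *   i915.i915_enable_fbc=-1	use the per-chip default
 *				(see intel_update_fbc() below)
 */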

static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}

static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 64B units */
	cfb_pitch = (cfb_pitch / 64) - 1;
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
		      cfb_pitch, crtc->y, intel_crtc->plane);
}

static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}

static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}

static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}

static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
}

void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL) {
		dev_priv->display.enable_fbc(crtc, interval);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->fb;
	work->interval = interval;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}

void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->cfb_plane = -1;
}

/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time. We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one. It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;

	DRM_DEBUG_KMS("\n");

	if (!i915_powersave)
		return;

	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled && tmp_crtc->fb) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 6)
			enable_fbc = 0;
	}
	if (!enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}

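/*
 * The columns of each entry below are, in order: is_desktop, is_ddr3,
 * fsb_freq, mem_freq, then the four latencies (in ns) consumed by
 * pineview_update_wm(): display_sr, display_hpll_disable, cursor_sr and
 * cursor_hpll_disable. (Field order assumed from the struct cxsr_latency
 * users in this file.)
 */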
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_HPLLOFF_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
	G4X_FIFO_SIZE,
	G4X_MAX_WM,
	G4X_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	VALLEYVIEW_FIFO_SIZE,
	VALLEYVIEW_MAX_WM,
	VALLEYVIEW_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	I965_CURSOR_FIFO,
	VALLEYVIEW_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	I945_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
	I915_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
	I855GM_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
	I830_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};

static const struct intel_watermark_params ironlake_display_wm_info = {
	ILK_DISPLAY_FIFO,
	ILK_DISPLAY_MAXWM,
	ILK_DISPLAY_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
	ILK_CURSOR_FIFO,
	ILK_CURSOR_MAXWM,
	ILK_CURSOR_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
	ILK_DISPLAY_SR_FIFO,
	ILK_DISPLAY_MAX_SRWM,
	ILK_DISPLAY_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
	ILK_CURSOR_SR_FIFO,
	ILK_CURSOR_MAX_SRWM,
	ILK_CURSOR_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};

static const struct intel_watermark_params sandybridge_display_wm_info = {
	SNB_DISPLAY_FIFO,
	SNB_DISPLAY_MAXWM,
	SNB_DISPLAY_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
	SNB_CURSOR_FIFO,
	SNB_CURSOR_MAXWM,
	SNB_CURSOR_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
	SNB_DISPLAY_SR_FIFO,
	SNB_DISPLAY_MAX_SRWM,
	SNB_DISPLAY_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
	SNB_CURSOR_SR_FIFO,
	SNB_CURSOR_MAX_SRWM,
	SNB_CURSOR_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};


/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
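
/*
 * Worked example for the formula above (illustrative numbers, not any
 * particular platform): for a 1920x1080@60 mode with a 148500 kHz pixel
 * clock, 4 bytes per pixel and the default 5000 ns latency,
 *
 *   entries_required = (148500 / 1000) * 4 * 5000 / 1000 = 2960 bytes
 *
 * (integer division). With a 64-byte cacheline that rounds up to
 * DIV_ROUND_UP(2960, 64) = 47 FIFO entries, so a hypothetical 96-entry
 * FIFO with a guard size of 1 would yield a watermark of
 * 96 - (47 + 1) = 48 entries.
 */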

static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (crtc->enabled && crtc->fb) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}

static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
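
/*
 * Illustrative arithmetic for the two methods above (hypothetical numbers,
 * not a real platform): at a 100000 kHz dot clock and 4 bytes per pixel,
 * the "small buffer" plane estimate for a 700 ns latency is
 * (100000 * 4 / 1000) * 700 / 1000 = 280 bytes, while the "large buffer"
 * cursor estimate first converts the latency into whole scanout lines via
 * line_time_us and then charges 64 * pixel_size bytes per line, 64 being
 * the cursor width.
 */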

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

static bool vlv_compute_drain_latency(struct drm_device *dev,
				      int plane,
				      int *plane_prec_mult,
				      int *plane_dl,
				      int *cursor_prec_mult,
				      int *cursor_dl)
{
	struct drm_crtc *crtc;
	int clock, pixel_size;
	int entries;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled)
		return false;

	clock = crtc->mode.clock;	/* VESA DOT Clock */
	pixel_size = crtc->fb->bits_per_pixel / 8;	/* BPP */

	entries = (clock / 1000) * pixel_size;
	*plane_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
						     pixel_size);

	entries = (clock / 1000) * 4;	/* BPP is always 4 for cursor */
	*cursor_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);

	return true;
}
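
/*
 * Illustrative numbers for the helper above (hypothetical, not a real
 * mode): at a 100000 kHz dot clock and 4 bytes per pixel,
 * entries = (100000 / 1000) * 4 = 400 > 256, so the 32x precision
 * multiplier is selected and plane_dl = (64 * 32 * 4) / 400 = 20.
 */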

/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */

static void vlv_update_drain_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_prec, planea_dl, planeb_prec, planeb_dl;
	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
	int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
						  either 16 or 32 */

	/* For plane A, Cursor A */
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
				      &cursor_prec_mult, &cursora_dl)) {
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;

		I915_WRITE(VLV_DDL1, cursora_prec |
			   (cursora_dl << DDL_CURSORA_SHIFT) |
			   planea_prec | planea_dl);
	}

	/* For plane B, Cursor B */
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
				      &cursor_prec_mult, &cursorb_dl)) {
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;

		I915_WRITE(VLV_DDL2, cursorb_prec |
			   (cursorb_dl << DDL_CURSORB_SHIFT) |
			   planeb_prec | planeb_dl);
	}
}

#define single_plane_enabled(mask) is_power_of_2(mask)
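/* e.g. a mask of 1 (pipe A only) or 2 (pipe B only) qualifies; 3 (both) does not */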

static void valleyview_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	vlv_update_drain_latency(dev);

	if (g4x_compute_wm0(dev, 0,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
	else
		I915_WRITE(FW_BLC_SELF_VLV,
			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
}

static void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}

static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
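
/*
 * Illustrative arithmetic for the "ns/us" trick above (hypothetical mode):
 * with htotal = 2200 and clock = 148500 kHz, line_time_us =
 * (2200 * 1000) / 148500 = 14. A 12000 ns self-refresh latency then
 * covers ((12000 / 14) + 1000) / 1000 = 1 full-or-partial line, i.e. one
 * hdisplay * pixel_size chunk of FIFO entries.
 */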

static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}

static void i830_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       crtc->fb->bits_per_pixel / 8,
				       latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

#define ILK_LP0_PLANE_LATENCY		700
#define ILK_LP0_CURSOR_LATENCY		1300

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool ironlake_check_srwm(struct drm_device *dev, int level,
				int fbc_wm, int display_wm, int cursor_wm,
				const struct intel_watermark_params *display,
				const struct intel_watermark_params *cursor)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);

	if (fbc_wm > SNB_FBC_MAX_SRWM) {
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
			      fbc_wm, SNB_FBC_MAX_SRWM, level);

		/* fbc has its own way to disable FBC WM */
		I915_WRITE(DISP_ARB_CTL,
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
		return false;
	}

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
		return false;
	}

	if (!(fbc_wm || display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
		return false;
	}

	return true;
}

/*
 * Compute watermark values of WM[1-3].
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
				  int latency_ns,
				  const struct intel_watermark_params *display,
				  const struct intel_watermark_params *cursor,
				  int *fbc_wm, int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int hdisplay, htotal, pixel_size, clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*fbc_wm = *display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/*
	 * Spec says:
	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
	 * (see the worked example after this function)
	 */
	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return ironlake_check_srwm(dev, level,
				   *fbc_wm, *display_wm, *cursor_wm,
				   display, cursor);
}
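
/*
 * Worked example for the FBC WM formula above (hypothetical numbers): with
 * *display_wm = 38 and a 1920-pixel-wide, 4-byte-per-pixel scanout,
 * line_size = 7680 bytes and the FBC watermark becomes
 * DIV_ROUND_UP(38 * 64, 7680) + 2 = 1 + 2 = 3.
 */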

static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}

static void sandybridge_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEA_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEA_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEB_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEB_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/* IVB has 3 pipes */
	if (IS_IVYBRIDGE(dev) &&
	    g4x_compute_wm0(dev, 2,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEC_IVB);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEC_IVB, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1 << 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 *
	 * SNB supports 3 levels of watermarks.
	 *
	 * WM1/WM2/WM3 watermarks have to be enabled in ascending order,
	 * and disabled in descending order.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled) ||
	    dev_priv->sprite_scaling_enabled)
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   SNB_READ_WM1_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   SNB_READ_WM2_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM3 */
	if (!ironlake_compute_srwm(dev, 3, enabled,
				   SNB_READ_WM3_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM3_LP_ILK,
		   WM3_LP_EN |
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);
}
1777
static bool
sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
			      uint32_t sprite_width, int pixel_size,
			      const struct intel_watermark_params *display,
			      int display_latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	int clock;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*sprite_wm = display->guard_size;
		return false;
	}

	clock = crtc->mode.clock;

	/* Use the small buffer method to calculate the sprite watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size * display->cacheline_size -
		sprite_width * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*sprite_wm = entries + display->guard_size;
	if (*sprite_wm > (int)display->max_wm)
		*sprite_wm = display->max_wm;

	return true;
}

static bool
sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
				uint32_t sprite_width, int pixel_size,
				const struct intel_watermark_params *display,
				int latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*sprite_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	clock = crtc->mode.clock;
	if (!clock) {
		*sprite_wm = 0;
		return false;
	}

	line_time_us = (sprite_width * 1000) / clock;
	if (!line_time_us) {
		*sprite_wm = 0;
		return false;
	}

	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = sprite_width * pixel_size;

	/* Use the minimum of the small and large buffer methods for the sprite */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*sprite_wm = entries + display->guard_size;

	return *sprite_wm <= 0x3ff;
}

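/*
 * Worked example for the small/large buffer methods above (illustrative
 * numbers only, not from any spec): a 1920-wide sprite at 4 bytes/pixel
 * on a 148500 kHz dot clock with latency_ns = 2500 gives
 * small = ((148500 * 4 / 1000) * 2500) / 1000 = 1485 bytes, while
 * line_time_us = (1920 * 1000) / 148500 = 12, line_count = 1 and
 * large = 1 * 1920 * 4 = 7680 bytes; the watermark is then
 * DIV_ROUND_UP(min(1485, 7680), 64) = 24 cachelines plus the guard
 * size, assuming the usual 64-byte cacheline_size.
 */
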
static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
					 uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In units of 0.1us */
	u32 val;
	int sprite_wm, reg;
	int ret;

	switch (pipe) {
	case 0:
		reg = WM0_PIPEA_ILK;
		break;
	case 1:
		reg = WM0_PIPEB_ILK;
		break;
	case 2:
		reg = WM0_PIPEC_IVB;
		break;
	default:
		return; /* bad pipe */
	}

	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
					    &sandybridge_display_wm_info,
					    latency, &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
			      pipe);
		return;
	}

	val = I915_READ(reg);
	val &= ~WM0_PIPE_SPRITE_MASK;
	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
	DRM_DEBUG_KMS("sprite watermarks for pipe %d - %d\n", pipe, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM1_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM1S_LP_ILK, sprite_wm);

	/* Only IVB has two more LP watermarks for sprites */
	if (!IS_IVYBRIDGE(dev))
		return;

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM2_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM2S_LP_IVB, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM3_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM3S_LP_IVB, sprite_wm);
}

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 * The normal calculation is:
 *   watermark = dotclock * bytes per pixel * latency
 * where latency is platform & configuration dependent (we assume pessimal
 * values here).
 *
 * The SR calculation is:
 *   watermark = (trunc(latency/line time)+1) * surface width *
 *     bytes per pixel
 * where
 *   line time = htotal / dotclock
 *   surface width = hdisplay for normal plane and 64 for cursor
 * and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that. And on Crestline we have
 * to set the non-SR watermarks to 8.
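 *
 * A worked example (illustrative numbers, not from any spec): at a
 * 148500 kHz dot clock and 4 bytes per pixel, 0.5 us of latency costs
 * 148500 * 4 / 1000 * 0.5 = 297 bytes of FIFO, i.e. 5 64-byte
 * cachelines once rounded up. For SR with htotal = 2200, line time =
 * 2200 / 148500 kHz = ~14.8 us, so 12 us of latency needs
 * (trunc(12 / 14.8) + 1) = 1 line, or 1920 * 4 = 7680 bytes on a
 * 1920-wide plane.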
 */
void intel_update_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(dev);
}

void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
				    uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_sprite_wm)
		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
						   pixel_size);
}

static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
	struct drm_i915_gem_object *ctx;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = i915_gem_alloc_object(dev, 4096);
	if (!ctx) {
		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
		return NULL;
	}

	ret = i915_gem_object_pin(ctx, 4096, true);
	if (ret) {
		DRM_ERROR("failed to pin power context: %d\n", ret);
		goto err_unref;
	}

	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
	if (ret) {
		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
		goto err_unpin;
	}

	return ctx;

err_unpin:
	i915_gem_object_unpin(ctx);
err_unref:
	drm_gem_object_unreference(&ctx->base);
	/*
	 * The caller holds struct_mutex (asserted above) and is responsible
	 * for dropping it; unlocking it here would cause a double unlock.
	 */
	return NULL;
}

bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}

void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->fmax = fmax; /* IPS callback will increase this */
	dev_priv->fstart = fstart;

	dev_priv->max_delay = fstart;
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	msleep(1);

	ironlake_set_drps(dev, fstart);

	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->last_count2 = I915_READ(0x112f4);
	getrawmonotonic(&dev_priv->last_time2);
}

void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->fstart);
	msleep(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	msleep(1);
}

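/*
 * Request a new GT frequency. val is a ratio; going by the "pcu_mbox * 50"
 * MHz conversion in gen6_enable_rps() below, one unit appears to correspond
 * to 50 MHz. The ratio lands in the frequency field at bit 25 of
 * GEN6_RPNSWREQ.
 */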
void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 swreq;

	swreq = (val & 0x3ff) << 25;
	I915_WRITE(GEN6_RPNSWREQ, swreq);
}

void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0);
	/* Completely masking the PM interrupts here doesn't race with the
	 * rps work item unmasking them again, because that uses a different
	 * register (PMIMR) for its masking. The only risk is leaving stale
	 * bits in PMIIR and PMIMR, which gen6_enable_rps will clean up. */

	spin_lock_irq(&dev_priv->rps_lock);
	dev_priv->pm_iir = 0;
	spin_unlock_irq(&dev_priv->rps_lock);

	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}

int intel_enable_rc6(const struct drm_device *dev)
{
	/*
	 * Respect the kernel parameter if it is set
	 */
	if (i915_enable_rc6 >= 0)
		return i915_enable_rc6;

	/*
	 * Disable RC6 on Ironlake
	 */
	if (INTEL_INFO(dev)->gen == 5)
		return 0;

	/* Sorry Haswell, no RC6 for you for now. */
	if (IS_HASWELL(dev))
		return 0;

	/*
	 * On Sandybridge, enable only plain RC6; deep RC6 stays disabled.
	 */
	if (INTEL_INFO(dev)->gen == 6) {
		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
		return INTEL_RC6_ENABLE;
	}
	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
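
/*
 * Note that intel_enable_rc6() returns a bitmask, so the i915_enable_rc6
 * module parameter is effectively one as well: e.g. "1" requests plain RC6
 * only, while "3" would also request deep RC6 (RC6p), assuming the
 * INTEL_RC6*_ENABLE definitions in i915_drv.h keep their one-bit-per-state
 * encoding.
 */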

void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
	u32 pcu_mbox, rc6_mask = 0;
	u32 gtfifodbg;
	int cur_freq, min_freq, max_freq;
	int rc6_mode;
	int i;

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* Clear the DBG now so we don't confuse earlier errors */
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	gen6_gt_force_wake_get(dev_priv);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for (i = 0; i < I915_NUM_RINGS; i++)
		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	if (rc6_mode & INTEL_RC6p_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

	if (rc6_mode & INTEL_RC6pp_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;

	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
		 (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
		 (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
		 (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   18 << 24 |
		   6 << 16);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
	I915_WRITE(GEN6_RP_UP_EI, 100000);
	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY |
		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	min_freq = (rp_state_cap & 0xff0000) >> 16;
	max_freq = rp_state_cap & 0xff;
	cur_freq = (gt_perf_status & 0xff00) >> 8;

	/* Check for overclock support */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
	if (pcu_mbox & (1<<31)) { /* OC supported */
		max_freq = pcu_mbox & 0xff;
		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", max_freq * 50);
	}

	/* In units of 50MHz (min_freq of 15 corresponds to the 750MHz
	 * floor used by gen6_update_ring_freq below) */
	dev_priv->max_delay = max_freq;
	dev_priv->min_delay = min_freq;
	dev_priv->cur_delay = cur_freq;

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER,
		   GEN6_PM_MBOX_EVENT |
		   GEN6_PM_THERMAL_EVENT |
		   GEN6_PM_RP_DOWN_TIMEOUT |
		   GEN6_PM_RP_UP_THRESHOLD |
		   GEN6_PM_RP_DOWN_THRESHOLD |
		   GEN6_PM_RP_UP_EI_EXPIRED |
		   GEN6_PM_RP_DOWN_EI_EXPIRED);
	spin_lock_irq(&dev_priv->rps_lock);
	WARN_ON(dev_priv->pm_iir != 0);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps_lock);
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);
}

void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;
	int gpu_freq, ia_freq, max_ia_freq;
	int scaling_factor = 180;

	max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if none found, PCU will ensure we don't go
	 * over
	 */
	if (!max_ia_freq)
		max_ia_freq = tsc_khz;

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	mutex_lock(&dev_priv->dev->struct_mutex);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access. We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
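	/*
	 * Illustrative arithmetic only: with a max_ia_freq of, say, 3400 MHz
	 * and a GPU frequency four steps below max_delay, diff = 4 and
	 * ia_freq = 3400 - (4 * 180) / 2 = 3040 MHz, which
	 * DIV_ROUND_CLOSEST() below turns into a ratio of 30 (units of
	 * 100 MHz) for the PCU.
	 */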
	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
	     gpu_freq--) {
		int diff = dev_priv->max_delay - gpu_freq;

		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		I915_WRITE(GEN6_PCODE_DATA,
			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
			   gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode write of freq table timed out\n");
			continue;
		}
	}

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx) {
		i915_gem_object_unpin(dev_priv->renderctx);
		drm_gem_object_unreference(&dev_priv->renderctx->base);
		dev_priv->renderctx = NULL;
	}

	if (dev_priv->pwrctx) {
		i915_gem_object_unpin(dev_priv->pwrctx);
		drm_gem_object_unreference(&dev_priv->pwrctx->base);
		dev_priv->pwrctx = NULL;
	}
}

void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}

	ironlake_teardown_rc6(dev);
}

static int ironlake_setup_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx == NULL)
		dev_priv->renderctx = intel_alloc_context_page(dev);
	if (!dev_priv->renderctx)
		return -ENOMEM;

	if (dev_priv->pwrctx == NULL)
		dev_priv->pwrctx = intel_alloc_context_page(dev);
	if (!dev_priv->pwrctx) {
		ironlake_teardown_rc6(dev);
		return -ENOMEM;
	}

	return 0;
}

void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!intel_enable_rc6(dev))
		return;

	mutex_lock(&dev->struct_mutex);
	ret = ironlake_setup_rc6(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
	ret = BEGIN_LP_RING(6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	OUT_RING(MI_SET_CONTEXT);
	OUT_RING(dev_priv->renderctx->gtt_offset |
		 MI_MM_SPACE_GTT |
		 MI_SAVE_EXT_STATE_EN |
		 MI_RESTORE_EXT_STATE_EN |
		 MI_RESTORE_INHIBIT);
	OUT_RING(MI_SUSPEND_FLUSH);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_FLUSH);
	ADVANCE_LP_RING();

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush there, which, combined with the MI_FLUSH
	 * above, should make it safe to assume that renderctx is valid.
	 */
	ret = intel_wait_ring_idle(LP_RING(dev_priv));
	if (ret) {
		DRM_ERROR("failed to enable ironlake power savings\n");
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
	mutex_unlock(&dev->struct_mutex);
}

static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}
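
/*
 * Example decode (illustrative bit pattern only): vidfreq = 0x001e1001
 * gives div = 30, post = 1, pre = 1, so
 * freq = (30 * 133333) / ((1 << 1) * 1) = 1999995; if the 133333
 * constant is the 133.333 MHz reference clock expressed in kHz, that
 * is roughly 2 GHz.
 */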

void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable PMON while programming it */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}

static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	/* Required for FBC */
	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
		DPFCRUNIT_CLOCK_GATE_DISABLE |
		DPFDUNIT_CLOCK_GATE_DISABLE;
	/* Required for CxSR */
	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh:
	 *   bits 21/22 of 0x42004
	 *   bit 5 of 0x42020
	 *   bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	I915_WRITE(ILK_DSPCLK_GATE,
		   (I915_READ(ILK_DSPCLK_GATE) |
		    ILK_DPARB_CLK_GATE));
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * According to the hardware documentation, the following bits
	 * should be set unconditionally in order to enable FBC:
	 *   bit 22 of 0x42000
	 *   bit 22 of 0x42004
	 *   bits 7, 8 and 9 of 0x42020
	 */
	if (IS_IRONLAKE_M(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
		I915_WRITE(ILK_DSPCLK_GATE,
			   I915_READ(ILK_DSPCLK_GATE) |
			   ILK_DPFC_DIS1 |
			   ILK_DPFC_DIS2 |
			   ILK_CLK_FBC);
	}

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);
}

static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* clear masked bit */
	I915_WRITE(CACHE_MODE_0,
		   CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* Bspec says we need to always set all mask bits. */
	I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
		   _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 *   bits 21 and 22 of 0x42000
	 *   bits 21 and 22 of 0x42004
	 *   bits 5 and 7 of 0x42020
	 *   bit 14 of 0x70180
	 *   bit 14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE |
		   ILK_DPFD_CLK_GATE);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	gen7_setup_fixed_func_scheduler(dev_priv);

	/* WaDisable4x2SubspanOptimization */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
}

static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
}

static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}

static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* Without this, mode sets may fail silently on FDI */
	for_each_pipe(pipe)
		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}

void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);

	if (dev_priv->display.init_pch_clock_gating)
		dev_priv->display.init_pch_clock_gating(dev);
}

/* Set up chip-specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_HAS_FBC(dev)) {
		if (HAS_PCH_SPLIT(dev)) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else if (IS_CRESTLINE(dev)) {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;
		}
		/* 855GM needs testing */
	}

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
		dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;

		/* IVB configs may use multi-threaded forcewake */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
			u32 ecobus;

			/* A small trick here - if the BIOS hasn't configured
			 * MT forcewake, and if the device is in RC6, then
			 * force_wake_mt_get will not wake the device and the
			 * ECOBUS read will return zero, which is (correctly)
			 * interpreted by the test below as MT forcewake being
			 * disabled.
			 */
			mutex_lock(&dev->struct_mutex);
			__gen6_gt_force_wake_mt_get(dev_priv);
			ecobus = I915_READ_NOTRACE(ECOBUS);
			__gen6_gt_force_wake_mt_put(dev_priv);
			mutex_unlock(&dev->struct_mutex);

			if (ecobus & FORCEWAKE_MT_ENABLE) {
				DRM_DEBUG_KMS("Using MT version of forcewake\n");
				dev_priv->display.force_wake_get =
					__gen6_gt_force_wake_mt_get;
				dev_priv->display.force_wake_put =
					__gen6_gt_force_wake_mt_put;
			}
		}

		if (HAS_PCH_IBX(dev))
			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
		else if (HAS_PCH_CPT(dev))
			dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;

		if (IS_GEN5(dev)) {
			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
				dev_priv->display.update_wm = ironlake_update_wm;
			else {
				DRM_DEBUG_KMS("Failed to get proper latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		} else if (IS_GEN6(dev)) {
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		} else
			dev_priv->display.update_wm = NULL;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
		dev_priv->display.force_wake_get = vlv_force_wake_get;
		dev_priv->display.force_wake_put = vlv_force_wake_put;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			pineview_disable_cxsr(dev);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_I865G(dev)) {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		dev_priv->display.get_fifo_size = i830_get_fifo_size;
	} else if (IS_I85X(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	} else {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
		if (IS_845G(dev))
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		else
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
	}
}
