/*
 * Copyright © 2012-2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 */

#include "drmP.h"
#include "i915_drv.h"
#include "intel_drv.h"
#include <sys/archsystm.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 2

/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing the power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.i915_enable_fbc parameter.
 */
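
/* The parameter is handled in intel_update_fbc() below: a negative value
 * selects the per-chip default, 0 disables FBC outright, and any positive
 * value enables it where the hardware and configuration allow.
 */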

static bool intel_crtc_active(struct drm_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 */
	return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
}

static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}

static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 64B units */
	cfb_pitch = (cfb_pitch / 64) - 1;
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}

static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
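	/* ECOSKPD takes masked writes: a bit in the upper half (shifted by
	 * GEN6_BLITTER_LOCK_SHIFT) unmasks the corresponding bit in the
	 * lower half, so we set the mask bit, write the value, then clear
	 * the mask bit again.
	 */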
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}

static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		if (IS_IVYBRIDGE(dev))
			/* WaFbcDisableDpfcClockGating:ivb */
			I915_WRITE(ILK_DSPCLK_GATE_D,
				   I915_READ(ILK_DSPCLK_GATE_D) &
				   ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);

		if (IS_HASWELL(dev))
			/* WaFbcDisableDpfcClockGating:hsw */
			I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
				   I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
				   ~HSW_DPFC_GATING_DISABLE);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);

	I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
		   IVB_DPFC_CTL_FENCE_EN |
		   intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);

	if (IS_IVYBRIDGE(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
		/* WaFbcDisableDpfcClockGating:ivb */
		I915_WRITE(ILK_DSPCLK_GATE_D,
			   I915_READ(ILK_DSPCLK_GATE_D) |
			   ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
	} else {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw */
		I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
			   HSW_BYPASS_FBC_QUEUE);
		/* WaFbcDisableDpfcClockGating:hsw */
		I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
			   I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
			   HSW_DPFC_GATING_DISABLE);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

	sandybridge_blit_fbc_update(dev);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}

static void intel_fbc_work_fn(struct work_struct *arg)
{
	struct intel_fbc_work *work = container_of(arg, struct intel_fbc_work,
						   work);
	struct drm_device *dev = work->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work, sizeof(struct intel_fbc_work));
}

void
intel_fbc_work_timer(void *device)
{
	struct intel_fbc_work *work = (struct intel_fbc_work *)device;
	struct drm_device *dev = work->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	queue_work(dev_priv->other_wq, &work->work);
}

static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	del_timer_sync(&dev_priv->fbc_timer);
	cancel_delayed_work(dev_priv->other_wq);
	/* tasklet was killed before being run, clean up */
	kfree(dev_priv->fbc_work, sizeof(struct intel_fbc_work));

	/* Mark the work as no longer wanted so that if it does
	 * wake up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
}

void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL) {
		dev_priv->display.enable_fbc(crtc, interval);
		return;
	}

	work->dev = crtc->dev;
	work->crtc = crtc;
	work->fb = crtc->fb;
	work->interval = interval;

	INIT_WORK(&work->work, intel_fbc_work_fn);
	setup_timer(&dev_priv->fbc_timer, intel_fbc_work_timer,
		    (void *)work);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
	test_set_timer(&dev_priv->fbc_timer, msecs_to_jiffies(50));
}
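
/* Callers go through this scheduling path rather than invoking the
 * per-chip enable_fbc hooks directly; intel_update_fbc() below, for
 * example, calls intel_enable_fbc(crtc, 500).
 */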

void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->cfb_plane = -1;
}

/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm device
 *
 * Set up the framebuffer compression hardware at mode set time. We
 * enable it if possible:
 * - plane A only (on pre-965)
 * - no pixel multiply/line duplication
 * - no alpha buffer discard
 * - no dual wide
 * - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one. It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;
	unsigned int max_hdisplay, max_vdisplay;

	if (!i915_powersave)
		return;

	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 * - more than one pipe is active
	 * - changing FBC params (stride, fence, mode)
	 * - new fb is too large to fit in compressed buffer
	 * - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
		if (intel_crtc_active(tmp_crtc) &&
		    !to_intel_crtc(tmp_crtc)->primary_disabled) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			enable_fbc = 0;
	}
	if (!enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		max_hdisplay = 4096;
		max_vdisplay = 2048;
	} else {
		max_hdisplay = 2048;
		max_vdisplay = 1536;
	}
	if ((crtc->mode.hdisplay > max_hdisplay) ||
	    (crtc->mode.vdisplay > max_vdisplay)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
	    intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	/*LINTED*/
	if (in_dbg_master())
		goto out_disable;

	if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
		DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
	i915_gem_stolen_cleanup_compression(dev);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

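/* Each entry, as matched by intel_get_cxsr_latency() below, is:
 * { is_desktop, is_ddr3, fsb_freq, mem_freq,
 *   display_sr, cursor_sr, display_hpll_disable, cursor_hpll_disable }
 * with the four latencies given in nanoseconds.
 */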
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 * - memory configuration (speed, channels)
 * - chipset
 * - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;
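
/* As a rough sanity check (illustrative numbers, not from the original
 * source): with a 100 MHz pixel clock at 4 bytes per pixel, 5 us of
 * latency means 100000/1000 * 4 * 5000/1000 = 2000 bytes, i.e. about
 * 32 64-byte cachelines, drain from the FIFO while a fetch is
 * outstanding; see intel_calculate_wm() below for the full calculation.
 */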

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_HPLLOFF_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
	G4X_FIFO_SIZE,
	G4X_MAX_WM,
	G4X_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	VALLEYVIEW_FIFO_SIZE,
	VALLEYVIEW_MAX_WM,
	VALLEYVIEW_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	I965_CURSOR_FIFO,
	VALLEYVIEW_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	I945_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
	I915_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
	I855GM_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
	I830_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};

static const struct intel_watermark_params ironlake_display_wm_info = {
	ILK_DISPLAY_FIFO,
	ILK_DISPLAY_MAXWM,
	ILK_DISPLAY_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
	ILK_CURSOR_FIFO,
	ILK_CURSOR_MAXWM,
	ILK_CURSOR_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
	ILK_DISPLAY_SR_FIFO,
	ILK_DISPLAY_MAX_SRWM,
	ILK_DISPLAY_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
	ILK_CURSOR_SR_FIFO,
	ILK_CURSOR_MAX_SRWM,
	ILK_CURSOR_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};

static const struct intel_watermark_params sandybridge_display_wm_info = {
	SNB_DISPLAY_FIFO,
	SNB_DISPLAY_MAXWM,
	SNB_DISPLAY_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
	SNB_CURSOR_FIFO,
	SNB_CURSOR_MAXWM,
	SNB_CURSOR_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
	SNB_DISPLAY_SR_FIFO,
	SNB_DISPLAY_MAX_SRWM,
	SNB_DISPLAY_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
	SNB_CURSOR_SR_FIFO,
	SNB_CURSOR_MAX_SRWM,
	SNB_CURSOR_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the display FIFO
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
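
/* Continuing the illustrative numbers above (not from the original
 * source): clock_in_khz = 100000, pixel_size = 4, latency_ns = 5000 and
 * a 64-byte cacheline give entries_required = DIV_ROUND_UP(2000, 64) = 32,
 * so with a hypothetical 96-entry FIFO and a guard size of 2 the watermark
 * level would be 96 - (32 + 2) = 62, subject to the max_wm/default_wm
 * clamps above.
 */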

static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}

static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
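	/* latency is in ns and line time in us, so the quotient is in
	 * thousandths of a line; the added 1000 before dividing rounds
	 * line_count up and guarantees it is never zero.
	 */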
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
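	/* The small buffer method charges only the bytes consumed during
	 * the latency window, computed from the pixel rate; the large
	 * buffer method charges whole display lines for the same window.
	 */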
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

static bool vlv_compute_drain_latency(struct drm_device *dev,
				      int plane,
				      int *plane_prec_mult,
				      int *plane_dl,
				      int *cursor_prec_mult,
				      int *cursor_dl)
{
	struct drm_crtc *crtc;
	int clock, pixel_size;
	int entries;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc))
		return false;

	clock = crtc->mode.clock;	/* VESA DOT Clock */
	pixel_size = crtc->fb->bits_per_pixel / 8;	/* BPP */

	entries = (clock / 1000) * pixel_size;
	*plane_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
						     pixel_size);

	entries = (clock / 1000) * 4;	/* BPP is always 4 for cursor */
	*cursor_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);

	return true;
}

/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */

static void vlv_update_drain_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_prec, planea_dl, planeb_prec, planeb_dl;
	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
	int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
						  either 16 or 32 */

	/* For plane A, Cursor A */
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
				      &cursor_prec_mult, &cursora_dl)) {
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;

		I915_WRITE(VLV_DDL1, cursora_prec |
			   (cursora_dl << DDL_CURSORA_SHIFT) |
			   planea_prec | planea_dl);
	}

	/* For plane B, Cursor B */
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
				      &cursor_prec_mult, &cursorb_dl)) {
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;

		I915_WRITE(VLV_DDL2, cursorb_prec |
			   (cursorb_dl << DDL_CURSORB_SHIFT) |
			   planeb_prec | planeb_dl);
	}
}

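/* True when exactly one bit is set, i.e. exactly one plane is enabled:
 * a non-zero power of two satisfies (x & (x - 1)) == 0.
 */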
#define single_plane_enabled(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))

static void valleyview_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;

	vlv_update_drain_latency(dev);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
	} else {
		I915_WRITE(FW_BLC_SELF_VLV,
			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}

static void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}

static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = (int) i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}

static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		int cpp = crtc->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		int cpp = crtc->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}

static void i830_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

#define ILK_LP0_PLANE_LATENCY	700
#define ILK_LP0_CURSOR_LATENCY	1300

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
1696 static bool ironlake_check_srwm(struct drm_device *dev, int level,
1697 int fbc_wm, int display_wm, int cursor_wm,
1698 const struct intel_watermark_params *display,
1699 const struct intel_watermark_params *cursor)
1700 {
1701 struct drm_i915_private *dev_priv = dev->dev_private;
1702
1703 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
1704 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
1705
1706 if (fbc_wm > SNB_FBC_MAX_SRWM) {
1707 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
1708 fbc_wm, SNB_FBC_MAX_SRWM, level);
1709
1710 /* fbc has its own way to disable FBC WM */
1711 I915_WRITE(DISP_ARB_CTL,
1712 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1713 return false;
1714 } else if (INTEL_INFO(dev)->gen >= 6) {
1715 /* enable FBC WM (except on ILK, where it must remain off) */
1716 I915_WRITE(DISP_ARB_CTL,
1717 I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
1718 }
1719
1720 if (display_wm > display->max_wm) {
1721 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
1722 display_wm, SNB_DISPLAY_MAX_SRWM, level);
1723 return false;
1724 }
1725
1726 if (cursor_wm > cursor->max_wm) {
1727 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
1728 cursor_wm, SNB_CURSOR_MAX_SRWM, level);
1729 return false;
1730 }
1731
1732 if (!(fbc_wm || display_wm || cursor_wm)) {
1733 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
1734 return false;
1735 }
1736
1737 return true;
1738 }
1739
1740 /*
1741 * Compute the watermark values for WM[1-3].
1742 */
1743 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1744 int latency_ns,
1745 const struct intel_watermark_params *display,
1746 const struct intel_watermark_params *cursor,
1747 int *fbc_wm, int *display_wm, int *cursor_wm)
1748 {
1749 struct drm_crtc *crtc;
1750 unsigned long line_time_us;
1751 int hdisplay, htotal, pixel_size, clock;
1752 int line_count, line_size;
1753 int small, large;
1754 int entries;
1755
1756 if (!latency_ns) {
1757 *fbc_wm = *display_wm = *cursor_wm = 0;
1758 return false;
1759 }
1760
1761 crtc = intel_get_crtc_for_plane(dev, plane);
1762 hdisplay = crtc->mode.hdisplay;
1763 htotal = crtc->mode.htotal;
1764 clock = crtc->mode.clock;
1765 pixel_size = crtc->fb->bits_per_pixel / 8;
1766
1767 line_time_us = (htotal * 1000) / clock;
1768 line_count = (latency_ns / line_time_us + 1000) / 1000;
1769 line_size = hdisplay * pixel_size;
1770
1771 /* Use the minimum of the small and large buffer method for primary */
1772 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1773 large = line_count * line_size;
1774
1775 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1776 *display_wm = entries + display->guard_size;
1777
1778 /*
1779 * Spec says:
1780 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
1781 */
1782 *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
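/*
* E.g. (illustrative): *display_wm = 38 with a 1920-pixel, 32bpp line
* (line_size = 7680) gives DIV_ROUND_UP(38 * 64, 7680) + 2 = 1 + 2 = 3
* lines of FBC watermark.
*/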
1783
1784 /* calculate the self-refresh watermark for display cursor */
1785 entries = line_count * pixel_size * 64;
1786 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1787 *cursor_wm = entries + cursor->guard_size;
1788
1789 return ironlake_check_srwm(dev, level,
1790 *fbc_wm, *display_wm, *cursor_wm,
1791 display, cursor);
1792 }
1793
1794 static void ironlake_update_wm(struct drm_device *dev)
1795 {
1796 struct drm_i915_private *dev_priv = dev->dev_private;
1797 int fbc_wm, plane_wm, cursor_wm;
1798 unsigned int enabled;
1799
1800 enabled = 0;
1801 if (g4x_compute_wm0(dev, PIPE_A,
1802 &ironlake_display_wm_info,
1803 ILK_LP0_PLANE_LATENCY,
1804 &ironlake_cursor_wm_info,
1805 ILK_LP0_CURSOR_LATENCY,
1806 &plane_wm, &cursor_wm)) {
1807 I915_WRITE(WM0_PIPEA_ILK,
1808 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1809 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1810 " plane %d, " "cursor: %d\n",
1811 plane_wm, cursor_wm);
1812 enabled |= 1 << PIPE_A;
1813 }
1814
1815 if (g4x_compute_wm0(dev, PIPE_B,
1816 &ironlake_display_wm_info,
1817 ILK_LP0_PLANE_LATENCY,
1818 &ironlake_cursor_wm_info,
1819 ILK_LP0_CURSOR_LATENCY,
1820 &plane_wm, &cursor_wm)) {
1821 I915_WRITE(WM0_PIPEB_ILK,
1822 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1823 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1824 " plane %d, cursor: %d\n",
1825 plane_wm, cursor_wm);
1826 enabled |= 1 << PIPE_B;
1827 }
1828
1829 /*
1830 * Calculate and update the self-refresh watermark only when one
1831 * display plane is used.
1832 */
1833 I915_WRITE(WM3_LP_ILK, 0);
1834 I915_WRITE(WM2_LP_ILK, 0);
1835 I915_WRITE(WM1_LP_ILK, 0);
1836
1837 if (!single_plane_enabled(enabled))
1838 return;
1839 enabled = ffs(enabled) - 1;
1840
1841 /* WM1 */
1842 if (!ironlake_compute_srwm(dev, 1, enabled,
1843 ILK_READ_WM1_LATENCY() * 500,
1844 &ironlake_display_srwm_info,
1845 &ironlake_cursor_srwm_info,
1846 &fbc_wm, &plane_wm, &cursor_wm))
1847 return;
1848
1849 I915_WRITE(WM1_LP_ILK,
1850 WM1_LP_SR_EN |
1851 (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1852 (fbc_wm << WM1_LP_FBC_SHIFT) |
1853 (plane_wm << WM1_LP_SR_SHIFT) |
1854 cursor_wm);
1855
1856 /* WM2 */
1857 if (!ironlake_compute_srwm(dev, 2, enabled,
1858 ILK_READ_WM2_LATENCY() * 500,
1859 &ironlake_display_srwm_info,
1860 &ironlake_cursor_srwm_info,
1861 &fbc_wm, &plane_wm, &cursor_wm))
1862 return;
1863
1864 I915_WRITE(WM2_LP_ILK,
1865 WM2_LP_EN |
1866 (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1867 (fbc_wm << WM1_LP_FBC_SHIFT) |
1868 (plane_wm << WM1_LP_SR_SHIFT) |
1869 cursor_wm);
1870
1871 /*
1872 * WM3 is unsupported on ILK, probably because we don't have latency
1873 * data for that power state
1874 */
1875 }
1876
1877 static void sandybridge_update_wm(struct drm_device *dev)
1878 {
1879 struct drm_i915_private *dev_priv = dev->dev_private;
1880 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
1881 u32 val;
1882 int fbc_wm, plane_wm, cursor_wm;
1883 unsigned int enabled;
1884
1885 enabled = 0;
1886 if (g4x_compute_wm0(dev, PIPE_A,
1887 &sandybridge_display_wm_info, latency,
1888 &sandybridge_cursor_wm_info, latency,
1889 &plane_wm, &cursor_wm)) {
1890 val = I915_READ(WM0_PIPEA_ILK);
1891 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1892 I915_WRITE(WM0_PIPEA_ILK, val |
1893 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1894 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1895 " plane %d, " "cursor: %d\n",
1896 plane_wm, cursor_wm);
1897 enabled |= 1 << PIPE_A;
1898 }
1899
1900 if (g4x_compute_wm0(dev, PIPE_B,
1901 &sandybridge_display_wm_info, latency,
1902 &sandybridge_cursor_wm_info, latency,
1903 &plane_wm, &cursor_wm)) {
1904 val = I915_READ(WM0_PIPEB_ILK);
1905 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1906 I915_WRITE(WM0_PIPEB_ILK, val |
1907 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1908 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1909 " plane %d, cursor: %d\n",
1910 plane_wm, cursor_wm);
1911 enabled |= 1 << PIPE_B;
1912 }
1913
1914 /*
1915 * Calculate and update the self-refresh watermark only when one
1916 * display plane is used.
1917 *
1918 * SNB supports 3 levels of watermarks.
1919 *
1920 * WM1/WM2/WM3 watermarks have to be enabled in ascending order,
1921 * and disabled in descending order.
1922 *
1923 */
1924 I915_WRITE(WM3_LP_ILK, 0);
1925 I915_WRITE(WM2_LP_ILK, 0);
1926 I915_WRITE(WM1_LP_ILK, 0);
1927
1928 if (!single_plane_enabled(enabled) ||
1929 dev_priv->sprite_scaling_enabled)
1930 return;
1931 enabled = ffs(enabled) - 1;
1932
1933 /* WM1 */
1934 if (!ironlake_compute_srwm(dev, 1, enabled,
1935 SNB_READ_WM1_LATENCY() * 500,
1936 &sandybridge_display_srwm_info,
1937 &sandybridge_cursor_srwm_info,
1938 &fbc_wm, &plane_wm, &cursor_wm))
1939 return;
1940
1941 I915_WRITE(WM1_LP_ILK,
1942 WM1_LP_SR_EN |
1943 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1944 (fbc_wm << WM1_LP_FBC_SHIFT) |
1945 (plane_wm << WM1_LP_SR_SHIFT) |
1946 cursor_wm);
1947
1948 /* WM2 */
1949 if (!ironlake_compute_srwm(dev, 2, enabled,
1950 SNB_READ_WM2_LATENCY() * 500,
1951 &sandybridge_display_srwm_info,
1952 &sandybridge_cursor_srwm_info,
1953 &fbc_wm, &plane_wm, &cursor_wm))
1954 return;
1955
1956 I915_WRITE(WM2_LP_ILK,
1957 WM2_LP_EN |
1958 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1959 (fbc_wm << WM1_LP_FBC_SHIFT) |
1960 (plane_wm << WM1_LP_SR_SHIFT) |
1961 cursor_wm);
1962
1963 /* WM3 */
1964 if (!ironlake_compute_srwm(dev, 3, enabled,
1965 SNB_READ_WM3_LATENCY() * 500,
1966 &sandybridge_display_srwm_info,
1967 &sandybridge_cursor_srwm_info,
1968 &fbc_wm, &plane_wm, &cursor_wm))
1969 return;
1970
1971 I915_WRITE(WM3_LP_ILK,
1972 WM3_LP_EN |
1973 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1974 (fbc_wm << WM1_LP_FBC_SHIFT) |
1975 (plane_wm << WM1_LP_SR_SHIFT) |
1976 cursor_wm);
1977 }
1978
1979 static void ivybridge_update_wm(struct drm_device *dev)
1980 {
1981 struct drm_i915_private *dev_priv = dev->dev_private;
1982 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
1983 u32 val;
1984 int fbc_wm, plane_wm, cursor_wm;
1985 int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
1986 unsigned int enabled;
1987
1988 enabled = 0;
1989 if (g4x_compute_wm0(dev, PIPE_A,
1990 &sandybridge_display_wm_info, latency,
1991 &sandybridge_cursor_wm_info, latency,
1992 &plane_wm, &cursor_wm)) {
1993 val = I915_READ(WM0_PIPEA_ILK);
1994 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1995 I915_WRITE(WM0_PIPEA_ILK, val |
1996 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1997 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1998 " plane %d, " "cursor: %d\n",
1999 plane_wm, cursor_wm);
2000 enabled |= 1 << PIPE_A;
2001 }
2002
2003 if (g4x_compute_wm0(dev, PIPE_B,
2004 &sandybridge_display_wm_info, latency,
2005 &sandybridge_cursor_wm_info, latency,
2006 &plane_wm, &cursor_wm)) {
2007 val = I915_READ(WM0_PIPEB_ILK);
2008 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2009 I915_WRITE(WM0_PIPEB_ILK, val |
2010 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2011 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
2012 " plane %d, cursor: %d\n",
2013 plane_wm, cursor_wm);
2014 enabled |= 1 << PIPE_B;
2015 }
2016
2017 if (g4x_compute_wm0(dev, PIPE_C,
2018 &sandybridge_display_wm_info, latency,
2019 &sandybridge_cursor_wm_info, latency,
2020 &plane_wm, &cursor_wm)) {
2021 val = I915_READ(WM0_PIPEC_IVB);
2022 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2023 I915_WRITE(WM0_PIPEC_IVB, val |
2024 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2025 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
2026 " plane %d, cursor: %d\n",
2027 plane_wm, cursor_wm);
2028 enabled |= 1 << PIPE_C;
2029 }
2030
2031 /*
2032 * Calculate and update the self-refresh watermark only when one
2033 * display plane is used.
2034 *
2035 * SNB supports 3 levels of watermarks.
2036 *
2037 * WM1/WM2/WM3 watermarks have to be enabled in ascending order,
2038 * and disabled in descending order.
2039 *
2040 */
2041 I915_WRITE(WM3_LP_ILK, 0);
2042 I915_WRITE(WM2_LP_ILK, 0);
2043 I915_WRITE(WM1_LP_ILK, 0);
2044
2045 if (!single_plane_enabled(enabled) ||
2046 dev_priv->sprite_scaling_enabled)
2047 return;
2048 enabled = ffs(enabled) - 1;
2049
2050 /* WM1 */
2051 if (!ironlake_compute_srwm(dev, 1, enabled,
2052 SNB_READ_WM1_LATENCY() * 500,
2053 &sandybridge_display_srwm_info,
2054 &sandybridge_cursor_srwm_info,
2055 &fbc_wm, &plane_wm, &cursor_wm))
2056 return;
2057
2058 I915_WRITE(WM1_LP_ILK,
2059 WM1_LP_SR_EN |
2060 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
2061 (fbc_wm << WM1_LP_FBC_SHIFT) |
2062 (plane_wm << WM1_LP_SR_SHIFT) |
2063 cursor_wm);
2064
2065 /* WM2 */
2066 if (!ironlake_compute_srwm(dev, 2, enabled,
2067 SNB_READ_WM2_LATENCY() * 500,
2068 &sandybridge_display_srwm_info,
2069 &sandybridge_cursor_srwm_info,
2070 &fbc_wm, &plane_wm, &cursor_wm))
2071 return;
2072
2073 I915_WRITE(WM2_LP_ILK,
2074 WM2_LP_EN |
2075 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
2076 (fbc_wm << WM1_LP_FBC_SHIFT) |
2077 (plane_wm << WM1_LP_SR_SHIFT) |
2078 cursor_wm);
2079
2080 /* WM3, note we have to correct the cursor latency */
2081 if (!ironlake_compute_srwm(dev, 3, enabled,
2082 SNB_READ_WM3_LATENCY() * 500,
2083 &sandybridge_display_srwm_info,
2084 &sandybridge_cursor_srwm_info,
2085 &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
2086 !ironlake_compute_srwm(dev, 3, enabled,
2087 2 * SNB_READ_WM3_LATENCY() * 500,
2088 &sandybridge_display_srwm_info,
2089 &sandybridge_cursor_srwm_info,
2090 &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
2091 return;
2092
2093 I915_WRITE(WM3_LP_ILK,
2094 WM3_LP_EN |
2095 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
2096 (fbc_wm << WM1_LP_FBC_SHIFT) |
2097 (plane_wm << WM1_LP_SR_SHIFT) |
2098 cursor_wm);
2099 }
2100
2101 static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev,
2102 struct drm_crtc *crtc)
2103 {
2104 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2105 uint32_t pixel_rate, pfit_size;
2106
2107 pixel_rate = intel_crtc->config.adjusted_mode.clock;
2108
2109 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
2110 * adjust the pixel_rate here. */
2111
2112 pfit_size = intel_crtc->config.pch_pfit.size;
2113 if (pfit_size) {
2114 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
2115
2116 pipe_w = intel_crtc->config.requested_mode.hdisplay;
2117 pipe_h = intel_crtc->config.requested_mode.vdisplay;
2118 pfit_w = (pfit_size >> 16) & 0xFFFF;
2119 pfit_h = pfit_size & 0xFFFF;
2120 if (pipe_w < pfit_w)
2121 pipe_w = pfit_w;
2122 if (pipe_h < pfit_h)
2123 pipe_h = pfit_h;
2124
2125 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
2126 pfit_w * pfit_h);
2127 }
2128
2129 return pixel_rate;
2130 }
2131
2132 static uint32_t hsw_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
2133 uint32_t latency)
2134 {
2135 uint64_t ret;
2136
2137 ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
2138 ret = POS_DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
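/*
* E.g. (illustrative): pixel_rate = 148500 kHz, 4 bytes per pixel and a
* 4us latency, i.e. 40 in the 0.1us units implied by the 64 * 10000
* divisor, give 148500 * 4 * 40 / 640000 rounded up = 38, + 2 = 40
* cachelines.
*/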
2139
2140 return (uint32_t)ret;
2141 }
2142
2143 static uint32_t hsw_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
2144 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
2145 uint32_t latency)
2146 {
2147 uint32_t ret;
2148
2149 ret = (latency * pixel_rate) / (pipe_htotal * 10000);
2150 ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
2151 ret = DIV_ROUND_UP(ret, 64) + 2;
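/*
* E.g. (illustrative): a 4us latency is shorter than one 14.8us line
* ((40 * 148500) / (2200 * 10000) = 0), so one full line is fetched:
* 1 * 1920 * 4 = 7680 bytes = 120 cachelines, + 2 = 122.
*/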
2152 return ret;
2153 }
2154
2155 static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
2156 uint8_t bytes_per_pixel)
2157 {
2158 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
2159 }
2160
2161 struct hsw_pipe_wm_parameters {
2162 bool active;
2163 bool sprite_enabled;
2164 uint8_t pri_bytes_per_pixel;
2165 uint8_t spr_bytes_per_pixel;
2166 uint8_t cur_bytes_per_pixel;
2167 uint32_t pri_horiz_pixels;
2168 uint32_t spr_horiz_pixels;
2169 uint32_t cur_horiz_pixels;
2170 uint32_t pipe_htotal;
2171 uint32_t pixel_rate;
2172 };
2173
2174 struct hsw_wm_maximums {
2175 uint16_t pri;
2176 uint16_t spr;
2177 uint16_t cur;
2178 uint16_t fbc;
2179 };
2180
2181 struct hsw_lp_wm_result {
2182 bool enable;
2183 bool fbc_enable;
2184 uint32_t pri_val;
2185 uint32_t spr_val;
2186 uint32_t cur_val;
2187 uint32_t fbc_val;
2188 };
2189
2190 struct hsw_wm_values {
2191 uint32_t wm_pipe[3];
2192 uint32_t wm_lp[3];
2193 uint32_t wm_lp_spr[3];
2194 uint32_t wm_linetime[3];
2195 bool enable_fbc_wm;
2196 };
2197
2198 enum hsw_data_buf_partitioning {
2199 HSW_DATA_BUF_PART_1_2,
2200 HSW_DATA_BUF_PART_5_6,
2201 };
2202
2203 /* For both WM_PIPE and WM_LP. */
2204 static uint32_t hsw_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
2205 uint32_t mem_value,
2206 bool is_lp)
2207 {
2208 uint32_t method1, method2;
2209
2210 /* TODO: for now, assume the primary plane is always enabled. */
2211 if (!params->active)
2212 return 0;
2213
2214 method1 = hsw_wm_method1(params->pixel_rate,
2215 params->pri_bytes_per_pixel,
2216 mem_value);
2217
2218 if (!is_lp)
2219 return method1;
2220
2221 method2 = hsw_wm_method2(params->pixel_rate,
2222 params->pipe_htotal,
2223 params->pri_horiz_pixels,
2224 params->pri_bytes_per_pixel,
2225 mem_value);
2226
2227 return min(method1, method2);
2228 }
2229
2230 /* For both WM_PIPE and WM_LP. */
2231 static uint32_t hsw_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
2232 uint32_t mem_value)
2233 {
2234 uint32_t method1, method2;
2235
2236 if (!params->active || !params->sprite_enabled)
2237 return 0;
2238
2239 method1 = hsw_wm_method1(params->pixel_rate,
2240 params->spr_bytes_per_pixel,
2241 mem_value);
2242 method2 = hsw_wm_method2(params->pixel_rate,
2243 params->pipe_htotal,
2244 params->spr_horiz_pixels,
2245 params->spr_bytes_per_pixel,
2246 mem_value);
2247 return min(method1, method2);
2248 }
2249
2250 /* For both WM_PIPE and WM_LP. */
2251 static uint32_t hsw_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
2252 uint32_t mem_value)
2253 {
2254 if (!params->active)
2255 return 0;
2256
2257 return hsw_wm_method2(params->pixel_rate,
2258 params->pipe_htotal,
2259 params->cur_horiz_pixels,
2260 params->cur_bytes_per_pixel,
2261 mem_value);
2262 }
2263
2264 /* Only for WM_LP. */
2265 static uint32_t hsw_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
2266 uint32_t pri_val,
2267 uint32_t mem_value)
2268 {
2269 if (!params->active)
2270 return 0;
2271
2272 return hsw_wm_fbc(pri_val,
2273 params->pri_horiz_pixels,
2274 params->pri_bytes_per_pixel);
2275 }
2276
2277 static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max,
2278 struct hsw_pipe_wm_parameters *params,
2279 struct hsw_lp_wm_result *result)
2280 {
2281 enum pipe pipe;
2282 uint32_t pri_val[3], spr_val[3], cur_val[3], fbc_val[3];
2283
2284 for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
2285 struct hsw_pipe_wm_parameters *p = &params[pipe];
2286
2287 pri_val[pipe] = hsw_compute_pri_wm(p, mem_value, true);
2288 spr_val[pipe] = hsw_compute_spr_wm(p, mem_value);
2289 cur_val[pipe] = hsw_compute_cur_wm(p, mem_value);
2290 fbc_val[pipe] = hsw_compute_fbc_wm(p, pri_val[pipe], mem_value);
2291 }
2292
2293 result->pri_val = max(max(pri_val[0], pri_val[1]), pri_val[2]);
2294 result->spr_val = max(max(spr_val[0], spr_val[1]), spr_val[2]);
2295 result->cur_val = max(max(cur_val[0], cur_val[1]), cur_val[2]);
2296 result->fbc_val = max(max(fbc_val[0], fbc_val[1]), fbc_val[2]);
2297
2298 if (result->fbc_val > max->fbc) {
2299 result->fbc_enable = false;
2300 result->fbc_val = 0;
2301 } else {
2302 result->fbc_enable = true;
2303 }
2304
2305 result->enable = result->pri_val <= max->pri &&
2306 result->spr_val <= max->spr &&
2307 result->cur_val <= max->cur;
2308 return result->enable;
2309 }
2310
2311 static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
2312 uint32_t mem_value, enum pipe pipe,
2313 struct hsw_pipe_wm_parameters *params)
2314 {
2315 uint32_t pri_val, cur_val, spr_val;
2316
2317 pri_val = hsw_compute_pri_wm(params, mem_value, false);
2318 spr_val = hsw_compute_spr_wm(params, mem_value);
2319 cur_val = hsw_compute_cur_wm(params, mem_value);
2320
2321 if (pri_val > 127)
2322 DRM_ERROR("Primary WM error, mode not supported for pipe %c\n",
2323 pipe_name(pipe));
2324 if (spr_val > 127)
2325 DRM_ERROR("Sprite WM error, mode not supported for pipe %c\n",
2326 pipe_name(pipe));
2327 if (cur_val > 63)
2328 DRM_ERROR("Cursor WM error, mode not supported for pipe %c\n",
2329 pipe_name(pipe));
2330
2331 return (pri_val << WM0_PIPE_PLANE_SHIFT) |
2332 (spr_val << WM0_PIPE_SPRITE_SHIFT) |
2333 cur_val;
2334 }
2335
2336 static uint32_t
2337 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2338 {
2339 struct drm_i915_private *dev_priv = dev->dev_private;
2340 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2341 struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
2342 u32 linetime, ips_linetime;
2343
2344 if (!intel_crtc_active(crtc))
2345 return 0;
2346
2347 /* The watermarks are computed based on how long it takes to fill a single
2348 * row at the given clock rate, multiplied by 8.
2349 */
2350 linetime = POS_DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
2351 ips_linetime = POS_DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
2352 intel_ddi_get_cdclk_freq(dev_priv));
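/* E.g. (illustrative): htotal = 2200 at 148500 kHz gives
* ROUND_CLOSEST(2200 * 8000, 148500) = 119, i.e. the ~14.8us line time
* expressed in 1/8us units. */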
2353
2354 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2355 PIPE_WM_LINETIME_TIME(linetime);
2356 }
2357
2358 static void hsw_compute_wm_parameters(struct drm_device *dev,
2359 struct hsw_pipe_wm_parameters *params,
2360 uint32_t *wm,
2361 struct hsw_wm_maximums *lp_max_1_2,
2362 struct hsw_wm_maximums *lp_max_5_6)
2363 {
2364 struct drm_i915_private *dev_priv = dev->dev_private;
2365 struct drm_crtc *crtc;
2366 struct drm_plane *plane;
2367 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2368 enum pipe pipe;
2369 int pipes_active = 0, sprites_enabled = 0;
2370
2371 if ((sskpd >> 56) & 0xFF)
2372 wm[0] = (sskpd >> 56) & 0xFF;
2373 else
2374 wm[0] = sskpd & 0xF;
2375 wm[1] = ((sskpd >> 4) & 0xFF) * 5;
2376 wm[2] = ((sskpd >> 12) & 0xFF) * 5;
2377 wm[3] = ((sskpd >> 20) & 0x1FF) * 5;
2378 wm[4] = ((sskpd >> 32) & 0x1FF) * 5;
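/*
* wm[0] is used as read while the higher levels are scaled by 5; the
* apparent intent is that the hardware reports WM1+ latencies in 0.5us
* steps while the rest of this code works in 0.1us units (interpretation,
* not from the spec).
*/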
2379
2380 list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
2381 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2382 struct hsw_pipe_wm_parameters *p;
2383
2384 pipe = intel_crtc->pipe;
2385 p = &params[pipe];
2386
2387 p->active = intel_crtc_active(crtc);
2388 if (!p->active)
2389 continue;
2390
2391 pipes_active++;
2392
2393 p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
2394 p->pixel_rate = hsw_wm_get_pixel_rate(dev, crtc);
2395 p->pri_bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
2396 p->cur_bytes_per_pixel = 4;
2397 p->pri_horiz_pixels =
2398 intel_crtc->config.requested_mode.hdisplay;
2399 p->cur_horiz_pixels = 64;
2400 }
2401
2402 list_for_each_entry(plane, struct drm_plane, &dev->mode_config.plane_list, head) {
2403 struct intel_plane *intel_plane = to_intel_plane(plane);
2404 struct hsw_pipe_wm_parameters *p;
2405
2406 pipe = intel_plane->pipe;
2407 p = &params[pipe];
2408
2409 p->sprite_enabled = intel_plane->wm.enable;
2410 p->spr_bytes_per_pixel = intel_plane->wm.bytes_per_pixel;
2411 p->spr_horiz_pixels = intel_plane->wm.horiz_pixels;
2412
2413 if (p->sprite_enabled)
2414 sprites_enabled++;
2415 }
2416
2417 if (pipes_active > 1) {
2418 lp_max_1_2->pri = lp_max_5_6->pri = sprites_enabled ? 128 : 256;
2419 lp_max_1_2->spr = lp_max_5_6->spr = 128;
2420 lp_max_1_2->cur = lp_max_5_6->cur = 64;
2421 } else {
2422 lp_max_1_2->pri = sprites_enabled ? 384 : 768;
2423 lp_max_5_6->pri = sprites_enabled ? 128 : 768;
2424 lp_max_1_2->spr = 384;
2425 lp_max_5_6->spr = 640;
2426 lp_max_1_2->cur = lp_max_5_6->cur = 255;
2427 }
2428 lp_max_1_2->fbc = lp_max_5_6->fbc = 15;
2429 }
2430
2431 static void hsw_compute_wm_results(struct drm_device *dev,
2432 struct hsw_pipe_wm_parameters *params,
2433 uint32_t *wm,
2434 struct hsw_wm_maximums *lp_maximums,
2435 struct hsw_wm_values *results)
2436 {
2437 struct drm_i915_private *dev_priv = dev->dev_private;
2438 struct drm_crtc *crtc;
2439 struct hsw_lp_wm_result lp_results[4];
2440 enum pipe pipe;
2441 int level, max_level, wm_lp;
2442
2443 for (level = 1; level <= 4; level++)
2444 if (!hsw_compute_lp_wm(wm[level], lp_maximums, params,
2445 &lp_results[level - 1]))
2446 break;
2447 max_level = level - 1;
2448
2449 memset(results, 0, sizeof(*results));
2450 
2451 /* The spec says it is preferred to disable FBC WMs instead of disabling
2452 * a WM level. (Keep this after the memset, which would otherwise clear it.) */
2453 results->enable_fbc_wm = true;
2454 for (level = 1; level <= max_level; level++) {
2455 if (!lp_results[level - 1].fbc_enable) {
2456 results->enable_fbc_wm = false;
2457 break;
2458 }
2459 }
2460 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2461 const struct hsw_lp_wm_result *r;
2462
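/*
* Level selection: when all four LP results are usable (max_level == 4),
* WM1 still takes level 1 but WM2/WM3 take levels 3/4, skipping level 2;
* otherwise WM[n] simply takes level n.
*/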
2463 level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
2464 if (level > max_level)
2465 break;
2466
2467 r = &lp_results[level - 1];
2468 results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
2469 r->fbc_val,
2470 r->pri_val,
2471 r->cur_val);
2472 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2473 }
2474
2475 for_each_pipe(pipe)
2476 results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, wm[0],
2477 pipe,
2478 &params[pipe]);
2479
2480 for_each_pipe(pipe) {
2481 crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2482 results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc);
2483 }
2484 }
2485
2486 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2487 * case both are at the same level. Prefer r1 in case they're the same. */
2488 struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
2489 struct hsw_wm_values *r2)
2490 {
2491 int i, val_r1 = 0, val_r2 = 0;
2492
2493 for (i = 0; i < 3; i++) {
2494 if (r1->wm_lp[i] & WM3_LP_EN)
2495 val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
2496 if (r2->wm_lp[i] & WM3_LP_EN)
2497 val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
2498 }
2499
2500 if (val_r1 == val_r2) {
2501 if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
2502 return r2;
2503 else
2504 return r1;
2505 } else if (val_r1 > val_r2) {
2506 return r1;
2507 } else {
2508 return r2;
2509 }
2510 }
2511
2512 /*
2513 * The spec says we shouldn't write when we don't need to, because every write
2514 * causes WMs to be re-evaluated, expending some power.
2515 */
2516 static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2517 struct hsw_wm_values *results,
2518 enum hsw_data_buf_partitioning partitioning)
2519 {
2520 struct hsw_wm_values previous;
2521 uint32_t val;
2522 enum hsw_data_buf_partitioning prev_partitioning;
2523 bool prev_enable_fbc_wm;
2524
2525 previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
2526 previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
2527 previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
2528 previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
2529 previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
2530 previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
2531 previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2532 previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2533 previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2534 previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
2535 previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
2536 previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
2537
2538 prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2539 HSW_DATA_BUF_PART_5_6 : HSW_DATA_BUF_PART_1_2;
2540
2541 prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2542
2543 if (memcmp(results->wm_pipe, previous.wm_pipe,
2544 sizeof(results->wm_pipe)) == 0 &&
2545 memcmp(results->wm_lp, previous.wm_lp,
2546 sizeof(results->wm_lp)) == 0 &&
2547 memcmp(results->wm_lp_spr, previous.wm_lp_spr,
2548 sizeof(results->wm_lp_spr)) == 0 &&
2549 memcmp(results->wm_linetime, previous.wm_linetime,
2550 sizeof(results->wm_linetime)) == 0 &&
2551 partitioning == prev_partitioning &&
2552 results->enable_fbc_wm == prev_enable_fbc_wm)
2553 return;
2554
2555 if (previous.wm_lp[2] != 0)
2556 I915_WRITE(WM3_LP_ILK, 0);
2557 if (previous.wm_lp[1] != 0)
2558 I915_WRITE(WM2_LP_ILK, 0);
2559 if (previous.wm_lp[0] != 0)
2560 I915_WRITE(WM1_LP_ILK, 0);
2561
2562 if (previous.wm_pipe[0] != results->wm_pipe[0])
2563 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2564 if (previous.wm_pipe[1] != results->wm_pipe[1])
2565 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2566 if (previous.wm_pipe[2] != results->wm_pipe[2])
2567 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2568
2569 if (previous.wm_linetime[0] != results->wm_linetime[0])
2570 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2571 if (previous.wm_linetime[1] != results->wm_linetime[1])
2572 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2573 if (previous.wm_linetime[2] != results->wm_linetime[2])
2574 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2575
2576 if (prev_partitioning != partitioning) {
2577 val = I915_READ(WM_MISC);
2578 if (partitioning == HSW_DATA_BUF_PART_1_2)
2579 val &= ~WM_MISC_DATA_PARTITION_5_6;
2580 else
2581 val |= WM_MISC_DATA_PARTITION_5_6;
2582 I915_WRITE(WM_MISC, val);
2583 }
2584
2585 if (prev_enable_fbc_wm != results->enable_fbc_wm) {
2586 val = I915_READ(DISP_ARB_CTL);
2587 if (results->enable_fbc_wm)
2588 val &= ~DISP_FBC_WM_DIS;
2589 else
2590 val |= DISP_FBC_WM_DIS;
2591 I915_WRITE(DISP_ARB_CTL, val);
2592 }
2593
2594 if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
2595 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2596 if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
2597 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2598 if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
2599 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2600
2601 if (results->wm_lp[0] != 0)
2602 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2603 if (results->wm_lp[1] != 0)
2604 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2605 if (results->wm_lp[2] != 0)
2606 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2607 }
2608
2609 static void haswell_update_wm(struct drm_device *dev)
2610 {
2611 struct drm_i915_private *dev_priv = dev->dev_private;
2612 struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
2613 struct hsw_pipe_wm_parameters params[3];
2614 struct hsw_wm_values results_1_2, results_5_6, *best_results;
2615 uint32_t wm[5];
2616 enum hsw_data_buf_partitioning partitioning;
2617
2618 hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2, &lp_max_5_6);
2619
2620 hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results_1_2);
2621 if (lp_max_1_2.pri != lp_max_5_6.pri) {
2622 hsw_compute_wm_results(dev, params, wm, &lp_max_5_6,
2623 &results_5_6);
2624 best_results = hsw_find_best_result(&results_1_2, &results_5_6);
2625 } else {
2626 best_results = &results_1_2;
2627 }
2628
2629 partitioning = (best_results == &results_1_2) ?
2630 HSW_DATA_BUF_PART_1_2 : HSW_DATA_BUF_PART_5_6;
2631
2632 hsw_write_wm_values(dev_priv, best_results, partitioning);
2633 }
2634
2635 static void haswell_update_sprite_wm(struct drm_device *dev, int pipe,
2636 uint32_t sprite_width, int pixel_size,
2637 bool enable)
2638 {
2639 struct drm_plane *plane;
2640
2641 list_for_each_entry(plane, struct drm_plane, &dev->mode_config.plane_list, head) {
2642 struct intel_plane *intel_plane = to_intel_plane(plane);
2643
2644 if (intel_plane->pipe == pipe) {
2645 intel_plane->wm.enable = enable;
2646 intel_plane->wm.horiz_pixels = sprite_width + 1;
2647 intel_plane->wm.bytes_per_pixel = (uint8_t)pixel_size;
2648 break;
2649 }
2650 }
2651
2652 haswell_update_wm(dev);
2653 }
2654
2655 static bool
2656 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
2657 uint32_t sprite_width, int pixel_size,
2658 const struct intel_watermark_params *display,
2659 int display_latency_ns, int *sprite_wm)
2660 {
2661 struct drm_crtc *crtc;
2662 int clock;
2663 int entries, tlb_miss;
2664
2665 crtc = intel_get_crtc_for_plane(dev, plane);
2666 if (!intel_crtc_active(crtc)) {
2667 *sprite_wm = display->guard_size;
2668 return false;
2669 }
2670
2671 clock = crtc->mode.clock;
2672
2673 /* Use the small buffer method to calculate the sprite watermark */
2674 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
2675 tlb_miss = display->fifo_size*display->cacheline_size -
2676 sprite_width * 8;
2677 if (tlb_miss > 0)
2678 entries += tlb_miss;
2679 entries = DIV_ROUND_UP(entries, display->cacheline_size);
2680 *sprite_wm = entries + display->guard_size;
2681 if (*sprite_wm > (int)display->max_wm)
2682 *sprite_wm = display->max_wm;
2683
2684 return true;
2685 }
2686
2687 static bool
2688 sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
2689 uint32_t sprite_width, int pixel_size,
2690 const struct intel_watermark_params *display,
2691 int latency_ns, int *sprite_wm)
2692 {
2693 struct drm_crtc *crtc;
2694 unsigned long line_time_us;
2695 int clock;
2696 int line_count, line_size;
2697 int small, large;
2698 int entries;
2699
2700 if (!latency_ns) {
2701 *sprite_wm = 0;
2702 return false;
2703 }
2704
2705 crtc = intel_get_crtc_for_plane(dev, plane);
2706 clock = crtc->mode.clock;
2707 if (!clock) {
2708 *sprite_wm = 0;
2709 return false;
2710 }
2711
2712 line_time_us = (sprite_width * 1000) / clock;
2713 if (!line_time_us) {
2714 *sprite_wm = 0;
2715 return false;
2716 }
2717
2718 line_count = (latency_ns / line_time_us + 1000) / 1000;
2719 line_size = sprite_width * pixel_size;
2720
2721 /* Use the minimum of the small and large buffer method for primary */
2722 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
2723 large = line_count * line_size;
2724
2725 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
2726 *sprite_wm = entries + display->guard_size;
2727
2728 return *sprite_wm <= 0x3ff;
2729 }
2730
2731 static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
2732 uint32_t sprite_width, int pixel_size,
2733 bool enable)
2734 {
2735 struct drm_i915_private *dev_priv = dev->dev_private;
2736 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
2737 u32 val;
2738 int sprite_wm, reg;
2739 int ret;
2740
2741 if (!enable)
2742 return;
2743
2744 switch (pipe) {
2745 case 0:
2746 reg = WM0_PIPEA_ILK;
2747 break;
2748 case 1:
2749 reg = WM0_PIPEB_ILK;
2750 break;
2751 case 2:
2752 reg = WM0_PIPEC_IVB;
2753 break;
2754 default:
2755 return; /* bad pipe */
2756 }
2757
2758 ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
2759 &sandybridge_display_wm_info,
2760 latency, &sprite_wm);
2761 if (!ret) {
2762 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
2763 pipe_name(pipe));
2764 return;
2765 }
2766
2767 val = I915_READ(reg);
2768 val &= ~WM0_PIPE_SPRITE_MASK;
2769 I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
2770 DRM_DEBUG_KMS("sprite watermarks For pipe %c - %d\n", pipe_name(pipe), sprite_wm);
2771
2772
2773 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2774 pixel_size,
2775 &sandybridge_display_srwm_info,
2776 SNB_READ_WM1_LATENCY() * 500,
2777 &sprite_wm);
2778 if (!ret) {
2779 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
2780 pipe_name(pipe));
2781 return;
2782 }
2783 I915_WRITE(WM1S_LP_ILK, sprite_wm);
2784
2785 /* Only IVB has two more LP watermarks for sprite */
2786 if (!IS_IVYBRIDGE(dev))
2787 return;
2788
2789 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2790 pixel_size,
2791 &sandybridge_display_srwm_info,
2792 SNB_READ_WM2_LATENCY() * 500,
2793 &sprite_wm);
2794 if (!ret) {
2795 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
2796 pipe_name(pipe));
2797 return;
2798 }
2799 I915_WRITE(WM2S_LP_IVB, sprite_wm);
2800
2801 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2802 pixel_size,
2803 &sandybridge_display_srwm_info,
2804 SNB_READ_WM3_LATENCY() * 500,
2805 &sprite_wm);
2806 if (!ret) {
2807 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
2808 pipe_name(pipe));
2809 return;
2810 }
2811 I915_WRITE(WM3S_LP_IVB, sprite_wm);
2812 }
2813
2814 /**
2815 * intel_update_watermarks - update FIFO watermark values based on current modes
2816 *
2817 * Calculate watermark values for the various WM regs based on current mode
2818 * and plane configuration.
2819 *
2820 * There are several cases to deal with here:
2821 * - normal (i.e. non-self-refresh)
2822 * - self-refresh (SR) mode
2823 * - lines are large relative to FIFO size (buffer can hold up to 2)
2824 * - lines are small relative to FIFO size (buffer can hold more than 2
2825 * lines), so need to account for TLB latency
2826 *
2827 * The normal calculation is:
2828 * watermark = dotclock * bytes per pixel * latency
2829 * where latency is platform & configuration dependent (we assume pessimal
2830 * values here).
2831 *
2832 * The SR calculation is:
2833 * watermark = (trunc(latency/line time)+1) * surface width *
2834 * bytes per pixel
2835 * where
2836 * line time = htotal / dotclock
2837 * surface width = hdisplay for normal plane and 64 for cursor
2838 * and latency is assumed to be high, as above.
2839 *
2840 * The final value programmed to the register should always be rounded up,
2841 * and include an extra 2 entries to account for clock crossings.
2842 *
2843 * We don't use the sprite, so we can ignore that. And on Crestline we have
2844 * to set the non-SR watermarks to 8.
2845 */
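/*
* Worked example (illustrative numbers, not from any spec): a 148.5 MHz
* dotclock at 32bpp with 2us latency needs 148500000 * 4 * 0.000002 =
* 1188 bytes, i.e. 19 64-byte FIFO entries, plus the extra 2 for clock
* crossings. For SR with a 6us latency and a 14.8us line time,
* trunc(6 / 14.8) + 1 = 1 line * 1920 * 4 = 7680 bytes.
*/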
2846 void intel_update_watermarks(struct drm_device *dev)
2847 {
2848 struct drm_i915_private *dev_priv = dev->dev_private;
2849
2850 if (dev_priv->display.update_wm)
2851 dev_priv->display.update_wm(dev);
2852 }
2853
2854 void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
2855 uint32_t sprite_width, int pixel_size,
2856 bool enable)
2857 {
2858 struct drm_i915_private *dev_priv = dev->dev_private;
2859
2860 if (dev_priv->display.update_sprite_wm)
2861 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
2862 pixel_size, enable);
2863 }
2864
2865 static struct drm_i915_gem_object *
2866 intel_alloc_context_page(struct drm_device *dev)
2867 {
2868 struct drm_i915_gem_object *ctx;
2869 int ret;
2870
2871 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2872
2873 ctx = i915_gem_alloc_object(dev, 4096);
2874 if (!ctx) {
2875 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2876 return NULL;
2877 }
2878
2879 ret = i915_gem_object_pin(ctx, 4096, true, false);
2880 if (ret) {
2881 DRM_ERROR("failed to pin power context: %d\n", ret);
2882 goto err_unref;
2883 }
2884
2885 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2886 if (ret) {
2887 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2888 goto err_unpin;
2889 }
2890
2891 return ctx;
2892
2893 err_unpin:
2894 i915_gem_object_unpin(ctx);
2895 err_unref:
2896 drm_gem_object_unreference(&ctx->base);
2897 return NULL;
2898 }
2899
2900 /**
2901 * Lock protecting IPS related data structures
2902 */
2903 spinlock_t mchdev_lock;
2904
2905 /* Global for IPS driver to get at the current i915 device. Protected by
2906 * mchdev_lock. */
2907 static struct drm_i915_private *i915_mch_dev;
2908
2909 bool ironlake_set_drps(struct drm_device *dev, u8 val)
2910 {
2911 struct drm_i915_private *dev_priv = dev->dev_private;
2912 u16 rgvswctl;
2913
2914 ASSERT(MUTEX_HELD(&mchdev_lock));
2915
2916 rgvswctl = I915_READ16(MEMSWCTL);
2917 if (rgvswctl & MEMCTL_CMD_STS) {
2918 DRM_DEBUG("gpu busy, RCS change rejected\n");
2919 return false; /* still busy with another command */
2920 }
2921
2922 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
2923 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
2924 I915_WRITE16(MEMSWCTL, rgvswctl);
2925 POSTING_READ16(MEMSWCTL);
2926
2927 rgvswctl |= MEMCTL_CMD_STS;
2928 I915_WRITE16(MEMSWCTL, rgvswctl);
2929
2930 return true;
2931 }
2932
2933 static void ironlake_enable_drps(struct drm_device *dev)
2934 {
2935 struct drm_i915_private *dev_priv = dev->dev_private;
2936 u32 rgvmodectl = I915_READ(MEMMODECTL);
2937 u8 fmax, fmin, fstart, vstart;
2938
2939 spin_lock_irq(&mchdev_lock);
2940
2941 /* Enable temp reporting */
2942 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2943 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2944
2945 /* 100ms RC evaluation intervals */
2946 I915_WRITE(RCUPEI, 100000);
2947 I915_WRITE(RCDNEI, 100000);
2948
2949 /* Set max/min thresholds to 90ms and 80ms respectively */
2950 I915_WRITE(RCBMAXAVG, 90000);
2951 I915_WRITE(RCBMINAVG, 80000);
2952
2953 I915_WRITE(MEMIHYST, 1);
2954
2955 /* Set up min, max, and cur for interrupt handling */
2956 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2957 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2958 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2959 MEMMODE_FSTART_SHIFT;
2960
2961 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
2962 PXVFREQ_PX_SHIFT;
2963
2964 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
2965 dev_priv->ips.fstart = fstart;
2966
2967 dev_priv->ips.max_delay = fstart;
2968 dev_priv->ips.min_delay = fmin;
2969 dev_priv->ips.cur_delay = fstart;
2970
2971 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
2972 fmax, fmin, fstart);
2973
2974 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
2975
2976 /*
2977 * Interrupts will be enabled in ironlake_irq_postinstall
2978 */
2979
2980 I915_WRITE(VIDSTART, vstart);
2981 POSTING_READ(VIDSTART);
2982
2983 rgvmodectl |= MEMMODE_SWMODE_EN;
2984 I915_WRITE(MEMMODECTL, rgvmodectl);
2985
2986 if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2987 DRM_ERROR("stuck trying to change perf mode\n");
2988 msleep(1);
2989
2990 (void) ironlake_set_drps(dev, fstart);
2991
2992 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2993 I915_READ(0x112e0);
2994 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
2995 dev_priv->ips.last_count2 = I915_READ(0x112f4);
2996 dev_priv->ips.last_time2 = jiffies;
2997
2998 spin_unlock_irq(&mchdev_lock);
2999 }
3000
3001 static void ironlake_disable_drps(struct drm_device *dev)
3002 {
3003 struct drm_i915_private *dev_priv = dev->dev_private;
3004 u16 rgvswctl;
3005
3006 spin_lock_irq(&mchdev_lock);
3007
3008 rgvswctl = I915_READ16(MEMSWCTL);
3009
3010 /* Ack interrupts, disable EFC interrupt */
3011 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
3012 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
3013 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
3014 I915_WRITE(DEIIR, DE_PCU_EVENT);
3015 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
3016
3017 /* Go back to the starting frequency */
3018 (void) ironlake_set_drps(dev, dev_priv->ips.fstart);
3019 msleep(1);
3020 rgvswctl |= MEMCTL_CMD_STS;
3021 I915_WRITE(MEMSWCTL, rgvswctl);
3022 msleep(1);
3023
3024 spin_unlock_irq(&mchdev_lock);
3025 }
3026
3027 /* There's a funny hw issue where the hw returns all 0 when reading from
3028 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
3029 * ourselves, instead of doing a rmw cycle (which might result in us clearing
3030 * all limits and the gpu stuck at whatever frequency it is at atm).
3031 */
3032 static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
3033 {
3034 u32 limits;
3035
3036 limits = 0;
3037
3038 if (*val >= dev_priv->rps.max_delay)
3039 *val = dev_priv->rps.max_delay;
3040 limits |= dev_priv->rps.max_delay << 24;
3041
3042 /* Only set the down limit when we've reached the lowest level to avoid
3043 * getting more interrupts, otherwise leave this clear. This prevents a
3044 * race in the hw when coming out of rc6: There's a tiny window where
3045 * the hw runs at the minimal clock before selecting the desired
3046 * frequency, if the down threshold expires in that window we will not
3047 * receive a down interrupt. */
3048 if (*val <= dev_priv->rps.min_delay) {
3049 *val = dev_priv->rps.min_delay;
3050 limits |= dev_priv->rps.min_delay << 16;
3051 }
3052
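/*
* E.g. (illustrative, assuming the usual 50 MHz units on SNB/IVB):
* max_delay = 0x14 (1000 MHz) and min_delay = 0x0a with *val at the
* floor packs to (0x14 << 24) | (0x0a << 16) = 0x140a0000.
*/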
3053 return limits;
3054 }
3055
3056 void gen6_set_rps(struct drm_device *dev, u8 val)
3057 {
3058 struct drm_i915_private *dev_priv = dev->dev_private;
3059 u32 limits = gen6_rps_limits(dev_priv, &val);
3060
3061 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3062 WARN_ON(val > dev_priv->rps.max_delay);
3063 WARN_ON(val < dev_priv->rps.min_delay);
3064
3065 if (val == dev_priv->rps.cur_delay)
3066 return;
3067
3068 if (IS_HASWELL(dev))
3069 I915_WRITE(GEN6_RPNSWREQ,
3070 HSW_FREQUENCY(val));
3071 else
3072 I915_WRITE(GEN6_RPNSWREQ,
3073 GEN6_FREQUENCY(val) |
3074 GEN6_OFFSET(0) |
3075 GEN6_AGGRESSIVE_TURBO);
3076
3077 /* Make sure we continue to get interrupts
3078 * until we hit the minimum or maximum frequencies.
3079 */
3080 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
3081
3082 POSTING_READ(GEN6_RPNSWREQ);
3083
3084 dev_priv->rps.cur_delay = (u8)val;
3085 }
3086
3087 /*
3088 * Wait until the previous freq change has completed,
3089 * or the timeout elapsed, and then update our notion
3090 * of the current GPU frequency.
3091 */
3092 static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
3093 {
3094 unsigned long timeout = jiffies + msecs_to_jiffies(10);
3095 u32 pval;
3096
3097 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3098
3099 do {
3100 pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3101 if (time_after(jiffies, timeout)) {
3102 DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
3103 break;
3104 }
3105 udelay(10);
3106 } while (pval & 1);
3107
3108 pval >>= 8;
3109
3110 if (pval != dev_priv->rps.cur_delay)
3111 DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n",
3112 vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
3113 dev_priv->rps.cur_delay,
3114 vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
3115
3116 dev_priv->rps.cur_delay = (u8)pval;
3117 }
3118
3119 void valleyview_set_rps(struct drm_device *dev, u8 val)
3120 {
3121 struct drm_i915_private *dev_priv = dev->dev_private;
3122
3123 gen6_rps_limits(dev_priv, &val);
3124
3125 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3126 WARN_ON(val > dev_priv->rps.max_delay);
3127 WARN_ON(val < dev_priv->rps.min_delay);
3128
3129 vlv_update_rps_cur_delay(dev_priv);
3130
3131 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3132 vlv_gpu_freq(dev_priv->mem_freq,
3133 dev_priv->rps.cur_delay),
3134 dev_priv->rps.cur_delay,
3135 vlv_gpu_freq(dev_priv->mem_freq, val), val);
3136
3137 if (val == dev_priv->rps.cur_delay)
3138 return;
3139
3140 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3141
3142 dev_priv->rps.cur_delay = val;
3143 }
3144
3145
3146 static void gen6_disable_rps(struct drm_device *dev)
3147 {
3148 struct drm_i915_private *dev_priv = dev->dev_private;
3149
3150 I915_WRITE(GEN6_RC_CONTROL, 0);
3151 I915_WRITE(GEN6_RPNSWREQ, 1UL << 31);
3152 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3153 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
3154 /* Complete PM interrupt masking here doesn't race with the rps work
3155 * item again unmasking PM interrupts because that is using a different
3156 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
3157 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
3158
3159 spin_lock_irq(&dev_priv->rps.lock);
3160 dev_priv->rps.pm_iir = 0;
3161 spin_unlock_irq(&dev_priv->rps.lock);
3162
3163 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3164 }
3165
3166 static void valleyview_disable_rps(struct drm_device *dev)
3167 {
3168 struct drm_i915_private *dev_priv = dev->dev_private;
3169
3170 I915_WRITE(GEN6_RC_CONTROL, 0);
3171 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3172 I915_WRITE(GEN6_PMIER, 0);
3173 /* Complete PM interrupt masking here doesn't race with the rps work
3174 * item again unmasking PM interrupts because that is using a different
3175 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
3176 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
3177
3178 spin_lock_irq(&dev_priv->rps.lock);
3179 dev_priv->rps.pm_iir = 0;
3180 spin_unlock_irq(&dev_priv->rps.lock);
3181
3182 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
3183
3184 if (dev_priv->vlv_pctx) {
3185 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3186 dev_priv->vlv_pctx = NULL;
3187 }
3188 }
3189
3190 int intel_enable_rc6(const struct drm_device *dev)
3191 {
3192 /* Respect the kernel parameter if it is set */
3193 if (i915_enable_rc6 >= 0)
3194 return i915_enable_rc6;
3195
3196 /* Disable RC6 on Ironlake */
3197 if (INTEL_INFO(dev)->gen == 5)
3198 return 0;
3199
3200 if (IS_HASWELL(dev)) {
3201 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
3202 return INTEL_RC6_ENABLE;
3203 }
3204
3205 /* snb/ivb have more than one rc6 state. */
3206 if (INTEL_INFO(dev)->gen == 6) {
3207 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
3208 return INTEL_RC6_ENABLE;
3209 }
3210
3211 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
3212 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3213 }
3214
3215 static void gen6_enable_rps(struct drm_device *dev)
3216 {
3217 struct drm_i915_private *dev_priv = dev->dev_private;
3218 struct intel_ring_buffer *ring;
3219 u32 rp_state_cap;
3220 u32 gt_perf_status;
3221 u32 rc6vids, pcu_mbox, rc6_mask = 0;
3222 u32 gtfifodbg;
3223 int rc6_mode;
3224 int i, ret;
3225
3226 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3227
3228 /* Here begins a magic sequence of register writes to enable
3229 * auto-downclocking.
3230 *
3231 * Perhaps there might be some value in exposing these to
3232 * userspace...
3233 */
3234 I915_WRITE(GEN6_RC_STATE, 0);
3235
3236 /* Clear the DBG now so we don't confuse earlier errors */
3237 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3238 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3239 I915_WRITE(GTFIFODBG, gtfifodbg);
3240 }
3241
3242 gen6_gt_force_wake_get(dev_priv);
3243
3244 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3245 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
3246
3247 /* In units of 100MHz */
3248 dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
3249 dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
3250 dev_priv->rps.cur_delay = 0;
3251
3252 /* disable the counters and set deterministic thresholds */
3253 I915_WRITE(GEN6_RC_CONTROL, 0);
3254
3255 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
3256 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
3257 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
3258 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3259 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3260
3261 for_each_ring(ring, dev_priv, i)
3262 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3263
3264 I915_WRITE(GEN6_RC_SLEEP, 0);
3265 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
3266 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3267 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
3268 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3269
3270 /* Check if we are enabling RC6 */
3271 rc6_mode = intel_enable_rc6(dev_priv->dev);
3272 if (rc6_mode & INTEL_RC6_ENABLE)
3273 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
3274
3275 /* We don't use those on Haswell */
3276 if (!IS_HASWELL(dev)) {
3277 if (rc6_mode & INTEL_RC6p_ENABLE)
3278 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
3279
3280 if (rc6_mode & INTEL_RC6pp_ENABLE)
3281 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3282 }
3283
3284 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3285 (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3286 (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3287 (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3288
3289 I915_WRITE(GEN6_RC_CONTROL,
3290 rc6_mask |
3291 GEN6_RC_CTL_EI_MODE(1) |
3292 GEN6_RC_CTL_HW_ENABLE);
3293
3294 if (IS_HASWELL(dev)) {
3295 I915_WRITE(GEN6_RPNSWREQ,
3296 HSW_FREQUENCY(10));
3297 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3298 HSW_FREQUENCY(12));
3299 } else {
3300 I915_WRITE(GEN6_RPNSWREQ,
3301 GEN6_FREQUENCY(10) |
3302 GEN6_OFFSET(0) |
3303 GEN6_AGGRESSIVE_TURBO);
3304 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3305 GEN6_FREQUENCY(12));
3306 }
3307
3308 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
3309 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3310 dev_priv->rps.max_delay << 24 |
3311 dev_priv->rps.min_delay << 16);
3312
3313 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3314 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3315 I915_WRITE(GEN6_RP_UP_EI, 66000);
3316 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3317
3318 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3319 I915_WRITE(GEN6_RP_CONTROL,
3320 GEN6_RP_MEDIA_TURBO |
3321 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3322 GEN6_RP_MEDIA_IS_GFX |
3323 GEN6_RP_ENABLE |
3324 GEN6_RP_UP_BUSY_AVG |
3325 (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
3326
3327 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3328 if (!ret) {
3329 pcu_mbox = 0;
3330 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3331 if (ret && pcu_mbox & (1UL<<31)) { /* OC supported */
3332 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3333 (dev_priv->rps.max_delay & 0xff) * 50,
3334 (pcu_mbox & 0xff) * 50);
3335 dev_priv->rps.hw_max = pcu_mbox & 0xff;
3336 }
3337 } else {
3338 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3339 }
3340
3341 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
3342
3343 /* requires MSI enabled */
3344 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
3345 spin_lock_irq(&dev_priv->rps.lock);
3346 /* FIXME: Our interrupt enabling sequence is bonghits.
3347 * dev_priv->rps.pm_iir really should be 0 here. */
3348 dev_priv->rps.pm_iir = 0;
3349 I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
3350 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3351 spin_unlock_irq(&dev_priv->rps.lock);
3352 /* enable all PM interrupts */
3353 I915_WRITE(GEN6_PMINTRMSK, 0);
3354
3355 rc6vids = 0;
3356 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
3357 if (IS_GEN6(dev) && ret) {
3358 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
3359 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
3360 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
3361 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
3362 rc6vids &= 0xffff00;
3363 rc6vids |= GEN6_ENCODE_RC6_VID(450);
3364 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3365 if (ret)
3366 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
3367 }
3368
3369 gen6_gt_force_wake_put(dev_priv);
3370 }
3371
3372 static void gen6_update_ring_freq(struct drm_device *dev)
3373 {
3374 struct drm_i915_private *dev_priv = dev->dev_private;
3375 int min_freq = 15;
3376 unsigned int gpu_freq;
3377 unsigned int max_ia_freq, min_ring_freq;
3378 int scaling_factor = 180;
3379
3380 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3381
3382 if (cpu_freq == 0)
3383 return;
3384
3385 max_ia_freq = cpu_freq;
3386 DRM_INFO("CPU frequence %d MHz", max_ia_freq);
3387 min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
3388 /* convert DDR frequency from units of 133.3MHz to bandwidth */
3389 min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
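/* Illustrative decode (an assumption about the fuse encoding, not
 * from the spec): a DCLK value of 4, i.e. 4 * 133.3 = 533MHz, maps
 * to (2 * 4 * 4 + 2) / 3 = 11, which becomes the floor for the ring
 * ratios programmed below. */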
3390
3391 /*
3392 * For each potential GPU frequency, load a ring frequency we'd like
3393 * to use for memory access. We do this by specifying the IA frequency
3394 * the PCU should use as a reference to determine the ring frequency.
3395 */
3396 for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
3397 gpu_freq--) {
3398 int diff = dev_priv->rps.max_delay - gpu_freq;
3399 unsigned int ia_freq = 0, ring_freq = 0;
3400
3401 if (IS_HASWELL(dev)) {
3402 ring_freq = (gpu_freq * 5 + 3) / 4;
3403 ring_freq = max(min_ring_freq, ring_freq);
3404 /* leave ia_freq as the default, chosen by cpufreq */
3405 } else {
3406 /* On older processors, there is no separate ring
3407 * clock domain, so in order to boost the bandwidth
3408 * of the ring, we need to upclock the CPU (ia_freq).
3409 *
3410 * For GPU frequencies less than 750MHz, just use the lowest
3411 * ring freq.
3412 */
3413 if (gpu_freq < min_freq)
3414 ia_freq = 800;
3415 else
3416 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
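/* ia_freq is in MHz at this point; convert it to the 100MHz units
 * the PCU mailbox expects, rounding to the nearest step. */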
3417 ia_freq = ia_freq / 100 + (((ia_freq % 100) >= 50)? 1 : 0);
3418 }
3419
3420 sandybridge_pcode_write(dev_priv,
3421 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
3422 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
3423 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
3424 gpu_freq);
3425 }
3426 }
3427
3428 int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3429 {
3430 u32 val, rp0;
3431
3432 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
3433
3434 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
3435 /* Clamp to max */
3436 rp0 = min(rp0, 0xea);
3437
3438 return rp0;
3439 }
3440
3441 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3442 {
3443 u32 val, rpe;
3444
3445 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
3446 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
3447 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
3448 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
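/* RPe is fused in two parts: the low 5 bits come from the LO fuse
 * register and the remaining upper bits from the HI fuse register. */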
3449
3450 return rpe;
3451 }
3452
3453 int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3454 {
3455 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3456 }
3457 #if 0
3458 static void vlv_rps_timer_work(struct work_struct *work)
3459 {
3460 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
3461 rps.vlv_work.work);
3462
3463 /*
3464 * Timer fired, we must be idle. Drop to min voltage state.
3465 * Note: we use RPe here since it should match the
3466 * Vmin we were shooting for. That should give us better
3467 * perf when we come back out of RC6 than if we used the
3468 * min freq available.
3469 */
3470 mutex_lock(&dev_priv->rps.hw_lock);
3471 if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
3472 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3473 mutex_unlock(&dev_priv->rps.hw_lock);
3474 }
3475 #endif
3476 static void valleyview_setup_pctx(struct drm_device *dev)
3477 {
3478 struct drm_i915_private *dev_priv = dev->dev_private;
3479 struct drm_i915_gem_object *pctx;
3480 unsigned long pctx_paddr;
3481 u32 pcbr;
3482 int pctx_size = 24*1024;
3483
3484 pcbr = I915_READ(VLV_PCBR);
3485 if (pcbr) {
3486 /* BIOS set it up already, grab the pre-alloc'd space */
3487 int pcbr_offset;
3488
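/* PCBR holds a 4KB-aligned physical address; turn it into an
 * offset from the base of stolen memory. */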
3489 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
3490 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
3491 pcbr_offset,
3492 -1,
3493 pctx_size);
3494 goto out;
3495 }
3496
3497 /*
3498 * From the Gunit register HAS:
3499 * The Gfx driver is expected to program this register and ensure
3500 * proper allocation within Gfx stolen memory. For example, this
3501 	 * register should be programmed such that the PCBR range does not
3502 * overlap with other ranges, such as the frame buffer, protected
3503 * memory, or any other relevant ranges.
3504 */
3505 pctx = i915_gem_object_create_stolen(dev, pctx_size);
3506 if (!pctx) {
3507 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
3508 return;
3509 }
3510
3511 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
3512 I915_WRITE(VLV_PCBR, pctx_paddr);
3513
3514 out:
3515 dev_priv->vlv_pctx = pctx;
3516 }
3517
3518 static void valleyview_enable_rps(struct drm_device *dev)
3519 {
3520 struct drm_i915_private *dev_priv = dev->dev_private;
3521 struct intel_ring_buffer *ring;
3522 u32 gtfifodbg, val;
3523 int i;
3524
3525 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3526
3527 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3528 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3529 I915_WRITE(GTFIFODBG, gtfifodbg);
3530 }
3531
3532 valleyview_setup_pctx(dev);
3533
3534 gen6_gt_force_wake_get(dev_priv);
3535
3536 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3537 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3538 I915_WRITE(GEN6_RP_UP_EI, 66000);
3539 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3540
3541 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3542
3543 I915_WRITE(GEN6_RP_CONTROL,
3544 GEN6_RP_MEDIA_TURBO |
3545 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3546 GEN6_RP_MEDIA_IS_GFX |
3547 GEN6_RP_ENABLE |
3548 GEN6_RP_UP_BUSY_AVG |
3549 GEN6_RP_DOWN_IDLE_CONT);
3550
3551 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
3552 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3553 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3554
3555 for_each_ring(ring, dev_priv, i)
3556 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3557
3558 I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
3559
3560 /* allows RC6 residency counter to work */
3561 I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
3562 I915_WRITE(GEN6_RC_CONTROL,
3563 GEN7_RC_CTL_TO_MODE);
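/* TO mode: the GT enters RC6 on its own once the idle timeout
 * expires, rather than on an EI-based request. */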
3564
3565 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3566 switch ((val >> 6) & 3) {
3567 case 0:
3568 case 1:
3569 dev_priv->mem_freq = 800;
3570 break;
3571 case 2:
3572 dev_priv->mem_freq = 1066;
3573 break;
3574 case 3:
3575 dev_priv->mem_freq = 1333;
3576 break;
3577 }
3578 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
3579
3580 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
3581 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
3582
3583 dev_priv->rps.cur_delay = (val >> 8) & 0xff;
3584 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
3585 vlv_gpu_freq(dev_priv->mem_freq,
3586 dev_priv->rps.cur_delay),
3587 dev_priv->rps.cur_delay);
3588
3589 dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
3590 dev_priv->rps.hw_max = dev_priv->rps.max_delay;
3591 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3592 vlv_gpu_freq(dev_priv->mem_freq,
3593 dev_priv->rps.max_delay),
3594 dev_priv->rps.max_delay);
3595
3596 dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
3597 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3598 vlv_gpu_freq(dev_priv->mem_freq,
3599 dev_priv->rps.rpe_delay),
3600 dev_priv->rps.rpe_delay);
3601
3602 dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
3603 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3604 vlv_gpu_freq(dev_priv->mem_freq,
3605 dev_priv->rps.min_delay),
3606 dev_priv->rps.min_delay);
3607
3608 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
3609 vlv_gpu_freq(dev_priv->mem_freq,
3610 dev_priv->rps.rpe_delay),
3611 dev_priv->rps.rpe_delay);
3612
3613 //INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
3614
3615 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3616
3617 /* requires MSI enabled */
3618 I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
3619 spin_lock_irq(&dev_priv->rps.lock);
3620 WARN_ON(dev_priv->rps.pm_iir != 0);
3621 I915_WRITE(GEN6_PMIMR, 0);
3622 spin_unlock_irq(&dev_priv->rps.lock);
3623 /* enable all PM interrupts */
3624 I915_WRITE(GEN6_PMINTRMSK, 0);
3625
3626 gen6_gt_force_wake_put(dev_priv);
3627 }
3628
3629 void ironlake_teardown_rc6(struct drm_device *dev)
3630 {
3631 struct drm_i915_private *dev_priv = dev->dev_private;
3632
3633 if (dev_priv->ips.renderctx) {
3634 i915_gem_object_unpin(dev_priv->ips.renderctx);
3635 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
3636 dev_priv->ips.renderctx = NULL;
3637 }
3638
3639 if (dev_priv->ips.pwrctx) {
3640 i915_gem_object_unpin(dev_priv->ips.pwrctx);
3641 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
3642 dev_priv->ips.pwrctx = NULL;
3643 }
3644 }
3645
3646 static void ironlake_disable_rc6(struct drm_device *dev)
3647 {
3648 struct drm_i915_private *dev_priv = dev->dev_private;
3649
3650 if (I915_READ(PWRCTXA)) {
3651 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
3652 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
3653 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
3654 50);
3655
3656 I915_WRITE(PWRCTXA, 0);
3657 POSTING_READ(PWRCTXA);
3658
3659 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3660 POSTING_READ(RSTDBYCTL);
3661 }
3662 }
3663
3664 static int ironlake_setup_rc6(struct drm_device *dev)
3665 {
3666 struct drm_i915_private *dev_priv = dev->dev_private;
3667
3668 if (dev_priv->ips.renderctx == NULL)
3669 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
3670 if (!dev_priv->ips.renderctx)
3671 return -ENOMEM;
3672
3673 if (dev_priv->ips.pwrctx == NULL)
3674 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
3675 if (!dev_priv->ips.pwrctx) {
3676 ironlake_teardown_rc6(dev);
3677 return -ENOMEM;
3678 }
3679
3680 return 0;
3681 }
3682
3683 static void ironlake_enable_rc6(struct drm_device *dev)
3684 {
3685 struct drm_i915_private *dev_priv = dev->dev_private;
3686 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
3687 bool was_interruptible;
3688 int ret;
3689
3690 /* rc6 disabled by default due to repeated reports of hanging during
3691 * boot and resume.
3692 */
3693 if (!intel_enable_rc6(dev))
3694 return;
3695
3696 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3697
3698 ret = ironlake_setup_rc6(dev);
3699 if (ret)
3700 return;
3701
3702 was_interruptible = dev_priv->mm.interruptible;
3703 dev_priv->mm.interruptible = false;
3704
3705 /*
3706 * GPU can automatically power down the render unit if given a page
3707 * to save state.
3708 */
3709 ret = intel_ring_begin(ring, 6);
3710 if (ret) {
3711 ironlake_teardown_rc6(dev);
3712 dev_priv->mm.interruptible = was_interruptible;
3713 return;
3714 }
3715
3716 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
3717 intel_ring_emit(ring, MI_SET_CONTEXT);
3718 intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
3719 MI_MM_SPACE_GTT |
3720 MI_SAVE_EXT_STATE_EN |
3721 MI_RESTORE_EXT_STATE_EN |
3722 MI_RESTORE_INHIBIT);
3723 intel_ring_emit(ring, MI_SUSPEND_FLUSH);
3724 intel_ring_emit(ring, MI_NOOP);
3725 intel_ring_emit(ring, MI_FLUSH);
3726 intel_ring_advance(ring);
3727
3728 /*
3729 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
3730 	 * does an implicit flush; combined with the MI_FLUSH above, it should be
3731 * safe to assume that renderctx is valid
3732 */
3733 ret = intel_ring_idle(ring);
3734 dev_priv->mm.interruptible = was_interruptible;
3735 if (ret) {
3736 DRM_ERROR("failed to enable ironlake power savings\n");
3737 ironlake_teardown_rc6(dev);
3738 return;
3739 }
3740
3741 I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
3742 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3743 }
3744
3745 static unsigned long intel_pxfreq(u32 vidfreq)
3746 {
3747 unsigned long freq;
3748 int div = (vidfreq & 0x3f0000) >> 16;
3749 int post = (vidfreq & 0x3000) >> 12;
3750 int pre = (vidfreq & 0x7);
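/* vidfreq packs a divider (bits 21:16), a post-divider exponent
 * (bits 13:12) and a pre-divider (bits 2:0) applied to a 133MHz
 * reference. Illustrative decode: div = 32, post = 1, pre = 2
 * gives 32 * 133333 / ((1 << 1) * 2) = 1066664, i.e. ~1.07GHz. */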
3751
3752 if (!pre)
3753 return 0;
3754
3755 freq = ((div * 133333) / ((1<<post) * pre));
3756
3757 return freq;
3758 }
3759
3760 static const struct cparams {
3761 u16 i;
3762 u16 t;
3763 u16 m;
3764 u16 c;
3765 } cparams[] = {
3766 { 1, 1333, 301, 28664 },
3767 { 1, 1066, 294, 24460 },
3768 { 1, 800, 294, 25192 },
3769 { 0, 1333, 276, 27605 },
3770 { 0, 1066, 276, 27605 },
3771 { 0, 800, 231, 23784 },
3772 };
3773
3774 static void intel_init_emon(struct drm_device *dev)
3775 {
3776 struct drm_i915_private *dev_priv = dev->dev_private;
3777 u32 lcfuse;
3778 u8 pxw[16];
3779 int i;
3780
3781 /* Disable to program */
3782 I915_WRITE(ECR, 0);
3783 POSTING_READ(ECR);
3784
3785 /* Program energy weights for various events */
3786 I915_WRITE(SDEW, 0x15040d00);
3787 I915_WRITE(CSIEW0, 0x007f0000);
3788 I915_WRITE(CSIEW1, 0x1e220004);
3789 I915_WRITE(CSIEW2, 0x04000004);
3790
3791 for (i = 0; i < 5; i++)
3792 I915_WRITE(PEW + (i * 4), 0);
3793 for (i = 0; i < 3; i++)
3794 I915_WRITE(DEW + (i * 4), 0);
3795
3796 /* Program P-state weights to account for frequency power adjustment */
3797 for (i = 0; i < 16; i++) {
3798 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
3799 unsigned long freq = intel_pxfreq(pxvidfreq);
3800 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
3801 PXVFREQ_PX_SHIFT;
3802 unsigned long val;
3803
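/* Energy weight scales roughly as Vid^2 * freq; the constants
 * below normalize against the 127 maximum Vid code and a 900MHz
 * reference so the weight fits in a byte (an illustrative reading
 * of the magic numbers, not from documentation). */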
3804 val = vid * vid;
3805 val *= (freq / 1000);
3806 val *= 255;
3807 val /= (127*127*900);
3808 if (val > 0xff)
3809 DRM_ERROR("bad pxval: %ld\n", val);
3810 pxw[i] = (u8)val;
3811 }
3812 /* Render standby states get 0 weight */
3813 pxw[14] = 0;
3814 pxw[15] = 0;
3815
3816 for (i = 0; i < 4; i++) {
3817 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
3818 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
3819 I915_WRITE(PXW + (i * 4), val);
3820 }
3821
3822 /* Adjust magic regs to magic values (more experimental results) */
3823 I915_WRITE(OGW0, 0);
3824 I915_WRITE(OGW1, 0);
3825 I915_WRITE(EG0, 0x00007f00);
3826 I915_WRITE(EG1, 0x0000000e);
3827 I915_WRITE(EG2, 0x000e0000);
3828 I915_WRITE(EG3, 0x68000300);
3829 I915_WRITE(EG4, 0x42000000);
3830 I915_WRITE(EG5, 0x00140031);
3831 I915_WRITE(EG6, 0);
3832 I915_WRITE(EG7, 0);
3833
3834 for (i = 0; i < 8; i++)
3835 I915_WRITE(PXWL + (i * 4), 0);
3836
3837 /* Enable PMON + select events */
3838 I915_WRITE(ECR, 0x80000019);
3839
3840 lcfuse = I915_READ(LCFUSE02);
3841
3842 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
3843 }
3844
3845 void intel_disable_gt_powersave(struct drm_device *dev)
3846 {
3847 struct drm_i915_private *dev_priv = dev->dev_private;
3848
3849 /* Interrupts should be disabled already to avoid re-arming. */
3850 /* fix me i915_quiesce */
3851 // WARN_ON(dev->irq_enabled);
3852
3853 if (IS_IRONLAKE_M(dev)) {
3854 ironlake_disable_drps(dev);
3855 ironlake_disable_rc6(dev);
3856 } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
3857 del_timer_sync(&dev_priv->rps.delayed_resume_timer);
3858 // if (IS_VALLEYVIEW(dev))
3859 // cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
3860 mutex_lock(&dev_priv->rps.hw_lock);
3861 if (IS_VALLEYVIEW(dev))
3862 valleyview_disable_rps(dev);
3863 else
3864 gen6_disable_rps(dev);
3865 mutex_unlock(&dev_priv->rps.hw_lock);
3866 }
3867 }
3868
3869 static void intel_gen6_powersave_work(struct work_struct *work)
3870 {
3871 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
3872 rps.delayed_resume_work);
3873 struct drm_device *dev = dev_priv->dev;
3874
3875 mutex_lock(&dev_priv->rps.hw_lock);
3876
3877 if (IS_VALLEYVIEW(dev)) {
3878 valleyview_enable_rps(dev);
3879 } else {
3880 gen6_enable_rps(dev);
3881 gen6_update_ring_freq(dev);
3882 }
3883 mutex_unlock(&dev_priv->rps.hw_lock);
3884 }
3885
3886 void
3887 intel_gen6_powersave_work_timer(void *device)
3888 {
3889 struct drm_device *dev = (struct drm_device *)device;
3890 struct drm_i915_private *dev_priv = dev->dev_private;
3891 (void) queue_work(dev_priv->wq, &dev_priv->rps.delayed_resume_work);
3892 }
3893 void intel_enable_gt_powersave(struct drm_device *dev)
3894 {
3895 struct drm_i915_private *dev_priv = dev->dev_private;
3896
3897 if (IS_IRONLAKE_M(dev)) {
3898 ironlake_enable_drps(dev);
3899 ironlake_enable_rc6(dev);
3900 intel_init_emon(dev);
3901 } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
3902 /*
3903 * PCU communication is slow and this doesn't need to be
3904 * done at any specific time, so do this out of our fast path
3905 * to make resume and init faster.
3906 */
3907 test_set_timer(&dev_priv->rps.delayed_resume_timer, DRM_HZ);
3908 }
3909 }
3910
3911 static void ibx_init_clock_gating(struct drm_device *dev)
3912 {
3913 struct drm_i915_private *dev_priv = dev->dev_private;
3914
3915 /*
3916 * On Ibex Peak and Cougar Point, we need to disable clock
3917 * gating for the panel power sequencer or it will fail to
3918 * start up when no ports are active.
3919 */
3920 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3921 }
3922
3923 static void g4x_disable_trickle_feed(struct drm_device *dev)
3924 {
3925 struct drm_i915_private *dev_priv = dev->dev_private;
3926 int pipe;
3927
3928 for_each_pipe(pipe) {
3929 I915_WRITE(DSPCNTR(pipe),
3930 I915_READ(DSPCNTR(pipe)) |
3931 DISPPLANE_TRICKLE_FEED_DISABLE);
3932 intel_flush_display_plane(dev_priv, pipe);
3933 }
3934 }
3935
3936 static void ironlake_init_clock_gating(struct drm_device *dev)
3937 {
3938 struct drm_i915_private *dev_priv = dev->dev_private;
3939 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
3940
3941 /* Required for FBC */
3942 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
3943 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
3944 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
3945
3946 I915_WRITE(PCH_3DCGDIS0,
3947 MARIUNIT_CLOCK_GATE_DISABLE |
3948 SVSMUNIT_CLOCK_GATE_DISABLE);
3949 I915_WRITE(PCH_3DCGDIS1,
3950 VFMUNIT_CLOCK_GATE_DISABLE);
3951
3952 /*
3953 * According to the spec the following bits should be set in
3954 * order to enable memory self-refresh
3955 * The bit 22/21 of 0x42004
3956 * The bit 5 of 0x42020
3957 * The bit 15 of 0x45000
3958 */
3959 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3960 (I915_READ(ILK_DISPLAY_CHICKEN2) |
3961 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
3962 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
3963 I915_WRITE(DISP_ARB_CTL,
3964 (I915_READ(DISP_ARB_CTL) |
3965 DISP_FBC_WM_DIS));
3966 I915_WRITE(WM3_LP_ILK, 0);
3967 I915_WRITE(WM2_LP_ILK, 0);
3968 I915_WRITE(WM1_LP_ILK, 0);
3969
3970 /*
3971 * Based on the document from hardware guys the following bits
3972 * should be set unconditionally in order to enable FBC.
3973 * The bit 22 of 0x42000
3974 * The bit 22 of 0x42004
3975 * The bit 7,8,9 of 0x42020.
3976 */
3977 if (IS_IRONLAKE_M(dev)) {
3978 I915_WRITE(ILK_DISPLAY_CHICKEN1,
3979 I915_READ(ILK_DISPLAY_CHICKEN1) |
3980 ILK_FBCQ_DIS);
3981 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3982 I915_READ(ILK_DISPLAY_CHICKEN2) |
3983 ILK_DPARB_GATE);
3984 }
3985
3986 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
3987
3988 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3989 I915_READ(ILK_DISPLAY_CHICKEN2) |
3990 ILK_ELPIN_409_SELECT);
3991 I915_WRITE(_3D_CHICKEN2,
3992 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
3993 _3D_CHICKEN2_WM_READ_PIPELINED);
3994
3995 /* WaDisableRenderCachePipelinedFlush */
3996 I915_WRITE(CACHE_MODE_0,
3997 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
3998
3999 g4x_disable_trickle_feed(dev);
4000
4001 ibx_init_clock_gating(dev);
4002 }
4003
4004 static void cpt_init_clock_gating(struct drm_device *dev)
4005 {
4006 struct drm_i915_private *dev_priv = dev->dev_private;
4007 int pipe;
4008 uint32_t val;
4009
4010 /*
4011 * On Ibex Peak and Cougar Point, we need to disable clock
4012 * gating for the panel power sequencer or it will fail to
4013 * start up when no ports are active.
4014 */
4015 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
4016 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
4017 DPLS_EDP_PPS_FIX_DIS);
4018 	/* The below fixes weird display corruption (a few pixels shifted
4019 	 * downward) seen only on the LVDS of some HP laptops with IvyBridge.
4020 */
4021 for_each_pipe(pipe) {
4022 val = I915_READ(TRANS_CHICKEN2(pipe));
4023 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
4024 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
4025 if (dev_priv->vbt.fdi_rx_polarity_inverted)
4026 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
4027 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
4028 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
4029 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
4030 I915_WRITE(TRANS_CHICKEN2(pipe), val);
4031 }
4032 /* WADP0ClockGatingDisable */
4033 for_each_pipe(pipe) {
4034 I915_WRITE(TRANS_CHICKEN1(pipe),
4035 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
4036 }
4037 }
4038
4039 static void gen6_check_mch_setup(struct drm_device *dev)
4040 {
4041 struct drm_i915_private *dev_priv = dev->dev_private;
4042 uint32_t tmp;
4043
4044 tmp = I915_READ(MCH_SSKPD);
4045 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
4046 DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
4047 DRM_INFO("This can cause pipe underruns and display issues.\n");
4048 DRM_INFO("Please upgrade your BIOS to fix this.\n");
4049 }
4050 }
4051
4052 static void gen6_init_clock_gating(struct drm_device *dev)
4053 {
4054 struct drm_i915_private *dev_priv = dev->dev_private;
4055 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
4056
4057 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
4058
4059 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4060 I915_READ(ILK_DISPLAY_CHICKEN2) |
4061 ILK_ELPIN_409_SELECT);
4062
4063 /* WaDisableHiZPlanesWhenMSAAEnabled */
4064 I915_WRITE(_3D_CHICKEN,
4065 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
4066
4067 /* WaSetupGtModeTdRowDispatch */
4068 if (IS_SNB_GT1(dev))
4069 I915_WRITE(GEN6_GT_MODE,
4070 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
4071
4072 I915_WRITE(WM3_LP_ILK, 0);
4073 I915_WRITE(WM2_LP_ILK, 0);
4074 I915_WRITE(WM1_LP_ILK, 0);
4075
4076 I915_WRITE(CACHE_MODE_0,
4077 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
4078
4079 I915_WRITE(GEN6_UCGCTL1,
4080 I915_READ(GEN6_UCGCTL1) |
4081 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
4082 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
4083
4084 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
4085 * gating disable must be set. Failure to set it results in
4086 * flickering pixels due to Z write ordering failures after
4087 * some amount of runtime in the Mesa "fire" demo, and Unigine
4088 * Sanctuary and Tropics, and apparently anything else with
4089 * alpha test or pixel discard.
4090 *
4091 * According to the spec, bit 11 (RCCUNIT) must also be set,
4092 * but we didn't debug actual testcases to find it out.
4093 *
4094 * Also apply WaDisableVDSUnitClockGating and
4095 * WaDisableRCPBUnitClockGating.
4096 */
4097 I915_WRITE(GEN6_UCGCTL2,
4098 GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
4099 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
4100 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4101
4102 /* Bspec says we need to always set all mask bits. */
4103 I915_WRITE(_3D_CHICKEN3, (0xFFFFUL << 16) |
4104 _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
4105
4106 /*
4107 * According to the spec the following bits should be
4108 * set in order to enable memory self-refresh and fbc:
4109 * The bit21 and bit22 of 0x42000
4110 * The bit21 and bit22 of 0x42004
4111 * The bit5 and bit7 of 0x42020
4112 * The bit14 of 0x70180
4113 * The bit14 of 0x71180
4114 */
4115 I915_WRITE(ILK_DISPLAY_CHICKEN1,
4116 I915_READ(ILK_DISPLAY_CHICKEN1) |
4117 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
4118 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4119 I915_READ(ILK_DISPLAY_CHICKEN2) |
4120 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
4121 I915_WRITE(ILK_DSPCLK_GATE_D,
4122 I915_READ(ILK_DSPCLK_GATE_D) |
4123 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
4124 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
4125
4126 /* WaMbcDriverBootEnable */
4127 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4128 GEN6_MBCTL_ENABLE_BOOT_FETCH);
4129
4130 g4x_disable_trickle_feed(dev);
4131
4132 /* The default value should be 0x200 according to docs, but the two
4133 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
4134 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffffUL));
4135 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
4136
4137 cpt_init_clock_gating(dev);
4138
4139 gen6_check_mch_setup(dev);
4140 }
4141
4142 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
4143 {
4144 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
4145
4146 reg &= ~GEN7_FF_SCHED_MASK;
4147 reg |= GEN7_FF_TS_SCHED_HW;
4148 reg |= GEN7_FF_VS_SCHED_HW;
4149 reg |= GEN7_FF_DS_SCHED_HW;
4150
4151 if (IS_HASWELL(dev_priv->dev))
4152 reg &= ~GEN7_FF_VS_REF_CNT_FFME;
4153
4154 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
4155 }
4156
4157 static void lpt_init_clock_gating(struct drm_device *dev)
4158 {
4159 struct drm_i915_private *dev_priv = dev->dev_private;
4160
4161 /*
4162 * TODO: this bit should only be enabled when really needed, then
4163 * disabled when not needed anymore in order to save power.
4164 */
4165 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
4166 I915_WRITE(SOUTH_DSPCLK_GATE_D,
4167 I915_READ(SOUTH_DSPCLK_GATE_D) |
4168 PCH_LP_PARTITION_LEVEL_DISABLE);
4169
4170 /* WADPOClockGatingDisable:hsw */
4171 I915_WRITE(_TRANSA_CHICKEN1,
4172 I915_READ(_TRANSA_CHICKEN1) |
4173 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
4174 }
4175
4176 static void lpt_suspend_hw(struct drm_device *dev)
4177 {
4178 struct drm_i915_private *dev_priv = dev->dev_private;
4179
4180 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
4181 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
4182
4183 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4184 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4185 }
4186 }
4187
4188 static void haswell_init_clock_gating(struct drm_device *dev)
4189 {
4190 struct drm_i915_private *dev_priv = dev->dev_private;
4191
4192 I915_WRITE(WM3_LP_ILK, 0);
4193 I915_WRITE(WM2_LP_ILK, 0);
4194 I915_WRITE(WM1_LP_ILK, 0);
4195
4196 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
4197 * This implements the WaDisableRCZUnitClockGating workaround.
4198 */
4199 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
4200
4201 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
4202 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
4203 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
4204
4205 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
4206 I915_WRITE(GEN7_L3CNTLREG1,
4207 GEN7_WA_FOR_GEN7_L3_CONTROL);
4208 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
4209 GEN7_WA_L3_CHICKEN_MODE);
4210
4211 /* This is required by WaCatErrorRejectionIssue */
4212 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
4213 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
4214 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4215
4216 g4x_disable_trickle_feed(dev);
4217
4218
4219 gen7_setup_fixed_func_scheduler(dev_priv);
4220
4221 /* WaDisable4x2SubspanOptimization */
4222 I915_WRITE(CACHE_MODE_1,
4223 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
4224
4225 /* WaMbcDriverBootEnable */
4226 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4227 GEN6_MBCTL_ENABLE_BOOT_FETCH);
4228
4229 /* WaSwitchSolVfFArbitrationPriority:hsw */
4230 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
4231
4232 /* WaRsPkgCStateDisplayPMReq:hsw */
4233 I915_WRITE(CHICKEN_PAR1_1,
4234 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
4235
4236 lpt_init_clock_gating(dev);
4237 }
4238
4239 static void ivybridge_init_clock_gating(struct drm_device *dev)
4240 {
4241 struct drm_i915_private *dev_priv = dev->dev_private;
4242 uint32_t snpcr;
4243
4244 I915_WRITE(WM3_LP_ILK, 0);
4245 I915_WRITE(WM2_LP_ILK, 0);
4246 I915_WRITE(WM1_LP_ILK, 0);
4247
4248 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
4249
4250 /* WaDisableEarlyCull */
4251 I915_WRITE(_3D_CHICKEN3,
4252 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
4253
4254 /* WaDisableBackToBackFlipFix */
4255 I915_WRITE(IVB_CHICKEN3,
4256 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
4257 CHICKEN3_DGMG_DONE_FIX_DISABLE);
4258
4259 /* WaDisablePSDDualDispatchEnable */
4260 if (IS_IVB_GT1(dev))
4261 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
4262 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
4263 else
4264 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
4265 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
4266
4267 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
4268 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
4269 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
4270
4271 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
4272 I915_WRITE(GEN7_L3CNTLREG1,
4273 GEN7_WA_FOR_GEN7_L3_CONTROL);
4274 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
4275 GEN7_WA_L3_CHICKEN_MODE);
4276 if (IS_IVB_GT1(dev))
4277 I915_WRITE(GEN7_ROW_CHICKEN2,
4278 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
4279 else
4280 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
4281 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
4282
4283
4284 /* WaForceL3Serialization */
4285 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
4286 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
4287
4288 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
4289 * gating disable must be set. Failure to set it results in
4290 * flickering pixels due to Z write ordering failures after
4291 * some amount of runtime in the Mesa "fire" demo, and Unigine
4292 * Sanctuary and Tropics, and apparently anything else with
4293 * alpha test or pixel discard.
4294 *
4295 * According to the spec, bit 11 (RCCUNIT) must also be set,
4296 * but we didn't debug actual testcases to find it out.
4297 *
4298 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
4299 * This implements the WaDisableRCZUnitClockGating workaround.
4300 */
4301 I915_WRITE(GEN6_UCGCTL2,
4302 GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
4303 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4304
4305 /* This is required by WaCatErrorRejectionIssue */
4306 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
4307 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
4308 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4309
4310 g4x_disable_trickle_feed(dev);
4311
4312 /* WaMbcDriverBootEnable */
4313 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4314 GEN6_MBCTL_ENABLE_BOOT_FETCH);
4315
4316 /* WaVSRefCountFullforceMissDisable:ivb */
4317 gen7_setup_fixed_func_scheduler(dev_priv);
4318
4319 /* WaDisable4x2SubspanOptimization */
4320 I915_WRITE(CACHE_MODE_1,
4321 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
4322
4323 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4324 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4325 snpcr |= GEN6_MBC_SNPCR_MED;
4326 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4327
4328 if (!HAS_PCH_NOP(dev))
4329 cpt_init_clock_gating(dev);
4330
4331 gen6_check_mch_setup(dev);
4332 }
4333
4334 static void valleyview_init_clock_gating(struct drm_device *dev)
4335 {
4336 struct drm_i915_private *dev_priv = dev->dev_private;
4337
4338 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
4339
4340 /* WaDisableEarlyCull */
4341 I915_WRITE(_3D_CHICKEN3,
4342 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
4343
4344 /* WaDisableBackToBackFlipFix */
4345 I915_WRITE(IVB_CHICKEN3,
4346 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
4347 CHICKEN3_DGMG_DONE_FIX_DISABLE);
4348
4349 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
4350 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
4351 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
4352
4353 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
4354 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
4355 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
4356
4357 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
4358 I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
4359 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
4360
4361 /* WaForceL3Serialization */
4362 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
4363 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
4364
4365 /* WaDisableDopClockGating */
4366 I915_WRITE(GEN7_ROW_CHICKEN2,
4367 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
4368
4369 /* This is required by WaCatErrorRejectionIssue */
4370 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
4371 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
4372 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4373
4374 /* WaMbcDriverBootEnable */
4375 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4376 GEN6_MBCTL_ENABLE_BOOT_FETCH);
4377
4378
4379 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
4380 * gating disable must be set. Failure to set it results in
4381 * flickering pixels due to Z write ordering failures after
4382 * some amount of runtime in the Mesa "fire" demo, and Unigine
4383 * Sanctuary and Tropics, and apparently anything else with
4384 * alpha test or pixel discard.
4385 *
4386 * According to the spec, bit 11 (RCCUNIT) must also be set,
4387 * but we didn't debug actual testcases to find it out.
4388 *
4389 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
4390 * This implements the WaDisableRCZUnitClockGating workaround.
4391 *
4392 * Also apply WaDisableVDSUnitClockGating and
4393 * WaDisableRCPBUnitClockGating.
4394 */
4395 I915_WRITE(GEN6_UCGCTL2,
4396 GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
4397 GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
4398 GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
4399 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
4400 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4401
4402 I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
4403
4404 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
4405
4406 I915_WRITE(CACHE_MODE_1,
4407 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
4408
4409 /*
4410 * On ValleyView, the GUnit needs to signal the GT
4411 * when flip and other events complete. So enable
4412 * all the GUnit->GT interrupts here
4413 */
4414 I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
4415
4416 /* Conservative clock gating settings for now */
4417 I915_WRITE(0x9400, 0xffffffff);
4418 I915_WRITE(0x9404, 0xffffffff);
4419 I915_WRITE(0x9408, 0xffffffff);
4420 I915_WRITE(0x940c, 0xffffffff);
4421 I915_WRITE(0x9410, 0xffffffff);
4422 I915_WRITE(0x9414, 0xffffffff);
4423 I915_WRITE(0x9418, 0xffffffff);
4424 }
4425
4426 static void g4x_init_clock_gating(struct drm_device *dev)
4427 {
4428 struct drm_i915_private *dev_priv = dev->dev_private;
4429 uint32_t dspclk_gate;
4430
4431 I915_WRITE(RENCLK_GATE_D1, 0);
4432 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
4433 GS_UNIT_CLOCK_GATE_DISABLE |
4434 CL_UNIT_CLOCK_GATE_DISABLE);
4435 I915_WRITE(RAMCLK_GATE_D, 0);
4436 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
4437 OVRUNIT_CLOCK_GATE_DISABLE |
4438 OVCUNIT_CLOCK_GATE_DISABLE;
4439 if (IS_GM45(dev))
4440 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
4441 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
4442
4443 /* WaDisableRenderCachePipelinedFlush */
4444 I915_WRITE(CACHE_MODE_0,
4445 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
4446
4447 g4x_disable_trickle_feed(dev);
4448 }
4449
4450 static void crestline_init_clock_gating(struct drm_device *dev)
4451 {
4452 struct drm_i915_private *dev_priv = dev->dev_private;
4453
4454 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
4455 I915_WRITE(RENCLK_GATE_D2, 0);
4456 I915_WRITE(DSPCLK_GATE_D, 0);
4457 I915_WRITE(RAMCLK_GATE_D, 0);
4458 I915_WRITE16(DEUC, 0);
4459 I915_WRITE(MI_ARB_STATE,
4460 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
4461 }
4462
4463 static void broadwater_init_clock_gating(struct drm_device *dev)
4464 {
4465 struct drm_i915_private *dev_priv = dev->dev_private;
4466
4467 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
4468 I965_RCC_CLOCK_GATE_DISABLE |
4469 I965_RCPB_CLOCK_GATE_DISABLE |
4470 I965_ISC_CLOCK_GATE_DISABLE |
4471 I965_FBC_CLOCK_GATE_DISABLE);
4472 I915_WRITE(RENCLK_GATE_D2, 0);
4473 I915_WRITE(MI_ARB_STATE,
4474 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
4475 }
4476
4477 static void gen3_init_clock_gating(struct drm_device *dev)
4478 {
4479 struct drm_i915_private *dev_priv = dev->dev_private;
4480 u32 dstate = I915_READ(D_STATE);
4481
4482 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
4483 DSTATE_DOT_CLOCK_GATING;
4484 I915_WRITE(D_STATE, dstate);
4485
4486 if (IS_PINEVIEW(dev))
4487 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
4488
4489 /* IIR "flip pending" means done if this bit is set */
4490 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
4491 }
4492
4493 static void i85x_init_clock_gating(struct drm_device *dev)
4494 {
4495 struct drm_i915_private *dev_priv = dev->dev_private;
4496
4497 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
4498 }
4499
4500 static void i830_init_clock_gating(struct drm_device *dev)
4501 {
4502 struct drm_i915_private *dev_priv = dev->dev_private;
4503
4504 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
4505 }
4506
4507 void intel_init_clock_gating(struct drm_device *dev)
4508 {
4509 struct drm_i915_private *dev_priv = dev->dev_private;
4510
4511 dev_priv->display.init_clock_gating(dev);
4512 }
4513
4514 void intel_suspend_hw(struct drm_device *dev)
4515 {
4516 if (HAS_PCH_LPT(dev))
4517 lpt_suspend_hw(dev);
4518 }
4519
4520 /**
4521 * We should only use the power well if we explicitly asked the hardware to
4522 * enable it, so check if it's enabled and also check if we've requested it to
4523 * be enabled.
4524 */
4525 bool intel_display_power_enabled(struct drm_device *dev,
4526 enum intel_display_power_domain domain)
4527 {
4528 struct drm_i915_private *dev_priv = dev->dev_private;
4529
4530 if (!HAS_POWER_WELL(dev))
4531 return true;
4532
4533 switch (domain) {
4534 case POWER_DOMAIN_PIPE_A:
4535 case POWER_DOMAIN_TRANSCODER_EDP:
4536 return true;
4537 case POWER_DOMAIN_PIPE_B:
4538 case POWER_DOMAIN_PIPE_C:
4539 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
4540 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
4541 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
4542 case POWER_DOMAIN_TRANSCODER_A:
4543 case POWER_DOMAIN_TRANSCODER_B:
4544 case POWER_DOMAIN_TRANSCODER_C:
4545 return I915_READ(HSW_PWR_WELL_DRIVER) ==
4546 (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE);
4547 default:
4548 BUG();
4549 return false;
4550 }
4551 }
4552
4553 static void __intel_set_power_well(struct drm_device *dev, bool enable)
4554 {
4555 struct drm_i915_private *dev_priv = dev->dev_private;
4556 bool is_enabled, enable_requested;
4557 uint32_t tmp;
4558
4559 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
4560 is_enabled = tmp & HSW_PWR_WELL_STATE;
4561 enable_requested = tmp & HSW_PWR_WELL_ENABLE;
4562
4563 if (enable) {
4564 if (!enable_requested)
4565 I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
4566
4567 if (!is_enabled) {
4568 DRM_DEBUG_KMS("Enabling power well\n");
4569 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
4570 HSW_PWR_WELL_STATE), 20))
4571 DRM_ERROR("Timeout enabling power well\n");
4572 }
4573 } else {
4574 if (enable_requested) {
4575 unsigned long irqflags;
4576 enum pipe p;
4577
4578 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
4579 POSTING_READ(HSW_PWR_WELL_DRIVER);
4580
4581 DRM_DEBUG_KMS("Requesting to disable the power well\n");
4582 /*
4583 * After this, the registers on the pipes that are part
4584 * of the power well will become zero, so we have to
4585 * adjust our counters according to that.
4586 *
4587 * FIXME: Should we do this in general in
4588 * drm_vblank_post_modeset?
4589 */
4590 spin_lock_irqsave(&dev->vbl_lock, irqflags);
4591 for_each_pipe(p)
4592 if (p != PIPE_A)
4593 dev->last_vblank[p] = 0;
4594 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
4595 }
4596 }
4597 }
4598
4599 static struct i915_power_well *hsw_pwr;
4600
4601 /* Display audio driver power well request */
4602 void i915_request_power_well(void)
4603 {
4604 if (!hsw_pwr)
4605 return;
4606
4607 spin_lock_irq(&hsw_pwr->lock);
4608 if (!hsw_pwr->count++ &&
4609 !hsw_pwr->i915_request)
4610 __intel_set_power_well(hsw_pwr->device, true);
4611 spin_unlock_irq(&hsw_pwr->lock);
4612 }
4613
4614 /* Display audio driver power well release */
4615 void i915_release_power_well(void)
4616 {
4617 if (!hsw_pwr)
4618 return;
4619
4620 spin_lock_irq(&hsw_pwr->lock);
4621 WARN_ON(!hsw_pwr->count);
4622 if (!--hsw_pwr->count &&
4623 !hsw_pwr->i915_request)
4624 __intel_set_power_well(hsw_pwr->device, false);
4625 spin_unlock_irq(&hsw_pwr->lock);
4626 }
4627
4628 int i915_init_power_well(struct drm_device *dev)
4629 {
4630 struct drm_i915_private *dev_priv = dev->dev_private;
4631
4632 hsw_pwr = &dev_priv->power_well;
4633
4634 hsw_pwr->device = dev;
4635 spin_lock_init(&hsw_pwr->lock);
4636 hsw_pwr->count = 0;
4637
4638 return 0;
4639 }
4640
4641 void i915_remove_power_well(struct drm_device *dev)
4642 {
4643 hsw_pwr = NULL;
4644 }
4645
4646 void intel_set_power_well(struct drm_device *dev, bool enable)
4647 {
4648 struct drm_i915_private *dev_priv = dev->dev_private;
4649 struct i915_power_well *power_well = &dev_priv->power_well;
4650
4651 if (!HAS_POWER_WELL(dev))
4652 return;
4653
4654 if (!i915_disable_power_well && !enable)
4655 return;
4656
4657 spin_lock_irq(&power_well->lock);
4658 power_well->i915_request = enable;
4659
4660 /* only reject "disable" power well request */
4661 if (power_well->count && !enable) {
4662 spin_unlock_irq(&power_well->lock);
4663 return;
4664 }
4665
4666 __intel_set_power_well(dev, enable);
4667 spin_unlock_irq(&power_well->lock);
4668 }
4669
4670 /*
4671 * Starting with Haswell, we have a "Power Down Well" that can be turned off
4672 * when not needed anymore. We have 4 registers that can request the power well
4673 * to be enabled, and it will only be disabled if none of the registers is
4674 * requesting it to be enabled.
4675 */
4676 void intel_init_power_well(struct drm_device *dev)
4677 {
4678 struct drm_i915_private *dev_priv = dev->dev_private;
4679
4680 if (!HAS_POWER_WELL(dev))
4681 return;
4682
4683 /* For now, we need the power well to be always enabled. */
4684 intel_set_power_well(dev, true);
4685
4686 /* We're taking over the BIOS, so clear any requests made by it since
4687 * the driver is in charge now. */
4688 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE)
4689 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
4690 }
4691
4692 /* Set up chip specific power management-related functions */
4693 void intel_init_pm(struct drm_device *dev)
4694 {
4695 struct drm_i915_private *dev_priv = dev->dev_private;
4696
4697 if (I915_HAS_FBC(dev)) {
4698 if (HAS_PCH_SPLIT(dev)) {
4699 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
4700 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4701 dev_priv->display.enable_fbc =
4702 gen7_enable_fbc;
4703 else
4704 dev_priv->display.enable_fbc =
4705 ironlake_enable_fbc;
4706 dev_priv->display.disable_fbc = ironlake_disable_fbc;
4707 } else if (IS_GM45(dev)) {
4708 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
4709 dev_priv->display.enable_fbc = g4x_enable_fbc;
4710 dev_priv->display.disable_fbc = g4x_disable_fbc;
4711 } else if (IS_CRESTLINE(dev)) {
4712 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
4713 dev_priv->display.enable_fbc = i8xx_enable_fbc;
4714 dev_priv->display.disable_fbc = i8xx_disable_fbc;
4715 }
4716 /* 855GM needs testing */
4717 }
4718
4719 /* For cxsr */
4720 if (IS_PINEVIEW(dev))
4721 i915_pineview_get_mem_freq(dev);
4722 else if (IS_GEN5(dev))
4723 i915_ironlake_get_mem_freq(dev);
4724
4725 /* For FIFO watermark updates */
4726 if (HAS_PCH_SPLIT(dev)) {
4727 if (IS_GEN5(dev)) {
4728 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
4729 dev_priv->display.update_wm = ironlake_update_wm;
4730 else {
4731 DRM_DEBUG_KMS("Failed to get proper latency. "
4732 "Disable CxSR\n");
4733 dev_priv->display.update_wm = NULL;
4734 }
4735 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
4736 } else if (IS_GEN6(dev)) {
4737 if (SNB_READ_WM0_LATENCY()) {
4738 dev_priv->display.update_wm = sandybridge_update_wm;
4739 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
4740 } else {
4741 DRM_DEBUG_KMS("Failed to read display plane latency. "
4742 "Disable CxSR\n");
4743 dev_priv->display.update_wm = NULL;
4744 }
4745 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
4746 } else if (IS_IVYBRIDGE(dev)) {
4747 /* FIXME: detect B0+ stepping and use auto training */
4748 if (SNB_READ_WM0_LATENCY()) {
4749 dev_priv->display.update_wm = ivybridge_update_wm;
4750 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
4751 } else {
4752 DRM_DEBUG_KMS("Failed to read display plane latency. "
4753 "Disable CxSR\n");
4754 dev_priv->display.update_wm = NULL;
4755 }
4756 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
4757 } else if (IS_HASWELL(dev)) {
4758 if (I915_READ64(MCH_SSKPD)) {
4759 dev_priv->display.update_wm = haswell_update_wm;
4760 dev_priv->display.update_sprite_wm =
4761 haswell_update_sprite_wm;
4762 } else {
4763 DRM_DEBUG_KMS("Failed to read display plane latency. "
4764 "Disable CxSR\n");
4765 dev_priv->display.update_wm = NULL;
4766 }
4767 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
4768 } else
4769 dev_priv->display.update_wm = NULL;
4770 } else if (IS_VALLEYVIEW(dev)) {
4771 dev_priv->display.update_wm = valleyview_update_wm;
4772 dev_priv->display.init_clock_gating =
4773 valleyview_init_clock_gating;
4774 } else if (IS_PINEVIEW(dev)) {
4775 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
4776 dev_priv->is_ddr3,
4777 dev_priv->fsb_freq,
4778 dev_priv->mem_freq)) {
4779 DRM_INFO("failed to find known CxSR latency "
4780 "(found ddr%s fsb freq %d, mem freq %d), "
4781 "disabling CxSR\n",
4782 (dev_priv->is_ddr3 == 1) ? "3" : "2",
4783 dev_priv->fsb_freq, dev_priv->mem_freq);
4784 /* Disable CxSR and never update its watermark again */
4785 pineview_disable_cxsr(dev);
4786 dev_priv->display.update_wm = NULL;
4787 } else
4788 dev_priv->display.update_wm = pineview_update_wm;
4789 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
4790 } else if (IS_G4X(dev)) {
4791 dev_priv->display.update_wm = g4x_update_wm;
4792 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
4793 } else if (IS_GEN4(dev)) {
4794 dev_priv->display.update_wm = i965_update_wm;
4795 if (IS_CRESTLINE(dev))
4796 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
4797 else if (IS_BROADWATER(dev))
4798 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
4799 } else if (IS_GEN3(dev)) {
4800 dev_priv->display.update_wm = i9xx_update_wm;
4801 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
4802 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
4803 } else if (IS_I865G(dev)) {
4804 dev_priv->display.update_wm = i830_update_wm;
4805 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
4806 dev_priv->display.get_fifo_size = i830_get_fifo_size;
4807 } else if (IS_I85X(dev)) {
4808 dev_priv->display.update_wm = i9xx_update_wm;
4809 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
4810 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
4811 } else {
4812 dev_priv->display.update_wm = i830_update_wm;
4813 dev_priv->display.init_clock_gating = i830_init_clock_gating;
4814 if (IS_845G(dev))
4815 dev_priv->display.get_fifo_size = i845_get_fifo_size;
4816 else
4817 dev_priv->display.get_fifo_size = i830_get_fifo_size;
4818 }
4819 }
4820
4821 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
4822 {
4823 u32 gt_thread_status_mask;
4824
4825 if (IS_HASWELL(dev_priv->dev))
4826 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
4827 else
4828 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
4829
4830 /* w/a for a sporadic read returning 0 by waiting for the GT
4831 * thread to wake up.
4832 */
4833 if (wait_for_atomic((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 1))
4834 DRM_INFO("GT thread status wait timed out\n");
4835 }
4836
4837 static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
4838 {
4839 I915_WRITE_NOTRACE(FORCEWAKE, 0);
4840 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4841 }
4842
4843 static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
4844 {
4845 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
4846 FORCEWAKE_ACK_TIMEOUT_MS))
4847 DRM_INFO("Timed out waiting for forcewake old ack to clear.\n");
4848
4849 I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
4850 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4851
4852 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
4853 FORCEWAKE_ACK_TIMEOUT_MS))
4854 DRM_INFO("Timed out waiting for forcewake to ack request.\n");
4855
4856 __gen6_gt_wait_for_thread_c0(dev_priv);
4857 }
4858
4859 static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
4860 {
4861 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffffUL));
4862 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4863 }
4864
4865 static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
4866 {
4867 u32 forcewake_ack;
4868
4869 if (IS_HASWELL(dev_priv->dev))
4870 forcewake_ack = FORCEWAKE_ACK_HSW;
4871 else
4872 forcewake_ack = FORCEWAKE_MT_ACK;
4873
4874 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
4875 FORCEWAKE_ACK_TIMEOUT_MS))
4876 DRM_INFO("Timed out waiting for forcewake old ack to clear.\n");
4877
4878 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
4879 /* something from same cacheline, but !FORCEWAKE_MT */
4880 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4881
4882 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
4883 FORCEWAKE_ACK_TIMEOUT_MS))
4884 DRM_INFO("Timed out waiting for forcewake to ack request.\n");
4885
4886 __gen6_gt_wait_for_thread_c0(dev_priv);
4887 }
4888
4889 /*
4890 * Generally this is called implicitly by the register read function. However,
4891 * if some sequence requires the GT to not power down then this function should
4892 * be called at the beginning of the sequence followed by a call to
4893 * gen6_gt_force_wake_put() at the end of the sequence.
4894 */
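/* A minimal usage sketch of the pairing described above:
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	... register sequence that must not race with the GT
 *	    powering down ...
 *	gen6_gt_force_wake_put(dev_priv);
 */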
4895 void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
4896 {
4897 unsigned long irqflags;
4898
4899 spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
4900 if (dev_priv->forcewake_count++ == 0)
4901 dev_priv->gt.force_wake_get(dev_priv);
4902 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
4903 }
4904
4905 void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
4906 {
4907 u32 gtfifodbg;
4908 gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
4909 if (gtfifodbg & GT_FIFO_CPU_ERROR_MASK) {
4910 DRM_ERROR("MMIO read or write has been dropped %x\n", gtfifodbg);
4911 I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
4912 }
4913 }
4914
4915 static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
4916 {
4917 I915_WRITE_NOTRACE(FORCEWAKE, 0);
4918 /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
4919 POSTING_READ(ECOBUS);
4920 gen6_gt_check_fifodbg(dev_priv);
4921 }
4922
4923 static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
4924 {
4925 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
4926 /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
4927 POSTING_READ(ECOBUS);
4928 gen6_gt_check_fifodbg(dev_priv);
4929 }
4930
4931 /*
4932 * see gen6_gt_force_wake_get()
4933 */
4934 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
4935 {
4936 unsigned long irqflags;
4937
4938 spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
4939 if (--dev_priv->forcewake_count == 0)
4940 dev_priv->gt.force_wake_put(dev_priv);
4941 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
4942 }
4943
4944 int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
4945 {
4946 int ret = 0;
4947
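/* Each posted write consumes a GT FIFO entry; if our cached count
 * falls below the reserved watermark, poll the free-entry register
 * until the hardware drains before allowing more writes. */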
4948 if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
4949 int loop = 500;
4950 u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
4951 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
4952 udelay(10);
4953 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
4954 }
4955 if (loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES) {
4956 ++ret;
4957 WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES);
4958 }
4959 dev_priv->gt_fifo_count = fifo;
4960 }
4961 dev_priv->gt_fifo_count--;
4962
4963 return ret;
4964 }
4965
4966 static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
4967 {
4968 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffffUL));
4969 /* something from same cacheline, but !FORCEWAKE_VLV */
4970 POSTING_READ(FORCEWAKE_ACK_VLV);
4971 }
4972
4973 static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
4974 {
4975 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
4976 FORCEWAKE_ACK_TIMEOUT_MS))
4977 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4978
4979 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
4980 I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
4981 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
4982
4983 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
4984 FORCEWAKE_ACK_TIMEOUT_MS))
4985 DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
4986
4987 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
4988 FORCEWAKE_KERNEL),
4989 FORCEWAKE_ACK_TIMEOUT_MS))
4990 DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
4991
4992 __gen6_gt_wait_for_thread_c0(dev_priv);
4993 }
4994
4995 static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
4996 {
4997 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
4998 I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
4999 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5000 /* The below doubles as a POSTING_READ */
5001 POSTING_READ(FORCEWAKE_ACK_VLV);
5002 gen6_gt_check_fifodbg(dev_priv);
5003 }
5004
5005 void intel_gt_sanitize(struct drm_device *dev)
5006 {
5007 struct drm_i915_private *dev_priv = dev->dev_private;
5008
5009 if (IS_VALLEYVIEW(dev)) {
5010 vlv_force_wake_reset(dev_priv);
5011 } else if (INTEL_INFO(dev)->gen >= 6) {
5012 __gen6_gt_force_wake_reset(dev_priv);
5013 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
5014 __gen6_gt_force_wake_mt_reset(dev_priv);
5015 }
5016
5017 /* BIOS often leaves RC6 enabled, but disable it for hw init */
5018 if (INTEL_INFO(dev)->gen >= 6)
5019 intel_disable_gt_powersave(dev);
5020 }

void intel_gt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->gt.force_wake_get = vlv_force_wake_get;
		dev_priv->gt.force_wake_put = vlv_force_wake_put;
	} else if (IS_HASWELL(dev)) {
		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here: if the BIOS hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero, which the test below
		 * (correctly) interprets as MT forcewake being
		 * disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen6_gt_force_wake_mt_get(dev_priv);
		ecobus = I915_READ_NOTRACE(ECOBUS);
		__gen6_gt_force_wake_mt_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->gt.force_wake_get =
			    __gen6_gt_force_wake_mt_get;
			dev_priv->gt.force_wake_put =
			    __gen6_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge; this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
			dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
	}
}
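
/*
 * For reference, a minimal sketch of how the vtable hooks installed above
 * get invoked: the get side of the refcount (defined earlier in this file)
 * mirrors gen6_gt_force_wake_put() shown before intel_gt_sanitize().  This
 * is an illustration under that assumption, not the authoritative
 * implementation.
 */
#if 0	/* example only */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	if (dev_priv->forcewake_count++ == 0)
		dev_priv->gt.force_wake_get(dev_priv);	/* vlv/mt/gen6 hook */
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
#endif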

void intel_pm_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->rps.delayed_resume_work, intel_gen6_powersave_work);
	setup_timer(&dev_priv->rps.delayed_resume_timer, intel_gen6_powersave_work_timer,
	    (void *)dev);
}

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	/* *val carries the request parameter in and the result out */
	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
	    500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
	    500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
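
/*
 * Illustrative sketch of an assumed caller, not part of this file: both
 * mailbox helpers must run under rps.hw_lock, and a read passes its
 * request parameter in through *val.  SOME_PCODE_MBOX and the zero
 * request word are made-up placeholders, not real mailbox IDs.
 */
#if 0	/* example only */
static int pcode_query_example(struct drm_i915_private *dev_priv)
{
	u32 val = 0;	/* request parameter in, result out */
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv, SOME_PCODE_MBOX, &val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret ? ret : (int)val;
}
#endif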

int vlv_gpu_freq(int ddr_freq, int val)
{
	int mult, base;

	switch (ddr_freq) {
	case 800:
		mult = 20;
		base = 120;
		break;
	case 1066:
		mult = 22;
		base = 133;
		break;
	case 1333:
		mult = 21;
		base = 125;
		break;
	default:
		return -1;
	}

	return ((val - 0xbd) * mult) + base;
}
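
/*
 * Worked example of the mapping above (1066 MHz DDR, so mult = 22 and
 * base = 133): an opcode of 0xc0 decodes to (0xc0 - 0xbd) * 22 + 133 =
 * 3 * 22 + 133 = 199.  vlv_freq_opcode() below is the inverse mapping.
 */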

int vlv_freq_opcode(int ddr_freq, int val)
{
	int mult, base;

	switch (ddr_freq) {
	case 800:
		mult = 20;
		base = 120;
		break;
	case 1066:
		mult = 22;
		base = 133;
		break;
	case 1333:
		mult = 21;
		base = 125;
		break;
	default:
		return -1;
	}

	val /= mult;
	val -= base / mult;
	val += 0xbd;

	if (val > 0xea)
		val = 0xea;

	return val;
}
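
/*
 * Illustrative round-trip check (example only, not part of this file):
 * for 1066 MHz DDR, vlv_gpu_freq(1066, 0xc0) yields 199, and
 * vlv_freq_opcode(1066, 199) recovers 0xc0 (199 / 22 = 9, minus
 * 133 / 22 = 6, plus 0xbd = 0xc0), with results clamped at 0xea.
 */
#if 0	/* example only */
static void vlv_freq_selftest(void)
{
	int freq = vlv_gpu_freq(1066, 0xc0);		/* 199 */
	int opcode = vlv_freq_opcode(1066, freq);	/* back to 0xc0 */

	WARN_ON(opcode != 0xc0);
}
#endif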