intel_display.c revision 282199
1/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 *	Eric Anholt <eric@anholt.net>
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_display.c 282199 2015-04-28 19:35:05Z dumbbell $");
29
30#include <dev/drm2/drmP.h>
31#include <dev/drm2/drm.h>
32#include <dev/drm2/i915/i915_drm.h>
33#include <dev/drm2/i915/i915_drv.h>
34#include <dev/drm2/i915/intel_drv.h>
35#include <dev/drm2/drm_edid.h>
36#include <dev/drm2/drm_dp_helper.h>
37#include <dev/drm2/drm_crtc_helper.h>
38#include <sys/limits.h>
39
40#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
41
42bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
43static void intel_increase_pllclock(struct drm_crtc *crtc);
44static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
45
46typedef struct {
47	/* given values */
48	int n;
49	int m1, m2;
50	int p1, p2;
51	/* derived values */
52	int	dot;
53	int	vco;
54	int	m;
55	int	p;
56} intel_clock_t;
57
58typedef struct {
59	int	min, max;
60} intel_range_t;
61
62typedef struct {
63	int	dot_limit;
64	int	p2_slow, p2_fast;
65} intel_p2_t;
66
67#define INTEL_P2_NUM		      2
68typedef struct intel_limit intel_limit_t;
69struct intel_limit {
70	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
71	intel_p2_t	    p2;
72	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
73			int, int, intel_clock_t *, intel_clock_t *);
74};
75
76/* FDI */
77#define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */
78
79static bool
80intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
81		    int target, int refclk, intel_clock_t *match_clock,
82		    intel_clock_t *best_clock);
83static bool
84intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
85			int target, int refclk, intel_clock_t *match_clock,
86			intel_clock_t *best_clock);
87
88static bool
89intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
90		      int target, int refclk, intel_clock_t *match_clock,
91		      intel_clock_t *best_clock);
92static bool
93intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
94			   int target, int refclk, intel_clock_t *match_clock,
95			   intel_clock_t *best_clock);
96
97static inline u32 /* units of 100MHz */
98intel_fdi_link_freq(struct drm_device *dev)
99{
100	if (IS_GEN5(dev)) {
101		struct drm_i915_private *dev_priv = dev->dev_private;
102		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
103	} else
104		return 27;
105}
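/*
 * Worked example (added for illustration, not from the original source):
 * on Ironlake (gen 5) the FDI link frequency comes from the BIOS-programmed
 * feedback divider in FDI_PLL_BIOS_0.  Assuming the BIOS programmed 25
 * there, the function returns 25 + 2 = 27, i.e. 27 * 100 MHz = 2.7 GHz,
 * which matches IRONLAKE_FDI_FREQ (2700000 kHz) above.  Non-gen-5 parts
 * simply report the fixed 2.7 GHz link.
 */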
106
107static const intel_limit_t intel_limits_i8xx_dvo = {
108	.dot = { .min = 25000, .max = 350000 },
109	.vco = { .min = 930000, .max = 1400000 },
110	.n = { .min = 3, .max = 16 },
111	.m = { .min = 96, .max = 140 },
112	.m1 = { .min = 18, .max = 26 },
113	.m2 = { .min = 6, .max = 16 },
114	.p = { .min = 4, .max = 128 },
115	.p1 = { .min = 2, .max = 33 },
116	.p2 = { .dot_limit = 165000,
117		.p2_slow = 4, .p2_fast = 2 },
118	.find_pll = intel_find_best_PLL,
119};
120
121static const intel_limit_t intel_limits_i8xx_lvds = {
122	.dot = { .min = 25000, .max = 350000 },
123	.vco = { .min = 930000, .max = 1400000 },
124	.n = { .min = 3, .max = 16 },
125	.m = { .min = 96, .max = 140 },
126	.m1 = { .min = 18, .max = 26 },
127	.m2 = { .min = 6, .max = 16 },
128	.p = { .min = 4, .max = 128 },
129	.p1 = { .min = 1, .max = 6 },
130	.p2 = { .dot_limit = 165000,
131		.p2_slow = 14, .p2_fast = 7 },
132	.find_pll = intel_find_best_PLL,
133};
134
135static const intel_limit_t intel_limits_i9xx_sdvo = {
136	.dot = { .min = 20000, .max = 400000 },
137	.vco = { .min = 1400000, .max = 2800000 },
138	.n = { .min = 1, .max = 6 },
139	.m = { .min = 70, .max = 120 },
140	.m1 = { .min = 10, .max = 22 },
141	.m2 = { .min = 5, .max = 9 },
142	.p = { .min = 5, .max = 80 },
143	.p1 = { .min = 1, .max = 8 },
144	.p2 = { .dot_limit = 200000,
145		.p2_slow = 10, .p2_fast = 5 },
146	.find_pll = intel_find_best_PLL,
147};
148
149static const intel_limit_t intel_limits_i9xx_lvds = {
150	.dot = { .min = 20000, .max = 400000 },
151	.vco = { .min = 1400000, .max = 2800000 },
152	.n = { .min = 1, .max = 6 },
153	.m = { .min = 70, .max = 120 },
154	.m1 = { .min = 10, .max = 22 },
155	.m2 = { .min = 5, .max = 9 },
156	.p = { .min = 7, .max = 98 },
157	.p1 = { .min = 1, .max = 8 },
158	.p2 = { .dot_limit = 112000,
159		.p2_slow = 14, .p2_fast = 7 },
160	.find_pll = intel_find_best_PLL,
161};
162
163
164static const intel_limit_t intel_limits_g4x_sdvo = {
165	.dot = { .min = 25000, .max = 270000 },
166	.vco = { .min = 1750000, .max = 3500000},
167	.n = { .min = 1, .max = 4 },
168	.m = { .min = 104, .max = 138 },
169	.m1 = { .min = 17, .max = 23 },
170	.m2 = { .min = 5, .max = 11 },
171	.p = { .min = 10, .max = 30 },
172	.p1 = { .min = 1, .max = 3},
173	.p2 = { .dot_limit = 270000,
174		.p2_slow = 10,
175		.p2_fast = 10
176	},
177	.find_pll = intel_g4x_find_best_PLL,
178};
179
180static const intel_limit_t intel_limits_g4x_hdmi = {
181	.dot = { .min = 22000, .max = 400000 },
182	.vco = { .min = 1750000, .max = 3500000},
183	.n = { .min = 1, .max = 4 },
184	.m = { .min = 104, .max = 138 },
185	.m1 = { .min = 16, .max = 23 },
186	.m2 = { .min = 5, .max = 11 },
187	.p = { .min = 5, .max = 80 },
188	.p1 = { .min = 1, .max = 8},
189	.p2 = { .dot_limit = 165000,
190		.p2_slow = 10, .p2_fast = 5 },
191	.find_pll = intel_g4x_find_best_PLL,
192};
193
194static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
195	.dot = { .min = 20000, .max = 115000 },
196	.vco = { .min = 1750000, .max = 3500000 },
197	.n = { .min = 1, .max = 3 },
198	.m = { .min = 104, .max = 138 },
199	.m1 = { .min = 17, .max = 23 },
200	.m2 = { .min = 5, .max = 11 },
201	.p = { .min = 28, .max = 112 },
202	.p1 = { .min = 2, .max = 8 },
203	.p2 = { .dot_limit = 0,
204		.p2_slow = 14, .p2_fast = 14
205	},
206	.find_pll = intel_g4x_find_best_PLL,
207};
208
209static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
210	.dot = { .min = 80000, .max = 224000 },
211	.vco = { .min = 1750000, .max = 3500000 },
212	.n = { .min = 1, .max = 3 },
213	.m = { .min = 104, .max = 138 },
214	.m1 = { .min = 17, .max = 23 },
215	.m2 = { .min = 5, .max = 11 },
216	.p = { .min = 14, .max = 42 },
217	.p1 = { .min = 2, .max = 6 },
218	.p2 = { .dot_limit = 0,
219		.p2_slow = 7, .p2_fast = 7
220	},
221	.find_pll = intel_g4x_find_best_PLL,
222};
223
224static const intel_limit_t intel_limits_g4x_display_port = {
225	.dot = { .min = 161670, .max = 227000 },
226	.vco = { .min = 1750000, .max = 3500000},
227	.n = { .min = 1, .max = 2 },
228	.m = { .min = 97, .max = 108 },
229	.m1 = { .min = 0x10, .max = 0x12 },
230	.m2 = { .min = 0x05, .max = 0x06 },
231	.p = { .min = 10, .max = 20 },
232	.p1 = { .min = 1, .max = 2},
233	.p2 = { .dot_limit = 0,
234		.p2_slow = 10, .p2_fast = 10 },
235	.find_pll = intel_find_pll_g4x_dp,
236};
237
238static const intel_limit_t intel_limits_pineview_sdvo = {
239	.dot = { .min = 20000, .max = 400000},
240	.vco = { .min = 1700000, .max = 3500000 },
241	/* Pineview's Ncounter is a ring counter */
242	.n = { .min = 3, .max = 6 },
243	.m = { .min = 2, .max = 256 },
244	/* Pineview only has one combined m divider, which we treat as m2. */
245	.m1 = { .min = 0, .max = 0 },
246	.m2 = { .min = 0, .max = 254 },
247	.p = { .min = 5, .max = 80 },
248	.p1 = { .min = 1, .max = 8 },
249	.p2 = { .dot_limit = 200000,
250		.p2_slow = 10, .p2_fast = 5 },
251	.find_pll = intel_find_best_PLL,
252};
253
254static const intel_limit_t intel_limits_pineview_lvds = {
255	.dot = { .min = 20000, .max = 400000 },
256	.vco = { .min = 1700000, .max = 3500000 },
257	.n = { .min = 3, .max = 6 },
258	.m = { .min = 2, .max = 256 },
259	.m1 = { .min = 0, .max = 0 },
260	.m2 = { .min = 0, .max = 254 },
261	.p = { .min = 7, .max = 112 },
262	.p1 = { .min = 1, .max = 8 },
263	.p2 = { .dot_limit = 112000,
264		.p2_slow = 14, .p2_fast = 14 },
265	.find_pll = intel_find_best_PLL,
266};
267
268/* Ironlake / Sandybridge
269 *
270 * We calculate clock using (register_value + 2) for N/M1/M2, so here
271 * the range value for them is (actual_value - 2).
272 */
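/*
 * Added example of the (register_value + 2) encoding described above,
 * using intel_limits_ironlake_dac below: the register-level minimums
 * n = 1 and m1 = 12 stand for actual dividers of 3 and 14, which is why
 * intel_clock() computes m = 5 * (m1 + 2) + (m2 + 2) and divides the
 * refclk by (n + 2).
 */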
273static const intel_limit_t intel_limits_ironlake_dac = {
274	.dot = { .min = 25000, .max = 350000 },
275	.vco = { .min = 1760000, .max = 3510000 },
276	.n = { .min = 1, .max = 5 },
277	.m = { .min = 79, .max = 127 },
278	.m1 = { .min = 12, .max = 22 },
279	.m2 = { .min = 5, .max = 9 },
280	.p = { .min = 5, .max = 80 },
281	.p1 = { .min = 1, .max = 8 },
282	.p2 = { .dot_limit = 225000,
283		.p2_slow = 10, .p2_fast = 5 },
284	.find_pll = intel_g4x_find_best_PLL,
285};
286
287static const intel_limit_t intel_limits_ironlake_single_lvds = {
288	.dot = { .min = 25000, .max = 350000 },
289	.vco = { .min = 1760000, .max = 3510000 },
290	.n = { .min = 1, .max = 3 },
291	.m = { .min = 79, .max = 118 },
292	.m1 = { .min = 12, .max = 22 },
293	.m2 = { .min = 5, .max = 9 },
294	.p = { .min = 28, .max = 112 },
295	.p1 = { .min = 2, .max = 8 },
296	.p2 = { .dot_limit = 225000,
297		.p2_slow = 14, .p2_fast = 14 },
298	.find_pll = intel_g4x_find_best_PLL,
299};
300
301static const intel_limit_t intel_limits_ironlake_dual_lvds = {
302	.dot = { .min = 25000, .max = 350000 },
303	.vco = { .min = 1760000, .max = 3510000 },
304	.n = { .min = 1, .max = 3 },
305	.m = { .min = 79, .max = 127 },
306	.m1 = { .min = 12, .max = 22 },
307	.m2 = { .min = 5, .max = 9 },
308	.p = { .min = 14, .max = 56 },
309	.p1 = { .min = 2, .max = 8 },
310	.p2 = { .dot_limit = 225000,
311		.p2_slow = 7, .p2_fast = 7 },
312	.find_pll = intel_g4x_find_best_PLL,
313};
314
315/* LVDS 100MHz refclk limits. */
316static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
317	.dot = { .min = 25000, .max = 350000 },
318	.vco = { .min = 1760000, .max = 3510000 },
319	.n = { .min = 1, .max = 2 },
320	.m = { .min = 79, .max = 126 },
321	.m1 = { .min = 12, .max = 22 },
322	.m2 = { .min = 5, .max = 9 },
323	.p = { .min = 28, .max = 112 },
324	.p1 = { .min = 2, .max = 8 },
325	.p2 = { .dot_limit = 225000,
326		.p2_slow = 14, .p2_fast = 14 },
327	.find_pll = intel_g4x_find_best_PLL,
328};
329
330static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
331	.dot = { .min = 25000, .max = 350000 },
332	.vco = { .min = 1760000, .max = 3510000 },
333	.n = { .min = 1, .max = 3 },
334	.m = { .min = 79, .max = 126 },
335	.m1 = { .min = 12, .max = 22 },
336	.m2 = { .min = 5, .max = 9 },
337	.p = { .min = 14, .max = 42 },
338	.p1 = { .min = 2, .max = 6 },
339	.p2 = { .dot_limit = 225000,
340		.p2_slow = 7, .p2_fast = 7 },
341	.find_pll = intel_g4x_find_best_PLL,
342};
343
344static const intel_limit_t intel_limits_ironlake_display_port = {
345	.dot = { .min = 25000, .max = 350000 },
346	.vco = { .min = 1760000, .max = 3510000},
347	.n = { .min = 1, .max = 2 },
348	.m = { .min = 81, .max = 90 },
349	.m1 = { .min = 12, .max = 22 },
350	.m2 = { .min = 5, .max = 9 },
351	.p = { .min = 10, .max = 20 },
352	.p1 = { .min = 1, .max = 2},
353	.p2 = { .dot_limit = 0,
354		.p2_slow = 10, .p2_fast = 10 },
355	.find_pll = intel_find_pll_ironlake_dp,
356};
357
358u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
359{
360	u32 val = 0;
361
362	mtx_lock(&dev_priv->dpio_lock);
363	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
364		DRM_ERROR("DPIO idle wait timed out\n");
365		goto out_unlock;
366	}
367
368	I915_WRITE(DPIO_REG, reg);
369	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
370		   DPIO_BYTE);
371	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
372		DRM_ERROR("DPIO read wait timed out\n");
373		goto out_unlock;
374	}
375	val = I915_READ(DPIO_DATA);
376
377out_unlock:
378	mtx_unlock(&dev_priv->dpio_lock);
379	return val;
380}
381
382#if 0
383static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
384			     u32 val)
385{
386
387	mtx_lock(&dev_priv->dpio_lock);
388	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
389		DRM_ERROR("DPIO idle wait timed out\n");
390		goto out_unlock;
391	}
392
393	I915_WRITE(DPIO_DATA, val);
394	I915_WRITE(DPIO_REG, reg);
395	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
396		   DPIO_BYTE);
397	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
398		DRM_ERROR("DPIO write wait timed out\n");
399
400out_unlock:
401	mtx_unlock(&dev_priv->dpio_lock);
402}
403#endif
404
405static void vlv_init_dpio(struct drm_device *dev)
406{
407	struct drm_i915_private *dev_priv = dev->dev_private;
408
409	/* Reset the DPIO config */
410	I915_WRITE(DPIO_CTL, 0);
411	POSTING_READ(DPIO_CTL);
412	I915_WRITE(DPIO_CTL, 1);
413	POSTING_READ(DPIO_CTL);
414}
415
416static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
417{
418	DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
419	return 1;
420}
421
422static const struct dmi_system_id intel_dual_link_lvds[] = {
423	{
424		.callback = intel_dual_link_lvds_callback,
425		.ident = "Apple MacBook Pro (Core i5/i7 Series)",
426		.matches = {
427			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
428			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
429		},
430	},
431	{ }	/* terminating entry */
432};
433
434static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
435			      unsigned int reg)
436{
437	unsigned int val;
438
439	/* use the module option value if specified */
440	if (i915_lvds_channel_mode > 0)
441		return i915_lvds_channel_mode == 2;
442
443	if (dmi_check_system(intel_dual_link_lvds))
444		return true;
445
446	if (dev_priv->lvds_val)
447		val = dev_priv->lvds_val;
448	else {
449		/* BIOS should set the proper LVDS register value at boot, but
450		 * in reality, it doesn't set the value when the lid is closed;
451		 * we need to check "the value to be set" in VBT when LVDS
452		 * register is uninitialized.
453		 */
454		val = I915_READ(reg);
455		if (!(val & ~LVDS_DETECTED))
456			val = dev_priv->bios_lvds_val;
457		dev_priv->lvds_val = val;
458	}
459	return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
460}
461
462static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
463						int refclk)
464{
465	struct drm_device *dev = crtc->dev;
466	struct drm_i915_private *dev_priv = dev->dev_private;
467	const intel_limit_t *limit;
468
469	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
470		if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
471			/* LVDS dual channel */
472			if (refclk == 100000)
473				limit = &intel_limits_ironlake_dual_lvds_100m;
474			else
475				limit = &intel_limits_ironlake_dual_lvds;
476		} else {
477			if (refclk == 100000)
478				limit = &intel_limits_ironlake_single_lvds_100m;
479			else
480				limit = &intel_limits_ironlake_single_lvds;
481		}
482	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
483			HAS_eDP)
484		limit = &intel_limits_ironlake_display_port;
485	else
486		limit = &intel_limits_ironlake_dac;
487
488	return limit;
489}
490
491static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
492{
493	struct drm_device *dev = crtc->dev;
494	struct drm_i915_private *dev_priv = dev->dev_private;
495	const intel_limit_t *limit;
496
497	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
498		if (is_dual_link_lvds(dev_priv, LVDS))
499			/* LVDS with dual channel */
500			limit = &intel_limits_g4x_dual_channel_lvds;
501		else
502			/* LVDS with single channel */
503			limit = &intel_limits_g4x_single_channel_lvds;
504	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
505		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
506		limit = &intel_limits_g4x_hdmi;
507	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
508		limit = &intel_limits_g4x_sdvo;
509	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
510		limit = &intel_limits_g4x_display_port;
511	} else /* The option is for other outputs */
512		limit = &intel_limits_i9xx_sdvo;
513
514	return limit;
515}
516
517static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
518{
519	struct drm_device *dev = crtc->dev;
520	const intel_limit_t *limit;
521
522	if (HAS_PCH_SPLIT(dev))
523		limit = intel_ironlake_limit(crtc, refclk);
524	else if (IS_G4X(dev)) {
525		limit = intel_g4x_limit(crtc);
526	} else if (IS_PINEVIEW(dev)) {
527		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
528			limit = &intel_limits_pineview_lvds;
529		else
530			limit = &intel_limits_pineview_sdvo;
531	} else if (!IS_GEN2(dev)) {
532		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
533			limit = &intel_limits_i9xx_lvds;
534		else
535			limit = &intel_limits_i9xx_sdvo;
536	} else {
537		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
538			limit = &intel_limits_i8xx_lvds;
539		else
540			limit = &intel_limits_i8xx_dvo;
541	}
542	return limit;
543}
544
545/* m1 is reserved as 0 in Pineview, n is a ring counter */
546static void pineview_clock(int refclk, intel_clock_t *clock)
547{
548	clock->m = clock->m2 + 2;
549	clock->p = clock->p1 * clock->p2;
550	clock->vco = refclk * clock->m / clock->n;
551	clock->dot = clock->vco / clock->p;
552}
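/*
 * Illustrative numbers for the Pineview formula above (an added sketch;
 * a 96 MHz, i.e. 96000 kHz, refclk is assumed): m2 = 104, n = 4, p1 = 2
 * and p2 = 10 give m = 106, vco = 96000 * 106 / 4 = 2544000 kHz and
 * dot = 2544000 / (2 * 10) = 127200 kHz, all inside
 * intel_limits_pineview_sdvo.
 */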
553
554static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
555{
556	if (IS_PINEVIEW(dev)) {
557		pineview_clock(refclk, clock);
558		return;
559	}
560	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
561	clock->p = clock->p1 * clock->p2;
562	clock->vco = refclk * clock->m / (clock->n + 2);
563	clock->dot = clock->vco / clock->p;
564}
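/*
 * Added example for the non-Pineview path (values picked from
 * intel_limits_i9xx_sdvo, a 96000 kHz refclk is assumed): m1 = 10 and
 * m2 = 8 give m = 5 * 12 + 10 = 70; n = 1 gives
 * vco = 96000 * 70 / 3 = 2240000 kHz; p1 = 2 and p2 = 10 give
 * dot = 2240000 / 20 = 112000 kHz, all within the i9xx limits.
 */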
565
566/**
567 * Returns whether any output on the specified pipe is of the specified type
568 */
569bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
570{
571	struct drm_device *dev = crtc->dev;
572	struct drm_mode_config *mode_config = &dev->mode_config;
573	struct intel_encoder *encoder;
574
575	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
576		if (encoder->base.crtc == crtc && encoder->type == type)
577			return true;
578
579	return false;
580}
581
582#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
583/**
584 * Returns whether the given set of divisors are valid for a given refclk with
585 * the given connectors.
586 */
587
588static bool intel_PLL_is_valid(struct drm_device *dev,
589			       const intel_limit_t *limit,
590			       const intel_clock_t *clock)
591{
592	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
593		INTELPllInvalid("p1 out of range\n");
594	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
595		INTELPllInvalid("p out of range\n");
596	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
597		INTELPllInvalid("m2 out of range\n");
598	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
599		INTELPllInvalid("m1 out of range\n");
600	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
601		INTELPllInvalid("m1 <= m2\n");
602	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
603		INTELPllInvalid("m out of range\n");
604	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
605		INTELPllInvalid("n out of range\n");
606	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
607		INTELPllInvalid("vco out of range\n");
608	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
609	 * connector, etc., rather than just a single range.
610	 */
611	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
612		INTELPllInvalid("dot out of range\n");
613
614	return true;
615}
616
617static bool
618intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
619		    int target, int refclk, intel_clock_t *match_clock,
620		    intel_clock_t *best_clock)
621
622{
623	struct drm_device *dev = crtc->dev;
624	struct drm_i915_private *dev_priv = dev->dev_private;
625	intel_clock_t clock;
626	int err = target;
627
628	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
629	    (I915_READ(LVDS)) != 0) {
630		/*
631		 * For LVDS, if the panel is on, just rely on its current
632		 * settings for dual-channel.  We haven't figured out how to
633		 * reliably set up different single/dual channel state, if we
634		 * even can.
635		 */
636		if (is_dual_link_lvds(dev_priv, LVDS))
637			clock.p2 = limit->p2.p2_fast;
638		else
639			clock.p2 = limit->p2.p2_slow;
640	} else {
641		if (target < limit->p2.dot_limit)
642			clock.p2 = limit->p2.p2_slow;
643		else
644			clock.p2 = limit->p2.p2_fast;
645	}
646
647	memset(best_clock, 0, sizeof(*best_clock));
648
649	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
650	     clock.m1++) {
651		for (clock.m2 = limit->m2.min;
652		     clock.m2 <= limit->m2.max; clock.m2++) {
653			/* m1 is always 0 in Pineview */
654			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
655				break;
656			for (clock.n = limit->n.min;
657			     clock.n <= limit->n.max; clock.n++) {
658				for (clock.p1 = limit->p1.min;
659					clock.p1 <= limit->p1.max; clock.p1++) {
660					int this_err;
661
662					intel_clock(dev, refclk, &clock);
663					if (!intel_PLL_is_valid(dev, limit,
664								&clock))
665						continue;
666					if (match_clock &&
667					    clock.p != match_clock->p)
668						continue;
669
670					this_err = abs(clock.dot - target);
671					if (this_err < err) {
672						*best_clock = clock;
673						err = this_err;
674					}
675				}
676			}
677		}
678	}
679
680	return (err != target);
681}
682
683static bool
684intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
685			int target, int refclk, intel_clock_t *match_clock,
686			intel_clock_t *best_clock)
687{
688	struct drm_device *dev = crtc->dev;
689	struct drm_i915_private *dev_priv = dev->dev_private;
690	intel_clock_t clock;
691	int max_n;
692	bool found;
693	/* approximately equals target * 0.00585 (1/256 + 1/512 = 3/512) */
694	int err_most = (target >> 8) + (target >> 9);
695	found = false;
696
697	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
698		int lvds_reg;
699
700		if (HAS_PCH_SPLIT(dev))
701			lvds_reg = PCH_LVDS;
702		else
703			lvds_reg = LVDS;
704		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
705		    LVDS_CLKB_POWER_UP)
706			clock.p2 = limit->p2.p2_fast;
707		else
708			clock.p2 = limit->p2.p2_slow;
709	} else {
710		if (target < limit->p2.dot_limit)
711			clock.p2 = limit->p2.p2_slow;
712		else
713			clock.p2 = limit->p2.p2_fast;
714	}
715
716	memset(best_clock, 0, sizeof(*best_clock));
717	max_n = limit->n.max;
718	/* based on hardware requirement, prefer smaller n over precision */
719	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
720		/* based on hardware requirement, prefer larger m1, m2 */
721		for (clock.m1 = limit->m1.max;
722		     clock.m1 >= limit->m1.min; clock.m1--) {
723			for (clock.m2 = limit->m2.max;
724			     clock.m2 >= limit->m2.min; clock.m2--) {
725				for (clock.p1 = limit->p1.max;
726				     clock.p1 >= limit->p1.min; clock.p1--) {
727					int this_err;
728
729					intel_clock(dev, refclk, &clock);
730					if (!intel_PLL_is_valid(dev, limit,
731								&clock))
732						continue;
733					if (match_clock &&
734					    clock.p != match_clock->p)
735						continue;
736
737					this_err = abs(clock.dot - target);
738					if (this_err < err_most) {
739						*best_clock = clock;
740						err_most = this_err;
741						max_n = clock.n;
742						found = true;
743					}
744				}
745			}
746		}
747	}
748	return found;
749}
750
751static bool
752intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
753			   int target, int refclk, intel_clock_t *match_clock,
754			   intel_clock_t *best_clock)
755{
756	struct drm_device *dev = crtc->dev;
757	intel_clock_t clock;
758
759	if (target < 200000) {
760		clock.n = 1;
761		clock.p1 = 2;
762		clock.p2 = 10;
763		clock.m1 = 12;
764		clock.m2 = 9;
765	} else {
766		clock.n = 2;
767		clock.p1 = 1;
768		clock.p2 = 10;
769		clock.m1 = 14;
770		clock.m2 = 8;
771	}
772	intel_clock(dev, refclk, &clock);
773	memcpy(best_clock, &clock, sizeof(intel_clock_t));
774	return true;
775}
776
777/* DisplayPort has only two frequencies, 162MHz and 270MHz */
778static bool
779intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
780		      int target, int refclk, intel_clock_t *match_clock,
781		      intel_clock_t *best_clock)
782{
783	intel_clock_t clock;
784	if (target < 200000) {
785		clock.p1 = 2;
786		clock.p2 = 10;
787		clock.n = 2;
788		clock.m1 = 23;
789		clock.m2 = 8;
790	} else {
791		clock.p1 = 1;
792		clock.p2 = 10;
793		clock.n = 1;
794		clock.m1 = 14;
795		clock.m2 = 2;
796	}
797	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
798	clock.p = (clock.p1 * clock.p2);
799	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
800	clock.vco = 0;
801	memcpy(best_clock, &clock, sizeof(intel_clock_t));
802	return true;
803}
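/*
 * Sanity check of the fixed dividers above (added as an illustration; the
 * arithmetic follows directly from the code): the low-frequency branch
 * gives m = 5 * (23 + 2) + (8 + 2) = 135, p = 2 * 10 = 20 and
 * dot = 96000 * 135 / (2 + 2) / 20 = 162000 kHz, the 162 MHz DP link rate;
 * the other branch gives m = 84, p = 10 and
 * dot = 96000 * 84 / 3 / 10 = 268800 kHz, approximately 270 MHz.
 */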
804
805static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
806{
807	struct drm_i915_private *dev_priv = dev->dev_private;
808	u32 frame, frame_reg = PIPEFRAME(pipe);
809
810	frame = I915_READ(frame_reg);
811
812	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
813		DRM_DEBUG_KMS("vblank wait timed out\n");
814}
815
816/**
817 * intel_wait_for_vblank - wait for vblank on a given pipe
818 * @dev: drm device
819 * @pipe: pipe to wait for
820 *
821 * Wait for vblank to occur on a given pipe.  Needed for various bits of
822 * mode setting code.
823 */
824void intel_wait_for_vblank(struct drm_device *dev, int pipe)
825{
826	struct drm_i915_private *dev_priv = dev->dev_private;
827	int pipestat_reg = PIPESTAT(pipe);
828
829	if (INTEL_INFO(dev)->gen >= 5) {
830		ironlake_wait_for_vblank(dev, pipe);
831		return;
832	}
833
834	/* Clear existing vblank status. Note this will clear any other
835	 * sticky status fields as well.
836	 *
837	 * This races with i915_driver_irq_handler() with the result
838	 * that either function could miss a vblank event.  Here it is not
839	 * fatal, as we will either wait upon the next vblank interrupt or
840	 * timeout.  Generally speaking intel_wait_for_vblank() is only
841	 * called during modeset at which time the GPU should be idle and
842	 * should *not* be performing page flips and thus not waiting on
843	 * vblanks...
844	 * Currently, the result of us stealing a vblank from the irq
845	 * handler is that a single frame will be skipped during swapbuffers.
846	 */
847	I915_WRITE(pipestat_reg,
848		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
849
850	/* Wait for vblank interrupt bit to set */
851	if (_intel_wait_for(dev,
852	    I915_READ(pipestat_reg) & PIPE_VBLANK_INTERRUPT_STATUS,
853	    50, 1, "915vbl"))
854		DRM_DEBUG_KMS("vblank wait timed out\n");
855}
856
857/*
858 * intel_wait_for_pipe_off - wait for pipe to turn off
859 * @dev: drm device
860 * @pipe: pipe to wait for
861 *
862 * After disabling a pipe, we can't wait for vblank in the usual way,
863 * spinning on the vblank interrupt status bit, since we won't actually
864 * see an interrupt when the pipe is disabled.
865 *
866 * On Gen4 and above:
867 *   wait for the pipe register state bit to turn off
868 *
869 * Otherwise:
870 *   wait for the display line value to settle (it usually
871 *   ends up stopping at the start of the next frame).
872 *
873 */
874void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
875{
876	struct drm_i915_private *dev_priv = dev->dev_private;
877
878	if (INTEL_INFO(dev)->gen >= 4) {
879		int reg = PIPECONF(pipe);
880
881		/* Wait for the Pipe State to go off */
882		if (_intel_wait_for(dev,
883		    (I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 100,
884		    1, "915pip"))
885			DRM_DEBUG_KMS("pipe_off wait timed out\n");
886	} else {
887		u32 last_line, line_mask;
888		int reg = PIPEDSL(pipe);
889		unsigned long timeout = jiffies + msecs_to_jiffies(100);
890
891		if (IS_GEN2(dev))
892			line_mask = DSL_LINEMASK_GEN2;
893		else
894			line_mask = DSL_LINEMASK_GEN3;
895
896		/* Wait for the display line to settle */
897		do {
898			last_line = I915_READ(reg) & line_mask;
899			DELAY(5000);
900		} while (((I915_READ(reg) & line_mask) != last_line) &&
901			 time_after(timeout, jiffies));
902		if (time_after(jiffies, timeout))
903			DRM_DEBUG_KMS("pipe_off wait timed out\n");
904	}
905}
906
907static const char *state_string(bool enabled)
908{
909	return enabled ? "on" : "off";
910}
911
912/* Only for pre-ILK configs */
913static void assert_pll(struct drm_i915_private *dev_priv,
914		       enum pipe pipe, bool state)
915{
916	int reg;
917	u32 val;
918	bool cur_state;
919
920	reg = DPLL(pipe);
921	val = I915_READ(reg);
922	cur_state = !!(val & DPLL_VCO_ENABLE);
923	if (cur_state != state)
924		printf("PLL state assertion failure (expected %s, current %s)\n",
925		    state_string(state), state_string(cur_state));
926}
927#define assert_pll_enabled(d, p) assert_pll(d, p, true)
928#define assert_pll_disabled(d, p) assert_pll(d, p, false)
929
930/* For ILK+ */
931static void assert_pch_pll(struct drm_i915_private *dev_priv,
932			   struct intel_crtc *intel_crtc, bool state)
933{
934	int reg;
935	u32 val;
936	bool cur_state;
937
938	if (HAS_PCH_LPT(dev_priv->dev)) {
939		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
940		return;
941	}
942
943	if (!intel_crtc->pch_pll) {
944		printf("asserting PCH PLL enabled with no PLL\n");
945		return;
946	}
947
948	if (HAS_PCH_CPT(dev_priv->dev)) {
949		u32 pch_dpll;
950
951		pch_dpll = I915_READ(PCH_DPLL_SEL);
952
953		/* Make sure the selected PLL is enabled to the transcoder */
954		KASSERT(((pch_dpll >> (4 * intel_crtc->pipe)) & 8) != 0,
955		    ("transcoder %d PLL not enabled\n", intel_crtc->pipe));
956	}
957
958	reg = intel_crtc->pch_pll->pll_reg;
959	val = I915_READ(reg);
960	cur_state = !!(val & DPLL_VCO_ENABLE);
961	if (cur_state != state)
962		printf("PCH PLL state assertion failure (expected %s, current %s)\n",
963		    state_string(state), state_string(cur_state));
964}
965#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
966#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
967
968static void assert_fdi_tx(struct drm_i915_private *dev_priv,
969			  enum pipe pipe, bool state)
970{
971	int reg;
972	u32 val;
973	bool cur_state;
974
975	if (IS_HASWELL(dev_priv->dev)) {
976		/* On Haswell, DDI is used instead of FDI_TX_CTL */
977		reg = DDI_FUNC_CTL(pipe);
978		val = I915_READ(reg);
979		cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
980	} else {
981		reg = FDI_TX_CTL(pipe);
982		val = I915_READ(reg);
983		cur_state = !!(val & FDI_TX_ENABLE);
984	}
985	if (cur_state != state)
986		printf("FDI TX state assertion failure (expected %s, current %s)\n",
987		    state_string(state), state_string(cur_state));
988}
989#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
990#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
991
992static void assert_fdi_rx(struct drm_i915_private *dev_priv,
993			  enum pipe pipe, bool state)
994{
995	int reg;
996	u32 val;
997	bool cur_state;
998
999	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1000		DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
1001		return;
1002	} else {
1003		reg = FDI_RX_CTL(pipe);
1004		val = I915_READ(reg);
1005		cur_state = !!(val & FDI_RX_ENABLE);
1006	}
1007	if (cur_state != state)
1008		printf("FDI RX state assertion failure (expected %s, current %s)\n",
1009		    state_string(state), state_string(cur_state));
1010}
1011#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1012#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1013
1014static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1015				      enum pipe pipe)
1016{
1017	int reg;
1018	u32 val;
1019
1020	/* ILK FDI PLL is always enabled */
1021	if (dev_priv->info->gen == 5)
1022		return;
1023
1024	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
1025	if (IS_HASWELL(dev_priv->dev))
1026		return;
1027
1028	reg = FDI_TX_CTL(pipe);
1029	val = I915_READ(reg);
1030	if (!(val & FDI_TX_PLL_ENABLE))
1031		printf("FDI TX PLL assertion failure, should be active but is disabled\n");
1032}
1033
1034static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
1035				      enum pipe pipe)
1036{
1037	int reg;
1038	u32 val;
1039
1040	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1041		DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
1042		return;
1043	}
1044	reg = FDI_RX_CTL(pipe);
1045	val = I915_READ(reg);
1046	if (!(val & FDI_RX_PLL_ENABLE))
1047		printf("FDI RX PLL assertion failure, should be active but is disabled\n");
1048}
1049
1050static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1051				  enum pipe pipe)
1052{
1053	int pp_reg, lvds_reg;
1054	u32 val;
1055	enum pipe panel_pipe = PIPE_A;
1056	bool locked = true;
1057
1058	if (HAS_PCH_SPLIT(dev_priv->dev)) {
1059		pp_reg = PCH_PP_CONTROL;
1060		lvds_reg = PCH_LVDS;
1061	} else {
1062		pp_reg = PP_CONTROL;
1063		lvds_reg = LVDS;
1064	}
1065
1066	val = I915_READ(pp_reg);
1067	if (!(val & PANEL_POWER_ON) ||
1068	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
1069		locked = false;
1070
1071	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1072		panel_pipe = PIPE_B;
1073
1074	if (panel_pipe == pipe && locked)
1075		printf("panel assertion failure, pipe %c regs locked\n",
1076	     pipe_name(pipe));
1077}
1078
1079void assert_pipe(struct drm_i915_private *dev_priv,
1080		 enum pipe pipe, bool state)
1081{
1082	int reg;
1083	u32 val;
1084	bool cur_state;
1085
1086	/* if we need the pipe A quirk it must be always on */
1087	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
1088		state = true;
1089
1090	reg = PIPECONF(pipe);
1091	val = I915_READ(reg);
1092	cur_state = !!(val & PIPECONF_ENABLE);
1093	if (cur_state != state)
1094		printf("pipe %c assertion failure (expected %s, current %s)\n",
1095		    pipe_name(pipe), state_string(state), state_string(cur_state));
1096}
1097
1098static void assert_plane(struct drm_i915_private *dev_priv,
1099			 enum plane plane, bool state)
1100{
1101	int reg;
1102	u32 val;
1103	bool cur_state;
1104
1105	reg = DSPCNTR(plane);
1106	val = I915_READ(reg);
1107	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1108	if (cur_state != state)
1109		printf("plane %c assertion failure, (expected %s, current %s)\n",
1110		       plane_name(plane), state_string(state), state_string(cur_state));
1111}
1112
1113#define assert_plane_enabled(d, p) assert_plane(d, p, true)
1114#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1115
1116static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1117				   enum pipe pipe)
1118{
1119	int reg, i;
1120	u32 val;
1121	int cur_pipe;
1122
1123	/* Planes are fixed to pipes on ILK+ */
1124	if (HAS_PCH_SPLIT(dev_priv->dev)) {
1125		reg = DSPCNTR(pipe);
1126		val = I915_READ(reg);
1127		if ((val & DISPLAY_PLANE_ENABLE) != 0)
1128			printf("plane %c assertion failure, should be disabled but is still enabled\n",
1129			       plane_name(pipe));
1130		return;
1131	}
1132
1133	/* Need to check both planes against the pipe */
1134	for (i = 0; i < 2; i++) {
1135		reg = DSPCNTR(i);
1136		val = I915_READ(reg);
1137		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1138			DISPPLANE_SEL_PIPE_SHIFT;
1139		if ((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe)
1140			printf("plane %c assertion failure, should be off on pipe %c but is still active\n",
1141		     plane_name(i), pipe_name(pipe));
1142	}
1143}
1144
1145static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1146{
1147	u32 val;
1148	bool enabled;
1149
1150	if (HAS_PCH_LPT(dev_priv->dev)) {
1151		DRM_DEBUG_DRIVER("LPT does not have PCH refclk, skipping check\n");
1152		return;
1153	}
1154
1155	val = I915_READ(PCH_DREF_CONTROL);
1156	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1157			    DREF_SUPERSPREAD_SOURCE_MASK));
1158	if (!enabled)
1159		printf("PCH refclk assertion failure, should be active but is disabled\n");
1160}
1161
1162static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1163				       enum pipe pipe)
1164{
1165	int reg;
1166	u32 val;
1167	bool enabled;
1168
1169	reg = TRANSCONF(pipe);
1170	val = I915_READ(reg);
1171	enabled = !!(val & TRANS_ENABLE);
1172	if (enabled)
1173		printf("transcoder assertion failed, should be off on pipe %c but is still active\n",
1174	     pipe_name(pipe));
1175}
1176
1177static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1178			      enum pipe pipe, u32 val)
1179{
1180	if ((val & PORT_ENABLE) == 0)
1181		return false;
1182
1183	if (HAS_PCH_CPT(dev_priv->dev)) {
1184		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1185			return false;
1186	} else {
1187		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1188			return false;
1189	}
1190	return true;
1191}
1192
1193static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1194			      enum pipe pipe, u32 val)
1195{
1196	if ((val & LVDS_PORT_EN) == 0)
1197		return false;
1198
1199	if (HAS_PCH_CPT(dev_priv->dev)) {
1200		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1201			return false;
1202	} else {
1203		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1204			return false;
1205	}
1206	return true;
1207}
1208
1209static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1210			      enum pipe pipe, u32 val)
1211{
1212	if ((val & ADPA_DAC_ENABLE) == 0)
1213		return false;
1214	if (HAS_PCH_CPT(dev_priv->dev)) {
1215		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1216			return false;
1217	} else {
1218		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1219			return false;
1220	}
1221	return true;
1222}
1223
1224static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1225			    enum pipe pipe, u32 port_sel, u32 val)
1226{
1227	if ((val & DP_PORT_EN) == 0)
1228		return false;
1229
1230	if (HAS_PCH_CPT(dev_priv->dev)) {
1231		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1232		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1233		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1234			return false;
1235	} else {
1236		if ((val & DP_PIPE_MASK) != (pipe << 30))
1237			return false;
1238	}
1239	return true;
1240}
1241
1242static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1243				   enum pipe pipe, int reg, u32 port_sel)
1244{
1245	u32 val = I915_READ(reg);
1246	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val))
1247		printf("PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1248	     reg, pipe_name(pipe));
1249}
1250
1251static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1252				     enum pipe pipe, int reg)
1253{
1254	u32 val = I915_READ(reg);
1255	if (hdmi_pipe_enabled(dev_priv, pipe, val))
1256		printf("PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1257	     reg, pipe_name(pipe));
1258}
1259
1260static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1261				      enum pipe pipe)
1262{
1263	int reg;
1264	u32 val;
1265
1266	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1267	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1268	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1269
1270	reg = PCH_ADPA;
1271	val = I915_READ(reg);
1272	if (adpa_pipe_enabled(dev_priv, pipe, val))
1273		printf("PCH VGA enabled on transcoder %c, should be disabled\n",
1274	     pipe_name(pipe));
1275
1276	reg = PCH_LVDS;
1277	val = I915_READ(reg);
1278	if (lvds_pipe_enabled(dev_priv, pipe, val))
1279		printf("PCH LVDS enabled on transcoder %c, should be disabled\n",
1280	     pipe_name(pipe));
1281
1282	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1283	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1284	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1285}
1286
1287/**
1288 * intel_enable_pll - enable a PLL
1289 * @dev_priv: i915 private structure
1290 * @pipe: pipe PLL to enable
1291 *
1292 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1293 * make sure the PLL reg is writable first though, since the panel write
1294 * protect mechanism may be enabled.
1295 *
1296 * Note!  This is for pre-ILK only.
1297 */
1298static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1299{
1300	int reg;
1301	u32 val;
1302
1303	/* No really, not for ILK+ */
1304	KASSERT(dev_priv->info->gen < 5, ("Wrong device gen"));
1305
1306	/* PLL is protected by panel, make sure we can write it */
1307	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1308		assert_panel_unlocked(dev_priv, pipe);
1309
1310	reg = DPLL(pipe);
1311	val = I915_READ(reg);
1312	val |= DPLL_VCO_ENABLE;
1313
1314	/* We do this three times for luck */
1315	I915_WRITE(reg, val);
1316	POSTING_READ(reg);
1317	DELAY(150); /* wait for warmup */
1318	I915_WRITE(reg, val);
1319	POSTING_READ(reg);
1320	DELAY(150); /* wait for warmup */
1321	I915_WRITE(reg, val);
1322	POSTING_READ(reg);
1323	DELAY(150); /* wait for warmup */
1324}
1325
1326/**
1327 * intel_disable_pll - disable a PLL
1328 * @dev_priv: i915 private structure
1329 * @pipe: pipe PLL to disable
1330 *
1331 * Disable the PLL for @pipe, making sure the pipe is off first.
1332 *
1333 * Note!  This is for pre-ILK only.
1334 */
1335static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1336{
1337	int reg;
1338	u32 val;
1339
1340	/* Don't disable pipe A's PLL if the pipe A quirk needs it to stay on */
1341	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1342		return;
1343
1344	/* Make sure the pipe isn't still relying on us */
1345	assert_pipe_disabled(dev_priv, pipe);
1346
1347	reg = DPLL(pipe);
1348	val = I915_READ(reg);
1349	val &= ~DPLL_VCO_ENABLE;
1350	I915_WRITE(reg, val);
1351	POSTING_READ(reg);
1352}
1353
1354/* SBI access */
1355static void
1356intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
1357{
1358
1359	mtx_lock(&dev_priv->dpio_lock);
1360	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
1361				100)) {
1362		DRM_ERROR("timeout waiting for SBI to become ready\n");
1363		goto out_unlock;
1364	}
1365
1366	I915_WRITE(SBI_ADDR,
1367			(reg << 16));
1368	I915_WRITE(SBI_DATA,
1369			value);
1370	I915_WRITE(SBI_CTL_STAT,
1371			SBI_BUSY |
1372			SBI_CTL_OP_CRWR);
1373
1374	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
1375				100)) {
1376		DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
1377		goto out_unlock;
1378	}
1379
1380out_unlock:
1381	mtx_unlock(&dev_priv->dpio_lock);
1382}
1383
1384static u32
1385intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
1386{
1387	u32 value;
1388
1389	value = 0;
1390	mtx_lock(&dev_priv->dpio_lock);
1391	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
1392				100)) {
1393		DRM_ERROR("timeout waiting for SBI to become ready\n");
1394		goto out_unlock;
1395	}
1396
1397	I915_WRITE(SBI_ADDR,
1398			(reg << 16));
1399	I915_WRITE(SBI_CTL_STAT,
1400			SBI_BUSY |
1401			SBI_CTL_OP_CRRD);
1402
1403	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
1404				100)) {
1405		DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
1406		goto out_unlock;
1407	}
1408
1409	value = I915_READ(SBI_DATA);
1410
1411out_unlock:
1412	mtx_unlock(&dev_priv->dpio_lock);
1413	return value;
1414}
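/*
 * Usage sketch (added; not part of the original file): callers are
 * expected to do read-modify-write cycles through the sideband interface,
 * e.g. a PCH refclk setup path might do roughly
 *
 *	u32 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
 *	intel_sbi_write(dev_priv, SBI_SSCCTL6, tmp | SBI_SSCCTL_DISABLE);
 *
 * SBI_SSCCTL6 and SBI_SSCCTL_DISABLE are named only to illustrate the
 * pattern and may not exist under those names in this driver revision.
 */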
1415
1416/**
1417 * intel_enable_pch_pll - enable PCH PLL
1418 * @intel_crtc: crtc whose PCH PLL should be enabled (the PLL is
1419 * taken from intel_crtc->pch_pll)
1420 *
1421 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1422 * drives the transcoder clock.
1423 */
1424static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
1425{
1426	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1427	struct intel_pch_pll *pll;
1428	int reg;
1429	u32 val;
1430
1431	/* PCH PLLs only available on ILK, SNB and IVB */
1432	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
1433	pll = intel_crtc->pch_pll;
1434	if (pll == NULL)
1435		return;
1436
1437	if (pll->refcount == 0) {
1438		DRM_DEBUG_KMS("pll->refcount == 0\n");
1439		return;
1440	}
1441
1442	DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d) for crtc %d\n",
1443		      pll->pll_reg, pll->active, pll->on,
1444		      intel_crtc->base.base.id);
1445
1446	/* PCH refclock must be enabled first */
1447	assert_pch_refclk_enabled(dev_priv);
1448
1449	if (pll->active++ && pll->on) {
1450		assert_pch_pll_enabled(dev_priv, intel_crtc);
1451		return;
1452	}
1453
1454	DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
1455
1456	reg = pll->pll_reg;
1457	val = I915_READ(reg);
1458	val |= DPLL_VCO_ENABLE;
1459	I915_WRITE(reg, val);
1460	POSTING_READ(reg);
1461	DELAY(200);
1462
1463	pll->on = true;
1464}
1465
1466static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
1467{
1468	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1469	struct intel_pch_pll *pll = intel_crtc->pch_pll;
1470	int reg;
1471	u32 val;
1472
1473	/* PCH only available on ILK+ */
1474	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
1475	if (pll == NULL)
1476		return;
1477
1478	if (pll->refcount == 0) {
1479		DRM_DEBUG_KMS("pll->refcount == 0\n");
1480		return;
1481	}
1482
1483	DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
1484		      pll->pll_reg, pll->active, pll->on,
1485		      intel_crtc->base.base.id);
1486
1487	if (pll->active == 0) {
1488		DRM_DEBUG_KMS("pll->active == 0\n");
1489		assert_pch_pll_disabled(dev_priv, intel_crtc);
1490		return;
1491	}
1492
1493	if (--pll->active) {
1494		assert_pch_pll_enabled(dev_priv, intel_crtc);
1495		return;
1496	}
1497
1498	DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
1499
1500	/* Make sure transcoder isn't still depending on us */
1501	assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
1502
1503	reg = pll->pll_reg;
1504	val = I915_READ(reg);
1505	val &= ~DPLL_VCO_ENABLE;
1506	I915_WRITE(reg, val);
1507	POSTING_READ(reg);
1508	DELAY(200);
1509
1510	pll->on = false;
1511}
1512
1513static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1514				    enum pipe pipe)
1515{
1516	int reg;
1517	u32 val, pipeconf_val;
1518	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1519
1520	/* PCH only available on ILK+ */
1521	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
1522
1523	/* Make sure PCH DPLL is enabled */
1524	assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
1525
1526	/* FDI must be feeding us bits for PCH ports */
1527	assert_fdi_tx_enabled(dev_priv, pipe);
1528	assert_fdi_rx_enabled(dev_priv, pipe);
1529
1530	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1531		DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
1532		return;
1533	}
1534	reg = TRANSCONF(pipe);
1535	val = I915_READ(reg);
1536	pipeconf_val = I915_READ(PIPECONF(pipe));
1537	if (HAS_PCH_IBX(dev_priv->dev)) {
1538		/*
1539		 * make the BPC in the transcoder consistent with
1540		 * that in the pipeconf reg.
1541		 */
1542		val &= ~PIPE_BPC_MASK;
1543		val |= pipeconf_val & PIPE_BPC_MASK;
1544	}
1545
1546	val &= ~TRANS_INTERLACE_MASK;
1547	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1548		if (HAS_PCH_IBX(dev_priv->dev) &&
1549		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1550			val |= TRANS_LEGACY_INTERLACED_ILK;
1551		else
1552			val |= TRANS_INTERLACED;
1553	else
1554		val |= TRANS_PROGRESSIVE;
1555
1556	I915_WRITE(reg, val | TRANS_ENABLE);
1557	if (_intel_wait_for(dev_priv->dev, I915_READ(reg) & TRANS_STATE_ENABLE,
1558	    100, 1, "915trc"))
1559		DRM_ERROR("failed to enable transcoder %d\n", pipe);
1560}
1561
1562static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1563				     enum pipe pipe)
1564{
1565	int reg;
1566	u32 val;
1567
1568	/* FDI relies on the transcoder */
1569	assert_fdi_tx_disabled(dev_priv, pipe);
1570	assert_fdi_rx_disabled(dev_priv, pipe);
1571
1572	/* Ports must be off as well */
1573	assert_pch_ports_disabled(dev_priv, pipe);
1574
1575	reg = TRANSCONF(pipe);
1576	val = I915_READ(reg);
1577	val &= ~TRANS_ENABLE;
1578	I915_WRITE(reg, val);
1579	/* wait for PCH transcoder off, transcoder state */
1580	if (_intel_wait_for(dev_priv->dev,
1581	    (I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50,
1582	    1, "915trd"))
1583		DRM_ERROR("failed to disable transcoder %d\n", pipe);
1584}
1585
1586/**
1587 * intel_enable_pipe - enable a pipe, asserting requirements
1588 * @dev_priv: i915 private structure
1589 * @pipe: pipe to enable
1590 * @pch_port: on ILK+, is this pipe driving a PCH port or not
1591 *
1592 * Enable @pipe, making sure that various hardware specific requirements
1593 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1594 *
1595 * @pipe should be %PIPE_A or %PIPE_B.
1596 *
1597 * Will wait until the pipe is actually running (i.e. first vblank) before
1598 * returning.
1599 */
1600static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1601			      bool pch_port)
1602{
1603	int reg;
1604	u32 val;
1605
1606	/*
1607	 * A pipe without a PLL won't actually be able to drive bits from
1608	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1609	 * need the check.
1610	 */
1611	if (!HAS_PCH_SPLIT(dev_priv->dev))
1612		assert_pll_enabled(dev_priv, pipe);
1613	else {
1614		if (pch_port) {
1615			/* if driving the PCH, we need FDI enabled */
1616			assert_fdi_rx_pll_enabled(dev_priv, pipe);
1617			assert_fdi_tx_pll_enabled(dev_priv, pipe);
1618		}
1619		/* FIXME: assert CPU port conditions for SNB+ */
1620	}
1621
1622	reg = PIPECONF(pipe);
1623	val = I915_READ(reg);
1624	if (val & PIPECONF_ENABLE)
1625		return;
1626
1627	I915_WRITE(reg, val | PIPECONF_ENABLE);
1628	intel_wait_for_vblank(dev_priv->dev, pipe);
1629}
1630
1631/**
1632 * intel_disable_pipe - disable a pipe, asserting requirements
1633 * @dev_priv: i915 private structure
1634 * @pipe: pipe to disable
1635 *
1636 * Disable @pipe, making sure that various hardware specific requirements
1637 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1638 *
1639 * @pipe should be %PIPE_A or %PIPE_B.
1640 *
1641 * Will wait until the pipe has shut down before returning.
1642 */
1643static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1644			       enum pipe pipe)
1645{
1646	int reg;
1647	u32 val;
1648
1649	/*
1650	 * Make sure planes won't keep trying to pump pixels to us,
1651	 * or we might hang the display.
1652	 */
1653	assert_planes_disabled(dev_priv, pipe);
1654
1655	/* Don't disable pipe A if the pipe A quirk needs it to stay on */
1656	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1657		return;
1658
1659	reg = PIPECONF(pipe);
1660	val = I915_READ(reg);
1661	if ((val & PIPECONF_ENABLE) == 0)
1662		return;
1663
1664	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1665	intel_wait_for_pipe_off(dev_priv->dev, pipe);
1666}
1667
1668/*
1669 * Plane regs are double buffered, going from enabled->disabled needs a
1670 * trigger in order to latch.  The display address reg provides this.
1671 */
1672void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1673				      enum plane plane)
1674{
1675	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1676	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1677}
1678
1679/**
1680 * intel_enable_plane - enable a display plane on a given pipe
1681 * @dev_priv: i915 private structure
1682 * @plane: plane to enable
1683 * @pipe: pipe being fed
1684 *
1685 * Enable @plane on @pipe, making sure that @pipe is running first.
1686 */
1687static void intel_enable_plane(struct drm_i915_private *dev_priv,
1688			       enum plane plane, enum pipe pipe)
1689{
1690	int reg;
1691	u32 val;
1692
1693	/* If the pipe isn't enabled, we can't pump pixels and may hang */
1694	assert_pipe_enabled(dev_priv, pipe);
1695
1696	reg = DSPCNTR(plane);
1697	val = I915_READ(reg);
1698	if (val & DISPLAY_PLANE_ENABLE)
1699		return;
1700
1701	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1702	intel_flush_display_plane(dev_priv, plane);
1703	intel_wait_for_vblank(dev_priv->dev, pipe);
1704}
1705
1706/**
1707 * intel_disable_plane - disable a display plane
1708 * @dev_priv: i915 private structure
1709 * @plane: plane to disable
1710 * @pipe: pipe consuming the data
1711 *
1712 * Disable @plane; should be an independent operation.
1713 */
1714static void intel_disable_plane(struct drm_i915_private *dev_priv,
1715				enum plane plane, enum pipe pipe)
1716{
1717	int reg;
1718	u32 val;
1719
1720	reg = DSPCNTR(plane);
1721	val = I915_READ(reg);
1722	if ((val & DISPLAY_PLANE_ENABLE) == 0)
1723		return;
1724
1725	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1726	intel_flush_display_plane(dev_priv, plane);
1727	intel_wait_for_vblank(dev_priv->dev, pipe);
1728}
1729
1730static void disable_pch_dp(struct drm_i915_private *dev_priv,
1731			   enum pipe pipe, int reg, u32 port_sel)
1732{
1733	u32 val = I915_READ(reg);
1734	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
1735		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
1736		I915_WRITE(reg, val & ~DP_PORT_EN);
1737	}
1738}
1739
1740static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1741			     enum pipe pipe, int reg)
1742{
1743	u32 val = I915_READ(reg);
1744	if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
1745		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1746			      reg, pipe);
1747		I915_WRITE(reg, val & ~PORT_ENABLE);
1748	}
1749}
1750
1751/* Disable any ports connected to this transcoder */
1752static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1753				    enum pipe pipe)
1754{
1755	u32 reg, val;
1756
1757	val = I915_READ(PCH_PP_CONTROL);
1758	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1759
1760	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1761	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1762	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1763
1764	reg = PCH_ADPA;
1765	val = I915_READ(reg);
1766	if (adpa_pipe_enabled(dev_priv, pipe, val))
1767		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1768
1769	reg = PCH_LVDS;
1770	val = I915_READ(reg);
1771	if (lvds_pipe_enabled(dev_priv, pipe, val)) {
1772		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1773		I915_WRITE(reg, val & ~LVDS_PORT_EN);
1774		POSTING_READ(reg);
1775		DELAY(100);
1776	}
1777
1778	disable_pch_hdmi(dev_priv, pipe, HDMIB);
1779	disable_pch_hdmi(dev_priv, pipe, HDMIC);
1780	disable_pch_hdmi(dev_priv, pipe, HDMID);
1781}
1782
1783int
1784intel_pin_and_fence_fb_obj(struct drm_device *dev,
1785			   struct drm_i915_gem_object *obj,
1786			   struct intel_ring_buffer *pipelined)
1787{
1788	struct drm_i915_private *dev_priv = dev->dev_private;
1789	u32 alignment;
1790	int ret;
1791
1792	alignment = 0; /* shut gcc */
1793	switch (obj->tiling_mode) {
1794	case I915_TILING_NONE:
1795		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1796			alignment = 128 * 1024;
1797		else if (INTEL_INFO(dev)->gen >= 4)
1798			alignment = 4 * 1024;
1799		else
1800			alignment = 64 * 1024;
1801		break;
1802	case I915_TILING_X:
1803		/* pin() will align the object as required by fence */
1804		alignment = 0;
1805		break;
1806	case I915_TILING_Y:
1807		/* FIXME: Is this true? */
1808		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
1809		return -EINVAL;
1810	default:
1811		KASSERT(0, ("Wrong tiling for fb obj"));
1812	}
1813
1814	dev_priv->mm.interruptible = false;
1815	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
1816	if (ret)
1817		goto err_interruptible;
1818
1819	/* Install a fence for tiled scan-out. Pre-i965 always needs a
1820	 * fence, whereas 965+ only requires a fence if using
1821	 * framebuffer compression.  For simplicity, we always install
1822	 * a fence as the cost is not that onerous.
1823	 */
1824	ret = i915_gem_object_get_fence(obj);
1825	if (ret)
1826		goto err_unpin;
1827
1828	i915_gem_object_pin_fence(obj);
1829
1830	dev_priv->mm.interruptible = true;
1831	return 0;
1832
1833err_unpin:
1834	i915_gem_object_unpin_from_display_plane(obj);
1835err_interruptible:
1836	dev_priv->mm.interruptible = true;
1837	return ret;
1838}
1839
1840void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
1841{
1842	i915_gem_object_unpin_fence(obj);
1843	i915_gem_object_unpin_from_display_plane(obj);
1844}
1845
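/* Program a display plane to scan out of the (already pinned) GEM object
 * backing the framebuffer.  The byte offset of the (x, y) panning origin
 * is y * pitch + x * bytes-per-pixel; e.g. with a 32bpp framebuffer,
 * fb->pitches[0] = 7680, x = 8 and y = 10 (illustrative numbers),
 * Offset = 10 * 7680 + 8 * 4 = 76832.  Gen4+ parts get the surface base
 * (DSPSURF) and the x/y offset programmed separately, while older parts
 * take Start + Offset in DSPADDR.
 */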
1846static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1847			     int x, int y)
1848{
1849	struct drm_device *dev = crtc->dev;
1850	struct drm_i915_private *dev_priv = dev->dev_private;
1851	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1852	struct intel_framebuffer *intel_fb;
1853	struct drm_i915_gem_object *obj;
1854	int plane = intel_crtc->plane;
1855	unsigned long Start, Offset;
1856	u32 dspcntr;
1857	u32 reg;
1858
1859	switch (plane) {
1860	case 0:
1861	case 1:
1862		break;
1863	default:
1864		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
1865		return -EINVAL;
1866	}
1867
1868	intel_fb = to_intel_framebuffer(fb);
1869	obj = intel_fb->obj;
1870
1871	reg = DSPCNTR(plane);
1872	dspcntr = I915_READ(reg);
1873	/* Mask out pixel format bits in case we change it */
1874	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
1875	switch (fb->bits_per_pixel) {
1876	case 8:
1877		dspcntr |= DISPPLANE_8BPP;
1878		break;
1879	case 16:
1880		if (fb->depth == 15)
1881			dspcntr |= DISPPLANE_15_16BPP;
1882		else
1883			dspcntr |= DISPPLANE_16BPP;
1884		break;
1885	case 24:
1886	case 32:
1887		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
1888		break;
1889	default:
1890		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
1891		return -EINVAL;
1892	}
1893	if (INTEL_INFO(dev)->gen >= 4) {
1894		if (obj->tiling_mode != I915_TILING_NONE)
1895			dspcntr |= DISPPLANE_TILED;
1896		else
1897			dspcntr &= ~DISPPLANE_TILED;
1898	}
1899
1900	I915_WRITE(reg, dspcntr);
1901
1902	Start = obj->gtt_offset;
1903	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
1904
1905	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1906		      Start, Offset, x, y, fb->pitches[0]);
1907	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
1908	if (INTEL_INFO(dev)->gen >= 4) {
1909		I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
1910		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
1911		I915_WRITE(DSPADDR(plane), Offset);
1912	} else
1913		I915_WRITE(DSPADDR(plane), Start + Offset);
1914	POSTING_READ(reg);
1915
1916	return (0);
1917}
1918
1919static int ironlake_update_plane(struct drm_crtc *crtc,
1920				 struct drm_framebuffer *fb, int x, int y)
1921{
1922	struct drm_device *dev = crtc->dev;
1923	struct drm_i915_private *dev_priv = dev->dev_private;
1924	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1925	struct intel_framebuffer *intel_fb;
1926	struct drm_i915_gem_object *obj;
1927	int plane = intel_crtc->plane;
1928	unsigned long Start, Offset;
1929	u32 dspcntr;
1930	u32 reg;
1931
1932	switch (plane) {
1933	case 0:
1934	case 1:
1935	case 2:
1936		break;
1937	default:
1938		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
1939		return -EINVAL;
1940	}
1941
1942	intel_fb = to_intel_framebuffer(fb);
1943	obj = intel_fb->obj;
1944
1945	reg = DSPCNTR(plane);
1946	dspcntr = I915_READ(reg);
1947	/* Mask out pixel format bits in case we change it */
1948	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
1949	switch (fb->bits_per_pixel) {
1950	case 8:
1951		dspcntr |= DISPPLANE_8BPP;
1952		break;
1953	case 16:
1954		if (fb->depth != 16) {
1955			DRM_ERROR("bpp 16, depth %d\n", fb->depth);
1956			return -EINVAL;
1957		}
1958
1959		dspcntr |= DISPPLANE_16BPP;
1960		break;
1961	case 24:
1962	case 32:
1963		if (fb->depth == 24)
1964			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
1965		else if (fb->depth == 30)
1966			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
1967		else {
1968			DRM_ERROR("bpp %d depth %d\n", fb->bits_per_pixel,
1969			    fb->depth);
1970			return -EINVAL;
1971		}
1972		break;
1973	default:
1974		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
1975		return -EINVAL;
1976	}
1977
1978	if (obj->tiling_mode != I915_TILING_NONE)
1979		dspcntr |= DISPPLANE_TILED;
1980	else
1981		dspcntr &= ~DISPPLANE_TILED;
1982
1983	/* must disable */
1984	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1985
1986	I915_WRITE(reg, dspcntr);
1987
1988	Start = obj->gtt_offset;
1989	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
1990
1991	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1992		      Start, Offset, x, y, fb->pitches[0]);
1993	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
1994	I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
1995	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
1996	I915_WRITE(DSPADDR(plane), Offset);
1997	POSTING_READ(reg);
1998
1999	return 0;
2000}
2001
2002/* Assume fb object is pinned & idle & fenced and just update base pointers */
2003static int
2004intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2005			   int x, int y, enum mode_set_atomic state)
2006{
2007	struct drm_device *dev = crtc->dev;
2008	struct drm_i915_private *dev_priv = dev->dev_private;
2009
2010	if (dev_priv->display.disable_fbc)
2011		dev_priv->display.disable_fbc(dev);
2012	intel_increase_pllclock(crtc);
2013
2014	return dev_priv->display.update_plane(crtc, fb, x, y);
2015}
2016
2017static int
2018intel_finish_fb(struct drm_framebuffer *old_fb)
2019{
2020	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2021	struct drm_device *dev = obj->base.dev;
2022	struct drm_i915_private *dev_priv = dev->dev_private;
2023	bool was_interruptible = dev_priv->mm.interruptible;
2024	int ret;
2025
2026	mtx_lock(&dev->event_lock);
2027	while (!atomic_load_acq_int(&dev_priv->mm.wedged) &&
2028	    atomic_load_acq_int(&obj->pending_flip) != 0) {
2029		msleep(&obj->pending_flip, &dev->event_lock,
2030		    0, "915flp", 0);
2031	}
2032	mtx_unlock(&dev->event_lock);
2033
2034	/* Big Hammer, we also need to ensure that any pending
2035	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2036	 * current scanout is retired before unpinning the old
2037	 * framebuffer.
2038	 *
2039	 * This should only fail upon a hung GPU, in which case we
2040	 * can safely continue.
2041	 */
2042	dev_priv->mm.interruptible = false;
2043	ret = i915_gem_object_finish_gpu(obj);
2044	dev_priv->mm.interruptible = was_interruptible;
2045	return ret;
2046}
2047
2048static int
2049intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2050		    struct drm_framebuffer *old_fb)
2051{
2052	struct drm_device *dev = crtc->dev;
2053	struct drm_i915_private *dev_priv = dev->dev_private;
2054	struct drm_i915_master_private *master_priv;
2055	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2056	int ret;
2057
2058	/* no fb bound */
2059	if (!crtc->fb) {
2060		DRM_ERROR("No FB bound\n");
2061		return 0;
2062	}
2063
2064	if (intel_crtc->plane > dev_priv->num_pipe) {
2065		DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
2066				intel_crtc->plane,
2067				dev_priv->num_pipe);
2068		return -EINVAL;
2069	}
2070
2071	DRM_LOCK(dev);
2072	ret = intel_pin_and_fence_fb_obj(dev,
2073					 to_intel_framebuffer(crtc->fb)->obj,
2074					 NULL);
2075	if (ret != 0) {
2076		DRM_UNLOCK(dev);
2077		DRM_ERROR("pin & fence failed\n");
2078		return ret;
2079	}
2080
2081	if (old_fb)
2082		intel_finish_fb(old_fb);
2083
2084	ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
2085	if (ret) {
2086		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
2087		DRM_UNLOCK(dev);
2088		DRM_ERROR("failed to update base address\n");
2089		return ret;
2090	}
2091
2092	if (old_fb) {
2093		intel_wait_for_vblank(dev, intel_crtc->pipe);
2094		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2095	}
2096
2097	intel_update_fbc(dev);
2098	DRM_UNLOCK(dev);
2099
2100	if (!dev->primary->master)
2101		return 0;
2102
2103	master_priv = dev->primary->master->driver_priv;
2104	if (!master_priv->sarea_priv)
2105		return 0;
2106
2107	if (intel_crtc->pipe) {
2108		master_priv->sarea_priv->pipeB_x = x;
2109		master_priv->sarea_priv->pipeB_y = y;
2110	} else {
2111		master_priv->sarea_priv->pipeA_x = x;
2112		master_priv->sarea_priv->pipeA_y = y;
2113	}
2114
2115	return 0;
2116}
2117
2118static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
2119{
2120	struct drm_device *dev = crtc->dev;
2121	struct drm_i915_private *dev_priv = dev->dev_private;
2122	u32 dpa_ctl;
2123
2124	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
2125	dpa_ctl = I915_READ(DP_A);
2126	dpa_ctl &= ~DP_PLL_FREQ_MASK;
2127
2128	if (clock < 200000) {
2129		u32 temp;
2130		dpa_ctl |= DP_PLL_FREQ_160MHZ;
2131		/* workaround for 160MHz:
2132		   1) program 0x4600c bits 15:0 = 0x8124
2133		   2) program 0x46010 bit 0 = 1
2134		   3) program 0x46034 bit 24 = 1
2135		   4) program 0x64000 bit 14 = 1
2136		   */
2137		temp = I915_READ(0x4600c);
2138		temp &= 0xffff0000;
2139		I915_WRITE(0x4600c, temp | 0x8124);
2140
2141		temp = I915_READ(0x46010);
2142		I915_WRITE(0x46010, temp | 1);
2143
2144		temp = I915_READ(0x46034);
2145		I915_WRITE(0x46034, temp | (1 << 24));
2146	} else {
2147		dpa_ctl |= DP_PLL_FREQ_270MHZ;
2148	}
2149	I915_WRITE(DP_A, dpa_ctl);
2150
2151	POSTING_READ(DP_A);
2152	DELAY(500);
2153}
2154
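/* Switch the FDI transmitter and receiver from a training pattern to the
 * normal pixel stream once link training has completed; Ivy Bridge
 * additionally gets FS/FE error correction enabled here.
 */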
2155static void intel_fdi_normal_train(struct drm_crtc *crtc)
2156{
2157	struct drm_device *dev = crtc->dev;
2158	struct drm_i915_private *dev_priv = dev->dev_private;
2159	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2160	int pipe = intel_crtc->pipe;
2161	u32 reg, temp;
2162
2163	/* enable normal train */
2164	reg = FDI_TX_CTL(pipe);
2165	temp = I915_READ(reg);
2166	if (IS_IVYBRIDGE(dev)) {
2167		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2168		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2169	} else {
2170		temp &= ~FDI_LINK_TRAIN_NONE;
2171		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2172	}
2173	I915_WRITE(reg, temp);
2174
2175	reg = FDI_RX_CTL(pipe);
2176	temp = I915_READ(reg);
2177	if (HAS_PCH_CPT(dev)) {
2178		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2179		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2180	} else {
2181		temp &= ~FDI_LINK_TRAIN_NONE;
2182		temp |= FDI_LINK_TRAIN_NONE;
2183	}
2184	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2185
2186	/* wait one idle pattern time */
2187	POSTING_READ(reg);
2188	DELAY(1000);
2189
2190	/* IVB wants error correction enabled */
2191	if (IS_IVYBRIDGE(dev))
2192		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2193			   FDI_FE_ERRC_ENABLE);
2194}
2195
2196static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2197{
2198	struct drm_i915_private *dev_priv = dev->dev_private;
2199	u32 flags = I915_READ(SOUTH_CHICKEN1);
2200
2201	flags |= FDI_PHASE_SYNC_OVR(pipe);
2202	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2203	flags |= FDI_PHASE_SYNC_EN(pipe);
2204	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2205	POSTING_READ(SOUTH_CHICKEN1);
2206}
2207
2208/* The FDI link training functions for ILK/Ibexpeak. */
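/* Training runs in two phases: pattern 1 is transmitted until the receiver
 * reports bit lock in FDI_RX_IIR, then pattern 2 until it reports symbol
 * lock.  Each phase is polled a handful of times; on timeout an error is
 * logged and the mode set continues regardless.
 */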
2209static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2210{
2211	struct drm_device *dev = crtc->dev;
2212	struct drm_i915_private *dev_priv = dev->dev_private;
2213	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2214	int pipe = intel_crtc->pipe;
2215	int plane = intel_crtc->plane;
2216	u32 reg, temp, tries;
2217
2218	/* FDI needs bits from pipe & plane first */
2219	assert_pipe_enabled(dev_priv, pipe);
2220	assert_plane_enabled(dev_priv, plane);
2221
2222	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2223	   for train result */
2224	reg = FDI_RX_IMR(pipe);
2225	temp = I915_READ(reg);
2226	temp &= ~FDI_RX_SYMBOL_LOCK;
2227	temp &= ~FDI_RX_BIT_LOCK;
2228	I915_WRITE(reg, temp);
2229	I915_READ(reg);
2230	DELAY(150);
2231
2232	/* enable CPU FDI TX and PCH FDI RX */
2233	reg = FDI_TX_CTL(pipe);
2234	temp = I915_READ(reg);
2235	temp &= ~(7 << 19);
2236	temp |= (intel_crtc->fdi_lanes - 1) << 19;
2237	temp &= ~FDI_LINK_TRAIN_NONE;
2238	temp |= FDI_LINK_TRAIN_PATTERN_1;
2239	I915_WRITE(reg, temp | FDI_TX_ENABLE);
2240
2241	reg = FDI_RX_CTL(pipe);
2242	temp = I915_READ(reg);
2243	temp &= ~FDI_LINK_TRAIN_NONE;
2244	temp |= FDI_LINK_TRAIN_PATTERN_1;
2245	I915_WRITE(reg, temp | FDI_RX_ENABLE);
2246
2247	POSTING_READ(reg);
2248	DELAY(150);
2249
2250	/* Ironlake workaround, enable clock pointer after FDI enable */
2251	if (HAS_PCH_IBX(dev)) {
2252		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2253		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2254			   FDI_RX_PHASE_SYNC_POINTER_EN);
2255	}
2256
2257	reg = FDI_RX_IIR(pipe);
2258	for (tries = 0; tries < 5; tries++) {
2259		temp = I915_READ(reg);
2260		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2261
2262		if ((temp & FDI_RX_BIT_LOCK)) {
2263			DRM_DEBUG_KMS("FDI train 1 done.\n");
2264			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2265			break;
2266		}
2267	}
2268	if (tries == 5)
2269		DRM_ERROR("FDI train 1 fail!\n");
2270
2271	/* Train 2 */
2272	reg = FDI_TX_CTL(pipe);
2273	temp = I915_READ(reg);
2274	temp &= ~FDI_LINK_TRAIN_NONE;
2275	temp |= FDI_LINK_TRAIN_PATTERN_2;
2276	I915_WRITE(reg, temp);
2277
2278	reg = FDI_RX_CTL(pipe);
2279	temp = I915_READ(reg);
2280	temp &= ~FDI_LINK_TRAIN_NONE;
2281	temp |= FDI_LINK_TRAIN_PATTERN_2;
2282	I915_WRITE(reg, temp);
2283
2284	POSTING_READ(reg);
2285	DELAY(150);
2286
2287	reg = FDI_RX_IIR(pipe);
2288	for (tries = 0; tries < 5; tries++) {
2289		temp = I915_READ(reg);
2290		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2291
2292		if (temp & FDI_RX_SYMBOL_LOCK) {
2293			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2294			DRM_DEBUG_KMS("FDI train 2 done.\n");
2295			break;
2296		}
2297	}
2298	if (tries == 5)
2299		DRM_ERROR("FDI train 2 fail!\n");
2300
2301	DRM_DEBUG_KMS("FDI train done\n");
2302
2303}
2304
2305static const int snb_b_fdi_train_param[] = {
2306	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2307	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2308	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2309	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2310};
2311
2312/* The FDI link training functions for SNB/Cougarpoint. */
2313static void gen6_fdi_link_train(struct drm_crtc *crtc)
2314{
2315	struct drm_device *dev = crtc->dev;
2316	struct drm_i915_private *dev_priv = dev->dev_private;
2317	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2318	int pipe = intel_crtc->pipe;
2319	u32 reg, temp, i, retry;
2320
2321	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2322	   for train result */
2323	reg = FDI_RX_IMR(pipe);
2324	temp = I915_READ(reg);
2325	temp &= ~FDI_RX_SYMBOL_LOCK;
2326	temp &= ~FDI_RX_BIT_LOCK;
2327	I915_WRITE(reg, temp);
2328
2329	POSTING_READ(reg);
2330	DELAY(150);
2331
2332	/* enable CPU FDI TX and PCH FDI RX */
2333	reg = FDI_TX_CTL(pipe);
2334	temp = I915_READ(reg);
2335	temp &= ~(7 << 19);
2336	temp |= (intel_crtc->fdi_lanes - 1) << 19;
2337	temp &= ~FDI_LINK_TRAIN_NONE;
2338	temp |= FDI_LINK_TRAIN_PATTERN_1;
2339	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2340	/* SNB-B */
2341	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2342	I915_WRITE(reg, temp | FDI_TX_ENABLE);
2343
2344	reg = FDI_RX_CTL(pipe);
2345	temp = I915_READ(reg);
2346	if (HAS_PCH_CPT(dev)) {
2347		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2348		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2349	} else {
2350		temp &= ~FDI_LINK_TRAIN_NONE;
2351		temp |= FDI_LINK_TRAIN_PATTERN_1;
2352	}
2353	I915_WRITE(reg, temp | FDI_RX_ENABLE);
2354
2355	POSTING_READ(reg);
2356	DELAY(150);
2357
2358	if (HAS_PCH_CPT(dev))
2359		cpt_phase_pointer_enable(dev, pipe);
2360
2361	for (i = 0; i < 4; i++) {
2362		reg = FDI_TX_CTL(pipe);
2363		temp = I915_READ(reg);
2364		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2365		temp |= snb_b_fdi_train_param[i];
2366		I915_WRITE(reg, temp);
2367
2368		POSTING_READ(reg);
2369		DELAY(500);
2370
2371		for (retry = 0; retry < 5; retry++) {
2372			reg = FDI_RX_IIR(pipe);
2373			temp = I915_READ(reg);
2374			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2375
2376			if (temp & FDI_RX_BIT_LOCK) {
2377				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2378				DRM_DEBUG_KMS("FDI train 1 done.\n");
2379				break;
2380			}
2381			DELAY(50);
2382		}
2383		if (retry < 5)
2384			break;
2385	}
2386	if (i == 4)
2387		DRM_ERROR("FDI train 1 fail!\n");
2388
2389	/* Train 2 */
2390	reg = FDI_TX_CTL(pipe);
2391	temp = I915_READ(reg);
2392	temp &= ~FDI_LINK_TRAIN_NONE;
2393	temp |= FDI_LINK_TRAIN_PATTERN_2;
2394	if (IS_GEN6(dev)) {
2395		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2396		/* SNB-B */
2397		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2398	}
2399	I915_WRITE(reg, temp);
2400
2401	reg = FDI_RX_CTL(pipe);
2402	temp = I915_READ(reg);
2403	if (HAS_PCH_CPT(dev)) {
2404		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2405		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2406	} else {
2407		temp &= ~FDI_LINK_TRAIN_NONE;
2408		temp |= FDI_LINK_TRAIN_PATTERN_2;
2409	}
2410	I915_WRITE(reg, temp);
2411
2412	POSTING_READ(reg);
2413	DELAY(150);
2414
2415	for (i = 0; i < 4; i++) {
2416		reg = FDI_TX_CTL(pipe);
2417		temp = I915_READ(reg);
2418		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2419		temp |= snb_b_fdi_train_param[i];
2420		I915_WRITE(reg, temp);
2421
2422		POSTING_READ(reg);
2423		DELAY(500);
2424
2425		for (retry = 0; retry < 5; retry++) {
2426			reg = FDI_RX_IIR(pipe);
2427			temp = I915_READ(reg);
2428			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2429
2430			if (temp & FDI_RX_SYMBOL_LOCK) {
2431				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2432				DRM_DEBUG_KMS("FDI train 2 done.\n");
2433				break;
2434			}
2435			DELAY(50);
2436		}
2437		if (retry < 5)
2438			break;
2439	}
2440	if (i == 4)
2441		DRM_ERROR("FDI train 2 fail!\n");
2442
2443	DRM_DEBUG_KMS("FDI train done.\n");
2444}
2445
2446/* Manual link training for Ivy Bridge A0 parts */
2447static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2448{
2449	struct drm_device *dev = crtc->dev;
2450	struct drm_i915_private *dev_priv = dev->dev_private;
2451	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2452	int pipe = intel_crtc->pipe;
2453	u32 reg, temp, i;
2454
2455	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2456	   for train result */
2457	reg = FDI_RX_IMR(pipe);
2458	temp = I915_READ(reg);
2459	temp &= ~FDI_RX_SYMBOL_LOCK;
2460	temp &= ~FDI_RX_BIT_LOCK;
2461	I915_WRITE(reg, temp);
2462
2463	POSTING_READ(reg);
2464	DELAY(150);
2465
2466	/* enable CPU FDI TX and PCH FDI RX */
2467	reg = FDI_TX_CTL(pipe);
2468	temp = I915_READ(reg);
2469	temp &= ~(7 << 19);
2470	temp |= (intel_crtc->fdi_lanes - 1) << 19;
2471	temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2472	temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2473	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2474	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2475	temp |= FDI_COMPOSITE_SYNC;
2476	I915_WRITE(reg, temp | FDI_TX_ENABLE);
2477
2478	reg = FDI_RX_CTL(pipe);
2479	temp = I915_READ(reg);
2480	temp &= ~FDI_LINK_TRAIN_AUTO;
2481	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2482	temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2483	temp |= FDI_COMPOSITE_SYNC;
2484	I915_WRITE(reg, temp | FDI_RX_ENABLE);
2485
2486	POSTING_READ(reg);
2487	DELAY(150);
2488
2489	for (i = 0; i < 4; i++) {
2490		reg = FDI_TX_CTL(pipe);
2491		temp = I915_READ(reg);
2492		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2493		temp |= snb_b_fdi_train_param[i];
2494		I915_WRITE(reg, temp);
2495
2496		POSTING_READ(reg);
2497		DELAY(500);
2498
2499		reg = FDI_RX_IIR(pipe);
2500		temp = I915_READ(reg);
2501		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2502
2503		if (temp & FDI_RX_BIT_LOCK ||
2504		    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2505			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2506			DRM_DEBUG_KMS("FDI train 1 done.\n");
2507			break;
2508		}
2509	}
2510	if (i == 4)
2511		DRM_ERROR("FDI train 1 fail!\n");
2512
2513	/* Train 2 */
2514	reg = FDI_TX_CTL(pipe);
2515	temp = I915_READ(reg);
2516	temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2517	temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2518	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2519	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2520	I915_WRITE(reg, temp);
2521
2522	reg = FDI_RX_CTL(pipe);
2523	temp = I915_READ(reg);
2524	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2525	temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2526	I915_WRITE(reg, temp);
2527
2528	POSTING_READ(reg);
2529	DELAY(150);
2530
2531	for (i = 0; i < 4; i++ ) {
2532		reg = FDI_TX_CTL(pipe);
2533		temp = I915_READ(reg);
2534		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2535		temp |= snb_b_fdi_train_param[i];
2536		I915_WRITE(reg, temp);
2537
2538		POSTING_READ(reg);
2539		DELAY(500);
2540
2541		reg = FDI_RX_IIR(pipe);
2542		temp = I915_READ(reg);
2543		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2544
2545		if (temp & FDI_RX_SYMBOL_LOCK) {
2546			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2547			DRM_DEBUG_KMS("FDI train 2 done.\n");
2548			break;
2549		}
2550	}
2551	if (i == 4)
2552		DRM_ERROR("FDI train 2 fail!\n");
2553
2554	DRM_DEBUG_KMS("FDI train done.\n");
2555}
2556
2557static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2558{
2559	struct drm_device *dev = crtc->dev;
2560	struct drm_i915_private *dev_priv = dev->dev_private;
2561	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2562	int pipe = intel_crtc->pipe;
2563	u32 reg, temp;
2564
2565	/* Write the TU size bits so error detection works */
2566	I915_WRITE(FDI_RX_TUSIZE1(pipe),
2567		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2568
2569	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2570	reg = FDI_RX_CTL(pipe);
2571	temp = I915_READ(reg);
2572	temp &= ~((0x7 << 19) | (0x7 << 16));
2573	temp |= (intel_crtc->fdi_lanes - 1) << 19;
2574	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2575	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2576
2577	POSTING_READ(reg);
2578	DELAY(200);
2579
2580	/* Switch from Rawclk to PCDclk */
2581	temp = I915_READ(reg);
2582	I915_WRITE(reg, temp | FDI_PCDCLK);
2583
2584	POSTING_READ(reg);
2585	DELAY(200);
2586
2587	/* On Haswell, the PLL configuration for ports and pipes is handled
2588	 * separately, as part of DDI setup */
2589	if (!IS_HASWELL(dev)) {
2590		/* Enable CPU FDI TX PLL, always on for Ironlake */
2591		reg = FDI_TX_CTL(pipe);
2592		temp = I915_READ(reg);
2593		if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2594			I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2595
2596			POSTING_READ(reg);
2597			DELAY(100);
2598		}
2599	}
2600}
2601
2602static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2603{
2604	struct drm_i915_private *dev_priv = dev->dev_private;
2605	u32 flags = I915_READ(SOUTH_CHICKEN1);
2606
2607	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2608	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2609	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2610	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2611	POSTING_READ(SOUTH_CHICKEN1);
2612}
2613
2614static void ironlake_fdi_disable(struct drm_crtc *crtc)
2615{
2616	struct drm_device *dev = crtc->dev;
2617	struct drm_i915_private *dev_priv = dev->dev_private;
2618	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2619	int pipe = intel_crtc->pipe;
2620	u32 reg, temp;
2621
2622	/* disable CPU FDI tx and PCH FDI rx */
2623	reg = FDI_TX_CTL(pipe);
2624	temp = I915_READ(reg);
2625	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2626	POSTING_READ(reg);
2627
2628	reg = FDI_RX_CTL(pipe);
2629	temp = I915_READ(reg);
2630	temp &= ~(0x7 << 16);
2631	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2632	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2633
2634	POSTING_READ(reg);
2635	DELAY(100);
2636
2637	/* Ironlake workaround, disable clock pointer after downing FDI */
2638	if (HAS_PCH_IBX(dev)) {
2639		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2640		I915_WRITE(FDI_RX_CHICKEN(pipe),
2641			   I915_READ(FDI_RX_CHICKEN(pipe)) &
2642			   ~FDI_RX_PHASE_SYNC_POINTER_EN);
2643	} else if (HAS_PCH_CPT(dev)) {
2644		cpt_phase_pointer_disable(dev, pipe);
2645	}
2646
2647	/* still set train pattern 1 */
2648	reg = FDI_TX_CTL(pipe);
2649	temp = I915_READ(reg);
2650	temp &= ~FDI_LINK_TRAIN_NONE;
2651	temp |= FDI_LINK_TRAIN_PATTERN_1;
2652	I915_WRITE(reg, temp);
2653
2654	reg = FDI_RX_CTL(pipe);
2655	temp = I915_READ(reg);
2656	if (HAS_PCH_CPT(dev)) {
2657		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2658		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2659	} else {
2660		temp &= ~FDI_LINK_TRAIN_NONE;
2661		temp |= FDI_LINK_TRAIN_PATTERN_1;
2662	}
2663	/* BPC in FDI rx is consistent with that in PIPECONF */
2664	temp &= ~(0x07 << 16);
2665	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2666	I915_WRITE(reg, temp);
2667
2668	POSTING_READ(reg);
2669	DELAY(100);
2670}
2671
2672static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2673{
2674	struct drm_device *dev = crtc->dev;
2675
2676	if (crtc->fb == NULL)
2677		return;
2678
2679	DRM_LOCK(dev);
2680	intel_finish_fb(crtc->fb);
2681	DRM_UNLOCK(dev);
2682}
2683
2684static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2685{
2686	struct drm_device *dev = crtc->dev;
2687	struct drm_mode_config *mode_config = &dev->mode_config;
2688	struct intel_encoder *encoder;
2689
2690	/*
2691	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2692	 * must be driven by its own crtc; no sharing is possible.
2693	 */
2694	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2695		if (encoder->base.crtc != crtc)
2696			continue;
2697
2698		/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
2699		 * CPU handles all others */
2700		if (IS_HASWELL(dev)) {
2701			/* It is still unclear how this will work on PPT, so throw up a warning */
2702			if (!HAS_PCH_LPT(dev))
2703				DRM_DEBUG_KMS("Haswell: PPT\n");
2704
2705			if (encoder->type == DRM_MODE_ENCODER_DAC) {
2706				DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
2707				return true;
2708			} else {
2709				DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
2710						encoder->type);
2711				return false;
2712			}
2713		}
2714
2715		switch (encoder->type) {
2716		case INTEL_OUTPUT_EDP:
2717			if (!intel_encoder_is_pch_edp(&encoder->base))
2718				return false;
2719			continue;
2720		}
2721	}
2722
2723	return true;
2724}
2725
2726/* Program iCLKIP clock to the desired frequency */
2727static void lpt_program_iclkip(struct drm_crtc *crtc)
2728{
2729	struct drm_device *dev = crtc->dev;
2730	struct drm_i915_private *dev_priv = dev->dev_private;
2731	u32 divsel, phaseinc, auxdiv, phasedir = 0;
2732	u32 temp;
2733
2734	/* It is necessary to ungate the pixclk gate prior to programming
2735	 * the divisors, and gate it back when it is done.
2736	 */
2737	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
2738
2739	/* Disable SSCCTL */
2740	intel_sbi_write(dev_priv, SBI_SSCCTL6,
2741				intel_sbi_read(dev_priv, SBI_SSCCTL6) |
2742					SBI_SSCCTL_DISABLE);
2743
2744	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
2745	if (crtc->mode.clock == 20000) {
2746		auxdiv = 1;
2747		divsel = 0x41;
2748		phaseinc = 0x20;
2749	} else {
2750		/* The iCLK virtual clock root frequency is in MHz,
2751		 * but the crtc->mode.clock is in kHz. To get the divisors,
2752		 * it is necessary to divide one by the other, so we
2753		 * convert the virtual clock root frequency to kHz here for higher
2754		 * precision.
2755		 */
2756		u32 iclk_virtual_root_freq = 172800 * 1000;
2757		u32 iclk_pi_range = 64;
2758		u32 desired_divisor, msb_divisor_value, pi_value;
2759
2760		desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
2761		msb_divisor_value = desired_divisor / iclk_pi_range;
2762		pi_value = desired_divisor % iclk_pi_range;
2763
2764		auxdiv = 0;
2765		divsel = msb_divisor_value - 2;
2766		phaseinc = pi_value;
2767	}
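	/*
	 * Worked example (illustrative numbers): for a 108000 kHz mode
	 * clock, desired_divisor = 172800000 / 108000 = 1600, so
	 * msb_divisor_value = 1600 / 64 = 25 and pi_value = 1600 % 64 = 0,
	 * giving auxdiv = 0, divsel = 23 and phaseinc = 0.
	 */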
2768
2769	/* This should not happen with any sane values */
2770	if ((SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2771	     ~SBI_SSCDIVINTPHASE_DIVSEL_MASK))
2772		DRM_DEBUG_KMS("DIVSEL_MASK");
2773	if ((SBI_SSCDIVINTPHASE_DIR(phasedir) &
2774	     ~SBI_SSCDIVINTPHASE_INCVAL_MASK))
2775		DRM_DEBUG_KMS("INCVAL_MASK");
2776
2777	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2778			crtc->mode.clock,
2779			auxdiv,
2780			divsel,
2781			phasedir,
2782			phaseinc);
2783
2784	/* Program SSCDIVINTPHASE6 */
2785	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
2786	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2787	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2788	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2789	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2790	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2791	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2792
2793	intel_sbi_write(dev_priv,
2794			SBI_SSCDIVINTPHASE6,
2795			temp);
2796
2797	/* Program SSCAUXDIV */
2798	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
2799	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2800	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2801	intel_sbi_write(dev_priv,
2802			SBI_SSCAUXDIV6,
2803			temp);
2804
2805
2806	/* Enable modulator and associated divider */
2807	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
2808	temp &= ~SBI_SSCCTL_DISABLE;
2809	intel_sbi_write(dev_priv,
2810			SBI_SSCCTL6,
2811			temp);
2812
2813	/* Wait for initialization time */
2814	DELAY(24);
2815
2816	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
2817}
2818
2819/*
2820 * Enable PCH resources required for PCH ports:
2821 *   - PCH PLLs
2822 *   - FDI training & RX/TX
2823 *   - update transcoder timings
2824 *   - DP transcoding bits
2825 *   - transcoder
2826 */
2827static void ironlake_pch_enable(struct drm_crtc *crtc)
2828{
2829	struct drm_device *dev = crtc->dev;
2830	struct drm_i915_private *dev_priv = dev->dev_private;
2831	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2832	int pipe = intel_crtc->pipe;
2833	u32 reg, temp;
2834
2835	assert_transcoder_disabled(dev_priv, pipe);
2836
2837	/* For PCH output, training FDI link */
2838	dev_priv->display.fdi_link_train(crtc);
2839
2840	intel_enable_pch_pll(intel_crtc);
2841
2842	if (HAS_PCH_LPT(dev)) {
2843		DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
2844		lpt_program_iclkip(crtc);
2845	} else if (HAS_PCH_CPT(dev)) {
2846		u32 sel;
2847
2848		temp = I915_READ(PCH_DPLL_SEL);
2849		switch (pipe) {
2850		default:
2851		case 0:
2852			temp |= TRANSA_DPLL_ENABLE;
2853			sel = TRANSA_DPLLB_SEL;
2854			break;
2855		case 1:
2856			temp |= TRANSB_DPLL_ENABLE;
2857			sel = TRANSB_DPLLB_SEL;
2858			break;
2859		case 2:
2860			temp |= TRANSC_DPLL_ENABLE;
2861			sel = TRANSC_DPLLB_SEL;
2862			break;
2863		}
2864		if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
2865			temp |= sel;
2866		else
2867			temp &= ~sel;
2868		I915_WRITE(PCH_DPLL_SEL, temp);
2869	}
2870
2871	/* set transcoder timing, panel must allow it */
2872	assert_panel_unlocked(dev_priv, pipe);
2873	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
2874	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
2875	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
2876
2877	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
2878	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
2879	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
2880	I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
2881
2882	if (!IS_HASWELL(dev))
2883		intel_fdi_normal_train(crtc);
2884
2885	/* For PCH DP, enable TRANS_DP_CTL */
2886	if (HAS_PCH_CPT(dev) &&
2887	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
2888	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2889		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
2890		reg = TRANS_DP_CTL(pipe);
2891		temp = I915_READ(reg);
2892		temp &= ~(TRANS_DP_PORT_SEL_MASK |
2893			  TRANS_DP_SYNC_MASK |
2894			  TRANS_DP_BPC_MASK);
2895		temp |= (TRANS_DP_OUTPUT_ENABLE |
2896			 TRANS_DP_ENH_FRAMING);
2897		temp |= bpc << 9; /* same format but at 11:9 */
2898
2899		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
2900			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2901		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
2902			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2903
2904		switch (intel_trans_dp_port_sel(crtc)) {
2905		case PCH_DP_B:
2906			temp |= TRANS_DP_PORT_SEL_B;
2907			break;
2908		case PCH_DP_C:
2909			temp |= TRANS_DP_PORT_SEL_C;
2910			break;
2911		case PCH_DP_D:
2912			temp |= TRANS_DP_PORT_SEL_D;
2913			break;
2914		default:
2915			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
2916			temp |= TRANS_DP_PORT_SEL_B;
2917			break;
2918		}
2919
2920		I915_WRITE(reg, temp);
2921	}
2922
2923	intel_enable_transcoder(dev_priv, pipe);
2924}
2925
2926static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
2927{
2928	struct intel_pch_pll *pll = intel_crtc->pch_pll;
2929
2930	if (pll == NULL)
2931		return;
2932
2933	if (pll->refcount == 0) {
2934		printf("bad PCH PLL refcount\n");
2935		return;
2936	}
2937
2938	--pll->refcount;
2939	intel_crtc->pch_pll = NULL;
2940}
2941
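/* Find a PCH PLL for this CRTC.  Preference order: reuse the PLL the CRTC
 * already holds, take the fixed per-pipe PLL on Ibex Peak, share a PLL
 * whose DPLL/FP0 values already match, or finally claim an unused one.
 * The chosen PLL is refcounted and switched off so the caller can program
 * it; NULL means no PLL was available.
 */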
2942static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
2943{
2944	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
2945	struct intel_pch_pll *pll;
2946	int i;
2947
2948	pll = intel_crtc->pch_pll;
2949	if (pll) {
2950		DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
2951			      intel_crtc->base.base.id, pll->pll_reg);
2952		goto prepare;
2953	}
2954
2955	if (HAS_PCH_IBX(dev_priv->dev)) {
2956		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
2957		i = intel_crtc->pipe;
2958		pll = &dev_priv->pch_plls[i];
2959
2960		DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
2961			      intel_crtc->base.base.id, pll->pll_reg);
2962
2963		goto found;
2964	}
2965
2966	for (i = 0; i < dev_priv->num_pch_pll; i++) {
2967		pll = &dev_priv->pch_plls[i];
2968
2969		/* Only want to check enabled timings first */
2970		if (pll->refcount == 0)
2971			continue;
2972
2973		if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
2974		    fp == I915_READ(pll->fp0_reg)) {
2975			DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
2976				      intel_crtc->base.base.id,
2977				      pll->pll_reg, pll->refcount, pll->active);
2978
2979			goto found;
2980		}
2981	}
2982
2983	/* Ok no matching timings, maybe there's a free one? */
2984	for (i = 0; i < dev_priv->num_pch_pll; i++) { /* XXXKIB: HACK */
2985		pll = &dev_priv->pch_plls[i];
2986		if (pll->refcount == 0) {
2987			DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
2988				      intel_crtc->base.base.id, pll->pll_reg);
2989			goto found;
2990		}
2991	}
2992
2993	return NULL;
2994
2995found:
2996	intel_crtc->pch_pll = pll;
2997	pll->refcount++;
2998	DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
2999prepare: /* separate function? */
3000	DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
3001
3002	/* Wait for the clocks to stabilize before rewriting the regs */
3003	I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
3004	POSTING_READ(pll->pll_reg);
3005	DELAY(150);
3006
3007	I915_WRITE(pll->fp0_reg, fp);
3008	I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
3009	pll->on = false;
3010	return pll;
3011}
3012
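/* After a mode set on CPT, check that the pipe is really running by
 * watching the scanline counter (PIPEDSL) advance.  If it is stuck, pulse
 * TRANS_AUTOTRAIN_GEN_STALL_DIS in TRANS_CHICKEN2 as a workaround and
 * check once more before declaring the mode set failed.
 */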
3013void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3014{
3015	struct drm_i915_private *dev_priv = dev->dev_private;
3016	int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
3017	u32 temp;
3018
3019	temp = I915_READ(dslreg);
3020	DELAY(500);
3021	if (_intel_wait_for(dev, I915_READ(dslreg) != temp, 5, 1, "915cp1")) {
3022		/* Without this, mode sets may fail silently on FDI */
3023		I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
3024		DELAY(250);
3025		I915_WRITE(tc2reg, 0);
3026		if (_intel_wait_for(dev, I915_READ(dslreg) != temp, 5, 1,
3027		    "915cp2"))
3028			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
3029	}
3030}
3031
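/* Power-up sequence for an Ironlake/PCH pipe: watermarks and (for LVDS)
 * the port power come first, then the FDI PLL and panel fitter as needed,
 * the CPU pipe and plane, and finally the PCH side via
 * ironlake_pch_enable() when a PCH port is being driven.
 */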
3032static void ironlake_crtc_enable(struct drm_crtc *crtc)
3033{
3034	struct drm_device *dev = crtc->dev;
3035	struct drm_i915_private *dev_priv = dev->dev_private;
3036	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3037	int pipe = intel_crtc->pipe;
3038	int plane = intel_crtc->plane;
3039	u32 temp;
3040	bool is_pch_port;
3041
3042	if (intel_crtc->active)
3043		return;
3044
3045	intel_crtc->active = true;
3046	intel_update_watermarks(dev);
3047
3048	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3049		temp = I915_READ(PCH_LVDS);
3050		if ((temp & LVDS_PORT_EN) == 0)
3051			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3052	}
3053
3054	is_pch_port = intel_crtc_driving_pch(crtc);
3055
3056	if (is_pch_port)
3057		ironlake_fdi_pll_enable(crtc);
3058	else
3059		ironlake_fdi_disable(crtc);
3060
3061	/* Enable panel fitting for LVDS */
3062	if (dev_priv->pch_pf_size &&
3063	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
3064		/* Force use of hard-coded filter coefficients
3065		 * as some pre-programmed values are broken,
3066		 * e.g. x201.
3067		 */
3068		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3069		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3070		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3071	}
3072
3073	intel_enable_pipe(dev_priv, pipe, is_pch_port);
3074	intel_enable_plane(dev_priv, plane, pipe);
3075
3076	if (is_pch_port)
3077		ironlake_pch_enable(crtc);
3078
3079	intel_crtc_load_lut(crtc);
3080
3081	DRM_LOCK(dev);
3082	intel_update_fbc(dev);
3083	DRM_UNLOCK(dev);
3084
3085	intel_crtc_update_cursor(crtc, true);
3086}
3087
3088static void ironlake_crtc_disable(struct drm_crtc *crtc)
3089{
3090	struct drm_device *dev = crtc->dev;
3091	struct drm_i915_private *dev_priv = dev->dev_private;
3092	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3093	int pipe = intel_crtc->pipe;
3094	int plane = intel_crtc->plane;
3095	u32 reg, temp;
3096
3097	if (!intel_crtc->active)
3098		return;
3099
3100	intel_crtc_wait_for_pending_flips(crtc);
3101	drm_vblank_off(dev, pipe);
3102	intel_crtc_update_cursor(crtc, false);
3103
3104	intel_disable_plane(dev_priv, plane, pipe);
3105
3106	if (dev_priv->cfb_plane == plane)
3107		intel_disable_fbc(dev);
3108
3109	intel_disable_pipe(dev_priv, pipe);
3110
3111	/* Disable PF */
3112	I915_WRITE(PF_CTL(pipe), 0);
3113	I915_WRITE(PF_WIN_SZ(pipe), 0);
3114
3115	ironlake_fdi_disable(crtc);
3116
3117	/* This is a horrible layering violation; we should be doing this in
3118	 * the connector/encoder ->prepare instead, but we don't always have
3119	 * enough information there about the config to know whether it will
3120	 * actually be necessary or just cause undesired flicker.
3121	 */
3122	intel_disable_pch_ports(dev_priv, pipe);
3123
3124	intel_disable_transcoder(dev_priv, pipe);
3125
3126	if (HAS_PCH_CPT(dev)) {
3127		/* disable TRANS_DP_CTL */
3128		reg = TRANS_DP_CTL(pipe);
3129		temp = I915_READ(reg);
3130		temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
3131		temp |= TRANS_DP_PORT_SEL_NONE;
3132		I915_WRITE(reg, temp);
3133
3134		/* disable DPLL_SEL */
3135		temp = I915_READ(PCH_DPLL_SEL);
3136		switch (pipe) {
3137		case 0:
3138			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
3139			break;
3140		case 1:
3141			temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3142			break;
3143		case 2:
3144			/* C shares PLL A or B */
3145			temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
3146			break;
3147		default:
3148			KASSERT(0, ("Wrong pipe %d", pipe)); /* wtf */
3149		}
3150		I915_WRITE(PCH_DPLL_SEL, temp);
3151	}
3152
3153	/* disable PCH DPLL */
3154	intel_disable_pch_pll(intel_crtc);
3155
3156	/* Switch from PCDclk to Rawclk */
3157	reg = FDI_RX_CTL(pipe);
3158	temp = I915_READ(reg);
3159	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3160
3161	/* Disable CPU FDI TX PLL */
3162	reg = FDI_TX_CTL(pipe);
3163	temp = I915_READ(reg);
3164	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3165
3166	POSTING_READ(reg);
3167	DELAY(100);
3168
3169	reg = FDI_RX_CTL(pipe);
3170	temp = I915_READ(reg);
3171	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3172
3173	/* Wait for the clocks to turn off. */
3174	POSTING_READ(reg);
3175	DELAY(100);
3176
3177	intel_crtc->active = false;
3178	intel_update_watermarks(dev);
3179
3180	DRM_LOCK(dev);
3181	intel_update_fbc(dev);
3182	DRM_UNLOCK(dev);
3183}
3184
3185static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3186{
3187	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3188	int pipe = intel_crtc->pipe;
3189	int plane = intel_crtc->plane;
3190
3191	/* XXX: When our outputs are all unaware of DPMS modes other than off
3192	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3193	 */
3194	switch (mode) {
3195	case DRM_MODE_DPMS_ON:
3196	case DRM_MODE_DPMS_STANDBY:
3197	case DRM_MODE_DPMS_SUSPEND:
3198		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3199		ironlake_crtc_enable(crtc);
3200		break;
3201
3202	case DRM_MODE_DPMS_OFF:
3203		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3204		ironlake_crtc_disable(crtc);
3205		break;
3206	}
3207}
3208
3209static void ironlake_crtc_off(struct drm_crtc *crtc)
3210{
3211	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3212	intel_put_pch_pll(intel_crtc);
3213}
3214
3215static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3216{
3217	if (!enable && intel_crtc->overlay) {
3218		struct drm_device *dev = intel_crtc->base.dev;
3219		struct drm_i915_private *dev_priv = dev->dev_private;
3220
3221		DRM_LOCK(dev);
3222		dev_priv->mm.interruptible = false;
3223		(void) intel_overlay_switch_off(intel_crtc->overlay);
3224		dev_priv->mm.interruptible = true;
3225		DRM_UNLOCK(dev);
3226	}
3227
3228	/* Let userspace switch the overlay on again. In most cases userspace
3229	 * has to recompute where to put it anyway.
3230	 */
3231}
3232
3233static void i9xx_crtc_enable(struct drm_crtc *crtc)
3234{
3235	struct drm_device *dev = crtc->dev;
3236	struct drm_i915_private *dev_priv = dev->dev_private;
3237	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3238	int pipe = intel_crtc->pipe;
3239	int plane = intel_crtc->plane;
3240
3241	if (intel_crtc->active)
3242		return;
3243
3244	intel_crtc->active = true;
3245	intel_update_watermarks(dev);
3246
3247	intel_enable_pll(dev_priv, pipe);
3248	intel_enable_pipe(dev_priv, pipe, false);
3249	intel_enable_plane(dev_priv, plane, pipe);
3250
3251	intel_crtc_load_lut(crtc);
3252	intel_update_fbc(dev);
3253
3254	/* Give the overlay scaler a chance to enable if it's on this pipe */
3255	intel_crtc_dpms_overlay(intel_crtc, true);
3256	intel_crtc_update_cursor(crtc, true);
3257}
3258
3259static void i9xx_crtc_disable(struct drm_crtc *crtc)
3260{
3261	struct drm_device *dev = crtc->dev;
3262	struct drm_i915_private *dev_priv = dev->dev_private;
3263	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3264	int pipe = intel_crtc->pipe;
3265	int plane = intel_crtc->plane;
3266
3267	if (!intel_crtc->active)
3268		return;
3269
3270	/* Give the overlay scaler a chance to disable if it's on this pipe */
3271	intel_crtc_wait_for_pending_flips(crtc);
3272	drm_vblank_off(dev, pipe);
3273	intel_crtc_dpms_overlay(intel_crtc, false);
3274	intel_crtc_update_cursor(crtc, false);
3275
3276	if (dev_priv->cfb_plane == plane)
3277		intel_disable_fbc(dev);
3278
3279	intel_disable_plane(dev_priv, plane, pipe);
3280	intel_disable_pipe(dev_priv, pipe);
3281	intel_disable_pll(dev_priv, pipe);
3282
3283	intel_crtc->active = false;
3284	intel_update_fbc(dev);
3285	intel_update_watermarks(dev);
3286}
3287
3288static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3289{
3290	/* XXX: When our outputs are all unaware of DPMS modes other than off
3291	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3292	 */
3293	switch (mode) {
3294	case DRM_MODE_DPMS_ON:
3295	case DRM_MODE_DPMS_STANDBY:
3296	case DRM_MODE_DPMS_SUSPEND:
3297		i9xx_crtc_enable(crtc);
3298		break;
3299	case DRM_MODE_DPMS_OFF:
3300		i9xx_crtc_disable(crtc);
3301		break;
3302	}
3303}
3304
3305static void i9xx_crtc_off(struct drm_crtc *crtc)
3306{
3307}
3308
3309/**
3310 * Sets the power management mode of the pipe and plane.
3311 */
3312static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3313{
3314	struct drm_device *dev = crtc->dev;
3315	struct drm_i915_private *dev_priv = dev->dev_private;
3316	struct drm_i915_master_private *master_priv;
3317	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3318	int pipe = intel_crtc->pipe;
3319	bool enabled;
3320
3321	if (intel_crtc->dpms_mode == mode)
3322		return;
3323
3324	intel_crtc->dpms_mode = mode;
3325
3326	dev_priv->display.dpms(crtc, mode);
3327
3328	if (!dev->primary->master)
3329		return;
3330
3331	master_priv = dev->primary->master->driver_priv;
3332	if (!master_priv->sarea_priv)
3333		return;
3334
3335	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
3336
3337	switch (pipe) {
3338	case 0:
3339		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3340		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3341		break;
3342	case 1:
3343		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3344		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3345		break;
3346	default:
3347		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
3348		break;
3349	}
3350}
3351
3352static void intel_crtc_disable(struct drm_crtc *crtc)
3353{
3354	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3355	struct drm_device *dev = crtc->dev;
3356	struct drm_i915_private *dev_priv = dev->dev_private;
3357
3358	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
3359	dev_priv->display.off(crtc);
3360
3361	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3362	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3363
3364	if (crtc->fb) {
3365		DRM_LOCK(dev);
3366		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
3367		DRM_UNLOCK(dev);
3368	}
3369}
3370
3371/* Prepare for a mode set.
3372 *
3373 * Note we could be a lot smarter here.  We need to figure out which outputs
3374 * will be enabled, which disabled (in short, how the config will change)
3375 * and perform the minimum necessary steps to accomplish that, e.g. updating
3376 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
3377 * panel fitting is in the proper state, etc.
3378 */
3379static void i9xx_crtc_prepare(struct drm_crtc *crtc)
3380{
3381	i9xx_crtc_disable(crtc);
3382}
3383
3384static void i9xx_crtc_commit(struct drm_crtc *crtc)
3385{
3386	i9xx_crtc_enable(crtc);
3387}
3388
3389static void ironlake_crtc_prepare(struct drm_crtc *crtc)
3390{
3391	ironlake_crtc_disable(crtc);
3392}
3393
3394static void ironlake_crtc_commit(struct drm_crtc *crtc)
3395{
3396	ironlake_crtc_enable(crtc);
3397}
3398
3399void intel_encoder_prepare(struct drm_encoder *encoder)
3400{
3401	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3402	/* lvds has its own version of prepare see intel_lvds_prepare */
3403	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
3404}
3405
3406void intel_encoder_commit(struct drm_encoder *encoder)
3407{
3408	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3409	struct drm_device *dev = encoder->dev;
3410	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
3411
3412	/* lvds has its own version of commit see intel_lvds_commit */
3413	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
3414
3415	if (HAS_PCH_CPT(dev))
3416		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3417}
3418
3419void intel_encoder_destroy(struct drm_encoder *encoder)
3420{
3421	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3422
3423	drm_encoder_cleanup(encoder);
3424	free(intel_encoder, DRM_MEM_KMS);
3425}
3426
3427static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3428				  const struct drm_display_mode *mode,
3429				  struct drm_display_mode *adjusted_mode)
3430{
3431	struct drm_device *dev = crtc->dev;
3432
3433	if (HAS_PCH_SPLIT(dev)) {
3434		/* FDI link clock is fixed at 2.7G */
3435		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3436			return false;
3437	}
3438
3439	/* All interlaced capable intel hw wants timings in frames. Note though
3440	 * that intel_lvds_mode_fixup does some funny tricks with the crtc
3441	 * timings, so we need to be careful not to clobber these.*/
3442	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3443		drm_mode_set_crtcinfo(adjusted_mode, 0);
3444
3445	return true;
3446}
3447
3448static int valleyview_get_display_clock_speed(struct drm_device *dev)
3449{
3450	return 400000; /* FIXME */
3451}
3452
3453static int i945_get_display_clock_speed(struct drm_device *dev)
3454{
3455	return 400000;
3456}
3457
3458static int i915_get_display_clock_speed(struct drm_device *dev)
3459{
3460	return 333000;
3461}
3462
3463static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
3464{
3465	return 200000;
3466}
3467
3468static int i915gm_get_display_clock_speed(struct drm_device *dev)
3469{
3470	u16 gcfgc = 0;
3471
3472	gcfgc = pci_read_config(dev->dev, GCFGC, 2);
3473
3474	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3475		return 133000;
3476	else {
3477		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3478		case GC_DISPLAY_CLOCK_333_MHZ:
3479			return 333000;
3480		default:
3481		case GC_DISPLAY_CLOCK_190_200_MHZ:
3482			return 190000;
3483		}
3484	}
3485}
3486
3487static int i865_get_display_clock_speed(struct drm_device *dev)
3488{
3489	return 266000;
3490}
3491
3492static int i855_get_display_clock_speed(struct drm_device *dev)
3493{
3494	u16 hpllcc = 0;
3495	/* Assume that the hardware is in the high speed state.  This
3496	 * should be the default.
3497	 */
3498	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
3499	case GC_CLOCK_133_200:
3500	case GC_CLOCK_100_200:
3501		return 200000;
3502	case GC_CLOCK_166_250:
3503		return 250000;
3504	case GC_CLOCK_100_133:
3505		return 133000;
3506	}
3507
3508	/* Shouldn't happen */
3509	return 0;
3510}
3511
3512static int i830_get_display_clock_speed(struct drm_device *dev)
3513{
3514	return 133000;
3515}
3516
3517struct fdi_m_n {
3518	u32        tu;
3519	u32        gmch_m;
3520	u32        gmch_n;
3521	u32        link_m;
3522	u32        link_n;
3523};
3524
3525static void
3526fdi_reduce_ratio(u32 *num, u32 *den)
3527{
3528	while (*num > 0xffffff || *den > 0xffffff) {
3529		*num >>= 1;
3530		*den >>= 1;
3531	}
3532}
3533
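/* Compute the FDI M/N values as the ratio of pixel data rate to link
 * capacity; fdi_reduce_ratio() halves both terms until they fit in 24
 * bits.  Illustrative example (the two clocks only need matching units):
 * bits_per_pixel = 24, nlanes = 4, pixel_clock = 148500 and
 * link_clock = 270000 give gmch_m = 24 * 148500 = 3564000 and
 * gmch_n = 270000 * 4 * 8 = 8640000, both already below 0xffffff.
 */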
3534static void
3535ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
3536		     int link_clock, struct fdi_m_n *m_n)
3537{
3538	m_n->tu = 64; /* default size */
3539
3540	/* BUG_ON(pixel_clock > INT_MAX / 36); */
3541	m_n->gmch_m = bits_per_pixel * pixel_clock;
3542	m_n->gmch_n = link_clock * nlanes * 8;
3543	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3544
3545	m_n->link_m = pixel_clock;
3546	m_n->link_n = link_clock;
3547	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3548}
3549
3550static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
3551{
3552	if (i915_panel_use_ssc >= 0)
3553		return i915_panel_use_ssc != 0;
3554	return dev_priv->lvds_use_ssc
3555		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
3556}
3557
3558/**
3559 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
3560 * @crtc: CRTC structure
3561 * @mode: requested mode
3562 *
3563 * A pipe may be connected to one or more outputs.  Based on the depth of the
3564 * attached framebuffer, choose a good color depth to use on the pipe.
3565 *
3566 * If possible, match the pipe depth to the fb depth.  In some cases, this
3567 * isn't ideal, because the connected output supports a lesser or restricted
3568 * set of depths.  Resolve that here:
3569 *    LVDS typically supports only 6bpc, so clamp down in that case
3570 *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
3571 *    Displays may support a restricted set as well, check EDID and clamp as
3572 *      appropriate.
3573 *    DP may want to dither down to 6bpc to fit larger modes
3574 *
3575 * RETURNS:
3576 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
3577 * true if they don't match).
3578 */
3579static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
3580					 unsigned int *pipe_bpp,
3581					 struct drm_display_mode *mode)
3582{
3583	struct drm_device *dev = crtc->dev;
3584	struct drm_i915_private *dev_priv = dev->dev_private;
3585	struct drm_encoder *encoder;
3586	struct drm_connector *connector;
3587	unsigned int display_bpc = UINT_MAX, bpc;
3588
3589	/* Walk the encoders & connectors on this crtc, get min bpc */
3590	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3591		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3592
3593		if (encoder->crtc != crtc)
3594			continue;
3595
3596		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
3597			unsigned int lvds_bpc;
3598
3599			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
3600			    LVDS_A3_POWER_UP)
3601				lvds_bpc = 8;
3602			else
3603				lvds_bpc = 6;
3604
3605			if (lvds_bpc < display_bpc) {
3606				DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
3607				display_bpc = lvds_bpc;
3608			}
3609			continue;
3610		}
3611
3612		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
3613			/* Use VBT settings if we have an eDP panel */
3614			unsigned int edp_bpc = dev_priv->edp.bpp / 3;
3615
3616			if (edp_bpc < display_bpc) {
3617				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
3618				display_bpc = edp_bpc;
3619			}
3620			continue;
3621		}
3622
3623		/* Not one of the known troublemakers, check the EDID */
3624		list_for_each_entry(connector, &dev->mode_config.connector_list,
3625				    head) {
3626			if (connector->encoder != encoder)
3627				continue;
3628
3629			/* Don't use an invalid EDID bpc value */
3630			if (connector->display_info.bpc &&
3631			    connector->display_info.bpc < display_bpc) {
3632				DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
3633				display_bpc = connector->display_info.bpc;
3634			}
3635		}
3636
3637		/*
3638		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
3639		 * through, clamp it down.  (Note: >12bpc will be caught below.)
3640		 */
3641		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
3642			if (display_bpc > 8 && display_bpc < 12) {
3643				DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
3644				display_bpc = 12;
3645			} else {
3646				DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
3647				display_bpc = 8;
3648			}
3649		}
3650	}
3651
3652	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
3653		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
3654		display_bpc = 6;
3655	}
3656
3657	/*
3658	 * We could just drive the pipe at the highest bpc all the time and
3659	 * enable dithering as needed, but that costs bandwidth.  So choose
3660	 * the minimum value that expresses the full color range of the fb but
3661	 * also stays within the max display bpc discovered above.
3662	 */
3663
3664	switch (crtc->fb->depth) {
3665	case 8:
3666		bpc = 8; /* since we go through a colormap */
3667		break;
3668	case 15:
3669	case 16:
3670		bpc = 6; /* min is 18bpp */
3671		break;
3672	case 24:
3673		bpc = 8;
3674		break;
3675	case 30:
3676		bpc = 10;
3677		break;
3678	case 48:
3679		bpc = 12;
3680		break;
3681	default:
3682		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
3683		bpc = min((unsigned int)8, display_bpc);
3684		break;
3685	}
3686
3687	display_bpc = min(display_bpc, bpc);
3688
3689	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
3690			 bpc, display_bpc);
3691
3692	*pipe_bpp = display_bpc * 3;
3693
3694	return display_bpc != bpc;
3695}
3696
3697static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
3698{
3699	struct drm_device *dev = crtc->dev;
3700	struct drm_i915_private *dev_priv = dev->dev_private;
3701	int refclk;
3702
3703	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3704	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
3705		refclk = dev_priv->lvds_ssc_freq * 1000;
3706		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
3707			      refclk / 1000);
3708	} else if (!IS_GEN2(dev)) {
3709		refclk = 96000;
3710	} else {
3711		refclk = 48000;
3712	}
3713
3714	return refclk;
3715}
3716
3717static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
3718				      intel_clock_t *clock)
3719{
3720	/* SDVO TV has fixed PLL values that depend on its clock range;
3721	   this mirrors the VBIOS setting. */
3722	if (adjusted_mode->clock >= 100000
3723	    && adjusted_mode->clock < 140500) {
3724		clock->p1 = 2;
3725		clock->p2 = 10;
3726		clock->n = 3;
3727		clock->m1 = 16;
3728		clock->m2 = 8;
3729	} else if (adjusted_mode->clock >= 140500
3730		   && adjusted_mode->clock <= 200000) {
3731		clock->p1 = 1;
3732		clock->p2 = 10;
3733		clock->n = 6;
3734		clock->m1 = 12;
3735		clock->m2 = 8;
3736	}
3737}
3738
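/* Write the FP0/FP1 divisor registers for the computed clock (and, when
 * LVDS power saving applies, the reduced clock into FP1).  As encoded
 * here, N sits above bit 16 (as 1 << N on Pineview), M1 in bits 15:8 and
 * M2 in bits 7:0; e.g. n = 3, m1 = 16, m2 = 8 packs to 0x00031008 on
 * non-Pineview parts and 0x00081008 on Pineview.
 */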
3739static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
3740				     intel_clock_t *clock,
3741				     intel_clock_t *reduced_clock)
3742{
3743	struct drm_device *dev = crtc->dev;
3744	struct drm_i915_private *dev_priv = dev->dev_private;
3745	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3746	int pipe = intel_crtc->pipe;
3747	u32 fp, fp2 = 0;
3748
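	/* Pack the N/M1/M2 dividers into the FPx register layout: Pineview
	 * stores N as a one-hot bit (1 << n), other generations store the
	 * raw divider value. */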
3749	if (IS_PINEVIEW(dev)) {
3750		fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
3751		if (reduced_clock)
3752			fp2 = (1 << reduced_clock->n) << 16 |
3753				reduced_clock->m1 << 8 | reduced_clock->m2;
3754	} else {
3755		fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
3756		if (reduced_clock)
3757			fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
3758				reduced_clock->m2;
3759	}
3760
3761	I915_WRITE(FP0(pipe), fp);
3762
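	/* FP1 gets the reduced (downclock) dividers only for an LVDS panel
	 * with a usable downclock and powersave enabled; otherwise it is
	 * programmed with the same full-speed value as FP0. */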
3763	intel_crtc->lowfreq_avail = false;
3764	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3765	    reduced_clock && i915_powersave) {
3766		I915_WRITE(FP1(pipe), fp2);
3767		intel_crtc->lowfreq_avail = true;
3768	} else {
3769		I915_WRITE(FP1(pipe), fp);
3770	}
3771}
3772
3773static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
3774			      struct drm_display_mode *adjusted_mode)
3775{
3776	struct drm_device *dev = crtc->dev;
3777	struct drm_i915_private *dev_priv = dev->dev_private;
3778	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3779	int pipe = intel_crtc->pipe;
3780	u32 temp;
3781
3782	temp = I915_READ(LVDS);
3783	temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
3784	if (pipe == 1) {
3785		temp |= LVDS_PIPEB_SELECT;
3786	} else {
3787		temp &= ~LVDS_PIPEB_SELECT;
3788	}
3789	/* set the corresponding LVDS_BORDER bit */
3790	temp |= dev_priv->lvds_border_bits;
3791	/* Set the B0-B3 data pairs corresponding to whether we're going to
3792	 * set the DPLLs for dual-channel mode or not.
3793	 */
3794	if (clock->p2 == 7)
3795		temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
3796	else
3797		temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
3798
3799	/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
3800	 * appropriately here, but we need to look more thoroughly into how
3801	 * panels behave in the two modes.
3802	 */
3803	/* set the dithering flag on LVDS as needed */
3804	if (INTEL_INFO(dev)->gen >= 4) {
3805		if (dev_priv->lvds_dither)
3806			temp |= LVDS_ENABLE_DITHER;
3807		else
3808			temp &= ~LVDS_ENABLE_DITHER;
3809	}
3810	temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
3811	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
3812		temp |= LVDS_HSYNC_POLARITY;
3813	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
3814		temp |= LVDS_VSYNC_POLARITY;
3815	I915_WRITE(LVDS, temp);
3816}
3817
3818static void i9xx_update_pll(struct drm_crtc *crtc,
3819			    struct drm_display_mode *mode,
3820			    struct drm_display_mode *adjusted_mode,
3821			    intel_clock_t *clock, intel_clock_t *reduced_clock,
3822			    int num_connectors)
3823{
3824	struct drm_device *dev = crtc->dev;
3825	struct drm_i915_private *dev_priv = dev->dev_private;
3826	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3827	int pipe = intel_crtc->pipe;
3828	u32 dpll;
3829	bool is_sdvo;
3830
3831	is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
3832		intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
3833
3834	dpll = DPLL_VGA_MODE_DIS;
3835
3836	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
3837		dpll |= DPLLB_MODE_LVDS;
3838	else
3839		dpll |= DPLLB_MODE_DAC_SERIAL;
3840	if (is_sdvo) {
3841		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
3842		if (pixel_multiplier > 1) {
3843			if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3844				dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
3845		}
3846		dpll |= DPLL_DVO_HIGH_SPEED;
3847	}
3848	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
3849		dpll |= DPLL_DVO_HIGH_SPEED;
3850
3851	/* compute bitmask from p1 value */
3852	if (IS_PINEVIEW(dev))
3853		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
3854	else {
3855		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3856		if (IS_G4X(dev) && reduced_clock)
3857			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
3858	}
3859	switch (clock->p2) {
3860	case 5:
3861		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
3862		break;
3863	case 7:
3864		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
3865		break;
3866	case 10:
3867		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
3868		break;
3869	case 14:
3870		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
3871		break;
3872	}
3873	if (INTEL_INFO(dev)->gen >= 4)
3874		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
3875
3876	if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
3877		dpll |= PLL_REF_INPUT_TVCLKINBC;
3878	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
3879		/* XXX: just matching BIOS for now */
3880		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
3881		dpll |= 3;
3882	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3883		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
3884		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
3885	else
3886		dpll |= PLL_REF_INPUT_DREFCLK;
3887
3888	dpll |= DPLL_VCO_ENABLE;
3889	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
3890	POSTING_READ(DPLL(pipe));
3891	DELAY(150);
3892
3893	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
3894	 * This is an exception to the general rule that mode_set doesn't turn
3895	 * things on.
3896	 */
3897	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
3898		intel_update_lvds(crtc, clock, adjusted_mode);
3899
3900	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
3901		intel_dp_set_m_n(crtc, mode, adjusted_mode);
3902
3903	I915_WRITE(DPLL(pipe), dpll);
3904
3905	/* Wait for the clocks to stabilize. */
3906	POSTING_READ(DPLL(pipe));
3907	DELAY(150);
3908
3909	if (INTEL_INFO(dev)->gen >= 4) {
3910		u32 temp = 0;
3911		if (is_sdvo) {
3912			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
3913			if (temp > 1)
3914				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
3915			else
3916				temp = 0;
3917		}
3918		I915_WRITE(DPLL_MD(pipe), temp);
3919	} else {
3920		/* The pixel multiplier can only be updated once the
3921		 * DPLL is enabled and the clocks are stable.
3922		 *
3923		 * So write it again.
3924		 */
3925		I915_WRITE(DPLL(pipe), dpll);
3926	}
3927}
3928
3929static void i8xx_update_pll(struct drm_crtc *crtc,
3930			    struct drm_display_mode *adjusted_mode,
3931			    intel_clock_t *clock,
3932			    int num_connectors)
3933{
3934	struct drm_device *dev = crtc->dev;
3935	struct drm_i915_private *dev_priv = dev->dev_private;
3936	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3937	int pipe = intel_crtc->pipe;
3938	u32 dpll;
3939
3940	dpll = DPLL_VGA_MODE_DIS;
3941
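	/* Gen2 P1/P2 encoding: LVDS uses a one-hot P1 bitmask, while other
	 * outputs store P1 - 2 directly, with dedicated bits for the
	 * P1 == 2 and P2 == 4 cases. */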
3942	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3943		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3944	} else {
3945		if (clock->p1 == 2)
3946			dpll |= PLL_P1_DIVIDE_BY_TWO;
3947		else
3948			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3949		if (clock->p2 == 4)
3950			dpll |= PLL_P2_DIVIDE_BY_4;
3951	}
3952
3953	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
3954		/* XXX: just matching BIOS for now */
3955		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
3956		dpll |= 3;
3957	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3958		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
3959		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
3960	else
3961		dpll |= PLL_REF_INPUT_DREFCLK;
3962
3963	dpll |= DPLL_VCO_ENABLE;
3964	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
3965	POSTING_READ(DPLL(pipe));
3966	DELAY(150);
3967
3968	I915_WRITE(DPLL(pipe), dpll);
3969
3970	/* Wait for the clocks to stabilize. */
3971	POSTING_READ(DPLL(pipe));
3972	DELAY(150);
3973
3974	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
3975	 * This is an exception to the general rule that mode_set doesn't turn
3976	 * things on.
3977	 */
3978	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
3979		intel_update_lvds(crtc, clock, adjusted_mode);
3980
3981	/* The pixel multiplier can only be updated once the
3982	 * DPLL is enabled and the clocks are stable.
3983	 *
3984	 * So write it again.
3985	 */
3986	I915_WRITE(DPLL(pipe), dpll);
3987}
3988
3989static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
3990			      struct drm_display_mode *mode,
3991			      struct drm_display_mode *adjusted_mode,
3992			      int x, int y,
3993			      struct drm_framebuffer *old_fb)
3994{
3995	struct drm_device *dev = crtc->dev;
3996	struct drm_i915_private *dev_priv = dev->dev_private;
3997	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3998	int pipe = intel_crtc->pipe;
3999	int plane = intel_crtc->plane;
4000	int refclk, num_connectors = 0;
4001	intel_clock_t clock, reduced_clock;
4002	u32 dspcntr, pipeconf, vsyncshift;
4003	bool ok, has_reduced_clock = false, is_sdvo = false;
4004	bool is_lvds = false, is_tv = false, is_dp = false;
4005	struct drm_mode_config *mode_config = &dev->mode_config;
4006	struct intel_encoder *encoder;
4007	const intel_limit_t *limit;
4008	int ret;
4009
4010	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4011		if (encoder->base.crtc != crtc)
4012			continue;
4013
4014		switch (encoder->type) {
4015		case INTEL_OUTPUT_LVDS:
4016			is_lvds = true;
4017			break;
4018		case INTEL_OUTPUT_SDVO:
4019		case INTEL_OUTPUT_HDMI:
4020			is_sdvo = true;
4021			if (encoder->needs_tv_clock)
4022				is_tv = true;
4023			break;
4024		case INTEL_OUTPUT_TVOUT:
4025			is_tv = true;
4026			break;
4027		case INTEL_OUTPUT_DISPLAYPORT:
4028			is_dp = true;
4029			break;
4030		}
4031
4032		num_connectors++;
4033	}
4034
4035	refclk = i9xx_get_refclk(crtc, num_connectors);
4036
4037	/*
4038	 * Returns a set of divisors for the desired target clock with the given
4039	 * refclk, or false.  The returned values represent the clock equation:
4040	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4041	 */
4042	limit = intel_limit(crtc, refclk);
4043	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
4044			     &clock);
4045	if (!ok) {
4046		DRM_ERROR("Couldn't find PLL settings for mode!\n");
4047		return -EINVAL;
4048	}
4049
4050	/* Ensure that the cursor is valid for the new mode before changing... */
4051	intel_crtc_update_cursor(crtc, true);
4052
4053	if (is_lvds && dev_priv->lvds_downclock_avail) {
4054		/*
4055		 * Ensure we match the reduced clock's P to the target clock.
4056		 * If the clocks don't match, we can't switch the display clock
4057		 * by using the FP0/FP1. In such case we will disable the LVDS
4058		 * downclock feature.
4059		*/
4060		has_reduced_clock = limit->find_pll(limit, crtc,
4061						    dev_priv->lvds_downclock,
4062						    refclk,
4063						    &clock,
4064						    &reduced_clock);
4065	}
4066
4067	if (is_sdvo && is_tv)
4068		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
4069
4070	i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
4071				 &reduced_clock : NULL);
4072
4073	if (IS_GEN2(dev))
4074		i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
4075	else
4076		i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
4077				has_reduced_clock ? &reduced_clock : NULL,
4078				num_connectors);
4079
4080	/* setup pipeconf */
4081	pipeconf = I915_READ(PIPECONF(pipe));
4082
4083	/* Set up the display plane register */
4084	dspcntr = DISPPLANE_GAMMA_ENABLE;
4085
4086	if (pipe == 0)
4087		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4088	else
4089		dspcntr |= DISPPLANE_SEL_PIPE_B;
4090
4091	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
4092		/* Enable pixel doubling when the dot clock is > 90% of the (display)
4093		 * core speed.
4094		 *
4095		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4096		 * pipe == 0 check?
4097		 */
4098		if (mode->clock >
4099		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
4100			pipeconf |= PIPECONF_DOUBLE_WIDE;
4101		else
4102			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
4103	}
4104
4105	/* default to 8bpc */
4106	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
4107	if (is_dp) {
4108		if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4109			pipeconf |= PIPECONF_BPP_6 |
4110				    PIPECONF_DITHER_EN |
4111				    PIPECONF_DITHER_TYPE_SP;
4112		}
4113	}
4114
4115	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
4116	drm_mode_debug_printmodeline(mode);
4117
4118	if (HAS_PIPE_CXSR(dev)) {
4119		if (intel_crtc->lowfreq_avail) {
4120			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4121			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4122		} else {
4123			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4124			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4125		}
4126	}
4127
4128	pipeconf &= ~PIPECONF_INTERLACE_MASK;
4129	if (!IS_GEN2(dev) &&
4130	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4131		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4132		/* the chip adds 2 halflines automatically */
4133		adjusted_mode->crtc_vtotal -= 1;
4134		adjusted_mode->crtc_vblank_end -= 1;
4135		vsyncshift = adjusted_mode->crtc_hsync_start
4136			     - adjusted_mode->crtc_htotal/2;
4137	} else {
4138		pipeconf |= PIPECONF_PROGRESSIVE;
4139		vsyncshift = 0;
4140	}
4141
4142	if (!IS_GEN3(dev))
4143		I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
4144
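	/* The timing registers pack two (value - 1) fields each: the
	 * active/start value in the low 16 bits and the total/end value
	 * in the high 16 bits. */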
4145	I915_WRITE(HTOTAL(pipe),
4146		   (adjusted_mode->crtc_hdisplay - 1) |
4147		   ((adjusted_mode->crtc_htotal - 1) << 16));
4148	I915_WRITE(HBLANK(pipe),
4149		   (adjusted_mode->crtc_hblank_start - 1) |
4150		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
4151	I915_WRITE(HSYNC(pipe),
4152		   (adjusted_mode->crtc_hsync_start - 1) |
4153		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
4154
4155	I915_WRITE(VTOTAL(pipe),
4156		   (adjusted_mode->crtc_vdisplay - 1) |
4157		   ((adjusted_mode->crtc_vtotal - 1) << 16));
4158	I915_WRITE(VBLANK(pipe),
4159		   (adjusted_mode->crtc_vblank_start - 1) |
4160		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
4161	I915_WRITE(VSYNC(pipe),
4162		   (adjusted_mode->crtc_vsync_start - 1) |
4163		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
4164
4165	/* pipesrc and dspsize control the size that is scaled from,
4166	 * which should always be the user's requested size.
4167	 */
4168	I915_WRITE(DSPSIZE(plane),
4169		   ((mode->vdisplay - 1) << 16) |
4170		   (mode->hdisplay - 1));
4171	I915_WRITE(DSPPOS(plane), 0);
4172	I915_WRITE(PIPESRC(pipe),
4173		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4174
4175	I915_WRITE(PIPECONF(pipe), pipeconf);
4176	POSTING_READ(PIPECONF(pipe));
4177	intel_enable_pipe(dev_priv, pipe, false);
4178
4179	intel_wait_for_vblank(dev, pipe);
4180
4181	I915_WRITE(DSPCNTR(plane), dspcntr);
4182	POSTING_READ(DSPCNTR(plane));
4183
4184	ret = intel_pipe_set_base(crtc, x, y, old_fb);
4185
4186	intel_update_watermarks(dev);
4187
4188	return ret;
4189}
4190
4191/*
4192 * Initialize reference clocks when the driver loads
4193 */
4194void ironlake_init_pch_refclk(struct drm_device *dev)
4195{
4196	struct drm_i915_private *dev_priv = dev->dev_private;
4197	struct drm_mode_config *mode_config = &dev->mode_config;
4198	struct intel_encoder *encoder;
4199	u32 temp;
4200	bool has_lvds = false;
4201	bool has_cpu_edp = false;
4202	bool has_pch_edp = false;
4203	bool has_panel = false;
4204	bool has_ck505 = false;
4205	bool can_ssc = false;
4206
4207	/* We need to take the global config into account */
4208	list_for_each_entry(encoder, &mode_config->encoder_list,
4209			    base.head) {
4210		switch (encoder->type) {
4211		case INTEL_OUTPUT_LVDS:
4212			has_panel = true;
4213			has_lvds = true;
4214			break;
4215		case INTEL_OUTPUT_EDP:
4216			has_panel = true;
4217			if (intel_encoder_is_pch_edp(&encoder->base))
4218				has_pch_edp = true;
4219			else
4220				has_cpu_edp = true;
4221			break;
4222		}
4223	}
4224
4225	if (HAS_PCH_IBX(dev)) {
4226		has_ck505 = dev_priv->display_clock_mode;
4227		can_ssc = has_ck505;
4228	} else {
4229		has_ck505 = false;
4230		can_ssc = true;
4231	}
4232
4233	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
4234		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
4235		      has_ck505);
4236
4237	/* Ironlake: try to set up the display reference clock before
4238	 * enabling the DPLLs.  This is only under the driver's control
4239	 * from PCH stepping B onwards; earlier chipset steppings ignore
4240	 * this setting.
4241	 */
4242	temp = I915_READ(PCH_DREF_CONTROL);
4243	/* Always enable nonspread source */
4244	temp &= ~DREF_NONSPREAD_SOURCE_MASK;
4245
4246	if (has_ck505)
4247		temp |= DREF_NONSPREAD_CK505_ENABLE;
4248	else
4249		temp |= DREF_NONSPREAD_SOURCE_ENABLE;
4250
4251	if (has_panel) {
4252		temp &= ~DREF_SSC_SOURCE_MASK;
4253		temp |= DREF_SSC_SOURCE_ENABLE;
4254
4255		/* SSC must be turned on before enabling the CPU output  */
4256		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
4257			DRM_DEBUG_KMS("Using SSC on panel\n");
4258			temp |= DREF_SSC1_ENABLE;
4259		} else
4260			temp &= ~DREF_SSC1_ENABLE;
4261
4262		/* Get SSC going before enabling the outputs */
4263		I915_WRITE(PCH_DREF_CONTROL, temp);
4264		POSTING_READ(PCH_DREF_CONTROL);
4265		DELAY(200);
4266
4267		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4268
4269		/* Enable CPU source on CPU attached eDP */
4270		if (has_cpu_edp) {
4271			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
4272				DRM_DEBUG_KMS("Using SSC on eDP\n");
4273				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4274			}
4275			else
4276				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4277		} else
4278			temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4279
4280		I915_WRITE(PCH_DREF_CONTROL, temp);
4281		POSTING_READ(PCH_DREF_CONTROL);
4282		DELAY(200);
4283	} else {
4284		DRM_DEBUG_KMS("Disabling SSC entirely\n");
4285
4286		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4287
4288		/* Turn off CPU output */
4289		temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4290
4291		I915_WRITE(PCH_DREF_CONTROL, temp);
4292		POSTING_READ(PCH_DREF_CONTROL);
4293		DELAY(200);
4294
4295		/* Turn off the SSC source */
4296		temp &= ~DREF_SSC_SOURCE_MASK;
4297		temp |= DREF_SSC_SOURCE_DISABLE;
4298
4299		/* Turn off SSC1 */
4300		temp &= ~DREF_SSC1_ENABLE;
4301
4302		I915_WRITE(PCH_DREF_CONTROL, temp);
4303		POSTING_READ(PCH_DREF_CONTROL);
4304		DELAY(200);
4305	}
4306}
4307
4308static int ironlake_get_refclk(struct drm_crtc *crtc)
4309{
4310	struct drm_device *dev = crtc->dev;
4311	struct drm_i915_private *dev_priv = dev->dev_private;
4312	struct intel_encoder *encoder;
4313	struct drm_mode_config *mode_config = &dev->mode_config;
4314	struct intel_encoder *edp_encoder = NULL;
4315	int num_connectors = 0;
4316	bool is_lvds = false;
4317
4318	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4319		if (encoder->base.crtc != crtc)
4320			continue;
4321
4322		switch (encoder->type) {
4323		case INTEL_OUTPUT_LVDS:
4324			is_lvds = true;
4325			break;
4326		case INTEL_OUTPUT_EDP:
4327			edp_encoder = encoder;
4328			break;
4329		}
4330		num_connectors++;
4331	}
4332
4333	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4334		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4335			      dev_priv->lvds_ssc_freq);
4336		return dev_priv->lvds_ssc_freq * 1000;
4337	}
4338
4339	return 120000;
4340}
4341
4342static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4343				  struct drm_display_mode *mode,
4344				  struct drm_display_mode *adjusted_mode,
4345				  int x, int y,
4346				  struct drm_framebuffer *old_fb)
4347{
4348	struct drm_device *dev = crtc->dev;
4349	struct drm_i915_private *dev_priv = dev->dev_private;
4350	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4351	int pipe = intel_crtc->pipe;
4352	int plane = intel_crtc->plane;
4353	int refclk, num_connectors = 0;
4354	intel_clock_t clock, reduced_clock;
4355	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
4356	bool ok, has_reduced_clock = false, is_sdvo = false;
4357	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
4358	struct drm_mode_config *mode_config = &dev->mode_config;
4359	struct intel_encoder *encoder, *edp_encoder = NULL;
4360	const intel_limit_t *limit;
4361	int ret;
4362	struct fdi_m_n m_n = {0};
4363	u32 temp;
4364	int target_clock, pixel_multiplier, lane, link_bw, factor;
4365	unsigned int pipe_bpp;
4366	bool dither;
4367	bool is_cpu_edp = false, is_pch_edp = false;
4368
4369	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4370		if (encoder->base.crtc != crtc)
4371			continue;
4372
4373		switch (encoder->type) {
4374		case INTEL_OUTPUT_LVDS:
4375			is_lvds = true;
4376			break;
4377		case INTEL_OUTPUT_SDVO:
4378		case INTEL_OUTPUT_HDMI:
4379			is_sdvo = true;
4380			if (encoder->needs_tv_clock)
4381				is_tv = true;
4382			break;
4383		case INTEL_OUTPUT_TVOUT:
4384			is_tv = true;
4385			break;
4386		case INTEL_OUTPUT_ANALOG:
4387			is_crt = true;
4388			break;
4389		case INTEL_OUTPUT_DISPLAYPORT:
4390			is_dp = true;
4391			break;
4392		case INTEL_OUTPUT_EDP:
4393			is_dp = true;
4394			if (intel_encoder_is_pch_edp(&encoder->base))
4395				is_pch_edp = true;
4396			else
4397				is_cpu_edp = true;
4398			edp_encoder = encoder;
4399			break;
4400		}
4401
4402		num_connectors++;
4403	}
4404
4405	refclk = ironlake_get_refclk(crtc);
4406
4407	/*
4408	 * Returns a set of divisors for the desired target clock with the given
4409	 * refclk, or false.  The returned values represent the clock equation:
4410	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4411	 */
4412	limit = intel_limit(crtc, refclk);
4413	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
4414			     &clock);
4415	if (!ok) {
4416		DRM_ERROR("Couldn't find PLL settings for mode!\n");
4417		return -EINVAL;
4418	}
4419
4420	/* Ensure that the cursor is valid for the new mode before changing... */
4421	intel_crtc_update_cursor(crtc, true);
4422
4423	if (is_lvds && dev_priv->lvds_downclock_avail) {
4424		/*
4425		 * Ensure we match the reduced clock's P to the target clock.
4426		 * If the clocks don't match, we can't switch the display clock
4427		 * by using the FP0/FP1. In such case we will disable the LVDS
4428		 * downclock feature.
4429		*/
4430		has_reduced_clock = limit->find_pll(limit, crtc,
4431						    dev_priv->lvds_downclock,
4432						    refclk,
4433						    &clock,
4434						    &reduced_clock);
4435	}
4436	/* SDVO TV has fixed PLL values that depend on its clock range;
4437	   this mirrors the VBIOS setting. */
4438	if (is_sdvo && is_tv) {
4439		if (adjusted_mode->clock >= 100000
4440		    && adjusted_mode->clock < 140500) {
4441			clock.p1 = 2;
4442			clock.p2 = 10;
4443			clock.n = 3;
4444			clock.m1 = 16;
4445			clock.m2 = 8;
4446		} else if (adjusted_mode->clock >= 140500
4447			   && adjusted_mode->clock <= 200000) {
4448			clock.p1 = 1;
4449			clock.p2 = 10;
4450			clock.n = 6;
4451			clock.m1 = 12;
4452			clock.m2 = 8;
4453		}
4454	}
4455
4456	/* FDI link */
4457	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4458	lane = 0;
4459	/* CPU eDP doesn't require an FDI link, so just set DP M/N
4460	   according to the current link config */
4461	if (is_cpu_edp) {
4462		target_clock = mode->clock;
4463		intel_edp_link_config(edp_encoder, &lane, &link_bw);
4464	} else {
4465		/* [e]DP over FDI requires target mode clock
4466		   instead of link clock */
4467		if (is_dp)
4468			target_clock = mode->clock;
4469		else
4470			target_clock = adjusted_mode->clock;
4471
4472		/* FDI is a binary signal running at ~2.7GHz, encoding
4473		 * each output octet as 10 bits. The actual frequency
4474		 * is stored as a divider into a 100MHz clock, and the
4475		 * mode pixel clock is stored in units of 1KHz.
4476		 * Hence the bw of each lane in terms of the mode signal
4477		 * is:
4478		 */
4479		link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
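		/* With the usual 2.7 GHz FDI link (register value 27) this
		 * works out to 27 * 100000 / 10 = 270000, i.e. 270 MHz of
		 * payload bandwidth per lane in the same kHz units as the
		 * mode clock. */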
4480	}
4481
4482	/* determine panel color depth */
4483	temp = I915_READ(PIPECONF(pipe));
4484	temp &= ~PIPE_BPC_MASK;
4485	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
4486	switch (pipe_bpp) {
4487	case 18:
4488		temp |= PIPE_6BPC;
4489		break;
4490	case 24:
4491		temp |= PIPE_8BPC;
4492		break;
4493	case 30:
4494		temp |= PIPE_10BPC;
4495		break;
4496	case 36:
4497		temp |= PIPE_12BPC;
4498		break;
4499	default:
4500		printf("intel_choose_pipe_bpp returned invalid value %d\n",
4501			pipe_bpp);
4502		temp |= PIPE_8BPC;
4503		pipe_bpp = 24;
4504		break;
4505	}
4506
4507	intel_crtc->bpp = pipe_bpp;
4508	I915_WRITE(PIPECONF(pipe), temp);
4509
4510	if (!lane) {
4511		/*
4512		 * Account for spread spectrum to avoid
4513		 * oversubscribing the link. Max center spread
4514		 * is 2.5%; use 5% for safety's sake.
4515		 */
4516		u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
4517		lane = bps / (link_bw * 8) + 1;
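		/* bps is the padded payload rate in kbit/s and link_bw * 8
		 * the per-lane payload rate (the symbol overhead is already
		 * factored out of link_bw), so the +1 rounds the required
		 * lane count up. */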
4518	}
4519
4520	intel_crtc->fdi_lanes = lane;
4521
4522	if (pixel_multiplier > 1)
4523		link_bw *= pixel_multiplier;
4524	ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
4525			     &m_n);
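	/* m_n now holds the data and link M/N ratios that are programmed
	 * into PIPE_DATA_M1/N1 and PIPE_LINK_M1/N1 further down. */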
4526
4527	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
4528	if (has_reduced_clock)
4529		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
4530			reduced_clock.m2;
4531
4532	/* Enable autotuning of the PLL clock (if permissible) */
4533	factor = 21;
4534	if (is_lvds) {
4535		if ((intel_panel_use_ssc(dev_priv) &&
4536		     dev_priv->lvds_ssc_freq == 100) ||
4537		    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
4538			factor = 25;
4539	} else if (is_sdvo && is_tv)
4540		factor = 20;
4541
4542	if (clock.m < factor * clock.n)
4543		fp |= FP_CB_TUNE;
4544
4545	dpll = 0;
4546
4547	if (is_lvds)
4548		dpll |= DPLLB_MODE_LVDS;
4549	else
4550		dpll |= DPLLB_MODE_DAC_SERIAL;
4551	if (is_sdvo) {
4552		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4553		if (pixel_multiplier > 1) {
4554			dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
4555		}
4556		dpll |= DPLL_DVO_HIGH_SPEED;
4557	}
4558	if (is_dp && !is_cpu_edp)
4559		dpll |= DPLL_DVO_HIGH_SPEED;
4560
4561	/* compute bitmask from p1 value */
4562	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4563	/* also FPA1 */
4564	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4565
4566	switch (clock.p2) {
4567	case 5:
4568		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4569		break;
4570	case 7:
4571		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4572		break;
4573	case 10:
4574		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4575		break;
4576	case 14:
4577		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4578		break;
4579	}
4580
4581	if (is_sdvo && is_tv)
4582		dpll |= PLL_REF_INPUT_TVCLKINBC;
4583	else if (is_tv)
4584		/* XXX: just matching BIOS for now */
4585		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
4586		dpll |= 3;
4587	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4588		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4589	else
4590		dpll |= PLL_REF_INPUT_DREFCLK;
4591
4592	/* setup pipeconf */
4593	pipeconf = I915_READ(PIPECONF(pipe));
4594
4595	/* Set up the display plane register */
4596	dspcntr = DISPPLANE_GAMMA_ENABLE;
4597	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
4598	drm_mode_debug_printmodeline(mode);
4599
4600	/* CPU eDP is the only output that doesn't need a PCH PLL of its own on
4601	 * pre-Haswell/LPT generation */
4602	if (HAS_PCH_LPT(dev)) {
4603		DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
4604				pipe);
4605	} else if (!is_cpu_edp) {
4606		struct intel_pch_pll *pll;
4607
4608		pll = intel_get_pch_pll(intel_crtc, dpll, fp);
4609		if (pll == NULL) {
4610			DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
4611					 pipe);
4612			return -EINVAL;
4613		}
4614	} else
4615		intel_put_pch_pll(intel_crtc);
4616
4617	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
4618	 * This is an exception to the general rule that mode_set doesn't turn
4619	 * things on.
4620	 */
4621	if (is_lvds) {
4622		temp = I915_READ(PCH_LVDS);
4623		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
4624		if (HAS_PCH_CPT(dev)) {
4625			temp &= ~PORT_TRANS_SEL_MASK;
4626			temp |= PORT_TRANS_SEL_CPT(pipe);
4627		} else {
4628			if (pipe == 1)
4629				temp |= LVDS_PIPEB_SELECT;
4630			else
4631				temp &= ~LVDS_PIPEB_SELECT;
4632		}
4633
4634		/* set the corresponding LVDS_BORDER bit */
4635		temp |= dev_priv->lvds_border_bits;
4636		/* Set the B0-B3 data pairs corresponding to whether we're going to
4637		 * set the DPLLs for dual-channel mode or not.
4638		 */
4639		if (clock.p2 == 7)
4640			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
4641		else
4642			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
4643
4644		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4645		 * appropriately here, but we need to look more thoroughly into how
4646		 * panels behave in the two modes.
4647		 */
4648		temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
4649		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
4650			temp |= LVDS_HSYNC_POLARITY;
4651		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
4652			temp |= LVDS_VSYNC_POLARITY;
4653		I915_WRITE(PCH_LVDS, temp);
4654	}
4655
4656	pipeconf &= ~PIPECONF_DITHER_EN;
4657	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
4658	if ((is_lvds && dev_priv->lvds_dither) || dither) {
4659		pipeconf |= PIPECONF_DITHER_EN;
4660		pipeconf |= PIPECONF_DITHER_TYPE_SP;
4661	}
4662	if (is_dp && !is_cpu_edp) {
4663		intel_dp_set_m_n(crtc, mode, adjusted_mode);
4664	} else {
4665		/* For non-DP output, clear any trans DP clock recovery setting.*/
4666		I915_WRITE(TRANSDATA_M1(pipe), 0);
4667		I915_WRITE(TRANSDATA_N1(pipe), 0);
4668		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
4669		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
4670	}
4671
4672	if (intel_crtc->pch_pll) {
4673		I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
4674
4675		/* Wait for the clocks to stabilize. */
4676		POSTING_READ(intel_crtc->pch_pll->pll_reg);
4677		DELAY(150);
4678
4679		/* The pixel multiplier can only be updated once the
4680		 * DPLL is enabled and the clocks are stable.
4681		 *
4682		 * So write it again.
4683		 */
4684		I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
4685	}
4686
4687	intel_crtc->lowfreq_avail = false;
4688	if (intel_crtc->pch_pll) {
4689		if (is_lvds && has_reduced_clock && i915_powersave) {
4690			I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
4691			intel_crtc->lowfreq_avail = true;
4692			if (HAS_PIPE_CXSR(dev)) {
4693				DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4694				pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4695			}
4696		} else {
4697			I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
4698			if (HAS_PIPE_CXSR(dev)) {
4699				DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4700				pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4701			}
4702		}
4703	}
4704
4705	pipeconf &= ~PIPECONF_INTERLACE_MASK;
4706	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4707		pipeconf |= PIPECONF_INTERLACED_ILK;
4708		/* the chip adds 2 halflines automatically */
4709		adjusted_mode->crtc_vtotal -= 1;
4710		adjusted_mode->crtc_vblank_end -= 1;
4711		I915_WRITE(VSYNCSHIFT(pipe),
4712			   adjusted_mode->crtc_hsync_start
4713			   - adjusted_mode->crtc_htotal/2);
4714	} else {
4715		pipeconf |= PIPECONF_PROGRESSIVE;
4716		I915_WRITE(VSYNCSHIFT(pipe), 0);
4717	}
4718
4719	I915_WRITE(HTOTAL(pipe),
4720		   (adjusted_mode->crtc_hdisplay - 1) |
4721		   ((adjusted_mode->crtc_htotal - 1) << 16));
4722	I915_WRITE(HBLANK(pipe),
4723		   (adjusted_mode->crtc_hblank_start - 1) |
4724		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
4725	I915_WRITE(HSYNC(pipe),
4726		   (adjusted_mode->crtc_hsync_start - 1) |
4727		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
4728
4729	I915_WRITE(VTOTAL(pipe),
4730		   (adjusted_mode->crtc_vdisplay - 1) |
4731		   ((adjusted_mode->crtc_vtotal - 1) << 16));
4732	I915_WRITE(VBLANK(pipe),
4733		   (adjusted_mode->crtc_vblank_start - 1) |
4734		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
4735	I915_WRITE(VSYNC(pipe),
4736		   (adjusted_mode->crtc_vsync_start - 1) |
4737		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
4738
4739	/* pipesrc controls the size that is scaled from, which should
4740	 * always be the user's requested size.
4741	 */
4742	I915_WRITE(PIPESRC(pipe),
4743		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4744
4745	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
4746	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
4747	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
4748	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
4749
4750	if (is_cpu_edp)
4751		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
4752
4753	I915_WRITE(PIPECONF(pipe), pipeconf);
4754	POSTING_READ(PIPECONF(pipe));
4755
4756	intel_wait_for_vblank(dev, pipe);
4757
4758	I915_WRITE(DSPCNTR(plane), dspcntr);
4759	POSTING_READ(DSPCNTR(plane));
4760
4761	ret = intel_pipe_set_base(crtc, x, y, old_fb);
4762
4763	intel_update_watermarks(dev);
4764
4765	intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
4766
4767	return ret;
4768}
4769
4770static int intel_crtc_mode_set(struct drm_crtc *crtc,
4771			       struct drm_display_mode *mode,
4772			       struct drm_display_mode *adjusted_mode,
4773			       int x, int y,
4774			       struct drm_framebuffer *old_fb)
4775{
4776	struct drm_device *dev = crtc->dev;
4777	struct drm_i915_private *dev_priv = dev->dev_private;
4778	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4779	int pipe = intel_crtc->pipe;
4780	int ret;
4781
4782	drm_vblank_pre_modeset(dev, pipe);
4783
4784	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
4785					      x, y, old_fb);
4786	drm_vblank_post_modeset(dev, pipe);
4787
4788	if (ret)
4789		intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
4790	else
4791		intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
4792
4793	return ret;
4794}
4795
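/*
 * Returns true when the hardware ELD valid bit and the latched ELD buffer
 * already match connector->eld, so the caller can skip reprogramming it.
 */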
4796static bool intel_eld_uptodate(struct drm_connector *connector,
4797			       int reg_eldv, uint32_t bits_eldv,
4798			       int reg_elda, uint32_t bits_elda,
4799			       int reg_edid)
4800{
4801	struct drm_i915_private *dev_priv = connector->dev->dev_private;
4802	uint8_t *eld = connector->eld;
4803	uint32_t i;
4804
4805	i = I915_READ(reg_eldv);
4806	i &= bits_eldv;
4807
4808	if (!eld[0])
4809		return !i;
4810
4811	if (!i)
4812		return false;
4813
4814	i = I915_READ(reg_elda);
4815	i &= ~bits_elda;
4816	I915_WRITE(reg_elda, i);
4817
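	/* The ELD address pointer was just reset; the data register appears
	 * to auto-increment, so each read below returns the next dword of
	 * the hardware ELD. */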
4818	for (i = 0; i < eld[2]; i++)
4819		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
4820			return false;
4821
4822	return true;
4823}
4824
4825static void g4x_write_eld(struct drm_connector *connector,
4826			  struct drm_crtc *crtc)
4827{
4828	struct drm_i915_private *dev_priv = connector->dev->dev_private;
4829	uint8_t *eld = connector->eld;
4830	uint32_t eldv;
4831	uint32_t len;
4832	uint32_t i;
4833
4834	i = I915_READ(G4X_AUD_VID_DID);
4835
4836	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
4837		eldv = G4X_ELDV_DEVCL_DEVBLC;
4838	else
4839		eldv = G4X_ELDV_DEVCTG;
4840
4841	if (intel_eld_uptodate(connector,
4842			       G4X_AUD_CNTL_ST, eldv,
4843			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
4844			       G4X_HDMIW_HDMIEDID))
4845		return;
4846
4847	i = I915_READ(G4X_AUD_CNTL_ST);
4848	i &= ~(eldv | G4X_ELD_ADDR);
4849	len = (i >> 9) & 0x1f;		/* ELD buffer size */
4850	I915_WRITE(G4X_AUD_CNTL_ST, i);
4851
4852	if (!eld[0])
4853		return;
4854
4855	if (eld[2] < (uint8_t)len)
4856		len = eld[2];
4857	DRM_DEBUG_KMS("ELD size %d\n", len);
4858	for (i = 0; i < len; i++)
4859		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
4860
4861	i = I915_READ(G4X_AUD_CNTL_ST);
4862	i |= eldv;
4863	I915_WRITE(G4X_AUD_CNTL_ST, i);
4864}
4865
4866static void ironlake_write_eld(struct drm_connector *connector,
4867				     struct drm_crtc *crtc)
4868{
4869	struct drm_i915_private *dev_priv = connector->dev->dev_private;
4870	uint8_t *eld = connector->eld;
4871	uint32_t eldv;
4872	uint32_t i;
4873	int len;
4874	int hdmiw_hdmiedid;
4875	int aud_config;
4876	int aud_cntl_st;
4877	int aud_cntrl_st2;
4878
4879	if (HAS_PCH_IBX(connector->dev)) {
4880		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
4881		aud_config = IBX_AUD_CONFIG_A;
4882		aud_cntl_st = IBX_AUD_CNTL_ST_A;
4883		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
4884	} else {
4885		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
4886		aud_config = CPT_AUD_CONFIG_A;
4887		aud_cntl_st = CPT_AUD_CNTL_ST_A;
4888		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
4889	}
4890
4891	i = to_intel_crtc(crtc)->pipe;
4892	hdmiw_hdmiedid += i * 0x100;
4893	aud_cntl_st += i * 0x100;
4894	aud_config += i * 0x100;
4895
4896	DRM_DEBUG_KMS("ELD on pipe %c\n", pipe_name(i));
4897
4898	i = I915_READ(aud_cntl_st);
4899	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
4900	if (!i) {
4901		DRM_DEBUG_KMS("Audio directed to unknown port\n");
4902		/* operate blindly on all ports */
4903		eldv = IBX_ELD_VALIDB;
4904		eldv |= IBX_ELD_VALIDB << 4;
4905		eldv |= IBX_ELD_VALIDB << 8;
4906	} else {
4907		DRM_DEBUG_KMS("ELD on port %c\n", 'A' + i);
4908		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
4909	}
4910
4911	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
4912		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
4913		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
4914		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
4915	} else
4916		I915_WRITE(aud_config, 0);
4917
4918	if (intel_eld_uptodate(connector,
4919			       aud_cntrl_st2, eldv,
4920			       aud_cntl_st, IBX_ELD_ADDRESS,
4921			       hdmiw_hdmiedid))
4922		return;
4923
4924	i = I915_READ(aud_cntrl_st2);
4925	i &= ~eldv;
4926	I915_WRITE(aud_cntrl_st2, i);
4927
4928	if (!eld[0])
4929		return;
4930
4931	i = I915_READ(aud_cntl_st);
4932	i &= ~IBX_ELD_ADDRESS;
4933	I915_WRITE(aud_cntl_st, i);
4934
4935	/* 84 bytes of hw ELD buffer */
4936	len = 21;
4937	if (eld[2] < (uint8_t)len)
4938		len = eld[2];
4939	DRM_DEBUG_KMS("ELD size %d\n", len);
4940	for (i = 0; i < len; i++)
4941		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
4942
4943	i = I915_READ(aud_cntrl_st2);
4944	i |= eldv;
4945	I915_WRITE(aud_cntrl_st2, i);
4946}
4947
4948void intel_write_eld(struct drm_encoder *encoder,
4949		     struct drm_display_mode *mode)
4950{
4951	struct drm_crtc *crtc = encoder->crtc;
4952	struct drm_connector *connector;
4953	struct drm_device *dev = encoder->dev;
4954	struct drm_i915_private *dev_priv = dev->dev_private;
4955
4956	connector = drm_select_eld(encoder, mode);
4957	if (!connector)
4958		return;
4959
4960	DRM_DEBUG_KMS("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
4961			 connector->base.id,
4962			 drm_get_connector_name(connector),
4963			 connector->encoder->base.id,
4964			 drm_get_encoder_name(connector->encoder));
4965
4966	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
4967
4968	if (dev_priv->display.write_eld)
4969		dev_priv->display.write_eld(connector, crtc);
4970}
4971
4972/** Loads the palette/gamma unit for the CRTC with the prepared values */
4973void intel_crtc_load_lut(struct drm_crtc *crtc)
4974{
4975	struct drm_device *dev = crtc->dev;
4976	struct drm_i915_private *dev_priv = dev->dev_private;
4977	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4978	int palreg = PALETTE(intel_crtc->pipe);
4979	int i;
4980
4981	/* The clocks have to be on to load the palette. */
4982	if (!crtc->enabled || !intel_crtc->active)
4983		return;
4984
4985	/* use legacy palette for Ironlake */
4986	if (HAS_PCH_SPLIT(dev))
4987		palreg = LGC_PALETTE(intel_crtc->pipe);
4988
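	/* Each palette entry is a single 32-bit register: red in bits
	 * 23:16, green in 15:8 and blue in 7:0. */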
4989	for (i = 0; i < 256; i++) {
4990		I915_WRITE(palreg + 4 * i,
4991			   (intel_crtc->lut_r[i] << 16) |
4992			   (intel_crtc->lut_g[i] << 8) |
4993			   intel_crtc->lut_b[i]);
4994	}
4995}
4996
4997static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
4998{
4999	struct drm_device *dev = crtc->dev;
5000	struct drm_i915_private *dev_priv = dev->dev_private;
5001	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5002	bool visible = base != 0;
5003	u32 cntl;
5004
5005	if (intel_crtc->cursor_visible == visible)
5006		return;
5007
5008	cntl = I915_READ(_CURACNTR);
5009	if (visible) {
5010		/* On these chipsets we can only modify the base whilst
5011		 * the cursor is disabled.
5012		 */
5013		I915_WRITE(_CURABASE, base);
5014
5015		cntl &= ~(CURSOR_FORMAT_MASK);
5016		/* XXX width must be 64, stride 256 => 0x00 << 28 */
5017		cntl |= CURSOR_ENABLE |
5018			CURSOR_GAMMA_ENABLE |
5019			CURSOR_FORMAT_ARGB;
5020	} else
5021		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
5022	I915_WRITE(_CURACNTR, cntl);
5023
5024	intel_crtc->cursor_visible = visible;
5025}
5026
5027static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
5028{
5029	struct drm_device *dev = crtc->dev;
5030	struct drm_i915_private *dev_priv = dev->dev_private;
5031	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5032	int pipe = intel_crtc->pipe;
5033	bool visible = base != 0;
5034
5035	if (intel_crtc->cursor_visible != visible) {
5036		uint32_t cntl = I915_READ(CURCNTR(pipe));
5037		if (base) {
5038			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
5039			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
5040			cntl |= pipe << 28; /* Connect to correct pipe */
5041		} else {
5042			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
5043			cntl |= CURSOR_MODE_DISABLE;
5044		}
5045		I915_WRITE(CURCNTR(pipe), cntl);
5046
5047		intel_crtc->cursor_visible = visible;
5048	}
5049	/* and commit changes on next vblank */
5050	I915_WRITE(CURBASE(pipe), base);
5051}
5052
5053static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
5054{
5055	struct drm_device *dev = crtc->dev;
5056	struct drm_i915_private *dev_priv = dev->dev_private;
5057	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5058	int pipe = intel_crtc->pipe;
5059	bool visible = base != 0;
5060
5061	if (intel_crtc->cursor_visible != visible) {
5062		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
5063		if (base) {
5064			cntl &= ~CURSOR_MODE;
5065			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
5066		} else {
5067			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
5068			cntl |= CURSOR_MODE_DISABLE;
5069		}
5070		I915_WRITE(CURCNTR_IVB(pipe), cntl);
5071
5072		intel_crtc->cursor_visible = visible;
5073	}
5074	/* and commit changes on next vblank */
5075	I915_WRITE(CURBASE_IVB(pipe), base);
5076}
5077
5078/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
5079static void intel_crtc_update_cursor(struct drm_crtc *crtc,
5080				     bool on)
5081{
5082	struct drm_device *dev = crtc->dev;
5083	struct drm_i915_private *dev_priv = dev->dev_private;
5084	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5085	int pipe = intel_crtc->pipe;
5086	int x = intel_crtc->cursor_x;
5087	int y = intel_crtc->cursor_y;
5088	u32 base, pos;
5089	bool visible;
5090
5091	pos = 0;
5092
5093	if (on && crtc->enabled && crtc->fb) {
5094		base = intel_crtc->cursor_addr;
5095		if (x > (int) crtc->fb->width)
5096			base = 0;
5097
5098		if (y > (int) crtc->fb->height)
5099			base = 0;
5100	} else
5101		base = 0;
5102
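	/* CURPOS uses sign/magnitude encoding: a negative coordinate sets
	 * the per-axis sign bit and stores the absolute value. */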
5103	if (x < 0) {
5104		if (x + intel_crtc->cursor_width < 0)
5105			base = 0;
5106
5107		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
5108		x = -x;
5109	}
5110	pos |= x << CURSOR_X_SHIFT;
5111
5112	if (y < 0) {
5113		if (y + intel_crtc->cursor_height < 0)
5114			base = 0;
5115
5116		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
5117		y = -y;
5118	}
5119	pos |= y << CURSOR_Y_SHIFT;
5120
5121	visible = base != 0;
5122	if (!visible && !intel_crtc->cursor_visible)
5123		return;
5124
5125	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
5126		I915_WRITE(CURPOS_IVB(pipe), pos);
5127		ivb_update_cursor(crtc, base);
5128	} else {
5129		I915_WRITE(CURPOS(pipe), pos);
5130		if (IS_845G(dev) || IS_I865G(dev))
5131			i845_update_cursor(crtc, base);
5132		else
5133			i9xx_update_cursor(crtc, base);
5134	}
5135}
5136
5137static int intel_crtc_cursor_set(struct drm_crtc *crtc,
5138				 struct drm_file *file,
5139				 uint32_t handle,
5140				 uint32_t width, uint32_t height)
5141{
5142	struct drm_device *dev = crtc->dev;
5143	struct drm_i915_private *dev_priv = dev->dev_private;
5144	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5145	struct drm_i915_gem_object *obj;
5146	uint32_t addr;
5147	int ret;
5148
5149	DRM_DEBUG_KMS("\n");
5150
5151	/* if we want to turn off the cursor ignore width and height */
5152	if (!handle) {
5153		DRM_DEBUG_KMS("cursor off\n");
5154		addr = 0;
5155		obj = NULL;
5156		DRM_LOCK(dev);
5157		goto finish;
5158	}
5159
5160	/* Currently we only support 64x64 cursors */
5161	if (width != 64 || height != 64) {
5162		DRM_ERROR("we currently only support 64x64 cursors\n");
5163		return -EINVAL;
5164	}
5165
5166	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
5167	if (&obj->base == NULL)
5168		return -ENOENT;
5169
5170	if (obj->base.size < width * height * 4) {
5171		DRM_ERROR("buffer is too small\n");
5172		ret = -ENOMEM;
5173		goto fail;
5174	}
5175
5176	/* we only need to pin inside GTT if cursor is non-phy */
5177	DRM_LOCK(dev);
5178	if (!dev_priv->info->cursor_needs_physical) {
5179		if (obj->tiling_mode) {
5180			DRM_ERROR("cursor cannot be tiled\n");
5181			ret = -EINVAL;
5182			goto fail_locked;
5183		}
5184
5185		ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
5186		if (ret) {
5187			DRM_ERROR("failed to move cursor bo into the GTT\n");
5188			goto fail_locked;
5189		}
5190
5191		ret = i915_gem_object_put_fence(obj);
5192		if (ret) {
5193			DRM_ERROR("failed to release fence for cursor\n");
5194			goto fail_unpin;
5195		}
5196
5197		addr = obj->gtt_offset;
5198	} else {
5199		int align = IS_I830(dev) ? 16 * 1024 : 256;
5200		ret = i915_gem_attach_phys_object(dev, obj,
5201						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
5202						  align);
5203		if (ret) {
5204			DRM_ERROR("failed to attach phys object\n");
5205			goto fail_locked;
5206		}
5207		addr = obj->phys_obj->handle->busaddr;
5208	}
5209
5210	if (IS_GEN2(dev))
5211		I915_WRITE(CURSIZE, (height << 12) | width);
5212
5213 finish:
5214	if (intel_crtc->cursor_bo) {
5215		if (dev_priv->info->cursor_needs_physical) {
5216			if (intel_crtc->cursor_bo != obj)
5217				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
5218		} else
5219			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
5220		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
5221	}
5222
5223	DRM_UNLOCK(dev);
5224
5225	intel_crtc->cursor_addr = addr;
5226	intel_crtc->cursor_bo = obj;
5227	intel_crtc->cursor_width = width;
5228	intel_crtc->cursor_height = height;
5229
5230	intel_crtc_update_cursor(crtc, true);
5231
5232	return 0;
5233fail_unpin:
5234	i915_gem_object_unpin_from_display_plane(obj);
5235fail_locked:
5236	DRM_UNLOCK(dev);
5237fail:
5238	drm_gem_object_unreference_unlocked(&obj->base);
5239	return ret;
5240}
5241
5242static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
5243{
5244	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5245
5246	intel_crtc->cursor_x = x;
5247	intel_crtc->cursor_y = y;
5248
5249	intel_crtc_update_cursor(crtc, true);
5250
5251	return 0;
5252}
5253
5254/** Sets the color ramps on behalf of RandR */
5255void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
5256				 u16 blue, int regno)
5257{
5258	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5259
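	/* The hardware LUT holds 8 bits per channel, so the 16-bit values
	 * supplied by RandR are truncated to their high byte. */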
5260	intel_crtc->lut_r[regno] = red >> 8;
5261	intel_crtc->lut_g[regno] = green >> 8;
5262	intel_crtc->lut_b[regno] = blue >> 8;
5263}
5264
5265void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
5266			     u16 *blue, int regno)
5267{
5268	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5269
5270	*red = intel_crtc->lut_r[regno] << 8;
5271	*green = intel_crtc->lut_g[regno] << 8;
5272	*blue = intel_crtc->lut_b[regno] << 8;
5273}
5274
5275static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
5276				 u16 *blue, uint32_t start, uint32_t size)
5277{
5278	int end = (start + size > 256) ? 256 : start + size, i;
5279	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5280
5281	for (i = start; i < end; i++) {
5282		intel_crtc->lut_r[i] = red[i] >> 8;
5283		intel_crtc->lut_g[i] = green[i] >> 8;
5284		intel_crtc->lut_b[i] = blue[i] >> 8;
5285	}
5286
5287	intel_crtc_load_lut(crtc);
5288}
5289
5290/**
5291 * Get a pipe with a simple mode set on it for doing load-based monitor
5292 * detection.
5293 *
5294 * It will be up to the load-detect code to adjust the pipe as appropriate for
5295 * its requirements.  The pipe will be connected to no other encoders.
5296 *
5297 * Currently this code will only succeed if there is a pipe with no encoders
5298 * configured for it.  In the future, it could choose to temporarily disable
5299 * some outputs to free up a pipe for its use.
5300 *
5301 * \return crtc, or NULL if no pipes are available.
5302 */
5303
5304/* VESA 640x480x72Hz mode to set on the pipe */
5305static struct drm_display_mode load_detect_mode = {
5306	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
5307		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
5308};
5309
5310static int
5311intel_framebuffer_create(struct drm_device *dev,
5312    struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj,
5313     struct drm_framebuffer **res)
5314{
5315	struct intel_framebuffer *intel_fb;
5316	int ret;
5317
5318	intel_fb = malloc(sizeof(*intel_fb), DRM_MEM_KMS, M_WAITOK | M_ZERO);
5319	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
5320	if (ret) {
5321		drm_gem_object_unreference_unlocked(&obj->base);
5322		free(intel_fb, DRM_MEM_KMS);
5323		return (ret);
5324	}
5325
5326	*res = &intel_fb->base;
5327	return (0);
5328}
5329
5330static u32
5331intel_framebuffer_pitch_for_width(int width, int bpp)
5332{
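	/* Bytes per scanline, rounded up to a 64-byte multiple for the
	 * scan-out stride alignment. */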
5333	u32 pitch = howmany(width * bpp, 8);
5334	return roundup2(pitch, 64);
5335}
5336
5337static u32
5338intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
5339{
5340	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
5341	return roundup2(pitch * mode->vdisplay, PAGE_SIZE);
5342}
5343
5344static int
5345intel_framebuffer_create_for_mode(struct drm_device *dev,
5346    struct drm_display_mode *mode, int depth, int bpp,
5347    struct drm_framebuffer **res)
5348{
5349	struct drm_i915_gem_object *obj;
5350	struct drm_mode_fb_cmd2 mode_cmd;
5351
5352	obj = i915_gem_alloc_object(dev,
5353				    intel_framebuffer_size_for_mode(mode, bpp));
5354	if (obj == NULL)
5355		return (-ENOMEM);
5356
5357	mode_cmd.width = mode->hdisplay;
5358	mode_cmd.height = mode->vdisplay;
5359	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
5360								bpp);
5361	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
5362
5363	return (intel_framebuffer_create(dev, &mode_cmd, obj, res));
5364}
5365
5366static int
5367mode_fits_in_fbdev(struct drm_device *dev,
5368    struct drm_display_mode *mode, struct drm_framebuffer **res)
5369{
5370	struct drm_i915_private *dev_priv = dev->dev_private;
5371	struct drm_i915_gem_object *obj;
5372	struct drm_framebuffer *fb;
5373
5374	if (dev_priv->fbdev == NULL) {
5375		*res = NULL;
5376		return (0);
5377	}
5378
5379	obj = dev_priv->fbdev->ifb.obj;
5380	if (obj == NULL) {
5381		*res = NULL;
5382		return (0);
5383	}
5384
5385	fb = &dev_priv->fbdev->ifb.base;
5386	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
5387	    fb->bits_per_pixel)) {
5388		*res = NULL;
5389		return (0);
5390	}
5391
5392	if (obj->base.size < mode->vdisplay * fb->pitches[0]) {
5393		*res = NULL;
5394		return (0);
5395	}
5396
5397	*res = fb;
5398	return (0);
5399}
5400
5401bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
5402				struct drm_connector *connector,
5403				struct drm_display_mode *mode,
5404				struct intel_load_detect_pipe *old)
5405{
5406	struct intel_crtc *intel_crtc;
5407	struct drm_crtc *possible_crtc;
5408	struct drm_encoder *encoder = &intel_encoder->base;
5409	struct drm_crtc *crtc = NULL;
5410	struct drm_device *dev = encoder->dev;
5411	struct drm_framebuffer *old_fb;
5412	int i = -1, r;
5413
5414	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5415		      connector->base.id, drm_get_connector_name(connector),
5416		      encoder->base.id, drm_get_encoder_name(encoder));
5417
5418	/*
5419	 * Algorithm gets a little messy:
5420	 *
5421	 *   - if the connector already has an assigned crtc, use it (but make
5422	 *     sure it's on first)
5423	 *
5424	 *   - try to find the first unused crtc that can drive this connector,
5425	 *     and use that if we find one
5426	 */
5427
5428	/* See if we already have a CRTC for this connector */
5429	if (encoder->crtc) {
5430		crtc = encoder->crtc;
5431
5432		intel_crtc = to_intel_crtc(crtc);
5433		old->dpms_mode = intel_crtc->dpms_mode;
5434		old->load_detect_temp = false;
5435
5436		/* Make sure the crtc and connector are running */
5437		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
5438			struct drm_encoder_helper_funcs *encoder_funcs;
5439			struct drm_crtc_helper_funcs *crtc_funcs;
5440
5441			crtc_funcs = crtc->helper_private;
5442			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
5443
5444			encoder_funcs = encoder->helper_private;
5445			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
5446		}
5447
5448		return true;
5449	}
5450
5451	/* Find an unused one (if possible) */
5452	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
5453		i++;
5454		if (!(encoder->possible_crtcs & (1 << i)))
5455			continue;
5456		if (!possible_crtc->enabled) {
5457			crtc = possible_crtc;
5458			break;
5459		}
5460	}
5461
5462	/*
5463	 * If we didn't find an unused CRTC, don't use any.
5464	 */
5465	if (!crtc) {
5466		DRM_DEBUG_KMS("no pipe available for load-detect\n");
5467		return false;
5468	}
5469
5470	encoder->crtc = crtc;
5471	connector->encoder = encoder;
5472
5473	intel_crtc = to_intel_crtc(crtc);
5474	old->dpms_mode = intel_crtc->dpms_mode;
5475	old->load_detect_temp = true;
5476	old->release_fb = NULL;
5477
5478	if (!mode)
5479		mode = &load_detect_mode;
5480
5481	old_fb = crtc->fb;
5482
5483	/* We need a framebuffer large enough to accommodate all accesses
5484	 * that the plane may generate whilst we perform load detection.
5485	 * We cannot rely on the fbcon being present (we get called during
5486	 * its initialisation to detect all boot displays, or it may not
5487	 * even exist), nor on it being large enough to satisfy the
5488	 * requested mode.
5489	 */
5490	r = mode_fits_in_fbdev(dev, mode, &crtc->fb);
5491	if (crtc->fb == NULL) {
5492		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
5493		r = intel_framebuffer_create_for_mode(dev, mode, 24, 32,
5494		    &crtc->fb);
5495		old->release_fb = crtc->fb;
5496	} else
5497		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
5498	if (r != 0) {
5499		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
5500		crtc->fb = old_fb;
5501		return false;
5502	}
5503
5504	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
5505		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
5506		if (old->release_fb)
5507			old->release_fb->funcs->destroy(old->release_fb);
5508		crtc->fb = old_fb;
5509		return false;
5510	}
5511
5512	/* let the connector get through one full cycle before testing */
5513	intel_wait_for_vblank(dev, intel_crtc->pipe);
5514
5515	return true;
5516}
5517
5518void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
5519				    struct drm_connector *connector,
5520				    struct intel_load_detect_pipe *old)
5521{
5522	struct drm_encoder *encoder = &intel_encoder->base;
5523	struct drm_device *dev = encoder->dev;
5524	struct drm_crtc *crtc = encoder->crtc;
5525	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
5526	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
5527
5528	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5529		      connector->base.id, drm_get_connector_name(connector),
5530		      encoder->base.id, drm_get_encoder_name(encoder));
5531
5532	if (old->load_detect_temp) {
5533		connector->encoder = NULL;
5534		drm_helper_disable_unused_functions(dev);
5535
5536		if (old->release_fb)
5537			old->release_fb->funcs->destroy(old->release_fb);
5538
5539		return;
5540	}
5541
5542	/* Switch crtc and encoder back off if necessary */
5543	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
5544		encoder_funcs->dpms(encoder, old->dpms_mode);
5545		crtc_funcs->dpms(crtc, old->dpms_mode);
5546	}
5547}
5548
5549/* Returns the clock of the currently programmed mode of the given pipe. */
5550static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
5551{
5552	struct drm_i915_private *dev_priv = dev->dev_private;
5553	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5554	int pipe = intel_crtc->pipe;
5555	u32 dpll = I915_READ(DPLL(pipe));
5556	u32 fp;
5557	intel_clock_t clock;
5558
5559	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
5560		fp = I915_READ(FP0(pipe));
5561	else
5562		fp = I915_READ(FP1(pipe));
5563
5564	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
5565	if (IS_PINEVIEW(dev)) {
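		/* N was written as a one-hot bit (1 << n) on Pineview, so
		 * ffs() recovers the original divider value. */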
5566		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
5567		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
5568	} else {
5569		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
5570		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
5571	}
5572
5573	if (!IS_GEN2(dev)) {
5574		if (IS_PINEVIEW(dev))
5575			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
5576				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
5577		else
5578			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
5579			       DPLL_FPA01_P1_POST_DIV_SHIFT);
5580
5581		switch (dpll & DPLL_MODE_MASK) {
5582		case DPLLB_MODE_DAC_SERIAL:
5583			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
5584				5 : 10;
5585			break;
5586		case DPLLB_MODE_LVDS:
5587			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
5588				7 : 14;
5589			break;
5590		default:
5591			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
5592				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
5593			return 0;
5594		}
5595
5596		/* XXX: Handle the 100MHz refclk */
5597		intel_clock(dev, 96000, &clock);
5598	} else {
5599		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
5600
5601		if (is_lvds) {
5602			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
5603				       DPLL_FPA01_P1_POST_DIV_SHIFT);
5604			clock.p2 = 14;
5605
5606			if ((dpll & PLL_REF_INPUT_MASK) ==
5607			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
5608				/* XXX: might not be 66MHz */
5609				intel_clock(dev, 66000, &clock);
5610			} else
5611				intel_clock(dev, 48000, &clock);
5612		} else {
5613			if (dpll & PLL_P1_DIVIDE_BY_TWO)
5614				clock.p1 = 2;
5615			else {
5616				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
5617					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
5618			}
5619			if (dpll & PLL_P2_DIVIDE_BY_4)
5620				clock.p2 = 4;
5621			else
5622				clock.p2 = 2;
5623
5624			intel_clock(dev, 48000, &clock);
5625		}
5626	}
5627
5628	/* XXX: It would be nice to validate the clocks, but we can't reuse
5629	 * i830PllIsValid() because it relies on the xf86_config connector
5630	 * configuration being accurate, which it isn't necessarily.
5631	 */
5632
5633	return clock.dot;
5634}
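/*
 * Illustrative sketch only (not part of the driver): the helper below
 * shows the divider arithmetic that intel_clock() is assumed to apply
 * for the non-Pineview case above; Pineview and PCH platforms use
 * different formulas, so treat this as an example rather than a
 * reference implementation.
 */
#if 0
static int
example_i9xx_dot_clock_khz(int refclk_khz, int n, int m1, int m2,
    int p1, int p2)
{
	int m = 5 * (m1 + 2) + (m2 + 2);	/* effective feedback divider */
	int p = p1 * p2;			/* combined post divider */
	int vco = refclk_khz * m / (n + 2);	/* VCO frequency, in kHz */

	return (vco / p);			/* pixel (dot) clock, in kHz */
}
#endif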
5635
5636/** Returns the currently programmed mode of the given pipe. */
5637struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
5638					     struct drm_crtc *crtc)
5639{
5640	struct drm_i915_private *dev_priv = dev->dev_private;
5641	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5642	int pipe = intel_crtc->pipe;
5643	struct drm_display_mode *mode;
5644	int htot = I915_READ(HTOTAL(pipe));
5645	int hsync = I915_READ(HSYNC(pipe));
5646	int vtot = I915_READ(VTOTAL(pipe));
5647	int vsync = I915_READ(VSYNC(pipe));
5648
5649	mode = malloc(sizeof(*mode), DRM_MEM_KMS, M_WAITOK | M_ZERO);
5650
5651	mode->clock = intel_crtc_clock_get(dev, crtc);
5652	mode->hdisplay = (htot & 0xffff) + 1;
5653	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
5654	mode->hsync_start = (hsync & 0xffff) + 1;
5655	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
5656	mode->vdisplay = (vtot & 0xffff) + 1;
5657	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
5658	mode->vsync_start = (vsync & 0xffff) + 1;
5659	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
5660
5661	drm_mode_set_name(mode);
5662
5663	return mode;
5664}
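/*
 * Worked example (illustrative values): for a 1920x1080 CEA timing the
 * hardware would hold HTOTAL(pipe) == 0x0897077f, which decodes above
 * to hdisplay = 0x77f + 1 = 1920 and htotal = 0x897 + 1 = 2200; the
 * vertical registers decode the same way.
 */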
5665
5666#define GPU_IDLE_TIMEOUT (500 /* ms */ * 1000 / hz)
5667
5668/* When this timer fires, we've been idle for a while */
5669static void intel_gpu_idle_timer(void *arg)
5670{
5671	struct drm_device *dev = arg;
5672	drm_i915_private_t *dev_priv = dev->dev_private;
5673
5674	if (!list_empty(&dev_priv->mm.active_list)) {
5675		/* Still processing requests, so just re-arm the timer. */
5676		callout_schedule(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT);
5677		return;
5678	}
5679
5680	dev_priv->busy = false;
5681	taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task);
5682}
5683
5684#define CRTC_IDLE_TIMEOUT (1000 /* ms */ * 1000 / hz)
5685
5686static void intel_crtc_idle_timer(void *arg)
5687{
5688	struct intel_crtc *intel_crtc = arg;
5689	struct drm_crtc *crtc = &intel_crtc->base;
5690	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
5691	struct intel_framebuffer *intel_fb;
5692
5693	intel_fb = to_intel_framebuffer(crtc->fb);
5694	if (intel_fb && intel_fb->obj->active) {
5695		/* The framebuffer is still being accessed by the GPU. */
5696		callout_schedule(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT);
5697		return;
5698	}
5699
5700	intel_crtc->busy = false;
5701	taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task);
5702}
5703
5704static void intel_increase_pllclock(struct drm_crtc *crtc)
5705{
5706	struct drm_device *dev = crtc->dev;
5707	drm_i915_private_t *dev_priv = dev->dev_private;
5708	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5709	int pipe = intel_crtc->pipe;
5710	int dpll_reg = DPLL(pipe);
5711	int dpll;
5712
5713	if (HAS_PCH_SPLIT(dev))
5714		return;
5715
5716	if (!dev_priv->lvds_downclock_avail)
5717		return;
5718
5719	dpll = I915_READ(dpll_reg);
5720	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
5721		DRM_DEBUG_DRIVER("upclocking LVDS\n");
5722
5723		assert_panel_unlocked(dev_priv, pipe);
5724
5725		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
5726		I915_WRITE(dpll_reg, dpll);
5727		intel_wait_for_vblank(dev, pipe);
5728
5729		dpll = I915_READ(dpll_reg);
5730		if (dpll & DISPLAY_RATE_SELECT_FPA1)
5731			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
5732	}
5733
5734	/* Schedule downclock */
5735	callout_reset(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT,
5736	    intel_crtc_idle_timer, intel_crtc);
5737}
5738
5739static void intel_decrease_pllclock(struct drm_crtc *crtc)
5740{
5741	struct drm_device *dev = crtc->dev;
5742	drm_i915_private_t *dev_priv = dev->dev_private;
5743	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5744
5745	if (HAS_PCH_SPLIT(dev))
5746		return;
5747
5748	if (!dev_priv->lvds_downclock_avail)
5749		return;
5750
5751	/*
5752	 * Since this is called by a timer, we should never get here in
5753	 * the manual case.
5754	 */
5755	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
5756		int pipe = intel_crtc->pipe;
5757		int dpll_reg = DPLL(pipe);
5758		u32 dpll;
5759
5760		DRM_DEBUG_DRIVER("downclocking LVDS\n");
5761
5762		assert_panel_unlocked(dev_priv, pipe);
5763
5764		dpll = I915_READ(dpll_reg);
5765		dpll |= DISPLAY_RATE_SELECT_FPA1;
5766		I915_WRITE(dpll_reg, dpll);
5767		intel_wait_for_vblank(dev, pipe);
5768		dpll = I915_READ(dpll_reg);
5769		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
5770			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
5771	}
5772}
5773
5774/**
5775 * intel_idle_update - adjust clocks for idleness
5776 * @arg: drm_i915_private pointer
5777 *
5778 * Either the GPU or display (or both) went idle.  Check the busy status
5779 * here and adjust the CRTC and GPU clocks as necessary.
5780 */
5781static void intel_idle_update(void *arg, int pending)
5782{
5783	drm_i915_private_t *dev_priv = arg;
5784	struct drm_device *dev = dev_priv->dev;
5785	struct drm_crtc *crtc;
5786	struct intel_crtc *intel_crtc;
5787
5788	if (!i915_powersave)
5789		return;
5790
5791	DRM_LOCK(dev);
5792
5793	i915_update_gfx_val(dev_priv);
5794
5795	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5796		/* Skip inactive CRTCs */
5797		if (!crtc->fb)
5798			continue;
5799
5800		intel_crtc = to_intel_crtc(crtc);
5801		if (!intel_crtc->busy)
5802			intel_decrease_pllclock(crtc);
5803	}
5804
5805	DRM_UNLOCK(dev);
5806}
5807
5808/**
5809 * intel_mark_busy - mark the GPU and possibly the display busy
5810 * @dev: drm device
5811 * @obj: object we're operating on
5812 *
5813 * Callers can use this function to indicate that the GPU is busy processing
5814 * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
5815 * buffer), we'll also mark the display as busy, so we know to increase its
5816 * clock frequency.
5817 */
5818void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
5819{
5820	drm_i915_private_t *dev_priv = dev->dev_private;
5821	struct drm_crtc *crtc = NULL;
5822	struct intel_framebuffer *intel_fb;
5823	struct intel_crtc *intel_crtc;
5824
5825	if (!drm_core_check_feature(dev, DRIVER_MODESET))
5826		return;
5827
5828	if (!dev_priv->busy) {
5829		intel_sanitize_pm(dev);
5830		dev_priv->busy = true;
5831	} else
5832		callout_reset(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT,
5833		    intel_gpu_idle_timer, dev);
5834
5835	if (obj == NULL)
5836		return;
5837
5838	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5839		if (!crtc->fb)
5840			continue;
5841
5842		intel_crtc = to_intel_crtc(crtc);
5843		intel_fb = to_intel_framebuffer(crtc->fb);
5844		if (intel_fb->obj == obj) {
5845			if (!intel_crtc->busy) {
5846				/* Non-busy -> busy, upclock */
5847				intel_increase_pllclock(crtc);
5848				intel_crtc->busy = true;
5849			} else {
5850				/* Busy -> busy, put off timer */
5851				callout_reset(&intel_crtc->idle_callout,
5852				    CRTC_IDLE_TIMEOUT, intel_crtc_idle_timer,
5853				    intel_crtc);
5854			}
5855		}
5856	}
5857}
5858
5859static void intel_crtc_destroy(struct drm_crtc *crtc)
5860{
5861	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5862	struct drm_device *dev = crtc->dev;
5863	struct drm_i915_private *dev_priv = dev->dev_private;
5864	struct intel_unpin_work *work;
5865
5866	mtx_lock(&dev->event_lock);
5867	work = intel_crtc->unpin_work;
5868	intel_crtc->unpin_work = NULL;
5869	mtx_unlock(&dev->event_lock);
5870
5871	if (work) {
5872		taskqueue_cancel(dev_priv->tq, &work->task, NULL);
5873		taskqueue_drain(dev_priv->tq, &work->task);
5874		free(work, DRM_MEM_KMS);
5875	}
5876
5877	drm_crtc_cleanup(crtc);
5878
5879	free(intel_crtc, DRM_MEM_KMS);
5880}
5881
5882static void intel_unpin_work_fn(void *arg, int pending)
5883{
5884	struct intel_unpin_work *work = arg;
5885	struct drm_device *dev;
5886
5887	dev = work->dev;
5888	DRM_LOCK(dev);
5889	intel_unpin_fb_obj(work->old_fb_obj);
5890	drm_gem_object_unreference(&work->pending_flip_obj->base);
5891	drm_gem_object_unreference(&work->old_fb_obj->base);
5892
5893	intel_update_fbc(work->dev);
5894	DRM_UNLOCK(dev);
5895	free(work, DRM_MEM_KMS);
5896}
5897
5898static void do_intel_finish_page_flip(struct drm_device *dev,
5899				      struct drm_crtc *crtc)
5900{
5901	drm_i915_private_t *dev_priv = dev->dev_private;
5902	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5903	struct intel_unpin_work *work;
5904	struct drm_i915_gem_object *obj;
5905	struct drm_pending_vblank_event *e;
5906	struct timeval tnow, tvbl;
5907
5908	/* Ignore early vblank irqs */
5909	if (intel_crtc == NULL)
5910		return;
5911
5912	microtime(&tnow);
5913
5914	mtx_lock(&dev->event_lock);
5915	work = intel_crtc->unpin_work;
5916	if (work == NULL || !work->pending) {
5917		mtx_unlock(&dev->event_lock);
5918		return;
5919	}
5920
5921	intel_crtc->unpin_work = NULL;
5922
5923	if (work->event) {
5924		e = work->event;
5925		e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
5926
5927		/* Called before vblank count and timestamps have
5928		 * been updated for the vblank interval of flip
5929		 * completion? If so, increment the vblank count and
5930		 * add one video refresh duration to the returned
5931		 * timestamp to account for this. We assume this
5932		 * happened if we get called over 0.9 frame durations
5933		 * after the last timestamped vblank.
5934		 *
5935		 * This calculation cannot be used with vrefresh rates
5936		 * below 5 Hz (10 Hz to be on the safe side) without
5937		 * promoting to 64-bit integers.
5938		 */
5939		if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
5940		    9 * crtc->framedur_ns) {
5941			e->event.sequence++;
5942			tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
5943					     crtc->framedur_ns);
5944		}
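		/*
		 * Worked example (illustrative): at 60 Hz, framedur_ns is
		 * about 16,666,667 ns, so the fixup above triggers once
		 * tnow - tvbl exceeds roughly 15 ms; scaling by 10 and 9
		 * keeps the 0.9-frame comparison in integer arithmetic.
		 */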
5945
5946		e->event.tv_sec = tvbl.tv_sec;
5947		e->event.tv_usec = tvbl.tv_usec;
5948
5949		list_add_tail(&e->base.link,
5950			      &e->base.file_priv->event_list);
5951		drm_event_wakeup(&e->base);
5952	}
5953
5954	drm_vblank_put(dev, intel_crtc->pipe);
5955
5956	obj = work->old_fb_obj;
5957
5958	atomic_clear_int(&obj->pending_flip, 1 << intel_crtc->plane);
5959	if (atomic_load_acq_int(&obj->pending_flip) == 0)
5960		wakeup(&obj->pending_flip);
5961	mtx_unlock(&dev->event_lock);
5962
5963	taskqueue_enqueue(dev_priv->tq, &work->task);
5964
5965	CTR2(KTR_DRM, "i915_flip_complete %d %p", intel_crtc->plane,
5966	    work->pending_flip_obj);
5967}
5968
5969void intel_finish_page_flip(struct drm_device *dev, int pipe)
5970{
5971	drm_i915_private_t *dev_priv = dev->dev_private;
5972	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
5973
5974	do_intel_finish_page_flip(dev, crtc);
5975}
5976
5977void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
5978{
5979	drm_i915_private_t *dev_priv = dev->dev_private;
5980	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
5981
5982	do_intel_finish_page_flip(dev, crtc);
5983}
5984
5985void intel_prepare_page_flip(struct drm_device *dev, int plane)
5986{
5987	drm_i915_private_t *dev_priv = dev->dev_private;
5988	struct intel_crtc *intel_crtc =
5989		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
5990
5991	mtx_lock(&dev->event_lock);
5992	if (intel_crtc->unpin_work) {
5993		if ((++intel_crtc->unpin_work->pending) > 1)
5994			DRM_ERROR("Prepared flip multiple times\n");
5995	} else {
5996		DRM_DEBUG("preparing flip with no unpin work?\n");
5997	}
5998	mtx_unlock(&dev->event_lock);
5999}
6000
6001static int intel_gen2_queue_flip(struct drm_device *dev,
6002				 struct drm_crtc *crtc,
6003				 struct drm_framebuffer *fb,
6004				 struct drm_i915_gem_object *obj)
6005{
6006	struct drm_i915_private *dev_priv = dev->dev_private;
6007	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6008	unsigned long offset;
6009	u32 flip_mask;
6010	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
6011	int ret;
6012
6013	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6014	if (ret)
6015		goto err;
6016
6017	/* Offset into the new buffer for cases of shared fbs between CRTCs */
6018	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
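	/*
	 * Worked example (illustrative): for an XRGB8888 framebuffer with a
	 * 7680-byte pitch, a CRTC scanning out from (x, y) = (8, 2) starts
	 * 2 * 7680 + 8 * 4 = 15392 bytes into the shared buffer.
	 */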
6019
6020	ret = intel_ring_begin(ring, 6);
6021	if (ret)
6022		goto err_unpin;
6023
6024	/* Can't queue multiple flips, so wait for the previous
6025	 * one to finish before executing the next.
6026	 */
6027	if (intel_crtc->plane)
6028		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6029	else
6030		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6031	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
6032	intel_ring_emit(ring, MI_NOOP);
6033	intel_ring_emit(ring, MI_DISPLAY_FLIP |
6034			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6035	intel_ring_emit(ring, fb->pitches[0]);
6036	intel_ring_emit(ring, obj->gtt_offset + offset);
6037	intel_ring_emit(ring, 0); /* aux display base address, unused */
6038	intel_ring_advance(ring);
6039	return 0;
6040
6041err_unpin:
6042	intel_unpin_fb_obj(obj);
6043err:
6044	return ret;
6045}
6046
6047static int intel_gen3_queue_flip(struct drm_device *dev,
6048				 struct drm_crtc *crtc,
6049				 struct drm_framebuffer *fb,
6050				 struct drm_i915_gem_object *obj)
6051{
6052	struct drm_i915_private *dev_priv = dev->dev_private;
6053	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6054	unsigned long offset;
6055	u32 flip_mask;
6056	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
6057	int ret;
6058
6059	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6060	if (ret)
6061		goto err;
6062
6063	/* Offset into the new buffer for cases of shared fbs between CRTCs */
6064	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
6065
6066	ret = intel_ring_begin(ring, 6);
6067	if (ret)
6068		goto err_unpin;
6069
6070	if (intel_crtc->plane)
6071		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6072	else
6073		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6074	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
6075	intel_ring_emit(ring, MI_NOOP);
6076	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
6077			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6078	intel_ring_emit(ring, fb->pitches[0]);
6079	intel_ring_emit(ring, obj->gtt_offset + offset);
6080	intel_ring_emit(ring, MI_NOOP);
6081
6082	intel_ring_advance(ring);
6083	return 0;
6084
6085err_unpin:
6086	intel_unpin_fb_obj(obj);
6087err:
6088	return ret;
6089}
6090
6091static int intel_gen4_queue_flip(struct drm_device *dev,
6092				 struct drm_crtc *crtc,
6093				 struct drm_framebuffer *fb,
6094				 struct drm_i915_gem_object *obj)
6095{
6096	struct drm_i915_private *dev_priv = dev->dev_private;
6097	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6098	uint32_t pf, pipesrc;
6099	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
6100	int ret;
6101
6102	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6103	if (ret)
6104		goto err;
6105
6106	ret = intel_ring_begin(ring, 4);
6107	if (ret)
6108		goto err_unpin;
6109
6110	/* i965+ uses the linear or tiled offsets from the
6111	 * Display Registers (which do not change across a page-flip)
6112	 * so we need only reprogram the base address.
6113	 */
6114	intel_ring_emit(ring, MI_DISPLAY_FLIP |
6115			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6116	intel_ring_emit(ring, fb->pitches[0]);
6117	intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);
6118
6119	/* XXX Enabling the panel-fitter across page-flip is so far
6120	 * untested on non-native modes, so ignore it for now.
6121	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
6122	 */
6123	pf = 0;
6124	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6125	intel_ring_emit(ring, pf | pipesrc);
6126	intel_ring_advance(ring);
6127	return 0;
6128
6129err_unpin:
6130	intel_unpin_fb_obj(obj);
6131err:
6132	return ret;
6133}
6134
6135static int intel_gen6_queue_flip(struct drm_device *dev,
6136				 struct drm_crtc *crtc,
6137				 struct drm_framebuffer *fb,
6138				 struct drm_i915_gem_object *obj)
6139{
6140	struct drm_i915_private *dev_priv = dev->dev_private;
6141	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6142	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
6143	uint32_t pf, pipesrc;
6144	int ret;
6145
6146	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6147	if (ret)
6148		goto err;
6149
6150	ret = intel_ring_begin(ring, 4);
6151	if (ret)
6152		goto err_unpin;
6153
6154	intel_ring_emit(ring, MI_DISPLAY_FLIP |
6155			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6156	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
6157	intel_ring_emit(ring, obj->gtt_offset);
6158
6159	/* Contrary to the suggestions in the documentation,
6160	 * "Enable Panel Fitter" does not seem to be required when page
6161	 * flipping with a non-native mode, and, worse, causes a normal
6162	 * modeset to fail.
6163	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
6164	 */
6165	pf = 0;
6166	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6167	intel_ring_emit(ring, pf | pipesrc);
6168	intel_ring_advance(ring);
6169	return 0;
6170
6171err_unpin:
6172	intel_unpin_fb_obj(obj);
6173err:
6174	return ret;
6175}
6176
6177/*
6178 * On gen7 we currently use the blit ring because (in early silicon at least)
6179 * the render ring doesn't give us interrpts for page flip completion, which
6180 * means clients will hang after the first flip is queued.  Fortunately the
6181 * blit ring generates interrupts properly, so use it instead.
6182 */
6183static int intel_gen7_queue_flip(struct drm_device *dev,
6184				 struct drm_crtc *crtc,
6185				 struct drm_framebuffer *fb,
6186				 struct drm_i915_gem_object *obj)
6187{
6188	struct drm_i915_private *dev_priv = dev->dev_private;
6189	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6190	struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
6191	int ret;
6192
6193	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6194	if (ret)
6195		goto err;
6196
6197	ret = intel_ring_begin(ring, 4);
6198	if (ret)
6199		goto err_unpin;
6200
6201	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
6202	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
6203	intel_ring_emit(ring, (obj->gtt_offset));
6204	intel_ring_emit(ring, (MI_NOOP));
6205	intel_ring_advance(ring);
6206	return 0;
6207
6208err_unpin:
6209	intel_unpin_fb_obj(obj);
6210err:
6211	return ret;
6212}
6213
6214static int intel_default_queue_flip(struct drm_device *dev,
6215				    struct drm_crtc *crtc,
6216				    struct drm_framebuffer *fb,
6217				    struct drm_i915_gem_object *obj)
6218{
6219	return -ENODEV;
6220}
6221
6222static int intel_crtc_page_flip(struct drm_crtc *crtc,
6223				struct drm_framebuffer *fb,
6224				struct drm_pending_vblank_event *event)
6225{
6226	struct drm_device *dev = crtc->dev;
6227	struct drm_i915_private *dev_priv = dev->dev_private;
6228	struct intel_framebuffer *intel_fb;
6229	struct drm_i915_gem_object *obj;
6230	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6231	struct intel_unpin_work *work;
6232	int ret;
6233
6234	work = malloc(sizeof *work, DRM_MEM_KMS, M_WAITOK | M_ZERO);
6235
6236	work->event = event;
6237	work->dev = crtc->dev;
6238	intel_fb = to_intel_framebuffer(crtc->fb);
6239	work->old_fb_obj = intel_fb->obj;
6240	TASK_INIT(&work->task, 0, intel_unpin_work_fn, work);
6241
6242	ret = drm_vblank_get(dev, intel_crtc->pipe);
6243	if (ret)
6244		goto free_work;
6245
6246	/* We borrow the event spin lock for protecting unpin_work */
6247	mtx_lock(&dev->event_lock);
6248	if (intel_crtc->unpin_work) {
6249		mtx_unlock(&dev->event_lock);
6250		free(work, DRM_MEM_KMS);
6251		drm_vblank_put(dev, intel_crtc->pipe);
6252
6253		DRM_DEBUG("flip queue: crtc already busy\n");
6254		return -EBUSY;
6255	}
6256	intel_crtc->unpin_work = work;
6257	mtx_unlock(&dev->event_lock);
6258
6259	intel_fb = to_intel_framebuffer(fb);
6260	obj = intel_fb->obj;
6261
6262	DRM_LOCK(dev);
6263
6264	/* Reference the objects for the scheduled work. */
6265	drm_gem_object_reference(&work->old_fb_obj->base);
6266	drm_gem_object_reference(&obj->base);
6267
6268	crtc->fb = fb;
6269
6270	work->pending_flip_obj = obj;
6271
6272	work->enable_stall_check = true;
6273
6274	/* Block clients from rendering to the new back buffer until
6275	 * the flip occurs and the object is no longer visible.
6276	 */
6277	atomic_set_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane);
6278
6279	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
6280	if (ret)
6281		goto cleanup_pending;
6282	intel_disable_fbc(dev);
6283	intel_mark_busy(dev, obj);
6284	DRM_UNLOCK(dev);
6285
6286	CTR2(KTR_DRM, "i915_flip_request %d %p", intel_crtc->plane, obj);
6287
6288	return 0;
6289
6290cleanup_pending:
6291	atomic_clear_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane);
6292	drm_gem_object_unreference(&work->old_fb_obj->base);
6293	drm_gem_object_unreference(&obj->base);
6294	DRM_UNLOCK(dev);
6295
6296	mtx_lock(&dev->event_lock);
6297	intel_crtc->unpin_work = NULL;
6298	mtx_unlock(&dev->event_lock);
6299
6300	drm_vblank_put(dev, intel_crtc->pipe);
6301free_work:
6302	free(work, DRM_MEM_KMS);
6303
6304	return ret;
6305}
6306
6307static void intel_sanitize_modesetting(struct drm_device *dev,
6308				       int pipe, int plane)
6309{
6310	struct drm_i915_private *dev_priv = dev->dev_private;
6311	u32 reg, val;
6312	int i;
6313
6314	/* Clear any frame start delays used for debugging left by the BIOS */
6315	for_each_pipe(i) {
6316		reg = PIPECONF(i);
6317		I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
6318	}
6319
6320	if (HAS_PCH_SPLIT(dev))
6321		return;
6322
6323	/* Who knows what state these registers were left in by the BIOS or
6324	 * grub?
6325	 *
6326	 * If we leave the registers in a conflicting state (e.g. with the
6327	 * display plane reading from the other pipe than the one we intend
6328	 * to use) then when we attempt to teardown the active mode, we will
6329	 * not disable the pipes and planes in the correct order -- leaving
6330	 * a plane reading from a disabled pipe and possibly leading to
6331	 * undefined behaviour.
6332	 */
6333
6334	reg = DSPCNTR(plane);
6335	val = I915_READ(reg);
6336
6337	if ((val & DISPLAY_PLANE_ENABLE) == 0)
6338		return;
6339	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
6340		return;
6341
6342	/* This display plane is active and attached to the other CPU pipe. */
6343	pipe = !pipe;
6344
6345	/* Disable the plane and wait for it to stop reading from the pipe. */
6346	intel_disable_plane(dev_priv, plane, pipe);
6347	intel_disable_pipe(dev_priv, pipe);
6348}
6349
6350static void intel_crtc_reset(struct drm_crtc *crtc)
6351{
6352	struct drm_device *dev = crtc->dev;
6353	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6354
6355	/* Reset flags back to the 'unknown' status so that they
6356	 * will be correctly set on the initial modeset.
6357	 */
6358	intel_crtc->dpms_mode = -1;
6359
6360	/* We need to fix up any BIOS configuration that conflicts with
6361	 * our expectations.
6362	 */
6363	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
6364}
6365
6366static struct drm_crtc_helper_funcs intel_helper_funcs = {
6367	.dpms = intel_crtc_dpms,
6368	.mode_fixup = intel_crtc_mode_fixup,
6369	.mode_set = intel_crtc_mode_set,
6370	.mode_set_base = intel_pipe_set_base,
6371	.mode_set_base_atomic = intel_pipe_set_base_atomic,
6372	.load_lut = intel_crtc_load_lut,
6373	.disable = intel_crtc_disable,
6374};
6375
6376static const struct drm_crtc_funcs intel_crtc_funcs = {
6377	.reset = intel_crtc_reset,
6378	.cursor_set = intel_crtc_cursor_set,
6379	.cursor_move = intel_crtc_cursor_move,
6380	.gamma_set = intel_crtc_gamma_set,
6381	.set_config = drm_crtc_helper_set_config,
6382	.destroy = intel_crtc_destroy,
6383	.page_flip = intel_crtc_page_flip,
6384};
6385
6386static void intel_pch_pll_init(struct drm_device *dev)
6387{
6388	drm_i915_private_t *dev_priv = dev->dev_private;
6389	int i;
6390
6391	if (dev_priv->num_pch_pll == 0) {
6392		DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
6393		return;
6394	}
6395
6396	for (i = 0; i < dev_priv->num_pch_pll; i++) {
6397		dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
6398		dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
6399		dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
6400	}
6401}
6402
6403static void intel_crtc_init(struct drm_device *dev, int pipe)
6404{
6405	drm_i915_private_t *dev_priv = dev->dev_private;
6406	struct intel_crtc *intel_crtc;
6407	int i;
6408
6409	intel_crtc = malloc(sizeof(struct intel_crtc) +
6410	    (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
6411	    DRM_MEM_KMS, M_WAITOK | M_ZERO);
6412
6413	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
6414
6415	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
6416	for (i = 0; i < 256; i++) {
6417		intel_crtc->lut_r[i] = i;
6418		intel_crtc->lut_g[i] = i;
6419		intel_crtc->lut_b[i] = i;
6420	}
6421
6422	/* Swap pipes & planes for FBC on pre-965 */
6423	intel_crtc->pipe = pipe;
6424	intel_crtc->plane = pipe;
6425	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
6426		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
6427		intel_crtc->plane = !pipe;
6428	}
6429
6430	KASSERT(pipe < DRM_ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) &&
6431	    dev_priv->plane_to_crtc_mapping[intel_crtc->plane] == NULL,
6432	    ("plane_to_crtc is already initialized"));
6433	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
6434	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
6435
6436	intel_crtc_reset(&intel_crtc->base);
6437	intel_crtc->active = true; /* force the pipe off on setup_init_config */
6438	intel_crtc->bpp = 24; /* default for pre-Ironlake */
6439
6440	if (HAS_PCH_SPLIT(dev)) {
6441		intel_helper_funcs.prepare = ironlake_crtc_prepare;
6442		intel_helper_funcs.commit = ironlake_crtc_commit;
6443	} else {
6444		intel_helper_funcs.prepare = i9xx_crtc_prepare;
6445		intel_helper_funcs.commit = i9xx_crtc_commit;
6446	}
6447
6448	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
6449
6450	intel_crtc->busy = false;
6451
6452	callout_init(&intel_crtc->idle_callout, CALLOUT_MPSAFE);
6453}
6454
6455int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
6456				struct drm_file *file)
6457{
6458	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
6459	struct drm_mode_object *drmmode_obj;
6460	struct intel_crtc *crtc;
6461
6462	if (!drm_core_check_feature(dev, DRIVER_MODESET))
6463		return -ENODEV;
6464
6465	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
6466			DRM_MODE_OBJECT_CRTC);
6467
6468	if (!drmmode_obj) {
6469		DRM_ERROR("no such CRTC id\n");
6470		return -EINVAL;
6471	}
6472
6473	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
6474	pipe_from_crtc_id->pipe = crtc->pipe;
6475
6476	return 0;
6477}
6478
6479static int intel_encoder_clones(struct drm_device *dev, int type_mask)
6480{
6481	struct intel_encoder *encoder;
6482	int index_mask = 0;
6483	int entry = 0;
6484
6485	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
6486		if (type_mask & encoder->clone_mask)
6487			index_mask |= (1 << entry);
6488		entry++;
6489	}
6490
6491	return index_mask;
6492}
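/*
 * Worked example (illustrative): if the first and third encoders on
 * encoder_list have a clone bit in common with type_mask, the function
 * returns 0x5 (bits 0 and 2 set); the bits index list positions, not
 * encoder types.
 */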
6493
6494static bool has_edp_a(struct drm_device *dev)
6495{
6496	struct drm_i915_private *dev_priv = dev->dev_private;
6497
6498	if (!IS_MOBILE(dev))
6499		return false;
6500
6501	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
6502		return false;
6503
6504	if (IS_GEN5(dev) &&
6505	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
6506		return false;
6507
6508	return true;
6509}
6510
6511static void intel_setup_outputs(struct drm_device *dev)
6512{
6513	struct drm_i915_private *dev_priv = dev->dev_private;
6514	struct intel_encoder *encoder;
6515	bool dpd_is_edp = false;
6516	bool has_lvds;
6517
6518	has_lvds = intel_lvds_init(dev);
6519	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
6520		/* disable the panel fitter on everything but LVDS */
6521		I915_WRITE(PFIT_CONTROL, 0);
6522	}
6523
6524	if (HAS_PCH_SPLIT(dev)) {
6525		dpd_is_edp = intel_dpd_is_edp(dev);
6526
6527		if (has_edp_a(dev))
6528			intel_dp_init(dev, DP_A);
6529
6530		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
6531			intel_dp_init(dev, PCH_DP_D);
6532	}
6533
6534	intel_crt_init(dev);
6535
6536	if (IS_HASWELL(dev)) {
6537		int found;
6538
6539		/* Haswell uses DDI functions to detect digital outputs */
6540		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
6541		/* DDI A only supports eDP */
6542		if (found)
6543			intel_ddi_init(dev, PORT_A);
6544
6545		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
6546		 * register */
6547		found = I915_READ(SFUSE_STRAP);
6548
6549		if (found & SFUSE_STRAP_DDIB_DETECTED)
6550			intel_ddi_init(dev, PORT_B);
6551		if (found & SFUSE_STRAP_DDIC_DETECTED)
6552			intel_ddi_init(dev, PORT_C);
6553		if (found & SFUSE_STRAP_DDID_DETECTED)
6554			intel_ddi_init(dev, PORT_D);
6555	} else if (HAS_PCH_SPLIT(dev)) {
6556		int found;
6557
6558		DRM_DEBUG_KMS(
6559"HDMIB %d PCH_DP_B %d HDMIC %d HDMID %d PCH_DP_C %d PCH_DP_D %d LVDS %d\n",
6560		    (I915_READ(HDMIB) & PORT_DETECTED) != 0,
6561		    (I915_READ(PCH_DP_B) & DP_DETECTED) != 0,
6562		    (I915_READ(HDMIC) & PORT_DETECTED) != 0,
6563		    (I915_READ(HDMID) & PORT_DETECTED) != 0,
6564		    (I915_READ(PCH_DP_C) & DP_DETECTED) != 0,
6565		    (I915_READ(PCH_DP_D) & DP_DETECTED) != 0,
6566		    (I915_READ(PCH_LVDS) & LVDS_DETECTED) != 0);
6567
6568		if (I915_READ(HDMIB) & PORT_DETECTED) {
6569			/* PCH SDVOB multiplex with HDMIB */
6570			found = intel_sdvo_init(dev, PCH_SDVOB, true);
6571			if (!found)
6572				intel_hdmi_init(dev, HDMIB);
6573			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
6574				intel_dp_init(dev, PCH_DP_B);
6575		}
6576
6577		if (I915_READ(HDMIC) & PORT_DETECTED)
6578			intel_hdmi_init(dev, HDMIC);
6579
6580		if (I915_READ(HDMID) & PORT_DETECTED)
6581			intel_hdmi_init(dev, HDMID);
6582
6583		if (I915_READ(PCH_DP_C) & DP_DETECTED)
6584			intel_dp_init(dev, PCH_DP_C);
6585
6586		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
6587			intel_dp_init(dev, PCH_DP_D);
6588
6589	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
6590		bool found = false;
6591
6592		if (I915_READ(SDVOB) & SDVO_DETECTED) {
6593			DRM_DEBUG_KMS("probing SDVOB\n");
6594			found = intel_sdvo_init(dev, SDVOB, true);
6595			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
6596				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
6597				intel_hdmi_init(dev, SDVOB);
6598			}
6599
6600			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
6601				DRM_DEBUG_KMS("probing DP_B\n");
6602				intel_dp_init(dev, DP_B);
6603			}
6604		}
6605
6606		/* Before G4X SDVOC doesn't have its own detect register */
6607		/* Before G4X, SDVOC doesn't have its own detect register */
6608		if (I915_READ(SDVOB) & SDVO_DETECTED) {
6609			DRM_DEBUG_KMS("probing SDVOC\n");
6610			found = intel_sdvo_init(dev, SDVOC, false);
6611		}
6612
6613		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
6614
6615			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
6616				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
6617				intel_hdmi_init(dev, SDVOC);
6618			}
6619			if (SUPPORTS_INTEGRATED_DP(dev)) {
6620				DRM_DEBUG_KMS("probing DP_C\n");
6621				intel_dp_init(dev, DP_C);
6622			}
6623		}
6624
6625		if (SUPPORTS_INTEGRATED_DP(dev) &&
6626		    (I915_READ(DP_D) & DP_DETECTED)) {
6627			DRM_DEBUG_KMS("probing DP_D\n");
6628			intel_dp_init(dev, DP_D);
6629		}
6630	} else if (IS_GEN2(dev)) {
6631#if 1
6632		KIB_NOTYET();
6633#else
6634		intel_dvo_init(dev);
6635#endif
6636	}
6637
6638	if (SUPPORTS_TV(dev))
6639		intel_tv_init(dev);
6640
6641	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
6642		encoder->base.possible_crtcs = encoder->crtc_mask;
6643		encoder->base.possible_clones =
6644			intel_encoder_clones(dev, encoder->clone_mask);
6645	}
6646
6647	/* disable all the possible outputs/crtcs before entering KMS mode */
6648	drm_helper_disable_unused_functions(dev);
6649
6650	if (HAS_PCH_SPLIT(dev))
6651		ironlake_init_pch_refclk(dev);
6652}
6653
6654static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
6655{
6656	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
6657
6658	drm_framebuffer_cleanup(fb);
6659	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
6660
6661	free(intel_fb, DRM_MEM_KMS);
6662}
6663
6664static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
6665						struct drm_file *file,
6666						unsigned int *handle)
6667{
6668	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
6669	struct drm_i915_gem_object *obj = intel_fb->obj;
6670
6671	return drm_gem_handle_create(file, &obj->base, handle);
6672}
6673
6674static const struct drm_framebuffer_funcs intel_fb_funcs = {
6675	.destroy = intel_user_framebuffer_destroy,
6676	.create_handle = intel_user_framebuffer_create_handle,
6677};
6678
6679int intel_framebuffer_init(struct drm_device *dev,
6680			   struct intel_framebuffer *intel_fb,
6681			   struct drm_mode_fb_cmd2 *mode_cmd,
6682			   struct drm_i915_gem_object *obj)
6683{
6684	int ret;
6685
6686	if (obj->tiling_mode == I915_TILING_Y)
6687		return -EINVAL;
6688
6689	if (mode_cmd->pitches[0] & 63)
6690		return -EINVAL;
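	/*
	 * Worked example (illustrative): the check above requires a 64-byte
	 * aligned stride, so a 1366-wide XRGB8888 buffer (4 * 1366 = 5464
	 * bytes per row) must round its pitch up to 5504 bytes.
	 */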
6691
6692	switch (mode_cmd->pixel_format) {
6693	case DRM_FORMAT_RGB332:
6694	case DRM_FORMAT_RGB565:
6695	case DRM_FORMAT_XRGB8888:
6696	case DRM_FORMAT_XBGR8888:
6697	case DRM_FORMAT_ARGB8888:
6698	case DRM_FORMAT_XRGB2101010:
6699	case DRM_FORMAT_ARGB2101010:
6700		/* RGB formats are common across chipsets */
6701		break;
6702	case DRM_FORMAT_YUYV:
6703	case DRM_FORMAT_UYVY:
6704	case DRM_FORMAT_YVYU:
6705	case DRM_FORMAT_VYUY:
6706		break;
6707	default:
6708		DRM_DEBUG_KMS("unsupported pixel format %u\n",
6709				mode_cmd->pixel_format);
6710		return -EINVAL;
6711	}
6712
6713	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
6714	if (ret) {
6715		DRM_ERROR("framebuffer init failed %d\n", ret);
6716		return ret;
6717	}
6718
6719	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
6720	intel_fb->obj = obj;
6721	return 0;
6722}
6723
6724static int
6725intel_user_framebuffer_create(struct drm_device *dev,
6726    struct drm_file *filp, struct drm_mode_fb_cmd2 *mode_cmd,
6727    struct drm_framebuffer **res)
6728{
6729	struct drm_i915_gem_object *obj;
6730
6731	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
6732						mode_cmd->handles[0]));
6733	if (&obj->base == NULL)
6734		return (-ENOENT);
6735
6736	return (intel_framebuffer_create(dev, mode_cmd, obj, res));
6737}
6738
6739static const struct drm_mode_config_funcs intel_mode_funcs = {
6740	.fb_create = intel_user_framebuffer_create,
6741	.output_poll_changed = intel_fb_output_poll_changed,
6742};
6743
6744/* Set up chip specific display functions */
6745static void intel_init_display(struct drm_device *dev)
6746{
6747	struct drm_i915_private *dev_priv = dev->dev_private;
6748
6749	/* We always want a DPMS function */
6750	if (HAS_PCH_SPLIT(dev)) {
6751		dev_priv->display.dpms = ironlake_crtc_dpms;
6752		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
6753		dev_priv->display.off = ironlake_crtc_off;
6754		dev_priv->display.update_plane = ironlake_update_plane;
6755	} else {
6756		dev_priv->display.dpms = i9xx_crtc_dpms;
6757		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
6758		dev_priv->display.off = i9xx_crtc_off;
6759		dev_priv->display.update_plane = i9xx_update_plane;
6760	}
6761
6762	/* Returns the core display clock speed */
6763	if (IS_VALLEYVIEW(dev))
6764		dev_priv->display.get_display_clock_speed =
6765			valleyview_get_display_clock_speed;
6766	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
6767		dev_priv->display.get_display_clock_speed =
6768			i945_get_display_clock_speed;
6769	else if (IS_I915G(dev))
6770		dev_priv->display.get_display_clock_speed =
6771			i915_get_display_clock_speed;
6772	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
6773		dev_priv->display.get_display_clock_speed =
6774			i9xx_misc_get_display_clock_speed;
6775	else if (IS_I915GM(dev))
6776		dev_priv->display.get_display_clock_speed =
6777			i915gm_get_display_clock_speed;
6778	else if (IS_I865G(dev))
6779		dev_priv->display.get_display_clock_speed =
6780			i865_get_display_clock_speed;
6781	else if (IS_I85X(dev))
6782		dev_priv->display.get_display_clock_speed =
6783			i855_get_display_clock_speed;
6784	else /* 852, 830 */
6785		dev_priv->display.get_display_clock_speed =
6786			i830_get_display_clock_speed;
6787
6788	if (HAS_PCH_SPLIT(dev)) {
6789		if (IS_GEN5(dev)) {
6790			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
6791			dev_priv->display.write_eld = ironlake_write_eld;
6792		} else if (IS_GEN6(dev)) {
6793			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
6794			dev_priv->display.write_eld = ironlake_write_eld;
6795		} else if (IS_IVYBRIDGE(dev)) {
6796			/* FIXME: detect B0+ stepping and use auto training */
6797			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
6798			dev_priv->display.write_eld = ironlake_write_eld;
6799		} else if (IS_HASWELL(dev)) {
6800			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
6801			dev_priv->display.write_eld = ironlake_write_eld;
6802		} else
6803			dev_priv->display.update_wm = NULL;
6804	} else if (IS_VALLEYVIEW(dev)) {
6805		dev_priv->display.force_wake_get = vlv_force_wake_get;
6806		dev_priv->display.force_wake_put = vlv_force_wake_put;
6807	} else if (IS_G4X(dev)) {
6808		dev_priv->display.write_eld = g4x_write_eld;
6809	}
6810
6811	/* Default just returns -ENODEV to indicate unsupported */
6812	dev_priv->display.queue_flip = intel_default_queue_flip;
6813
6814	switch (INTEL_INFO(dev)->gen) {
6815	case 2:
6816		dev_priv->display.queue_flip = intel_gen2_queue_flip;
6817		break;
6818
6819	case 3:
6820		dev_priv->display.queue_flip = intel_gen3_queue_flip;
6821		break;
6822
6823	case 4:
6824	case 5:
6825		dev_priv->display.queue_flip = intel_gen4_queue_flip;
6826		break;
6827
6828	case 6:
6829		dev_priv->display.queue_flip = intel_gen6_queue_flip;
6830		break;
6831	case 7:
6832		dev_priv->display.queue_flip = intel_gen7_queue_flip;
6833		break;
6834	}
6835}
6836
6837/*
6838 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
6839 * resume, or other times.  This quirk makes sure that's the case for
6840 * affected systems.
6841 */
6842static void quirk_pipea_force(struct drm_device *dev)
6843{
6844	struct drm_i915_private *dev_priv = dev->dev_private;
6845
6846	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
6847	DRM_INFO("applying pipe a force quirk\n");
6848}
6849
6850/*
6851 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
6852 */
6853static void quirk_ssc_force_disable(struct drm_device *dev)
6854{
6855	struct drm_i915_private *dev_priv = dev->dev_private;
6856	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
6857	DRM_INFO("applying lvds SSC disable quirk\n");
6858}
6859
6860/*
6861 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
6862 * brightness value
6863 */
6864static void quirk_invert_brightness(struct drm_device *dev)
6865{
6866	struct drm_i915_private *dev_priv = dev->dev_private;
6867	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
6868	DRM_INFO("applying inverted panel brightness quirk\n");
6869}
6870
6871struct intel_quirk {
6872	int device;
6873	int subsystem_vendor;
6874	int subsystem_device;
6875	void (*hook)(struct drm_device *dev);
6876};
6877
6878#define	PCI_ANY_ID	(~0u)
6879
6880static struct intel_quirk intel_quirks[] = {
6881	/* HP Mini needs pipe A force quirk (LP: #322104) */
6882	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
6883
6884	/* Thinkpad R31 needs pipe A force quirk */
6885	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
6886	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
6887	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
6888
6889	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
6890	{ 0x3577,  0x1014, 0x0513, quirk_pipea_force },
6891	/* ThinkPad X40 needs pipe A force quirk */
6892
6893	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
6894	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
6895
6896	/* 855 & before need to leave pipe A & dpll A up */
6897	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
6898	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
6899
6900	/* Lenovo U160 cannot use SSC on LVDS */
6901	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
6902
6903	/* Sony Vaio Y cannot use SSC on LVDS */
6904	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
6905
6906	/* Acer Aspire 5734Z must invert backlight brightness */
6907	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
6908};
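/*
 * Illustrative only: given the matching rules in intel_init_quirks()
 * below, a hypothetical entry such as
 * { 0x2a42, PCI_ANY_ID, PCI_ANY_ID, quirk_invert_brightness } would
 * apply its hook to every machine with that device id, regardless of
 * subsystem vendor or device.
 */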
6909
6910static void intel_init_quirks(struct drm_device *dev)
6911{
6912	struct intel_quirk *q;
6913	device_t d;
6914	int i;
6915
6916	d = dev->dev;
6917	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
6918		q = &intel_quirks[i];
6919		if (pci_get_device(d) == q->device &&
6920		    (pci_get_subvendor(d) == q->subsystem_vendor ||
6921		     q->subsystem_vendor == PCI_ANY_ID) &&
6922		    (pci_get_subdevice(d) == q->subsystem_device ||
6923		     q->subsystem_device == PCI_ANY_ID))
6924			q->hook(dev);
6925	}
6926}
6927
6928/* Disable the VGA plane that we never use */
6929static void i915_disable_vga(struct drm_device *dev)
6930{
6931	struct drm_i915_private *dev_priv = dev->dev_private;
6932	u8 sr1;
6933	u32 vga_reg;
6934
6935	if (HAS_PCH_SPLIT(dev))
6936		vga_reg = CPU_VGACNTRL;
6937	else
6938		vga_reg = VGACNTRL;
6939
6940#if 0
6941	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
6942#endif
6943	outb(VGA_SR_INDEX, SR01);
6944	sr1 = inb(VGA_SR_DATA);
6945	outb(VGA_SR_DATA, sr1 | 1 << 5);
6946#if 0
6947	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
6948#endif
6949	DELAY(300);
6950
6951	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
6952	POSTING_READ(vga_reg);
6953}
6954
6955static void ivb_pch_pwm_override(struct drm_device *dev)
6956{
6957	struct drm_i915_private *dev_priv = dev->dev_private;
6958
6959	/*
6960	 * IVB has CPU eDP backlight regs too, set things up to let the
6961	 * PCH regs control the backlight
6962	 */
6963	I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
6964	I915_WRITE(BLC_PWM_CPU_CTL, 0);
6965	I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE);
6966}
6967
6968void intel_modeset_init_hw(struct drm_device *dev)
6969{
6970	struct drm_i915_private *dev_priv = dev->dev_private;
6971
6972	intel_init_clock_gating(dev);
6973
6974	if (IS_IRONLAKE_M(dev)) {
6975		ironlake_enable_drps(dev);
6976		ironlake_enable_rc6(dev);
6977		intel_init_emon(dev);
6978	}
6979
6980	if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
6981		gen6_enable_rps(dev_priv);
6982		gen6_update_ring_freq(dev_priv);
6983	}
6984
6985	if (IS_IVYBRIDGE(dev))
6986		ivb_pch_pwm_override(dev);
6987}
6988
6989void intel_modeset_init(struct drm_device *dev)
6990{
6991	struct drm_i915_private *dev_priv = dev->dev_private;
6992	int i, ret;
6993
6994	drm_mode_config_init(dev);
6995
6996	dev->mode_config.min_width = 0;
6997	dev->mode_config.min_height = 0;
6998
6999	dev->mode_config.preferred_depth = 24;
7000	dev->mode_config.prefer_shadow = 1;
7001
7002	dev->mode_config.funcs = &intel_mode_funcs;
7003
7004	intel_init_quirks(dev);
7005
7006	intel_init_pm(dev);
7007
7008	intel_prepare_ddi(dev);
7009
7010	intel_init_display(dev);
7011
7012	if (IS_GEN2(dev)) {
7013		dev->mode_config.max_width = 2048;
7014		dev->mode_config.max_height = 2048;
7015	} else if (IS_GEN3(dev)) {
7016		dev->mode_config.max_width = 4096;
7017		dev->mode_config.max_height = 4096;
7018	} else {
7019		dev->mode_config.max_width = 8192;
7020		dev->mode_config.max_height = 8192;
7021	}
7022	dev->mode_config.fb_base = dev->agp->base;
7023
7024	DRM_DEBUG_KMS("%d display pipe%s available.\n",
7025		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
7026
7027	for (i = 0; i < dev_priv->num_pipe; i++) {
7028		intel_crtc_init(dev, i);
7029		ret = intel_plane_init(dev, i);
7030		if (ret)
7031			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
7032	}
7033
7034	intel_pch_pll_init(dev);
7035
7036	/* Just disable it once at startup */
7037	i915_disable_vga(dev);
7038	intel_setup_outputs(dev);
7039
7040	TASK_INIT(&dev_priv->idle_task, 0, intel_idle_update, dev_priv);
7041	callout_init(&dev_priv->idle_callout, CALLOUT_MPSAFE);
7042}
7043
7044void intel_modeset_gem_init(struct drm_device *dev)
7045{
7046	intel_modeset_init_hw(dev);
7047
7048	intel_setup_overlay(dev);
7049}
7050
7051void intel_modeset_cleanup(struct drm_device *dev)
7052{
7053	struct drm_i915_private *dev_priv = dev->dev_private;
7054	struct drm_crtc *crtc;
7055	struct intel_crtc *intel_crtc;
7056
7057	drm_kms_helper_poll_fini(dev);
7058	DRM_LOCK(dev);
7059
7060#if 0
7061	intel_unregister_dsm_handler();
7062#endif
7063
7064	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7065		/* Skip inactive CRTCs */
7066		if (!crtc->fb)
7067			continue;
7068
7069		intel_crtc = to_intel_crtc(crtc);
7070		intel_increase_pllclock(crtc);
7071	}
7072
7073	intel_disable_fbc(dev);
7074
7075	if (IS_IRONLAKE_M(dev))
7076		ironlake_disable_drps(dev);
7077	if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
7078		gen6_disable_rps(dev);
7079
7080	if (IS_IRONLAKE_M(dev))
7081		ironlake_disable_rc6(dev);
7082
7083	if (IS_VALLEYVIEW(dev))
7084		vlv_init_dpio(dev);
7085
7086	DRM_UNLOCK(dev);
7087
7088	/* Disable the irq before mode object teardown, as the irq handler
7089	 * might enqueue unpin/hotplug work. */
7090	drm_irq_uninstall(dev);
7091
7092	if (taskqueue_cancel(dev_priv->tq, &dev_priv->hotplug_task, NULL))
7093		taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
7094	if (taskqueue_cancel(dev_priv->tq, &dev_priv->rps_task, NULL))
7095		taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
7096
7097	/* Shut off idle work before the crtcs get freed. */
7098	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7099		intel_crtc = to_intel_crtc(crtc);
7100		callout_drain(&intel_crtc->idle_callout);
7101	}
7102	callout_drain(&dev_priv->idle_callout);
7103	if (taskqueue_cancel(dev_priv->tq, &dev_priv->idle_task, NULL))
7104		taskqueue_drain(dev_priv->tq, &dev_priv->idle_task);
7105
7106	drm_mode_config_cleanup(dev);
7107}
7108
7109/*
7110 * Return which encoder is currently attached to the connector.
7111 */
7112struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
7113{
7114	return &intel_attached_encoder(connector)->base;
7115}
7116
7117void intel_connector_attach_encoder(struct intel_connector *connector,
7118				    struct intel_encoder *encoder)
7119{
7120	connector->encoder = encoder;
7121	drm_mode_connector_attach_encoder(&connector->base,
7122					  &encoder->base);
7123}
7124
7125/*
7126 * Set VGA decode state - true == enable VGA decode.
7127 */
7128int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
7129{
7130	struct drm_i915_private *dev_priv;
7131	device_t bridge_dev;
7132	u16 gmch_ctrl;
7133
7134	dev_priv = dev->dev_private;
7135	bridge_dev = intel_gtt_get_bridge_device();
7136	gmch_ctrl = pci_read_config(bridge_dev, INTEL_GMCH_CTRL, 2);
7137	if (state)
7138		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
7139	else
7140		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
7141	pci_write_config(bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl, 2);
7142	return (0);
7143}
7144
7145struct intel_display_error_state {
7146	struct intel_cursor_error_state {
7147		u32 control;
7148		u32 position;
7149		u32 base;
7150		u32 size;
7151	} cursor[2];
7152
7153	struct intel_pipe_error_state {
7154		u32 conf;
7155		u32 source;
7156
7157		u32 htotal;
7158		u32 hblank;
7159		u32 hsync;
7160		u32 vtotal;
7161		u32 vblank;
7162		u32 vsync;
7163	} pipe[2];
7164
7165	struct intel_plane_error_state {
7166		u32 control;
7167		u32 stride;
7168		u32 size;
7169		u32 pos;
7170		u32 addr;
7171		u32 surface;
7172		u32 tile_offset;
7173	} plane[2];
7174};
7175
7176struct intel_display_error_state *
7177intel_display_capture_error_state(struct drm_device *dev)
7178{
7179	drm_i915_private_t *dev_priv = dev->dev_private;
7180	struct intel_display_error_state *error;
7181	int i;
7182
7183	error = malloc(sizeof(*error), DRM_MEM_KMS, M_NOWAIT);
7184	if (error == NULL)
7185		return NULL;
7186
7187	for (i = 0; i < 2; i++) {
7188		error->cursor[i].control = I915_READ(CURCNTR(i));
7189		error->cursor[i].position = I915_READ(CURPOS(i));
7190		error->cursor[i].base = I915_READ(CURBASE(i));
7191
7192		error->plane[i].control = I915_READ(DSPCNTR(i));
7193		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
7194		error->plane[i].size = I915_READ(DSPSIZE(i));
7195		error->plane[i].pos = I915_READ(DSPPOS(i));
7196		error->plane[i].addr = I915_READ(DSPADDR(i));
7197		if (INTEL_INFO(dev)->gen >= 4) {
7198			error->plane[i].surface = I915_READ(DSPSURF(i));
7199			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
7200		}
7201
7202		error->pipe[i].conf = I915_READ(PIPECONF(i));
7203		error->pipe[i].source = I915_READ(PIPESRC(i));
7204		error->pipe[i].htotal = I915_READ(HTOTAL(i));
7205		error->pipe[i].hblank = I915_READ(HBLANK(i));
7206		error->pipe[i].hsync = I915_READ(HSYNC(i));
7207		error->pipe[i].vtotal = I915_READ(VTOTAL(i));
7208		error->pipe[i].vblank = I915_READ(VBLANK(i));
7209		error->pipe[i].vsync = I915_READ(VSYNC(i));
7210	}
7211
7212	return error;
7213}
7214
7215void
7216intel_display_print_error_state(struct sbuf *m,
7217				struct drm_device *dev,
7218				struct intel_display_error_state *error)
7219{
7220	int i;
7221
7222	for (i = 0; i < 2; i++) {
7223		sbuf_printf(m, "Pipe [%d]:\n", i);
7224		sbuf_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
7225		sbuf_printf(m, "  SRC: %08x\n", error->pipe[i].source);
7226		sbuf_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
7227		sbuf_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
7228		sbuf_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
7229		sbuf_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
7230		sbuf_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
7231		sbuf_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
7232
7233		sbuf_printf(m, "Plane [%d]:\n", i);
7234		sbuf_printf(m, "  CNTR: %08x\n", error->plane[i].control);
7235		sbuf_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
7236		sbuf_printf(m, "  SIZE: %08x\n", error->plane[i].size);
7237		sbuf_printf(m, "  POS: %08x\n", error->plane[i].pos);
7238		sbuf_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
7239		if (INTEL_INFO(dev)->gen >= 4) {
7240			sbuf_printf(m, "  SURF: %08x\n", error->plane[i].surface);
7241			sbuf_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
7242		}
7243
7244		sbuf_printf(m, "Cursor [%d]:\n", i);
7245		sbuf_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
7246		sbuf_printf(m, "  POS: %08x\n", error->cursor[i].position);
7247		sbuf_printf(m, "  BASE: %08x\n", error->cursor[i].base);
7248	}
7249}
7250