intel_dp.c revision 282199
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 *    Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_dp.c 282199 2015-04-28 19:35:05Z dumbbell $");
30
31#include <dev/drm2/drmP.h>
32#include <dev/drm2/drm.h>
33#include <dev/drm2/drm_crtc.h>
34#include <dev/drm2/drm_crtc_helper.h>
35#include <dev/drm2/i915/i915_drm.h>
36#include <dev/drm2/i915/i915_drv.h>
37#include <dev/drm2/i915/intel_drv.h>
38#include <dev/drm2/drm_dp_helper.h>
39
40#define DP_RECEIVER_CAP_SIZE	0xf
41#define DP_LINK_STATUS_SIZE	6
42#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
43
44#define DP_LINK_CONFIGURATION_SIZE	9
45
46struct intel_dp {
47	struct intel_encoder base;
48	uint32_t output_reg;
49	uint32_t DP;
50	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
51	bool has_audio;
52	enum hdmi_force_audio force_audio;
53	uint32_t color_range;
54	int dpms_mode;
55	uint8_t link_bw;
56	uint8_t lane_count;
57	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
58	device_t dp_iic_bus;
59	device_t adapter;
60	bool is_pch_edp;
61	uint8_t	train_set[4];
62	int panel_power_up_delay;
63	int panel_power_down_delay;
64	int panel_power_cycle_delay;
65	int backlight_on_delay;
66	int backlight_off_delay;
67	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
68	struct timeout_task panel_vdd_task;
69	bool want_panel_vdd;
70};
71
72/**
73 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
74 * @intel_dp: DP struct
75 *
76 * If a CPU or PCH DP output is attached to an eDP panel, this function
77 * will return true, and false otherwise.
78 */
79static bool is_edp(struct intel_dp *intel_dp)
80{
81	return intel_dp->base.type == INTEL_OUTPUT_EDP;
82}
83
84/**
85 * is_pch_edp - is the port on the PCH and attached to an eDP panel?
86 * @intel_dp: DP struct
87 *
88 * Returns true if the given DP struct corresponds to a PCH DP port attached
89 * to an eDP panel, false otherwise.  Helpful for determining whether we
90 * may need FDI resources for a given DP output or not.
91 */
92static bool is_pch_edp(struct intel_dp *intel_dp)
93{
94	return intel_dp->is_pch_edp;
95}
96
97/**
98 * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
99 * @intel_dp: DP struct
100 *
101 * Returns true if the given DP struct corresponds to a CPU eDP port.
102 */
103static bool is_cpu_edp(struct intel_dp *intel_dp)
104{
105	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
106}
107
108static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
109{
110	return container_of(encoder, struct intel_dp, base.base);
111}
112
113static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
114{
115	return container_of(intel_attached_encoder(connector),
116			    struct intel_dp, base);
117}
118
119/**
120 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
121 * @encoder: DRM encoder
122 *
123 * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
124 * by intel_display.c.
125 */
126bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
127{
128	struct intel_dp *intel_dp;
129
130	if (!encoder)
131		return false;
132
133	intel_dp = enc_to_intel_dp(encoder);
134
135	return is_pch_edp(intel_dp);
136}
137
138static void intel_dp_start_link_train(struct intel_dp *intel_dp);
139static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
140static void intel_dp_link_down(struct intel_dp *intel_dp);
141
142void
143intel_edp_link_config(struct intel_encoder *intel_encoder,
144		       int *lane_num, int *link_bw)
145{
146	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
147
148	*lane_num = intel_dp->lane_count;
149	if (intel_dp->link_bw == DP_LINK_BW_1_62)
150		*link_bw = 162000;
151	else if (intel_dp->link_bw == DP_LINK_BW_2_7)
152		*link_bw = 270000;
153}
154
155static int
156intel_dp_max_lane_count(struct intel_dp *intel_dp)
157{
158	int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
159	switch (max_lane_count) {
160	case 1: case 2: case 4:
161		break;
162	default:
163		max_lane_count = 4;
164	}
165	return max_lane_count;
166}
167
168static int
169intel_dp_max_link_bw(struct intel_dp *intel_dp)
170{
171	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
172
173	switch (max_link_bw) {
174	case DP_LINK_BW_1_62:
175	case DP_LINK_BW_2_7:
176		break;
177	default:
178		max_link_bw = DP_LINK_BW_1_62;
179		break;
180	}
181	return max_link_bw;
182}
183
184static int
185intel_dp_link_clock(uint8_t link_bw)
186{
187	if (link_bw == DP_LINK_BW_2_7)
188		return 270000;
189	else
190		return 162000;
191}
192
193/*
194 * The units on the numbers in the next two are... bizarre.  Examples will
195 * make it clearer; this one parallels an example in the eDP spec.
196 *
197 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
198 *
199 *     270000 * 1 * 8 / 10 == 216000
200 *
201 * The actual data capacity of that configuration is 2.16Gbit/s, so the
202 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
203 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
204 * 119000.  At 18bpp that's 2142000 kilobits per second.
205 *
206 * Thus the strange-looking division by 10 in intel_dp_link_required, to
207 * get the result in decakilobits instead of kilobits.
208 */
209
210static int
211intel_dp_link_required(int pixel_clock, int bpp)
212{
213	return (pixel_clock * bpp + 9) / 10;
214}
215
216static int
217intel_dp_max_data_rate(int max_link_clock, int max_lanes)
218{
219	return (max_link_clock * max_lanes * 8) / 10;
220}
221
222static bool
223intel_dp_adjust_dithering(struct intel_dp *intel_dp,
224			  const struct drm_display_mode *mode,
225			  struct drm_display_mode *adjusted_mode)
226{
227	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
228	int max_lanes = intel_dp_max_lane_count(intel_dp);
229	int max_rate, mode_rate;
230
231	mode_rate = intel_dp_link_required(mode->clock, 24);
232	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
233
234	if (mode_rate > max_rate) {
235		mode_rate = intel_dp_link_required(mode->clock, 18);
236		if (mode_rate > max_rate)
237			return false;
238
239		if (adjusted_mode)
240			adjusted_mode->private_flags
241				|= INTEL_MODE_DP_FORCE_6BPC;
242
243		return true;
244	}
245
246	return true;
247}
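
/*
 * Illustrative example (added for clarity, not part of the original file):
 * continuing the 1680x1050R case from the comment above, a single 2.7GHz
 * lane offers intel_dp_max_data_rate(270000, 1) == 216000.  At 24bpp the
 * mode needs intel_dp_link_required(119000, 24) == 285600, which does not
 * fit, so intel_dp_adjust_dithering() retries at 18bpp:
 * (119000 * 18 + 9) / 10 == 214200 <= 216000, and INTEL_MODE_DP_FORCE_6BPC
 * is set on the adjusted mode.
 */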
248
249static int
250intel_dp_mode_valid(struct drm_connector *connector,
251		    struct drm_display_mode *mode)
252{
253	struct intel_dp *intel_dp = intel_attached_dp(connector);
254
255	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
256		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
257			return MODE_PANEL;
258
259		if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
260			return MODE_PANEL;
261	}
262
263	if (!intel_dp_adjust_dithering(intel_dp, mode, NULL))
264		return MODE_CLOCK_HIGH;
265
266	if (mode->clock < 10000)
267		return MODE_CLOCK_LOW;
268
269	return MODE_OK;
270}
271
272static uint32_t
273pack_aux(uint8_t *src, int src_bytes)
274{
275	int	i;
276	uint32_t v = 0;
277
278	if (src_bytes > 4)
279		src_bytes = 4;
280	for (i = 0; i < src_bytes; i++)
281		v |= ((uint32_t) src[i]) << ((3-i) * 8);
282	return v;
283}
284
285static void
286unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
287{
288	int i;
289	if (dst_bytes > 4)
290		dst_bytes = 4;
291	for (i = 0; i < dst_bytes; i++)
292		dst[i] = src >> ((3-i) * 8);
293}
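
/*
 * Worked example (added for clarity, not in the original source): the AUX
 * data registers hold the bytes big-endian, so pack_aux() of the three
 * bytes { 0x12, 0x34, 0x56 } yields 0x12345600, and
 * unpack_aux(0x12345600, dst, 3) recovers { 0x12, 0x34, 0x56 }.
 */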
294
295/* hrawclock is 1/4 the FSB frequency */
296static int
297intel_hrawclk(struct drm_device *dev)
298{
299	struct drm_i915_private *dev_priv = dev->dev_private;
300	uint32_t clkcfg;
301
302	clkcfg = I915_READ(CLKCFG);
303	switch (clkcfg & CLKCFG_FSB_MASK) {
304	case CLKCFG_FSB_400:
305		return 100;
306	case CLKCFG_FSB_533:
307		return 133;
308	case CLKCFG_FSB_667:
309		return 166;
310	case CLKCFG_FSB_800:
311		return 200;
312	case CLKCFG_FSB_1067:
313		return 266;
314	case CLKCFG_FSB_1333:
315		return 333;
316	/* these two are just a guess; one of them might be right */
317	case CLKCFG_FSB_1600:
318	case CLKCFG_FSB_1600_ALT:
319		return 400;
320	default:
321		return 133;
322	}
323}
324
325static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
326{
327	struct drm_device *dev = intel_dp->base.base.dev;
328	struct drm_i915_private *dev_priv = dev->dev_private;
329
330	return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
331}
332
333static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
334{
335	struct drm_device *dev = intel_dp->base.base.dev;
336	struct drm_i915_private *dev_priv = dev->dev_private;
337
338	return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
339}
340
341static void
342intel_dp_check_edp(struct intel_dp *intel_dp)
343{
344	struct drm_device *dev = intel_dp->base.base.dev;
345	struct drm_i915_private *dev_priv = dev->dev_private;
346
347	if (!is_edp(intel_dp))
348		return;
349	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
350		printf("eDP powered off while attempting aux channel communication.\n");
351		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
352			      I915_READ(PCH_PP_STATUS),
353			      I915_READ(PCH_PP_CONTROL));
354	}
355}
356
357static int
358intel_dp_aux_ch(struct intel_dp *intel_dp,
359		uint8_t *send, int send_bytes,
360		uint8_t *recv, int recv_size)
361{
362	uint32_t output_reg = intel_dp->output_reg;
363	struct drm_device *dev = intel_dp->base.base.dev;
364	struct drm_i915_private *dev_priv = dev->dev_private;
365	uint32_t ch_ctl = output_reg + 0x10;
366	uint32_t ch_data = ch_ctl + 4;
367	int i;
368	int recv_bytes;
369	uint32_t status;
370	uint32_t aux_clock_divider;
371	int try, precharge = 5;
372
373	intel_dp_check_edp(intel_dp);
374	/* The clock divider is based on the hrawclk, and we would like
375	 * the resulting clock to run at 2MHz.  So take the hrawclk
376	 * value, divide by 2, and use that.
377	 *
378	 * Note that PCH attached eDP panels should use a 125MHz input
379	 * clock divider.
380	 */
381	if (is_cpu_edp(intel_dp)) {
382		if (IS_GEN6(dev) || IS_GEN7(dev))
383			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
384		else
385			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
386	} else if (HAS_PCH_SPLIT(dev))
387		aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
388	else
389		aux_clock_divider = intel_hrawclk(dev) / 2;
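
	/*
	 * Example (illustrative, added for clarity): an 800MHz FSB gives
	 * intel_hrawclk() == 200, so the divider becomes 200 / 2 == 100 and
	 * 200MHz / 100 lands at the ~2MHz clock the comment above asks for.
	 * The fixed values work out the same way: 400MHz / 200 and
	 * 125MHz / 63 are both roughly 2MHz.
	 */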
390
391	/* Try to wait for any previous AUX channel activity */
392	for (try = 0; try < 3; try++) {
393		status = I915_READ(ch_ctl);
394		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
395			break;
396		drm_msleep(1, "915ach");
397	}
398
399	if (try == 3) {
400		printf("dp_aux_ch not started status 0x%08x\n",
401		     I915_READ(ch_ctl));
402		return -EBUSY;
403	}
404
405	/* Must try at least 3 times according to DP spec */
406	for (try = 0; try < 5; try++) {
407		/* Load the send data into the aux channel data registers */
408		for (i = 0; i < send_bytes; i += 4)
409			I915_WRITE(ch_data + i,
410				   pack_aux(send + i, send_bytes - i));
411
412		/* Send the command and wait for it to complete */
413		I915_WRITE(ch_ctl,
414			   DP_AUX_CH_CTL_SEND_BUSY |
415			   DP_AUX_CH_CTL_TIME_OUT_400us |
416			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
417			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
418			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
419			   DP_AUX_CH_CTL_DONE |
420			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
421			   DP_AUX_CH_CTL_RECEIVE_ERROR);
422		for (;;) {
423			status = I915_READ(ch_ctl);
424			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
425				break;
426			DELAY(100);
427		}
428
429		/* Clear done status and any errors */
430		I915_WRITE(ch_ctl,
431			   status |
432			   DP_AUX_CH_CTL_DONE |
433			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
434			   DP_AUX_CH_CTL_RECEIVE_ERROR);
435
436		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
437			      DP_AUX_CH_CTL_RECEIVE_ERROR))
438			continue;
439		if (status & DP_AUX_CH_CTL_DONE)
440			break;
441	}
442
443	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
444		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
445		return -EBUSY;
446	}
447
448	/* Check for timeout or receive error.
449	 * Timeouts occur when the sink is not connected
450	 */
451	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
452		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
453		return -EIO;
454	}
455
456	/* Timeouts occur when the device isn't connected, so they're
457	 * "normal" -- don't fill the kernel log with these */
458	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
459		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
460		return -ETIMEDOUT;
461	}
462
463	/* Unload any bytes sent back from the other side */
464	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
465		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
466	if (recv_bytes > recv_size)
467		recv_bytes = recv_size;
468
469	for (i = 0; i < recv_bytes; i += 4)
470		unpack_aux(I915_READ(ch_data + i),
471			   recv + i, recv_bytes - i);
472
473	return recv_bytes;
474}
475
476/* Write data to the aux channel in native mode */
477static int
478intel_dp_aux_native_write(struct intel_dp *intel_dp,
479			  uint16_t address, uint8_t *send, int send_bytes)
480{
481	int ret;
482	uint8_t	msg[20];
483	int msg_bytes;
484	uint8_t	ack;
485
486	intel_dp_check_edp(intel_dp);
487	if (send_bytes > 16)
488		return -1;
489	msg[0] = AUX_NATIVE_WRITE << 4;
490	msg[1] = address >> 8;
491	msg[2] = address & 0xff;
492	msg[3] = send_bytes - 1;
493	memcpy(&msg[4], send, send_bytes);
494	msg_bytes = send_bytes + 4;
495	for (;;) {
496		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
497		if (ret < 0)
498			return ret;
499		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
500			break;
501		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
502			DELAY(100);
503		else
504			return -EIO;
505	}
506	return send_bytes;
507}
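
/*
 * Illustrative request layout (added for clarity; DPCD addresses assumed
 * from drm_dp_helper.h): writing DP_SET_POWER_D0 (0x01) to DP_SET_POWER
 * (DPCD address 0x600) builds msg[] = { AUX_NATIVE_WRITE << 4, 0x06, 0x00,
 * 0x00, 0x01 }, i.e. command, address high byte, address low byte,
 * length - 1, then the single payload byte, for msg_bytes == 5.
 */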
508
509/* Write a single byte to the aux channel in native mode */
510static int
511intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
512			    uint16_t address, uint8_t byte)
513{
514	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
515}
516
517/* read bytes from a native aux channel */
518static int
519intel_dp_aux_native_read(struct intel_dp *intel_dp,
520			 uint16_t address, uint8_t *recv, int recv_bytes)
521{
522	uint8_t msg[4];
523	int msg_bytes;
524	uint8_t reply[20];
525	int reply_bytes;
526	uint8_t ack;
527	int ret;
528
529	intel_dp_check_edp(intel_dp);
530	msg[0] = AUX_NATIVE_READ << 4;
531	msg[1] = address >> 8;
532	msg[2] = address & 0xff;
533	msg[3] = recv_bytes - 1;
534
535	msg_bytes = 4;
536	reply_bytes = recv_bytes + 1;
537
538	for (;;) {
539		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
540				      reply, reply_bytes);
541		if (ret == 0)
542			return -EPROTO;
543		if (ret < 0)
544			return ret;
545		ack = reply[0];
546		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
547			memcpy(recv, reply + 1, ret - 1);
548			return ret - 1;
549		}
550		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
551			DELAY(100);
552		else
553			return -EIO;
554	}
555}
556
557static int
558intel_dp_i2c_aux_ch(device_t idev, int mode, uint8_t write_byte,
559    uint8_t *read_byte)
560{
561	struct iic_dp_aux_data *data;
562	struct intel_dp *intel_dp;
563	uint16_t address;
564	uint8_t msg[5];
565	uint8_t reply[2];
566	unsigned retry;
567	int msg_bytes;
568	int reply_bytes;
569	int ret;
570
571	data = device_get_softc(idev);
572	intel_dp = data->priv;
573	address = data->address;
574
575	intel_dp_check_edp(intel_dp);
576	/* Set up the command byte */
577	if (mode & MODE_I2C_READ)
578		msg[0] = AUX_I2C_READ << 4;
579	else
580		msg[0] = AUX_I2C_WRITE << 4;
581
582	if (!(mode & MODE_I2C_STOP))
583		msg[0] |= AUX_I2C_MOT << 4;
584
585	msg[1] = address >> 8;
586	msg[2] = address;
587
588	switch (mode) {
589	case MODE_I2C_WRITE:
590		msg[3] = 0;
591		msg[4] = write_byte;
592		msg_bytes = 5;
593		reply_bytes = 1;
594		break;
595	case MODE_I2C_READ:
596		msg[3] = 0;
597		msg_bytes = 4;
598		reply_bytes = 2;
599		break;
600	default:
601		msg_bytes = 3;
602		reply_bytes = 1;
603		break;
604	}
605
606	for (retry = 0; retry < 5; retry++) {
607		ret = intel_dp_aux_ch(intel_dp,
608				      msg, msg_bytes,
609				      reply, reply_bytes);
610		if (ret < 0) {
611			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
612			return (ret);
613		}
614
615		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
616		case AUX_NATIVE_REPLY_ACK:
617			/* I2C-over-AUX Reply field is only valid
618			 * when paired with AUX ACK.
619			 */
620			break;
621		case AUX_NATIVE_REPLY_NACK:
622			DRM_DEBUG_KMS("aux_ch native nack\n");
623			return (-EREMOTEIO);
624		case AUX_NATIVE_REPLY_DEFER:
625			DELAY(100);
626			continue;
627		default:
628			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
629				  reply[0]);
630			return (-EREMOTEIO);
631		}
632
633		switch (reply[0] & AUX_I2C_REPLY_MASK) {
634		case AUX_I2C_REPLY_ACK:
635			if (mode == MODE_I2C_READ) {
636				*read_byte = reply[1];
637			}
638			return (0/*reply_bytes - 1*/);
639		case AUX_I2C_REPLY_NACK:
640			DRM_DEBUG_KMS("aux_i2c nack\n");
641			return (-EREMOTEIO);
642		case AUX_I2C_REPLY_DEFER:
643			DRM_DEBUG_KMS("aux_i2c defer\n");
644			DELAY(100);
645			break;
646		default:
647			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
648			return (-EREMOTEIO);
649		}
650	}
651
652	DRM_ERROR("too many retries, giving up\n");
653	return (-EREMOTEIO);
654}
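
/*
 * Illustrative trace (added for clarity, not from the original source):
 * reading one EDID byte from I2C address 0x50 arrives here as MODE_I2C_READ
 * without MODE_I2C_STOP, so msg[] becomes
 * { (AUX_I2C_READ | AUX_I2C_MOT) << 4, 0x00, 0x50, 0x00 } and a two-byte
 * reply is expected: the AUX/I2C ack byte followed by the data byte that
 * is copied into *read_byte.
 */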
655
656static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
657static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
658
659static int
660intel_dp_i2c_init(struct intel_dp *intel_dp,
661		  struct intel_connector *intel_connector, const char *name)
662{
663	int ret;
664
665	DRM_DEBUG_KMS("i2c_init %s\n", name);
666
667	ironlake_edp_panel_vdd_on(intel_dp);
668	ret = iic_dp_aux_add_bus(intel_connector->base.dev->dev, name,
669	    intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
670	    &intel_dp->adapter);
671	ironlake_edp_panel_vdd_off(intel_dp, false);
672	return (ret);
673}
674
675static bool
676intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
677		    struct drm_display_mode *adjusted_mode)
678{
679	struct drm_device *dev = encoder->dev;
680	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
681	int lane_count, clock;
682	int max_lane_count = intel_dp_max_lane_count(intel_dp);
683	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
684	int bpp, mode_rate;
685	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
686
687	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
688		intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
689		intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
690					mode, adjusted_mode);
691	}
692
693	DRM_DEBUG_KMS("DP link computation with max lane count %i "
694		      "max bw %02x pixel clock %iKHz\n",
695		      max_lane_count, bws[max_clock], mode->clock);
696
697	if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, adjusted_mode))
698		return false;
699
700	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
701	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
702
703	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
704		for (clock = 0; clock <= max_clock; clock++) {
705			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
706
707			if (mode_rate <= link_avail) {
708				intel_dp->link_bw = bws[clock];
709				intel_dp->lane_count = lane_count;
710				adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
711				DRM_DEBUG_KMS("DP link bw %02x lane "
712						"count %d clock %d bpp %d\n",
713				       intel_dp->link_bw, intel_dp->lane_count,
714				       adjusted_mode->clock, bpp);
715				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
716					      mode_rate, link_avail);
717				return true;
718			}
719		}
720	}
721
722	return false;
723}
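
/*
 * Example walk of the search loop above (illustrative, assuming the sink
 * advertises DP_LINK_BW_2_7): with the 6bpc fallback the 1680x1050R mode
 * needs mode_rate == 214200.  One lane at 1.62GHz offers 129600 and is
 * rejected; one lane at 2.7GHz offers 216000, so the search stops with
 * link_bw DP_LINK_BW_2_7 and a single lane.
 */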
724
725struct intel_dp_m_n {
726	uint32_t	tu;
727	uint32_t	gmch_m;
728	uint32_t	gmch_n;
729	uint32_t	link_m;
730	uint32_t	link_n;
731};
732
733static void
734intel_reduce_ratio(uint32_t *num, uint32_t *den)
735{
736	while (*num > 0xffffff || *den > 0xffffff) {
737		*num >>= 1;
738		*den >>= 1;
739	}
740}
741
742static void
743intel_dp_compute_m_n(int bpp,
744		     int nlanes,
745		     int pixel_clock,
746		     int link_clock,
747		     struct intel_dp_m_n *m_n)
748{
749	m_n->tu = 64;
750	m_n->gmch_m = (pixel_clock * bpp) >> 3;
751	m_n->gmch_n = link_clock * nlanes;
752	intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
753	m_n->link_m = pixel_clock;
754	m_n->link_n = link_clock;
755	intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
756}
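
/*
 * Worked example (added for clarity): a 148500kHz pixel clock at 24bpp over
 * four 2.7GHz lanes gives gmch_m = (148500 * 24) >> 3 == 445500 and
 * gmch_n = 270000 * 4 == 1080000; both already fit in 24 bits, so
 * intel_reduce_ratio() leaves them alone, and link_m/link_n are simply
 * 148500/270000.
 */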
757
758void
759intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
760		 struct drm_display_mode *adjusted_mode)
761{
762	struct drm_device *dev = crtc->dev;
763	struct drm_mode_config *mode_config = &dev->mode_config;
764	struct drm_encoder *encoder;
765	struct drm_i915_private *dev_priv = dev->dev_private;
766	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
767	int lane_count = 4;
768	struct intel_dp_m_n m_n;
769	int pipe = intel_crtc->pipe;
770
771	/*
772	 * Find the lane count in the intel_encoder private
773	 */
774	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
775		struct intel_dp *intel_dp;
776
777		if (encoder->crtc != crtc)
778			continue;
779
780		intel_dp = enc_to_intel_dp(encoder);
781		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
782		    intel_dp->base.type == INTEL_OUTPUT_EDP)
783		{
784			lane_count = intel_dp->lane_count;
785			break;
786		}
787	}
788
789	/*
790	 * Compute the GMCH and Link ratios. The '3' here is
791	 * the number of bytes_per_pixel post-LUT, which we always
792	 * set up for 8-bits of R/G/B, or 3 bytes total.
793	 */
794	intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
795			     mode->clock, adjusted_mode->clock, &m_n);
796
797	if (HAS_PCH_SPLIT(dev)) {
798		I915_WRITE(TRANSDATA_M1(pipe),
799			   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
800			   m_n.gmch_m);
801		I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
802		I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
803		I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
804	} else {
805		I915_WRITE(PIPE_GMCH_DATA_M(pipe),
806			   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
807			   m_n.gmch_m);
808		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
809		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
810		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
811	}
812}
813
814static void ironlake_edp_pll_on(struct drm_encoder *encoder);
815static void ironlake_edp_pll_off(struct drm_encoder *encoder);
816
817static void
818intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
819		  struct drm_display_mode *adjusted_mode)
820{
821	struct drm_device *dev = encoder->dev;
822	struct drm_i915_private *dev_priv = dev->dev_private;
823	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
824	struct drm_crtc *crtc = intel_dp->base.base.crtc;
825	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
826
827	/* Turn on the eDP PLL if needed */
828	if (is_edp(intel_dp)) {
829		if (!is_pch_edp(intel_dp))
830			ironlake_edp_pll_on(encoder);
831		else
832			ironlake_edp_pll_off(encoder);
833	}
834
835	/*
836	 * There are four kinds of DP registers:
837	 *
838	 * 	IBX PCH
839	 * 	SNB CPU
840	 *	IVB CPU
841	 * 	CPT PCH
842	 *
843	 * IBX PCH and CPU are the same for almost everything,
844	 * except that the CPU DP PLL is configured in this
845	 * register
846	 *
847	 * CPT PCH is quite different, having many bits moved
848	 * to the TRANS_DP_CTL register instead. That
849	 * configuration happens (oddly) in ironlake_pch_enable
850	 */
851
852	/* Preserve the BIOS-computed detected bit. This is
853	 * supposed to be read-only.
854	 */
855	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
856	intel_dp->DP |=  DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
857
858	/* Handle DP bits in common between all three register formats */
859
860	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
861
862	switch (intel_dp->lane_count) {
863	case 1:
864		intel_dp->DP |= DP_PORT_WIDTH_1;
865		break;
866	case 2:
867		intel_dp->DP |= DP_PORT_WIDTH_2;
868		break;
869	case 4:
870		intel_dp->DP |= DP_PORT_WIDTH_4;
871		break;
872	}
873	if (intel_dp->has_audio) {
874		DRM_DEBUG_KMS("Enabling DP audio on pipe %c\n",
875				 pipe_name(intel_crtc->pipe));
876		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
877		intel_write_eld(encoder, adjusted_mode);
878	}
879	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
880	intel_dp->link_configuration[0] = intel_dp->link_bw;
881	intel_dp->link_configuration[1] = intel_dp->lane_count;
882	/*
883	 * Check for DPCD version 1.1 or later and enhanced framing support
884	 */
885	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
886	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
887		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
888	}
889
890	/* Split out the IBX/CPU vs CPT settings */
891
892	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
893		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
894			intel_dp->DP |= DP_SYNC_HS_HIGH;
895		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
896			intel_dp->DP |= DP_SYNC_VS_HIGH;
897		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
898
899		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
900			intel_dp->DP |= DP_ENHANCED_FRAMING;
901
902		intel_dp->DP |= intel_crtc->pipe << 29;
903
904		/* don't miss the required setting for eDP */
905		intel_dp->DP |= DP_PLL_ENABLE;
906		if (adjusted_mode->clock < 200000)
907			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
908		else
909			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
910	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
911		intel_dp->DP |= intel_dp->color_range;
912
913		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
914			intel_dp->DP |= DP_SYNC_HS_HIGH;
915		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
916			intel_dp->DP |= DP_SYNC_VS_HIGH;
917		intel_dp->DP |= DP_LINK_TRAIN_OFF;
918
919		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
920			intel_dp->DP |= DP_ENHANCED_FRAMING;
921
922		if (intel_crtc->pipe == 1)
923			intel_dp->DP |= DP_PIPEB_SELECT;
924
925		if (is_cpu_edp(intel_dp)) {
926			/* don't miss the required setting for eDP */
927			intel_dp->DP |= DP_PLL_ENABLE;
928			if (adjusted_mode->clock < 200000)
929				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
930			else
931				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
932		}
933	} else {
934		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
935	}
936}
937
938#define IDLE_ON_MASK		(PP_ON | 0 	  | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
939#define IDLE_ON_VALUE   	(PP_ON | 0 	  | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
940
941#define IDLE_OFF_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
942#define IDLE_OFF_VALUE		(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
943
944#define IDLE_CYCLE_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
945#define IDLE_CYCLE_VALUE	(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
946
947static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
948				       u32 mask,
949				       u32 value)
950{
951	struct drm_device *dev = intel_dp->base.base.dev;
952	struct drm_i915_private *dev_priv = dev->dev_private;
953
954	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
955		      mask, value,
956		      I915_READ(PCH_PP_STATUS),
957		      I915_READ(PCH_PP_CONTROL));
958
959	if (_intel_wait_for(dev,
960	    (I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10, "915iwp")) {
961		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
962			  I915_READ(PCH_PP_STATUS),
963			  I915_READ(PCH_PP_CONTROL));
964	}
965}
966
967static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
968{
969	DRM_DEBUG_KMS("Wait for panel power on\n");
970	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
971}
972
973static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
974{
975	DRM_DEBUG_KMS("Wait for panel power off time\n");
976	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
977}
978
979static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
980{
981	DRM_DEBUG_KMS("Wait for panel power cycle\n");
982	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
983}
984
985
986/* Read the current pp_control value, unlocking the register if it
987 * is locked
988 */
989
990static  u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
991{
992	u32	control = I915_READ(PCH_PP_CONTROL);
993
994	control &= ~PANEL_UNLOCK_MASK;
995	control |= PANEL_UNLOCK_REGS;
996	return control;
997}
998
999static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
1000{
1001	struct drm_device *dev = intel_dp->base.base.dev;
1002	struct drm_i915_private *dev_priv = dev->dev_private;
1003	u32 pp;
1004
1005	if (!is_edp(intel_dp))
1006		return;
1007	DRM_DEBUG_KMS("Turn eDP VDD on\n");
1008
1009	if (intel_dp->want_panel_vdd)
1010		printf("eDP VDD already requested on\n");
1011
1012	intel_dp->want_panel_vdd = true;
1013
1014	if (ironlake_edp_have_panel_vdd(intel_dp)) {
1015		DRM_DEBUG_KMS("eDP VDD already on\n");
1016		return;
1017	}
1018
1019	if (!ironlake_edp_have_panel_power(intel_dp))
1020		ironlake_wait_panel_power_cycle(intel_dp);
1021
1022	pp = ironlake_get_pp_control(dev_priv);
1023	pp |= EDP_FORCE_VDD;
1024	I915_WRITE(PCH_PP_CONTROL, pp);
1025	POSTING_READ(PCH_PP_CONTROL);
1026	DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
1027		      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
1028
1029	/*
1030	 * If the panel wasn't on, delay before accessing aux channel
1031	 */
1032	if (!ironlake_edp_have_panel_power(intel_dp)) {
1033		DRM_DEBUG_KMS("eDP was not running\n");
1034		drm_msleep(intel_dp->panel_power_up_delay, "915edpon");
1035	}
1036}
1037
1038static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
1039{
1040	struct drm_device *dev = intel_dp->base.base.dev;
1041	struct drm_i915_private *dev_priv = dev->dev_private;
1042	u32 pp;
1043
1044	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
1045		pp = ironlake_get_pp_control(dev_priv);
1046		pp &= ~EDP_FORCE_VDD;
1047		I915_WRITE(PCH_PP_CONTROL, pp);
1048		POSTING_READ(PCH_PP_CONTROL);
1049
1050		/* Make sure sequencer is idle before allowing subsequent activity */
1051		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
1052			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
1053
1054		drm_msleep(intel_dp->panel_power_down_delay, "915vddo");
1055	}
1056}
1057
1058static void ironlake_panel_vdd_work(void *arg, int pending __unused)
1059{
1060	struct intel_dp *intel_dp = arg;
1061	struct drm_device *dev = intel_dp->base.base.dev;
1062
1063	sx_xlock(&dev->mode_config.mutex);
1064	ironlake_panel_vdd_off_sync(intel_dp);
1065	sx_xunlock(&dev->mode_config.mutex);
1066}
1067
1068static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1069{
1070	if (!is_edp(intel_dp))
1071		return;
1072
1073	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
1074	if (!intel_dp->want_panel_vdd)
1075		printf("eDP VDD not forced on\n");
1076
1077	intel_dp->want_panel_vdd = false;
1078
1079	if (sync) {
1080		ironlake_panel_vdd_off_sync(intel_dp);
1081	} else {
1082		/*
1083		 * Queue the timer to fire a long
1084		 * time from now (relative to the power down delay)
1085		 * to keep the panel power up across a sequence of operations
1086		 */
1087		struct drm_i915_private *dev_priv = intel_dp->base.base.dev->dev_private;
1088		taskqueue_enqueue_timeout(dev_priv->tq,
1089		    &intel_dp->panel_vdd_task,
1090		    msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
1091	}
1092}
1093
1094static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1095{
1096	struct drm_device *dev = intel_dp->base.base.dev;
1097	struct drm_i915_private *dev_priv = dev->dev_private;
1098	u32 pp;
1099
1100	if (!is_edp(intel_dp))
1101		return;
1102
1103	DRM_DEBUG_KMS("Turn eDP power on\n");
1104
1105	if (ironlake_edp_have_panel_power(intel_dp)) {
1106		DRM_DEBUG_KMS("eDP power already on\n");
1107		return;
1108	}
1109
1110	ironlake_wait_panel_power_cycle(intel_dp);
1111
1112	pp = ironlake_get_pp_control(dev_priv);
1113	if (IS_GEN5(dev)) {
1114		/* ILK workaround: disable reset around power sequence */
1115		pp &= ~PANEL_POWER_RESET;
1116		I915_WRITE(PCH_PP_CONTROL, pp);
1117		POSTING_READ(PCH_PP_CONTROL);
1118	}
1119
1120	pp |= POWER_TARGET_ON;
1121	if (!IS_GEN5(dev))
1122		pp |= PANEL_POWER_RESET;
1123
1124	I915_WRITE(PCH_PP_CONTROL, pp);
1125	POSTING_READ(PCH_PP_CONTROL);
1126
1127	ironlake_wait_panel_on(intel_dp);
1128
1129	if (IS_GEN5(dev)) {
1130		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1131		I915_WRITE(PCH_PP_CONTROL, pp);
1132		POSTING_READ(PCH_PP_CONTROL);
1133	}
1134}
1135
1136static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
1137{
1138	struct drm_device *dev = intel_dp->base.base.dev;
1139	struct drm_i915_private *dev_priv = dev->dev_private;
1140	u32 pp;
1141
1142	if (!is_edp(intel_dp))
1143		return;
1144
1145	DRM_DEBUG_KMS("Turn eDP power off\n");
1146
1147	if (intel_dp->want_panel_vdd)
1148		printf("Cannot turn power off while VDD is on\n");
1149	ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
1150
1151	pp = ironlake_get_pp_control(dev_priv);
1152	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
1153	I915_WRITE(PCH_PP_CONTROL, pp);
1154	POSTING_READ(PCH_PP_CONTROL);
1155
1156	ironlake_wait_panel_off(intel_dp);
1157}
1158
1159static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1160{
1161	struct drm_device *dev = intel_dp->base.base.dev;
1162	struct drm_i915_private *dev_priv = dev->dev_private;
1163	u32 pp;
1164
1165	if (!is_edp(intel_dp))
1166		return;
1167
1168	DRM_DEBUG_KMS("\n");
1169	/*
1170	 * If we enable the backlight right away following a panel power
1171	 * on, we may see slight flicker as the panel syncs with the eDP
1172	 * link.  So delay a bit to make sure the image is solid before
1173	 * allowing it to appear.
1174	 */
1175	drm_msleep(intel_dp->backlight_on_delay, "915ebo");
1176	pp = ironlake_get_pp_control(dev_priv);
1177	pp |= EDP_BLC_ENABLE;
1178	I915_WRITE(PCH_PP_CONTROL, pp);
1179	POSTING_READ(PCH_PP_CONTROL);
1180}
1181
1182static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
1183{
1184	struct drm_device *dev = intel_dp->base.base.dev;
1185	struct drm_i915_private *dev_priv = dev->dev_private;
1186	u32 pp;
1187
1188	if (!is_edp(intel_dp))
1189		return;
1190
1191	DRM_DEBUG_KMS("\n");
1192	pp = ironlake_get_pp_control(dev_priv);
1193	pp &= ~EDP_BLC_ENABLE;
1194	I915_WRITE(PCH_PP_CONTROL, pp);
1195	POSTING_READ(PCH_PP_CONTROL);
1196	drm_msleep(intel_dp->backlight_off_delay, "915bo1");
1197}
1198
1199static void ironlake_edp_pll_on(struct drm_encoder *encoder)
1200{
1201	struct drm_device *dev = encoder->dev;
1202	struct drm_i915_private *dev_priv = dev->dev_private;
1203	u32 dpa_ctl;
1204
1205	DRM_DEBUG_KMS("\n");
1206	dpa_ctl = I915_READ(DP_A);
1207	dpa_ctl |= DP_PLL_ENABLE;
1208	I915_WRITE(DP_A, dpa_ctl);
1209	POSTING_READ(DP_A);
1210	DELAY(200);
1211}
1212
1213static void ironlake_edp_pll_off(struct drm_encoder *encoder)
1214{
1215	struct drm_device *dev = encoder->dev;
1216	struct drm_i915_private *dev_priv = dev->dev_private;
1217	u32 dpa_ctl;
1218
1219	dpa_ctl = I915_READ(DP_A);
1220	dpa_ctl &= ~DP_PLL_ENABLE;
1221	I915_WRITE(DP_A, dpa_ctl);
1222	POSTING_READ(DP_A);
1223	DELAY(200);
1224}
1225
1226/* If the sink supports it, try to set the power state appropriately */
1227static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1228{
1229	int ret, i;
1230
1231	/* Should have a valid DPCD by this point */
1232	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
1233		return;
1234
1235	if (mode != DRM_MODE_DPMS_ON) {
1236		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
1237						  DP_SET_POWER_D3);
1238		if (ret != 1)
1239			DRM_DEBUG("failed to write sink power state\n");
1240	} else {
1241		/*
1242		 * When turning on, we need to retry for 1ms to give the sink
1243		 * time to wake up.
1244		 */
1245		for (i = 0; i < 3; i++) {
1246			ret = intel_dp_aux_native_write_1(intel_dp,
1247							  DP_SET_POWER,
1248							  DP_SET_POWER_D0);
1249			if (ret == 1)
1250				break;
1251			drm_msleep(1, "915dps");
1252		}
1253	}
1254}
1255
1256static void intel_dp_prepare(struct drm_encoder *encoder)
1257{
1258	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1259
1260	ironlake_edp_backlight_off(intel_dp);
1261	ironlake_edp_panel_off(intel_dp);
1262
1263	/* Wake up the sink first */
1264	ironlake_edp_panel_vdd_on(intel_dp);
1265	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1266	intel_dp_link_down(intel_dp);
1267	ironlake_edp_panel_vdd_off(intel_dp, false);
1268
1269	/* Make sure the panel is off before trying to
1270	 * change the mode
1271	 */
1272}
1273
1274static void intel_dp_commit(struct drm_encoder *encoder)
1275{
1276	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1277	struct drm_device *dev = encoder->dev;
1278	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
1279
1280	ironlake_edp_panel_vdd_on(intel_dp);
1281	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1282	intel_dp_start_link_train(intel_dp);
1283	ironlake_edp_panel_on(intel_dp);
1284	ironlake_edp_panel_vdd_off(intel_dp, true);
1285	intel_dp_complete_link_train(intel_dp);
1286	ironlake_edp_backlight_on(intel_dp);
1287
1288	intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
1289
1290	if (HAS_PCH_CPT(dev))
1291		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
1292}
1293
1294static void
1295intel_dp_dpms(struct drm_encoder *encoder, int mode)
1296{
1297	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1298	struct drm_device *dev = encoder->dev;
1299	struct drm_i915_private *dev_priv = dev->dev_private;
1300	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
1301
1302	if (mode != DRM_MODE_DPMS_ON) {
1303		ironlake_edp_backlight_off(intel_dp);
1304		ironlake_edp_panel_off(intel_dp);
1305
1306		ironlake_edp_panel_vdd_on(intel_dp);
1307		intel_dp_sink_dpms(intel_dp, mode);
1308		intel_dp_link_down(intel_dp);
1309		ironlake_edp_panel_vdd_off(intel_dp, false);
1310
1311		if (is_cpu_edp(intel_dp))
1312			ironlake_edp_pll_off(encoder);
1313	} else {
1314		if (is_cpu_edp(intel_dp))
1315			ironlake_edp_pll_on(encoder);
1316
1317		ironlake_edp_panel_vdd_on(intel_dp);
1318		intel_dp_sink_dpms(intel_dp, mode);
1319		if (!(dp_reg & DP_PORT_EN)) {
1320			intel_dp_start_link_train(intel_dp);
1321			ironlake_edp_panel_on(intel_dp);
1322			ironlake_edp_panel_vdd_off(intel_dp, true);
1323			intel_dp_complete_link_train(intel_dp);
1324		} else
1325			ironlake_edp_panel_vdd_off(intel_dp, false);
1326		ironlake_edp_backlight_on(intel_dp);
1327	}
1328	intel_dp->dpms_mode = mode;
1329}
1330/*
1331 * Native read with retry for link status and receiver capability reads for
1332 * cases where the sink may still be asleep.
1333 */
1334static bool
1335intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1336			       uint8_t *recv, int recv_bytes)
1337{
1338	int ret, i;
1339
1340	/*
1341	 * Sinks are *supposed* to come up within 1ms from an off state,
1342	 * but we're also supposed to retry 3 times per the spec.
1343	 */
1344	for (i = 0; i < 3; i++) {
1345		ret = intel_dp_aux_native_read(intel_dp, address, recv,
1346					       recv_bytes);
1347		if (ret == recv_bytes)
1348			return true;
1349		drm_msleep(1, "915dpl");
1350	}
1351
1352	return false;
1353}
1354
1355/*
1356 * Fetch AUX CH registers 0x202 - 0x207 which contain
1357 * link status information
1358 */
1359static bool
1360intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1361{
1362	return intel_dp_aux_native_read_retry(intel_dp,
1363					      DP_LANE0_1_STATUS,
1364					      link_status,
1365					      DP_LINK_STATUS_SIZE);
1366}
1367
1368static uint8_t
1369intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
1370		     int r)
1371{
1372	return link_status[r - DP_LANE0_1_STATUS];
1373}
1374
1375static uint8_t
1376intel_get_adjust_request_voltage(uint8_t adjust_request[2],
1377				 int lane)
1378{
1379	int	    s = ((lane & 1) ?
1380			 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
1381			 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
1382	uint8_t l = adjust_request[lane>>1];
1383
1384	return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
1385}
1386
1387static uint8_t
1388intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
1389				      int lane)
1390{
1391	int	    s = ((lane & 1) ?
1392			 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
1393			 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
1394	uint8_t l = adjust_request[lane>>1];
1395
1396	return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
1397}
1398
1399
1400#if 0
1401static char	*voltage_names[] = {
1402	"0.4V", "0.6V", "0.8V", "1.2V"
1403};
1404static char	*pre_emph_names[] = {
1405	"0dB", "3.5dB", "6dB", "9.5dB"
1406};
1407static char	*link_train_names[] = {
1408	"pattern 1", "pattern 2", "idle", "off"
1409};
1410#endif
1411
1412/*
1413 * These are source-specific values; current Intel hardware supports
1414 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1415 */
1416
1417static uint8_t
1418intel_dp_voltage_max(struct intel_dp *intel_dp)
1419{
1420	struct drm_device *dev = intel_dp->base.base.dev;
1421
1422	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
1423		return DP_TRAIN_VOLTAGE_SWING_800;
1424	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
1425		return DP_TRAIN_VOLTAGE_SWING_1200;
1426	else
1427		return DP_TRAIN_VOLTAGE_SWING_800;
1428}
1429
1430static uint8_t
1431intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1432{
1433	struct drm_device *dev = intel_dp->base.base.dev;
1434
1435	if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
1436		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1437		case DP_TRAIN_VOLTAGE_SWING_400:
1438			return DP_TRAIN_PRE_EMPHASIS_6;
1439		case DP_TRAIN_VOLTAGE_SWING_600:
1440		case DP_TRAIN_VOLTAGE_SWING_800:
1441			return DP_TRAIN_PRE_EMPHASIS_3_5;
1442		default:
1443			return DP_TRAIN_PRE_EMPHASIS_0;
1444		}
1445	} else {
1446		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1447		case DP_TRAIN_VOLTAGE_SWING_400:
1448			return DP_TRAIN_PRE_EMPHASIS_6;
1449		case DP_TRAIN_VOLTAGE_SWING_600:
1450			return DP_TRAIN_PRE_EMPHASIS_6;
1451		case DP_TRAIN_VOLTAGE_SWING_800:
1452			return DP_TRAIN_PRE_EMPHASIS_3_5;
1453		case DP_TRAIN_VOLTAGE_SWING_1200:
1454		default:
1455			return DP_TRAIN_PRE_EMPHASIS_0;
1456		}
1457	}
1458}
1459
1460static void
1461intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1462{
1463	uint8_t v = 0;
1464	uint8_t p = 0;
1465	int lane;
1466	uint8_t	*adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
1467	uint8_t voltage_max;
1468	uint8_t preemph_max;
1469
1470	for (lane = 0; lane < intel_dp->lane_count; lane++) {
1471		uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
1472		uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
1473
1474		if (this_v > v)
1475			v = this_v;
1476		if (this_p > p)
1477			p = this_p;
1478	}
1479
1480	voltage_max = intel_dp_voltage_max(intel_dp);
1481	if (v >= voltage_max)
1482		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
1483
1484	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
1485	if (p >= preemph_max)
1486		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
1487
1488	for (lane = 0; lane < 4; lane++)
1489		intel_dp->train_set[lane] = v | p;
1490}
1491
1492static uint32_t
1493intel_dp_signal_levels(uint8_t train_set)
1494{
1495	uint32_t	signal_levels = 0;
1496
1497	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1498	case DP_TRAIN_VOLTAGE_SWING_400:
1499	default:
1500		signal_levels |= DP_VOLTAGE_0_4;
1501		break;
1502	case DP_TRAIN_VOLTAGE_SWING_600:
1503		signal_levels |= DP_VOLTAGE_0_6;
1504		break;
1505	case DP_TRAIN_VOLTAGE_SWING_800:
1506		signal_levels |= DP_VOLTAGE_0_8;
1507		break;
1508	case DP_TRAIN_VOLTAGE_SWING_1200:
1509		signal_levels |= DP_VOLTAGE_1_2;
1510		break;
1511	}
1512	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
1513	case DP_TRAIN_PRE_EMPHASIS_0:
1514	default:
1515		signal_levels |= DP_PRE_EMPHASIS_0;
1516		break;
1517	case DP_TRAIN_PRE_EMPHASIS_3_5:
1518		signal_levels |= DP_PRE_EMPHASIS_3_5;
1519		break;
1520	case DP_TRAIN_PRE_EMPHASIS_6:
1521		signal_levels |= DP_PRE_EMPHASIS_6;
1522		break;
1523	case DP_TRAIN_PRE_EMPHASIS_9_5:
1524		signal_levels |= DP_PRE_EMPHASIS_9_5;
1525		break;
1526	}
1527	return signal_levels;
1528}
1529
1530/* Gen6's DP voltage swing and pre-emphasis control */
1531static uint32_t
1532intel_gen6_edp_signal_levels(uint8_t train_set)
1533{
1534	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1535					 DP_TRAIN_PRE_EMPHASIS_MASK);
1536	switch (signal_levels) {
1537	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1538	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1539		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
1540	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1541		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
1542	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1543	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
1544		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
1545	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1546	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1547		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
1548	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1549	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
1550		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
1551	default:
1552		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1553			      "0x%x\n", signal_levels);
1554		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
1555	}
1556}
1557
1558/* Gen7's DP voltage swing and pre-emphasis control */
1559static uint32_t
1560intel_gen7_edp_signal_levels(uint8_t train_set)
1561{
1562	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1563					 DP_TRAIN_PRE_EMPHASIS_MASK);
1564	switch (signal_levels) {
1565	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1566		return EDP_LINK_TRAIN_400MV_0DB_IVB;
1567	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1568		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
1569	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1570		return EDP_LINK_TRAIN_400MV_6DB_IVB;
1571
1572	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1573		return EDP_LINK_TRAIN_600MV_0DB_IVB;
1574	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1575		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
1576
1577	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1578		return EDP_LINK_TRAIN_800MV_0DB_IVB;
1579	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1580		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
1581
1582	default:
1583		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1584			      "0x%x\n", signal_levels);
1585		return EDP_LINK_TRAIN_500MV_0DB_IVB;
1586	}
1587}
1588
1589static uint8_t
1590intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
1591		      int lane)
1592{
1593	int s = (lane & 1) * 4;
1594	uint8_t l = link_status[lane>>1];
1595
1596	return (l >> s) & 0xf;
1597}
1598
1599/* Check for clock recovery is done on all channels */
1600static bool
1601intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
1602{
1603	int lane;
1604	uint8_t lane_status;
1605
1606	for (lane = 0; lane < lane_count; lane++) {
1607		lane_status = intel_get_lane_status(link_status, lane);
1608		if ((lane_status & DP_LANE_CR_DONE) == 0)
1609			return false;
1610	}
1611	return true;
1612}
1613
1614/* Check to see if channel eq is done on all channels */
1615#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
1616			 DP_LANE_CHANNEL_EQ_DONE|\
1617			 DP_LANE_SYMBOL_LOCKED)
1618static bool
1619intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1620{
1621	uint8_t lane_align;
1622	uint8_t lane_status;
1623	int lane;
1624
1625	lane_align = intel_dp_link_status(link_status,
1626					  DP_LANE_ALIGN_STATUS_UPDATED);
1627	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
1628		return false;
1629	for (lane = 0; lane < intel_dp->lane_count; lane++) {
1630		lane_status = intel_get_lane_status(link_status, lane);
1631		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
1632			return false;
1633	}
1634	return true;
1635}
1636
1637static bool
1638intel_dp_set_link_train(struct intel_dp *intel_dp,
1639			uint32_t dp_reg_value,
1640			uint8_t dp_train_pat)
1641{
1642	struct drm_device *dev = intel_dp->base.base.dev;
1643	struct drm_i915_private *dev_priv = dev->dev_private;
1644	int ret;
1645
1646	I915_WRITE(intel_dp->output_reg, dp_reg_value);
1647	POSTING_READ(intel_dp->output_reg);
1648
1649	intel_dp_aux_native_write_1(intel_dp,
1650				    DP_TRAINING_PATTERN_SET,
1651				    dp_train_pat);
1652
1653	ret = intel_dp_aux_native_write(intel_dp,
1654					DP_TRAINING_LANE0_SET,
1655					intel_dp->train_set,
1656					intel_dp->lane_count);
1657	if (ret != intel_dp->lane_count)
1658		return false;
1659
1660	return true;
1661}
1662
1663/* Enable corresponding port and start training pattern 1 */
1664static void
1665intel_dp_start_link_train(struct intel_dp *intel_dp)
1666{
1667	struct drm_device *dev = intel_dp->base.base.dev;
1668	struct drm_i915_private *dev_priv = dev->dev_private;
1669	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
1670	int i;
1671	uint8_t voltage;
1672	bool clock_recovery = false;
1673	int voltage_tries, loop_tries;
1674	u32 reg;
1675	uint32_t DP = intel_dp->DP;
1676
1677	/* Enable output, wait for it to become active */
1678	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
1679	POSTING_READ(intel_dp->output_reg);
1680	intel_wait_for_vblank(dev, intel_crtc->pipe);
1681
1682	/* Write the link configuration data */
1683	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
1684				  intel_dp->link_configuration,
1685				  DP_LINK_CONFIGURATION_SIZE);
1686
1687	DP |= DP_PORT_EN;
1688
1689	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1690		DP &= ~DP_LINK_TRAIN_MASK_CPT;
1691	else
1692		DP &= ~DP_LINK_TRAIN_MASK;
1693	memset(intel_dp->train_set, 0, 4);
1694	voltage = 0xff;
1695	voltage_tries = 0;
1696	loop_tries = 0;
1697	clock_recovery = false;
1698	for (;;) {
1699		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1700		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
1701		uint32_t    signal_levels;
1702
1703
1704		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
1705			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1706			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1707		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1708			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1709			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1710		} else {
1711			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
1712			DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
1713			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1714		}
1715
1716		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1717			reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
1718		else
1719			reg = DP | DP_LINK_TRAIN_PAT_1;
1720
1721		if (!intel_dp_set_link_train(intel_dp, reg,
1722					     DP_TRAINING_PATTERN_1))
1723			break;
1724		/* Set training pattern 1 */
1725
1726		DELAY(100);
1727		if (!intel_dp_get_link_status(intel_dp, link_status)) {
1728			DRM_ERROR("failed to get link status\n");
1729			break;
1730		}
1731
1732		if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
1733			DRM_DEBUG_KMS("clock recovery OK\n");
1734			clock_recovery = true;
1735			break;
1736		}
1737
1738		/* Check to see if we've tried the max voltage */
1739		for (i = 0; i < intel_dp->lane_count; i++)
1740			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
1741				break;
1742		if (i == intel_dp->lane_count) {
1743			++loop_tries;
1744			if (loop_tries == 5) {
1745				DRM_DEBUG_KMS("too many full retries, give up\n");
1746				break;
1747			}
1748			memset(intel_dp->train_set, 0, 4);
1749			voltage_tries = 0;
1750			continue;
1751		}
1752
1753		/* Check to see if we've tried the same voltage 5 times */
1754		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
1755			++voltage_tries;
1756			if (voltage_tries == 5) {
1757				DRM_DEBUG_KMS("too many voltage retries, give up\n");
1758				break;
1759			}
1760		} else
1761			voltage_tries = 0;
1762		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1763
1764		/* Compute new intel_dp->train_set as requested by target */
1765		intel_get_adjust_train(intel_dp, link_status);
1766	}
1767
1768	intel_dp->DP = DP;
1769}
1770
1771static void
1772intel_dp_complete_link_train(struct intel_dp *intel_dp)
1773{
1774	struct drm_device *dev = intel_dp->base.base.dev;
1775	struct drm_i915_private *dev_priv = dev->dev_private;
1776	bool channel_eq = false;
1777	int tries, cr_tries;
1778	u32 reg;
1779	uint32_t DP = intel_dp->DP;
1780
1781	/* channel equalization */
1782	tries = 0;
1783	cr_tries = 0;
1784	channel_eq = false;
1785	for (;;) {
1786		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1787		uint32_t    signal_levels;
1788		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
1789
1790		if (cr_tries > 5) {
1791			DRM_ERROR("failed to train DP, aborting\n");
1792			intel_dp_link_down(intel_dp);
1793			break;
1794		}
1795
1796		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
1797			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1798			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1799		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1800			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1801			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1802		} else {
1803			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
1804			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1805		}
1806
1807		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1808			reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
1809		else
1810			reg = DP | DP_LINK_TRAIN_PAT_2;
1811
1812		/* channel eq pattern */
1813		if (!intel_dp_set_link_train(intel_dp, reg,
1814					     DP_TRAINING_PATTERN_2))
1815			break;
1816
1817		DELAY(400);
1818		if (!intel_dp_get_link_status(intel_dp, link_status))
1819			break;
1820
1821		/* Make sure clock is still ok */
1822		if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
1823			intel_dp_start_link_train(intel_dp);
1824			cr_tries++;
1825			continue;
1826		}
1827
1828		if (intel_channel_eq_ok(intel_dp, link_status)) {
1829			channel_eq = true;
1830			break;
1831		}
1832
1833		/* Try 5 times, then try clock recovery if that fails */
1834		if (tries > 5) {
1835			intel_dp_link_down(intel_dp);
1836			intel_dp_start_link_train(intel_dp);
1837			tries = 0;
1838			cr_tries++;
1839			continue;
1840		}
1841
1842		/* Compute new intel_dp->train_set as requested by target */
1843		intel_get_adjust_train(intel_dp, link_status);
1844		++tries;
1845	}
1846
1847	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1848		reg = DP | DP_LINK_TRAIN_OFF_CPT;
1849	else
1850		reg = DP | DP_LINK_TRAIN_OFF;
1851
1852	I915_WRITE(intel_dp->output_reg, reg);
1853	POSTING_READ(intel_dp->output_reg);
1854	intel_dp_aux_native_write_1(intel_dp,
1855				    DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
1856}
1857
1858static void
1859intel_dp_link_down(struct intel_dp *intel_dp)
1860{
1861	struct drm_device *dev = intel_dp->base.base.dev;
1862	struct drm_i915_private *dev_priv = dev->dev_private;
1863	uint32_t DP = intel_dp->DP;
1864
1865	if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
1866		return;
1867
1868	DRM_DEBUG_KMS("\n");
1869
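	/* For eDP, clear the port's PLL enable bit before taking the link down. */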
1870	if (is_edp(intel_dp)) {
1871		DP &= ~DP_PLL_ENABLE;
1872		I915_WRITE(intel_dp->output_reg, DP);
1873		POSTING_READ(intel_dp->output_reg);
1874		DELAY(100);
1875	}
1876
1877	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
1878		DP &= ~DP_LINK_TRAIN_MASK_CPT;
1879		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
1880	} else {
1881		DP &= ~DP_LINK_TRAIN_MASK;
1882		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
1883	}
1884	POSTING_READ(intel_dp->output_reg);
1885
1886	drm_msleep(17, "915dlo");
1887
1888	if (is_edp(intel_dp)) {
1889		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1890			DP |= DP_LINK_TRAIN_OFF_CPT;
1891		else
1892			DP |= DP_LINK_TRAIN_OFF;
1893	}
1894
1895
1896	if (!HAS_PCH_CPT(dev) &&
1897	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
1898		struct drm_crtc *crtc = intel_dp->base.base.crtc;
1899
1900		/* Hardware workaround: leaving our transcoder select
1901		 * set to transcoder B while it's off will prevent the
1902		 * corresponding HDMI output on transcoder A.
1903		 *
1904		 * Combine this with another hardware workaround:
1905		 * transcoder select bit can only be cleared while the
1906		 * port is enabled.
1907		 */
1908		DP &= ~DP_PIPEB_SELECT;
1909		I915_WRITE(intel_dp->output_reg, DP);
1910
1911		/* Changes to enable or select take place the vblank
1912		 * after being written.
1913		 */
1914		if (crtc == NULL) {
1915			/* We can arrive here never having been attached
1916			 * to a CRTC, for instance, due to inheriting
1917			 * random state from the BIOS.
1918			 *
1919			 * If the pipe is not running, play safe and
1920			 * wait for the clocks to stabilise before
1921			 * continuing.
1922			 */
1923			POSTING_READ(intel_dp->output_reg);
1924			drm_msleep(50, "915dla");
1925		} else
1926			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
1927	}
1928
1929	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
1930	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
1931	POSTING_READ(intel_dp->output_reg);
1932	drm_msleep(intel_dp->panel_power_down_delay, "915ldo");
1933}
1934
1935static bool
1936intel_dp_get_dpcd(struct intel_dp *intel_dp)
1937{
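	/* Read the first DP_RECEIVER_CAP_SIZE bytes of the DPCD.  A revision
	 * byte of 0 is not a valid DPCD revision, so treat that as a failed
	 * read (no sink, or a bad AUX transaction). */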
1938	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
1939					   sizeof(intel_dp->dpcd)) &&
1940	    (intel_dp->dpcd[DP_DPCD_REV] != 0)) {
1941		return true;
1942	}
1943
1944	return false;
1945}
1946
1947static void
1948intel_dp_probe_oui(struct intel_dp *intel_dp)
1949{
1950	u8 buf[3];
1951
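	/* Sinks flag OUI support in DP_DOWN_STREAM_PORT_COUNT; the sink and
	 * branch OUIs are read purely for debug logging. */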
1952	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
1953		return;
1954
1955	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
1956		DRM_DEBUG_KMS("Sink OUI: %02x%02x%02x\n",
1957			      buf[0], buf[1], buf[2]);
1958
1959	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
1960		DRM_DEBUG_KMS("Branch OUI: %02x%02x%02x\n",
1961			      buf[0], buf[1], buf[2]);
1962}
1963
1964static bool
1965intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
1966{
1967	int ret;
1968
1969	ret = intel_dp_aux_native_read_retry(intel_dp,
1970					     DP_DEVICE_SERVICE_IRQ_VECTOR,
1971					     sink_irq_vector, 1);
1972	if (!ret)
1973		return false;
1974
1975	return true;
1976}
1977
1978static void
1979intel_dp_handle_test_request(struct intel_dp *intel_dp)
1980{
1981	/* NAK by default */
1982	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
1983}
1984
1985/*
1986 * According to DP spec
1987 * 5.1.2:
1988 *  1. Read DPCD
1989 *  2. Configure link according to Receiver Capabilities
1990 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
1991 *  4. Check link status on receipt of hot-plug interrupt
1992 */
1993
1994static void
1995intel_dp_check_link_status(struct intel_dp *intel_dp)
1996{
1997	u8 sink_irq_vector;
1998	u8 link_status[DP_LINK_STATUS_SIZE];
1999
2000	if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
2001		return;
2002
2003	if (!intel_dp->base.base.crtc)
2004		return;
2005
2006	/* Try to read receiver status if the link appears to be up */
2007	if (!intel_dp_get_link_status(intel_dp, link_status)) {
2008		intel_dp_link_down(intel_dp);
2009		return;
2010	}
2011
2012	/* Now read the DPCD to see if it's actually running */
2013	if (!intel_dp_get_dpcd(intel_dp)) {
2014		intel_dp_link_down(intel_dp);
2015		return;
2016	}
2017
2018	/* Try to read the source of the interrupt */
2019	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
2020	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
2021		/* Clear interrupt source */
2022		intel_dp_aux_native_write_1(intel_dp,
2023					    DP_DEVICE_SERVICE_IRQ_VECTOR,
2024					    sink_irq_vector);
2025
2026		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
2027			intel_dp_handle_test_request(intel_dp);
2028		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
2029			DRM_DEBUG_KMS("CP or sink specific irq unhandled\n");
2030	}
2031
2032	if (!intel_channel_eq_ok(intel_dp, link_status)) {
2033		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
2034			      drm_get_encoder_name(&intel_dp->base.base));
2035		intel_dp_start_link_train(intel_dp);
2036		intel_dp_complete_link_train(intel_dp);
2037	}
2038}
2039
2040static enum drm_connector_status
2041intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2042{
2043	if (intel_dp_get_dpcd(intel_dp))
2044		return connector_status_connected;
2045	return connector_status_disconnected;
2046}
2047
2048static enum drm_connector_status
2049ironlake_dp_detect(struct intel_dp *intel_dp)
2050{
2051	enum drm_connector_status status;
2052
2053	/* Can't disconnect eDP, but you can close the lid... */
2054	if (is_edp(intel_dp)) {
2055		status = intel_panel_detect(intel_dp->base.base.dev);
2056		if (status == connector_status_unknown)
2057			status = connector_status_connected;
2058		return status;
2059	}
2060
2061	return intel_dp_detect_dpcd(intel_dp);
2062}
2063
2064static enum drm_connector_status
2065g4x_dp_detect(struct intel_dp *intel_dp)
2066{
2067	struct drm_device *dev = intel_dp->base.base.dev;
2068	struct drm_i915_private *dev_priv = dev->dev_private;
2069	uint32_t temp, bit;
2070
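	/* Map the port register to its live hot-plug status bit and check
	 * PORT_HOTPLUG_STAT; only bother with a DPCD probe if the hardware
	 * reports something physically connected. */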
2071	switch (intel_dp->output_reg) {
2072	case DP_B:
2073		bit = DPB_HOTPLUG_INT_STATUS;
2074		break;
2075	case DP_C:
2076		bit = DPC_HOTPLUG_INT_STATUS;
2077		break;
2078	case DP_D:
2079		bit = DPD_HOTPLUG_INT_STATUS;
2080		break;
2081	default:
2082		return connector_status_unknown;
2083	}
2084
2085	temp = I915_READ(PORT_HOTPLUG_STAT);
2086
2087	if ((temp & bit) == 0)
2088		return connector_status_disconnected;
2089
2090	return intel_dp_detect_dpcd(intel_dp);
2091}
2092
2093static struct edid *
2094intel_dp_get_edid(struct drm_connector *connector, device_t adapter)
2095{
2096	struct intel_dp *intel_dp = intel_attached_dp(connector);
2097	struct edid	*edid;
2098
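	/* Keep the panel VDD forced on across the EDID read; an eDP panel's
	 * DDC/AUX channel generally only responds while VDD is up. */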
2099	ironlake_edp_panel_vdd_on(intel_dp);
2100	edid = drm_get_edid(connector, adapter);
2101	ironlake_edp_panel_vdd_off(intel_dp, false);
2102	return edid;
2103}
2104
2105static int
2106intel_dp_get_edid_modes(struct drm_connector *connector, device_t adapter)
2107{
2108	struct intel_dp *intel_dp = intel_attached_dp(connector);
2109	int	ret;
2110
2111	ironlake_edp_panel_vdd_on(intel_dp);
2112	ret = intel_ddc_get_modes(connector, adapter);
2113	ironlake_edp_panel_vdd_off(intel_dp, false);
2114	return ret;
2115}
2116
2117
2118/**
2119 * Detect whether a DP or eDP sink is attached to this connector's port.
2120 *
2121 * \return connector_status_connected if a sink is detected.
2122 * \return connector_status_disconnected otherwise.
2123 */
2124static enum drm_connector_status
2125intel_dp_detect(struct drm_connector *connector, bool force)
2126{
2127	struct intel_dp *intel_dp = intel_attached_dp(connector);
2128	struct drm_device *dev = intel_dp->base.base.dev;
2129	enum drm_connector_status status;
2130	struct edid *edid = NULL;
2131
2132	intel_dp->has_audio = false;
2133
2134	if (HAS_PCH_SPLIT(dev))
2135		status = ironlake_dp_detect(intel_dp);
2136	else
2137		status = g4x_dp_detect(intel_dp);
2138	if (status != connector_status_connected)
2139		return status;
2140
2141	intel_dp_probe_oui(intel_dp);
2142
2143	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
2144		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
2145	} else {
2146		edid = intel_dp_get_edid(connector, intel_dp->adapter);
2147		if (edid) {
2148			intel_dp->has_audio = drm_detect_monitor_audio(edid);
2149			free(edid, DRM_MEM_KMS);
2150		}
2151	}
2152
2153	return connector_status_connected;
2154}
2155
2156static int intel_dp_get_modes(struct drm_connector *connector)
2157{
2158	struct intel_dp *intel_dp = intel_attached_dp(connector);
2159	struct drm_device *dev = intel_dp->base.base.dev;
2160	struct drm_i915_private *dev_priv = dev->dev_private;
2161	int ret;
2162
2163	/* We should parse the EDID data and find out if it has an audio sink
2164	 */
2165
2166	ret = intel_dp_get_edid_modes(connector, intel_dp->adapter);
2167	if (ret) {
2168		if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
2169			struct drm_display_mode *newmode;
2170			list_for_each_entry(newmode, &connector->probed_modes,
2171					    head) {
2172				if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
2173					intel_dp->panel_fixed_mode =
2174						drm_mode_duplicate(dev, newmode);
2175					break;
2176				}
2177			}
2178		}
2179		return ret;
2180	}
2181
2182	/* if eDP has no EDID, try to use fixed panel mode from VBT */
2183	if (is_edp(intel_dp)) {
2184		/* initialize panel mode from VBT if available for eDP */
2185		if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
2186			intel_dp->panel_fixed_mode =
2187				drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
2188			if (intel_dp->panel_fixed_mode) {
2189				intel_dp->panel_fixed_mode->type |=
2190					DRM_MODE_TYPE_PREFERRED;
2191			}
2192		}
2193		if (intel_dp->panel_fixed_mode) {
2194			struct drm_display_mode *mode;
2195			mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
2196			drm_mode_probed_add(connector, mode);
2197			return 1;
2198		}
2199	}
2200	return 0;
2201}
2202
2203static bool
2204intel_dp_detect_audio(struct drm_connector *connector)
2205{
2206	struct intel_dp *intel_dp = intel_attached_dp(connector);
2207	struct edid *edid;
2208	bool has_audio = false;
2209
2210	edid = intel_dp_get_edid(connector, intel_dp->adapter);
2211	if (edid) {
2212		has_audio = drm_detect_monitor_audio(edid);
2213
2214		free(edid, DRM_MEM_KMS);
2215	}
2216
2217	return has_audio;
2218}
2219
2220static int
2221intel_dp_set_property(struct drm_connector *connector,
2222		      struct drm_property *property,
2223		      uint64_t val)
2224{
2225	struct drm_i915_private *dev_priv = connector->dev->dev_private;
2226	struct intel_dp *intel_dp = intel_attached_dp(connector);
2227	int ret;
2228
2229	ret = drm_object_property_set_value(&connector->base, property, val);
2230	if (ret)
2231		return ret;
2232
2233	if (property == dev_priv->force_audio_property) {
2234		int i = val;
2235		bool has_audio;
2236
2237		if (i == intel_dp->force_audio)
2238			return 0;
2239
2240		intel_dp->force_audio = i;
2241
2242		if (i == HDMI_AUDIO_AUTO)
2243			has_audio = intel_dp_detect_audio(connector);
2244		else
2245			has_audio = (i == HDMI_AUDIO_ON);
2246
2247		if (has_audio == intel_dp->has_audio)
2248			return 0;
2249
2250		intel_dp->has_audio = has_audio;
2251		goto done;
2252	}
2253
2254	if (property == dev_priv->broadcast_rgb_property) {
2255		if (val == !!intel_dp->color_range)
2256			return 0;
2257
2258		intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
2259		goto done;
2260	}
2261
2262	return -EINVAL;
2263
2264done:
2265	if (intel_dp->base.base.crtc) {
2266		struct drm_crtc *crtc = intel_dp->base.base.crtc;
2267		drm_crtc_helper_set_mode(crtc, &crtc->mode,
2268					 crtc->x, crtc->y,
2269					 crtc->fb);
2270	}
2271
2272	return 0;
2273}
2274
2275static void
2276intel_dp_destroy(struct drm_connector *connector)
2277{
2278	struct drm_device *dev = connector->dev;
2279
2280	if (intel_dpd_is_edp(dev))
2281		intel_panel_destroy_backlight(dev);
2282
2283#if 0
2284	drm_sysfs_connector_remove(connector);
2285#endif
2286	drm_connector_cleanup(connector);
2287	free(connector, DRM_MEM_KMS);
2288}
2289
2290static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
2291{
2292	struct drm_device *dev;
2293	struct intel_dp *intel_dp;
2294
2295	intel_dp = enc_to_intel_dp(encoder);
2296	dev = encoder->dev;
2297
2298	if (intel_dp->dp_iic_bus != NULL) {
2299		if (intel_dp->adapter != NULL) {
2300			device_delete_child(intel_dp->dp_iic_bus,
2301			    intel_dp->adapter);
2302		}
2303		device_delete_child(dev->dev, intel_dp->dp_iic_bus);
2304	}
2305	drm_encoder_cleanup(encoder);
2306	if (is_edp(intel_dp)) {
2307		struct drm_i915_private *dev_priv = intel_dp->base.base.dev->dev_private;
2308
2309		taskqueue_cancel_timeout(dev_priv->tq,
2310		    &intel_dp->panel_vdd_task, NULL);
2311		taskqueue_drain_timeout(dev_priv->tq,
2312		    &intel_dp->panel_vdd_task);
2313		ironlake_panel_vdd_off_sync(intel_dp);
2314	}
2315	free(intel_dp, DRM_MEM_KMS);
2316}
2317
2318static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
2319	.dpms = intel_dp_dpms,
2320	.mode_fixup = intel_dp_mode_fixup,
2321	.prepare = intel_dp_prepare,
2322	.mode_set = intel_dp_mode_set,
2323	.commit = intel_dp_commit,
2324};
2325
2326static const struct drm_connector_funcs intel_dp_connector_funcs = {
2327	.dpms = drm_helper_connector_dpms,
2328	.detect = intel_dp_detect,
2329	.fill_modes = drm_helper_probe_single_connector_modes,
2330	.set_property = intel_dp_set_property,
2331	.destroy = intel_dp_destroy,
2332};
2333
2334static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
2335	.get_modes = intel_dp_get_modes,
2336	.mode_valid = intel_dp_mode_valid,
2337	.best_encoder = intel_best_encoder,
2338};
2339
2340static const struct drm_encoder_funcs intel_dp_enc_funcs = {
2341	.destroy = intel_dp_encoder_destroy,
2342};
2343
2344static void
2345intel_dp_hot_plug(struct intel_encoder *intel_encoder)
2346{
2347	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
2348
2349	intel_dp_check_link_status(intel_dp);
2350}
2351
2352/* Return which DP Port should be selected for Transcoder DP control */
2353int
2354intel_trans_dp_port_sel(struct drm_crtc *crtc)
2355{
2356	struct drm_device *dev = crtc->dev;
2357	struct drm_mode_config *mode_config = &dev->mode_config;
2358	struct drm_encoder *encoder;
2359
2360	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
2361		struct intel_dp *intel_dp;
2362
2363		if (encoder->crtc != crtc)
2364			continue;
2365
2366		intel_dp = enc_to_intel_dp(encoder);
2367		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
2368		    intel_dp->base.type == INTEL_OUTPUT_EDP)
2369			return intel_dp->output_reg;
2370	}
2371
2372	return -1;
2373}
2374
2375/* check the VBT to see whether the eDP is on DP-D port */
2376bool intel_dpd_is_edp(struct drm_device *dev)
2377{
2378	struct drm_i915_private *dev_priv = dev->dev_private;
2379	struct child_device_config *p_child;
2380	int i;
2381
2382	if (!dev_priv->child_dev_num)
2383		return false;
2384
2385	for (i = 0; i < dev_priv->child_dev_num; i++) {
2386		p_child = dev_priv->child_dev + i;
2387
2388		if (p_child->dvo_port == PORT_IDPD &&
2389		    p_child->device_type == DEVICE_TYPE_eDP)
2390			return true;
2391	}
2392	return false;
2393}
2394
2395static void
2396intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
2397{
2398	intel_attach_force_audio_property(connector);
2399	intel_attach_broadcast_rgb_property(connector);
2400}
2401
2402void
2403intel_dp_init(struct drm_device *dev, int output_reg)
2404{
2405	struct drm_i915_private *dev_priv = dev->dev_private;
2406	struct drm_connector *connector;
2407	struct intel_dp *intel_dp;
2408	struct intel_encoder *intel_encoder;
2409	struct intel_connector *intel_connector;
2410	const char *name = NULL;
2411	int type;
2412
2413	intel_dp = malloc(sizeof(struct intel_dp), DRM_MEM_KMS,
2414	    M_WAITOK | M_ZERO);
2415
2416	intel_dp->output_reg = output_reg;
2417	intel_dp->dpms_mode = -1;
2418
2419	intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS,
2420	    M_WAITOK | M_ZERO);
2421	intel_encoder = &intel_dp->base;
2422
2423	if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
2424		if (intel_dpd_is_edp(dev))
2425			intel_dp->is_pch_edp = true;
2426
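	/* DP_A is the CPU eDP port; a PCH DP-D port is also treated as eDP
	 * when the VBT (checked above) says the panel hangs off it. */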
2427	if (output_reg == DP_A || is_pch_edp(intel_dp)) {
2428		type = DRM_MODE_CONNECTOR_eDP;
2429		intel_encoder->type = INTEL_OUTPUT_EDP;
2430	} else {
2431		type = DRM_MODE_CONNECTOR_DisplayPort;
2432		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
2433	}
2434
2435	connector = &intel_connector->base;
2436	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
2437	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
2438
2439	connector->polled = DRM_CONNECTOR_POLL_HPD;
2440
2441	if (output_reg == DP_B || output_reg == PCH_DP_B)
2442		intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
2443	else if (output_reg == DP_C || output_reg == PCH_DP_C)
2444		intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
2445	else if (output_reg == DP_D || output_reg == PCH_DP_D)
2446		intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
2447
2448	if (is_edp(intel_dp)) {
2449		intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
2450		TIMEOUT_TASK_INIT(dev_priv->tq, &intel_dp->panel_vdd_task, 0,
2451		    ironlake_panel_vdd_work, intel_dp);
2452	}
2453
2454	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
2455
2456	connector->interlace_allowed = true;
2457	connector->doublescan_allowed = 0;
2458
2459	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
2460			 DRM_MODE_ENCODER_TMDS);
2461	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
2462
2463	intel_connector_attach_encoder(intel_connector, intel_encoder);
2464#if 0
2465	drm_sysfs_connector_add(connector);
2466#endif
2467
2468	/* Set up the DDC bus. */
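	/* Note: the hot-plug masks ORed in below are the HDMI ones; on these
	 * platforms the DP ports appear to share their hot-plug detection
	 * bits with the corresponding HDMI ports. */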
2469	switch (output_reg) {
2470		case DP_A:
2471			name = "DPDDC-A";
2472			break;
2473		case DP_B:
2474		case PCH_DP_B:
2475			dev_priv->hotplug_supported_mask |=
2476				HDMIB_HOTPLUG_INT_STATUS;
2477			name = "DPDDC-B";
2478			break;
2479		case DP_C:
2480		case PCH_DP_C:
2481			dev_priv->hotplug_supported_mask |=
2482				HDMIC_HOTPLUG_INT_STATUS;
2483			name = "DPDDC-C";
2484			break;
2485		case DP_D:
2486		case PCH_DP_D:
2487			dev_priv->hotplug_supported_mask |=
2488				HDMID_HOTPLUG_INT_STATUS;
2489			name = "DPDDC-D";
2490			break;
2491	}
2492
2493	/* Cache some DPCD data in the eDP case */
2494	if (is_edp(intel_dp)) {
2495		bool ret;
2496		struct edp_power_seq	cur, vbt;
2497		u32 pp_on, pp_off, pp_div;
2498
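		/* Read back the panel power sequencer programming; if any of
		 * the three registers is zero the delays are considered bad
		 * and the panel is abandoned below. */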
2499		pp_on = I915_READ(PCH_PP_ON_DELAYS);
2500		pp_off = I915_READ(PCH_PP_OFF_DELAYS);
2501		pp_div = I915_READ(PCH_PP_DIVISOR);
2502
2503		if (!pp_on || !pp_off || !pp_div) {
2504			DRM_INFO("bad panel power sequencing delays, disabling panel\n");
2505			intel_dp_encoder_destroy(&intel_dp->base.base);
2506			intel_dp_destroy(&intel_connector->base);
2507			return;
2508		}
2509
2510		/* Pull timing values out of registers */
2511		cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
2512			PANEL_POWER_UP_DELAY_SHIFT;
2513
2514		cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
2515			PANEL_LIGHT_ON_DELAY_SHIFT;
2516
2517		cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
2518			PANEL_LIGHT_OFF_DELAY_SHIFT;
2519
2520		cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
2521			PANEL_POWER_DOWN_DELAY_SHIFT;
2522
2523		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
2524			       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
2525
2526		DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2527			      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
2528
2529		vbt = dev_priv->edp.pps;
2530
2531		DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2532			      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
2533
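		/*
		 * The delays gathered above are in 100us units; get_delay()
		 * picks the larger of the hardware and VBT values and converts
		 * to milliseconds, rounding up: (x + 9) / 10.  For example a
		 * raw value of 250 (25.0ms) yields 25, and a raw value of 1
		 * (100us) rounds up to 1ms.
		 */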
2534#define get_delay(field)	((max(cur.field, vbt.field) + 9) / 10)
2535
2536		intel_dp->panel_power_up_delay = get_delay(t1_t3);
2537		intel_dp->backlight_on_delay = get_delay(t8);
2538		intel_dp->backlight_off_delay = get_delay(t9);
2539		intel_dp->panel_power_down_delay = get_delay(t10);
2540		intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
2541
2542		DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
2543			      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
2544			      intel_dp->panel_power_cycle_delay);
2545
2546		DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
2547			      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
2548
2549		ironlake_edp_panel_vdd_on(intel_dp);
2550		ret = intel_dp_get_dpcd(intel_dp);
2551		ironlake_edp_panel_vdd_off(intel_dp, false);
2552
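		/* DPCD 1.1+ sinks may set DP_NO_AUX_HANDSHAKE_LINK_TRAINING in
		 * DP_MAX_DOWNSPREAD, i.e. the link can be trained without the
		 * full AUX channel handshake; remember that for later. */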
2553		if (ret) {
2554			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
2555				dev_priv->no_aux_handshake =
2556					intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
2557					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
2558		} else {
2559			/* if this fails, presume the device is a ghost */
2560			DRM_INFO("failed to retrieve link info, disabling eDP\n");
2561			intel_dp_encoder_destroy(&intel_dp->base.base);
2562			intel_dp_destroy(&intel_connector->base);
2563			return;
2564		}
2565	}
2566
2567	intel_dp_i2c_init(intel_dp, intel_connector, name);
2568
2569	intel_encoder->hot_plug = intel_dp_hot_plug;
2570
2571	if (is_edp(intel_dp)) {
2572		dev_priv->int_edp_connector = connector;
2573		intel_panel_setup_backlight(dev);
2574	}
2575
2576	intel_dp_add_properties(intel_dp, connector);
2577
2578	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
2579	 * 0xd.  Failure to do so will result in spurious interrupts being
2580	 * generated on the port when a cable is not attached.
2581	 */
2582	if (IS_G4X(dev) && !IS_GM45(dev)) {
2583		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
2584		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
2585	}
2586}
2587