i915_drv.c revision 280369
1/* i915_drv.c -- Intel i915 driver -*- linux-c -*-
2 * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
3 */
4/*-
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 *    Gareth Hughes <gareth@valinux.com>
29 *
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_drv.c 280369 2015-03-23 13:38:33Z kib $");
34
35#include <dev/drm2/drmP.h>
36#include <dev/drm2/drm.h>
37#include <dev/drm2/drm_mm.h>
38#include <dev/drm2/i915/i915_drm.h>
39#include <dev/drm2/i915/i915_drv.h>
40#include <dev/drm2/drm_pciids.h>
41#include <dev/drm2/i915/intel_drv.h>
42
43#include "fb_if.h"
44
/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
/* Flat PCI ID list consumed by drm_probe()/drm_attach() in i915_probe()
 * and i915_attach() below. */
static drm_pci_id_list_t i915_pciidlist[] = {
	i915_PCI_IDS
};
49
/*
 * Per-chipset feature descriptors, selected by PCI device ID through
 * pciidlist below.  ".gen" is the hardware generation; the remaining
 * flags gate feature code throughout the driver.
 */

/* Gen2 parts. */
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

/* Gen3 parts. */
static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};

/* Gen4 parts. */
static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1,
	.has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

/* Gen5 (Ironlake). */
static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_pch_split = 1,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0, /* disabled due to buggy hardware */
	.has_bsd_ring = 1,
	.has_pch_split = 1,
};

/* Gen6 (Sandy Bridge). */
static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};

/* Gen7 (Ivy Bridge). */
static const struct intel_device_info intel_ivybridge_d_info = {
	.is_ivybridge = 1, .gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	.is_ivybridge = 1, .gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,	/* FBC is not enabled on Ivybridge mobile yet */
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};

/* ValleyView descriptors are compiled out: no PCI IDs reference them yet. */
#if 0
static const struct intel_device_info intel_valleyview_m_info = {
	.gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.is_valleyview = 1,
};

static const struct intel_device_info intel_valleyview_d_info = {
	.gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.is_valleyview = 1,
};
#endif

/* Haswell: marked not_supported, so attach is refused unless the
 * drm.i915.enable_unsupported tunable is set (see i915_get_device_id()). */
static const struct intel_device_info intel_haswell_d_info = {
	.is_haswell = 1, .gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
	.not_supported = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
	.is_haswell = 1, .gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
	.not_supported = 1,
};
223
/* Build one table entry mapping a PCI device ID to its info struct. */
#define INTEL_VGA_DEVICE(id, info_) {		\
	.device = id,				\
	.info = info_,				\
}

/* PCI device ID -> intel_device_info map; zero-terminated.  Searched
 * linearly by i915_get_device_id(). */
static const struct intel_gfx_device_id {
	int device;
	const struct intel_device_info *info;
} pciidlist[] = {		/* aka */
	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),
	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
	{0, 0}
};

/* Set via the drm.i915.enable_unsupported tunable (declared with the
 * other tunables below) to allow attaching to .not_supported devices. */
static int i915_enable_unsupported;
288
/*
 * Common suspend path: idle the GPU, save register state and tear down
 * interrupts/opregion so the device can be powered down.  Returns 0 on
 * success or a positive errno on failure.
 */
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv;
	int error;

	dev_priv = dev->dev_private;
	drm_kms_helper_poll_disable(dev);

#if 0
	pci_save_state(dev->pdev);
#endif

	DRM_LOCK(dev);
	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Sign flipped: i915_gem_idle() presumably returns a
		 * negative errno; newbus wants a positive one. */
		error = -i915_gem_idle(dev);
		if (error) {
			/* NOTE(review): KMS polling was disabled above and
			 * is not re-enabled on this error path — confirm
			 * whether that is intended. */
			DRM_UNLOCK(dev);
			device_printf(dev->device,
			    "GEM idle failed, resume might fail\n");
			return (error);
		}
		drm_irq_uninstall(dev);
	}

	i915_save_state(dev);

	intel_opregion_fini(dev);

	/* Modeset on resume, not lid events */
	dev_priv->modeset_on_lid = 0;
	DRM_UNLOCK(dev);

	return 0;
}
324
325static int
326i915_suspend(device_t kdev)
327{
328	struct drm_device *dev;
329	int error;
330
331	dev = device_get_softc(kdev);
332	if (dev == NULL || dev->dev_private == NULL) {
333		DRM_ERROR("DRM not initialized, aborting suspend.\n");
334		return -ENODEV;
335	}
336
337	DRM_DEBUG_KMS("starting suspend\n");
338	error = i915_drm_freeze(dev);
339	if (error)
340		return (error);
341
342	error = bus_generic_suspend(kdev);
343	DRM_DEBUG_KMS("finished suspend %d\n", error);
344	return (error);
345}
346
/*
 * Common resume path: restore GTT mappings and saved register state,
 * then (under KMS) re-init the hardware, interrupts and mode
 * configuration.  Returns 0 or the status from i915_gem_init_hw().
 */
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	DRM_LOCK(dev);
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		i915_gem_restore_gtt_mappings(dev);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_init_pch_refclk(dev);

		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		/* Drop the struct lock: the modeset calls below take
		 * mode_config.mutex instead. */
		DRM_UNLOCK(dev);

		intel_modeset_init_hw(dev);
		sx_xlock(&dev->mode_config.mutex);
		drm_mode_config_reset(dev);
		sx_xunlock(&dev->mode_config.mutex);
		drm_irq_install(dev);

		sx_xlock(&dev->mode_config.mutex);
		/* Resume the modeset for every activated CRTC */
		drm_helper_resume_force_mode(dev);
		sx_xunlock(&dev->mode_config.mutex);
		/* Reacquire for the common tail below. */
		DRM_LOCK(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	DRM_UNLOCK(dev);

	return error;
}
391
392static int
393i915_resume(device_t kdev)
394{
395	struct drm_device *dev;
396	int ret;
397
398	dev = device_get_softc(kdev);
399	DRM_DEBUG_KMS("starting resume\n");
400#if 0
401	if (pci_enable_device(dev->pdev))
402		return -EIO;
403
404	pci_set_master(dev->pdev);
405#endif
406
407	ret = -i915_drm_thaw(dev);
408	if (ret != 0)
409		return (ret);
410
411	drm_kms_helper_poll_enable(dev);
412	ret = bus_generic_resume(kdev);
413	DRM_DEBUG_KMS("finished resume %d\n", ret);
414	return (ret);
415}
416
417static int
418i915_probe(device_t kdev)
419{
420	const struct intel_device_info *info;
421	int error;
422
423	error = drm_probe(kdev, i915_pciidlist);
424	if (error != 0)
425		return (error);
426	info = i915_get_device_id(pci_get_device(kdev));
427	if (info == NULL)
428		return (ENXIO);
429	return (0);
430}
431
/* Tentative definition; the initialized definition (= 1) sits with the
 * other tunables further down in this file. */
int i915_modeset;

/*
 * device_attach method: advertise DRIVER_MODESET when the
 * drm.i915.modeset tunable is 1, then hand off to the generic DRM
 * attach code.
 */
static int
i915_attach(device_t kdev)
{
	struct drm_device *dev;

	dev = device_get_softc(kdev);
	if (i915_modeset == 1)
		i915_driver_info.driver_features |= DRIVER_MODESET;
	dev->driver = &i915_driver_info;
	return (drm_attach(kdev, i915_pciidlist));
}
445
446static struct fb_info *
447i915_fb_helper_getinfo(device_t kdev)
448{
449	struct intel_fbdev *ifbdev;
450	drm_i915_private_t *dev_priv;
451	struct drm_device *dev;
452	struct fb_info *info;
453
454	dev = device_get_softc(kdev);
455	dev_priv = dev->dev_private;
456	ifbdev = dev_priv->fbdev;
457	if (ifbdev == NULL)
458		return (NULL);
459
460	info = ifbdev->helper.fbdev;
461
462	return (info);
463}
464
465const struct intel_device_info *
466i915_get_device_id(int device)
467{
468	const struct intel_gfx_device_id *did;
469
470	for (did = &pciidlist[0]; did->device != 0; did++) {
471		if (did->device != device)
472			continue;
473		if (did->info->not_supported && !i915_enable_unsupported)
474			return (NULL);
475		return (did->info);
476	}
477	return (NULL);
478}
479
/* newbus glue: device interface plus the framebuffer service method. */
static device_method_t i915_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		i915_probe),
	DEVMETHOD(device_attach,	i915_attach),
	DEVMETHOD(device_suspend,	i915_suspend),
	DEVMETHOD(device_resume,	i915_resume),
	DEVMETHOD(device_detach,	drm_detach),

	/* Framebuffer service methods */
	DEVMETHOD(fb_getinfo,		i915_fb_helper_getinfo),

	DEVMETHOD_END
};

/* Registered under the shared "drmn" name; softc is the drm_device. */
static driver_t i915_driver = {
	"drmn",
	i915_methods,
	sizeof(struct drm_device)
};

extern devclass_t drm_devclass;
/* Attach below vgapci; SI_ORDER_ANY makes us register last. */
DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0,
    SI_ORDER_ANY);
MODULE_DEPEND(i915kms, drmn, 1, 1, 1);
MODULE_DEPEND(i915kms, agp, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbus, 1, 1, 1);
MODULE_DEPEND(i915kms, iic, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);
508
/*
 * Loader tunables (drm.i915.*), read at module load via TUNABLE_INT.
 * Most mirror the Linux i915 module parameters; -1 generally means
 * "driver decides".
 */
int intel_iommu_enabled = 0;
TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled);
int intel_iommu_gfx_mapped = 0;
TUNABLE_INT("drm.i915.intel_iommu_gfx_mapped", &intel_iommu_gfx_mapped);

int i915_prefault_disable;
TUNABLE_INT("drm.i915.prefault_disable", &i915_prefault_disable);
/* -1 = auto; see i915_semaphore_is_enabled(). */
int i915_semaphores = -1;
TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
/* 0 disables GPU resets entirely; see i915_reset(). */
static int i915_try_reset = 1;
TUNABLE_INT("drm.i915.try_reset", &i915_try_reset);
unsigned int i915_lvds_downclock = 0;
TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock);
int i915_vbt_sdvo_panel_type = -1;
TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type);
unsigned int i915_powersave = 1;
TUNABLE_INT("drm.i915.powersave", &i915_powersave);
int i915_enable_fbc = 0;
TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
int i915_enable_rc6 = 0;
TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
int i915_lvds_channel_mode;
TUNABLE_INT("drm.i915.lvds_channel_mode", &i915_lvds_channel_mode);
int i915_panel_use_ssc = -1;
TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
int i915_panel_ignore_lid = 0;
TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
int i915_panel_invert_brightness;
TUNABLE_INT("drm.i915.panel_invert_brightness", &i915_panel_invert_brightness);
/* Initialized definition for the tentative declaration near i915_attach(). */
int i915_modeset = 1;
TUNABLE_INT("drm.i915.modeset", &i915_modeset);
int i915_enable_ppgtt = -1;
TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);
int i915_enable_hangcheck = 1;
TUNABLE_INT("drm.i915.enable_hangcheck", &i915_enable_hangcheck);
/* The variable itself is declared above, next to the PCI ID table. */
TUNABLE_INT("drm.i915.enable_unsupported", &i915_enable_unsupported);
545
/* PCH (south bridge) identification: the PCH family is encoded in the
 * upper byte of the ISA bridge's PCI device ID. */
#define	PCI_VENDOR_INTEL		0x8086
#define INTEL_PCH_DEVICE_ID_MASK	0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
552
553void intel_detect_pch(struct drm_device *dev)
554{
555	struct drm_i915_private *dev_priv;
556	device_t pch;
557	uint32_t id;
558
559	dev_priv = dev->dev_private;
560	pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
561	if (pch != NULL && pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
562		id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
563		if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
564			dev_priv->pch_type = PCH_IBX;
565			dev_priv->num_pch_pll = 2;
566			DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
567		} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
568			dev_priv->pch_type = PCH_CPT;
569			dev_priv->num_pch_pll = 2;
570			DRM_DEBUG_KMS("Found CougarPoint PCH\n");
571		} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
572			/* PantherPoint is CPT compatible */
573			dev_priv->pch_type = PCH_CPT;
574			dev_priv->num_pch_pll = 2;
575			DRM_DEBUG_KMS("Found PatherPoint PCH\n");
576		} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
577			dev_priv->pch_type = PCH_LPT;
578			dev_priv->num_pch_pll = 0;
579			DRM_DEBUG_KMS("Found LynxPoint PCH\n");
580		} else
581			DRM_DEBUG_KMS("No PCH detected\n");
582		KASSERT(dev_priv->num_pch_pll <= I915_NUM_PLLS,
583		    ("num_pch_pll %d\n", dev_priv->num_pch_pll));
584	} else
585		DRM_DEBUG_KMS("No Intel PCI-ISA bridge found\n");
586}
587
588bool i915_semaphore_is_enabled(struct drm_device *dev)
589{
590	if (INTEL_INFO(dev)->gen < 6)
591		return 0;
592
593	if (i915_semaphores >= 0)
594		return i915_semaphores;
595
596	/* Enable semaphores on SNB when IO remapping is off */
597	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
598		return false;
599
600	return 1;
601}
602
/*
 * Low-level gen6 forcewake acquire: wait for any pending release ack
 * to clear, assert FORCEWAKE, then poll (up to 50 x 10 us) for the
 * hardware to acknowledge.  Normally invoked via
 * dev_priv->display.force_wake_get with gt_lock held.
 */
void
__gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	int count;

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
		DELAY(10);

	I915_WRITE_NOTRACE(FORCEWAKE, 1);
	POSTING_READ(FORCEWAKE);

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
		DELAY(10);
}
619
/*
 * Multi-threaded forcewake acquire variant: same handshake as
 * __gen6_gt_force_wake_get() but through the FORCEWAKE_MT register,
 * which uses masked-bit writes.
 */
void
__gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
	int count;

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
		DELAY(10);

	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
	POSTING_READ(FORCEWAKE_MT);

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
		DELAY(10);
}
636
/*
 * Reference-counted forcewake acquire: only the first reference does
 * the actual hardware handshake (via display.force_wake_get).
 */
void
gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{

	mtx_lock(&dev_priv->gt_lock);
	if (dev_priv->forcewake_count++ == 0)
		dev_priv->display.force_wake_get(dev_priv);
	mtx_unlock(&dev_priv->gt_lock);
}
646
/*
 * Report and clear any dropped-MMIO errors latched in GTFIFODBG.
 * The read also serves as a posting read for a preceding write.
 */
static void
gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
	if ((gtfifodbg & GT_FIFO_CPU_ERROR_MASK) != 0) {
		printf("MMIO read or write has been dropped %x\n", gtfifodbg);
		/* Write back the error bits to clear them. */
		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
	}
}
658
/* Low-level gen6 forcewake release. */
void
__gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{

	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
667
/* Multi-threaded forcewake release via FORCEWAKE_MT masked-bit write. */
void
__gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{

	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
676
/*
 * Reference-counted forcewake release: the last reference performs the
 * actual hardware release (via display.force_wake_put).
 */
void
gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{

	mtx_lock(&dev_priv->gt_lock);
	if (--dev_priv->forcewake_count == 0)
		dev_priv->display.force_wake_put(dev_priv);
	mtx_unlock(&dev_priv->gt_lock);
}
686
/*
 * Reserve one GT FIFO slot before a register write.  When the cached
 * free-entry count falls below the reserved threshold, poll the
 * hardware (up to 500 x 10 us) for space.  Returns the number of
 * timeouts seen (0 on success).
 */
int
__gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			DELAY(10);
			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		}
		/* loop is -1 here exactly when the poll was exhausted. */
		if (loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES) {
			printf("%s loop\n", __func__);
			++ret;
		}
		/* Refresh the cached count from hardware. */
		dev_priv->gt_fifo_count = fifo;
	}
	/* Consume the slot we just guaranteed. */
	dev_priv->gt_fifo_count--;

	return (ret);
}
709
710void vlv_force_wake_get(struct drm_i915_private *dev_priv)
711{
712	int count;
713
714	count = 0;
715
716	/* Already awake? */
717	if ((I915_READ(0x130094) & 0xa1) == 0xa1)
718		return;
719
720	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
721	POSTING_READ(FORCEWAKE_VLV);
722
723	count = 0;
724	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
725		DELAY(10);
726}
727
/* ValleyView forcewake release: clear the request bits. */
void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
	/* FIXME: confirm VLV behavior with Punit folks */
	POSTING_READ(FORCEWAKE_VLV);
}
734
/*
 * Gen2 GPU reset via the D_STATE GFX reset bit (plus the DEBUG_RESET
 * register on 830/845G).  i85x has no reset support.  Sleeps are done
 * with pause(9); "onems" is one millisecond in ticks, minimum 1.
 */
static int
i8xx_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int onems;

	if (IS_I85X(dev))
		return -ENODEV;

	onems = hz / 1000;
	if (onems == 0)
		onems = 1;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		pause("i8xxrst1", onems);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	pause("i8xxrst2", onems);

	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}
770
/* The reset is complete when hardware clears GRDOM_RESET_ENABLE in GDRST. */
static int
i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;

	gdrst = pci_read_config(dev->device, I965_GDRST, 1);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
779
/*
 * Gen4 GPU reset through the GDRST PCI config register: reset the
 * render domain, wait for completion, then the media domain.  Each
 * wait is bounded at 500 ms.
 */
static int
i965_do_reset(struct drm_device *dev)
{
	int ret;
	u8 gdrst;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	gdrst = pci_read_config(dev->device, I965_GDRST, 1);
	pci_write_config(dev->device, I965_GDRST,
	    gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE, 1);

	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = pci_read_config(dev->device, I965_GDRST, 1);
	pci_write_config(dev->device, I965_GDRST,
			 gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE, 1);

	return wait_for(i965_reset_complete(dev), 500);
}
806
/*
 * Gen5 (Ironlake) GPU reset through the MCHBAR-mirrored GDSR
 * register: render domain first, then media, each waiting up to
 * 500 ms for bit 0 to signal completion.
 */
static int
ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv;
	u32 gdrst;
	int ret;

	dev_priv = dev->dev_private;
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
828
/*
 * Gen6/7 full-chip reset via GEN6_GDRST.  The reset clobbers the
 * forcewake and FIFO bookkeeping, so both are restored before
 * dropping gt_lock.
 */
static int
gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv;
	int ret;

	dev_priv = dev->dev_private;

	/* Hold gt_lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	mtx_lock(&dev_priv->gt_lock);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = _intel_wait_for(dev,
	    (I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0,
	    500, 0, "915rst");

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->forcewake_count)
		dev_priv->display.force_wake_get(dev_priv);
	else
		dev_priv->display.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);

	mtx_unlock(&dev_priv->gt_lock);
	return (ret);
}
867
/*
 * Dispatch a GPU reset to the generation-specific handler.
 * Generations without an implementation here (e.g. gen3) intentionally
 * fall through with ret left at -ENODEV.  Also clears the simulated
 * hang state used by the GPU hangman debug facility.
 */
int
intel_gpu_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = -ENODEV;

	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		ret = gen6_do_reset(dev);
		break;
	case 5:
		ret = ironlake_do_reset(dev);
		break;
	case 4:
		ret = i965_do_reset(dev);
		break;
	case 2:
		ret = i8xx_do_reset(dev);
		break;
	}

	/* Also reset the gpu hangman. */
	if (dev_priv->stop_rings) {
		DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_ERROR("Reset not implemented, but ignoring "
				  "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	return ret;
}
903
/*
 * Full GPU reset and reinitialization after a hang.  Returns 0 on
 * success (or when resets are disabled via drm.i915.try_reset),
 * -EBUSY when the struct lock is contended, or the reset error.
 * Returns with the struct lock released on all paths.
 */
int i915_reset(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!i915_try_reset)
		return (0);

	if (!sx_try_xlock(&dev->dev_struct_lock))
		return (-EBUSY);

	dev_priv->stop_rings = 0;

	i915_gem_reset(dev);

	/* Refuse back-to-back resets: a second hang within 5 seconds is
	 * declared terminal and leaves ret at -ENODEV. */
	ret = -ENODEV;
	if (time_second - dev_priv->last_gpu_reset < 5)
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	else
		ret = intel_gpu_reset(dev);

	dev_priv->last_gpu_reset = time_second;
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		DRM_UNLOCK(dev);
		return (ret);
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
	    !dev_priv->mm.suspended) {
		struct intel_ring_buffer *ring;
		int i;

		dev_priv->mm.suspended = 0;

		i915_gem_init_swizzling(dev);

		/* Restart every ring from a clean state. */
		for_each_ring(ring, dev_priv, i)
			ring->init(ring);

		i915_gem_context_init(dev);
		i915_gem_init_ppgtt(dev);

		DRM_UNLOCK(dev);

		if (drm_core_check_feature(dev, DRIVER_MODESET))
			intel_modeset_init_hw(dev);

		/* Reinstall the interrupt handler from scratch. */
		DRM_LOCK(dev);
		drm_irq_uninstall(dev);
		DRM_UNLOCK(dev);
		drm_irq_install(dev);
	} else
		DRM_UNLOCK(dev);

	return (0);
}
961
/* We give fast paths for the really cool registers */
/*
 * True for registers that need the forcewake dance: gen6+ registers
 * below 0x40000, excluding FORCEWAKE itself, and never on ValleyView.
 * The entire expansion is now wrapped in parentheses so the macro
 * composes safely inside larger expressions (CERT PRE01-C/PRE02-C).
 */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((((dev_priv)->info->gen >= 6) && \
	  ((reg) < 0x40000) && \
	  ((reg) != FORCEWAKE)) && \
	 (!IS_VALLEYVIEW((dev_priv)->dev)))
968
/*
 * Generate i915_read{8,16,32,64}().  Registers behind the GT power
 * well (per NEEDS_FORCE_WAKE) are read under gt_lock with a temporary
 * forcewake handshake, skipped when a caller already holds a
 * forcewake reference (forcewake_count != 0).
 */
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	u##x val = 0; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		mtx_lock(&dev_priv->gt_lock); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->display.force_wake_get(dev_priv); \
		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
		if (dev_priv->forcewake_count == 0) \
			dev_priv->display.force_wake_put(dev_priv); \
		mtx_unlock(&dev_priv->gt_lock); \
	} else { \
		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
	} \
	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
	return val; \
}

__i915_read(8, 8)
__i915_read(16, 16)
__i915_read(32, 32)
__i915_read(64, 64)
#undef __i915_read
992
/*
 * Generate i915_write{8,16,32,64}().  Writes to GT power well
 * registers first reserve a GT FIFO slot; if that timed out, the
 * FIFO debug register is checked for dropped accesses afterwards.
 */
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	DRM_WRITE##y(dev_priv->mmio_map, reg, val); \
	if (__predict_false(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
}
__i915_write(8, 8)
__i915_write(16, 16)
__i915_write(32, 32)
__i915_write(64, 64)
#undef __i915_write
1010