/* i915_drv.h revision 282199 */
1/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
2 */
3/*
4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_drv.h 282199 2015-04-28 19:35:05Z dumbbell $");
32
33#ifndef _I915_DRV_H_
34#define _I915_DRV_H_
35
36#include <dev/agp/agp_i810.h>
37#include <dev/drm2/drm_mm.h>
38#include <dev/drm2/i915/i915_reg.h>
39#include <dev/drm2/i915/intel_ringbuffer.h>
40#include <dev/drm2/i915/intel_bios.h>
41
/* General customization:
 */

#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20080730"

/* FreeBSD malloc(9) type under which the driver's GEM allocations are accounted. */
MALLOC_DECLARE(DRM_I915_GEM);
52
/* Hardware display pipes.  I915_MAX_PIPES is the table-sizing sentinel. */
enum pipe {
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	I915_MAX_PIPES
};
/* Human-readable pipe letter ('A', 'B', ...) for log/debug messages. */
#define pipe_name(p) ((p) + 'A')
/* Legacy two-pipe count used by the old DRI1 paths; distinct from I915_MAX_PIPES. */
#define I915_NUM_PIPE	2
61
/* Display planes; indexed in parallel with the pipes above. */
enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
/* Human-readable plane letter for log/debug messages. */
#define plane_name(p) ((p) + 'A')
68
/* Digital output ports.  I915_MAX_PORTS is the table-sizing sentinel. */
enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
/* Human-readable port letter for log/debug messages. */
#define port_name(p) ((p) + 'A')
78
/* Every GEM memory domain except the CPU-accessible ones (CPU, GTT) counts
 * as a GPU domain. */
#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))


/* Iterate over the pipes actually present on this device.  NOTE: expects a
 * local named "dev_priv" to be in scope at the expansion site. */
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
83
/* Book-keeping for one shared PCH display PLL. */
struct intel_pch_pll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	int pll_reg;	/* PLL control register */
	int fp0_reg;	/* FP0 divisor register */
	int fp1_reg;	/* FP1 divisor register */
};
#define I915_NUM_PLLS 2
93
/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

/* Compile-time debug instrumentation toggles; all off by default. */
#define WATCH_COHERENCY	0
#define WATCH_BUF	0
#define WATCH_EXEC	0
#define WATCH_LRU	0
#define WATCH_RELOC	0
#define WATCH_INACTIVE	0
#define WATCH_PWRITE	0

/* Identifiers for the physically-contiguous object slots (see
 * struct drm_i915_gem_phys_object below). */
#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
120
/* Physically-contiguous backing store for objects that the hardware must
 * access by physical address (cursors, overlay registers). */
struct drm_i915_gem_phys_object {
	int id;			/* one of the I915_GEM_PHYS_* slot ids above */
	drm_dma_handle_t *handle;	/* the contiguous DMA allocation */
	struct drm_i915_gem_object *cur_obj; /* GEM object currently bound to this slot */
};

struct drm_i915_private;
128
/* Table of per-generation display hooks; filled in at load time so common
 * code can stay generation-agnostic.  Any entry may be NULL when the
 * operation does not apply to the running hardware. */
struct drm_i915_display_funcs {
	void (*dpms)(struct drm_crtc *crtc, int mode);
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	void (*update_wm)(struct drm_device *dev);
	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
				 uint32_t sprite_width, int pixel_size);
	void (*sanitize_pm)(struct drm_device *dev);
	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
				 struct drm_display_mode *mode);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode,
			     int x, int y,
			     struct drm_framebuffer *old_fb);
	void (*off)(struct drm_crtc *crtc);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	void (*init_pch_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj);
	void (*force_wake_get)(struct drm_i915_private *dev_priv);
	void (*force_wake_put)(struct drm_i915_private *dev_priv);
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			    int x, int y);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */
};
166
/* Static, per-chipset capability description selected by PCI ID at attach
 * time; never modified at runtime. */
struct intel_device_info {
	u8 gen;			/* hardware generation number */
	u8 not_supported:1;
	u8 is_mobile:1;
	u8 is_i85x:1;
	u8 is_i915g:1;
	u8 is_i945gm:1;
	u8 is_g33:1;
	u8 need_gfx_hws:1;	/* needs a graphics-memory hardware status page */
	u8 is_g4x:1;
	u8 is_pineview:1;
	u8 is_broadwater:1;
	u8 is_crestline:1;
	u8 is_ivybridge:1;
	u8 is_valleyview:1;
	u8 has_pch_split:1;	/* display logic split into CPU and PCH halves */
	u8 is_haswell:1;
	u8 has_fbc:1;		/* framebuffer compression */
	u8 has_pipe_cxsr:1;
	u8 has_hotplug:1;
	u8 cursor_needs_physical:1;
	u8 has_overlay:1;
	u8 overlay_needs_physical:1;
	u8 supports_tv:1;
	u8 has_bsd_ring:1;	/* video decode ring */
	u8 has_blt_ring:1;	/* blitter ring */
	u8 has_llc:1;		/* CPU/GPU shared last-level cache */
};
195
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
/* Per-process (aliasing) GTT: page-directory/page-table state backing the
 * PPGTT address space. */
struct i915_hw_ppgtt {
	unsigned num_pd_entries;	/* number of valid page-directory entries */
	vm_page_t *pt_pages;		/* pages backing the page tables */
	uint32_t pd_offset;		/* page-directory offset */
	vm_paddr_t *pt_dma_addr;	/* physical address of each page table */
	vm_paddr_t scratch_page_dma_addr; /* fallback page for unmapped PTEs */
};
205
206
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
/* A hardware (logical ring) context owned by one file descriptor. */
struct i915_hw_context {
	uint32_t id;			/* handle returned to userland */
	bool is_initialized;		/* context image written at least once */
	struct drm_i915_file_private *file_priv; /* owning file */
	struct intel_ring_buffer *ring;	/* ring this context runs on */
	struct drm_i915_gem_object *obj; /* backing object for the context image */
};
216
/* Reasons framebuffer compression (FBC) is currently disabled; reported to
 * the user for diagnostics. */
enum no_fbc_reason {
	FBC_NO_OUTPUT, /* no outputs enabled to compress */
	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
	FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
	FBC_MODE_TOO_LARGE, /* mode too large for compression */
	FBC_BAD_PLANE, /* fbc not supported on plane */
	FBC_NOT_TILED, /* buffer not tiled */
	FBC_MULTIPLE_PIPES, /* more than one pipe active */
	FBC_MODULE_PARAM, /* disabled via module parameter */
};
227
/* Node of the doubly-linked free/used list used by the legacy (DRI1)
 * memory heap manager. */
struct mem_block {
	struct mem_block *next;
	struct mem_block *prev;
	int start;	/* offset of this block within the heap */
	int size;	/* length of this block */
	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
};
235
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

/* Pointers into the ACPI OpRegion shared between BIOS and driver. */
struct intel_opregion {
	struct opregion_header *header;
	struct opregion_acpi *acpi;	/* ACPI mailbox */
	struct opregion_swsci *swsci;	/* software SCI mailbox */
	struct opregion_asle *asle;	/* backlight/ASLE mailbox */
	void *vbt;			/* video BIOS tables */
	u32 *lid_state;
};
#define OPREGION_SIZE            (8*1024)
250
/* Per-DRM-master state: the shared memory area (sarea) used by the legacy
 * userland interface. */
struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 16
/* 16 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 5
259
/* State of one hardware fence (tiling detiler) register. */
struct drm_i915_fence_reg {
	struct list_head lru_list;	/* position on the fence LRU */
	struct drm_i915_gem_object *obj; /* object using this fence, or NULL */
	int pin_count;			/* non-zero prevents stealing this fence */
};
265
/* SDVO output routing information parsed from the video BIOS tables. */
struct sdvo_device_mapping {
	u8 initialized;	/* non-zero once filled in from the VBT */
	u8 dvo_port;
	u8 slave_addr;	/* i2c slave address of the SDVO device */
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};
274
/* Type of Platform Controller Hub paired with the CPU. */
enum intel_pch {
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
};

/* Per-machine quirk flags, stored in dev_priv->quirks. */
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)

struct intel_fbdev;
struct intel_fbc_work;
287
/* Top-level driver-private state, one instance per device; reachable from
 * struct drm_device.  Groups DRI1 legacy state, interrupt bookkeeping,
 * suspend/resume register save area, the GEM memory manager ("mm"), and
 * modesetting/power-management state. */
typedef struct drm_i915_private {
	struct drm_device *dev;

	/* GMBUS (hardware i2c) bus instances, one per port. */
	device_t gmbus_bridge[GMBUS_NUM_PORTS + 1];
	device_t bbbus_bridge[GMBUS_NUM_PORTS + 1];
	device_t gmbus[GMBUS_NUM_PORTS + 1];
	device_t bbbus[GMBUS_NUM_PORTS + 1];
	/** gmbus_sx protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct sx gmbus_sx;
	uint32_t gpio_mmio_base;

	int relative_constants_mode;

	drm_local_map_t *mmio_map;	/* mapping of the register BAR */

	/** gt_fifo_count and the subsequent register write are synchronized
	 * with dev->struct_mutex. */
	unsigned gt_fifo_count;
	/** forcewake_count is protected by gt_lock */
	unsigned forcewake_count;
	/** gt_lock is also taken in irq contexts. */
	struct mtx gt_lock;

	/* drm_i915_ring_buffer_t ring; */
	struct intel_ring_buffer rings[I915_NUM_RINGS];
	uint32_t next_seqno;	/* seqno to assign to the next request */

	/* Hardware status page (legacy DRI1 setup). */
	drm_dma_handle_t *status_page_dmah;
	void *hw_status_page;
	dma_addr_t dma_status_page;
	uint32_t counter;
	unsigned int status_gfx_addr;
	struct drm_gem_object *hws_obj;

	struct drm_i915_gem_object *pwrctx;	/* power context */
	struct drm_i915_gem_object *renderctx;	/* render context */

	/* Legacy DRI1 page-flip state. */
	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	atomic_t irq_received;
	u32 trace_irq_seqno;

	/** Cached value of IER to avoid reads in updating the bitfield */
	u32 pipestat[2];
	u32 irq_mask;
	u32 gt_irq_mask;
	u32 pch_irq_mask;
	struct mtx irq_lock;

	struct mtx dpio_lock;

	u32 hotplug_supported_mask;

	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
	int num_pipe;		/* pipes on this device; see for_each_pipe() */
	int num_pch_pll;

	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD ((1500 /* in ms */ * hz) / 1000)
	int hangcheck_count;
	uint32_t last_acthd[I915_NUM_RINGS];
	uint32_t last_instdone;
	uint32_t last_instdone1;

	unsigned int stop_rings;	/* debug: bitmask of rings to stall */

	struct intel_opregion opregion;


	/* overlay */
	struct intel_overlay *overlay;
	bool sprite_scaling_enabled;

	/* LVDS info */
	int backlight_level;  /* restore backlight to this value */
	bool backlight_enabled;
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits from the VBIOS */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
	unsigned int lvds_val; /* used for checking LVDS channel mode */
	/* Embedded DisplayPort link parameters from the VBT. */
	struct {
		int rate;
		int lanes;
		int preemphasis;
		int vswing;

		bool initialized;
		bool support;
		int bpp;
		struct edp_power_seq pps;
	} edp;
	bool no_aux_handshake;

	int crt_ddc_pin;
	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	/* PCH chipset type */
	enum intel_pch pch_type;

	/* Display functions */
	struct drm_i915_display_funcs display;

	unsigned long quirks;	/* QUIRK_* flags */

	/* Register state saved across suspend/resume. */
	bool modeset_on_lid;
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 saveHWS;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveTRANSACONF;
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveTRANSBCONF;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveDPFC_CB_BASE;
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveDEIER;
	u32 saveDEIMR;
	u32 saveGTIER;
	u32 saveGTIMR;
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 saveCURACNTR;
	u32 saveCURAPOS;
	u32 saveCURABASE;
	u32 saveCURBCNTR;
	u32 saveCURBPOS;
	u32 saveCURBBASE;
	u32 saveCURSIZE;
	u32 saveDP_B;
	u32 saveDP_C;
	u32 saveDP_D;
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 saveFDI_RXA_CTL;
	u32 saveFDI_TXA_CTL;
	u32 saveFDI_RXB_CTL;
	u32 saveFDI_TXB_CTL;
	u32 savePFA_CTL_1;
	u32 savePFB_CTL_1;
	u32 savePFA_WIN_SZ;
	u32 savePFB_WIN_SZ;
	u32 savePFA_WIN_POS;
	u32 savePFB_WIN_POS;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;

	/* GEM memory-manager state. */
	struct {
		/** Memory allocator for GTT stolen memory */
		struct drm_mm stolen;
		/** Memory allocator for GTT */
		struct drm_mm gtt_space;
		/** List of all objects in gtt_space. Used to restore gtt
		 * mappings on resume */
		struct list_head gtt_list;

		/** Usable portion of the GTT for GEM */
		unsigned long gtt_start;
		unsigned long gtt_mappable_end;
		unsigned long gtt_end;

		/** PPGTT used for aliasing the PPGTT with the GTT */
		struct i915_hw_ppgtt *aliasing_ppgtt;

		/**
		 * List of objects currently involved in rendering from the
		 * ringbuffer.
		 *
		 * Includes buffers having the contents of their GPU caches
		 * flushed, not necessarily primitives.  last_rendering_seqno
		 * represents when the rendering involved will be completed.
		 *
		 * A reference is held on the buffer while on this list.
		 */
		struct list_head active_list;

		/**
		 * List of objects which are not in the ringbuffer but which
		 * still have a write_domain which needs to be flushed before
		 * unbinding.
		 *
		 * A reference is held on the buffer while on this list.
		 */
		struct list_head flushing_list;

		/**
		 * LRU list of objects which are not in the ringbuffer and
		 * are ready to unbind, but are still in the GTT.
		 *
		 * last_rendering_seqno is 0 while an object is in this list.
		 *
		 * A reference is not held on the buffer while on this list,
		 * as merely being GTT-bound shouldn't prevent its being
		 * freed, and we'll pull it off the list in the free path.
		 */
		struct list_head inactive_list;

		/** LRU list of objects with fence regs on them. */
		struct list_head fence_list;

		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct timeout_task retire_task;

		/**
		 * Are we in a non-interruptible section of code like
		 * modesetting?
		 */
		bool interruptible;

		uint32_t next_gem_seqno;

		/**
		 * Waiting sequence number, if any
		 */
		uint32_t waiting_gem_seqno;

		/**
		 * Last seq seen at irq time
		 */
		uint32_t irq_gem_seqno;

		/**
		 * Flag if the X Server, and thus DRM, is not currently in
		 * control of the device.
		 *
		 * This is set between LeaveVT and EnterVT.  It needs to be
		 * replaced with a semaphore.  It also needs to be
		 * transitioned away from for kernel modesetting.
		 */
		int suspended;

		/**
		 * Flag if the hardware appears to be wedged.
		 *
		 * This is set when attempts to idle the device timeout.
		 * It prevents command submission from occurring and makes
		 * every pending request fail
		 */
		int wedged;

		/** Bit 6 swizzling required for X tiling */
		uint32_t bit_6_swizzle_x;
		/** Bit 6 swizzling required for Y tiling */
		uint32_t bit_6_swizzle_y;

		/* storage for physical objects */
		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

		/* accounting, useful for userland debugging */
		size_t gtt_total;
		size_t mappable_gtt_total;
		size_t object_memory;
		u32 object_count;

		struct intel_gtt gtt;
		eventhandler_tag i915_lowmem;	/* low-memory event hook */
	} mm;

	const struct intel_device_info *info;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	 * here! */
	struct {
		unsigned allow_batchbuffer : 1;
		u32 *gfx_hws_cpu_addr;
	} dri1;

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];
	/* indicate whether the LVDS_BORDER should be enabled or not */
	unsigned int lvds_border_bits;
	/* Panel fitter placement and size for Ironlake+ */
	u32 pch_pf_pos, pch_pf_size;

	struct drm_crtc *plane_to_crtc_mapping[3];
	struct drm_crtc *pipe_to_crtc_mapping[3];
	/* wait_queue_head_t pending_flip_queue; XXXKIB */

	struct intel_pch_pll pch_plls[I915_NUM_PLLS];

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS*/
	int lvds_downclock;
	struct task idle_task;
	struct callout idle_callout;
	bool busy;
	u16 orig_clock;
	int child_dev_num;
	struct child_device_config *child_dev;
	struct drm_connector *int_lvds_connector;
	struct drm_connector *int_edp_connector;

	device_t bridge_dev;
	bool mchbar_need_disable;
	int mch_res_rid;
	struct resource *mch_res;

	/* RPS (render P-state) interrupt handling. */
	struct mtx rps_lock;
	u32 pm_iir;
	struct task rps_task;

	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	struct timespec last_time2;
	unsigned long gfx_power;
	int c_m;
	int r_t;
	u8 corr;
	struct mtx *mchdev_lock;

	enum no_fbc_reason no_fbc_reason;

	/* Framebuffer-compression scratch allocations. */
	struct drm_mm_node *compressed_fb;
	struct drm_mm_node *compressed_llb;

	unsigned long cfb_size;
	unsigned int cfb_fb;
	int cfb_plane;
	int cfb_y;
	struct intel_fbc_work *fbc_work;

	unsigned int fsb_freq, mem_freq, is_ddr3;

	/* Deferred-work and error-capture infrastructure. */
	struct taskqueue *tq;
	struct task error_task;
	struct task hotplug_task;
	int error_completion;
	struct mtx error_completion_lock;
	/* Protected by dev->error_lock. */
	struct drm_i915_error_state *first_error;
	struct mtx error_lock;
	struct callout hangcheck_timer;

	unsigned long last_gpu_reset;

	struct intel_fbdev *fbdev;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	bool hw_contexts_disabled;
	uint32_t hw_context_size;
} drm_i915_private_t;
774
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->rings[(i__)]), intel_ring_initialized((ring__)))

/* Userland policy for HDMI audio, set via the force_audio property. */
enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};
786
/* GPU caching modes an object can be mapped with. */
enum i915_cache_level {
	I915_CACHE_NONE,	/* uncached */
	I915_CACHE_LLC,		/* last-level cache */
	I915_CACHE_LLC_MLC, /* gen6+ */
};
792
/* Coarse chip-family flags (legacy classification). */
enum intel_chip_family {
	CHIP_I8XX = 0x01,
	CHIP_I9XX = 0x02,
	CHIP_I915 = 0x04,
	CHIP_I965 = 0x08,
};
799
/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
	struct drm_gem_object base;

	/** Current space allocated to this object in the GTT, if any. */
	struct drm_mm_node *gtt_space;
	/** This object's place on dev_priv->mm.gtt_list (all bound objects). */
	struct list_head gtt_list;
	/** This object's place on the active/flushing/inactive lists */
	struct list_head ring_list;
	struct list_head mm_list;
	/** This object's place on GPU write list */
	struct list_head gpu_write_list;
	/** This object's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * This is set if the object is on the active or flushing lists
	 * (has pending rendering), and is not set if it's on inactive (ready
	 * to be unbound).
	 */
	unsigned int active:1;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT
	 */
	unsigned int dirty:1;

	/**
	 * This is set if the object has been written to since the last
	 * GPU flush.
	 */
	unsigned int pending_gpu_write:1;

	/**
	 * Fence register bits (if any) for this object.  Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv:2;

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode:2;
	/**
	 * Whether the tiling parameters for the currently associated fence
	 * register have changed. Note that for the purposes of tracking
	 * tiling changes we also treat the unfenced register, the register
	 * slot that the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	unsigned int fence_dirty:1;

	/** How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
	 * (via user_pin_count), execbuffer (objects are not allowed multiple
	 * times for the same batchbuffer), and the framebuffer code. When
	 * switching/pageflipping, the framebuffer code has at most two buffers
	 * pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable:1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separate for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;
	unsigned int pin_display:1;

	/*
	 * Is the GPU currently using a fence to access this buffer?
	 */
	unsigned int pending_fenced_gpu_access:1;
	unsigned int fenced_gpu_access:1;

	/** One of enum i915_cache_level. */
	unsigned int cache_level:2;

	unsigned int has_aliasing_ppgtt_mapping:1;
	unsigned int has_global_gtt_mapping:1;

	vm_page_t *pages;	/* backing pages, valid while pinned */
	int pages_pin_count;

	/**
	 * DMAR support
	 */
	struct sglist *sg_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	LIST_ENTRY(drm_i915_gem_object) exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

	/**
	 * Current offset of the object in GTT space.
	 *
	 * This is the same as gtt_space->start
	 */
	uint32_t gtt_offset;

	struct intel_ring_buffer *ring;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_rendering_seqno;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;

	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	/** User space pin count and filp owning the pin */
	uint32_t user_pin_count;
	struct drm_file *pin_filp;

	/** for phy allocated objects */
	struct drm_i915_gem_phys_object *phys_obj;

	/**
	 * Number of crtcs where this object is currently the fb, but
	 * will be page flipped away on the next vblank.  When it
	 * reaches 0, dev_priv->pending_flip_queue will be woken up.
	 */
	int pending_flip;
};
946
/* Convert a base drm_gem_object pointer to the containing i915 object. */
#define	to_intel_bo(x) __containerof(x, struct drm_i915_gem_object, base)

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
	/** On which ring this request was generated */
	struct intel_ring_buffer *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Position in the ringbuffer of the end of the request */
	u32 tail;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
};
979
/* Per-open-file driver state: this file's outstanding requests and its
 * hardware-context name table. */
struct drm_i915_file_private {
	struct {
		struct list_head request_list;	/* requests issued by this file */
		struct mtx lck;			/* protects request_list */
	} mm;
	struct drm_gem_names context_idr;	/* context-id -> i915_hw_context */
};
987
/* Snapshot of GPU and driver state captured when a hang or error is
 * detected; exposed to userland for post-mortem debugging.  Reference
 * counted via "ref". */
struct drm_i915_error_state {
	u_int ref;
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	bool waiting[I915_NUM_RINGS];
	u32 pipestat[I915_MAX_PIPES];
	u32 tail[I915_NUM_RINGS];
	u32 head[I915_NUM_RINGS];
	u32 ipeir[I915_NUM_RINGS];
	u32 ipehr[I915_NUM_RINGS];
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
	u32 cpu_ring_tail[I915_NUM_RINGS];
	u32 error; /* gen6+ */
	u32 instpm[I915_NUM_RINGS];
	u32 instps[I915_NUM_RINGS];
	u32 instdone1;
	u32 seqno[I915_NUM_RINGS];
	u64 bbaddr;
	u32 fault_reg[I915_NUM_RINGS];
	u32 done_reg;
	u32 faddr[I915_NUM_RINGS];
	u64 fence[I915_MAX_NUM_FENCES];
	struct timeval time;	/* wall-clock time of capture */
	/* Per-ring capture: copies of the ringbuffer/batchbuffer contents and
	 * the outstanding requests at the time of the hang. */
	struct drm_i915_error_ring {
		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];	/* page_count page copies follow */
		} *ringbuffer, *batchbuffer;
		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;
		int num_requests;
	} ring[I915_NUM_RINGS];
	/* Summaries of the active and pinned buffer objects at capture time. */
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 seqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		s32 ring:4;
		u32 cache_level:2;
	} *active_bo, *pinned_bo;
	u32 active_bo_count, pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
};
1048
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage.  This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as new workload arises GPU wakes up automatically as well.
 *
 * There are different RC6 modes available in Intel GPU, which differentiate
 * among each other with the latency required to enter and leave RC6 and
 * voltage consumed by the GPU in different states.
 *
 * The combination of the following flags define which states GPU is allowed
 * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is deepest RC6. Their support by hardware varies according to the
 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
 * which brings the most power savings; deeper states save more power, but
 * require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE			(1<<0)
#define INTEL_RC6p_ENABLE			(1<<1)
#define INTEL_RC6pp_ENABLE			(1<<2)
1069
1070extern int intel_iommu_enabled;
1071extern struct drm_ioctl_desc i915_ioctls[];
1072extern struct drm_driver i915_driver_info;
1073extern struct cdev_pager_ops i915_gem_pager_ops;
1074extern unsigned int i915_fbpercrtc;
1075extern int i915_panel_ignore_lid;
1076extern int i915_panel_invert_brightness;
1077extern unsigned int i915_powersave;
1078extern int i915_prefault_disable;
1079extern int i915_semaphores;
1080extern unsigned int i915_lvds_downclock;
1081extern int i915_lvds_channel_mode;
1082extern int i915_panel_use_ssc;
1083extern int i915_vbt_sdvo_panel_type;
1084extern int i915_enable_rc6;
1085extern int i915_enable_fbc;
1086extern int i915_enable_ppgtt;
1087extern int i915_enable_hangcheck;
1088
1089const struct intel_device_info *i915_get_device_id(int device);
1090
1091int i915_reset(struct drm_device *dev);
1092extern int intel_gpu_reset(struct drm_device *dev);
1093
1094/* i915_debug.c */
1095int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
1096    struct sysctl_oid *top);
1097void i915_sysctl_cleanup(struct drm_device *dev);
1098
1099extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
1100extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
1101
/* i915_dma.c */
1103int i915_batchbuffer(struct drm_device *dev, void *data,
1104    struct drm_file *file_priv);
1105int i915_cmdbuffer(struct drm_device *dev, void *data,
1106    struct drm_file *file_priv);
1107int i915_getparam(struct drm_device *dev, void *data,
1108    struct drm_file *file_priv);
1109void i915_update_dri1_breadcrumb(struct drm_device *dev);
1110extern void i915_kernel_lost_context(struct drm_device * dev);
1111extern int i915_driver_load(struct drm_device *, unsigned long flags);
1112extern int i915_driver_unload(struct drm_device *);
1113extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
1114extern void i915_driver_lastclose(struct drm_device * dev);
1115extern void i915_driver_preclose(struct drm_device *dev,
1116				 struct drm_file *file_priv);
1117extern void i915_driver_postclose(struct drm_device *dev,
1118				  struct drm_file *file_priv);
1119extern int i915_driver_device_is_agp(struct drm_device * dev);
1120extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
1121			      unsigned long arg);
1122extern int i915_emit_box(struct drm_device *dev,
1123			 struct drm_clip_rect __user *boxes,
1124			 int i, int DR1, int DR4);
1125int i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
1126    int DR1, int DR4);
1127
1128unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
1129unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
1130void i915_update_gfx_val(struct drm_i915_private *dev_priv);
1131unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
1132unsigned long i915_read_mch_val(void);
1133bool i915_gpu_raise(void);
1134bool i915_gpu_lower(void);
1135bool i915_gpu_busy(void);
1136bool i915_gpu_turbo_disable(void);
1137
1138/* i915_irq.c */
1139extern int i915_irq_emit(struct drm_device *dev, void *data,
1140			 struct drm_file *file_priv);
1141extern void intel_irq_init(struct drm_device *dev);
1142
1143void intel_enable_asle(struct drm_device *dev);
1144void i915_hangcheck_elapsed(void *context);
1145void i915_handle_error(struct drm_device *dev, bool wedged);
1146void i915_error_state_free(struct drm_i915_error_state *error);
1147
1148void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1149void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1150
1151void i915_destroy_error_state(struct drm_device *dev);
1152
1153/* i915_gem.c */
1154int i915_gem_create(struct drm_file *file, struct drm_device *dev, uint64_t size,
1155			uint32_t *handle_p);
1156int i915_gem_init_ioctl(struct drm_device *dev, void *data,
1157			struct drm_file *file_priv);
1158int i915_gem_create_ioctl(struct drm_device *dev, void *data,
1159			  struct drm_file *file_priv);
1160int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1161			 struct drm_file *file_priv);
1162int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1163			  struct drm_file *file_priv);
1164int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1165			struct drm_file *file_priv);
1166int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1167			struct drm_file *file_priv);
1168int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1169			      struct drm_file *file_priv);
1170int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1171			     struct drm_file *file_priv);
1172int i915_gem_execbuffer(struct drm_device *dev, void *data,
1173			struct drm_file *file_priv);
1174int i915_gem_execbuffer2(struct drm_device *dev, void *data,
1175			struct drm_file *file_priv);
1176int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
1177		       struct drm_file *file_priv);
1178int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
1179			 struct drm_file *file_priv);
1180int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
1181			struct drm_file *file_priv);
1182int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
1183			    struct drm_file *file_priv);
1184int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1185			   struct drm_file *file_priv);
1186int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
1187			   struct drm_file *file_priv);
1188int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
1189			   struct drm_file *file_priv);
1190int i915_gem_set_tiling(struct drm_device *dev, void *data,
1191			struct drm_file *file_priv);
1192int i915_gem_get_tiling(struct drm_device *dev, void *data,
1193			struct drm_file *file_priv);
1194int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
1195				struct drm_file *file_priv);
1196void i915_gem_load(struct drm_device *dev);
1197void i915_gem_unload(struct drm_device *dev);
1198int i915_gem_init_object(struct drm_gem_object *obj);
1199void i915_gem_free_object(struct drm_gem_object *obj);
1200int i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
1201    bool map_and_fenceable);
1202void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
1203int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
1204void i915_gem_lastclose(struct drm_device *dev);
1205uint32_t i915_get_gem_seqno(struct drm_device *dev);
1206
1207static inline bool
1208i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
1209{
1210	if (obj->fence_reg != I915_FENCE_REG_NONE) {
1211		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1212		dev_priv->fence_regs[obj->fence_reg].pin_count++;
1213		return true;
1214	} else
1215		return false;
1216}
1217
1218static inline void
1219i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
1220{
1221	if (obj->fence_reg != I915_FENCE_REG_NONE) {
1222		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1223		dev_priv->fence_regs[obj->fence_reg].pin_count--;
1224	}
1225}
1226
1227void i915_gem_retire_requests(struct drm_device *dev);
1228void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
1229void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
1230struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
1231    size_t size);
1232uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1233    uint32_t size, int tiling_mode);
1234int i915_mutex_lock_interruptible(struct drm_device *dev);
1235int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
1236    bool write);
1237int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
1238    bool write);
1239int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
1240    u32 alignment, struct intel_ring_buffer *pipelined);
1241void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
1242int i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
1243int i915_gem_flush_ring(struct intel_ring_buffer *ring,
1244    uint32_t invalidate_domains, uint32_t flush_domains);
1245void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
1246int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
1247int i915_gem_object_sync(struct drm_i915_gem_object *obj,
1248    struct intel_ring_buffer *to);
1249int i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
1250int i915_gem_idle(struct drm_device *dev);
1251int i915_gem_init(struct drm_device *dev);
1252int i915_gem_init_hw(struct drm_device *dev);
1253void i915_gem_init_swizzling(struct drm_device *dev);
1254void i915_gem_init_ppgtt(struct drm_device *dev);
1255void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
1256int i915_gpu_idle(struct drm_device *dev);
1257void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1258    struct intel_ring_buffer *ring, uint32_t seqno);
1259int i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file,
1260    struct drm_i915_gem_request *request);
1261int i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
1262void i915_gem_reset(struct drm_device *dev);
1263int i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno);
1264int i915_gem_mmap(struct drm_device *dev, uint64_t offset, int prot);
1265int i915_gem_fault(struct drm_device *dev, uint64_t offset, int prot,
1266    uint64_t *phys);
1267void i915_gem_release(struct drm_device *dev, struct drm_file *file);
1268int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
1269    enum i915_cache_level cache_level);
1270
1271/* i915_gem_context.c */
1272void i915_gem_context_init(struct drm_device *dev);
1273void i915_gem_context_fini(struct drm_device *dev);
1274void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
1275int i915_switch_context(struct intel_ring_buffer *ring,
1276			struct drm_file *file, int to_id);
1277int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
1278				  struct drm_file *file);
1279int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
1280				   struct drm_file *file);
1281
1282void i915_gem_free_all_phys_object(struct drm_device *dev);
1283void i915_gem_detach_phys_object(struct drm_device *dev,
1284    struct drm_i915_gem_object *obj);
1285int i915_gem_attach_phys_object(struct drm_device *dev,
1286    struct drm_i915_gem_object *obj, int id, int align);
1287
1288int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
1289    struct drm_mode_create_dumb *args);
1290int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
1291     uint32_t handle, uint64_t *offset);
1292int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
1293     uint32_t handle);
1294
1295/* i915_gem_tiling.c */
1296void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
1297void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
1298void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
1299void i915_gem_object_do_bit_17_swizzle_page(struct drm_i915_gem_object *obj,
1300    struct vm_page *m);
1301
1302/* i915_gem_evict.c */
1303int i915_gem_evict_something(struct drm_device *dev, int min_size,
1304    unsigned alignment, bool mappable);
1305int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
1306
1307/* i915_gem_stolen.c */
1308int i915_gem_init_stolen(struct drm_device *dev);
1309void i915_gem_cleanup_stolen(struct drm_device *dev);
1310
1311/* i915_suspend.c */
1312extern int i915_save_state(struct drm_device *dev);
1313extern int i915_restore_state(struct drm_device *dev);
1314
1315/* intel_iic.c */
1316extern int intel_setup_gmbus(struct drm_device *dev);
1317extern void intel_teardown_gmbus(struct drm_device *dev);
1318extern void intel_gmbus_set_speed(device_t idev, int speed);
1319extern void intel_gmbus_force_bit(device_t idev, bool force_bit);
1320extern void intel_iic_reset(struct drm_device *dev);
1321static inline bool intel_gmbus_is_port_valid(unsigned port)
1322{
1323	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
1324}
1325extern device_t intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
1326    unsigned port);
1327
1328/* intel_opregion.c */
1329int intel_opregion_setup(struct drm_device *dev);
1330extern void intel_opregion_init(struct drm_device *dev);
1331extern void intel_opregion_fini(struct drm_device *dev);
1332extern void intel_opregion_asle_intr(struct drm_device *dev);
1333extern void intel_opregion_gse_intr(struct drm_device *dev);
1334extern void intel_opregion_enable_asle(struct drm_device *dev);
1335
1336/* i915_gem_gtt.c */
1337int i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
1338void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
1339void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
1340    struct drm_i915_gem_object *obj, enum i915_cache_level cache_level);
1341void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
1342    struct drm_i915_gem_object *obj);
1343
1344void i915_gem_restore_gtt_mappings(struct drm_device *dev);
1345int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
1346void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
1347			      enum i915_cache_level cache_level);
1348void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
1349void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
1350int i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start,
1351    unsigned long mappable_end, unsigned long end);
1352
1353/* modesetting */
1354extern void intel_modeset_init_hw(struct drm_device *dev);
1355extern void intel_modeset_init(struct drm_device *dev);
1356extern void intel_modeset_gem_init(struct drm_device *dev);
1357extern void intel_modeset_cleanup(struct drm_device *dev);
1358extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
1359extern void intel_disable_fbc(struct drm_device *dev);
1360extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
1361extern void ironlake_init_pch_refclk(struct drm_device *dev);
1362extern void ironlake_enable_rc6(struct drm_device *dev);
1363extern void gen6_set_rps(struct drm_device *dev, u8 val);
1364extern void intel_detect_pch(struct drm_device *dev);
1365extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
1366/* IPS */
1367extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
1368extern void intel_gpu_ips_teardown(void);
1369
1370extern bool i915_semaphore_is_enabled(struct drm_device *dev);
1371extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1372extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
1373extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
1374extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
1375
1376extern void vlv_force_wake_get(struct drm_i915_private *dev_priv);
1377extern void vlv_force_wake_put(struct drm_i915_private *dev_priv);
1378
1379extern struct intel_overlay_error_state *intel_overlay_capture_error_state(
1380    struct drm_device *dev);
1381extern void intel_overlay_print_error_state(struct sbuf *m,
1382    struct intel_overlay_error_state *error);
1383extern struct intel_display_error_state *intel_display_capture_error_state(
1384    struct drm_device *dev);
1385extern void intel_display_print_error_state(struct sbuf *m,
1386    struct drm_device *dev, struct intel_display_error_state *error);
1387
1388static inline void
1389trace_i915_reg_rw(boolean_t rw, int reg, uint64_t val, int sz)
1390{
1391
1392	CTR4(KTR_DRM_REG, "[%x/%d] %c %x", reg, sz, rw ? "w" : "r", val);
1393}
1394
1395/* On SNB platform, before reading ring registers forcewake bit
1396 * must be set to prevent GT core from power down and stale values being
1397 * returned.
1398 */
1399void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1400void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
1401int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
1402
1403#define __i915_read(x, y) \
1404	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
1405
1406__i915_read(8, 8)
1407__i915_read(16, 16)
1408__i915_read(32, 32)
1409__i915_read(64, 64)
1410#undef __i915_read
1411
1412#define __i915_write(x, y) \
1413	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);
1414
1415__i915_write(8, 8)
1416__i915_write(16, 16)
1417__i915_write(32, 32)
1418__i915_write(64, 64)
1419#undef __i915_write
1420
1421#define I915_READ8(reg)		i915_read8(dev_priv, (reg))
1422#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val))
1423
1424#define I915_READ16(reg)	i915_read16(dev_priv, (reg))
1425#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val))
1426#define I915_READ16_NOTRACE(reg)	DRM_READ16(dev_priv->mmio_map, (reg))
1427#define I915_WRITE16_NOTRACE(reg, val)	DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
1428
1429#define I915_READ(reg)		i915_read32(dev_priv, (reg))
1430#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val))
1431#define I915_READ_NOTRACE(reg)		DRM_READ32(dev_priv->mmio_map, (reg))
1432#define I915_WRITE_NOTRACE(reg, val)	DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
1433
1434#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val))
1435#define I915_READ64(reg)	i915_read64(dev_priv, (reg))
1436
1437#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
1438#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
1439
1440#define I915_VERBOSE 0
1441
1442/**
1443 * Reads a dword out of the status page, which is written to from the command
1444 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
1445 * MI_STORE_DATA_IMM.
1446 *
1447 * The following dwords have a reserved meaning:
1448 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
1449 * 0x04: ring 0 head pointer
1450 * 0x05: ring 1 head pointer (915-class)
1451 * 0x06: ring 2 head pointer (915-class)
1452 * 0x10-0x1b: Context status DWords (GM45)
1453 * 0x1f: Last written status offset. (GM45)
1454 *
1455 * The area from dword 0x20 to 0x3ff is available for driver usage.
1456 */
1457#define I915_GEM_HWS_INDEX		0x20
1458
1459#define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
1460
1461#define IS_I830(dev)		((dev)->pci_device == 0x3577)
1462#define IS_845G(dev)		((dev)->pci_device == 0x2562)
1463#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
1464#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
1465#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
1466#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
1467#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
1468#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
1469#define	IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
1470#define	IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
1471#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
1472#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
1473#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
1474#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
1475#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
1476#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
1477#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
1478#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
1479#define	IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
1480#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
1481#define IS_HASWELL(dev)		(INTEL_INFO(dev)->is_haswell)
1482#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
1483
1484/* XXXKIB LEGACY */
1485#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
1486		       (dev)->pci_device == 0x2982 || \
1487		       (dev)->pci_device == 0x2992 || \
1488		       (dev)->pci_device == 0x29A2 || \
1489		       (dev)->pci_device == 0x2A02 || \
1490		       (dev)->pci_device == 0x2A12 || \
1491		       (dev)->pci_device == 0x2A42 || \
1492		       (dev)->pci_device == 0x2E02 || \
1493		       (dev)->pci_device == 0x2E12 || \
1494		       (dev)->pci_device == 0x2E22 || \
1495		       (dev)->pci_device == 0x2E32)
1496
1497#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
1498
1499#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
1500#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
1501#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
1502
1503#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
1504		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
1505/* XXXKIB LEGACY END */
1506
1507#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
1508#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
1509#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
1510#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
1511#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
1512#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
1513
1514#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
1515#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
1516#define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
1517#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
1518
1519#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
1520#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >=6)
1521
1522#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
1523#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
1524
1525/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1526 * rows, which changed the alignment requirements and fence programming.
1527 */
1528#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
1529						      IS_I915GM(dev)))
1530#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
1531#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
1532#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
1533#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
1534#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
1535#define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
1536/* dsparb controlled by hw only */
1537#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
1538
1539#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
1540#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1541#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1542
1543#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split)
1544#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
1545
1546#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
1547#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
1548#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1549#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
1550
1551#define PRIMARY_RINGBUFFER_SIZE         (128*1024)
1552
/*
 * Return true when sequence number @seq1 is at or beyond @seq2.
 * The signed difference makes the comparison robust against 32-bit
 * seqno wrap-around.
 */
static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	int32_t delta = (int32_t)(seq1 - seq2);

	return delta >= 0;
}
1559
/*
 * Flush chipset write buffers after CPU writes to GEM objects.  Only
 * pre-gen6 hardware needs the explicit intel_gtt_chipset_flush() call;
 * on gen >= 6 this is a no-op — presumably those parts are coherent
 * with the CPU caches (NOTE(review): confirm against intel_gtt docs).
 */
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		intel_gtt_chipset_flush();
}
1565
/*
 * Take a reference preventing the object's backing pages from being
 * released.  Pair each call with i915_gem_object_unpin_pages().
 */
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	/* KASSERT(obj->pages != NULL, ("pin and NULL pages")); */
	obj->pages_pin_count++;
}
/*
 * Drop a backing-pages reference taken by i915_gem_object_pin_pages().
 * Panics (KASSERT) on unbalanced unpin.
 */
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	KASSERT(obj->pages_pin_count != 0, ("zero pages_pin_count"));
	obj->pages_pin_count--;
}
1576
1577u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
1578
1579#endif
1580