/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*-
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_dma.c 282199 2015-04-28 19:35:05Z dumbbell $");

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/intel_ringbuffer.h>

#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])

#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
	intel_ring_advance(LP_RING(dev_priv))

#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
	if (LP_RING(dev->dev_private)->obj == NULL)			\
		LOCK_TEST_WITH_RETURN(dev, file);			\
} while (0)
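/*
 * Legacy DRI1 ring helpers: all of the paths below drive the render
 * ring (RCS) through these macros.  The usual emit sequence in this
 * file is BEGIN_LP_RING(n) to reserve n dwords, exactly n OUT_RING()
 * calls, then ADVANCE_LP_RING() to commit the new tail to the hardware.
 */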

static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
	if (I915_NEED_GFX_HWS(dev_priv->dev))
		return ((volatile u32*)(dev_priv->dri1.gfx_hws_cpu_addr))[reg];
	else
		return intel_read_status_page(LP_RING(dev_priv), reg);
}

#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX		0x21
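/*
 * The breadcrumb is a driver-side sequence number which the GPU writes
 * back into dword 0x21 of the hardware status page via
 * MI_STORE_DWORD_INDEX (see i915_emit_breadcrumb() and i915_emit_irq()
 * below); READ_BREADCRUMB returns the last value written back.
 */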

void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}
}

static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
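	/*
	 * On gen4+ the status page may sit above 4GB: bits 35:32 of the
	 * physical address are folded into bits 7:4 of HWS_PGA, which is
	 * what the ">> 28" combined with the 0xf0 mask computes.
	 */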
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * Program Hardware Status Page
	 * XXXKIB Keep 4GB limit for allocation for now.  This method
	 * of allocation is used on <= 965 hardware, which has several
	 * errata regarding the use of physical memory > 4 GB.
	 */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, BUS_SPACE_MAXADDR);
	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Cannot allocate hardware status page\n");
		return -ENOMEM;
	}
	ring->status_page.page_addr = dev_priv->hw_status_page =
	    dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

	i915_write_hws_pga(dev);
	DRM_DEBUG("Enabled hardware status page, phys %jx\n",
	    (uintmax_t)dev_priv->dma_status_page);
	return 0;
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		ring->status_page.gfx_addr = 0;
		pmap_unmapdev((vm_offset_t)dev_priv->dri1.gfx_hws_cpu_addr,
		    PAGE_SIZE);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

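	/*
	 * Re-sample head and tail from the hardware and recompute the
	 * free space; the "+ 8" keeps a small gap so that tail never
	 * catches up to head (head == tail must mean an empty ring).
	 */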
	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	DRM_LOCK(dev);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->rings[i]);
	DRM_UNLOCK(dev);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
		    ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found, assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}

static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG("\n");

	if (ring->virtual_start == NULL) {
		DRM_ERROR("cannot ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Cannot find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG("hw status page @ %p\n", ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
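/*
 * A command word's top three bits (31:29) select the client: 0x0 is
 * the MI client, 0x2 the 2D client and 0x3 the 3D client; length
 * fields are client- and opcode-specific, hence the nested decode.
 */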
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */

	return ret;
}

static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
			  int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

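	/*
	 * Reserve an even number of dwords, presumably because the tail
	 * pointer advances in qword units; odd counts are padded out
	 * below with an MI_NOOP (the 0 dword).
	 */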
	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return -EINVAL;

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return -EINVAL;
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

int i915_emit_box(struct drm_device * dev,
		  struct drm_clip_rect *boxes,
		  int i, int DR1, int DR4)
{
	struct drm_clip_rect box;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return -EFAULT;
	}

	return (i915_emit_box_p(dev, &box, DR1, DR4));
}

int
i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
    int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 || box->y2 <= 0 ||
	    box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret != 0)
			return (ret);

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret != 0)
			return (ret);

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	if (++dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}

static int i915_dispatch_cmdbuffer(struct drm_device * dev,
    drm_i915_cmdbuffer_t * cmd, struct drm_clip_rect *cliprects, void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box_p(dev, &cmd->cliprects[i],
			    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

static int
i915_dispatch_batchbuffer(struct drm_device * dev,
    drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box_p(dev, &cliprects[i],
			    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

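		/*
		 * 830/845 lack MI_BATCH_BUFFER_START; they take an explicit
		 * start/end pair via MI_BATCH_BUFFER instead of jumping into
		 * the buffer and returning at MI_BATCH_BUFFER_END.
		 */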
		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret != 0)
				return (ret);

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) |
				    MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret != 0)
				return (ret);

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	i915_emit_breadcrumb(dev);

	return 0;
}

static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __func__,
		  dev_priv->current_page,
		  master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

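	/*
	 * Emit a flush, then an async flip of the display base between the
	 * two DRI1 pages, then a wait for the flip to complete; a breadcrumb
	 * is stored afterwards so userspace can tell when the swap happened.
	 */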
	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;
	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

static int
i915_quiescent(struct drm_device *dev)
{
	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);

	i915_kernel_lost_context(dev);
	return (intel_wait_ring_idle(ring));
}

static int
i915_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	ret = i915_quiescent(dev);
	DRM_UNLOCK(dev);

	return (ret);
}

int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	struct drm_clip_rect *cliprects;
	size_t cliplen;
	int ret;

	if (!dev_priv->dri1.allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch->start, batch->used, batch->num_cliprects);

	cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect);
	if (batch->num_cliprects < 0)
		return -EFAULT;
	if (batch->num_cliprects != 0) {
		cliprects = malloc(batch->num_cliprects *
		    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
		    M_WAITOK | M_ZERO);

		ret = -copyin(batch->cliprects, cliprects,
		    batch->num_cliprects * sizeof(struct drm_clip_rect));
		if (ret != 0)
			goto fail_free;
	} else
		cliprects = NULL;

	DRM_LOCK(dev);
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	DRM_UNLOCK(dev);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	free(cliprects, DRM_MEM_DMA);
	return ret;
}

int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);

	ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
	if (ret != 0)
		goto fail_batch_free;

	if (cmdbuf->num_cliprects) {
		cliprects = malloc(cmdbuf->num_cliprects *
		    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
		    M_WAITOK | M_ZERO);
		ret = -copyin(cmdbuf->cliprects, cliprects,
		    cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
		if (ret != 0)
			goto fail_clip_free;
	}

	DRM_LOCK(dev);
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	DRM_UNLOCK(dev);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	free(cliprects, DRM_MEM_DMA);
fail_batch_free:
	free(batch_data, DRM_MEM_DMA);
	return ret;
}

static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG("i915: emit_irq\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->counter;
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	ret = 0;
	mtx_lock(&dev_priv->irq_lock);
	if (ring->irq_get(ring)) {
		while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
			ret = -msleep(ring, &dev_priv->irq_lock, PCATCH,
			    "915wtq", 3 * hz);
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
		}
		ring->irq_put(ring);
		mtx_unlock(&dev_priv->irq_lock);
	} else {
		mtx_unlock(&dev_priv->irq_lock);
		if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
		     3000, 1, "915wir"))
			ret = -EBUSY;
	}

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	result = i915_emit_irq(dev);
	DRM_UNLOCK(dev);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	DRM_DEBUG("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	ret = i915_dispatch_flip(dev);
	DRM_UNLOCK(dev);

	return ret;
}

int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq_enabled ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->rings[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->rings[BCS]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG("unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	return 0;
}

static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_ERROR("tried to set status page when mode setting active\n");
		return 0;
	}

	ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
	    hws->addr & (0x1ffff<<12);

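	/*
	 * The status page now lives in graphics memory; map it through the
	 * AGP aperture (write-combined) so the CPU can still read the
	 * breadcrumb and other dwords the GPU writes there.
	 */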
	dev_priv->dri1.gfx_hws_cpu_addr = pmap_mapdev_attr(
	    dev->agp->base + hws->addr, PAGE_SIZE,
	    VM_MEMATTR_WRITE_COMBINING);
	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
		DRM_ERROR("cannot ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}

	memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
			dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
	return 0;
}

static int
i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

#if 0
	intel_register_dsm_handler();
#endif

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret != 0)
		goto cleanup_gem_stolen;

	intel_modeset_gem_init(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem;

	dev->vblank_disable_allowed = 1;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	drm_kms_helper_poll_init(dev);

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	return (0);

cleanup_gem:
	DRM_LOCK(dev);
	i915_gem_cleanup_ringbuffer(dev);
	DRM_UNLOCK(dev);
	i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
	return (ret);
}

int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = malloc(sizeof(*master_priv), DRM_MEM_DMA,
	    M_NOWAIT | M_ZERO);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	free(master_priv, DRM_MEM_DMA);

	master->driver_priv = NULL;
}

static int
i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv;

	dev_priv = dev->dev_private;

	dev_priv->bridge_dev = intel_gtt_get_bridge_device();
	if (dev_priv->bridge_dev == NULL) {
		DRM_ERROR("bridge device not found\n");
		return (-1);
	}
	return (0);
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv;
	device_t vga;
	int reg;
	u32 temp_lo, temp_hi;
	u64 mchbar_addr, temp;

	dev_priv = dev->dev_private;
	reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

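	/* MCHBAR is a 64-bit BAR on gen4+ and a 32-bit BAR before that,
	 * hence the extra high-dword read below. */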
	if (INTEL_INFO(dev)->gen >= 4)
		temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4);
	else
		temp_hi = 0;
	temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef XXX_CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	vga = device_get_parent(dev->dev);
	dev_priv->mch_res_rid = 0x100;
	dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
	    dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
	    MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE);
	if (dev_priv->mch_res == NULL) {
		DRM_ERROR("failed mchbar resource alloc\n");
		return (-ENOMEM);
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		temp = rman_get_start(dev_priv->mch_res);
		temp >>= 32;
		pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4);
	}
	pci_write_config(dev_priv->bridge_dev, reg,
	    rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4);
	return (0);
}

static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv;
	int mchbar_reg;
	u32 temp;
	bool enabled;

	dev_priv = dev->dev_private;
	mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4);
		enabled = (temp & DEVEN_MCHBAR_EN) != 0;
	} else {
		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled) {
		DRM_DEBUG("mchbar already enabled\n");
		return;
	}

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
		    temp | DEVEN_MCHBAR_EN, 4);
	} else {
		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
		pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv;
	device_t vga;
	int mchbar_reg;
	u32 temp;

	dev_priv = dev->dev_private;
	mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			temp = pci_read_config(dev_priv->bridge_dev,
			    DEVEN_REG, 4);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
			    temp, 4);
		} else {
			temp = pci_read_config(dev_priv->bridge_dev,
			    mchbar_reg, 4);
			temp &= ~1;
			pci_write_config(dev_priv->bridge_dev, mchbar_reg,
			    temp, 4);
		}
	}

	if (dev_priv->mch_res != NULL) {
		vga = device_get_parent(dev->dev);
		BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		dev_priv->mch_res = NULL;
	}
}

int
i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_device_info *info;
	unsigned long base, size;
	int mmio_bar, ret;

	info = i915_get_device_id(dev->pci_device);

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	ret = 0;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
	    M_ZERO | M_WAITOK);

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = info;

	if (i915_get_bridge_dev(dev)) {
		free(dev_priv, DRM_MEM_DRIVER);
		return (-EIO);
	}
	dev_priv->mm.gtt = intel_gtt_get();

	/* Add register map (needed for suspend/resume) */
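	/* Gen2 exposes the register block in BAR 1; later parts use BAR 0. */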
	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	base = drm_get_resource_start(dev, mmio_bar);
	size = drm_get_resource_len(dev, mmio_bar);

	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
	    _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
	if (ret != 0) {
		DRM_ERROR("Failed to allocate mmio_map: %d\n", ret);
		free(dev_priv, DRM_MEM_DRIVER);
		return (ret);
	}

	dev_priv->tq = taskqueue_create("915", M_WAITOK,
	    taskqueue_thread_enqueue, &dev_priv->tq);
	taskqueue_start_threads(&dev_priv->tq, 1, PWAIT, "i915 taskq");
	mtx_init(&dev_priv->gt_lock, "915gt", NULL, MTX_DEF);
	mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF);
	mtx_init(&dev_priv->error_completion_lock, "915cmp", NULL, MTX_DEF);
	mtx_init(&dev_priv->rps_lock, "915rps", NULL, MTX_DEF);
	mtx_init(&dev_priv->dpio_lock, "915dpi", NULL, MTX_DEF);

	intel_irq_init(dev);

	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		drm_pci_enable_msi(dev);

	/* Init HWS */
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = i915_init_phys_hws(dev);
		if (ret != 0) {
			drm_rmmap(dev, dev_priv->mmio_map);
			free(dev_priv, DRM_MEM_DRIVER);
			return ret;
		}
	}

	mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF);

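	/* Ivybridge/Haswell have three display pipes; everything else has
	 * two, except desktop gen2 which has a single pipe. */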
	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
		dev_priv->num_pipe = 3;
	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
		dev_priv->num_pipe = 2;
	else
		dev_priv->num_pipe = 1;

	ret = drm_vblank_init(dev, dev_priv->num_pipe);
	if (ret)
		goto out_gem_unload;

	/* Start out suspended */
	dev_priv->mm.suspended = 1;

	intel_detect_pch(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_gem_unload;
		}
	}

	intel_opregion_init(dev);

	callout_init(&dev_priv->hangcheck_timer, 1);
	callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
	    i915_hangcheck_elapsed, dev);

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	return (0);

out_gem_unload:
	/* XXXKIB */
	(void) i915_driver_unload(dev);
	return (ret);
}

int
i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_LOCK(dev);
	ret = i915_gpu_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	i915_gem_retire_requests(dev);
	DRM_UNLOCK(dev);

	i915_free_hws(dev);

	intel_teardown_mchbar(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);
	}

	/* Free error state after interrupts are fully disabled. */
	callout_stop(&dev_priv->hangcheck_timer);
	callout_drain(&dev_priv->hangcheck_timer);

	i915_destroy_error_state(dev);

	if (dev->msi_enabled)
		drm_pci_disable_msi(dev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_LOCK(dev);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		DRM_UNLOCK(dev);
		i915_gem_cleanup_aliasing_ppgtt(dev);
#if 1
		KIB_NOTYET();
#else
		if (I915_HAS_FBC(dev) && i915_powersave)
			i915_cleanup_compression(dev);
#endif
		drm_mm_takedown(&dev_priv->mm.stolen);

		intel_cleanup_overlay(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	i915_gem_unload(dev);

	mtx_destroy(&dev_priv->irq_lock);

	if (dev_priv->tq != NULL)
		taskqueue_free(dev_priv->tq);

	bus_generic_detach(dev->dev);
	drm_rmmap(dev, dev_priv->mmio_map);
	intel_teardown_gmbus(dev);

	mtx_destroy(&dev_priv->dpio_lock);
	mtx_destroy(&dev_priv->error_lock);
	mtx_destroy(&dev_priv->error_completion_lock);
	mtx_destroy(&dev_priv->rps_lock);
	free(dev->dev_private, DRM_MEM_DRIVER);

	return (0);
}

int
i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv;

	i915_file_priv = malloc(sizeof(*i915_file_priv), DRM_MEM_FILES,
	    M_WAITOK | M_ZERO);

	mtx_init(&i915_file_priv->mm.lck, "915fp", NULL, MTX_DEF);
	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
	file_priv->driver_priv = i915_file_priv;

	drm_gem_names_init(&i915_file_priv->context_idr);

	return (0);
}

void
i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
#if 1
		KIB_NOTYET();
#else
		drm_fb_helper_restore();
		vga_switcheroo_process_delayed_switch();
#endif
		return;
	}
	i915_gem_lastclose(dev);
	i915_dma_cleanup(dev);
}

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{

	i915_gem_context_close(dev, file_priv);
	i915_gem_release(dev, file_priv);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	mtx_destroy(&i915_file_priv->mm.lck);
	free(i915_file_priv, DRM_MEM_FILES);
}

struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
};

#ifdef COMPAT_FREEBSD32
extern struct drm_ioctl_desc i915_compat_ioctls[];
extern int i915_compat_ioctls_nr;
#endif

struct drm_driver i915_driver_info = {
	/*
	 * FIXME Linux<->FreeBSD: DRIVER_USE_MTRR is commented out on
	 * Linux.
	 */
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,

	.buf_priv_size	= sizeof(drm_i915_private_t),
	.load		= i915_driver_load,
	.open		= i915_driver_open,
	.unload		= i915_driver_unload,
	.preclose	= i915_driver_preclose,
	.lastclose	= i915_driver_lastclose,
	.postclose	= i915_driver_postclose,
	.device_is_agp	= i915_driver_device_is_agp,
	.master_create	= i915_master_create,
	.master_destroy	= i915_master_destroy,
	.gem_init_object = i915_gem_init_object,
	.gem_free_object = i915_gem_free_object,
	.gem_pager_ops	= &i915_gem_pager_ops,
	.dumb_create	= i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy	= i915_gem_dumb_destroy,
	.sysctl_init	= i915_sysctl_init,
	.sysctl_cleanup	= i915_sysctl_cleanup,

	.ioctls		= i915_ioctls,
#ifdef COMPAT_FREEBSD32
	.compat_ioctls  = i915_compat_ioctls,
	.num_compat_ioctls = &i915_compat_ioctls_nr,
#endif
	.num_ioctls	= ARRAY_SIZE(i915_ioctls),

	.name		= DRIVER_NAME,
	.desc		= DRIVER_DESC,
	.date		= DRIVER_DATE,
	.major		= DRIVER_MAJOR,
	.minor		= DRIVER_MINOR,
	.patchlevel	= DRIVER_PATCHLEVEL,
};

/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp.  Otherwise
 * the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}