intel_ringbuffer.c revision 280369
1/*
2 * Copyright © 2008-2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 *    Eric Anholt <eric@anholt.net>
25 *    Zou Nan hai <nanhai.zou@intel.com>
26 *    Xiang Hai hao <haihao.xiang@intel.com>
27 *
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_ringbuffer.c 280369 2015-03-23 13:38:33Z kib $");
32
33#include <dev/drm2/drmP.h>
34#include <dev/drm2/drm.h>
35#include <dev/drm2/i915/i915_drm.h>
36#include <dev/drm2/i915/i915_drv.h>
37#include <dev/drm2/i915/intel_drv.h>
38#include <dev/drm2/i915/intel_ringbuffer.h>
39#include <sys/sched.h>
40#include <sys/sf_buf.h>
41
42/*
43 * 965+ support PIPE_CONTROL commands, which provide finer grained control
44 * over cache flushing.
45 */
46struct pipe_control {
47	struct drm_i915_gem_object *obj;
48	volatile u32 *cpu_page;
49	u32 gtt_offset;
50};
51
52void
53i915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno)
54{
55	struct drm_i915_private *dev_priv;
56
57	if (ring->trace_irq_seqno == 0) {
58		dev_priv = ring->dev->dev_private;
59		mtx_lock(&dev_priv->irq_lock);
60		if (ring->irq_get(ring))
61			ring->trace_irq_seqno = seqno;
62		mtx_unlock(&dev_priv->irq_lock);
63	}
64}
65
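/*
 * Bytes available for emission between ring->tail and the cached
 * ring->head, modulo the ring size.  An 8-byte gap is reserved so the
 * tail never catches up with the head (head == tail would read back
 * as an empty ring).
 */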
66static inline int ring_space(struct intel_ring_buffer *ring)
67{
68	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
69	if (space < 0)
70		space += ring->size;
71	return space;
72}
73
74static int
75gen2_render_ring_flush(struct intel_ring_buffer *ring,
76		       u32	invalidate_domains,
77		       u32	flush_domains)
78{
79	u32 cmd;
80	int ret;
81
82	cmd = MI_FLUSH;
83	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
84		cmd |= MI_NO_WRITE_FLUSH;
85
86	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
87		cmd |= MI_READ_FLUSH;
88
89	ret = intel_ring_begin(ring, 2);
90	if (ret)
91		return ret;
92
93	intel_ring_emit(ring, cmd);
94	intel_ring_emit(ring, MI_NOOP);
95	intel_ring_advance(ring);
96
97	return 0;
98}
99
100static int
101gen4_render_ring_flush(struct intel_ring_buffer *ring,
102		  u32	invalidate_domains,
103		  u32	flush_domains)
104{
105	struct drm_device *dev = ring->dev;
106	uint32_t cmd;
107	int ret;
108
109	/*
110	 * read/write caches:
111	 *
112	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
113	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
114	 * also flushed at 2d versus 3d pipeline switches.
115	 *
116	 * read-only caches:
117	 *
118	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
119	 * MI_READ_FLUSH is set, and is always flushed on 965.
120	 *
121	 * I915_GEM_DOMAIN_COMMAND may not exist?
122	 *
123	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
124	 * invalidated when MI_EXE_FLUSH is set.
125	 *
126	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
127	 * invalidated with every MI_FLUSH.
128	 *
129	 * TLBs:
130	 *
131	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
132	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
133	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
134	 * are flushed at any MI_FLUSH.
135	 */
136
137	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
138	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
139		cmd &= ~MI_NO_WRITE_FLUSH;
140	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
141		cmd |= MI_EXE_FLUSH;
142
143	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
144	    (IS_G4X(dev) || IS_GEN5(dev)))
145		cmd |= MI_INVALIDATE_ISP;
146
147	ret = intel_ring_begin(ring, 2);
148	if (ret)
149		return ret;
150
151	intel_ring_emit(ring, cmd);
152	intel_ring_emit(ring, MI_NOOP);
153	intel_ring_advance(ring);
154
155	return 0;
156}
157
158/**
159 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
160 * implementing two workarounds on gen6.  From section 1.4.7.1
161 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
162 *
163 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
164 * produced by non-pipelined state commands), software needs to first
165 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
166 * 0.
167 *
168 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
169 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
170 *
171 * And the workaround for these two requires this workaround first:
172 *
173 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
174 * BEFORE the pipe-control with a post-sync op and no write-cache
175 * flushes.
176 *
177 * And this last workaround is tricky because of the requirements on
178 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
179 * volume 2 part 1:
180 *
181 *     "1 of the following must also be set:
182 *      - Render Target Cache Flush Enable ([12] of DW1)
183 *      - Depth Cache Flush Enable ([0] of DW1)
184 *      - Stall at Pixel Scoreboard ([1] of DW1)
185 *      - Depth Stall ([13] of DW1)
186 *      - Post-Sync Operation ([13] of DW1)
187 *      - Notify Enable ([8] of DW1)"
188 *
189 * The cache flushes require the workaround flush that triggered this
190 * one, so we can't use it.  Depth stall would trigger the same.
191 * Post-sync nonzero is what triggered this second workaround, so we
192 * can't use that one either.  Notify enable is IRQs, which aren't
193 * really our business.  That leaves only stall at scoreboard.
194 */
195static int
196intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
197{
198	struct pipe_control *pc = ring->private;
199	u32 scratch_addr = pc->gtt_offset + 128;
200	int ret;
201
202
203	ret = intel_ring_begin(ring, 6);
204	if (ret)
205		return ret;
206
207	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
208	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
209			PIPE_CONTROL_STALL_AT_SCOREBOARD);
210	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
211	intel_ring_emit(ring, 0); /* low dword */
212	intel_ring_emit(ring, 0); /* high dword */
213	intel_ring_emit(ring, MI_NOOP);
214	intel_ring_advance(ring);
215
216	ret = intel_ring_begin(ring, 6);
217	if (ret)
218		return ret;
219
220	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
221	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
222	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
223	intel_ring_emit(ring, 0);
224	intel_ring_emit(ring, 0);
225	intel_ring_emit(ring, MI_NOOP);
226	intel_ring_advance(ring);
227
228	return 0;
229}
230
231static int
232gen6_render_ring_flush(struct intel_ring_buffer *ring,
233                         u32 invalidate_domains, u32 flush_domains)
234{
235	u32 flags = 0;
236	struct pipe_control *pc = ring->private;
237	u32 scratch_addr = pc->gtt_offset + 128;
238	int ret;
239
240	/* Force SNB workarounds for PIPE_CONTROL flushes */
241	intel_emit_post_sync_nonzero_flush(ring);
242
243	/* Just flush everything.  Experiments have shown that reducing the
244	 * number of bits based on the write domains has little performance
245	 * impact.
246	 */
247	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
248	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
249	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
250	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
251	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
252	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
253	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
254
255	ret = intel_ring_begin(ring, 6);
256	if (ret)
257		return ret;
258
259	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
260	intel_ring_emit(ring, flags);
261	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
262	intel_ring_emit(ring, 0); /* lower dword */
263	intel_ring_emit(ring, 0); /* upper dword */
264	intel_ring_emit(ring, MI_NOOP);
265	intel_ring_advance(ring);
266
267	return 0;
268}
269
270static void ring_write_tail(struct intel_ring_buffer *ring,
271			    uint32_t value)
272{
273	drm_i915_private_t *dev_priv = ring->dev->dev_private;
274	I915_WRITE_TAIL(ring, value);
275}
276
277u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
278{
279	drm_i915_private_t *dev_priv = ring->dev->dev_private;
280	uint32_t acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
281			RING_ACTHD(ring->mmio_base) : ACTHD;
282
283	return I915_READ(acthd_reg);
284}
285
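/*
 * Common ring initialization: stop the ring, program the start address
 * from the pinned ring object, work around G45 leaving a stale head
 * pointer, then enable the ring and wait for the hardware to report it
 * valid with the head reset to zero.
 */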
286static int init_ring_common(struct intel_ring_buffer *ring)
287{
288	drm_i915_private_t *dev_priv = ring->dev->dev_private;
289	struct drm_i915_gem_object *obj = ring->obj;
290	uint32_t head;
291
292	/* Stop the ring if it's running. */
293	I915_WRITE_CTL(ring, 0);
294	I915_WRITE_HEAD(ring, 0);
295	ring->write_tail(ring, 0);
296
297	/* Initialize the ring. */
298	I915_WRITE_START(ring, obj->gtt_offset);
299	head = I915_READ_HEAD(ring) & HEAD_ADDR;
300
301	/* G45 ring initialization fails to reset head to zero */
302	if (head != 0) {
303		DRM_DEBUG("%s head not reset to zero "
304			      "ctl %08x head %08x tail %08x start %08x\n",
305			      ring->name,
306			      I915_READ_CTL(ring),
307			      I915_READ_HEAD(ring),
308			      I915_READ_TAIL(ring),
309			      I915_READ_START(ring));
310
311		I915_WRITE_HEAD(ring, 0);
312
313		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
314			DRM_ERROR("failed to set %s head to zero "
315				  "ctl %08x head %08x tail %08x start %08x\n",
316				  ring->name,
317				  I915_READ_CTL(ring),
318				  I915_READ_HEAD(ring),
319				  I915_READ_TAIL(ring),
320				  I915_READ_START(ring));
321		}
322	}
323
324	I915_WRITE_CTL(ring,
325			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
326			| RING_VALID);
327
328	/* If the head is still not zero, the ring is dead */
329	if (_intel_wait_for(ring->dev,
330	    (I915_READ_CTL(ring) & RING_VALID) != 0 &&
331	     I915_READ_START(ring) == obj->gtt_offset &&
332	     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0,
333	    50, 1, "915rii")) {
334		DRM_ERROR("%s initialization failed "
335				"ctl %08x head %08x tail %08x start %08x\n",
336				ring->name,
337				I915_READ_CTL(ring),
338				I915_READ_HEAD(ring),
339				I915_READ_TAIL(ring),
340				I915_READ_START(ring));
341		return -EIO;
342	}
343
344	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
345		i915_kernel_lost_context(ring->dev);
346	else {
347		ring->head = I915_READ_HEAD(ring);
348		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
349		ring->space = ring_space(ring);
350	}
351
352	return 0;
353}
354
355static int
356init_pipe_control(struct intel_ring_buffer *ring)
357{
358	struct pipe_control *pc;
359	struct drm_i915_gem_object *obj;
360	int ret;
361
362	if (ring->private)
363		return 0;
364
365	pc = malloc(sizeof(*pc), DRM_I915_GEM, M_WAITOK);
366	if (!pc)
367		return -ENOMEM;
368
369	obj = i915_gem_alloc_object(ring->dev, 4096);
370	if (obj == NULL) {
371		DRM_ERROR("Failed to allocate seqno page\n");
372		ret = -ENOMEM;
373		goto err;
374	}
375
376	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
377
378	ret = i915_gem_object_pin(obj, 4096, true);
379	if (ret)
380		goto err_unref;
381
382	pc->gtt_offset = obj->gtt_offset;
383	pc->cpu_page = (uint32_t *)kva_alloc(PAGE_SIZE);
384	if (pc->cpu_page == NULL)
385		goto err_unpin;
386	pmap_qenter((uintptr_t)pc->cpu_page, &obj->pages[0], 1);
387	pmap_invalidate_cache_range((vm_offset_t)pc->cpu_page,
388	    (vm_offset_t)pc->cpu_page + PAGE_SIZE, FALSE);
389
390	pc->obj = obj;
391	ring->private = pc;
392	return 0;
393
394err_unpin:
395	i915_gem_object_unpin(obj);
396err_unref:
397	drm_gem_object_unreference(&obj->base);
398err:
399	free(pc, DRM_I915_GEM);
400	return ret;
401}
402
403static void
404cleanup_pipe_control(struct intel_ring_buffer *ring)
405{
406	struct pipe_control *pc = ring->private;
407	struct drm_i915_gem_object *obj;
408
409	if (!ring->private)
410		return;
411
412	obj = pc->obj;
413	pmap_qremove((vm_offset_t)pc->cpu_page, 1);
414	kva_free((uintptr_t)pc->cpu_page, PAGE_SIZE);
415	i915_gem_object_unpin(obj);
416	drm_gem_object_unreference(&obj->base);
417
418	free(pc, DRM_I915_GEM);
419	ring->private = NULL;
420}
421
422static int init_render_ring(struct intel_ring_buffer *ring)
423{
424	struct drm_device *dev = ring->dev;
425	struct drm_i915_private *dev_priv = dev->dev_private;
426	int ret = init_ring_common(ring);
427
428	if (INTEL_INFO(dev)->gen > 3) {
429		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
430		if (IS_GEN7(dev))
431			I915_WRITE(GFX_MODE_GEN7,
432				   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
433				   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
434	}
435
436	if (INTEL_INFO(dev)->gen >= 5) {
437		ret = init_pipe_control(ring);
438		if (ret)
439			return ret;
440	}
441
442
443	if (IS_GEN6(dev)) {
444		/* From the Sandybridge PRM, volume 1 part 3, page 24:
445		 * "If this bit is set, STCunit will have LRA as replacement
446		 *  policy. [...] This bit must be reset.  LRA replacement
447		 *  policy is not supported."
448		 */
449		I915_WRITE(CACHE_MODE_0,
450			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
451
452		/* This is not explicitly set for GEN6, so read the register.
453		 * see intel_ring_mi_set_context() for why we care.
454		 * TODO: consider explicitly setting the bit for GEN5
455		 */
456		ring->itlb_before_ctx_switch =
457			!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
458	}
459
460	if (INTEL_INFO(dev)->gen >= 6)
461		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
462
463	return ret;
464}
465
466static void render_ring_cleanup(struct intel_ring_buffer *ring)
467{
468	if (!ring->private)
469		return;
470
471	cleanup_pipe_control(ring);
472}
473
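/*
 * Write a seqno into one of the other rings' semaphore mailbox
 * registers (MI_SEMAPHORE_MBOX update), so that ring can later wait on
 * it via gen6_ring_sync().
 */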
474static void
475update_mboxes(struct intel_ring_buffer *ring,
476	    u32 seqno,
477	    u32 mmio_offset)
478{
479	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
480			      MI_SEMAPHORE_GLOBAL_GTT |
481			      MI_SEMAPHORE_REGISTER |
482			      MI_SEMAPHORE_UPDATE);
483	intel_ring_emit(ring, seqno);
484	intel_ring_emit(ring, mmio_offset);
485}
486
487/**
488 * gen6_add_request - Update the semaphore mailbox registers
489 *
490 * @ring - ring that is adding a request
491 * @seqno - return seqno stuck into the ring
492 *
493 * Update the mailbox registers in the *other* rings with the current seqno.
494 * This acts like a signal in the canonical semaphore.
495 */
496static int
497gen6_add_request(struct intel_ring_buffer *ring,
498		 u32 *seqno)
499{
500	u32 mbox1_reg;
501	u32 mbox2_reg;
502	int ret;
503
504	ret = intel_ring_begin(ring, 10);
505	if (ret)
506		return ret;
507
508	mbox1_reg = ring->signal_mbox[0];
509	mbox2_reg = ring->signal_mbox[1];
510
511	*seqno = i915_gem_next_request_seqno(ring);
512
513	update_mboxes(ring, *seqno, mbox1_reg);
514	update_mboxes(ring, *seqno, mbox2_reg);
515	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
516	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
517	intel_ring_emit(ring, *seqno);
518	intel_ring_emit(ring, MI_USER_INTERRUPT);
519	intel_ring_advance(ring);
520
521	return 0;
522}
523
524/**
525 * intel_ring_sync - sync the waiter to the signaller on seqno
526 *
527 * @waiter - ring that is waiting
528 * @signaller - ring which has, or will signal
529 * @seqno - seqno which the waiter will block on
530 */
531static int
532gen6_ring_sync(struct intel_ring_buffer *waiter,
533	       struct intel_ring_buffer *signaller,
534	       u32 seqno)
535{
536	int ret;
537	u32 dw1 = MI_SEMAPHORE_MBOX |
538		  MI_SEMAPHORE_COMPARE |
539		  MI_SEMAPHORE_REGISTER;
540
541	/* Throughout all of the GEM code, seqno passed implies our current
542	 * seqno is >= the last seqno executed. However for hardware the
543	 * comparison is strictly greater than.
544	 */
545	seqno -= 1;
546
547	if (signaller->semaphore_register[waiter->id] ==
548	    MI_SEMAPHORE_SYNC_INVALID)
549		printf("gen6_ring_sync semaphore_register %d invalid\n",
550		    waiter->id);
551
552	ret = intel_ring_begin(waiter, 4);
553	if (ret)
554		return ret;
555
556	intel_ring_emit(waiter,
557			dw1 | signaller->semaphore_register[waiter->id]);
558	intel_ring_emit(waiter, seqno);
559	intel_ring_emit(waiter, 0);
560	intel_ring_emit(waiter, MI_NOOP);
561	intel_ring_advance(waiter);
562
563	return 0;
564}
565
566int render_ring_sync_to(struct intel_ring_buffer *waiter,
567    struct intel_ring_buffer *signaller, u32 seqno);
568int gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
569    struct intel_ring_buffer *signaller, u32 seqno);
570int gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
571    struct intel_ring_buffer *signaller, u32 seqno);
572
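/*
 * Emit a single depth-stalled PIPE_CONTROL qword write to addr__.
 * pc_render_add_request() chains several of these to distinct
 * cachelines in the scratch page to flush outstanding writes before
 * raising the PIPE_NOTIFY interrupt (see the comment in that function).
 */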
573#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
574do {									\
575	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |		\
576		 PIPE_CONTROL_DEPTH_STALL);				\
577	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);			\
578	intel_ring_emit(ring__, 0);							\
579	intel_ring_emit(ring__, 0);							\
580} while (0)
581
582static int
583pc_render_add_request(struct intel_ring_buffer *ring,
584		      uint32_t *result)
585{
586	u32 seqno = i915_gem_next_request_seqno(ring);
587	struct pipe_control *pc = ring->private;
588	u32 scratch_addr = pc->gtt_offset + 128;
589	int ret;
590
591	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
592	 * incoherent with writes to memory, i.e. completely fubar,
593	 * so we need to use PIPE_NOTIFY instead.
594	 *
595	 * However, we also need to workaround the qword write
596	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
597	 * memory before requesting an interrupt.
598	 */
599	ret = intel_ring_begin(ring, 32);
600	if (ret)
601		return ret;
602
603	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
604			PIPE_CONTROL_WRITE_FLUSH |
605			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
606	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
607	intel_ring_emit(ring, seqno);
608	intel_ring_emit(ring, 0);
609	PIPE_CONTROL_FLUSH(ring, scratch_addr);
610	scratch_addr += 128; /* write to separate cachelines */
611	PIPE_CONTROL_FLUSH(ring, scratch_addr);
612	scratch_addr += 128;
613	PIPE_CONTROL_FLUSH(ring, scratch_addr);
614	scratch_addr += 128;
615	PIPE_CONTROL_FLUSH(ring, scratch_addr);
616	scratch_addr += 128;
617	PIPE_CONTROL_FLUSH(ring, scratch_addr);
618	scratch_addr += 128;
619	PIPE_CONTROL_FLUSH(ring, scratch_addr);
620	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
621			PIPE_CONTROL_WRITE_FLUSH |
622			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
623			PIPE_CONTROL_NOTIFY);
624	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
625	intel_ring_emit(ring, seqno);
626	intel_ring_emit(ring, 0);
627	intel_ring_advance(ring);
628
629	*result = seqno;
630	return 0;
631}
632
633static u32
634gen6_ring_get_seqno(struct intel_ring_buffer *ring)
635{
636	struct drm_device *dev = ring->dev;
637
638	/* Workaround to force correct ordering between irq and seqno writes on
639	 * ivb (and maybe also on snb) by reading from a CS register (like
640	 * ACTHD) before reading the status page. */
641	if (/* IS_GEN6(dev) || */IS_GEN7(dev))
642		intel_ring_get_active_head(ring);
643	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
644}
645
646static uint32_t
647ring_get_seqno(struct intel_ring_buffer *ring)
648{
649	if (ring->status_page.page_addr == NULL)
650		return (-1);
651	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
652}
653
654static uint32_t
655pc_render_get_seqno(struct intel_ring_buffer *ring)
656{
657	struct pipe_control *pc = ring->private;
658	if (pc != NULL)
659		return pc->cpu_page[0];
660	else
661		return (-1);
662}
663
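/*
 * Ring interrupt enable/disable is reference counted: the first
 * irq_get unmasks the ring's bit in the interrupt mask register under
 * dev_priv->irq_lock, and the matching last irq_put masks it again.
 * The gen5, i9xx and i8xx variants below differ mainly in which mask
 * register they program (GTIMR vs. IMR, 32- vs. 16-bit writes); the
 * gen6 variant also programs the per-ring IMR and takes a forcewake
 * reference for the duration.
 */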
664static bool
665gen5_ring_get_irq(struct intel_ring_buffer *ring)
666{
667	struct drm_device *dev = ring->dev;
668	drm_i915_private_t *dev_priv = dev->dev_private;
669
670	if (!dev->irq_enabled)
671		return false;
672
673	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
674	if (ring->irq_refcount++ == 0) {
675		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
676		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
677		POSTING_READ(GTIMR);
678	}
679
680	return true;
681}
682
683static void
684gen5_ring_put_irq(struct intel_ring_buffer *ring)
685{
686	struct drm_device *dev = ring->dev;
687	drm_i915_private_t *dev_priv = dev->dev_private;
688
689	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
690	if (--ring->irq_refcount == 0) {
691		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
692		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
693		POSTING_READ(GTIMR);
694	}
695}
696
697static bool
698i9xx_ring_get_irq(struct intel_ring_buffer *ring)
699{
700	struct drm_device *dev = ring->dev;
701	drm_i915_private_t *dev_priv = dev->dev_private;
702
703	if (!dev->irq_enabled)
704		return false;
705
706	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
707	if (ring->irq_refcount++ == 0) {
708		dev_priv->irq_mask &= ~ring->irq_enable_mask;
709		I915_WRITE(IMR, dev_priv->irq_mask);
710		POSTING_READ(IMR);
711	}
712
713	return true;
714}
715
716static void
717i9xx_ring_put_irq(struct intel_ring_buffer *ring)
718{
719	struct drm_device *dev = ring->dev;
720	drm_i915_private_t *dev_priv = dev->dev_private;
721
722	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
723	if (--ring->irq_refcount == 0) {
724		dev_priv->irq_mask |= ring->irq_enable_mask;
725		I915_WRITE(IMR, dev_priv->irq_mask);
726		POSTING_READ(IMR);
727	}
728}
729
730static bool
731i8xx_ring_get_irq(struct intel_ring_buffer *ring)
732{
733	struct drm_device *dev = ring->dev;
734	drm_i915_private_t *dev_priv = dev->dev_private;
735
736	if (!dev->irq_enabled)
737		return false;
738
739	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
740	if (ring->irq_refcount++ == 0) {
741		dev_priv->irq_mask &= ~ring->irq_enable_mask;
742		I915_WRITE16(IMR, dev_priv->irq_mask);
743		POSTING_READ16(IMR);
744	}
745
746	return true;
747}
748
749static void
750i8xx_ring_put_irq(struct intel_ring_buffer *ring)
751{
752	struct drm_device *dev = ring->dev;
753	drm_i915_private_t *dev_priv = dev->dev_private;
754
755	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
756	if (--ring->irq_refcount == 0) {
757		dev_priv->irq_mask |= ring->irq_enable_mask;
758		I915_WRITE16(IMR, dev_priv->irq_mask);
759		POSTING_READ16(IMR);
760	}
761}
762
763void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
764{
765	struct drm_device *dev = ring->dev;
766	drm_i915_private_t *dev_priv = dev->dev_private;
767	uint32_t mmio = 0;
768
769	/* The ring status page addresses are no longer next to the rest of
770	 * the ring registers as of gen7.
771	 */
772	if (IS_GEN7(dev)) {
773		switch (ring->id) {
774		case RCS:
775			mmio = RENDER_HWS_PGA_GEN7;
776			break;
777		case BCS:
778			mmio = BLT_HWS_PGA_GEN7;
779			break;
780		case VCS:
781			mmio = BSD_HWS_PGA_GEN7;
782			break;
783		}
784	} else if (IS_GEN6(dev)) {
785		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
786	} else {
787		mmio = RING_HWS_PGA(ring->mmio_base);
788	}
789
790	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
791	POSTING_READ(mmio);
792}
793
794static int
795bsd_ring_flush(struct intel_ring_buffer *ring,
796	       uint32_t     invalidate_domains,
797	       uint32_t     flush_domains)
798{
799	int ret;
800
801	ret = intel_ring_begin(ring, 2);
802	if (ret)
803		return ret;
804
805	intel_ring_emit(ring, MI_FLUSH);
806	intel_ring_emit(ring, MI_NOOP);
807	intel_ring_advance(ring);
808	return 0;
809}
810
811static int
812i9xx_add_request(struct intel_ring_buffer *ring,
813		 u32 *result)
814{
815	u32 seqno;
816	int ret;
817
818	ret = intel_ring_begin(ring, 4);
819	if (ret)
820		return ret;
821
822	seqno = i915_gem_next_request_seqno(ring);
823
824	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
825	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
826	intel_ring_emit(ring, seqno);
827	intel_ring_emit(ring, MI_USER_INTERRUPT);
828	intel_ring_advance(ring);
829
830	*result = seqno;
831	return 0;
832}
833
834static bool
835gen6_ring_get_irq(struct intel_ring_buffer *ring)
836{
837	struct drm_device *dev = ring->dev;
838	drm_i915_private_t *dev_priv = dev->dev_private;
839
840	if (!dev->irq_enabled)
841	       return false;
842
843	gen6_gt_force_wake_get(dev_priv);
844
845	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
846	if (ring->irq_refcount++ == 0) {
847		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
848		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
849		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
850		POSTING_READ(GTIMR);
851	}
852
853	return true;
854}
855
856static void
857gen6_ring_put_irq(struct intel_ring_buffer *ring)
858{
859	struct drm_device *dev = ring->dev;
860	drm_i915_private_t *dev_priv = dev->dev_private;
861
862	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
863	if (--ring->irq_refcount == 0) {
864		I915_WRITE_IMR(ring, ~0);
865		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
866		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
867		POSTING_READ(GTIMR);
868	}
869
870	gen6_gt_force_wake_put(dev_priv);
871}
872
873static int
874i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
875{
876	int ret;
877
878	ret = intel_ring_begin(ring, 2);
879	if (ret)
880		return ret;
881
882	intel_ring_emit(ring,
883			MI_BATCH_BUFFER_START |
884			MI_BATCH_GTT |
885			MI_BATCH_NON_SECURE_I965);
886	intel_ring_emit(ring, offset);
887	intel_ring_advance(ring);
888
889	return 0;
890}
891
892static int
893i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
894				u32 offset, u32 len)
895{
896	int ret;
897
898	ret = intel_ring_begin(ring, 4);
899	if (ret)
900		return ret;
901
902	intel_ring_emit(ring, MI_BATCH_BUFFER);
903	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
904	intel_ring_emit(ring, offset + len - 8);
905	intel_ring_emit(ring, 0);
906	intel_ring_advance(ring);
907
908	return 0;
909}
910
911static int
912i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
913				u32 offset, u32 len)
914{
915	int ret;
916
917	ret = intel_ring_begin(ring, 2);
918	if (ret)
919		return ret;
920
921	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
922	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
923	intel_ring_advance(ring);
924
925	return 0;
926}
927
928static void cleanup_status_page(struct intel_ring_buffer *ring)
929{
930	struct drm_i915_gem_object *obj;
931
932	obj = ring->status_page.obj;
933	if (obj == NULL)
934		return;
935
936	pmap_qremove((vm_offset_t)ring->status_page.page_addr, 1);
937	kva_free((vm_offset_t)ring->status_page.page_addr,
938	    PAGE_SIZE);
939	i915_gem_object_unpin(obj);
940	drm_gem_object_unreference(&obj->base);
941	ring->status_page.obj = NULL;
942}
943
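/*
 * Allocate and pin a page-sized GEM object for the hardware status
 * page, map it into kernel virtual address space, and point the
 * hardware at it via intel_ring_setup_status_page().
 */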
944static int init_status_page(struct intel_ring_buffer *ring)
945{
946	struct drm_device *dev = ring->dev;
947	struct drm_i915_gem_object *obj;
948	int ret;
949
950	obj = i915_gem_alloc_object(dev, 4096);
951	if (obj == NULL) {
952		DRM_ERROR("Failed to allocate status page\n");
953		ret = -ENOMEM;
954		goto err;
955	}
956
957	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
958
959	ret = i915_gem_object_pin(obj, 4096, true);
960	if (ret != 0) {
961		goto err_unref;
962	}
963
964	ring->status_page.gfx_addr = obj->gtt_offset;
965	ring->status_page.page_addr = (void *)kva_alloc(PAGE_SIZE);
966	if (ring->status_page.page_addr == NULL) {
967		goto err_unpin;
968	}
969	pmap_qenter((vm_offset_t)ring->status_page.page_addr, &obj->pages[0],
970	    1);
971	pmap_invalidate_cache_range((vm_offset_t)ring->status_page.page_addr,
972	    (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE, FALSE);
973	ring->status_page.obj = obj;
974	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
975
976	intel_ring_setup_status_page(ring);
977	DRM_DEBUG("i915: init_status_page %s hws offset: 0x%08x\n",
978			ring->name, ring->status_page.gfx_addr);
979
980	return 0;
981
982err_unpin:
983	i915_gem_object_unpin(obj);
984err_unref:
985	drm_gem_object_unreference(&obj->base);
986err:
987	return ret;
988}
989
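/*
 * Common setup shared by all rings: allocate the status page (when the
 * hardware needs one), allocate and pin the ring object, map it
 * write-combining through the GTT aperture, and run the ring-specific
 * init hook.
 */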
990static int intel_init_ring_buffer(struct drm_device *dev,
991			   struct intel_ring_buffer *ring)
992{
993	struct drm_i915_gem_object *obj;
994	int ret;
995
996	ring->dev = dev;
997	INIT_LIST_HEAD(&ring->active_list);
998	INIT_LIST_HEAD(&ring->request_list);
999	INIT_LIST_HEAD(&ring->gpu_write_list);
1000	ring->size = 32 * PAGE_SIZE;
1001
1002	if (I915_NEED_GFX_HWS(dev)) {
1003		ret = init_status_page(ring);
1004		if (ret)
1005			return ret;
1006	}
1007
1008	obj = i915_gem_alloc_object(dev, ring->size);
1009	if (obj == NULL) {
1010		DRM_ERROR("Failed to allocate ringbuffer\n");
1011		ret = -ENOMEM;
1012		goto err_hws;
1013	}
1014
1015	ring->obj = obj;
1016
1017	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
1018	if (ret)
1019		goto err_unref;
1020
1021	ring->virtual_start = pmap_mapdev_attr(
1022	    dev->agp->base + obj->gtt_offset, ring->size,
1023	    VM_MEMATTR_WRITE_COMBINING);
1024	if (ring->virtual_start == NULL) {
1025		DRM_ERROR("Failed to map ringbuffer.\n");
1026		ret = -EINVAL;
1027		goto err_unpin;
1028	}
1029
1030	ret = ring->init(ring);
1031	if (ret)
1032		goto err_unmap;
1033
1034	/* Workaround an erratum on the i830 which causes a hang if
1035	 * the TAIL pointer points to within the last 2 cachelines
1036	 * of the buffer.
1037	 */
1038	ring->effective_size = ring->size;
1039	if (IS_I830(ring->dev) || IS_845G(ring->dev))
1040		ring->effective_size -= 128;
1041
1042	return 0;
1043
1044err_unmap:
1045	pmap_unmapdev((vm_offset_t)ring->virtual_start, ring->size);
1046err_unpin:
1047	i915_gem_object_unpin(obj);
1048err_unref:
1049	drm_gem_object_unreference(&obj->base);
1050	ring->obj = NULL;
1051err_hws:
1052	cleanup_status_page(ring);
1053	return ret;
1054}
1055
1056void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1057{
1058	struct drm_i915_private *dev_priv;
1059	int ret;
1060
1061	if (ring->obj == NULL)
1062		return;
1063
1064	/* Disable the ring buffer. The ring must be idle at this point */
1065	dev_priv = ring->dev->dev_private;
1066	ret = intel_wait_ring_idle(ring);
1067	I915_WRITE_CTL(ring, 0);
1068
1069	pmap_unmapdev((vm_offset_t)ring->virtual_start, ring->size);
1070
1071	i915_gem_object_unpin(ring->obj);
1072	drm_gem_object_unreference(&ring->obj->base);
1073	ring->obj = NULL;
1074
1075	if (ring->cleanup)
1076		ring->cleanup(ring);
1077
1078	cleanup_status_page(ring);
1079}
1080
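/*
 * When a request would run past the effective end of the ring, pad the
 * remainder with MI_NOOPs (waiting for enough space to drain first)
 * and restart emission at offset zero.
 */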
1081static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1082{
1083	uint32_t *virt;
1084	int rem = ring->size - ring->tail;
1085
1086	if (ring->space < rem) {
1087		int ret = intel_wait_ring_buffer(ring, rem);
1088		if (ret)
1089			return ret;
1090	}
1091
1092	virt = (uint32_t *)((char *)ring->virtual_start + ring->tail);
1093	rem /= 4;
1094	while (rem--)
1095		*virt++ = MI_NOOP;
1096
1097	ring->tail = 0;
1098	ring->space = ring_space(ring);
1099
1100	return 0;
1101}
1102
1103static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1104{
1105	struct drm_i915_private *dev_priv = ring->dev->dev_private;
1106	bool was_interruptible;
1107	int ret;
1108
1109	/* XXX As we have not yet audited all the paths to check that
1110	 * they are ready for ERESTARTSYS from intel_ring_begin, do not
1111	 * allow us to be interruptible by a signal.
1112	 */
1113	was_interruptible = dev_priv->mm.interruptible;
1114	dev_priv->mm.interruptible = false;
1115
1116	ret = i915_wait_request(ring, seqno);
1117
1118	dev_priv->mm.interruptible = was_interruptible;
1119	if (!ret)
1120		i915_gem_retire_requests_ring(ring);
1121
1122	return ret;
1123}
1124
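/*
 * Try to free ring space by retiring completed requests instead of
 * polling the hardware head: find the oldest outstanding request whose
 * completion leaves at least n bytes available, wait for its seqno,
 * and advance ring->head to the retired position.
 */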
1125static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1126{
1127	struct drm_i915_gem_request *request;
1128	u32 seqno = 0;
1129	int ret;
1130
1131	i915_gem_retire_requests_ring(ring);
1132
1133	if (ring->last_retired_head != -1) {
1134		ring->head = ring->last_retired_head;
1135		ring->last_retired_head = -1;
1136		ring->space = ring_space(ring);
1137		if (ring->space >= n)
1138			return 0;
1139	}
1140
1141	list_for_each_entry(request, &ring->request_list, list) {
1142		int space;
1143
1144		if (request->tail == -1)
1145			continue;
1146
1147		space = request->tail - (ring->tail + 8);
1148		if (space < 0)
1149			space += ring->size;
1150		if (space >= n) {
1151			seqno = request->seqno;
1152			break;
1153		}
1154
1155		/* Consume this request in case we need more space than
1156		 * is available and so need to prevent a race between
1157		 * updating last_retired_head and direct reads of
1158		 * I915_RING_HEAD. It also provides a nice sanity check.
1159		 */
1160		request->tail = -1;
1161	}
1162
1163	if (seqno == 0)
1164		return -ENOSPC;
1165
1166	ret = intel_ring_wait_seqno(ring, seqno);
1167	if (ret)
1168		return ret;
1169
1170	if (ring->last_retired_head == -1)
1171		return -ENOSPC;
1172
1173	ring->head = ring->last_retired_head;
1174	ring->last_retired_head = -1;
1175	ring->space = ring_space(ring);
1176	if (ring->space < n)
1177		return -ENOSPC;
1178
1179	return 0;
1180}
1181
1182int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
1183{
1184	struct drm_device *dev = ring->dev;
1185	struct drm_i915_private *dev_priv = dev->dev_private;
1186	int end;
1187	int ret;
1188
1189	ret = intel_ring_wait_request(ring, n);
1190	if (ret != -ENOSPC)
1191		return ret;
1192
1193	CTR1(KTR_DRM, "ring_wait_begin %s", ring->name);
1194	/* With GEM the hangcheck timer should kick us out of the loop,
1195	 * leaving it early runs the risk of corrupting GEM state (due
1196	 * to running on almost untested codepaths). But on resume
1197	 * timers don't work yet, so prevent a complete hang in that
1198	 * case by choosing an insanely large timeout. */
1199	end = ticks + hz * 60;
1200
1201	do {
1202		ring->head = I915_READ_HEAD(ring);
1203		ring->space = ring_space(ring);
1204		if (ring->space >= n) {
1205			CTR1(KTR_DRM, "ring_wait_end %s", ring->name);
1206			return 0;
1207		}
1208
1209#if 0
1210		if (dev->primary->master) {
1211			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1212			if (master_priv->sarea_priv)
1213				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1214		}
1215#else
1216		if (dev_priv->sarea_priv)
1217			dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1218#endif
1219
1220		pause("915rng", 1);
1221		if (atomic_load_acq_32(&dev_priv->mm.wedged) != 0) {
1222			CTR1(KTR_DRM, "ring_wait_end %s wedged", ring->name);
1223			return -EAGAIN;
1224		}
1225	} while (!time_after(ticks, end));
1226	CTR1(KTR_DRM, "ring_wait_end %s busy", ring->name);
1227	return -EBUSY;
1228}
1229
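/*
 * Reserve space for num_dwords dwords of commands, wrapping the ring
 * and/or waiting for space as needed.  Callers then emit the reserved
 * dwords with intel_ring_emit() and finish with intel_ring_advance(),
 * which publishes the new tail to the hardware.
 */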
1230int intel_ring_begin(struct intel_ring_buffer *ring,
1231		     int num_dwords)
1232{
1233	struct drm_i915_private *dev_priv = ring->dev->dev_private;
1234	int n = 4*num_dwords;
1235	int ret;
1236
1237	if (atomic_load_acq_int(&dev_priv->mm.wedged))
1238		return -EIO;
1239
1240	if (ring->tail + n > ring->effective_size) {
1241		ret = intel_wrap_ring_buffer(ring);
1242		if (ret != 0)
1243			return ret;
1244	}
1245
1246	if (ring->space < n) {
1247		ret = intel_wait_ring_buffer(ring, n);
1248		if (ret != 0)
1249			return ret;
1250	}
1251
1252	ring->space -= n;
1253	return 0;
1254}
1255
1256void intel_ring_advance(struct intel_ring_buffer *ring)
1257{
1258	struct drm_i915_private *dev_priv = ring->dev->dev_private;
1259
1260	ring->tail &= ring->size - 1;
1261	if (dev_priv->stop_rings & intel_ring_flag(ring))
1262		return;
1263	ring->write_tail(ring, ring->tail);
1264}
1265
1266
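/*
 * Gen6 BSD tail writes need a workaround: disable the RC ILDL PSMI
 * message, wait for the idle indicator, write the tail, then re-enable
 * the message.
 */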
1267static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1268				     u32 value)
1269{
1270	drm_i915_private_t *dev_priv = ring->dev->dev_private;
1271
1272	/* Every tail move must follow the sequence below */
1273	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1274	    GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1275	    GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
1276	I915_WRITE(GEN6_BSD_RNCID, 0x0);
1277
1278	if (_intel_wait_for(ring->dev,
1279	    (I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1280	     GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, 50,
1281	    true, "915g6i") != 0)
1282		DRM_ERROR("timed out waiting for IDLE Indicator\n");
1283
1284	I915_WRITE_TAIL(ring, value);
1285	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1286	    GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1287	    GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
1288}
1289
1290static int gen6_ring_flush(struct intel_ring_buffer *ring,
1291			   uint32_t invalidate, uint32_t flush)
1292{
1293	uint32_t cmd;
1294	int ret;
1295
1296	ret = intel_ring_begin(ring, 4);
1297	if (ret)
1298		return ret;
1299
1300	cmd = MI_FLUSH_DW;
1301	if (invalidate & I915_GEM_GPU_DOMAINS)
1302		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1303	intel_ring_emit(ring, cmd);
1304	intel_ring_emit(ring, 0);
1305	intel_ring_emit(ring, 0);
1306	intel_ring_emit(ring, MI_NOOP);
1307	intel_ring_advance(ring);
1308	return 0;
1309}
1310
1311static int
1312gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1313			      uint32_t offset, uint32_t len)
1314{
1315	int ret;
1316
1317	ret = intel_ring_begin(ring, 2);
1318	if (ret)
1319		return ret;
1320
1321	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
1322	/* bit0-7 is the length on GEN6+ */
1323	intel_ring_emit(ring, offset);
1324	intel_ring_advance(ring);
1325
1326	return 0;
1327}
1328
1329/* Blitter support (SandyBridge+) */
1330
1331static int blt_ring_flush(struct intel_ring_buffer *ring,
1332			  u32 invalidate, u32 flush)
1333{
1334	u32 cmd;
1335	int ret;
1336
1337	ret = intel_ring_begin(ring, 4);
1338	if (ret)
1339		return ret;
1340
1341	cmd = MI_FLUSH_DW;
1342	if (invalidate & I915_GEM_DOMAIN_RENDER)
1343		cmd |= MI_INVALIDATE_TLB;
1344	intel_ring_emit(ring, cmd);
1345	intel_ring_emit(ring, 0);
1346	intel_ring_emit(ring, 0);
1347	intel_ring_emit(ring, MI_NOOP);
1348	intel_ring_advance(ring);
1349	return 0;
1350}
1351
1352int intel_init_render_ring_buffer(struct drm_device *dev)
1353{
1354	drm_i915_private_t *dev_priv = dev->dev_private;
1355	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
1356
1357	ring->name = "render ring";
1358	ring->id = RCS;
1359	ring->mmio_base = RENDER_RING_BASE;
1360
1361	if (INTEL_INFO(dev)->gen >= 6) {
1362		ring->add_request = gen6_add_request;
1363		ring->flush = gen6_render_ring_flush;
1364		ring->irq_get = gen6_ring_get_irq;
1365		ring->irq_put = gen6_ring_put_irq;
1366		ring->irq_enable_mask = GT_USER_INTERRUPT;
1367		ring->get_seqno = gen6_ring_get_seqno;
1368		ring->sync_to = gen6_ring_sync;
1369		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
1370		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
1371		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
1372		ring->signal_mbox[0] = GEN6_VRSYNC;
1373		ring->signal_mbox[1] = GEN6_BRSYNC;
1374	} else if (IS_GEN5(dev)) {
1375		ring->add_request = pc_render_add_request;
1376		ring->flush = gen4_render_ring_flush;
1377		ring->get_seqno = pc_render_get_seqno;
1378		ring->irq_get = gen5_ring_get_irq;
1379		ring->irq_put = gen5_ring_put_irq;
1380		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
1381	} else {
1382		ring->add_request = i9xx_add_request;
1383		if (INTEL_INFO(dev)->gen < 4)
1384			ring->flush = gen2_render_ring_flush;
1385		else
1386			ring->flush = gen4_render_ring_flush;
1387		ring->get_seqno = ring_get_seqno;
1388		if (IS_GEN2(dev)) {
1389			ring->irq_get = i8xx_ring_get_irq;
1390			ring->irq_put = i8xx_ring_put_irq;
1391		} else {
1392			ring->irq_get = i9xx_ring_get_irq;
1393			ring->irq_put = i9xx_ring_put_irq;
1394		}
1395		ring->irq_enable_mask = I915_USER_INTERRUPT;
1396	}
1397	ring->write_tail = ring_write_tail;
1398	if (INTEL_INFO(dev)->gen >= 6)
1399		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1400	else if (INTEL_INFO(dev)->gen >= 4)
1401		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1402	else if (IS_I830(dev) || IS_845G(dev))
1403		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1404	else
1405		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
1406	ring->init = init_render_ring;
1407	ring->cleanup = render_ring_cleanup;
1408
1409
1410	if (!I915_NEED_GFX_HWS(dev)) {
1411		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1412		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1413	}
1414
1415	return intel_init_ring_buffer(dev, ring);
1416}
1417
1418int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1419{
1420	drm_i915_private_t *dev_priv = dev->dev_private;
1421	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
1422
1423	ring->name = "render ring";
1424	ring->id = RCS;
1425	ring->mmio_base = RENDER_RING_BASE;
1426
1427	if (INTEL_INFO(dev)->gen >= 6) {
1428		/* non-kms not supported on gen6+ */
1429		return -ENODEV;
1430	}
1431
1432	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
1433	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
1434	 * the special gen5 functions. */
1435	ring->add_request = i9xx_add_request;
1436	if (INTEL_INFO(dev)->gen < 4)
1437		ring->flush = gen2_render_ring_flush;
1438	else
1439		ring->flush = gen4_render_ring_flush;
1440	ring->get_seqno = ring_get_seqno;
1441	if (IS_GEN2(dev)) {
1442		ring->irq_get = i8xx_ring_get_irq;
1443		ring->irq_put = i8xx_ring_put_irq;
1444	} else {
1445		ring->irq_get = i9xx_ring_get_irq;
1446		ring->irq_put = i9xx_ring_put_irq;
1447	}
1448	ring->irq_enable_mask = I915_USER_INTERRUPT;
1449	ring->write_tail = ring_write_tail;
1450	if (INTEL_INFO(dev)->gen >= 4)
1451		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1452	else if (IS_I830(dev) || IS_845G(dev))
1453		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1454	else
1455		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
1456	ring->init = init_render_ring;
1457	ring->cleanup = render_ring_cleanup;
1458
1459	ring->dev = dev;
1460	INIT_LIST_HEAD(&ring->active_list);
1461	INIT_LIST_HEAD(&ring->request_list);
1462	INIT_LIST_HEAD(&ring->gpu_write_list);
1463
1464	ring->size = size;
1465	ring->effective_size = ring->size;
1466	if (IS_I830(ring->dev))
1467		ring->effective_size -= 128;
1468
1469	ring->virtual_start = pmap_mapdev_attr(start, size,
1470	    VM_MEMATTR_WRITE_COMBINING);
1471	if (ring->virtual_start == NULL) {
1472		DRM_ERROR("can not ioremap virtual address for"
1473			  " ring buffer\n");
1474		return -ENOMEM;
1475	}
1476
1477	return 0;
1478}
1479
1480int intel_init_bsd_ring_buffer(struct drm_device *dev)
1481{
1482	drm_i915_private_t *dev_priv = dev->dev_private;
1483	struct intel_ring_buffer *ring = &dev_priv->rings[VCS];
1484
1485	ring->name = "bsd ring";
1486	ring->id = VCS;
1487
1488	ring->write_tail = ring_write_tail;
1489	if (IS_GEN6(dev) || IS_GEN7(dev)) {
1490		ring->mmio_base = GEN6_BSD_RING_BASE;
1491		/* gen6 bsd needs a special wa for tail updates */
1492		if (IS_GEN6(dev))
1493			ring->write_tail = gen6_bsd_ring_write_tail;
1494		ring->flush = gen6_ring_flush;
1495		ring->add_request = gen6_add_request;
1496		ring->get_seqno = gen6_ring_get_seqno;
1497		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
1498		ring->irq_get = gen6_ring_get_irq;
1499		ring->irq_put = gen6_ring_put_irq;
1500		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1501		ring->sync_to = gen6_ring_sync;
1502		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
1503		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
1504		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
1505		ring->signal_mbox[0] = GEN6_RVSYNC;
1506		ring->signal_mbox[1] = GEN6_BVSYNC;
1507	} else {
1508		ring->mmio_base = BSD_RING_BASE;
1509		ring->flush = bsd_ring_flush;
1510		ring->add_request = i9xx_add_request;
1511		ring->get_seqno = ring_get_seqno;
1512		if (IS_GEN5(dev)) {
1513			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
1514			ring->irq_get = gen5_ring_get_irq;
1515			ring->irq_put = gen5_ring_put_irq;
1516		} else {
1517			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
1518			ring->irq_get = i9xx_ring_get_irq;
1519			ring->irq_put = i9xx_ring_put_irq;
1520		}
1521		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1522	}
1523	ring->init = init_ring_common;
1524
1525
1526	return intel_init_ring_buffer(dev, ring);
1527}
1528
1529int intel_init_blt_ring_buffer(struct drm_device *dev)
1530{
1531	drm_i915_private_t *dev_priv = dev->dev_private;
1532	struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
1533
1534	ring->name = "blitter ring";
1535	ring->id = BCS;
1536
1537	ring->mmio_base = BLT_RING_BASE;
1538	ring->write_tail = ring_write_tail;
1539	ring->flush = blt_ring_flush;
1540	ring->add_request = gen6_add_request;
1541	ring->get_seqno = gen6_ring_get_seqno;
1542	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
1543	ring->irq_get = gen6_ring_get_irq;
1544	ring->irq_put = gen6_ring_put_irq;
1545	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1546	ring->sync_to = gen6_ring_sync;
1547	ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
1548	ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
1549	ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
1550	ring->signal_mbox[0] = GEN6_RBSYNC;
1551	ring->signal_mbox[1] = GEN6_VBSYNC;
1552	ring->init = init_ring_common;
1553
1554	return intel_init_ring_buffer(dev, ring);
1555}
1556