i915_gem_gtt.c revision 282199
/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_gem_gtt.c 282199 2015-04-28 19:35:05Z dumbbell $");

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>

/* PPGTT support for Sandybridge/Gen6 and later */
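/*
 * Point a range of PPGTT entries back at the scratch page so that stale
 * translations no longer reference freed object pages.  Each page
 * directory entry covers I915_PPGTT_PT_ENTRIES PTEs; the page-table
 * pages are mapped transiently with sf_buf(9), and SFB_CPUPRIVATE
 * mappings are only valid on the current CPU, hence the sched_pin().
 */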
static void
i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
    unsigned first_entry, unsigned num_entries)
{
	uint32_t *pt_vaddr;
	uint32_t scratch_pte;
	struct sf_buf *sf;
	unsigned act_pd, first_pte, last_pte, i;

	act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	first_pte = first_entry % I915_PPGTT_PT_ENTRIES;

	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		sched_pin();
		sf = sf_buf_alloc(ppgtt->pt_pages[act_pd], SFB_CPUPRIVATE);
		pt_vaddr = (uint32_t *)(uintptr_t)sf_buf_kva(sf);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		sf_buf_free(sf);
		sched_unpin();

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}

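/*
 * Set up the aliasing PPGTT: allocate one wired, zeroed page per page
 * directory entry to hold the page tables, point every PTE at the
 * scratch page, and record where in the global GTT the PDEs will live.
 * On allocation failure the partially built structure is torn down via
 * i915_gem_cleanup_aliasing_ppgtt().
 */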
int
i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv;
	struct i915_hw_ppgtt *ppgtt;
	u_int first_pd_entry_in_global_pt, i;

	dev_priv = dev->dev_private;

	/*
	 * ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now.
	 */
	first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES;

	ppgtt = malloc(sizeof(*ppgtt), DRM_I915_GEM, M_WAITOK | M_ZERO);

	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
	ppgtt->pt_pages = malloc(sizeof(vm_page_t) * ppgtt->num_pd_entries,
	    DRM_I915_GEM, M_WAITOK | M_ZERO);

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = vm_page_alloc(NULL, 0,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);
		if (ppgtt->pt_pages[i] == NULL) {
			dev_priv->mm.aliasing_ppgtt = ppgtt;
			i915_gem_cleanup_aliasing_ppgtt(dev);
			return (-ENOMEM);
		}
	}

	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt.scratch_page_dma;

	i915_ppgtt_clear_range(ppgtt, 0, ppgtt->num_pd_entries *
	    I915_PPGTT_PT_ENTRIES);
	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(uint32_t);
	dev_priv->mm.aliasing_ppgtt = ppgtt;
	return (0);
}

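/*
 * Write PTEs for a run of physical pages into the PPGTT page tables,
 * encoding each page's physical address together with the caller's
 * cache-control flags.  As in the clear path, each page table is
 * mapped through a CPU-private sf_buf while it is being filled.
 */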
static void
i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt, unsigned first_entry,
    unsigned num_entries, vm_page_t *pages, uint32_t pte_flags)
{
	uint32_t *pt_vaddr, pte;
	struct sf_buf *sf;
	unsigned act_pd, first_pte;
	unsigned last_pte, i;
	vm_paddr_t page_addr;

	act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	first_pte = first_entry % I915_PPGTT_PT_ENTRIES;

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		sched_pin();
		sf = sf_buf_alloc(ppgtt->pt_pages[act_pd], SFB_CPUPRIVATE);
		pt_vaddr = (uint32_t *)(uintptr_t)sf_buf_kva(sf);

		for (i = first_pte; i < last_pte; i++) {
			page_addr = VM_PAGE_TO_PHYS(*pages);
			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
			pt_vaddr[i] = pte | pte_flags;

			pages++;
		}

		sf_buf_free(sf);
		sched_unpin();

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}

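/*
 * Map an object's backing pages into the aliasing PPGTT at its GTT
 * offset, translating the requested cache level into Gen6 PTE flags.
 * An unknown cache level is a programming error, hence the panic.
 */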
void
i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
    struct drm_i915_gem_object *obj, enum i915_cache_level cache_level)
{
	struct drm_device *dev;
	struct drm_i915_private *dev_priv;
	uint32_t pte_flags;

	dev = obj->base.dev;
	dev_priv = dev->dev_private;
	pte_flags = GEN6_PTE_VALID;

	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte_flags |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte_flags |= GEN6_PTE_UNCACHED;
		break;
	default:
		panic("cache mode");
	}

	i915_ppgtt_insert_pages(ppgtt, obj->gtt_space->start >> PAGE_SHIFT,
	    obj->base.size >> PAGE_SHIFT, obj->pages, pte_flags);
}

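/*
 * Return an object's PPGTT range to the scratch page.
 */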
void
i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
    struct drm_i915_gem_object *obj)
{
	i915_ppgtt_clear_range(ppgtt, obj->gtt_space->start >> PAGE_SHIFT,
	    obj->base.size >> PAGE_SHIFT);
}

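/*
 * Tear down the aliasing PPGTT: detach it from the device, then unwire
 * and free every page-table page and the bookkeeping allocations.
 */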
void
i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv;
	struct i915_hw_ppgtt *ppgtt;
	vm_page_t m;
	int i;

	dev_priv = dev->dev_private;
	ppgtt = dev_priv->mm.aliasing_ppgtt;
	if (ppgtt == NULL)
		return;
	dev_priv->mm.aliasing_ppgtt = NULL;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		m = ppgtt->pt_pages[i];
		if (m != NULL) {
			vm_page_unwire(m, 0);
			vm_page_free(m);
		}
	}
	free(ppgtt->pt_pages, DRM_I915_GEM);
	free(ppgtt, DRM_I915_GEM);
}

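/*
 * Translate an i915 cache level into the AGP memory type used for
 * global GTT insertions.  The LLC+MLC level only exists on Gen6 and
 * newer parts; older chipsets fall back to plain cached memory.
 */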
static unsigned int
cache_level_to_agp_type(struct drm_device *dev, enum i915_cache_level
    cache_level)
{

	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		if (INTEL_INFO(dev)->gen >= 6)
			return (AGP_USER_CACHED_MEMORY_LLC_MLC);
		/*
		 * Older chipsets do not have this extra level of CPU
		 * caching, so fall through and request the PTE simply
		 * as cached.
		 */
	case I915_CACHE_LLC:
		return (AGP_USER_CACHED_MEMORY);

	default:
	case I915_CACHE_NONE:
		return (AGP_USER_MEMORY);
	}
}

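/*
 * Some chipsets require the GPU to be idled before GTT mappings are
 * changed (dev_priv->mm.gtt.do_idle_maps).  do_idling() forces the GPU
 * idle with interruptible waits disabled and returns the previous
 * interruptible state so that undo_idling() can restore it afterwards.
 */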
static bool
do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (dev_priv->mm.gtt.do_idle_maps) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			DELAY(10);
		}
	}

	return (ret);
}

static void
undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{

	if (dev_priv->mm.gtt.do_idle_maps)
		dev_priv->mm.interruptible = interruptible;
}

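/*
 * Rebuild every global GTT mapping after the GTT contents have been
 * lost (e.g. across a suspend/resume cycle): scrub our portion of the
 * aperture to scratch pages, then rebind each object on the GTT list
 * at its old offset and flush the chipset write buffers.
 */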
void
i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv;
	struct drm_i915_gem_object *obj;

	dev_priv = dev->dev_private;

	/* First fill our portion of the GTT with scratch pages */
	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
	    (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	intel_gtt_chipset_flush();
}

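/*
 * No pre-bind work is needed in this port, so this is a no-op; the hook
 * appears to be retained to mirror the upstream Linux function of the
 * same name, which prepares the object's pages for DMA.
 */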
int
i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{

	return (0);
}

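/*
 * Insert an object's pages into the global GTT at its assigned offset,
 * using the AGP memory type that matches the requested cache level,
 * and mark the object as globally mapped.
 */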
void
i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
    enum i915_cache_level cache_level)
{
	struct drm_device *dev;
	struct drm_i915_private *dev_priv;
	unsigned int agp_type;

	dev = obj->base.dev;
	dev_priv = dev->dev_private;
	agp_type = cache_level_to_agp_type(dev, cache_level);

	intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
	    obj->base.size >> PAGE_SHIFT, obj->pages, agp_type);

	obj->has_global_gtt_mapping = 1;
}

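/*
 * Drop an object's global GTT mapping, returning its range to the
 * scratch page.
 */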
void
i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{

	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
	    obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}

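/*
 * Post-unbind hook: on chipsets with do_idle_maps set, idle the GPU
 * around GTT teardown (see do_idling()/undo_idling() above).  The port
 * has no work to do between the two calls.
 */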
void
i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	undo_idling(dev_priv, interruptible);
}

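/*
 * Initialize the global GTT address space: set up the drm_mm allocator
 * over [start, end) minus a trailing guard page, record the aperture
 * geometry, scrub the range to scratch pages, and register the mappable
 * aperture with the VM as fictitious, write-combined physical memory.
 */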
int
i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start,
    unsigned long mappable_end, unsigned long end)
{
	drm_i915_private_t *dev_priv;
	unsigned long mappable;
	int error;

	dev_priv = dev->dev_private;
	mappable = min(end, mappable_end) - start;

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);

	dev_priv->mm.gtt_start = start;
	dev_priv->mm.gtt_mappable_end = mappable_end;
	dev_priv->mm.gtt_end = end;
	dev_priv->mm.gtt_total = end - start;
	dev_priv->mm.mappable_gtt_total = mappable;

	/* ... but ensure that we clear the entire range. */
	intel_gtt_clear_range(start / PAGE_SIZE, (end - start) / PAGE_SIZE);
	device_printf(dev->dev,
	    "taking over the fictitious range 0x%lx-0x%lx\n",
	    dev->agp->base + start, dev->agp->base + start + mappable);
	error = -vm_phys_fictitious_reg_range(dev->agp->base + start,
	    dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
	return (error);
}