/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_ttm.c 282199 2015-04-28 19:35:05Z dumbbell $");

#include <dev/drm2/ttm/ttm_bo_api.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>
#include <dev/drm2/drmP.h>
#include <dev/drm2/radeon/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

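/*
 * mmap() offsets at or above DRM_FILE_PAGE_OFFSET identify TTM buffer
 * objects; anything below falls through to the legacy drm_mmap() path
 * (see radeon_mmap() below).
 */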
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);

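/*
 * Recover the radeon_device from a ttm_bo_device: the bdev is embedded in
 * the radeon_mman, which in turn is embedded in the radeon_device, so two
 * container_of() steps walk back to the owning device.
 */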
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}


/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

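/*
 * Take references on the TTM memory-accounting and buffer-object global
 * state. These drm_global items are refcounted singletons shared by all
 * devices; the BO global depends on the memory global, so it is set up
 * second here and torn down first in radeon_ttm_global_fini().
 */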
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct drm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

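/*
 * Describe each TTM memory domain to the core: SYSTEM is plain cacheable
 * host memory, TT is host memory made visible to the GPU through the GART
 * (or the AGP aperture), and VRAM is fixed on-card memory reached through
 * the PCI aperture and preferably write-combined.
 */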
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

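/*
 * Pick where an evicted buffer should go. VRAM contents are pushed out to
 * GTT while the GFX ring is up (so the blit path can do the move) and fall
 * back to the CPU domain otherwise; everything else evicts to the CPU
 * domain. Buffers that are not radeon BOs get a generic system placement.
 */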
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct radeon_bo *rbo;
	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->fpfn = 0;
		placement->lpfn = 0;
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo)
{
	return 0;
}

static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	KASSERT(old_mem->mm_node == NULL, ("old_mem->mm_node != NULL"));
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

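/*
 * Copy a buffer with the GPU. The per-domain page offsets are translated
 * into MC address-space offsets before radeon_copy() is queued on the copy
 * ring, fenced against the BO's current sync object. Note that the
 * radeon_copy() return value is immediately overwritten by
 * ttm_bo_move_accel_cleanup() below; the FIXME in the body marks that a
 * copy error is currently not handled.
 */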
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem,
			struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	int r, ridx;

	rdev = radeon_get_rdev(bo->bdev);
	ridx = radeon_copy_ring_index(rdev);
	old_start = old_mem->start << PAGE_SHIFT;
	new_start = new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[ridx].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	CTASSERT((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) == 0);

	/* sync other rings */
	fence = bo->sync_obj;
	r = radeon_copy(rdev, old_start, new_start,
			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
			&fence);
	/* FIXME: handle copy error */
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
				      evict, no_wait_gpu, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

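/*
 * VRAM -> system moves bounce through GTT: allocate a temporary GTT
 * placement, blit from VRAM into it with the GPU, then let
 * ttm_bo_move_ttm() unbind it into system pages, presumably to spare the
 * CPU from reading uncached VRAM directly.
 */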
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	u32 placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

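/*
 * The mirror image of radeon_move_vram_ram(): bind the pages into a
 * temporary GTT placement first, then blit from GTT into VRAM with the
 * GPU.
 */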
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	u32 placements;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

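/*
 * Top-level move dispatcher. Moves that only (un)bind the GART need no
 * copy at all; moves involving VRAM go to the blit helpers when a copy
 * ring is available; everything falls back to a CPU memcpy when the GPU
 * cannot do the transfer.
 */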
static int radeon_bo_move(struct ttm_buffer_object *bo,
			bool evict, bool interruptible,
			bool no_wait_gpu,
			struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
	    rdev->asic->copy.copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					    no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
	}
	return r;
}

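/*
 * Fill in the bus address information TTM needs to CPU-map a buffer:
 * nothing for system memory, the AGP aperture for AGP-backed TT, and the
 * PCI aperture for VRAM, which must lie within the CPU-visible portion.
 */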
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap_nocache(mem->bus.base + mem->bus.offset,
						mem->bus.size);

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj)
{
	return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
	radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
	return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj)
{
	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_dma_tt		ttm;
	struct radeon_device		*rdev;
	u64				offset;
};

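/*
 * Bind the tt's pages into the GART at the GPU offset TTM assigned to the
 * buffer (bo_mem->start, in pages), using the DMA addresses collected when
 * the tt was populated.
 */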
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_tt *gtt = (void*)ttm;
	int r;

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		DRM_ERROR("nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	r = radeon_gart_bind(gtt->rdev, gtt->offset,
			     ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	free(gtt, DRM_MEM_DRIVER);
}

static struct ttm_backend_func radeon_backend_func = {
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

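/*
 * Allocate a ttm_tt for a buffer. AGP systems use the generic AGP backend;
 * otherwise a radeon_ttm_tt is allocated, whose embedded ttm_dma_tt also
 * tracks the per-page DMA addresses needed for GART binding.
 */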
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
				    unsigned long size, uint32_t page_flags,
				    vm_page_t dummy_read_page)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
					 size, page_flags, dummy_read_page);
	}
#endif

	gtt = malloc(sizeof(struct radeon_ttm_tt),
	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &radeon_backend_func;
	gtt->rdev = rdev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
		free(gtt, DRM_MEM_DRIVER);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

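/*
 * Back a tt with pages. AGP and swiotlb configurations have dedicated
 * paths; the common case allocates from the TTM page pool and, on FreeBSD,
 * records each page's physical address via VM_PAGE_TO_PHYS() (the Linux
 * pci_map_page() path is retained under FREEBSD_WIP).
 */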
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r;
#ifdef FREEBSD_WIP
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
#endif /* FREEBSD_WIP */

	if (ttm->state != tt_unpopulated)
		return 0;

#ifdef FREEBSD_WIP
	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}
#endif /* FREEBSD_WIP */

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = VM_PAGE_TO_PHYS(ttm->pages[i]);
#ifdef FREEBSD_WIP
		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
			while (--i) {
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
#endif /* FREEBSD_WIP */
	}
	return 0;
}

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (gtt->ttm.dma_address[i]) {
			gtt->ttm.dma_address[i] = 0;
#ifdef FREEBSD_WIP
			pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
#endif /* FREEBSD_WIP */
		}
	}

	ttm_pool_unpopulate(ttm);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.sync_obj_signaled = &radeon_sync_obj_signaled,
	.sync_obj_wait = &radeon_sync_obj_wait,
	.sync_obj_flush = &radeon_sync_obj_flush,
	.sync_obj_unref = &radeon_sync_obj_unref,
	.sync_obj_ref = &radeon_sync_obj_ref,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
	.io_mem_free = &radeon_ttm_io_mem_free,
};

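/*
 * Bring up the TTM side of the memory manager: global state, the BO
 * device, the VRAM and GTT heaps, a 256 KB VRAM buffer pinned for the
 * stolen VGA memory, and finally the debugfs entries.
 */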
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No other users of the address space, so set it to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	rdev->ddev->drm_ttm_bdev = &rdev->mman.bdev;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
				rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM,
			     NULL, &rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stollen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned)(rdev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
				rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	if (rdev->stollen_vga_memory) {
		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stollen_vga_memory);
			radeon_bo_unreserve(rdev->stollen_vga_memory);
		}
		radeon_bo_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!rdev->mman.initialized)
		return;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

#ifdef FREEBSD_WIP
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

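/*
 * Wrap TTM's fault handler so every fault on a radeon BO runs with the
 * power-management mclk lock held shared, presumably so reclocking cannot
 * race with CPU access to the mapping.
 */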
static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct radeon_device *rdev;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	rdev = radeon_get_rdev(bo->bdev);
	sx_slock(&rdev->pm.mclk_lock);
	r = ttm_vm_ops->fault(vma, vmf);
	sx_sunlock(&rdev->pm.mclk_lock);
	return r;
}

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}
#endif /* FREEBSD_WIP */


#define RADEON_DEBUGFS_MEM_TYPES 2

#if defined(CONFIG_DEBUG_FS)
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ret;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}
#endif

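/*
 * Register the debugfs files: one drm_mm dump per memory heap (VRAM and
 * GTT), plus the TTM page pool and, with swiotlb, the DMA page pool.
 */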
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
	unsigned i;

	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
		if (i == 0)
			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
		else
			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
		radeon_mem_types_list[i].driver_features = 0;
		if (i == 0)
			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
		else
			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;

	}
	/* Add ttm page pool to debugfs */
	sprintf(radeon_mem_types_names[i], "ttm_page_pool");
	radeon_mem_types_list[i].name = radeon_mem_types_names[i];
	radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
	radeon_mem_types_list[i].driver_features = 0;
	radeon_mem_types_list[i++].data = NULL;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
		radeon_mem_types_list[i].driver_features = 0;
		radeon_mem_types_list[i++].data = NULL;
	}
#endif
	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);

#endif
	return 0;
}