/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>
#include <sys/sf_buf.h>

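/*
 * Release the memory manager node backing the buffer object's current
 * placement.
 */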
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

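/*
 * Move a buffer into system memory or a TTM-bindable placement: unbind
 * the old placement and release its node if it was not system memory,
 * switch the page caching to match the new placement, and bind the TTM
 * to the new memory region when that region is not plain system memory.
 */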
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}

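/*
 * Lock protecting a memory type's I/O reservations against concurrent
 * eviction of I/O-mapped buffers.  Memory types that never run out of
 * I/O space set io_reserve_fastpath and skip the lock entirely.
 */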
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible) {
		if (sx_xlock_sig(&man->io_reserve_mutex))
			return (-EINTR);
		else
			return (0);
	}

	sx_xlock(&man->io_reserve_mutex);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	sx_xunlock(&man->io_reserve_mutex);
}

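/*
 * Unmap the least recently used I/O-reserved buffer object so that its
 * I/O space can be reused.  Returns -EAGAIN when there is nothing left
 * to evict.
 */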
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

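/*
 * Ask the driver to reserve I/O space for @mem, evicting other
 * I/O-mapped buffers and retrying for as long as the driver reports
 * -EAGAIN.  Reservations are reference counted per memory region.
 */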
static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

static void ttm_mem_io_free(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}

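/*
 * VM-mapping variants of the reservation above: the first CPU mapping
 * of a buffer takes an I/O reservation and enters the buffer on the
 * eviction LRU; tearing the mapping down releases both again.
 */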
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

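/*
 * Map the bus address range backing @mem into kernel virtual address
 * space, honoring the write-combining placement flag.  *virtual is
 * left NULL when the region is not I/O memory.
 */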
static
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
		    mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

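/*
 * Undo ttm_mem_reg_ioremap(): drop the device mapping, unless the
 * driver supplied a premapped address, and release the I/O reservation.
 */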
static
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

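/*
 * Page-sized copy helpers for the memcpy move path below: I/O to I/O,
 * I/O to TTM page, and TTM page to I/O.  The TTM-page variants map the
 * page with the requested memory attribute for the duration of the copy.
 */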
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		/* iowrite32(ioread32(srcP++), dstP++); */
		*dstP++ = *srcP++;
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				vm_memattr_t prot)
{
	vm_page_t d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

	/* XXXKIB can't sleep ? */
	dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS(d), PAGE_SIZE, prot);
	if (!dst)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	pmap_unmapdev((vm_offset_t)dst, PAGE_SIZE);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				vm_memattr_t prot)
{
	vm_page_t s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = pmap_mapdev_attr(VM_PAGE_TO_PHYS(s), PAGE_SIZE, prot);
	if (!src)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	pmap_unmapdev((vm_offset_t)src, PAGE_SIZE);

	return 0;
}

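/*
 * Fallback move path that copies the buffer with the CPU.  Both
 * placements are mapped, the pages are copied one at a time (back to
 * front when the source and destination ranges may overlap), and, when
 * the new placement is fixed memory, the now-unused TTM is destroyed.
 */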
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	/* TTM might be null for moves within the same region. */
	if (ttm != NULL && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret) {
			/*
			 * If we fail here, don't nuke the mm node
			 * as the bo still owns it.
			 */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			vm_memattr_t prot = ttm_io_prot(old_mem->placement);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			vm_memattr_t prot = ttm_io_prot(new_mem->placement);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret) {
			/* On failure, keep the old copy; don't release
			 * its mm node. */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	ttm_bo_mem_put(bo, &old_copy);
	return ret;
}

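/*
 * Ghost objects created by ttm_buffer_object_transfer() below are
 * plain malloc'ed buffer objects; their destructor only has to free
 * the allocation.
 */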
MALLOC_DEFINE(M_TTM_TRANSF_OBJ, "ttm_transf_obj", "TTM Transfer Objects");

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	free(bo, M_TTM_TRANSF_OBJ);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int
ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
    struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = malloc(sizeof(*fbo), M_TTM_TRANSF_OBJ, M_WAITOK);
	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	mtx_lock(&bdev->fence_lock);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	mtx_unlock(&bdev->fence_lock);
	refcount_init(&fbo->list_kref, 1);
	refcount_init(&fbo->kref, 1);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;

	*new_obj = fbo;
	return 0;
}

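/*
 * Translate TTM caching placement flags into a VM memory attribute
 * for device and page mappings.
 */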
vm_memattr_t
ttm_io_prot(uint32_t caching_flags)
{
#if defined(__i386__) || defined(__amd64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		return (VM_MEMATTR_WRITE_COMBINING);
	else
		/*
		 * We do not support the original 80386; see the
		 * corresponding CPU check in the Linux source for
		 * why that matters here.
		 */
		return (VM_MEMATTR_UNCACHEABLE);
#else
#error Port me
#endif
}

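/*
 * Kmap helper for I/O memory: reuse the driver-premapped address when
 * one exists, otherwise establish a device mapping for the requested
 * span.
 */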
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
		    bo->mem.bus.offset + offset, size,
		    (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		map->size = size;
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

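/*
 * Kmap helper for TTM-backed memory: a single cached page is mapped
 * through an sf_buf; everything else gets a contiguous KVA range with
 * the pages' memory attributes adjusted to match the placement.
 */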
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	vm_memattr_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int i, ret;

	MPASS(ttm != NULL);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->sf = sf_buf_alloc(map->page, 0);
		map->virtual = (void *)sf_buf_kva(map->sf);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		/*
		 * A cached placement maps write-back (the analogue of
		 * Linux's PAGE_KERNEL); anything else takes its
		 * attribute from the placement flags.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			VM_MEMATTR_WRITE_BACK :
			ttm_io_prot(mem->placement);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->num_pages = num_pages;
		map->virtual = (void *)kva_alloc(num_pages * PAGE_SIZE);
		if (map->virtual != NULL) {
			for (i = 0; i < num_pages; i++) {
				/* XXXKIB hack */
				pmap_page_set_memattr(ttm->pages[start_page +
				    i], prot);
			}
			pmap_qenter((vm_offset_t)map->virtual,
			    &ttm->pages[start_page], num_pages);
		}
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

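/*
 * Map part of a buffer object, described by @start_page and
 * @num_pages, into kernel virtual address space and record the result
 * in @map for a later ttm_bo_kunmap().
 *
 * A minimal usage sketch (error handling and buffer reservation are
 * the caller's responsibility):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	if (ttm_bo_kmap(bo, 0, 1, &map) == 0) {
 *		memset(ttm_kmap_obj_virtual(&map, &is_iomem), 0,
 *		    PAGE_SIZE);
 *		ttm_bo_kunmap(&map);
 *	}
 */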
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	MPASS(list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}

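/*
 * Tear down a mapping created by ttm_bo_kmap(), dispatching on the
 * mapping type recorded in @map, and drop the I/O reservation taken
 * when the mapping was set up.
 */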
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		pmap_unmapdev((vm_offset_t)map->virtual, map->size);
		break;
	case ttm_bo_map_vmap:
		pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
		kva_free((vm_offset_t)map->virtual,
		    map->num_pages * PAGE_SIZE);
		break;
	case ttm_bo_map_kmap:
		sf_buf_free(map->sf);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		MPASS(0);
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
	map->sf = NULL;
}

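/*
 * Finish an accelerated (GPU) move: attach the driver's @sync_obj
 * fence to the buffer, then either wait for idle and release the old
 * placement (eviction), or hand the old placement to a ghost object
 * that releases it once the GPU operation completes.
 */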
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	mtx_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		mtx_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		mtx_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}