1247835Skib/**************************************************************************
2247835Skib *
3247835Skib * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4247835Skib * All Rights Reserved.
5247835Skib *
6247835Skib * Permission is hereby granted, free of charge, to any person obtaining a
7247835Skib * copy of this software and associated documentation files (the
8247835Skib * "Software"), to deal in the Software without restriction, including
9247835Skib * without limitation the rights to use, copy, modify, merge, publish,
10247835Skib * distribute, sub license, and/or sell copies of the Software, and to
11247835Skib * permit persons to whom the Software is furnished to do so, subject to
12247835Skib * the following conditions:
13247835Skib *
14247835Skib * The above copyright notice and this permission notice (including the
15247835Skib * next paragraph) shall be included in all copies or substantial portions
16247835Skib * of the Software.
17247835Skib *
18247835Skib * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19247835Skib * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20247835Skib * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21247835Skib * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22247835Skib * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23247835Skib * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24247835Skib * USE OR OTHER DEALINGS IN THE SOFTWARE.
25247835Skib *
26247835Skib **************************************************************************/
27247835Skib/*
28247835Skib * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29247835Skib */
30247835Skib
31247835Skib#include <sys/cdefs.h>
32247835Skib__FBSDID("$FreeBSD$");
33247835Skib
34247835Skib#include <dev/drm2/drmP.h>
35247835Skib#include <dev/drm2/ttm/ttm_module.h>
36247835Skib#include <dev/drm2/ttm/ttm_bo_driver.h>
37247835Skib#include <dev/drm2/ttm/ttm_placement.h>
38285002Savg#include <vm/vm_pageout.h>
39247835Skib
40247835Skib#define TTM_ASSERT_LOCKED(param)
41247835Skib#define TTM_DEBUG(fmt, arg...)
42247835Skib#define TTM_BO_HASH_ORDER 13
43247835Skib
44247835Skibstatic int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
45247835Skibstatic int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
46247835Skibstatic void ttm_bo_global_kobj_release(struct ttm_bo_global *glob);
47247835Skib
48247835SkibMALLOC_DEFINE(M_TTM_BO, "ttm_bo", "TTM Buffer Objects");
49247835Skib
50247835Skibstatic inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
51247835Skib{
52247835Skib	int i;
53247835Skib
54247835Skib	for (i = 0; i <= TTM_PL_PRIV5; i++)
55247835Skib		if (flags & (1 << i)) {
56247835Skib			*mem_type = i;
57247835Skib			return 0;
58247835Skib		}
59247835Skib	return -EINVAL;
60247835Skib}
61247835Skib
/*
 * Dump the state of one memory type manager to the console.  Called from
 * ttm_bo_mem_space_debug() when a placement request cannot be satisfied.
 */
static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	printf("    has_type: %d\n", man->has_type);
	printf("    use_type: %d\n", man->use_type);
	printf("    flags: 0x%08X\n", man->flags);
	printf("    gpu_offset: 0x%08lX\n", man->gpu_offset);
	printf("    size: %ju\n", (uintmax_t)man->size);
	printf("    available_caching: 0x%08X\n", man->available_caching);
	printf("    default_caching: 0x%08X\n", man->default_caching);
	/* The system domain has no backing manager, hence no debug hook. */
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}
76247835Skib
/*
 * Report a failed placement attempt for @bo: print the buffer's size and
 * then, for every requested placement, dump the corresponding memory
 * type manager's state via ttm_mem_type_debug().
 */
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	int i, ret, mem_type;

	printf("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return;
		printf("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i], mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}
95247835Skib
#if 0
/*
 * sysfs-style report of the global buffer object count.  Disabled in this
 * port (no sysfs equivalent wired up); kept for reference against the
 * Linux original.
 */
static ssize_t ttm_bo_global_show(struct ttm_bo_global *glob,
    char *buffer)
{

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}
#endif
105247835Skib
/* Return the placement flag bit corresponding to memory type @type. */
static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	uint32_t flag = 1;

	return flag << type;
}
110247835Skib
/*
 * Final destruction of a buffer object once both the list and the main
 * reference counts have dropped to zero.  Destroys the backing ttm_tt,
 * drops the global bo count, frees the object (via the driver's destroy
 * hook when provided) and finally returns the accounted size to the
 * global memory accounting.
 */
static void ttm_bo_release_list(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	/* Saved before the bo is freed; needed for the accounting below. */
	size_t acc_size = bo->acc_size;

	MPASS(atomic_read(&bo->list_kref) == 0);
	MPASS(atomic_read(&bo->kref) == 0);
	MPASS(atomic_read(&bo->cpu_writers) == 0);
	MPASS(bo->sync_obj == NULL);
	MPASS(bo->mem.mm_node == NULL);
	MPASS(list_empty(&bo->lru));
	MPASS(list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		free(bo, M_TTM_BO);
	}
	/* bo may be gone here; use the bdev/acc_size saved above. */
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
134247835Skib
/*
 * Sleep until @bo becomes unreserved.  Must be called with the global
 * lru_lock held; msleep() atomically drops and re-acquires it around the
 * sleep.  The wait channel is the bo pointer itself, matching the
 * wakeup(bo) calls in the unreserve paths.  Returns 0 on success or
 * -ERESTARTSYS if an interruptible sleep was broken by a signal.
 */
static int
ttm_bo_wait_unreserved_locked(struct ttm_buffer_object *bo, bool interruptible)
{
	const char *wmsg;
	int flags, ret;

	ret = 0;
	if (interruptible) {
		flags = PCATCH;
		wmsg = "ttbowi";
	} else {
		flags = 0;
		wmsg = "ttbowu";
	}
	while (ttm_bo_is_reserved(bo)) {
		ret = -msleep(bo, &bo->glob->lru_lock, flags, wmsg, 0);
		/* Map FreeBSD signal returns to the Linux-style error code. */
		if (ret == -EINTR || ret == -ERESTART)
			ret = -ERESTARTSYS;
		if (ret != 0)
			break;
	}
	return (ret);
}
158247835Skib
/*
 * Put @bo back on the LRU lists of its memory type manager and, if it has
 * CPU-accessible backing pages (bo->ttm), on the global swap LRU as well.
 * Each list membership holds its own list_kref reference.  Caller must
 * hold the reservation; pinned buffers (TTM_PL_FLAG_NO_EVICT) are never
 * put on the LRUs.
 */
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	MPASS(ttm_bo_is_reserved(bo));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		MPASS(list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		refcount_acquire(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			refcount_acquire(&bo->list_kref);
		}
	}
}
180247835Skib
181247835Skibint ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
182247835Skib{
183247835Skib	int put_count = 0;
184247835Skib
185247835Skib	if (!list_empty(&bo->swap)) {
186247835Skib		list_del_init(&bo->swap);
187247835Skib		++put_count;
188247835Skib	}
189247835Skib	if (!list_empty(&bo->lru)) {
190247835Skib		list_del_init(&bo->lru);
191247835Skib		++put_count;
192247835Skib	}
193247835Skib
194247835Skib	/*
195247835Skib	 * TODO: Add a driver hook to delete from
196247835Skib	 * driver-specific LRU's here.
197247835Skib	 */
198247835Skib
199247835Skib	return put_count;
200247835Skib}
201247835Skib
/*
 * Try to reserve @bo without touching the LRU lists.  Must be called
 * with the global lru_lock held (it may be dropped and retaken inside
 * ttm_bo_wait_unreserved_locked()).
 *
 * When @use_sequence is set, a ticket-style deadlock avoidance scheme is
 * used for multi-bo reservation: the thread with the older (smaller,
 * modulo wraparound) sequence number wins, the younger one backs off
 * with -EAGAIN.  Returns 0, -EDEADLK (already reserved with the same
 * sequence), -EAGAIN (must back off), -EBUSY (@no_wait and reserved), or
 * an error from the interruptible wait.
 */
int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	int ret;

	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
		/**
		 * Deadlock avoidance for multi-bo reserving.
		 */
		if (use_sequence && bo->seq_valid) {
			/**
			 * We've already reserved this one.
			 */
			if (unlikely(sequence == bo->val_seq))
				return -EDEADLK;
			/**
			 * Already reserved by a thread that will not back
			 * off for us. We need to back off.
			 */
			if (unlikely(sequence - bo->val_seq < (1U << 31)))
				return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		ret = ttm_bo_wait_unreserved_locked(bo, interruptible);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		bool wake_up = false;
		/**
		 * Wake up waiters that may need to recheck for deadlock,
		 * if we decreased the sequence number.
		 */
		if (unlikely((bo->val_seq - sequence < (1U << 31))
			     || !bo->seq_valid))
			wake_up = true;

		/*
		 * In the worst case with memory ordering these values can be
		 * seen in the wrong order. However since we call wake_up_all
		 * in that case, this will hopefully not pose a problem,
		 * and the worst case would only cause someone to accidentally
		 * hit -EAGAIN in ttm_bo_reserve when they see old value of
		 * val_seq. However this would only happen if seq_valid was
		 * written before val_seq was, and just means some slightly
		 * increased cpu usage
		 */
		bo->val_seq = sequence;
		bo->seq_valid = true;
		if (wake_up)
			wakeup(bo);
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
265247835Skib
/*
 * Drop @count list references from @bo in one atomic operation.  If that
 * releases the last reference, destroy the bo via ttm_bo_release_list();
 * when @never_free is set, hitting zero here indicates a refcounting bug
 * and we panic instead of freeing.
 */
void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	u_int old;

	/* old is the value *before* subtraction, hence the <= comparison. */
	old = atomic_fetchadd_int(&bo->list_kref, -count);
	if (old <= count) {
		if (never_free)
			panic("ttm_bo_ref_buf");
		ttm_bo_release_list(bo);
	}
}
278247835Skib
279247835Skibint ttm_bo_reserve(struct ttm_buffer_object *bo,
280247835Skib		   bool interruptible,
281247835Skib		   bool no_wait, bool use_sequence, uint32_t sequence)
282247835Skib{
283247835Skib	struct ttm_bo_global *glob = bo->glob;
284247835Skib	int put_count = 0;
285247835Skib	int ret;
286247835Skib
287254878Sdumbbell	mtx_lock(&bo->glob->lru_lock);
288254861Sdumbbell	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
289254861Sdumbbell				   sequence);
290254861Sdumbbell	if (likely(ret == 0)) {
291247835Skib		put_count = ttm_bo_del_from_lru(bo);
292254861Sdumbbell		mtx_unlock(&glob->lru_lock);
293254861Sdumbbell		ttm_bo_list_ref_sub(bo, put_count, true);
294254878Sdumbbell	} else
295254878Sdumbbell		mtx_unlock(&bo->glob->lru_lock);
296247835Skib
297247835Skib	return ret;
298247835Skib}
299247835Skib
/*
 * Slowpath reservation without LRU handling: unconditionally wait for the
 * reservation (no back-off), used after a ttm_bo_reserve() returned
 * -EAGAIN.  Caller holds the lru_lock (dropped/retaken across the wait).
 * Reserving twice with the same sequence is a caller bug and is logged.
 * Always installs @sequence as the new valid sequence on success.
 */
int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
				  bool interruptible, uint32_t sequence)
{
	bool wake_up = false;
	int ret;

	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
		if (bo->seq_valid && sequence == bo->val_seq) {
			DRM_ERROR(
			    "%s: bo->seq_valid && sequence == bo->val_seq",
			    __func__);
		}

		ret = ttm_bo_wait_unreserved_locked(bo, interruptible);

		if (unlikely(ret))
			return ret;
	}

	/* Same wraparound-aware "did we decrease the sequence" test as
	 * ttm_bo_reserve_nolru(). */
	if ((bo->val_seq - sequence < (1U << 31)) || !bo->seq_valid)
		wake_up = true;

	/**
	 * Wake up waiters that may need to recheck for deadlock,
	 * if we decreased the sequence number.
	 */
	bo->val_seq = sequence;
	bo->seq_valid = true;
	if (wake_up)
		wakeup(bo);

	return 0;
}
333254863Sdumbbell
334254863Sdumbbellint ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
335254863Sdumbbell			    bool interruptible, uint32_t sequence)
336254863Sdumbbell{
337254863Sdumbbell	struct ttm_bo_global *glob = bo->glob;
338254863Sdumbbell	int put_count, ret;
339254863Sdumbbell
340254878Sdumbbell	mtx_lock(&glob->lru_lock);
341254863Sdumbbell	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
342254863Sdumbbell	if (likely(!ret)) {
343254863Sdumbbell		put_count = ttm_bo_del_from_lru(bo);
344254863Sdumbbell		mtx_unlock(&glob->lru_lock);
345254863Sdumbbell		ttm_bo_list_ref_sub(bo, put_count, true);
346254878Sdumbbell	} else
347254878Sdumbbell		mtx_unlock(&glob->lru_lock);
348254863Sdumbbell	return ret;
349254863Sdumbbell}
350254863Sdumbbell
/*
 * Release the reservation on @bo with the lru_lock already held.  The bo
 * is put back on the LRU lists *before* the reserved flag is cleared so
 * that a concurrent reserver always finds it on a list, then sleepers on
 * the bo channel are woken.
 */
void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
{
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wakeup(bo);
}
357247835Skib
358247835Skibvoid ttm_bo_unreserve(struct ttm_buffer_object *bo)
359247835Skib{
360247835Skib	struct ttm_bo_global *glob = bo->glob;
361247835Skib
362247835Skib	mtx_lock(&glob->lru_lock);
363247835Skib	ttm_bo_unreserve_locked(bo);
364247835Skib	mtx_unlock(&glob->lru_lock);
365247835Skib}
366247835Skib
/*
 * Call bo->mutex locked.
 *
 * Allocate and attach the ttm_tt backing-page object for @bo according to
 * its type.  System memory backing is created via the driver's
 * ttm_tt_create hook; fixed (VRAM-like) buffers don't reach this point.
 * Returns 0, -ENOMEM on allocation failure, or -EINVAL for an unknown
 * buffer type.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* FALLTHROUGH: device bo's are created like kernel bo's,
		 * just with zeroed pages requested above. */
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		printf("[TTM] Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
411247835Skib
/*
 * Move @bo's backing storage to the placement described by @mem.
 *
 * Unmaps CPU mappings when the aperture or caching attributes change,
 * creates/binds a ttm_tt when moving into a non-fixed memory type, and
 * then performs the actual move with the cheapest applicable method:
 * ttm_bo_move_ttm for system<->bound-tt moves, the driver's move hook if
 * present, or a memcpy fallback.  On a failed move the move_notify
 * callback is replayed with the old placement so driver state stays
 * consistent.  On success updates bo->offset/cur_placement.
 */
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	/* CPU mappings become invalid if the PCI aperture or the caching
	 * attributes change; tear them down up front. */
	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			/* Only zero pages when the old type was fixed (e.g.
			 * VRAM) and its contents can't be copied over. */
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		/* System -> system "move" is just an assignment. */
		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		/* Undo the move_notify above by notifying with the old
		 * placement temporarily swapped into bo->mem. */
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printf("[TTM] Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	/* Destroy a ttm that a fixed destination type can't use. */
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
516247835Skib
517247835Skib/**
518247835Skib * Call bo::reserved.
519247835Skib * Will release GPU memory type usage on destruction.
520247835Skib * This is the place to put in driver specific hooks to release
521247835Skib * driver private resources.
522247835Skib * Will release the bo::reserved lock.
523247835Skib */
524247835Skib
525247835Skibstatic void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
526247835Skib{
527247835Skib	if (bo->bdev->driver->move_notify)
528247835Skib		bo->bdev->driver->move_notify(bo, NULL);
529247835Skib
530247835Skib	if (bo->ttm) {
531247835Skib		ttm_tt_unbind(bo->ttm);
532247835Skib		ttm_tt_destroy(bo->ttm);
533247835Skib		bo->ttm = NULL;
534247835Skib	}
535247835Skib	ttm_bo_mem_put(bo, &bo->mem);
536247835Skib
537247835Skib	atomic_set(&bo->reserved, 0);
538247835Skib	wakeup(&bo);
539247835Skib
540247835Skib	/*
541247835Skib	 * Since the final reference to this bo may not be dropped by
542247835Skib	 * the current task we have to put a memory barrier here to make
543247835Skib	 * sure the changes done in this function are always visible.
544247835Skib	 *
545247835Skib	 * This function only needs protection against the final kref_put.
546247835Skib	 */
547247835Skib	mb();
548247835Skib}
549247835Skib
/*
 * Destroy @bo immediately if it is idle and unreserved, otherwise queue
 * it on the device's delayed-destroy list and kick the cleanup task.
 * A reference to any outstanding fence is taken so it can be flushed
 * after the locks are dropped.
 */
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	void *sync_obj = NULL;
	int put_count;
	int ret;

	mtx_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

	mtx_lock(&bdev->fence_lock);
	/* Non-blocking wait: clears bo->sync_obj if the fence is signaled. */
	(void) ttm_bo_wait(bo, false, false, true);
	if (!ret && !bo->sync_obj) {
		/* Idle and reserved: destroy right away. */
		mtx_unlock(&bdev->fence_lock);
		put_count = ttm_bo_del_from_lru(bo);

		mtx_unlock(&glob->lru_lock);
		ttm_bo_cleanup_memtype_use(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);

		return;
	}
	if (bo->sync_obj)
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	mtx_unlock(&bdev->fence_lock);

	/* Drop the reservation we managed to take; the delayed worker will
	 * re-reserve when it retries. */
	if (!ret) {
		atomic_set(&bo->reserved, 0);
		wakeup(bo);
	}

	refcount_acquire(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	mtx_unlock(&glob->lru_lock);

	if (sync_obj) {
		driver->sync_obj_flush(sync_obj);
		driver->sync_obj_unref(&sync_obj);
	}
	/* Retry in ~10ms (minimum one tick). */
	taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
	    ((hz / 100) < 1) ? 1 : hz / 100);
}
595247835Skib
/**
 * function ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	mtx_lock(&bdev->fence_lock);
	/* Non-blocking check whether the bo is still busy on the GPU. */
	ret = ttm_bo_wait(bo, false, false, true);

	if (ret && !no_wait_gpu) {
		void *sync_obj;

		/*
		 * Take a reference to the fence and unreserve,
		 * at this point the buffer should be dead, so
		 * no new sync objects can be attached.
		 */
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		mtx_unlock(&bdev->fence_lock);

		atomic_set(&bo->reserved, 0);
		wakeup(bo);
		mtx_unlock(&glob->lru_lock);

		/* Blocking fence wait with all locks dropped. */
		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
		driver->sync_obj_unref(&sync_obj);
		if (ret)
			return ret;

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		mtx_lock(&bdev->fence_lock);
		ret = ttm_bo_wait(bo, false, false, true);
		mtx_unlock(&bdev->fence_lock);
		if (ret)
			return ret;

		mtx_lock(&glob->lru_lock);
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			mtx_unlock(&glob->lru_lock);
			return 0;
		}
	} else
		mtx_unlock(&bdev->fence_lock);

	/* Still busy (no_wait_gpu) or already destroyed by someone else. */
	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		atomic_set(&bo->reserved, 0);
		wakeup(bo);
		mtx_unlock(&glob->lru_lock);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;	/* the ddestroy list reference */

	mtx_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}
687247835Skib
/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	mtx_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	refcount_acquire(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		/* Grab a reference to the next entry *before* the current
		 * one is cleaned up, so the list position survives. */
		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			refcount_acquire(&nentry->list_kref);
		}

		/* Try without blocking first; only block for the
		 * reservation when the caller asked to flush everything. */
		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
		if (remove_all && ret) {
			ret = ttm_bo_reserve_nolru(entry, false, false,
						   false, 0);
		}

		/* Drops both the reservation and the lru_lock. */
		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			mtx_unlock(&glob->lru_lock);

		if (refcount_release(&entry->list_kref))
			ttm_bo_release_list(entry);
		entry = nentry;

		if (ret || !entry)
			goto out;

		mtx_lock(&glob->lru_lock);
		/* Entry was taken off the list by someone else: done. */
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	mtx_unlock(&glob->lru_lock);
out:
	if (entry && refcount_release(&entry->list_kref))
		ttm_bo_release_list(entry);
	return ret;
}
747247835Skib
748247835Skibstatic void ttm_bo_delayed_workqueue(void *arg, int pending __unused)
749247835Skib{
750247835Skib	struct ttm_bo_device *bdev = arg;
751247835Skib
752247835Skib	if (ttm_bo_delayed_delete(bdev, false)) {
753247835Skib		taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
754247835Skib		    ((hz / 100) < 1) ? 1 : hz / 100);
755247835Skib	}
756247835Skib}
757247835Skib
/*
 * Last-unref handler for a buffer object: remove it from the device's
 * address-space RB tree and free its mmap offset node, release any
 * io-reserved VM state, then destroy or queue-destroy the bo.  The final
 * list reference from creation is dropped at the end.
 */
static void ttm_bo_release(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	rw_wlock(&bdev->vm_lock);
	if (likely(bo->vm_node != NULL)) {
		RB_REMOVE(ttm_bo_device_buffer_objects,
		    &bdev->addr_space_rb, bo);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	rw_wunlock(&bdev->vm_lock);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	if (refcount_release(&bo->list_kref))
		ttm_bo_release_list(bo);
}
778247835Skib
779247835Skibvoid ttm_bo_unref(struct ttm_buffer_object **p_bo)
780247835Skib{
781247835Skib	struct ttm_buffer_object *bo = *p_bo;
782247835Skib
783247835Skib	*p_bo = NULL;
784247835Skib	if (refcount_release(&bo->kref))
785247835Skib		ttm_bo_release(bo);
786247835Skib}
787247835Skib
788247835Skibint ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
789247835Skib{
790247835Skib	int pending;
791247835Skib
792247835Skib	taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, &pending);
793247835Skib	if (pending)
794247835Skib		taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);
795247835Skib	return (pending);
796247835Skib}
797247835Skib
798247835Skibvoid ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
799247835Skib{
800247835Skib	if (resched) {
801247835Skib		taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
802247835Skib		    ((hz / 100) < 1) ? 1 : hz / 100);
803247835Skib	}
804247835Skib}
805247835Skib
/*
 * Evict @bo from its current placement.  Waits for outstanding GPU work,
 * asks the driver (evict_flags) where evicted buffers of this kind should
 * go, finds space there, and moves the buffer.  Caller must hold the
 * reservation.  On failure the placement state is dumped for debugging
 * unless the wait was merely interrupted.
 */
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	mtx_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	mtx_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			printf("[TTM] Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	MPASS(ttm_bo_is_reserved(bo));

	/* Start from the current placement, but with no node or io state. */
	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	/* Let the driver fill in where this buffer may be evicted to. */
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			printf("[TTM] Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			printf("[TTM] Buffer eviction failed\n");
		/* Give back the space we found but could not move into. */
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}
860247835Skib
/*
 * Evict the first reservable buffer on @mem_type's LRU list.
 *
 * Returns -EBUSY if the list is empty or every entry is currently
 * reserved, otherwise the result of the cleanup/eviction.
 */
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				bool interruptible,
				bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	/* Find the first LRU entry we can reserve without blocking. */
	mtx_lock(&glob->lru_lock);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		mtx_unlock(&glob->lru_lock);
		return ret;
	}

	/* Keep the object alive while we work on it. */
	refcount_acquire(&bo->list_kref);

	/*
	 * If the buffer is already on the delayed-destroy list, finish
	 * that destruction instead of evicting; this also drops the
	 * lru_lock and the reservation.
	 */
	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		if (refcount_release(&bo->list_kref))
			ttm_bo_release_list(bo);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	mtx_unlock(&glob->lru_lock);

	MPASS(ret == 0);

	/* Drop the list references the LRU removal handed back to us. */
	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	if (refcount_release(&bo->list_kref))
		ttm_bo_release_list(bo);
	return ret;
}
907247835Skib
908247835Skibvoid ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
909247835Skib{
910247835Skib	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
911247835Skib
912247835Skib	if (mem->mm_node)
913247835Skib		(*man->func->put_node)(man, mem);
914247835Skib}
915247835Skib
916247835Skib/**
917247835Skib * Repeatedly evict memory from the LRU for @mem_type until we create enough
918247835Skib * space, or we've evicted everything and there isn't enough space.
919247835Skib */
920247835Skibstatic int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
921247835Skib					uint32_t mem_type,
922247835Skib					struct ttm_placement *placement,
923247835Skib					struct ttm_mem_reg *mem,
924247835Skib					bool interruptible,
925247835Skib					bool no_wait_gpu)
926247835Skib{
927247835Skib	struct ttm_bo_device *bdev = bo->bdev;
928247835Skib	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
929247835Skib	int ret;
930247835Skib
931247835Skib	do {
932247835Skib		ret = (*man->func->get_node)(man, bo, placement, mem);
933247835Skib		if (unlikely(ret != 0))
934247835Skib			return ret;
935247835Skib		if (mem->mm_node)
936247835Skib			break;
937247835Skib		ret = ttm_mem_evict_first(bdev, mem_type,
938247835Skib					  interruptible, no_wait_gpu);
939247835Skib		if (unlikely(ret != 0))
940247835Skib			return ret;
941247835Skib	} while (1);
942247835Skib	if (mem->mm_node == NULL)
943247835Skib		return -ENOMEM;
944247835Skib	mem->mem_type = mem_type;
945247835Skib	return 0;
946247835Skib}
947247835Skib
948247835Skibstatic uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
949247835Skib				      uint32_t cur_placement,
950247835Skib				      uint32_t proposed_placement)
951247835Skib{
952247835Skib	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
953247835Skib	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
954247835Skib
955247835Skib	/**
956247835Skib	 * Keep current caching if possible.
957247835Skib	 */
958247835Skib
959247835Skib	if ((cur_placement & caching) != 0)
960247835Skib		result |= (cur_placement & caching);
961247835Skib	else if ((man->default_caching & caching) != 0)
962247835Skib		result |= man->default_caching;
963247835Skib	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
964247835Skib		result |= TTM_PL_FLAG_CACHED;
965247835Skib	else if ((TTM_PL_FLAG_WC & caching) != 0)
966247835Skib		result |= TTM_PL_FLAG_WC;
967247835Skib	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
968247835Skib		result |= TTM_PL_FLAG_UNCACHED;
969247835Skib
970247835Skib	return result;
971247835Skib}
972247835Skib
973247835Skibstatic bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
974247835Skib				 uint32_t mem_type,
975247835Skib				 uint32_t proposed_placement,
976247835Skib				 uint32_t *masked_placement)
977247835Skib{
978247835Skib	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
979247835Skib
980247835Skib	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
981247835Skib		return false;
982247835Skib
983247835Skib	if ((proposed_placement & man->available_caching) == 0)
984247835Skib		return false;
985247835Skib
986247835Skib	cur_flags |= (proposed_placement & man->available_caching);
987247835Skib
988247835Skib	*masked_placement = cur_flags;
989247835Skib	return true;
990247835Skib}
991247835Skib
992247835Skib/**
993247835Skib * Creates space for memory region @mem according to its type.
994247835Skib *
995247835Skib * This function first searches for free space in compatible memory types in
996247835Skib * the priority order defined by the driver.  If free space isn't found, then
997247835Skib * ttm_bo_mem_force_space is attempted in priority order to evict and find
998247835Skib * space.
999247835Skib */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	/*
	 * Phase 1: try each preferred placement in driver priority
	 * order, taking free space only (no eviction).
	 */
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
						mem_type,
						placement->placement[i],
						&cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->placement[i],
				~TTM_PL_MASK_MEMTYPE);

		/* System memory needs no mm node; accept immediately. */
		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = (*man->func->get_node)(man, bo, placement, mem);
			if (unlikely(ret))
				return ret;
		}
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	/* No compatible, enabled memory type at all. */
	if (!type_found)
		return -EINVAL;

	/*
	 * Phase 2: retry with the busy_placement list, evicting other
	 * buffers to force space if necessary.
	 */
	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man,
						mem_type,
						placement->busy_placement[i],
						&cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
				~TTM_PL_MASK_MEMTYPE);


		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
						interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		/* Remember interruptions so we report them, not -ENOMEM. */
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
	return ret;
}
1105247835Skib
/*
 * Move a reserved buffer to a placement satisfying @placement: wait
 * for the GPU, find space and hand the move to
 * ttm_bo_handle_move_mem().  Frees the found space again on failure.
 */
static
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;
	struct ttm_bo_device *bdev = bo->bdev;

	MPASS(ttm_bo_is_reserved(bo));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	mtx_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	mtx_unlock(&bdev->fence_lock);
	if (ret)
		return ret;
	/* Describe the region we need; mm_node is filled in below. */
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	/* On failure, give back any space ttm_bo_mem_space() found. */
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}
1147247835Skib
1148247835Skibstatic int ttm_bo_mem_compat(struct ttm_placement *placement,
1149247835Skib			     struct ttm_mem_reg *mem)
1150247835Skib{
1151247835Skib	int i;
1152247835Skib
1153247835Skib	if (mem->mm_node && placement->lpfn != 0 &&
1154247835Skib	    (mem->start < placement->fpfn ||
1155247835Skib	     mem->start + mem->num_pages > placement->lpfn))
1156247835Skib		return -1;
1157247835Skib
1158247835Skib	for (i = 0; i < placement->num_placement; i++) {
1159247835Skib		if ((placement->placement[i] & mem->placement &
1160247835Skib			TTM_PL_MASK_CACHING) &&
1161247835Skib			(placement->placement[i] & mem->placement &
1162247835Skib			TTM_PL_MASK_MEM))
1163247835Skib			return i;
1164247835Skib	}
1165247835Skib	return -1;
1166247835Skib}
1167247835Skib
/*
 * Ensure the reserved buffer @bo resides in a placement compatible
 * with @placement, moving it if necessary, and attach a TTM if the
 * final placement is system memory.
 *
 * Returns 0 on success, -EINVAL for an impossible page range, or the
 * error from the move / TTM allocation.
 */
int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret;

	MPASS(ttm_bo_is_reserved(bo));
	/* Check that range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
			(placement->lpfn - placement->fpfn) < bo->num_pages)
			return -EINVAL;
	/*
	 * Check whether we need to move buffer.
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
	if (ret < 0) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
1208247835Skib
/*
 * Sanity-check that @bo fits inside the page range requested by
 * @placement.  The check only fires as an assertion in debug
 * kernels; the function always returns 0.
 */
int ttm_bo_check_placement(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	MPASS(!((placement->fpfn || placement->lpfn) &&
	    (bo->mem.num_pages > (placement->lpfn - placement->fpfn))));

	return 0;
}
1217247835Skib
/*
 * Initialize a (caller-allocated) buffer object and validate it into
 * its initial placement.
 *
 * On error the object is destroyed: via @destroy if given, otherwise
 * by freeing it from M_TTM_BO.  On success the object is returned
 * unreserved with one reference held by the caller.
 *
 * @acc_size must come from ttm_bo_acc_size()/ttm_bo_dma_acc_size()
 * and is charged against the global memory accounting.
 */
int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct vm_object *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;

	/* Account the bookkeeping memory before touching the object. */
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		printf("[TTM] Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			free(bo, M_TTM_BO);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printf("[TTM] Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			free(bo, M_TTM_BO);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	refcount_init(&bo->kref, 1);
	refcount_init(&bo->list_kref, 1);
	atomic_set(&bo->cpu_writers, 0);
	/* The object starts out reserved; unreserved on success below. */
	atomic_set(&bo->reserved, 1);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	/* New objects start in system memory with cached placement. */
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, placement);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	/* Move the buffer into its requested initial placement. */
	ret = ttm_bo_validate(bo, placement, interruptible, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	/* ttm_bo_unref() drops the last reference and destroys bo. */
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
1311247835Skib
1312247835Skibsize_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1313247835Skib		       unsigned long bo_size,
1314247835Skib		       unsigned struct_size)
1315247835Skib{
1316247835Skib	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1317247835Skib	size_t size = 0;
1318247835Skib
1319247835Skib	size += ttm_round_pot(struct_size);
1320247835Skib	size += PAGE_ALIGN(npages * sizeof(void *));
1321247835Skib	size += ttm_round_pot(sizeof(struct ttm_tt));
1322247835Skib	return size;
1323247835Skib}
1324247835Skib
1325247835Skibsize_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1326247835Skib			   unsigned long bo_size,
1327247835Skib			   unsigned struct_size)
1328247835Skib{
1329247835Skib	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1330247835Skib	size_t size = 0;
1331247835Skib
1332247835Skib	size += ttm_round_pot(struct_size);
1333247835Skib	size += PAGE_ALIGN(npages * sizeof(void *));
1334247835Skib	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
1335247835Skib	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
1336247835Skib	return size;
1337247835Skib}
1338247835Skib
1339247835Skibint ttm_bo_create(struct ttm_bo_device *bdev,
1340247835Skib			unsigned long size,
1341247835Skib			enum ttm_bo_type type,
1342247835Skib			struct ttm_placement *placement,
1343247835Skib			uint32_t page_alignment,
1344247835Skib			bool interruptible,
1345247835Skib			struct vm_object *persistent_swap_storage,
1346247835Skib			struct ttm_buffer_object **p_bo)
1347247835Skib{
1348247835Skib	struct ttm_buffer_object *bo;
1349247835Skib	size_t acc_size;
1350247835Skib	int ret;
1351247835Skib
1352247835Skib	bo = malloc(sizeof(*bo), M_TTM_BO, M_WAITOK | M_ZERO);
1353247835Skib	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1354247835Skib	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1355247835Skib			  interruptible, persistent_swap_storage, acc_size,
1356247835Skib			  NULL, NULL);
1357247835Skib	if (likely(ret == 0))
1358247835Skib		*p_bo = bo;
1359247835Skib
1360247835Skib	return ret;
1361247835Skib}
1362247835Skib
/*
 * Evict every buffer on @mem_type's LRU list.
 *
 * If @allow_errors is true, the first eviction failure is returned
 * to the caller; otherwise failures are only logged and the loop
 * continues.  Returns 0 when the list has been emptied.
 */
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
					unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	mtx_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		/* ttm_mem_evict_first() takes the lock itself. */
		mtx_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				printf("[TTM] Cleanup eviction failed\n");
			}
		}
		mtx_lock(&glob->lru_lock);
	}
	mtx_unlock(&glob->lru_lock);
	return 0;
}
1390247835Skib
1391247835Skibint ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1392247835Skib{
1393247835Skib	struct ttm_mem_type_manager *man;
1394247835Skib	int ret = -EINVAL;
1395247835Skib
1396247835Skib	if (mem_type >= TTM_NUM_MEM_TYPES) {
1397247835Skib		printf("[TTM] Illegal memory type %d\n", mem_type);
1398247835Skib		return ret;
1399247835Skib	}
1400247835Skib	man = &bdev->man[mem_type];
1401247835Skib
1402247835Skib	if (!man->has_type) {
1403247835Skib		printf("[TTM] Trying to take down uninitialized memory manager type %u\n",
1404247835Skib		       mem_type);
1405247835Skib		return ret;
1406247835Skib	}
1407247835Skib
1408247835Skib	man->use_type = false;
1409247835Skib	man->has_type = false;
1410247835Skib
1411247835Skib	ret = 0;
1412247835Skib	if (mem_type > 0) {
1413247835Skib		ttm_bo_force_list_clean(bdev, mem_type, false);
1414247835Skib
1415247835Skib		ret = (*man->func->takedown)(man);
1416247835Skib	}
1417247835Skib
1418247835Skib	return ret;
1419247835Skib}
1420247835Skib
1421247835Skibint ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1422247835Skib{
1423247835Skib	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1424247835Skib
1425247835Skib	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1426247835Skib		printf("[TTM] Illegal memory manager memory type %u\n", mem_type);
1427247835Skib		return -EINVAL;
1428247835Skib	}
1429247835Skib
1430247835Skib	if (!man->has_type) {
1431247835Skib		printf("[TTM] Memory type %u has not been initialized\n", mem_type);
1432247835Skib		return 0;
1433247835Skib	}
1434247835Skib
1435247835Skib	return ttm_bo_force_list_clean(bdev, mem_type, true);
1436247835Skib}
1437247835Skib
/*
 * Initialize the memory manager for memory type @type with capacity
 * @p_size pages.  The driver's init_mem_type() fills in the manager
 * ops; non-system types additionally get their allocator initialized.
 *
 * Returns 0 on success or the driver/allocator error.
 */
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	MPASS(type < TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	/* Double initialization is a caller bug. */
	MPASS(!man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	sx_init(&man->io_reserve_mutex, "ttmman");
	INIT_LIST_HEAD(&man->io_reserve_lru);

	/* The driver chooses the manager ops and flags for this type. */
	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	/* TTM_PL_SYSTEM has no range allocator to initialize. */
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
1471247835Skib
/*
 * Final teardown of the global BO state, run when the last kobj
 * reference is dropped: unregister the swapout shrinker and free
 * the dummy read page.
 */
static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob)
{

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	vm_page_free(glob->dummy_read_page);
}
1478247835Skib
/*
 * Drop one reference on the global BO state, tearing it down when
 * the last reference goes away.
 */
void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	if (refcount_release(&glob->kobj_ref))
		ttm_bo_global_kobj_release(glob);
}
1486247835Skib
/*
 * One-time initialization of the global BO state: locks, the
 * uncacheable dummy read page, the swap/device lists and the swapout
 * shrinker.  On failure the glob structure itself is freed.
 *
 * Returns 0 on success, -ENOMEM if the dummy page cannot be
 * allocated, or the shrinker registration error.
 */
int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;
	int tries;

	sx_init(&glob->device_list_mutex, "ttmdlm");
	mtx_init(&glob->lru_lock, "ttmlru", NULL, MTX_DEF);
	glob->mem_glob = bo_ref->mem_glob;
	tries = 0;
retry:
	glob->dummy_read_page = vm_page_alloc_contig(NULL, 0,
	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ,
	    1, 0, VM_MAX_ADDRESS, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);

	if (unlikely(glob->dummy_read_page == NULL)) {
		/*
		 * Ask the pageout daemon to reclaim pages once before
		 * giving up on the contiguous allocation.
		 */
		if (tries < 1) {
			vm_pageout_grow_cache(tries, 0, VM_MAX_ADDRESS);
			tries++;
			goto retry;
		}
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		printf("[TTM] Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	refcount_init(&glob->kobj_ref, 1);
	return (0);

out_no_shrink:
	vm_page_free(glob->dummy_read_page);
out_no_drp:
	free(glob, M_DRM_GLOBAL);
	return ret;
}
1535247835Skib
/*
 * Tear down a ttm_bo_device: clean all memory managers, detach from
 * the global device list, cancel the delayed-destroy task, flush
 * pending destroys and release the address-space allocator.
 *
 * Returns 0, or -EBUSY if some memory manager could not be cleaned.
 */
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	/* Walk the memory types from highest to lowest. */
	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			/* The system type (TTM_PL_SYSTEM) needs no takedown. */
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				printf("[TTM] DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	sx_xlock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	sx_xunlock(&glob->device_list_mutex);

	/* Stop the delayed-destroy task and wait for it to finish. */
	if (taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, NULL))
		taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);

	/* Run delayed deletes until the list is fully drained. */
	while (ttm_bo_delayed_delete(bdev, true))
		;

	mtx_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	mtx_unlock(&glob->lru_lock);

	MPASS(drm_mm_clean(&bdev->addr_space_mm));
	rw_wlock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	rw_wunlock(&bdev->vm_lock);

	return ret;
}
1581247835Skib
/*
 * Initialize a ttm_bo_device: set up the system memory type, the
 * mmap address-space allocator starting at @file_page_offset, the
 * delayed-destroy task, and register the device with @glob.
 *
 * Returns 0 on success or the first initialization error, after
 * undoing any partial setup.
 */
int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	rw_init(&bdev->vm_lock, "ttmvml");
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	/* Address space for userspace mmap offsets. */
	RB_INIT(&bdev->addr_space_rb);
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;

	TIMEOUT_TASK_INIT(taskqueue_thread, &bdev->wq, 0,
	    ttm_bo_delayed_workqueue, bdev);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	mtx_init(&bdev->fence_lock, "ttmfence", NULL, MTX_DEF);
	sx_xlock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	sx_xunlock(&glob->device_list_mutex);

	return 0;
out_no_addr_mm:
	/* Undo the system memory type initialization. */
	ttm_bo_clean_mm(bdev, 0);
out_no_sys:
	return ret;
}
1626247835Skib
1627247835Skib/*
1628247835Skib * buffer object vm functions.
1629247835Skib */
1630247835Skib
1631247835Skibbool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1632247835Skib{
1633247835Skib	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1634247835Skib
1635247835Skib	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1636247835Skib		if (mem->mem_type == TTM_PL_SYSTEM)
1637247835Skib			return false;
1638247835Skib
1639247835Skib		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1640247835Skib			return false;
1641247835Skib
1642247835Skib		if (mem->placement & TTM_PL_FLAG_CACHED)
1643247835Skib			return false;
1644247835Skib	}
1645247835Skib	return true;
1646247835Skib}
1647247835Skib
/*
 * Tear down CPU mappings of @bo and release its VM io reservation.
 * Caller must hold the memory-type manager's io lock; see
 * ttm_bo_unmap_virtual() for the locking wrapper.
 */
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{

	ttm_bo_release_mmap(bo);
	ttm_mem_io_free_vm(bo);
}
1654247835Skib
/*
 * Locking wrapper around ttm_bo_unmap_virtual_locked(): acquires the
 * io lock of the manager for @bo's current memory type.
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
1664247835Skib
/*
 * Insert @bo into the device's address-space red-black tree, so that
 * mmap offsets can be resolved back to the buffer object.
 */
static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	/* The caller acquired bdev->vm_lock. */
	RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo);
}
1672247835Skib
1673247835Skib/**
1674247835Skib * ttm_bo_setup_vm:
1675247835Skib *
1676247835Skib * @bo: the buffer to allocate address space for
1677247835Skib *
1678247835Skib * Allocate address space in the drm device so that applications
1679247835Skib * can mmap the buffer and access the contents. This only
1680247835Skib * applies to ttm_bo_type_device objects as others are not
1681247835Skib * placed in the drm device address space.
1682247835Skib */
1683247835Skib
static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	/*
	 * Pre-populate the drm_mm free-node cache outside vm_lock so the
	 * atomic allocation below cannot fail for lack of memory.
	 */
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	rw_wlock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	/* No hole large enough for this object's pages. */
	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	/*
	 * The pre-allocated node cache was exhausted by a concurrent
	 * allocator; drop the lock and refill it before retrying.
	 */
	if (unlikely(bo->vm_node == NULL)) {
		rw_wunlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	rw_wunlock(&bdev->vm_lock);
	/* The mmap offset handed to userspace is the node start in bytes. */
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	rw_wunlock(&bdev->vm_lock);
	return ret;
}
1720247835Skib
/*
 * Wait for all GPU activity (fences) on a buffer object to complete.
 *
 * Called with bdev->fence_lock held (the unlock-before-relock pattern
 * below depends on that); returns with it held again.  Returns 0 on
 * success, -EBUSY if no_wait is set and the fence is still pending, or
 * the error from the driver's sync_obj_wait (e.g. on interruption).
 */
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	struct ttm_bo_device *bdev = bo->bdev;
	void *sync_obj;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		/*
		 * Already-signaled fence: detach it, then drop the lock
		 * around the unref since it may call into the driver.
		 */
		if (driver->sync_obj_signaled(bo->sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			mtx_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&tmp_obj);
			mtx_lock(&bdev->fence_lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		/*
		 * Take a private reference so the fence stays valid while
		 * we sleep on it with fence_lock dropped.
		 */
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		mtx_unlock(&bdev->fence_lock);
		ret = driver->sync_obj_wait(sync_obj,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			mtx_lock(&bdev->fence_lock);
			return ret;
		}
		mtx_lock(&bdev->fence_lock);
		/*
		 * Only detach the fence if it is still the one we waited
		 * on; another thread may have replaced it meanwhile.
		 */
		if (likely(bo->sync_obj == sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			mtx_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			mtx_lock(&bdev->fence_lock);
		} else {
			/* Fence was replaced: drop only our reference
			 * and loop to wait on the new one. */
			mtx_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			mtx_lock(&bdev->fence_lock);
		}
	}
	return 0;
}
1774247835Skib
1775247835Skibint ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1776247835Skib{
1777247835Skib	struct ttm_bo_device *bdev = bo->bdev;
1778247835Skib	int ret = 0;
1779247835Skib
1780247835Skib	/*
1781247835Skib	 * Using ttm_bo_reserve makes sure the lru lists are updated.
1782247835Skib	 */
1783247835Skib
1784247835Skib	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1785247835Skib	if (unlikely(ret != 0))
1786247835Skib		return ret;
1787247835Skib	mtx_lock(&bdev->fence_lock);
1788247835Skib	ret = ttm_bo_wait(bo, false, true, no_wait);
1789247835Skib	mtx_unlock(&bdev->fence_lock);
1790247835Skib	if (likely(ret == 0))
1791247835Skib		atomic_inc(&bo->cpu_writers);
1792247835Skib	ttm_bo_unreserve(bo);
1793247835Skib	return ret;
1794247835Skib}
1795247835Skib
/*
 * Release CPU write access previously obtained with
 * ttm_bo_synccpu_write_grab().
 */
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
1800247835Skib
1801247835Skib/**
1802247835Skib * A buffer object shrink method that tries to swap out the first
1803247835Skib * buffer object on the bo_global::swap_lru list.
1804247835Skib */
1805247835Skib
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	/* Find the first swap-LRU entry we can reserve without blocking. */
	mtx_lock(&glob->lru_lock);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	/* Nothing reservable (or list empty): ret is still -EBUSY. */
	if (ret) {
		mtx_unlock(&glob->lru_lock);
		return ret;
	}

	refcount_acquire(&bo->list_kref);

	/* Object already pending destruction: finish that instead. */
	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		if (refcount_release(&bo->list_kref))
			ttm_bo_release_list(bo);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	mtx_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	mtx_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	mtx_unlock(&bo->bdev->fence_lock);

	if (unlikely(ret != 0))
		goto out;

	/* Migrate to cached system memory if not already there. */
	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	/* Kill CPU mappings so faults repopulate after swap-in. */
	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 *
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wakeup(bo);
	if (refcount_release(&bo->list_kref))
		ttm_bo_release_list(bo);
	return ret;
}
1891247835Skib
1892247835Skibvoid ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1893247835Skib{
1894247835Skib	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1895247835Skib		;
1896247835Skib}
1897