1235783Skib/**************************************************************************
2235783Skib *
3235783Skib * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
4235783Skib * All Rights Reserved.
5235783Skib *
6235783Skib * Permission is hereby granted, free of charge, to any person obtaining a
7235783Skib * copy of this software and associated documentation files (the
8235783Skib * "Software"), to deal in the Software without restriction, including
9235783Skib * without limitation the rights to use, copy, modify, merge, publish,
10235783Skib * distribute, sub license, and/or sell copies of the Software, and to
11235783Skib * permit persons to whom the Software is furnished to do so, subject to
12235783Skib * the following conditions:
13235783Skib *
14235783Skib * The above copyright notice and this permission notice (including the
15235783Skib * next paragraph) shall be included in all copies or substantial portions
16235783Skib * of the Software.
17235783Skib *
18235783Skib * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19235783Skib * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20235783Skib * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21235783Skib * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22235783Skib * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23235783Skib * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24235783Skib * USE OR OTHER DEALINGS IN THE SOFTWARE.
25235783Skib *
26235783Skib *
27235783Skib **************************************************************************/
28235783Skib
29235783Skib#include <sys/cdefs.h>
30235783Skib__FBSDID("$FreeBSD$");
31235783Skib
32235783Skib/*
33235783Skib * Generic simple memory manager implementation. Intended to be used as a base
34235783Skib * class implementation for more advanced memory managers.
35235783Skib *
36235783Skib * Note that the algorithm used is quite simple and there might be substantial
37235783Skib * performance gains if a smarter free list is implemented. Currently it is just an
38235783Skib * unordered stack of free regions. This could easily be improved if an RB-tree
39235783Skib * is used instead. At least if we expect heavy fragmentation.
40235783Skib *
41235783Skib * Aligned allocations can also see improvement.
42235783Skib *
43235783Skib * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
45235783Skib */
46235783Skib
47235783Skib#include <dev/drm2/drmP.h>
48235783Skib#include <dev/drm2/drm_mm.h>
49235783Skib
50235783Skib#define MM_UNUSED_TARGET 4
51235783Skib
52235783Skibstatic struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
53235783Skib{
54235783Skib	struct drm_mm_node *child;
55235783Skib
56235783Skib	child = malloc(sizeof(*child), DRM_MEM_MM, M_ZERO |
57235783Skib	    (atomic ? M_NOWAIT : M_WAITOK));
58235783Skib
59235783Skib	if (unlikely(child == NULL)) {
60235783Skib		mtx_lock(&mm->unused_lock);
61235783Skib		if (list_empty(&mm->unused_nodes))
62235783Skib			child = NULL;
63235783Skib		else {
64235783Skib			child =
65235783Skib			    list_entry(mm->unused_nodes.next,
66235783Skib				       struct drm_mm_node, node_list);
67235783Skib			list_del(&child->node_list);
68235783Skib			--mm->num_unused;
69235783Skib		}
70235783Skib		mtx_unlock(&mm->unused_lock);
71235783Skib	}
72235783Skib	return child;
73235783Skib}
74235783Skib
75235783Skibint drm_mm_pre_get(struct drm_mm *mm)
76235783Skib{
77235783Skib	struct drm_mm_node *node;
78235783Skib
79235783Skib	mtx_lock(&mm->unused_lock);
80235783Skib	while (mm->num_unused < MM_UNUSED_TARGET) {
81235783Skib		mtx_unlock(&mm->unused_lock);
82235783Skib		node = malloc(sizeof(*node), DRM_MEM_MM, M_WAITOK);
83235783Skib		mtx_lock(&mm->unused_lock);
84235783Skib
85235783Skib		if (unlikely(node == NULL)) {
86235783Skib			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
87235783Skib			mtx_unlock(&mm->unused_lock);
88235783Skib			return ret;
89235783Skib		}
90235783Skib		++mm->num_unused;
91235783Skib		list_add_tail(&node->node_list, &mm->unused_nodes);
92235783Skib	}
93235783Skib	mtx_unlock(&mm->unused_lock);
94235783Skib	return 0;
95235783Skib}
96235783Skib
/*
 * Start address of the hole that follows @hole_node, i.e. the first
 * byte past the node's own allocation.
 */
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}
101235783Skib
/*
 * End address of the hole that follows @hole_node: the start of the
 * next node on the circular node list.
 */
static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next_node =
		list_entry(hole_node->node_list.next, struct drm_mm_node,
			   node_list);

	return next_node->start;
}
110235783Skib
/*
 * Carve @node (@size bytes, aligned to @alignment when non-zero) out of
 * the hole that follows @hole_node, updating the hole-stack bookkeeping.
 * The caller must already have verified the hole is big enough (e.g. via
 * drm_mm_search_free()).
 */
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	KASSERT(hole_node->hole_follows && !node->allocated, ("hole_node"));

	if (alignment)
		tmp = hole_start % alignment;

	if (!tmp) {
		/* Allocation starts exactly at the hole start: the hole in
		 * front of @node is consumed, so take @hole_node off the
		 * hole stack. */
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	} else
		/* Padding needed to satisfy the alignment. */
		wasted = alignment - tmp;

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	KASSERT(node->start + node->size <= hole_end, ("hole pos"));

	/* Any space left between the new node and the next one stays a
	 * hole and goes onto the hole stack. */
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}
148235783Skib
149235783Skibstruct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
150235783Skib					     unsigned long size,
151235783Skib					     unsigned alignment,
152235783Skib					     int atomic)
153235783Skib{
154235783Skib	struct drm_mm_node *node;
155235783Skib
156235783Skib	node = drm_mm_kmalloc(hole_node->mm, atomic);
157235783Skib	if (unlikely(node == NULL))
158235783Skib		return NULL;
159235783Skib
160235783Skib	drm_mm_insert_helper(hole_node, node, size, alignment);
161235783Skib
162235783Skib	return node;
163235783Skib}
164235783Skib
165235783Skibint drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
166235783Skib		       unsigned long size, unsigned alignment)
167235783Skib{
168235783Skib	struct drm_mm_node *hole_node;
169235783Skib
170235783Skib	hole_node = drm_mm_search_free(mm, size, alignment, 0);
171235783Skib	if (!hole_node)
172235783Skib		return -ENOSPC;
173235783Skib
174235783Skib	drm_mm_insert_helper(hole_node, node, size, alignment);
175235783Skib
176235783Skib	return 0;
177235783Skib}
178235783Skib
/*
 * Like drm_mm_insert_helper(), but additionally constrains the
 * allocation to [start, end): any part of the hole lying before @start
 * is counted as wasted space.
 */
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	KASSERT(hole_node->hole_follows && !node->allocated, ("hole_node"));

	/* Skip the part of the hole below the allowed range. */
	if (hole_start < start)
		wasted += start - hole_start;
	/* Then pad further for alignment if needed. */
	if (alignment)
		tmp = (hole_start + wasted) % alignment;

	if (tmp)
		wasted += alignment - tmp;

	/* No waste: the hole in front of @node disappears entirely. */
	if (!wasted) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	}

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	KASSERT(node->start + node->size <= hole_end, ("hole_end"));
	KASSERT(node->start + node->size <= end, ("end"));

	/* Leftover space after the node remains a hole. */
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}
222235783Skib
223235783Skibstruct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
224235783Skib						unsigned long size,
225235783Skib						unsigned alignment,
226235783Skib						unsigned long start,
227235783Skib						unsigned long end,
228235783Skib						int atomic)
229235783Skib{
230235783Skib	struct drm_mm_node *node;
231235783Skib
232235783Skib	node = drm_mm_kmalloc(hole_node->mm, atomic);
233235783Skib	if (unlikely(node == NULL))
234235783Skib		return NULL;
235235783Skib
236235783Skib	drm_mm_insert_helper_range(hole_node, node, size, alignment,
237235783Skib				   start, end);
238235783Skib
239235783Skib	return node;
240235783Skib}
241235783Skib
242235783Skibint drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
243235783Skib				unsigned long size, unsigned alignment,
244235783Skib				unsigned long start, unsigned long end)
245235783Skib{
246235783Skib	struct drm_mm_node *hole_node;
247235783Skib
248235783Skib	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
249235783Skib						start, end, 0);
250235783Skib	if (!hole_node)
251235783Skib		return -ENOSPC;
252235783Skib
253235783Skib	drm_mm_insert_helper_range(hole_node, node, size, alignment,
254235783Skib				   start, end);
255235783Skib
256235783Skib	return 0;
257235783Skib}
258235783Skib
/*
 * Unlink an allocated @node from the manager, merging its space into
 * the hole that follows the previous node.  The node structure itself
 * is not freed (see drm_mm_put_block() for that).
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	/* Must not be removed while an eviction scan references it. */
	KASSERT(!node->scanned_block && !node->scanned_prev_free
	    && !node->scanned_next_free, ("node"));

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	/* hole_follows must agree with the actual geometry. */
	if (node->hole_follows) {
		KASSERT(drm_mm_hole_node_start(node)
			!= drm_mm_hole_node_end(node), ("hole_follows"));
		list_del(&node->hole_stack);
	} else
		KASSERT(drm_mm_hole_node_start(node)
		       == drm_mm_hole_node_end(node), ("!hole_follows"));

	/* The freed space becomes (part of) a hole after prev_node. */
	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
287235783Skib
288235783Skib/*
289235783Skib * Put a block. Merge with the previous and / or next block if they are free.
290235783Skib * Otherwise add to the free stack.
291235783Skib */
292235783Skib
293235783Skibvoid drm_mm_put_block(struct drm_mm_node *node)
294235783Skib{
295235783Skib	struct drm_mm *mm = node->mm;
296235783Skib
297235783Skib	drm_mm_remove_node(node);
298235783Skib
299235783Skib	mtx_lock(&mm->unused_lock);
300235783Skib	if (mm->num_unused < MM_UNUSED_TARGET) {
301235783Skib		list_add(&node->node_list, &mm->unused_nodes);
302235783Skib		++mm->num_unused;
303235783Skib	} else
304235783Skib		free(node, DRM_MEM_MM);
305235783Skib	mtx_unlock(&mm->unused_lock);
306235783Skib}
307235783Skib
/*
 * Nonzero when the hole [start, end) can accommodate @size bytes at the
 * requested @alignment (0 means no alignment constraint).
 */
static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned long pad = 0;

	/* Quick reject: hole smaller than the request even unaligned. */
	if (end - start < size)
		return 0;

	if (alignment != 0) {
		unsigned long rem = start % alignment;
		if (rem != 0)
			pad = alignment - rem;
	}

	return end >= start + size + pad;
}
328235783Skib
329235783Skib
/*
 * Walk the hole stack looking for a hole that fits @size bytes at
 * @alignment.  With best_match == 0 the first fitting hole wins;
 * otherwise the search continues for the tightest fit.  Returns the
 * node preceding the chosen hole, or NULL when nothing fits.
 */
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		KASSERT(entry->hole_follows, ("hole_follows"));
		if (!check_free_hole(drm_mm_hole_node_start(entry),
				     drm_mm_hole_node_end(entry),
				     size, alignment))
			continue;

		if (!best_match)
			return entry;

		/* NOTE(review): the best-match metric is entry->size (the
		 * preceding node's size), not the hole's size — matches the
		 * upstream code of this vintage; verify before changing. */
		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
359235783Skib
/*
 * Range-restricted variant of drm_mm_search_free(): each hole is first
 * clamped to [start, end) before checking whether @size bytes fit at
 * @alignment.  Returns the node preceding the chosen hole, or NULL.
 * Must not be called while an eviction scan is in progress.
 */
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	KASSERT(!mm->scanned_blocks, ("scanned"));

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		/* Clamp the hole to the requested range. */
		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
			start : drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
			end : drm_mm_hole_node_end(entry);

		KASSERT(entry->hole_follows, ("hole_follows"));
		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		/* NOTE(review): best-match metric is entry->size, not the
		 * hole's size — matches upstream of this vintage; verify. */
		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
397235783Skib
398235783Skibvoid drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
399235783Skib{
400235783Skib	list_replace(&old->node_list, &new->node_list);
401235783Skib	list_replace(&old->hole_stack, &new->hole_stack);
402235783Skib	new->hole_follows = old->hole_follows;
403235783Skib	new->mm = old->mm;
404235783Skib	new->start = old->start;
405235783Skib	new->size = old->size;
406235783Skib
407235783Skib	old->allocated = 0;
408235783Skib	new->allocated = 1;
409235783Skib}
410235783Skib
411235783Skibvoid drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
412235783Skib		      unsigned alignment)
413235783Skib{
414235783Skib	mm->scan_alignment = alignment;
415235783Skib	mm->scan_size = size;
416235783Skib	mm->scanned_blocks = 0;
417235783Skib	mm->scan_hit_start = 0;
418235783Skib	mm->scan_hit_size = 0;
419235783Skib	mm->scan_check_range = 0;
420235783Skib	mm->prev_scanned_node = NULL;
421235783Skib}
422235783Skib
423235783Skibvoid drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
424235783Skib				 unsigned alignment,
425235783Skib				 unsigned long start,
426235783Skib				 unsigned long end)
427235783Skib{
428235783Skib	mm->scan_alignment = alignment;
429235783Skib	mm->scan_size = size;
430235783Skib	mm->scanned_blocks = 0;
431235783Skib	mm->scan_hit_start = 0;
432235783Skib	mm->scan_hit_size = 0;
433235783Skib	mm->scan_start = start;
434235783Skib	mm->scan_end = end;
435235783Skib	mm->scan_check_range = 1;
436235783Skib	mm->prev_scanned_node = NULL;
437235783Skib}
438235783Skib
439235783Skibint drm_mm_scan_add_block(struct drm_mm_node *node)
440235783Skib{
441235783Skib	struct drm_mm *mm = node->mm;
442235783Skib	struct drm_mm_node *prev_node;
443235783Skib	unsigned long hole_start, hole_end;
444235783Skib	unsigned long adj_start;
445235783Skib	unsigned long adj_end;
446235783Skib
447235783Skib	mm->scanned_blocks++;
448235783Skib
449235783Skib	KASSERT(!node->scanned_block, ("node->scanned_block"));
450235783Skib	node->scanned_block = 1;
451235783Skib
452235783Skib	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
453235783Skib			       node_list);
454235783Skib
455235783Skib	node->scanned_preceeds_hole = prev_node->hole_follows;
456235783Skib	prev_node->hole_follows = 1;
457235783Skib	list_del(&node->node_list);
458235783Skib	node->node_list.prev = &prev_node->node_list;
459235783Skib	node->node_list.next = &mm->prev_scanned_node->node_list;
460235783Skib	mm->prev_scanned_node = node;
461235783Skib
462235783Skib	hole_start = drm_mm_hole_node_start(prev_node);
463235783Skib	hole_end = drm_mm_hole_node_end(prev_node);
464235783Skib	if (mm->scan_check_range) {
465235783Skib		adj_start = hole_start < mm->scan_start ?
466235783Skib			mm->scan_start : hole_start;
467235783Skib		adj_end = hole_end > mm->scan_end ?
468235783Skib			mm->scan_end : hole_end;
469235783Skib	} else {
470235783Skib		adj_start = hole_start;
471235783Skib		adj_end = hole_end;
472235783Skib	}
473235783Skib
474235783Skib	if (check_free_hole(adj_start , adj_end,
475235783Skib			    mm->scan_size, mm->scan_alignment)) {
476235783Skib		mm->scan_hit_start = hole_start;
477235783Skib		mm->scan_hit_size = hole_end;
478235783Skib
479235783Skib		return 1;
480235783Skib	}
481235783Skib
482235783Skib	return 0;
483235783Skib}
484235783Skib
/*
 * Remove @node from an eviction scan, restoring the node-list links
 * that drm_mm_scan_add_block() stashed away.  Must be called for every
 * scanned block, in reverse order of addition.
 *
 * Returns 1 when the block lies inside the hole found by the scan and
 * must therefore actually be evicted to create that hole.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	KASSERT(node->scanned_block, ("scanned_block"));
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	/* Undo the virtual removal using the stashed links. */
	prev_node->hole_follows = node->scanned_preceeds_hole;
	INIT_LIST_HEAD(&node->node_list);
	list_add(&node->node_list, &prev_node->node_list);

	/* Only need to check for containment because start&size for the
	 * complete resulting free block (not just the desired part) is
	 * stored. */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
	    		<= mm->scan_hit_start + mm->scan_hit_size) {
		return 1;
	}

	return 0;
}
513235783Skib
/*
 * Nonzero when the manager holds no allocations: the circular node list
 * then contains only the sentinel head node, so head->next->next wraps
 * straight back to head.
 */
int drm_mm_clean(struct drm_mm * mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
520235783Skib
/*
 * Initialize a memory manager covering [start, start + size).
 *
 * The embedded head_node is a sentinel: its start is placed at the end
 * of the managed range and its size wraps around (unsigned arithmetic,
 * start - head_node.start) so that head_node.start + head_node.size ==
 * start.  The "hole" following the sentinel is therefore the entire
 * managed range, and the circular node list never has an empty special
 * case.  Always returns 0.
 */
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	mtx_init(&mm->unused_lock, "drm_unused", NULL, MTX_DEF);

	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	/* Sentinel trick: start at range end, size wraps back to start. */
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	return 0;
}
542235783Skib
/*
 * Tear down a memory manager.  Refuses (with an error message) when
 * allocations are still outstanding; otherwise drains the unused-node
 * cache and destroys the lock.
 */
void drm_mm_takedown(struct drm_mm * mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	/* Free every cached node from drm_mm_pre_get()/drm_mm_put_block(). */
	mtx_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		free(entry, DRM_MEM_MM);
		--mm->num_unused;
	}
	mtx_unlock(&mm->unused_lock);

	mtx_destroy(&mm->unused_lock);

	KASSERT(mm->num_unused == 0, ("num_unused != 0"));
}
564247833Skib
/*
 * Dump the manager's layout to the console: one line per used block and
 * per free hole, followed by used/free/total byte counts.  @prefix is
 * prepended to every line.
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	/* The hole after the sentinel head node precedes the first block. */
	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		printf("%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		printf("%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			printf("%s 0x%08lx-0x%08lx: %8lu: free\n",
				prefix, hole_start, hole_end,
				hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	printf("%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
601