/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * Copyright (c) 2013 EMC Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From:
 *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
 *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>

#include "opt_vm.h"

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma_int.h>

#define	VMEM_OPTORDER		5
#define	VMEM_OPTVALUE		(1 << VMEM_OPTORDER)
#define	VMEM_MAXORDER						\
    (VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)
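
/*
 * For example, with VMEM_OPTORDER == 5, VMEM_OPTVALUE is 32 and the first
 * 32 freelists hold exact sizes.  On a 64-bit platform, where vmem_size_t
 * is 8 bytes and NBBY is 8, VMEM_MAXORDER works out to 31 + 64 - 5 = 90
 * freelist heads per arena.
 */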

#define	VMEM_HASHSIZE_MIN	16
#define	VMEM_HASHSIZE_MAX	131072

#define	VMEM_QCACHE_IDX_MAX	16

#define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)

#define	VMEM_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM |	\
    M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)

#define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)

#define	QC_NAME_MAX	16

/*
 * Data structures private to vmem.
 */
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");

typedef struct vmem_btag bt_t;

TAILQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

struct qcache {
	uma_zone_t	qc_cache;
	vmem_t		*qc_vmem;
	vmem_size_t	qc_size;
	char		qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))

#define	VMEM_NAME_MAX	16

/* boundary tag */
struct vmem_btag {
	TAILQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t	bt_start;
	vmem_size_t	bt_size;
	int		bt_type;
};

/* vmem arena */
struct vmem {
	struct mtx_padalign	vm_lock;
	struct cv		vm_cv;
	char			vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem)	vm_alllist;
	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
	struct vmem_seglist	vm_seglist;
	struct vmem_hashlist	*vm_hashlist;
	vmem_size_t		vm_hashsize;

	/* Constant after init */
	vmem_size_t		vm_qcache_max;
	vmem_size_t		vm_quantum_mask;
	vmem_size_t		vm_import_quantum;
	int			vm_quantum_shift;

	/* Written on alloc/free */
	LIST_HEAD(, vmem_btag)	vm_freetags;
	int			vm_nfreetags;
	int			vm_nbusytag;
	vmem_size_t		vm_inuse;
	vmem_size_t		vm_size;
	vmem_size_t		vm_limit;
	struct vmem_btag	vm_cursor;

	/* Used on import. */
	vmem_import_t		*vm_importfn;
	vmem_release_t		*vm_releasefn;
	void			*vm_arg;

	/* Space exhaustion callback. */
	vmem_reclaim_t		*vm_reclaimfn;

	/* quantum cache */
	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
};

#define	BT_TYPE_SPAN		1	/* Allocated from importfn */
#define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
#define	BT_TYPE_FREE		3	/* Available space. */
#define	BT_TYPE_BUSY		4	/* Used space. */
#define	BT_TYPE_CURSOR		5	/* Cursor for nextfit allocations. */
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)
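
/*
 * BT_END() is the inclusive last address of a tag.  Using start + size - 1
 * rather than start + size keeps a span that ends exactly at the top of the
 * address space from overflowing to 0.
 */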

#if defined(DIAGNOSTIC)
static int enable_vmem_check = 0;
SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
    &enable_vmem_check, 0, "Enable vmem check");
static void vmem_check(vmem_t *);
#endif

static struct callout	vmem_periodic_ch;
static int		vmem_periodic_interval;
static struct task	vmem_periodic_wk;

static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
static uma_zone_t vmem_zone;

/* ---- misc */
#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)

#define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
#define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);

#define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)
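
/*
 * For example, VMEM_ALIGNUP(0x1234, 0x1000) is 0x2000, while
 * VMEM_ALIGNUP(0x2000, 0x1000) stays 0x2000.  VMEM_CROSS_P(0x0fff, 0x1000,
 * 0x1000) is true because the two addresses lie in different 0x1000-sized
 * blocks.
 */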

#define	ORDER2SIZE(order)	((order) < VMEM_OPTVALUE ? ((order) + 1) : \
    (vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
#define	SIZE2ORDER(size)	((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
    (flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))
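
/*
 * For example, in quantum units: SIZE2ORDER(1) == 0 and SIZE2ORDER(32) == 31
 * (the exact lists), while SIZE2ORDER(33) == flsl(33) + 25 == 31 and
 * SIZE2ORDER(64) == 32 (the power-of-two buckets).  Conversely,
 * ORDER2SIZE(31) == 32 and ORDER2SIZE(32) == 64, the smallest size each
 * bucket can hold.
 */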

/*
 * Maximum number of boundary tags that may be required to satisfy an
 * allocation.  Two may be required to import.  Another two may be
 * required to clip edges.
 */
#define	BT_MAXALLOC	4

/*
 * Max free limits the number of locally cached boundary tags.  We
 * just want to avoid hitting the zone allocator for every call.
 */
#define BT_MAXFREE	(BT_MAXALLOC * 8)

/* Allocator for boundary tags. */
static uma_zone_t vmem_bt_zone;

/* boot time arena storage. */
static struct vmem kernel_arena_storage;
static struct vmem buffer_arena_storage;
static struct vmem transient_arena_storage;
/* kernel and kmem arenas are aliased for backwards KPI compat. */
vmem_t *kernel_arena = &kernel_arena_storage;
vmem_t *kmem_arena = &kernel_arena_storage;
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;

#ifdef DEBUG_MEMGUARD
static struct vmem memguard_arena_storage;
vmem_t *memguard_arena = &memguard_arena_storage;
#endif

static bool
bt_isbusy(bt_t *bt)
{
	return (bt->bt_type == BT_TYPE_BUSY);
}

static bool
bt_isfree(bt_t *bt)
{
	return (bt->bt_type == BT_TYPE_FREE);
}

/*
 * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
 * allocation will not fail once bt_fill() passes.  To do so we cache
 * at least the maximum possible tag allocations in the arena.
 */
static __noinline int
_bt_fill(vmem_t *vm, int flags)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * Only allow the kernel arena and arenas derived from kernel arena to
	 * dip into reserve tags.  They are where new tags come from.
	 */
	flags &= BT_FLAGS;
	if (vm != kernel_arena && vm->vm_arg != kernel_arena)
		flags &= ~M_USE_RESERVE;

	/*
	 * Loop until we meet the reserve.  To minimize the lock shuffle
	 * and prevent simultaneous fills we first try a NOWAIT regardless
	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
	 * holding a vmem lock.
	 */
	while (vm->vm_nfreetags < BT_MAXALLOC) {
		bt = uma_zalloc(vmem_bt_zone,
		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
		if (bt == NULL) {
			VMEM_UNLOCK(vm);
			bt = uma_zalloc(vmem_bt_zone, flags);
			VMEM_LOCK(vm);
			if (bt == NULL)
				break;
		}
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags < BT_MAXALLOC)
		return ENOMEM;

	return 0;
}

static inline int
bt_fill(vmem_t *vm, int flags)
{
	if (vm->vm_nfreetags >= BT_MAXALLOC)
		return (0);
	return (_bt_fill(vm, flags));
}

/*
 * Pop a tag off of the freetag stack.
 */
static bt_t *
bt_alloc(vmem_t *vm)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	bt = LIST_FIRST(&vm->vm_freetags);
	MPASS(bt != NULL);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;

	return bt;
}

/*
 * Trim the per-vmem free list.  Returns with the lock released to
 * avoid allocator recursions.
 */
static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	LIST_HEAD(, vmem_btag) freetags;
	bt_t *bt;

	LIST_INIT(&freetags);
	VMEM_ASSERT_LOCKED(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
	}
	VMEM_UNLOCK(vm);
	while ((bt = LIST_FIRST(&freetags)) != NULL) {
		LIST_REMOVE(bt, bt_freelist);
		uma_zfree(vmem_bt_zone, bt);
	}
}

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
}

/*
 * Hide MAXALLOC tags before dropping the arena lock to ensure that a
 * concurrent allocation attempt does not grab them.
 */
static void
bt_save(vmem_t *vm)
{
	KASSERT(vm->vm_nfreetags >= BT_MAXALLOC,
	    ("%s: insufficient free tags %d", __func__, vm->vm_nfreetags));
	vm->vm_nfreetags -= BT_MAXALLOC;
}

static void
bt_restore(vmem_t *vm)
{
	vm->vm_nfreetags += BT_MAXALLOC;
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 2]
 *  :
 * freelist[29] ... [30, 30]
 * freelist[30] ... [31, 31]
 * freelist[31] ... [32, 63]
 * freelist[32] ... [64, 127]
 *  :
 * freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * For M_FIRSTFIT, return the list in which any block is large enough for the
 * requested size.  Otherwise, return the list which may contain blocks large
 * enough for the requested size.
 */
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);

	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), 0);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	MPASS(bt->bt_type != BT_TYPE_CURSOR);
	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
	bt_free(vm, bt);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm __unused, bt_t *bt)
{

	MPASS(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

/*
 * Import from the arena into the quantum cache in UMA.
 *
 * We use VMEM_ADDR_QCACHE_MIN instead of 0: uma_zalloc() returns 0 to indicate
 * failure, so UMA can't be used to cache a resource with value 0.
 */
static int
qc_import(void *arg, void **store, int cnt, int domain, int flags)
{
	qcache_t *qc;
	vmem_addr_t addr;
	int i;

	KASSERT((flags & M_WAITOK) == 0, ("blocking allocation"));

	qc = arg;
	for (i = 0; i < cnt; i++) {
		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
		    VMEM_ADDR_QCACHE_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
			break;
		store[i] = (void *)addr;
	}
	return (i);
}

/*
 * Release memory from the UMA cache to the arena.
 */
static void
qc_release(void *arg, void **store, int cnt)
{
	qcache_t *qc;
	int i;

	qc = arg;
	for (i = 0; i < cnt; i++)
		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
}

static void
qc_init(vmem_t *vm, vmem_size_t qcache_max)
{
	qcache_t *qc;
	vmem_size_t size;
	int qcache_idx_max;
	int i;

	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
	    VMEM_QCACHE_IDX_MAX);
	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++) {
		qc = &vm->vm_qcache[i];
		size = (i + 1) << vm->vm_quantum_shift;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_vmem = vm;
		qc->qc_size = size;
		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc, 0);
		MPASS(qc->qc_cache);
	}
}

static void
qc_destroy(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zdestroy(vm->vm_qcache[i].qc_cache);
}

static void
qc_drain(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zone_reclaim(vm->vm_qcache[i].qc_cache, UMA_RECLAIM_DRAIN);
}

#ifndef UMA_USE_DMAP

static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;

/*
 * vmem_bt_alloc:  Allocate a new page of boundary tags.
 *
 * On architectures with UMA_USE_DMAP there is no recursion; no address
 * space need be allocated to allocate boundary tags.  For the others, we
 * must handle recursion.  Boundary tags are necessary to allocate new
 * boundary tags.
 *
 * UMA guarantees that enough tags are held in reserve to allocate a new
 * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
 * when allocating the page to hold new boundary tags.  In this way the
 * reserve is automatically filled by the allocation that uses the reserve.
 *
 * We still have to guarantee that the new tags are allocated atomically since
 * many threads may try concurrently.  The bt_lock provides this guarantee.
 * We convert WAITOK allocations to NOWAIT and then handle the blocking here
 * on failure.  It's ok to return NULL for a WAITOK allocation as UMA will
 * loop again after checking to see if we lost the race to allocate.
 *
 * There is a small race between vmem_bt_alloc() returning the page and the
 * zone lock being acquired to add the page to the zone.  For WAITOK
 * allocations we just pause briefly.  NOWAIT may experience a transient
 * failure.  To alleviate this we permit a small number of simultaneous
 * fills to proceed concurrently so NOWAIT is less likely to fail unless
 * we are really out of KVA.
 */
static void *
vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	vmem_addr_t addr;

	*pflag = UMA_SLAB_KERNEL;

	/*
	 * Single thread boundary tag allocation so that the address space
	 * and memory are added in one atomic operation.
	 */
	mtx_lock(&vmem_bt_lock);
	if (vmem_xalloc(vm_dom[domain].vmd_kernel_arena, bytes, 0, 0, 0,
	    VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT, &addr) == 0) {
		if (kmem_back_domain(domain, kernel_object, addr, bytes,
		    M_NOWAIT | M_USE_RESERVE) == 0) {
			mtx_unlock(&vmem_bt_lock);
			return ((void *)addr);
		}
		vmem_xfree(vm_dom[domain].vmd_kernel_arena, addr, bytes);
		mtx_unlock(&vmem_bt_lock);
		/*
		 * Out of memory, not address space.  This may not even be
		 * possible due to M_USE_RESERVE page allocation.
		 */
		if (wait & M_WAITOK)
			vm_wait_domain(domain);
		return (NULL);
	}
	mtx_unlock(&vmem_bt_lock);
	/*
	 * We're either out of address space or lost a fill race.
	 */
	if (wait & M_WAITOK)
		pause("btalloc", 1);

	return (NULL);
}
#endif

void
vmem_startup(void)
{

	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
	vmem_zone = uma_zcreate("vmem",
	    sizeof(struct vmem), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	vmem_bt_zone = uma_zcreate("vmem btag",
	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);
#ifndef UMA_USE_DMAP
	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
	/*
	 * Reserve enough tags to allocate new tags.  We allow multiple
	 * CPUs to attempt to allocate new tags concurrently to limit
	 * false restarts in UMA.  vmem_bt_alloc() allocates from a per-domain
	 * arena, which may involve importing a range from the kernel arena,
	 * so we need to keep at least 2 * BT_MAXALLOC tags reserved.
	 */
	uma_zone_reserve(vmem_bt_zone, 2 * BT_MAXALLOC * mp_ncpus);
	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
#endif
}

/* ---- rehash */

static int
vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
{
	bt_t *bt;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	vmem_size_t i, oldhashsize;

	MPASS(newhashsize > 0);

	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
	    M_VMEM, M_NOWAIT);
	if (newhashlist == NULL)
		return ENOMEM;
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	VMEM_LOCK(vm);
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != vm->vm_hash0)
		free(oldhashlist, M_VMEM);

	return 0;
}

static void
vmem_periodic_kick(void *dummy)
{

	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
}

static void
vmem_periodic(void *unused, int pending)
{
	vmem_t *vm;
	vmem_size_t desired;
	vmem_size_t current;

	mtx_lock(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
#ifdef DIAGNOSTIC
		/* Convenient time to verify vmem state. */
		if (enable_vmem_check == 1) {
			VMEM_LOCK(vm);
			vmem_check(vm);
			VMEM_UNLOCK(vm);
		}
#endif
		desired = 1 << flsl(vm->vm_nbusytag);
		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
		    VMEM_HASHSIZE_MAX);
		current = vm->vm_hashsize;

		/* Grow in powers of two.  Shrink less aggressively. */
		if (desired >= current * 2 || desired * 4 <= current)
			vmem_rehash(vm, desired);

		/*
		 * Periodically wake up threads waiting for resources,
		 * so they could ask for reclamation again.
		 */
		VMEM_CONDVAR_BROADCAST(vm);
	}
	mtx_unlock(&vmem_list_lock);

	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}

static void
vmem_start_callout(void *unused)
{

	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
	vmem_periodic_interval = hz * 10;
	callout_init(&vmem_periodic_ch, 1);
	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);

static void
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
{
	bt_t *btfree, *btprev, *btspan;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
	MPASS((size & vm->vm_quantum_mask) == 0);

	if (vm->vm_releasefn == NULL) {
		/*
		 * The new segment will never be released, so see if it is
		 * contiguous with respect to an existing segment.  In this case
		 * a span tag is not needed, and it may be possible now or in
		 * the future to coalesce the new segment with an existing free
		 * segment.
		 */
		btprev = TAILQ_LAST(&vm->vm_seglist, vmem_seglist);
		if ((!bt_isbusy(btprev) && !bt_isfree(btprev)) ||
		    btprev->bt_start + btprev->bt_size != addr)
			btprev = NULL;
	} else {
		btprev = NULL;
	}

	if (btprev == NULL || bt_isbusy(btprev)) {
		if (btprev == NULL) {
			btspan = bt_alloc(vm);
			btspan->bt_type = type;
			btspan->bt_start = addr;
			btspan->bt_size = size;
			bt_insseg_tail(vm, btspan);
		}

		btfree = bt_alloc(vm);
		btfree->bt_type = BT_TYPE_FREE;
		btfree->bt_start = addr;
		btfree->bt_size = size;
		bt_insseg_tail(vm, btfree);
		bt_insfree(vm, btfree);
	} else {
		bt_remfree(vm, btprev);
		btprev->bt_size += size;
		bt_insfree(vm, btprev);
	}

	vm->vm_size += size;
}

static void
vmem_destroy1(vmem_t *vm)
{
	bt_t *bt;

	/*
	 * Drain per-cpu quantum caches.
	 */
	qc_destroy(vm);

	/*
	 * The vmem should now only contain empty segments.
	 */
	VMEM_LOCK(vm);
	MPASS(vm->vm_nbusytag == 0);

	TAILQ_REMOVE(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);
	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
		bt_remseg(vm, bt);

	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
		free(vm->vm_hashlist, M_VMEM);

	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	uma_zfree(vmem_zone, vm);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
{
	vmem_addr_t addr;
	int error;

	if (vm->vm_importfn == NULL)
		return (EINVAL);

	/*
	 * To make sure we get a span that meets the alignment we double it
	 * and add the size to the tail.  This slightly overestimates.
	 */
	if (align != vm->vm_quantum_mask + 1)
		size = (align * 2) + size;
	size = roundup(size, vm->vm_import_quantum);

	if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
		return (ENOMEM);

	bt_save(vm);
	VMEM_UNLOCK(vm);
	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	VMEM_LOCK(vm);
	bt_restore(vm);
	if (error)
		return (ENOMEM);

	vmem_add1(vm, addr, size, BT_TYPE_SPAN);

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * It is the caller's responsibility to ensure the region is big enough
 * before calling us.
 */
static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
    vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	MPASS(size > 0);
	MPASS(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr)
		end = maxaddr;
	if (start > end)
		return (ENOMEM);

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start)
		start += align;
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		MPASS(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		MPASS((start & (align - 1)) == phase);
		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
		MPASS(minaddr <= start);
		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
		MPASS(bt->bt_start <= start);
		MPASS(BT_END(bt) - start >= size - 1);
		*addrp = start;

		return (0);
	}
	return (ENOMEM);
}

/*
 * vmem_clip:  Trim the boundary tag edges to the requested start and size.
 */
static void
vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
{
	bt_t *btnew;
	bt_t *btprev;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_FREE);
	MPASS(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btprev = bt_alloc(vm);
		btprev->bt_type = BT_TYPE_FREE;
		btprev->bt_start = bt->bt_start;
		btprev->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btprev->bt_size;
		bt_insfree(vm, btprev);
		bt_insseg(vm, btprev,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
	}
	MPASS(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew = bt_alloc(vm);
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		bt = btnew;
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
	}
	MPASS(bt->bt_size >= size);
}

static int
vmem_try_fetch(vmem_t *vm, const vmem_size_t size, vmem_size_t align, int flags)
{
	vmem_size_t avail;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * XXX it is possible to fail to meet xalloc constraints with the
	 * imported region.  It is up to the user to specify the
	 * import quantum such that it can satisfy any allocation.
	 */
	if (vmem_import(vm, size, align, flags) == 0)
		return (1);

	/*
	 * Try to free some space from the quantum cache or reclaim
	 * functions if available.
	 */
	if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
		avail = vm->vm_size - vm->vm_inuse;
		bt_save(vm);
		VMEM_UNLOCK(vm);
		if (vm->vm_qcache_max != 0)
			qc_drain(vm);
		if (vm->vm_reclaimfn != NULL)
			vm->vm_reclaimfn(vm, flags);
		VMEM_LOCK(vm);
		bt_restore(vm);
		/* If we were successful retry even NOWAIT. */
		if (vm->vm_size - vm->vm_inuse > avail)
			return (1);
	}
	if ((flags & M_NOWAIT) != 0)
		return (0);
	bt_save(vm);
	VMEM_CONDVAR_WAIT(vm);
	bt_restore(vm);
	return (1);
}

static int
vmem_try_release(vmem_t *vm, struct vmem_btag *bt, const bool remfree)
{
	struct vmem_btag *prev;

	MPASS(bt->bt_type == BT_TYPE_FREE);

	if (vm->vm_releasefn == NULL)
		return (0);

	prev = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	MPASS(prev != NULL);
	MPASS(prev->bt_type != BT_TYPE_FREE);

	if (prev->bt_type == BT_TYPE_SPAN && prev->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		MPASS(prev->bt_start == bt->bt_start);
		spanaddr = prev->bt_start;
		spansize = prev->bt_size;
		if (remfree)
			bt_remfree(vm, bt);
		bt_remseg(vm, bt);
		bt_remseg(vm, prev);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
		vm->vm_releasefn(vm->vm_arg, spanaddr, spansize);
		return (1);
	}
	return (0);
}

static int
vmem_xalloc_nextfit(vmem_t *vm, const vmem_size_t size, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross, int flags,
    vmem_addr_t *addrp)
{
	struct vmem_btag *bt, *cursor, *next, *prev;
	int error;

	error = ENOMEM;
	VMEM_LOCK(vm);

	/*
	 * Make sure we have enough tags to complete the operation.
	 */
	if (bt_fill(vm, flags) != 0)
		goto out;

retry:
	/*
	 * Find the next free tag meeting our constraints.  If one is found,
	 * perform the allocation.
	 */
	for (cursor = &vm->vm_cursor, bt = TAILQ_NEXT(cursor, bt_seglist);
	    bt != cursor; bt = TAILQ_NEXT(bt, bt_seglist)) {
		if (bt == NULL)
			bt = TAILQ_FIRST(&vm->vm_seglist);
		if (bt->bt_type == BT_TYPE_FREE && bt->bt_size >= size &&
		    (error = vmem_fit(bt, size, align, phase, nocross,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
			vmem_clip(vm, bt, *addrp, size);
			break;
		}
	}

	/*
	 * Try to coalesce free segments around the cursor.  If we succeed, and
	 * have not yet satisfied the allocation request, try again with the
	 * newly coalesced segment.
	 */
	if ((next = TAILQ_NEXT(cursor, bt_seglist)) != NULL &&
	    (prev = TAILQ_PREV(cursor, vmem_seglist, bt_seglist)) != NULL &&
	    next->bt_type == BT_TYPE_FREE && prev->bt_type == BT_TYPE_FREE &&
	    prev->bt_start + prev->bt_size == next->bt_start) {
		prev->bt_size += next->bt_size;
		bt_remfree(vm, next);
		bt_remseg(vm, next);

		/*
		 * The coalesced segment might be able to satisfy our request.
		 * If not, we might need to release it from the arena.
		 */
		if (error == ENOMEM && prev->bt_size >= size &&
		    (error = vmem_fit(prev, size, align, phase, nocross,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
			vmem_clip(vm, prev, *addrp, size);
			bt = prev;
		} else
			(void)vmem_try_release(vm, prev, true);
	}

	/*
	 * If the allocation was successful, advance the cursor.
	 */
	if (error == 0) {
		TAILQ_REMOVE(&vm->vm_seglist, cursor, bt_seglist);
		for (; bt != NULL && bt->bt_start < *addrp + size;
		    bt = TAILQ_NEXT(bt, bt_seglist))
			;
		if (bt != NULL)
			TAILQ_INSERT_BEFORE(bt, cursor, bt_seglist);
		else
			TAILQ_INSERT_HEAD(&vm->vm_seglist, cursor, bt_seglist);
	}

	/*
	 * Attempt to bring additional resources into the arena.  If that fails
	 * and M_WAITOK is specified, sleep waiting for resources to be freed.
	 */
	if (error == ENOMEM && vmem_try_fetch(vm, size, align, flags))
		goto retry;

out:
	VMEM_UNLOCK(vm);
	return (error);
}

/* ---- vmem API */

void
vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
    vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
{

	VMEM_LOCK(vm);
	KASSERT(vm->vm_size == 0, ("%s: arena is non-empty", __func__));
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_import_quantum = import_quantum;
	VMEM_UNLOCK(vm);
}
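
/*
 * A minimal sketch of an import/release pair (the helper names here are
 * hypothetical): back a sub-arena with 1MB spans carved out of a parent
 * arena.  The signatures match how vm_importfn and vm_releasefn are invoked
 * in vmem_import() and vmem_try_release() above.
 *
 *	static int
 *	sub_import(void *arg, vmem_size_t size, int flags, vmem_addr_t *addrp)
 *	{
 *		return (vmem_alloc(arg, size,
 *		    M_BESTFIT | (flags & (M_NOWAIT | M_WAITOK)), addrp));
 *	}
 *
 *	static void
 *	sub_release(void *arg, vmem_addr_t addr, vmem_size_t size)
 *	{
 *		vmem_free(arg, addr, size);
 *	}
 *
 *	vmem_set_import(sub, sub_import, sub_release, parent, 1024 * 1024);
 */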

void
vmem_set_limit(vmem_t *vm, vmem_size_t limit)
{

	VMEM_LOCK(vm);
	vm->vm_limit = limit;
	VMEM_UNLOCK(vm);
}

void
vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
{

	VMEM_LOCK(vm);
	vm->vm_reclaimfn = reclaimfn;
	VMEM_UNLOCK(vm);
}

/*
 * vmem_init: Initializes vmem arena.
 */
vmem_t *
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
	vmem_size_t i;

	MPASS(quantum > 0);
	MPASS((quantum & (quantum - 1)) == 0);

	bzero(vm, sizeof(*vm));

	VMEM_CONDVAR_INIT(vm, name);
	VMEM_LOCK_INIT(vm, name);
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = flsl(quantum) - 1;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_limit = 0;
	vm->vm_inuse = 0;
	qc_init(vm, qcache_max);

	TAILQ_INIT(&vm->vm_seglist);
	vm->vm_cursor.bt_start = vm->vm_cursor.bt_size = 0;
	vm->vm_cursor.bt_type = BT_TYPE_CURSOR;
	TAILQ_INSERT_TAIL(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);

	for (i = 0; i < VMEM_MAXORDER; i++)
		LIST_INIT(&vm->vm_freelist[i]);

	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
	vm->vm_hashlist = vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

	mtx_lock(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	return vm;
}

/*
 * vmem_create: create an arena.
 */
vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{

	vmem_t *vm;

	vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
	if (vm == NULL)
		return (NULL);
	if (vmem_init(vm, name, base, size, quantum, qcache_max,
	    flags) == NULL)
		return (NULL);
	return (vm);
}
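
/*
 * Usage sketch (the values are hypothetical): manage [0x1000, 0x101000) in
 * PAGE_SIZE units with no quantum cache, then allocate and free one page:
 *
 *	vmem_t *arena;
 *	vmem_addr_t addr;
 *
 *	arena = vmem_create("example", 0x1000, 0x100000, PAGE_SIZE, 0,
 *	    M_WAITOK);
 *	if (vmem_alloc(arena, PAGE_SIZE, M_BESTFIT | M_WAITOK, &addr) == 0)
 *		vmem_free(arena, addr, PAGE_SIZE);
 *	vmem_destroy(arena);
 */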

void
vmem_destroy(vmem_t *vm)
{

	mtx_lock(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}
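
/*
 * For example, with a 4096-byte quantum, vmem_roundup_size() maps a 1-byte
 * request to 4096 and a 4097-byte request to 8192.
 */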

/*
 * vmem_alloc: allocate resource from the arena.
 */
int
vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	const int strat __unused = flags & VMEM_FITMASK;
	qcache_t *qc;

	flags &= VMEM_FLAGS;
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");

	if (size <= vm->vm_qcache_max) {
		/*
		 * Resource 0 cannot be cached, so avoid a blocking allocation
		 * in qc_import() and give the vmem_xalloc() call below a chance
		 * to return 0.
		 */
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache,
		    (flags & ~M_WAITOK) | M_NOWAIT);
		if (__predict_true(*addrp != 0))
			return (0);
	}

	return (vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp));
}

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
    vmem_addr_t *addrp)
{
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	int error;
	int strat;

	flags &= VMEM_FLAGS;
	strat = flags & VMEM_FITMASK;
	MPASS(size0 > 0);
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
	MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
	MPASS((align & vm->vm_quantum_mask) == 0);
	MPASS((align & (align - 1)) == 0);
	MPASS((phase & vm->vm_quantum_mask) == 0);
	MPASS((nocross & vm->vm_quantum_mask) == 0);
	MPASS((nocross & (nocross - 1)) == 0);
	MPASS((align == 0 && phase == 0) || phase < align);
	MPASS(nocross == 0 || nocross >= size);
	MPASS(minaddr <= maxaddr);
	MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
	if (strat == M_NEXTFIT)
		MPASS(minaddr == VMEM_ADDR_MIN && maxaddr == VMEM_ADDR_MAX);

	if (align == 0)
		align = vm->vm_quantum_mask + 1;
	*addrp = 0;

	/*
	 * Next-fit allocations don't use the freelists.
	 */
	if (strat == M_NEXTFIT)
		return (vmem_xalloc_nextfit(vm, size0, align, phase, nocross,
		    flags, addrp));

	end = &vm->vm_freelist[VMEM_MAXORDER];
	/*
	 * choose a free block from which we allocate.
	 */
	first = bt_freehead_toalloc(vm, size, strat);
	VMEM_LOCK(vm);

	/*
	 * Make sure we have enough tags to complete the operation.
	 */
	error = bt_fill(vm, flags);
	if (error != 0)
		goto out;
	for (;;) {
		/*
		 * Scan freelists looking for a tag that satisfies the
		 * allocation.  If we're doing BESTFIT we may encounter
		 * sizes below the request.  If we're doing FIRSTFIT we
		 * inspect only the first element from each list.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					error = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, addrp);
					if (error == 0) {
						vmem_clip(vm, bt, *addrp, size);
						goto out;
					}
				}
				/* FIRST skips to the next list. */
				if (strat == M_FIRSTFIT)
					break;
			}
		}

		/*
		 * Retry if the fast algorithm failed.
		 */
		if (strat == M_FIRSTFIT) {
			strat = M_BESTFIT;
			first = bt_freehead_toalloc(vm, size, strat);
			continue;
		}

		/*
		 * Try a few measures to bring additional resources into the
		 * arena.  If all else fails, we will sleep waiting for
		 * resources to be freed.
		 */
		if (!vmem_try_fetch(vm, size, align, flags)) {
			error = ENOMEM;
			break;
		}
	}
out:
	VMEM_UNLOCK(vm);
	if (error != 0 && (flags & M_NOWAIT) == 0)
		panic("failed to allocate waiting allocation\n");

	return (error);
}
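
/*
 * Constraint sketch (hypothetical values): allocate 8KB from an arena,
 * aligned to 64KB and not crossing a 1MB boundary:
 *
 *	error = vmem_xalloc(arena, 8192, 65536, 0, 1024 * 1024,
 *	    VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_BESTFIT | M_WAITOK, &addr);
 */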

/*
 * vmem_free: free the resource to the arena.
 */
void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	qcache_t *qc;
	MPASS(size > 0);

	if (size <= vm->vm_qcache_max &&
	    __predict_true(addr >= VMEM_ADDR_QCACHE_MIN)) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		uma_zfree(qc->qc_cache, (void *)addr);
	} else
		vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size __unused)
{
	bt_t *bt;
	bt_t *t;

	MPASS(size > 0);

	VMEM_LOCK(vm);
	bt = bt_lookupbusy(vm, addr);
	MPASS(bt != NULL);
	MPASS(bt->bt_start == addr);
	MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	MPASS(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(bt) < t->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(t) < bt->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}

	if (!vmem_try_release(vm, bt, false)) {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
	}
}

/*
 * vmem_add: add a span of memory to the arena as a static span, which is
 * never released back via the release function.
 */
int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
{
	int error;

	flags &= VMEM_FLAGS;

	VMEM_LOCK(vm);
	error = bt_fill(vm, flags);
	if (error == 0)
		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
	VMEM_UNLOCK(vm);

	return (error);
}

/*
 * vmem_size: report the arena's allocated, free, or total size, or the
 * largest contiguous free size, as selected by typemask.
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{
	int i;

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	case VMEM_MAXFREE:
		VMEM_LOCK(vm);
		for (i = VMEM_MAXORDER - 1; i >= 0; i--) {
			if (LIST_EMPTY(&vm->vm_freelist[i]))
				continue;
			VMEM_UNLOCK(vm);
			return ((vmem_size_t)ORDER2SIZE(i) <<
			    vm->vm_quantum_shift);
		}
		VMEM_UNLOCK(vm);
		return (0);
	default:
		panic("vmem_size");
	}
}

/* ---- debug */

#if defined(DDB) || defined(DIAGNOSTIC)

static void bt_dump(const bt_t *, int (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{

	switch (type) {
	case BT_TYPE_BUSY:
		return "busy";
	case BT_TYPE_FREE:
		return "free";
	case BT_TYPE_SPAN:
		return "span";
	case BT_TYPE_SPAN_STATIC:
		return "static span";
	case BT_TYPE_CURSOR:
		return "cursor";
	default:
		break;
	}
	return "BOGUS";
}

static void
bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
{

	(*pr)("\t%p: %jx %jx, %d(%s)\n",
	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, int (*pr)(const char *, ...) __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(DIAGNOSTIC) */

#if defined(DDB)
#include <ddb/ddb.h>

static bt_t *
vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}

void
vmem_printall(const char *modif, int (*pr)(const char *, ...))
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		vmem_dump(vm, pr);
	}
}

void
vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...))
{
	const vmem_t *vm = (const void *)addr;

	vmem_dump(vm, pr);
}

DB_SHOW_COMMAND(vmemdump, vmemdump)
{

	if (!have_addr) {
		db_printf("usage: show vmemdump <addr>\n");
		return;
	}

	vmem_dump((const vmem_t *)addr, db_printf);
}

DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall)
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist)
		vmem_dump(vm, db_printf);
}

DB_SHOW_COMMAND(vmem, vmem_summ)
{
	const vmem_t *vm = (const void *)addr;
	const bt_t *bt;
	size_t ft[VMEM_MAXORDER], ut[VMEM_MAXORDER];
	size_t fs[VMEM_MAXORDER], us[VMEM_MAXORDER];
	int ord;

	if (!have_addr) {
		db_printf("usage: show vmem <addr>\n");
		return;
	}

	db_printf("vmem %p '%s'\n", vm, vm->vm_name);
	db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1);
	db_printf("\tsize:\t%zu\n", vm->vm_size);
	db_printf("\tinuse:\t%zu\n", vm->vm_inuse);
	db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse);
	db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag);
	db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags);

	memset(&ft, 0, sizeof(ft));
	memset(&ut, 0, sizeof(ut));
	memset(&fs, 0, sizeof(fs));
	memset(&us, 0, sizeof(us));
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift);
		if (bt->bt_type == BT_TYPE_BUSY) {
			ut[ord]++;
			us[ord] += bt->bt_size;
		} else if (bt->bt_type == BT_TYPE_FREE) {
			ft[ord]++;
			fs[ord] += bt->bt_size;
		}
	}
	db_printf("\t\t\tinuse\tsize\t\tfree\tsize\n");
	for (ord = 0; ord < VMEM_MAXORDER; ord++) {
		if (ut[ord] == 0 && ft[ord] == 0)
			continue;
		db_printf("\t%-15zu %zu\t%-15zu %zu\t%-16zu\n",
		    ORDER2SIZE(ord) << vm->vm_quantum_shift,
		    ut[ord], us[ord], ft[ord], fs[ord]);
	}
}

DB_SHOW_ALL_COMMAND(vmem, vmem_summall)
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist)
		vmem_summ((db_expr_t)vm, TRUE, count, modif);
}
#endif /* defined(DDB) */

#define vmem_printf printf

#if defined(DIAGNOSTIC)

static bool
vmem_check_sanity(vmem_t *vm)
{
	const bt_t *bt, *bt2;

	MPASS(vm != NULL);

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (bt->bt_start > BT_END(bt)) {
			printf("corrupted tag\n");
			bt_dump(bt, vmem_printf);
			return false;
		}
	}
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (bt->bt_type == BT_TYPE_CURSOR) {
			if (bt->bt_start != 0 || bt->bt_size != 0) {
				printf("corrupted cursor\n");
				return false;
			}
			continue;
		}
		TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
			if (bt == bt2) {
				continue;
			}
			if (bt2->bt_type == BT_TYPE_CURSOR) {
				continue;
			}
			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
				continue;
			}
			if (bt->bt_start <= BT_END(bt2) &&
			    bt2->bt_start <= BT_END(bt)) {
				printf("overlapping tags\n");
				bt_dump(bt, vmem_printf);
				bt_dump(bt2, vmem_printf);
				return false;
			}
		}
	}

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insanity vmem %p", vm);
	}
}

#endif /* defined(DIAGNOSTIC) */
