/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "libuutil_common.h"

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/avl.h>

static uu_avl_pool_t	uu_null_apool = { &uu_null_apool, &uu_null_apool };
static pthread_mutex_t	uu_apool_list_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * The index mark changes on every insert and delete, to catch stale
 * references.
 *
 * We leave the low bit alone, since the avl code uses it.
 */
#define	INDEX_MAX		(sizeof (uintptr_t) - 2)
#define	INDEX_NEXT(m)		(((m) == INDEX_MAX)? 2 : ((m) + 2) & INDEX_MAX)

#define	INDEX_DECODE(i)		((i) & ~INDEX_MAX)
#define	INDEX_ENCODE(p, n)	(((n) & ~INDEX_MAX) | (p)->ua_index)
#define	INDEX_VALID(p, i)	(((i) & INDEX_MAX) == (p)->ua_index)
#define	INDEX_CHECK(i)		(((i) & INDEX_MAX) != 0)
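
/*
 * Worked example (an illustration, assuming a 64-bit uintptr_t, so that
 * INDEX_MAX == 6, i.e. binary 110):
 *
 *	INDEX_NEXT() cycles a mark through 2 -> 4 -> 6 -> 2 -> ..., never 0,
 *	touching only bits 1-2 and leaving bit 0 for the avl code.
 *
 *	INDEX_ENCODE() stamps the avl's current mark into bits 1-2 of a raw
 *	avl index, INDEX_DECODE() strips it back out, and INDEX_VALID()
 *	fails once the avl's mark has moved on (after any insert or remove
 *	in debug mode), catching stale uu_avl_index_t values.
 */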

/*
 * When an element is inactive (not in a tree), we keep a marked pointer to
 * its containing pool in its first word, and a NULL pointer in its second.
 *
 * On insert, we use these to verify that it comes from the correct pool.
 */
#define	NODE_ARRAY(p, n)	((uintptr_t *)((uintptr_t)(n) + \
				    (p)->uap_nodeoffset))

#define	POOL_TO_MARKER(pp) (((uintptr_t)(pp) | 1))

#define	DEAD_MARKER		0xc4
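
/*
 * Illustrative lifecycle of a node's first two words (a sketch; while the
 * node is in a tree, the avl code owns this storage):
 *
 *	after uu_avl_node_init():	na[0] = pool | 1, na[1] = 0
 *	while linked into a tree:	na[0], na[1] hold avl linkage
 *	after uu_avl_remove():		na[0] = pool | 1, na[1] = 0 again
 *	after uu_avl_node_fini():	na[0] = na[1] = DEAD_MARKER
 */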

uu_avl_pool_t *
uu_avl_pool_create(const char *name, size_t objsize, size_t nodeoffset,
    uu_compare_fn_t *compare_func, uint32_t flags)
{
	uu_avl_pool_t *pp, *next, *prev;

	if (name == NULL ||
	    uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
	    nodeoffset + sizeof (uu_avl_node_t) > objsize ||
	    compare_func == NULL) {
		uu_set_error(UU_ERROR_INVALID_ARGUMENT);
		return (NULL);
	}

	if (flags & ~UU_AVL_POOL_DEBUG) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	pp = uu_zalloc(sizeof (uu_avl_pool_t));
	if (pp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	(void) strlcpy(pp->uap_name, name, sizeof (pp->uap_name));
	pp->uap_nodeoffset = nodeoffset;
	pp->uap_objsize = objsize;
	pp->uap_cmp = compare_func;
	if (flags & UU_AVL_POOL_DEBUG)
		pp->uap_debug = 1;
	pp->uap_last_index = 0;

	(void) pthread_mutex_init(&pp->uap_lock, NULL);

	pp->uap_null_avl.ua_next_enc = UU_PTR_ENCODE(&pp->uap_null_avl);
	pp->uap_null_avl.ua_prev_enc = UU_PTR_ENCODE(&pp->uap_null_avl);

	(void) pthread_mutex_lock(&uu_apool_list_lock);
	pp->uap_next = next = &uu_null_apool;
	pp->uap_prev = prev = next->uap_prev;
	next->uap_prev = pp;
	prev->uap_next = pp;
	(void) pthread_mutex_unlock(&uu_apool_list_lock);

	return (pp);
}
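
/*
 * A minimal usage sketch (hypothetical caller code; myobj_t, mo_key,
 * mo_node, and myobj_compare are invented for illustration):
 *
 *	typedef struct myobj {
 *		int		mo_key;
 *		uu_avl_node_t	mo_node;
 *	} myobj_t;
 *
 *	pool = uu_avl_pool_create("myapp_objs", sizeof (myobj_t),
 *	    offsetof(myobj_t, mo_node), myobj_compare, UU_AVL_POOL_DEBUG);
 *
 * Each object must then have its node set up with
 * uu_avl_node_init(obj, &obj->mo_node, pool) before it is inserted.
 */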

void
uu_avl_pool_destroy(uu_avl_pool_t *pp)
{
	if (pp->uap_debug) {
		if (pp->uap_null_avl.ua_next_enc !=
		    UU_PTR_ENCODE(&pp->uap_null_avl) ||
		    pp->uap_null_avl.ua_prev_enc !=
		    UU_PTR_ENCODE(&pp->uap_null_avl)) {
			uu_panic("uu_avl_pool_destroy: Pool \"%.*s\" (%p) has "
			    "outstanding avls, or is corrupt.\n",
			    (int)sizeof (pp->uap_name), pp->uap_name,
			    (void *)pp);
		}
	}
	(void) pthread_mutex_lock(&uu_apool_list_lock);
	pp->uap_next->uap_prev = pp->uap_prev;
	pp->uap_prev->uap_next = pp->uap_next;
	(void) pthread_mutex_unlock(&uu_apool_list_lock);
	(void) pthread_mutex_destroy(&pp->uap_lock);
	pp->uap_prev = NULL;
	pp->uap_next = NULL;
	uu_free(pp);
}

void
uu_avl_node_init(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
{
	uintptr_t *na = (uintptr_t *)np;

	if (pp->uap_debug) {
		uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
		if (offset + sizeof (*np) > pp->uap_objsize) {
			uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't fit in object (size %ld)\n",
			    base, (void *)np, (void *)pp, pp->uap_name,
			    (long)offset, (long)pp->uap_objsize);
		}
		if (offset != pp->uap_nodeoffset) {
			uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't match pool's offset (%ld)\n",
			    base, (void *)np, (void *)pp, pp->uap_name,
			    (long)offset, (long)pp->uap_nodeoffset);
		}
	}

	na[0] = POOL_TO_MARKER(pp);
	na[1] = 0;
}

void
uu_avl_node_fini(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
{
	uintptr_t *na = (uintptr_t *)np;

	if (pp->uap_debug) {
		if (na[0] == DEAD_MARKER && na[1] == DEAD_MARKER) {
			uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
			    "node already finalized\n",
			    base, (void *)np, (void *)pp, pp->uap_name);
		}
		if (na[0] != POOL_TO_MARKER(pp) || na[1] != 0) {
			uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
			    "node corrupt, in tree, or in different pool\n",
			    base, (void *)np, (void *)pp, pp->uap_name);
		}
	}

	na[0] = DEAD_MARKER;
	na[1] = DEAD_MARKER;
	na[2] = DEAD_MARKER;
}

struct uu_avl_node_compare_info {
	uu_compare_fn_t	*ac_compare;
	void		*ac_private;
	void		*ac_right;
	void		*ac_found;
};

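/*
 * Trampoline handed to avl_find(): "l" is really the info structure, "r"
 * a node already in the tree.  It deliberately never returns 0, so
 * avl_find() itself never reports a match; instead, the first matching
 * element seen on the way down is remembered in ac_found, and the
 * descent continues to a leaf so that the resulting index is usable by
 * uu_avl_insert() and the nearest-neighbor functions.
 */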
static int
uu_avl_node_compare(const void *l, const void *r)
{
	struct uu_avl_node_compare_info *info =
	    (struct uu_avl_node_compare_info *)l;

	int res = info->ac_compare(r, info->ac_right, info->ac_private);

	if (res == 0) {
		if (info->ac_found == NULL)
			info->ac_found = (void *)r;
		return (-1);
	}
	if (res < 0)
		return (1);
	return (-1);
}

uu_avl_t *
uu_avl_create(uu_avl_pool_t *pp, void *parent, uint32_t flags)
{
	uu_avl_t *ap, *next, *prev;

	if (flags & ~UU_AVL_DEBUG) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	ap = uu_zalloc(sizeof (*ap));
	if (ap == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	ap->ua_pool = pp;
	ap->ua_parent_enc = UU_PTR_ENCODE(parent);
	ap->ua_debug = pp->uap_debug || (flags & UU_AVL_DEBUG);
	ap->ua_index = (pp->uap_last_index = INDEX_NEXT(pp->uap_last_index));

	avl_create(&ap->ua_tree, &uu_avl_node_compare, pp->uap_objsize,
	    pp->uap_nodeoffset);

	ap->ua_null_walk.uaw_next = &ap->ua_null_walk;
	ap->ua_null_walk.uaw_prev = &ap->ua_null_walk;

	(void) pthread_mutex_lock(&pp->uap_lock);
	next = &pp->uap_null_avl;
	prev = UU_PTR_DECODE(next->ua_prev_enc);
	ap->ua_next_enc = UU_PTR_ENCODE(next);
	ap->ua_prev_enc = UU_PTR_ENCODE(prev);
	next->ua_prev_enc = UU_PTR_ENCODE(ap);
	prev->ua_next_enc = UU_PTR_ENCODE(ap);
	(void) pthread_mutex_unlock(&pp->uap_lock);

	return (ap);
}

void
uu_avl_destroy(uu_avl_t *ap)
{
	uu_avl_pool_t *pp = ap->ua_pool;

	if (ap->ua_debug) {
		if (avl_numnodes(&ap->ua_tree) != 0) {
			uu_panic("uu_avl_destroy(%p): tree not empty\n",
			    (void *)ap);
		}
		if (ap->ua_null_walk.uaw_next != &ap->ua_null_walk ||
		    ap->ua_null_walk.uaw_prev != &ap->ua_null_walk) {
			uu_panic("uu_avl_destroy(%p): outstanding walkers\n",
			    (void *)ap);
		}
	}
	(void) pthread_mutex_lock(&pp->uap_lock);
	UU_AVL_PTR(ap->ua_next_enc)->ua_prev_enc = ap->ua_prev_enc;
	UU_AVL_PTR(ap->ua_prev_enc)->ua_next_enc = ap->ua_next_enc;
	(void) pthread_mutex_unlock(&pp->uap_lock);
	ap->ua_prev_enc = UU_PTR_ENCODE(NULL);
	ap->ua_next_enc = UU_PTR_ENCODE(NULL);

	ap->ua_pool = NULL;
	avl_destroy(&ap->ua_tree);

	uu_free(ap);
}

size_t
uu_avl_numnodes(uu_avl_t *ap)
{
	return (avl_numnodes(&ap->ua_tree));
}

void *
uu_avl_first(uu_avl_t *ap)
{
	return (avl_first(&ap->ua_tree));
}

void *
uu_avl_last(uu_avl_t *ap)
{
	return (avl_last(&ap->ua_tree));
}

void *
uu_avl_next(uu_avl_t *ap, void *node)
{
	return (AVL_NEXT(&ap->ua_tree, node));
}

void *
uu_avl_prev(uu_avl_t *ap, void *node)
{
	return (AVL_PREV(&ap->ua_tree, node));
}

static void
_avl_walk_init(uu_avl_walk_t *wp, uu_avl_t *ap, uint32_t flags)
{
	uu_avl_walk_t *next, *prev;

	int robust = (flags & UU_WALK_ROBUST);
	int direction = (flags & UU_WALK_REVERSE)? -1 : 1;

	(void) memset(wp, 0, sizeof (*wp));
	wp->uaw_avl = ap;
	wp->uaw_robust = robust;
	wp->uaw_dir = direction;

	if (direction > 0)
		wp->uaw_next_result = avl_first(&ap->ua_tree);
	else
		wp->uaw_next_result = avl_last(&ap->ua_tree);

	if (ap->ua_debug || robust) {
		wp->uaw_next = next = &ap->ua_null_walk;
		wp->uaw_prev = prev = next->uaw_prev;
		next->uaw_prev = wp;
		prev->uaw_next = wp;
	}
}

static void *
_avl_walk_advance(uu_avl_walk_t *wp, uu_avl_t *ap)
{
	void *np = wp->uaw_next_result;

	avl_tree_t *t = &ap->ua_tree;

	if (np == NULL)
		return (NULL);

	wp->uaw_next_result = (wp->uaw_dir > 0)? AVL_NEXT(t, np) :
	    AVL_PREV(t, np);

	return (np);
}

static void
_avl_walk_fini(uu_avl_walk_t *wp)
{
	if (wp->uaw_next != NULL) {
		wp->uaw_next->uaw_prev = wp->uaw_prev;
		wp->uaw_prev->uaw_next = wp->uaw_next;
		wp->uaw_next = NULL;
		wp->uaw_prev = NULL;
	}
	wp->uaw_avl = NULL;
	wp->uaw_next_result = NULL;
}

uu_avl_walk_t *
uu_avl_walk_start(uu_avl_t *ap, uint32_t flags)
{
	uu_avl_walk_t *wp;

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	wp = uu_zalloc(sizeof (*wp));
	if (wp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	_avl_walk_init(wp, ap, flags);
	return (wp);
}

void *
uu_avl_walk_next(uu_avl_walk_t *wp)
{
	return (_avl_walk_advance(wp, wp->uaw_avl));
}

void
uu_avl_walk_end(uu_avl_walk_t *wp)
{
	_avl_walk_fini(wp);
	uu_free(wp);
}

int
uu_avl_walk(uu_avl_t *ap, uu_walk_fn_t *func, void *private, uint32_t flags)
{
	void *e;
	uu_avl_walk_t my_walk;

	int status = UU_WALK_NEXT;

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (-1);
	}

	_avl_walk_init(&my_walk, ap, flags);
	while (status == UU_WALK_NEXT &&
	    (e = _avl_walk_advance(&my_walk, ap)) != NULL)
		status = (*func)(e, private);
	_avl_walk_fini(&my_walk);

	if (status >= 0)
		return (0);
	uu_set_error(UU_ERROR_CALLBACK_FAILED);
	return (-1);
}
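
/*
 * Illustrative callback usage (hypothetical code, reusing the invented
 * myobj_t from the sketch above): a callback returns UU_WALK_NEXT to
 * continue, UU_WALK_DONE to stop early, or a negative value to make
 * uu_avl_walk() fail with UU_ERROR_CALLBACK_FAILED:
 *
 *	static int
 *	print_obj(void *obj, void *arg)
 *	{
 *		(void) printf("%d\n", ((myobj_t *)obj)->mo_key);
 *		return (UU_WALK_NEXT);
 *	}
 *
 *	(void) uu_avl_walk(avl, print_obj, NULL, 0);
 */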

void
uu_avl_remove(uu_avl_t *ap, void *elem)
{
	uu_avl_walk_t *wp;
	uu_avl_pool_t *pp = ap->ua_pool;
	uintptr_t *na = NODE_ARRAY(pp, elem);

	if (ap->ua_debug) {
		/*
		 * invalidate outstanding uu_avl_index_ts.
		 */
		ap->ua_index = INDEX_NEXT(ap->ua_index);
	}

	/*
	 * Robust walkers must be advanced if we are removing the node
	 * they are currently using.  In debug mode, non-robust walkers
	 * are also on the walker list.
	 */
	for (wp = ap->ua_null_walk.uaw_next; wp != &ap->ua_null_walk;
	    wp = wp->uaw_next) {
		if (wp->uaw_robust) {
			if (elem == wp->uaw_next_result)
				(void) _avl_walk_advance(wp, ap);
		} else if (wp->uaw_next_result != NULL) {
			uu_panic("uu_avl_remove(%p, %p): active non-robust "
			    "walker\n", (void *)ap, elem);
		}
	}

	avl_remove(&ap->ua_tree, elem);

	na[0] = POOL_TO_MARKER(pp);
	na[1] = 0;
}

void *
uu_avl_teardown(uu_avl_t *ap, void **cookie)
{
	void *elem = avl_destroy_nodes(&ap->ua_tree, cookie);

	if (elem != NULL) {
		uu_avl_pool_t *pp = ap->ua_pool;
		uintptr_t *na = NODE_ARRAY(pp, elem);

		na[0] = POOL_TO_MARKER(pp);
		na[1] = 0;
	}
	return (elem);
}
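
/*
 * Typical teardown loop (a sketch): avl_destroy_nodes() hands back each
 * element without rebalancing, so this is the cheap way to empty a tree:
 *
 *	void *cookie = NULL;
 *	void *obj;
 *
 *	while ((obj = uu_avl_teardown(avl, &cookie)) != NULL)
 *		free(obj);
 *	uu_avl_destroy(avl);
 */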

void *
uu_avl_find(uu_avl_t *ap, void *elem, void *private, uu_avl_index_t *out)
{
	struct uu_avl_node_compare_info info;
	void *result;

	info.ac_compare = ap->ua_pool->uap_cmp;
	info.ac_private = private;
	info.ac_right = elem;
	info.ac_found = NULL;

	result = avl_find(&ap->ua_tree, &info, out);
	if (out != NULL)
		*out = INDEX_ENCODE(ap, *out);

	if (ap->ua_debug && result != NULL)
		uu_panic("uu_avl_find: internal error: avl_find succeeded\n");

	return (info.ac_found);
}

void
uu_avl_insert(uu_avl_t *ap, void *elem, uu_avl_index_t idx)
{
	if (ap->ua_debug) {
		uu_avl_pool_t *pp = ap->ua_pool;
		uintptr_t *na = NODE_ARRAY(pp, elem);

		if (na[1] != 0)
			uu_panic("uu_avl_insert(%p, %p, %p): node already "
			    "in tree, or corrupt\n",
			    (void *)ap, elem, (void *)idx);
		if (na[0] == 0)
			uu_panic("uu_avl_insert(%p, %p, %p): node not "
			    "initialized\n",
			    (void *)ap, elem, (void *)idx);
		if (na[0] != POOL_TO_MARKER(pp))
			uu_panic("uu_avl_insert(%p, %p, %p): node from "
			    "other pool, or corrupt\n",
			    (void *)ap, elem, (void *)idx);

		if (!INDEX_VALID(ap, idx))
			uu_panic("uu_avl_insert(%p, %p, %p): %s\n",
			    (void *)ap, elem, (void *)idx,
			    INDEX_CHECK(idx)? "outdated index" :
			    "invalid index");

		/*
		 * invalidate outstanding uu_avl_index_ts.
		 */
		ap->ua_index = INDEX_NEXT(ap->ua_index);
	}
	avl_insert(&ap->ua_tree, elem, INDEX_DECODE(idx));
}
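
/*
 * The usual insert pattern (a sketch): look the element up first, then
 * insert at the returned index only if no duplicate was found:
 *
 *	uu_avl_index_t idx;
 *
 *	if (uu_avl_find(avl, obj, NULL, &idx) == NULL)
 *		uu_avl_insert(avl, obj, idx);
 *
 * The index must be used before the next insert or remove on this avl;
 * in debug mode, a stale index trips the "outdated index" panic above.
 */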

void *
uu_avl_nearest_next(uu_avl_t *ap, uu_avl_index_t idx)
{
	if (ap->ua_debug && !INDEX_VALID(ap, idx))
		uu_panic("uu_avl_nearest_next(%p, %p): %s\n",
		    (void *)ap, (void *)idx, INDEX_CHECK(idx)?
		    "outdated index" : "invalid index");
	return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_AFTER));
}

void *
uu_avl_nearest_prev(uu_avl_t *ap, uu_avl_index_t idx)
{
	if (ap->ua_debug && !INDEX_VALID(ap, idx))
		uu_panic("uu_avl_nearest_prev(%p, %p): %s\n",
		    (void *)ap, (void *)idx, INDEX_CHECK(idx)?
		    "outdated index" : "invalid index");
	return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_BEFORE));
}
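
/*
 * Illustrative use (a sketch): after a failed uu_avl_find(), the same
 * index locates the neighbors of the would-be insertion point:
 *
 *	if (uu_avl_find(avl, obj, NULL, &idx) == NULL) {
 *		next = uu_avl_nearest_next(avl, idx);
 *		prev = uu_avl_nearest_prev(avl, idx);
 *	}
 */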

/*
 * called from uu_lockup() and uu_release(), as part of our fork1()-safety.
 */
void
uu_avl_lockup(void)
{
	uu_avl_pool_t *pp;

	(void) pthread_mutex_lock(&uu_apool_list_lock);
	for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
	    pp = pp->uap_next)
		(void) pthread_mutex_lock(&pp->uap_lock);
}

void
uu_avl_release(void)
{
	uu_avl_pool_t *pp;

	for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
	    pp = pp->uap_next)
		(void) pthread_mutex_unlock(&pp->uap_lock);
	(void) pthread_mutex_unlock(&uu_apool_list_lock);
}