/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */



#include "libuutil_common.h"

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>

#define	ELEM_TO_NODE(lp, e) \
	((uu_list_node_impl_t *)((uintptr_t)(e) + (lp)->ul_offset))

#define	NODE_TO_ELEM(lp, n) \
	((void *)((uintptr_t)(n) - (lp)->ul_offset))

/*
 * A uu_list_index_t identifies a location for insertion.  It is simply a
 * pointer to the object that follows the insertion point, with a mark
 * stored in its low bits to help catch mistakes.
 *
 * When debugging, the mark changes on every insert and delete, so that
 * stale indexes can be detected.
 */
#define	INDEX_MAX		(sizeof (uintptr_t) - 1)
#define	INDEX_NEXT(m)		(((m) == INDEX_MAX)? 1 : ((m) + 1) & INDEX_MAX)

#define	INDEX_TO_NODE(i)	((uu_list_node_impl_t *)((i) & ~INDEX_MAX))
#define	NODE_TO_INDEX(p, n)	(((uintptr_t)(n) & ~INDEX_MAX) | (p)->ul_index)
#define	INDEX_VALID(p, i)	(((i) & INDEX_MAX) == (p)->ul_index)
#define	INDEX_CHECK(i)		(((i) & INDEX_MAX) != 0)

#define	POOL_TO_MARKER(pp) ((void *)((uintptr_t)(pp) | 1))
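
/*
 * For illustration only (not part of this file):  a caller typically
 * obtains a uu_list_index_t from uu_list_find() and passes it to
 * uu_list_insert(); on a sorted list the index marks the proper
 * insertion point when the element is not already present.  The struct
 * and field names below (my_obj_t, mo_node, mo_key) are hypothetical.
 *
 *	typedef struct my_obj {
 *		uu_list_node_t	mo_node;
 *		int		mo_key;
 *	} my_obj_t;
 *
 *	static void
 *	my_obj_add(uu_list_t *list, my_obj_t *mo)
 *	{
 *		uu_list_index_t idx;
 *
 *		if (uu_list_find(list, mo, NULL, &idx) == NULL)
 *			uu_list_insert(list, mo, idx);
 *	}
 */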

static uu_list_pool_t	uu_null_lpool = { &uu_null_lpool, &uu_null_lpool };
static pthread_mutex_t	uu_lpool_list_lock = PTHREAD_MUTEX_INITIALIZER;

uu_list_pool_t *
uu_list_pool_create(const char *name, size_t objsize,
    size_t nodeoffset, uu_compare_fn_t *compare_func, uint32_t flags)
{
	uu_list_pool_t *pp, *next, *prev;

	if (name == NULL ||
	    uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
	    nodeoffset + sizeof (uu_list_node_t) > objsize) {
		uu_set_error(UU_ERROR_INVALID_ARGUMENT);
		return (NULL);
	}

	if (flags & ~UU_LIST_POOL_DEBUG) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	pp = uu_zalloc(sizeof (uu_list_pool_t));
	if (pp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	(void) strlcpy(pp->ulp_name, name, sizeof (pp->ulp_name));
	pp->ulp_nodeoffset = nodeoffset;
	pp->ulp_objsize = objsize;
	pp->ulp_cmp = compare_func;
	if (flags & UU_LIST_POOL_DEBUG)
		pp->ulp_debug = 1;
	pp->ulp_last_index = 0;

	(void) pthread_mutex_init(&pp->ulp_lock, NULL);

	pp->ulp_null_list.ul_next = &pp->ulp_null_list;
	pp->ulp_null_list.ul_prev = &pp->ulp_null_list;

	(void) pthread_mutex_lock(&uu_lpool_list_lock);
	pp->ulp_next = next = &uu_null_lpool;
	pp->ulp_prev = prev = next->ulp_prev;
	next->ulp_prev = pp;
	prev->ulp_next = pp;
	(void) pthread_mutex_unlock(&uu_lpool_list_lock);

	return (pp);
}

void
uu_list_pool_destroy(uu_list_pool_t *pp)
{
	if (pp->ulp_debug) {
		if (pp->ulp_null_list.ul_next != &pp->ulp_null_list ||
		    pp->ulp_null_list.ul_prev != &pp->ulp_null_list) {
			uu_panic("uu_list_pool_destroy: Pool \"%.*s\" (%p) has "
			    "outstanding lists, or is corrupt.\n",
			    (int)sizeof (pp->ulp_name), pp->ulp_name,
			    (void *)pp);
		}
	}
	(void) pthread_mutex_lock(&uu_lpool_list_lock);
	pp->ulp_next->ulp_prev = pp->ulp_prev;
	pp->ulp_prev->ulp_next = pp->ulp_next;
	(void) pthread_mutex_unlock(&uu_lpool_list_lock);
	pp->ulp_prev = NULL;
	pp->ulp_next = NULL;
	uu_free(pp);
}

void
uu_list_node_init(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
{
	uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;

	if (pp->ulp_debug) {
		uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
		if (offset + sizeof (*np) > pp->ulp_objsize) {
			uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't fit in object (size %ld)\n",
			    base, (void *)np, (void *)pp, pp->ulp_name,
			    (long)offset, (long)pp->ulp_objsize);
		}
		if (offset != pp->ulp_nodeoffset) {
			uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't match pool's offset (%ld)\n",
			    base, (void *)np, (void *)pp, pp->ulp_name,
			    (long)offset, (long)pp->ulp_nodeoffset);
		}
	}
	np->uln_next = POOL_TO_MARKER(pp);
	np->uln_prev = NULL;
}

void
uu_list_node_fini(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
{
	uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;

	if (pp->ulp_debug) {
		if (np->uln_next == NULL &&
		    np->uln_prev == NULL) {
			uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
			    "node already fini'd\n",
			    base, (void *)np_arg, (void *)pp, pp->ulp_name);
		}
		if (np->uln_next != POOL_TO_MARKER(pp) ||
		    np->uln_prev != NULL) {
			uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
			    "node corrupt or on list\n",
			    base, (void *)np_arg, (void *)pp, pp->ulp_name);
		}
	}
	np->uln_next = NULL;
	np->uln_prev = NULL;
}

uu_list_t *
uu_list_create(uu_list_pool_t *pp, void *parent, uint32_t flags)
{
	uu_list_t *lp, *next, *prev;

	if (flags & ~(UU_LIST_DEBUG | UU_LIST_SORTED)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	if ((flags & UU_LIST_SORTED) && pp->ulp_cmp == NULL) {
		if (pp->ulp_debug)
			uu_panic("uu_list_create(%p, ...): requested "
			    "UU_LIST_SORTED, but pool has no comparison func\n",
			    (void *)pp);
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (NULL);
	}

	lp = uu_zalloc(sizeof (*lp));
	if (lp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	lp->ul_pool = pp;
	lp->ul_parent = parent;
	lp->ul_offset = pp->ulp_nodeoffset;
	lp->ul_debug = pp->ulp_debug || (flags & UU_LIST_DEBUG);
	lp->ul_sorted = (flags & UU_LIST_SORTED);
	lp->ul_numnodes = 0;
	lp->ul_index = (pp->ulp_last_index = INDEX_NEXT(pp->ulp_last_index));

	lp->ul_null_node.uln_next = &lp->ul_null_node;
	lp->ul_null_node.uln_prev = &lp->ul_null_node;

	lp->ul_null_walk.ulw_next = &lp->ul_null_walk;
	lp->ul_null_walk.ulw_prev = &lp->ul_null_walk;

	(void) pthread_mutex_lock(&pp->ulp_lock);
	next = &pp->ulp_null_list;
	prev = next->ul_prev;
	lp->ul_next = next;
	lp->ul_prev = prev;
	next->ul_prev = lp;
	prev->ul_next = lp;
	(void) pthread_mutex_unlock(&pp->ulp_lock);

	return (lp);
}
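
/*
 * For illustration only (not part of this file):  a minimal sketch of
 * setting up a pool and a sorted list for the hypothetical my_obj_t
 * shown earlier.  The comparison function and all names are assumptions
 * of this sketch, not part of the libuutil API.
 *
 *	static int
 *	my_obj_cmp(const void *l, const void *r, void *private)
 *	{
 *		int lk = ((const my_obj_t *)l)->mo_key;
 *		int rk = ((const my_obj_t *)r)->mo_key;
 *
 *		return (lk < rk ? -1 : lk > rk ? 1 : 0);
 *	}
 *
 *	uu_list_pool_t *pool = uu_list_pool_create("my_objs",
 *	    sizeof (my_obj_t), offsetof(my_obj_t, mo_node), my_obj_cmp,
 *	    UU_LIST_POOL_DEBUG);
 *	uu_list_t *list = uu_list_create(pool, NULL, UU_LIST_SORTED);
 *
 *	my_obj_t *mo = calloc(1, sizeof (*mo));
 *	uu_list_node_init(mo, &mo->mo_node, pool);
 */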

void
uu_list_destroy(uu_list_t *lp)
{
	uu_list_pool_t *pp = lp->ul_pool;

	if (lp->ul_debug) {
		if (lp->ul_null_node.uln_next != &lp->ul_null_node ||
		    lp->ul_null_node.uln_prev != &lp->ul_null_node) {
			uu_panic("uu_list_destroy(%p):  list not empty\n",
			    (void *)lp);
		}
		if (lp->ul_numnodes != 0) {
			uu_panic("uu_list_destroy(%p):  numnodes is nonzero, "
			    "but list is empty\n", (void *)lp);
		}
		if (lp->ul_null_walk.ulw_next != &lp->ul_null_walk ||
		    lp->ul_null_walk.ulw_prev != &lp->ul_null_walk) {
			uu_panic("uu_list_destroy(%p):  outstanding walkers\n",
			    (void *)lp);
		}
	}

	(void) pthread_mutex_lock(&pp->ulp_lock);
	lp->ul_next->ul_prev = lp->ul_prev;
	lp->ul_prev->ul_next = lp->ul_next;
	(void) pthread_mutex_unlock(&pp->ulp_lock);
	lp->ul_prev = NULL;
	lp->ul_next = NULL;
	lp->ul_pool = NULL;
	uu_free(lp);
}

static void
list_insert(uu_list_t *lp, uu_list_node_impl_t *np, uu_list_node_impl_t *prev,
    uu_list_node_impl_t *next)
{
	if (lp->ul_debug) {
		if (next->uln_prev != prev || prev->uln_next != next)
			uu_panic("insert(%p): internal error: %p and %p not "
			    "neighbors\n", (void *)lp, (void *)next,
			    (void *)prev);

		if (np->uln_next != POOL_TO_MARKER(lp->ul_pool) ||
		    np->uln_prev != NULL) {
			uu_panic("insert(%p): elem %p node %p corrupt, "
			    "not initialized, or already in a list.\n",
			    (void *)lp, NODE_TO_ELEM(lp, np), (void *)np);
		}
		/*
		 * invalidate outstanding uu_list_index_ts.
		 */
		lp->ul_index = INDEX_NEXT(lp->ul_index);
	}
	np->uln_next = next;
	np->uln_prev = prev;
	next->uln_prev = np;
	prev->uln_next = np;

	lp->ul_numnodes++;
}

void
uu_list_insert(uu_list_t *lp, void *elem, uu_list_index_t idx)
{
	uu_list_node_impl_t *np;

	np = INDEX_TO_NODE(idx);
	if (np == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (!INDEX_VALID(lp, idx))
			uu_panic("uu_list_insert(%p, %p, %p): %s\n",
			    (void *)lp, elem, (void *)idx,
			    INDEX_CHECK(idx)? "outdated index" :
			    "invalid index");
		if (np->uln_prev == NULL)
			uu_panic("uu_list_insert(%p, %p, %p): out-of-date "
			    "index\n", (void *)lp, elem, (void *)idx);
	}

	list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
}

void *
uu_list_find(uu_list_t *lp, void *elem, void *private, uu_list_index_t *out)
{
	int sorted = lp->ul_sorted;
	uu_compare_fn_t *func = lp->ul_pool->ulp_cmp;
	uu_list_node_impl_t *np;

	if (func == NULL) {
		if (out != NULL)
			*out = 0;
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (NULL);
	}
	for (np = lp->ul_null_node.uln_next; np != &lp->ul_null_node;
	    np = np->uln_next) {
		void *ep = NODE_TO_ELEM(lp, np);
		int cmp = func(ep, elem, private);
		if (cmp == 0) {
			if (out != NULL)
				*out = NODE_TO_INDEX(lp, np);
			return (ep);
		}
		if (sorted && cmp > 0) {
			if (out != NULL)
				*out = NODE_TO_INDEX(lp, np);
			return (NULL);
		}
	}
	if (out != NULL)
		*out = NODE_TO_INDEX(lp, 0);
	return (NULL);
}

void *
uu_list_nearest_next(uu_list_t *lp, uu_list_index_t idx)
{
	uu_list_node_impl_t *np = INDEX_TO_NODE(idx);

	if (np == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (!INDEX_VALID(lp, idx))
			uu_panic("uu_list_nearest_next(%p, %p): %s\n",
			    (void *)lp, (void *)idx,
			    INDEX_CHECK(idx)? "outdated index" :
			    "invalid index");
		if (np->uln_prev == NULL)
			uu_panic("uu_list_nearest_next(%p, %p): out-of-date "
			    "index\n", (void *)lp, (void *)idx);
	}

	if (np == &lp->ul_null_node)
		return (NULL);
	else
		return (NODE_TO_ELEM(lp, np));
}

void *
uu_list_nearest_prev(uu_list_t *lp, uu_list_index_t idx)
{
	uu_list_node_impl_t *np = INDEX_TO_NODE(idx);

	if (np == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (!INDEX_VALID(lp, idx))
			uu_panic("uu_list_nearest_prev(%p, %p): %s\n",
			    (void *)lp, (void *)idx, INDEX_CHECK(idx)?
			    "outdated index" : "invalid index");
		if (np->uln_prev == NULL)
			uu_panic("uu_list_nearest_prev(%p, %p): out-of-date "
			    "index\n", (void *)lp, (void *)idx);
	}

	if ((np = np->uln_prev) == &lp->ul_null_node)
		return (NULL);
	else
		return (NODE_TO_ELEM(lp, np));
}
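
/*
 * For illustration only (not part of this file):  on a sorted list, a
 * failed uu_list_find() still fills in an index marking the insertion
 * point, so the neighbors of a missing key can be inspected with
 * uu_list_nearest_next() and uu_list_nearest_prev().  The my_obj_t
 * names and the search key are hypothetical.
 *
 *	uu_list_index_t idx;
 *	my_obj_t key = { .mo_key = 42 };
 *	my_obj_t *after, *before;
 *
 *	if (uu_list_find(list, &key, NULL, &idx) == NULL) {
 *		after = uu_list_nearest_next(list, idx);
 *		before = uu_list_nearest_prev(list, idx);
 *	}
 */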

static void
list_walk_init(uu_list_walk_t *wp, uu_list_t *lp, uint32_t flags)
{
	uu_list_walk_t *next, *prev;

	int robust = (flags & UU_WALK_ROBUST);
	int direction = (flags & UU_WALK_REVERSE)? -1 : 1;

	(void) memset(wp, 0, sizeof (*wp));
	wp->ulw_list = lp;
	wp->ulw_robust = robust;
	wp->ulw_dir = direction;
	if (direction > 0)
		wp->ulw_next_result = lp->ul_null_node.uln_next;
	else
		wp->ulw_next_result = lp->ul_null_node.uln_prev;

	if (lp->ul_debug || robust) {
		/*
		 * Add this walker to the list's list of walkers so
		 * uu_list_remove() can advance us if somebody tries to
		 * remove ulw_next_result.
		 */
		wp->ulw_next = next = &lp->ul_null_walk;
		wp->ulw_prev = prev = next->ulw_prev;
		next->ulw_prev = wp;
		prev->ulw_next = wp;
	}
}

static uu_list_node_impl_t *
list_walk_advance(uu_list_walk_t *wp, uu_list_t *lp)
{
	uu_list_node_impl_t *np = wp->ulw_next_result;
	uu_list_node_impl_t *next;

	if (np == &lp->ul_null_node)
		return (NULL);

	next = (wp->ulw_dir > 0)? np->uln_next : np->uln_prev;

	wp->ulw_next_result = next;
	return (np);
}

static void
list_walk_fini(uu_list_walk_t *wp)
{
	/* GLXXX debugging? */
	if (wp->ulw_next != NULL) {
		wp->ulw_next->ulw_prev = wp->ulw_prev;
		wp->ulw_prev->ulw_next = wp->ulw_next;
		wp->ulw_next = NULL;
		wp->ulw_prev = NULL;
	}
	wp->ulw_list = NULL;
	wp->ulw_next_result = NULL;
}

uu_list_walk_t *
uu_list_walk_start(uu_list_t *lp, uint32_t flags)
{
	uu_list_walk_t *wp;

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	wp = uu_zalloc(sizeof (*wp));
	if (wp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	list_walk_init(wp, lp, flags);
	return (wp);
}

void *
uu_list_walk_next(uu_list_walk_t *wp)
{
	uu_list_t *lp = wp->ulw_list;
	uu_list_node_impl_t *np = list_walk_advance(wp, lp);

	if (np == NULL)
		return (NULL);

	return (NODE_TO_ELEM(lp, np));
}

void
uu_list_walk_end(uu_list_walk_t *wp)
{
	list_walk_fini(wp);
	uu_free(wp);
}

int
uu_list_walk(uu_list_t *lp, uu_walk_fn_t *func, void *private, uint32_t flags)
{
	uu_list_node_impl_t *np;

	int status = UU_WALK_NEXT;

	int robust = (flags & UU_WALK_ROBUST);
	int reverse = (flags & UU_WALK_REVERSE);

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (-1);
	}

	if (lp->ul_debug || robust) {
		uu_list_walk_t *my_walk;
		void *e;

		my_walk = uu_zalloc(sizeof (*my_walk));
		if (my_walk == NULL)
			return (-1);

		list_walk_init(my_walk, lp, flags);
		while (status == UU_WALK_NEXT &&
		    (e = uu_list_walk_next(my_walk)) != NULL)
			status = (*func)(e, private);
		list_walk_fini(my_walk);

		uu_free(my_walk);
	} else {
		if (!reverse) {
			for (np = lp->ul_null_node.uln_next;
			    status == UU_WALK_NEXT && np != &lp->ul_null_node;
			    np = np->uln_next) {
				status = (*func)(NODE_TO_ELEM(lp, np), private);
			}
		} else {
			for (np = lp->ul_null_node.uln_prev;
			    status == UU_WALK_NEXT && np != &lp->ul_null_node;
			    np = np->uln_prev) {
				status = (*func)(NODE_TO_ELEM(lp, np), private);
			}
		}
	}
	if (status >= 0)
		return (0);
	uu_set_error(UU_ERROR_CALLBACK_FAILED);
	return (-1);
}
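
/*
 * For illustration only (not part of this file):  callback-style walking
 * with uu_list_walk().  The callback and the my_obj_t type are the same
 * hypothetical ones used in the sketches above; returning UU_WALK_NEXT
 * continues the walk, UU_WALK_DONE stops it early.
 *
 *	static int
 *	my_obj_print(void *elem, void *private)
 *	{
 *		(void) printf("%d\n", ((my_obj_t *)elem)->mo_key);
 *		return (UU_WALK_NEXT);
 *	}
 *
 *	if (uu_list_walk(list, my_obj_print, NULL, 0) != 0)
 *		(void) fprintf(stderr, "%s\n", uu_strerror(uu_error()));
 */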

void
uu_list_remove(uu_list_t *lp, void *elem)
{
	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, elem);
	uu_list_walk_t *wp;

	if (lp->ul_debug) {
		if (np->uln_prev == NULL)
			uu_panic("uu_list_remove(%p, %p): elem not on list\n",
			    (void *)lp, elem);
		/*
		 * invalidate outstanding uu_list_index_ts.
		 */
		lp->ul_index = INDEX_NEXT(lp->ul_index);
	}

	/*
	 * robust walkers must be advanced.  In debug mode, non-robust
	 * walkers are also on the list.  If there are any, it's an error.
	 */
	for (wp = lp->ul_null_walk.ulw_next; wp != &lp->ul_null_walk;
	    wp = wp->ulw_next) {
		if (wp->ulw_robust) {
			if (np == wp->ulw_next_result)
				(void) list_walk_advance(wp, lp);
		} else if (wp->ulw_next_result != NULL) {
			uu_panic("uu_list_remove(%p, %p): active non-robust "
			    "walker\n", (void *)lp, elem);
		}
	}

	np->uln_next->uln_prev = np->uln_prev;
	np->uln_prev->uln_next = np->uln_next;

	lp->ul_numnodes--;

	np->uln_next = POOL_TO_MARKER(lp->ul_pool);
	np->uln_prev = NULL;
}
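
/*
 * For illustration only (not part of this file):  removing elements
 * while iterating requires a robust walker; uu_list_remove() then
 * advances the walker past the element being removed.  my_obj_t, the
 * other names, and the selection test are hypothetical.
 *
 *	uu_list_walk_t *walk;
 *	my_obj_t *mo;
 *
 *	if ((walk = uu_list_walk_start(list, UU_WALK_ROBUST)) != NULL) {
 *		while ((mo = uu_list_walk_next(walk)) != NULL) {
 *			if (mo->mo_key < 0) {
 *				uu_list_remove(list, mo);
 *				uu_list_node_fini(mo, &mo->mo_node, pool);
 *				free(mo);
 *			}
 *		}
 *		uu_list_walk_end(walk);
 *	}
 */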

void *
uu_list_teardown(uu_list_t *lp, void **cookie)
{
	void *ep;

	/*
	 * XXX: disable list modification until list is empty
	 */
	if (lp->ul_debug && *cookie != NULL)
		uu_panic("uu_list_teardown(%p, %p): unexpected cookie\n",
		    (void *)lp, (void *)cookie);

	ep = uu_list_first(lp);
	if (ep)
		uu_list_remove(lp, ep);
	return (ep);
}
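
/*
 * For illustration only (not part of this file):  the usual teardown
 * idiom repeatedly calls uu_list_teardown() until the list is empty,
 * then destroys the list and the pool.  my_obj_t, pool, and list are
 * the hypothetical names used above.
 *
 *	void *cookie = NULL;
 *	my_obj_t *mo;
 *
 *	while ((mo = uu_list_teardown(list, &cookie)) != NULL) {
 *		uu_list_node_fini(mo, &mo->mo_node, pool);
 *		free(mo);
 *	}
 *	uu_list_destroy(list);
 *	uu_list_pool_destroy(pool);
 */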

int
uu_list_insert_before(uu_list_t *lp, void *target, void *elem)
{
	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);

	if (target == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (np->uln_prev == NULL)
			uu_panic("uu_list_insert_before(%p, %p, %p): %p is "
			    "not currently on a list\n",
			    (void *)lp, target, elem, target);
	}
	if (lp->ul_sorted) {
		if (lp->ul_debug)
			uu_panic("uu_list_insert_before(%p, ...): list is "
			    "UU_LIST_SORTED\n", (void *)lp);
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (-1);
	}

	list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
	return (0);
}

int
uu_list_insert_after(uu_list_t *lp, void *target, void *elem)
{
	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);

	if (target == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (np->uln_prev == NULL)
			uu_panic("uu_list_insert_after(%p, %p, %p): %p is "
			    "not currently on a list\n",
			    (void *)lp, target, elem, target);
	}
	if (lp->ul_sorted) {
		if (lp->ul_debug)
			uu_panic("uu_list_insert_after(%p, ...): list is "
			    "UU_LIST_SORTED\n", (void *)lp);
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (-1);
	}

	list_insert(lp, ELEM_TO_NODE(lp, elem), np, np->uln_next);
	return (0);
}
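
/*
 * For illustration only (not part of this file):  on an unsorted list,
 * a common way to append an element is to insert it after the current
 * last element; a NULL target makes the element the new head, which
 * also covers the empty-list case.  mo and list are the hypothetical
 * names used above.
 *
 *	(void) uu_list_insert_after(list, uu_list_last(list), mo);
 */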

size_t
uu_list_numnodes(uu_list_t *lp)
{
	return (lp->ul_numnodes);
}

void *
uu_list_first(uu_list_t *lp)
{
	uu_list_node_impl_t *n = lp->ul_null_node.uln_next;
	if (n == &lp->ul_null_node)
		return (NULL);
	return (NODE_TO_ELEM(lp, n));
}

void *
uu_list_last(uu_list_t *lp)
{
	uu_list_node_impl_t *n = lp->ul_null_node.uln_prev;
	if (n == &lp->ul_null_node)
		return (NULL);
	return (NODE_TO_ELEM(lp, n));
}

void *
uu_list_next(uu_list_t *lp, void *elem)
{
	uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);

	n = n->uln_next;
	if (n == &lp->ul_null_node)
		return (NULL);
	return (NODE_TO_ELEM(lp, n));
}

void *
uu_list_prev(uu_list_t *lp, void *elem)
{
	uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);

	n = n->uln_prev;
	if (n == &lp->ul_null_node)
		return (NULL);
	return (NODE_TO_ELEM(lp, n));
}
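
/*
 * For illustration only (not part of this file):  when no elements will
 * be removed during the traversal, the simplest iteration idiom uses
 * uu_list_first() and uu_list_next() directly.  mo and list are the
 * hypothetical names used above.
 *
 *	for (mo = uu_list_first(list); mo != NULL;
 *	    mo = uu_list_next(list, mo))
 *		(void) printf("%d\n", mo->mo_key);
 */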

/*
 * called from uu_lockup() and uu_release(), as part of our fork1()-safety.
 */
void
uu_list_lockup(void)
{
	uu_list_pool_t *pp;

	(void) pthread_mutex_lock(&uu_lpool_list_lock);
	for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
	    pp = pp->ulp_next)
		(void) pthread_mutex_lock(&pp->ulp_lock);
}

void
uu_list_release(void)
{
	uu_list_pool_t *pp;

	for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
	    pp = pp->ulp_next)
		(void) pthread_mutex_unlock(&pp->ulp_lock);
	(void) pthread_mutex_unlock(&uu_lpool_list_lock);
}