/*
 *  Copyright (C) 2010 Lawrence Livermore National Security, LLC.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 *
 *  Solaris Porting Layer (SPL) Thread Specific Data Implementation.
 *
 *  Thread specific data is implemented using a hash table, which avoids
 *  the need to add a member to the task structure and allows maximum
 *  portability between kernels.  This implementation has been optimized
 *  to keep the tsd_set() and tsd_get() times as small as possible.
 *
 *  The majority of the entries in the hash table are for specific tsd
 *  entries.  These entries are hashed by the product of their key and
 *  pid because by design the key and pid are guaranteed to be unique.
 *  Their product also has the desirable property that it will be uniformly
 *  distributed over the hash bins provided neither the pid nor key is zero.
 *  Under Linux the zero pid is reserved for the idle/swapper task and thus
 *  won't be used, and this implementation is careful never to assign a
 *  zero key.  By default the hash table is sized to 512 bins which is
 *  expected to be sufficient for light to moderate usage of thread
 *  specific data.
 *
 *  The hash table contains two additional types of entries.  The first
 *  type is called a 'key' entry and it is added to the hash during
 *  tsd_create().  It is used to store the address of the destructor function
 *  and it is used as an anchor point.  All tsd entries which use the same
 *  key will be linked to this entry.  This is used during tsd_destroy() to
 *  quickly call the destructor function for all tsd associated with the key.
 *  The 'key' entry may be looked up with tsd_hash_search() by passing the
 *  key you wish to look up and the DTOR_PID constant as the pid.
 *
 *  The second type of entry is called a 'pid' entry and it is added to the
 *  hash the first time a process sets a key.  The 'pid' entry is also used
 *  as an anchor and all tsd for the process will be linked to it.  This
 *  list is used during tsd_exit() to ensure all registered destructors
 *  are run for the process.  The 'pid' entry may be looked up with
 *  tsd_hash_search() by passing the PID_KEY constant as the key and
 *  the process pid.  Note that tsd_exit() is called by thread_exit()
 *  so if you are using the Solaris thread API you should not need to call
 *  tsd_exit() directly.
 *
 */
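
/*
 * The sketch below illustrates the public API described above; it is not
 * part of the implementation.  The key variable, destructor, and caller
 * code are hypothetical names used only for illustration.
 *
 *	static uint_t example_key = 0;
 *
 *	static void
 *	example_dtor(void *value)
 *	{
 *		kmem_free(value, sizeof (int));
 *	}
 *
 *	// Once, at setup: allocate a key and register the destructor.
 *	tsd_create(&example_key, example_dtor);
 *
 *	// Per thread: store, then later retrieve, a thread private value.
 *	tsd_set(example_key, kmem_zalloc(sizeof (int), KM_SLEEP));
 *	int *value = tsd_get(example_key);
 *
 *	// At teardown: run destructors for all threads and release the key.
 *	tsd_destroy(&example_key);
 */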

#include <sys/kmem.h>
#include <sys/thread.h>
#include <sys/tsd.h>
#include <linux/hash.h>

typedef struct tsd_hash_bin {
	spinlock_t		hb_lock;
	struct hlist_head	hb_head;
} tsd_hash_bin_t;

typedef struct tsd_hash_table {
	spinlock_t		ht_lock;
	uint_t			ht_bits;
	uint_t			ht_key;
	tsd_hash_bin_t		*ht_bins;
} tsd_hash_table_t;

typedef struct tsd_hash_entry {
	uint_t			he_key;
	pid_t			he_pid;
	dtor_func_t		he_dtor;
	void			*he_value;
	struct hlist_node	he_list;
	struct list_head	he_key_list;
	struct list_head	he_pid_list;
} tsd_hash_entry_t;

static tsd_hash_table_t *tsd_hash_table = NULL;


/*
 * tsd_hash_search - searches hash table for tsd_hash_entry
 * @table: hash table
 * @key: search key
 * @pid: search pid
 */
static tsd_hash_entry_t *
tsd_hash_search(tsd_hash_table_t *table, uint_t key, pid_t pid)
{
	struct hlist_node *node = NULL;
	tsd_hash_entry_t *entry;
	tsd_hash_bin_t *bin;
	ulong_t hash;

	hash = hash_long((ulong_t)key * (ulong_t)pid, table->ht_bits);
	bin = &table->ht_bins[hash];
	spin_lock(&bin->hb_lock);
	hlist_for_each(node, &bin->hb_head) {
		entry = list_entry(node, tsd_hash_entry_t, he_list);
		if ((entry->he_key == key) && (entry->he_pid == pid)) {
			spin_unlock(&bin->hb_lock);
			return (entry);
		}
	}

	spin_unlock(&bin->hb_lock);
	return (NULL);
}

/*
 * tsd_hash_dtor - call the destructor and free all entries on the list
 * @work: list of hash entries
 *
 * For a list of entries which have all already been removed from the
 * hash, call their registered destructor and then free the associated
 * memory.
 */
static void
tsd_hash_dtor(struct hlist_head *work)
{
	tsd_hash_entry_t *entry;

	while (!hlist_empty(work)) {
		entry = hlist_entry(work->first, tsd_hash_entry_t, he_list);
		hlist_del(&entry->he_list);

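		/*
		 * 'key' anchor entries (he_pid == DTOR_PID) only carry the
		 * destructor pointer and have no per-thread value, so the
		 * destructor is invoked only for regular tsd entries.
		 */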
		if (entry->he_dtor && entry->he_pid != DTOR_PID)
			entry->he_dtor(entry->he_value);

		kmem_free(entry, sizeof (tsd_hash_entry_t));
	}
}

/*
 * tsd_hash_add - adds an entry to hash table
 * @table: hash table
 * @key: search key
 * @pid: search pid
 * @value: value to store
 *
 * The caller is responsible for ensuring the unique key/pid does not
 * already exist in the hash table.  This is possible because all entries
 * are thread specific, thus a concurrent thread will never attempt to
 * add this key/pid.  Because multiple bins must be checked to add
 * links to the dtor and pid entries, the entire table is locked.
 */
static int
tsd_hash_add(tsd_hash_table_t *table, uint_t key, pid_t pid, void *value)
{
	tsd_hash_entry_t *entry, *dtor_entry, *pid_entry;
	tsd_hash_bin_t *bin;
	ulong_t hash;
	int rc = 0;

	ASSERT3P(tsd_hash_search(table, key, pid), ==, NULL);

	/* New entry: allocate structure, set value, and add to hash */
	entry = kmem_alloc(sizeof (tsd_hash_entry_t), KM_PUSHPAGE);
	if (entry == NULL)
		return (ENOMEM);

	entry->he_key = key;
	entry->he_pid = pid;
	entry->he_value = value;
	INIT_HLIST_NODE(&entry->he_list);
	INIT_LIST_HEAD(&entry->he_key_list);
	INIT_LIST_HEAD(&entry->he_pid_list);

	spin_lock(&table->ht_lock);

	/* Destructor entry must exist for all valid keys */
	dtor_entry = tsd_hash_search(table, entry->he_key, DTOR_PID);
	ASSERT3P(dtor_entry, !=, NULL);
	entry->he_dtor = dtor_entry->he_dtor;

	/* Process entry must exist for all valid processes */
	pid_entry = tsd_hash_search(table, PID_KEY, entry->he_pid);
	ASSERT3P(pid_entry, !=, NULL);

	hash = hash_long((ulong_t)key * (ulong_t)pid, table->ht_bits);
	bin = &table->ht_bins[hash];
	spin_lock(&bin->hb_lock);

	/* Add to the hash, key, and pid lists */
	hlist_add_head(&entry->he_list, &bin->hb_head);
	list_add(&entry->he_key_list, &dtor_entry->he_key_list);
	list_add(&entry->he_pid_list, &pid_entry->he_pid_list);

	spin_unlock(&bin->hb_lock);
	spin_unlock(&table->ht_lock);

	return (rc);
}

/*
 * tsd_hash_add_key - adds a destructor entry to the hash table
 * @table: hash table
 * @keyp: search key
 * @dtor: key destructor
 *
 * For every unique key there is a single entry in the hash which is used
 * as an anchor.  All other thread specific entries for this key are linked
 * to this anchor via the 'he_key_list' list head.  On return *keyp will
 * be set to the newly assigned key for the hash table.
 */
static int
tsd_hash_add_key(tsd_hash_table_t *table, uint_t *keyp, dtor_func_t dtor)
{
	tsd_hash_entry_t *tmp_entry, *entry;
	tsd_hash_bin_t *bin;
	ulong_t hash;
	int keys_checked = 0;

	ASSERT3P(table, !=, NULL);

	/* Allocate entry to be used as a destructor for this key */
	entry = kmem_alloc(sizeof (tsd_hash_entry_t), KM_PUSHPAGE);
	if (entry == NULL)
		return (ENOMEM);

	/* Determine next available key value */
	spin_lock(&table->ht_lock);
	do {
		/* Limited to TSD_KEYS_MAX concurrent unique keys */
		if (table->ht_key++ > TSD_KEYS_MAX)
			table->ht_key = 1;

		/* Ensure failure when all TSD_KEYS_MAX keys are in use */
		if (keys_checked++ >= TSD_KEYS_MAX) {
			spin_unlock(&table->ht_lock);
			kmem_free(entry, sizeof (tsd_hash_entry_t));
			return (ENOENT);
		}

		tmp_entry = tsd_hash_search(table, table->ht_key, DTOR_PID);
	} while (tmp_entry);

	/* Add destructor entry into the hash table */
	entry->he_key = *keyp = table->ht_key;
	entry->he_pid = DTOR_PID;
	entry->he_dtor = dtor;
	entry->he_value = NULL;
	INIT_HLIST_NODE(&entry->he_list);
	INIT_LIST_HEAD(&entry->he_key_list);
	INIT_LIST_HEAD(&entry->he_pid_list);

	hash = hash_long((ulong_t)*keyp * (ulong_t)DTOR_PID, table->ht_bits);
	bin = &table->ht_bins[hash];
	spin_lock(&bin->hb_lock);

	hlist_add_head(&entry->he_list, &bin->hb_head);

	spin_unlock(&bin->hb_lock);
	spin_unlock(&table->ht_lock);

	return (0);
}

/*
 * tsd_hash_add_pid - adds a process entry to the hash table
 * @table: hash table
 * @pid: search pid
 *
 * For every process there is a single entry in the hash which is used
 * as an anchor.  All other thread specific entries for this process are
 * linked to this anchor via the 'he_pid_list' list head.
 */
static int
tsd_hash_add_pid(tsd_hash_table_t *table, pid_t pid)
{
	tsd_hash_entry_t *entry;
	tsd_hash_bin_t *bin;
	ulong_t hash;

	/* Allocate entry to be used as the process reference */
	entry = kmem_alloc(sizeof (tsd_hash_entry_t), KM_PUSHPAGE);
	if (entry == NULL)
		return (ENOMEM);

	spin_lock(&table->ht_lock);
	entry->he_key = PID_KEY;
	entry->he_pid = pid;
	entry->he_dtor = NULL;
	entry->he_value = NULL;
	INIT_HLIST_NODE(&entry->he_list);
	INIT_LIST_HEAD(&entry->he_key_list);
	INIT_LIST_HEAD(&entry->he_pid_list);

	hash = hash_long((ulong_t)PID_KEY * (ulong_t)pid, table->ht_bits);
	bin = &table->ht_bins[hash];
	spin_lock(&bin->hb_lock);

	hlist_add_head(&entry->he_list, &bin->hb_head);

	spin_unlock(&bin->hb_lock);
	spin_unlock(&table->ht_lock);

	return (0);
}

/*
 * tsd_hash_del - delete an entry from hash table, key, and pid lists
 * @table: hash table
 * @entry: hash entry to delete
 *
 * The caller must hold the table lock and the entry's bin lock.
 */
static void
tsd_hash_del(tsd_hash_table_t *table, tsd_hash_entry_t *entry)
{
	hlist_del(&entry->he_list);
	list_del_init(&entry->he_key_list);
	list_del_init(&entry->he_pid_list);
}

/*
 * tsd_hash_table_init - allocate a hash table
 * @bits: hash table size
 *
 * A hash table with 2^bits bins will be created.  It may not be resized
 * after the fact and must be freed with tsd_hash_table_fini().
 */
static tsd_hash_table_t *
tsd_hash_table_init(uint_t bits)
{
	tsd_hash_table_t *table;
	int hash, size = (1 << bits);

	table = kmem_zalloc(sizeof (tsd_hash_table_t), KM_SLEEP);
	if (table == NULL)
		return (NULL);

	table->ht_bins = kmem_zalloc(sizeof (tsd_hash_bin_t) * size, KM_SLEEP);
	if (table->ht_bins == NULL) {
		kmem_free(table, sizeof (tsd_hash_table_t));
		return (NULL);
	}

	for (hash = 0; hash < size; hash++) {
		spin_lock_init(&table->ht_bins[hash].hb_lock);
		INIT_HLIST_HEAD(&table->ht_bins[hash].hb_head);
	}

	spin_lock_init(&table->ht_lock);
	table->ht_bits = bits;
	table->ht_key = 1;

	return (table);
}

/*
 * tsd_hash_table_fini - free a hash table
 * @table: hash table
 *
 * Free a hash table allocated by tsd_hash_table_init().  If the hash
 * table is not empty this function will call the proper destructor for
 * all remaining entries before freeing the memory used by those entries.
 */
static void
tsd_hash_table_fini(tsd_hash_table_t *table)
{
	HLIST_HEAD(work);
	tsd_hash_bin_t *bin;
	tsd_hash_entry_t *entry;
	int size, i;

	ASSERT3P(table, !=, NULL);
	spin_lock(&table->ht_lock);
	for (i = 0, size = (1 << table->ht_bits); i < size; i++) {
		bin = &table->ht_bins[i];
		spin_lock(&bin->hb_lock);
		while (!hlist_empty(&bin->hb_head)) {
			entry = hlist_entry(bin->hb_head.first,
			    tsd_hash_entry_t, he_list);
			tsd_hash_del(table, entry);
			hlist_add_head(&entry->he_list, &work);
		}
		spin_unlock(&bin->hb_lock);
	}
	spin_unlock(&table->ht_lock);

	tsd_hash_dtor(&work);
	kmem_free(table->ht_bins, sizeof (tsd_hash_bin_t)*(1<<table->ht_bits));
	kmem_free(table, sizeof (tsd_hash_table_t));
}

/*
 * tsd_remove_entry - remove a tsd entry for this thread
 * @entry: entry to remove
 *
 * Remove the thread specific data @entry for this thread.
 * If this is the last entry for this thread, also remove the PID entry.
 */
static void
tsd_remove_entry(tsd_hash_entry_t *entry)
{
	HLIST_HEAD(work);
	tsd_hash_table_t *table;
	tsd_hash_entry_t *pid_entry;
	tsd_hash_bin_t *pid_entry_bin, *entry_bin;
	ulong_t hash;

	table = tsd_hash_table;
	ASSERT3P(table, !=, NULL);
	ASSERT3P(entry, !=, NULL);

	spin_lock(&table->ht_lock);

	hash = hash_long((ulong_t)entry->he_key *
	    (ulong_t)entry->he_pid, table->ht_bits);
	entry_bin = &table->ht_bins[hash];

	/* Save the next entry on the pid list; it may be the 'pid' anchor */
	pid_entry = list_entry(entry->he_pid_list.next, tsd_hash_entry_t,
	    he_pid_list);

	/* Remove the entry */
	spin_lock(&entry_bin->hb_lock);
	tsd_hash_del(table, entry);
	hlist_add_head(&entry->he_list, &work);
	spin_unlock(&entry_bin->hb_lock);

	/* If it really is the 'pid' anchor and it is now empty, remove it */
	if (pid_entry->he_key == PID_KEY &&
	    list_empty(&pid_entry->he_pid_list)) {
		hash = hash_long((ulong_t)pid_entry->he_key *
		    (ulong_t)pid_entry->he_pid, table->ht_bits);
		pid_entry_bin = &table->ht_bins[hash];

		spin_lock(&pid_entry_bin->hb_lock);
		tsd_hash_del(table, pid_entry);
		hlist_add_head(&pid_entry->he_list, &work);
		spin_unlock(&pid_entry_bin->hb_lock);
	}

	spin_unlock(&table->ht_lock);

	tsd_hash_dtor(&work);
}

/*
 * tsd_set - set thread specific data
 * @key: lookup key
 * @value: value to set
 *
 * Caller must prevent racing tsd_create() or tsd_destroy().  This function
 * is protected from racing tsd_get() or tsd_set() because the data is
 * thread specific.  It has been optimized to be fast for the update case.
 * When setting the tsd initially it will be slower due to additional
 * required locking and potential memory allocations.
 */
int
tsd_set(uint_t key, void *value)
{
	tsd_hash_table_t *table;
	tsd_hash_entry_t *entry;
	pid_t pid;
	int rc;
	/* Mark for removal if the value is NULL */
	boolean_t remove = (value == NULL);

	table = tsd_hash_table;
	pid = curthread->pid;
	ASSERT3P(table, !=, NULL);

	if ((key == 0) || (key > TSD_KEYS_MAX))
		return (EINVAL);

	/* Entry already exists in the hash table, update its value */
	entry = tsd_hash_search(table, key, pid);
	if (entry) {
		entry->he_value = value;
		/* Remove the entry when the new value is NULL */
		if (remove)
			tsd_remove_entry(entry);
		return (0);
	}

	/* Don't create an entry if the value is NULL */
	if (remove)
		return (0);

	/* Add a process entry to the hash if one does not yet exist */
	entry = tsd_hash_search(table, PID_KEY, pid);
	if (entry == NULL) {
		rc = tsd_hash_add_pid(table, pid);
		if (rc)
			return (rc);
	}

	rc = tsd_hash_add(table, key, pid, value);
	return (rc);
}
EXPORT_SYMBOL(tsd_set);

/*
 * tsd_get - get thread specific data
 * @key: lookup key
 *
 * Caller must prevent racing tsd_create() or tsd_destroy().  This
 * implementation is designed to be fast and scalable; it does not
 * lock the entire table, only a single hash bin.
 */
void *
tsd_get(uint_t key)
{
	tsd_hash_entry_t *entry;

	ASSERT3P(tsd_hash_table, !=, NULL);

	if ((key == 0) || (key > TSD_KEYS_MAX))
		return (NULL);

	entry = tsd_hash_search(tsd_hash_table, key, curthread->pid);
	if (entry == NULL)
		return (NULL);

	return (entry->he_value);
}
EXPORT_SYMBOL(tsd_get);

/*
 * tsd_get_by_thread - get thread specific data for specified thread
 * @key: lookup key
 * @thread: thread to look up
 *
 * Caller must prevent racing tsd_create() or tsd_destroy().  This
 * implementation is designed to be fast and scalable; it does not
 * lock the entire table, only a single hash bin.
 */
void *
tsd_get_by_thread(uint_t key, kthread_t *thread)
{
	tsd_hash_entry_t *entry;

	ASSERT3P(tsd_hash_table, !=, NULL);

	if ((key == 0) || (key > TSD_KEYS_MAX))
		return (NULL);

	entry = tsd_hash_search(tsd_hash_table, key, thread->pid);
	if (entry == NULL)
		return (NULL);

	return (entry->he_value);
}
EXPORT_SYMBOL(tsd_get_by_thread);

/*
 * tsd_create - create thread specific data key
 * @keyp: lookup key address
 * @dtor: destructor called during tsd_destroy() or tsd_exit()
 *
 * The provided key must be set to 0 or it is assumed to be already in use.
 * The dtor is allowed to be NULL in which case no additional cleanup
 * for the data is performed during tsd_destroy() or tsd_exit().
 *
 * Caller must prevent racing tsd_set() or tsd_get(); this function is
 * safe from racing tsd_create(), tsd_destroy(), and tsd_exit().
 */
void
tsd_create(uint_t *keyp, dtor_func_t dtor)
{
	ASSERT3P(keyp, !=, NULL);
	if (*keyp)
		return;

	(void) tsd_hash_add_key(tsd_hash_table, keyp, dtor);
}
EXPORT_SYMBOL(tsd_create);

/*
 * tsd_destroy - destroy thread specific data
 * @keyp: lookup key address
 *
 * Destroys the thread specific data on all threads which use this key.
 *
 * Caller must prevent racing tsd_set() or tsd_get(); this function is
 * safe from racing tsd_create(), tsd_destroy(), and tsd_exit().
 */
void
tsd_destroy(uint_t *keyp)
{
	HLIST_HEAD(work);
	tsd_hash_table_t *table;
	tsd_hash_entry_t *dtor_entry, *entry;
	tsd_hash_bin_t *dtor_entry_bin, *entry_bin;
	ulong_t hash;

	table = tsd_hash_table;
	ASSERT3P(table, !=, NULL);

	spin_lock(&table->ht_lock);
	dtor_entry = tsd_hash_search(table, *keyp, DTOR_PID);
	if (dtor_entry == NULL) {
		spin_unlock(&table->ht_lock);
		return;
	}

	/*
	 * All threads which use this key must be linked off of the
	 * DTOR_PID entry.  They are removed from the hash table and
	 * linked in to a private working list to be destroyed.
	 */
	while (!list_empty(&dtor_entry->he_key_list)) {
		entry = list_entry(dtor_entry->he_key_list.next,
		    tsd_hash_entry_t, he_key_list);
		ASSERT3U(dtor_entry->he_key, ==, entry->he_key);
		ASSERT3P(dtor_entry->he_dtor, ==, entry->he_dtor);

		hash = hash_long((ulong_t)entry->he_key *
		    (ulong_t)entry->he_pid, table->ht_bits);
		entry_bin = &table->ht_bins[hash];

		spin_lock(&entry_bin->hb_lock);
		tsd_hash_del(table, entry);
		hlist_add_head(&entry->he_list, &work);
		spin_unlock(&entry_bin->hb_lock);
	}

	hash = hash_long((ulong_t)dtor_entry->he_key *
	    (ulong_t)dtor_entry->he_pid, table->ht_bits);
	dtor_entry_bin = &table->ht_bins[hash];

	spin_lock(&dtor_entry_bin->hb_lock);
	tsd_hash_del(table, dtor_entry);
	hlist_add_head(&dtor_entry->he_list, &work);
	spin_unlock(&dtor_entry_bin->hb_lock);
	spin_unlock(&table->ht_lock);

	tsd_hash_dtor(&work);
	*keyp = 0;
}
EXPORT_SYMBOL(tsd_destroy);

/*
 * tsd_exit - destroys all thread specific data for this thread
 *
 * Destroys all the thread specific data for this thread.
 *
 * Caller must prevent racing tsd_set() or tsd_get(); this function is
 * safe from racing tsd_create(), tsd_destroy(), and tsd_exit().
 */
void
tsd_exit(void)
{
	HLIST_HEAD(work);
	tsd_hash_table_t *table;
	tsd_hash_entry_t *pid_entry, *entry;
	tsd_hash_bin_t *pid_entry_bin, *entry_bin;
	ulong_t hash;

	table = tsd_hash_table;
	ASSERT3P(table, !=, NULL);

	spin_lock(&table->ht_lock);
	pid_entry = tsd_hash_search(table, PID_KEY, curthread->pid);
	if (pid_entry == NULL) {
		spin_unlock(&table->ht_lock);
		return;
	}

	/*
	 * All keys associated with this pid must be linked off of the
	 * PID_KEY entry.  They are removed from the hash table and
	 * linked in to a private working list to be destroyed.
	 */

	while (!list_empty(&pid_entry->he_pid_list)) {
		entry = list_entry(pid_entry->he_pid_list.next,
		    tsd_hash_entry_t, he_pid_list);
		ASSERT3U(pid_entry->he_pid, ==, entry->he_pid);

		hash = hash_long((ulong_t)entry->he_key *
		    (ulong_t)entry->he_pid, table->ht_bits);
		entry_bin = &table->ht_bins[hash];

		spin_lock(&entry_bin->hb_lock);
		tsd_hash_del(table, entry);
		hlist_add_head(&entry->he_list, &work);
		spin_unlock(&entry_bin->hb_lock);
	}

	hash = hash_long((ulong_t)pid_entry->he_key *
	    (ulong_t)pid_entry->he_pid, table->ht_bits);
	pid_entry_bin = &table->ht_bins[hash];

	spin_lock(&pid_entry_bin->hb_lock);
	tsd_hash_del(table, pid_entry);
	hlist_add_head(&pid_entry->he_list, &work);
	spin_unlock(&pid_entry_bin->hb_lock);
	spin_unlock(&table->ht_lock);

	tsd_hash_dtor(&work);
}
EXPORT_SYMBOL(tsd_exit);

int
spl_tsd_init(void)
{
	tsd_hash_table = tsd_hash_table_init(TSD_HASH_TABLE_BITS_DEFAULT);
	if (tsd_hash_table == NULL)
		return (-ENOMEM);

	return (0);
}

void
spl_tsd_fini(void)
{
	tsd_hash_table_fini(tsd_hash_table);
	tsd_hash_table = NULL;
}