/*-
 * Copyright (c) 2008 Isilon Systems, Inc.
 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
 * Copyright (c) 1998 Berkeley Software Design, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 *	Main Entry: witness
 *	Pronunciation: 'wit-n&s
 *	Function: noun
 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
 *	    testimony, witness, from 2wit
 *	Date: before 12th century
 *	1 : attestation of a fact or event : TESTIMONY
 *	2 : one that gives evidence; specifically : one who testifies in
 *	    a cause or before a judicial tribunal
 *	3 : one asked to be present at a transaction so as to be able to
 *	    testify to its having taken place
 *	4 : one who has personal knowledge of something
 *	5 a : something serving as evidence or proof : SIGN
 *	  b : public affirmation by word or example of usually
 *	      religious faith or conviction <the heroic witness to divine
 *	      life -- Pilot>
 *	6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */
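
/*
 * Illustrative sketch of rule 2) (not compiled; the lock names here are
 * made up).  A thread may hold Giant around a sleepable lock because, as
 * described above, Giant is released for a thread that blocks:
 *
 *	mtx_lock(&Giant);
 *	sx_xlock(&example_sx);	// if this blocks, Giant is released
 *	...
 *	sx_xunlock(&example_sx);
 *	mtx_unlock(&Giant);
 */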

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/subr_witness.c 286055 2015-07-30 02:06:29Z marius $");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_stack.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/stdarg.h>

#if !defined(DDB) && !defined(STACK)
#error "DDB or STACK options are required for WITNESS"
#endif

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif

#define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
#define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */
#define	LI_NORELEASE	0x00020000	/* Lock not allowed to be released. */

/* Define this to check for blessed mutexes */
#undef BLESSING

#define	WITNESS_COUNT 		1024
#define	WITNESS_CHILDCOUNT 	(WITNESS_COUNT * 4)
#define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
#define	WITNESS_PENDLIST	(1024 + MAXCPU)

/* Allocate 256 KB of stack data space */
#define	WITNESS_LO_DATA_COUNT	2048

/* Prime, gives load factor of ~2 at full load */
#define	WITNESS_LO_HASH_SIZE	1021

/*
 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
 * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_NCHILDREN	5
#define	LOCK_CHILDCOUNT	2048

#define	MAX_W_NAME	64

#define	BADSTACK_SBUF_SIZE	(256 * WITNESS_COUNT)
#define	FULLGRAPH_SBUF_SIZE	512

/*
 * These flags go in the witness relationship matrix and describe the
 * relationship between any two struct witness objects.
 */
#define	WITNESS_UNRELATED        0x00    /* No lock order relation. */
#define	WITNESS_PARENT           0x01    /* Parent, aka direct ancestor. */
#define	WITNESS_ANCESTOR         0x02    /* Direct or indirect ancestor. */
#define	WITNESS_CHILD            0x04    /* Child, aka direct descendant. */
#define	WITNESS_DESCENDANT       0x08    /* Direct or indirect descendant. */
#define	WITNESS_ANCESTOR_MASK    (WITNESS_PARENT | WITNESS_ANCESTOR)
#define	WITNESS_DESCENDANT_MASK  (WITNESS_CHILD | WITNESS_DESCENDANT)
#define	WITNESS_RELATED_MASK						\
	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
#define	WITNESS_REVERSAL         0x10    /* A lock order reversal has been
					  * observed. */
#define	WITNESS_RESERVED1        0x20    /* Unused flag, reserved. */
#define	WITNESS_RESERVED2        0x40    /* Unused flag, reserved. */
#define	WITNESS_LOCK_ORDER_KNOWN 0x80    /* This lock order is known. */

/* Descendant to ancestor flags */
#define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)

/* Ancestor to descendant flags */
#define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)
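
/*
 * For example, with the flag values above, WITNESS_ATOD(WITNESS_PARENT) ==
 * WITNESS_CHILD and WITNESS_DTOA(WITNESS_CHILD) == WITNESS_PARENT, so the
 * relationship byte stored at one end of an edge in w_rmatrix can be
 * converted to the other end's view with a single shift.
 */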

#define	WITNESS_INDEX_ASSERT(i)						\
	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < WITNESS_COUNT)

static MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");

/*
 * Lock instances.  A lock instance is the data associated with a lock while
 * it is held by witness.  For example, a lock instance will hold the
 * recursion count of a lock.  Lock instances are held in lists.  Spin locks
 * are held in a per-cpu list while sleep locks are held in a per-thread
 * list.
 */
struct lock_instance {
	struct lock_object	*li_lock;
	const char		*li_file;
	int			li_line;
	u_int			li_flags;
};

/*
 * A simple list type used to build the list of locks held by a thread
 * or CPU.  We can't simply embed the list in struct lock_object since a
 * lock may be held by more than one thread if it is a shared lock.  Locks
 * are added to the head of the list, so we fill up each list entry from
 * "the back" logically.  To ease some of the arithmetic, we actually fill
 * in each list entry the normal way (children[0] then children[1], etc.) but
 * when we traverse the list we read children[count-1] as the first entry
 * down to children[0] as the final entry.
 */
struct lock_list_entry {
	struct lock_list_entry	*ll_next;
	struct lock_instance	ll_children[LOCK_NCHILDREN];
	u_int			ll_count;
};
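
/*
 * Illustrative traversal, mirroring the comment above (see witness_warn()
 * and witness_thread_exit() for real instances of this loop); visit() is a
 * hypothetical stand-in for the per-instance work:
 *
 *	struct lock_list_entry *lle;
 *	int i;
 *
 *	for (lle = list; lle != NULL; lle = lle->ll_next)
 *		for (i = lle->ll_count - 1; i >= 0; i--)
 *			visit(&lle->ll_children[i]);
 */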

/*
 * The main witness structure. One of these per named lock type in the system
 * (for example, "vnode interlock").
 */
struct witness {
	char  			w_name[MAX_W_NAME];
	uint32_t 		w_index;  /* Index in the relationship matrix */
	struct lock_class	*w_class;
	STAILQ_ENTRY(witness) 	w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness) 	w_typelist;	/* Witnesses of a type. */
	struct witness		*w_hash_next; /* Linked list in hash buckets. */
	const char		*w_file; /* File where last acquired */
	uint32_t 		w_line; /* Line where last acquired */
	uint32_t 		w_refcount;
	uint16_t 		w_num_ancestors; /* direct/indirect
						  * ancestor count */
	uint16_t 		w_num_descendants; /* direct/indirect
						    * descendant count */
	int16_t 		w_ddb_level;
	unsigned		w_displayed:1;
	unsigned		w_reversed:1;
};

STAILQ_HEAD(witness_list, witness);

/*
 * The witness hash table. Keys are witness names (const char *), elements are
 * witness objects (struct witness *).
 */
struct witness_hash {
	struct witness	*wh_array[WITNESS_HASH_SIZE];
	uint32_t	wh_size;
	uint32_t	wh_count;
};
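
/*
 * Witness names are hashed with the classic djb2 string hash (implemented
 * by witness_hash_djb2()); its core loop, for reference, is:
 *
 *	hash = 5381;
 *	while ((c = *key++) != '\0')
 *		hash = ((hash << 5) + hash) + c;	// hash * 33 + c
 *
 * The bucket is then hash % WITNESS_HASH_SIZE; the prime table size helps
 * keep the buckets evenly loaded.
 */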

/*
 * Key type for the lock order data hash table.
 */
struct witness_lock_order_key {
	uint16_t	from;
	uint16_t	to;
};

struct witness_lock_order_data {
	struct stack			wlod_stack;
	struct witness_lock_order_key	wlod_key;
	struct witness_lock_order_data	*wlod_next;
};

/*
 * The witness lock order data hash table. Keys are witness index tuples
 * (struct witness_lock_order_key), elements are lock order data objects
 * (struct witness_lock_order_data).
 */
struct witness_lock_order_hash {
	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
	u_int	wloh_size;
	u_int	wloh_count;
};
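
/*
 * A plausible sketch (not the authoritative logic; see
 * witness_lock_order_get()) of how this table is indexed: the (from, to)
 * witness index pair is treated as a small byte string and hashed:
 *
 *	struct witness_lock_order_key key;
 *	u_int bucket;
 *
 *	key.from = parent->w_index;
 *	key.to = child->w_index;
 *	bucket = witness_hash_djb2((const uint8_t *)&key,
 *	    sizeof(key)) % w_lohash.wloh_size;
 */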

#ifdef BLESSING
struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};
#endif

struct witness_pendhelp {
	const char		*wh_type;
	struct lock_object	*wh_lock;
};

struct witness_order_list_entry {
	const char		*w_name;
	struct lock_class	*w_class;
};

/*
 * Returns 0 if one of the locks is a spin lock and the other is not.
 * Returns 1 otherwise.
 */
static __inline int
witness_lock_type_equal(struct witness *w1, struct witness *w2)
{

	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
		(w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
}

static __inline int
witness_lock_order_key_equal(const struct witness_lock_order_key *a,
    const struct witness_lock_order_key *b)
{

	return (a->from == b->from && a->to == b->to);
}

static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
		    const char *fname);
#ifdef KDB
static void	_witness_debugger(int cond, const char *msg);
#endif
static void	adopt(struct witness *parent, struct witness *child);
#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static void	depart(struct witness *w);
static struct witness	*enroll(const char *description,
			    struct lock_class *lock_class);
static struct lock_instance	*find_instance(struct lock_list_entry *list,
				    const struct lock_object *lock);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static void	itismychild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
#ifdef DDB
static void	witness_ddb_compute_levels(void);
static void	witness_ddb_display(int(*)(const char *fmt, ...));
static void	witness_ddb_display_descendants(int(*)(const char *fmt, ...),
		    struct witness *, int indent);
static void	witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_ddb_level_descendants(struct witness *parent, int l);
static void	witness_ddb_list(struct thread *td);
#endif
static void	witness_free(struct witness *m);
static struct witness	*witness_get(void);
static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
static struct witness	*witness_hash_get(const char *key);
static void	witness_hash_put(struct witness *w);
static void	witness_init_hash_tables(void);
static void	witness_increment_graph_generation(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_list_entry	*witness_lock_list_get(void);
static int	witness_lock_order_add(struct witness *parent,
		    struct witness *child);
static int	witness_lock_order_check(struct witness *parent,
		    struct witness *child);
static struct witness_lock_order_data	*witness_lock_order_get(
					    struct witness *parent,
					    struct witness *child);
static void	witness_list_lock(struct lock_instance *instance,
		    int (*prnt)(const char *fmt, ...));
static void	witness_setflag(struct lock_object *lock, int flag, int set);

#ifdef KDB
#define	witness_debugger(c)	_witness_debugger(c, __func__)
#else
#define	witness_debugger(c)
#endif

static SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL,
    "Witness Locking");

/*
 * If set to 0, lock order checking is disabled.  If set to -1,
 * witness is completely disabled.  Otherwise witness performs full
 * lock order checking for all locks.  At runtime, lock order checking
 * may be toggled.  However, witness cannot be reenabled once it is
 * completely disabled.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
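
/*
 * Example (from userland, on a WITNESS kernel), following the semantics
 * described above:
 *
 *	# sysctl debug.witness.watch=0	(disable lock order checking)
 *	# sysctl debug.witness.watch=-1	(disable witness entirely;
 *					 cannot be re-enabled)
 */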

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is 1, it will cause the system
 * to drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin,
    0, "");

/*
 * Call this to print out the relations between locks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");

/*
 * Call this to print out the stacks recorded for witness lock order
 * reversals.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");

static struct mtx w_mtx;

/* w_list */
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);

/* w_typelist */
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);

/* lock list */
static struct lock_list_entry *w_lock_list_free = NULL;
static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
static u_int pending_cnt;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");

static struct witness *w_data;
static uint8_t w_rmatrix[WITNESS_COUNT+1][WITNESS_COUNT+1];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
static struct witness_hash w_hash;	/* The witness hash table. */

/* The lock order data hash */
static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
static struct witness_lock_order_data *w_lofree = NULL;
static struct witness_lock_order_hash w_lohash;
static int w_max_used_index = 0;
static unsigned int w_generation = 0;
static const char w_notrunning[] = "Witness not running\n";
static const char w_stillcold[] = "Witness is still cold\n";


static struct witness_order_list_entry order_lists[] = {
	/*
	 * sx locks
	 */
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "allprison", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * Various mutexes
	 */
	{ "Giant", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_rw },
#ifdef	HWPMC_HOOKS
	{ "pmc-sleep", &lock_class_mtx_sleep },
#endif
	{ "time lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * umtx
	 */
	{ "umtx lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_rw },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * IPv4 multicast:
	 * protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_lock", &lock_class_rw },
	{ NULL, NULL },
	/*
	 * IPv6 multicast:
	 * protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in6_multi_mtx", &lock_class_mtx_sleep },
	{ "mld_mtx", &lock_class_mtx_sleep },
	{ "if_addr_lock", &lock_class_rw },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp_link_rwlock", &lock_class_rw },
	{ "unp_list_lock", &lock_class_mtx_sleep },
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_rw },
	{ "udpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_rw },
	{ "tcpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * netatalk
	 */
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_rw },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },

	/*
	 * IEEE 802.11
	 */
	{ "802.11 com lock", &lock_class_mtx_sleep},
	{ NULL, NULL },
	/*
	 * Network drivers
	 */
	{ "network driver", &lock_class_mtx_sleep},
	{ NULL, NULL },

	/*
	 * Netgraph
	 */
	{ "ng_node", &lock_class_mtx_sleep },
	{ "ng_worklist", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * CDEV
	 */
	{ "vm map (system)", &lock_class_mtx_sleep },
	{ "vm page queue", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * VM
	 */
	{ "vm map (user)", &lock_class_sx },
	{ "vm object", &lock_class_rw },
	{ "vm page", &lock_class_mtx_sleep },
	{ "vm page queue", &lock_class_mtx_sleep },
	{ "pmap pv global", &lock_class_rw },
	{ "pmap", &lock_class_mtx_sleep },
	{ "pmap pv list", &lock_class_rw },
	{ "vm page free queue", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * kqueue/VFS interaction
	 */
	{ "kqueue", &lock_class_mtx_sleep },
	{ "struct mount mtx", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * ZFS locking
	 */
	{ "dn->dn_mtx", &lock_class_sx },
	{ "dr->dt.di.dr_mtx", &lock_class_sx },
	{ "db->db_mtx", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#endif
	{ "rm.mutex_mtx", &lock_class_mtx_spin },
	{ "sio", &lock_class_mtx_spin },
	{ "scrlock", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "pcib_mtx", &lock_class_mtx_spin },
	{ "rtc_mtx", &lock_class_mtx_spin },
#endif
	{ "scc_hwmtx", &lock_class_mtx_spin },
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "fast_taskqueue", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-per-proc", &lock_class_mtx_spin },
#endif
	{ "process slock", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "rm_spinlock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "turnstile lock", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	{ "syscons video lock", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#endif
#ifdef __powerpc__
	{ "tlb0", &lock_class_mtx_spin },
#endif
	/*
	 * leaf locks
	 */
	{ "intrcnt", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#if defined(SMP) && defined(__sparc64__)
	{ "ipi", &lock_class_mtx_spin },
#endif
#ifdef __i386__
	{ "allpmaps", &lock_class_mtx_spin },
	{ "descriptor tables", &lock_class_mtx_spin },
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "cpuset", &lock_class_mtx_spin },
	{ "mprof lock", &lock_class_mtx_spin },
	{ "zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-leaf", &lock_class_mtx_spin },
#endif
	{ "blocked lock", &lock_class_mtx_spin },
	{ NULL, NULL },
	{ NULL, NULL }
};

#ifdef BLESSING
/*
 * Pairs of locks which have been blessed.
 * Don't complain about order problems with blessed locks.
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}
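
/*
 * For example, fixup_filename("../../../kern/subr_witness.c") returns
 * "kern/subr_witness.c".
 */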

/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code assumes
 * that early boot is single-threaded, at least until after this routine has
 * completed.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	w_data = malloc(sizeof (struct witness) * WITNESS_COUNT, M_WITNESS,
	    M_NOWAIT | M_ZERO);

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS | MTX_NOPROFILE);
	for (i = WITNESS_COUNT - 1; i >= 0; i--) {
		w = &w_data[i];
		memset(w, 0, sizeof(*w));
		w_data[i].w_index = i;	/* Witness index never changes. */
		witness_free(w);
	}
	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
	    ("%s: Invalid list of free witness objects", __func__));

	/* The witness at index 0 is left unused, to aid in debugging. */
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;

	memset(w_rmatrix, 0,
	    (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1)));

	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);
	witness_init_hash_tables();

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			itismychild(w, w1);
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
		lock = pending_locks[i].wh_lock;
		KASSERT(lock->lo_flags & LO_WITNESS,
		    ("%s: lock %s is on pending list but not LO_WITNESS",
		    __func__, lock->lo_name));
		lock->lo_witness = enroll(pending_locks[i].wh_type,
		    LOCK_CLASS(lock));
	}

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
    NULL);

void
witness_init(struct lock_object *lock, const char *type)
{
	struct lock_class *class;

	/* Various sanity checks. */
	class = LOCK_CLASS(lock);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		kassert_panic("%s: lock (%s) %s can not be recursable",
		    __func__, class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		kassert_panic("%s: lock (%s) %s can not be sleepable",
		    __func__, class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		kassert_panic("%s: lock (%s) %s can not be upgradable",
		    __func__, class->lc_name, lock->lo_name);

	/*
	 * If we shouldn't watch this lock, then just clear lo_witness.
	 * Otherwise, if witness_cold is set, then it is too early to
	 * enroll this lock, so defer it to witness_initialize() by adding
	 * it to the pending_locks list.  If it is not too early, then enroll
	 * the lock now.
	 */
	if (witness_watch < 1 || panicstr != NULL ||
	    (lock->lo_flags & LO_WITNESS) == 0)
		lock->lo_witness = NULL;
	else if (witness_cold) {
		pending_locks[pending_cnt].wh_lock = lock;
		pending_locks[pending_cnt++].wh_type = type;
		if (pending_cnt > WITNESS_PENDLIST)
			panic("%s: pending locks list is too small, "
			    "increase WITNESS_PENDLIST\n",
			    __func__);
	} else
		lock->lo_witness = enroll(type, class);
}

void
witness_destroy(struct lock_object *lock)
{
	struct lock_class *class;
	struct witness *w;

	class = LOCK_CLASS(lock);

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
		return;
	w = lock->lo_witness;

	mtx_lock_spin(&w_mtx);
	MPASS(w->w_refcount > 0);
	w->w_refcount--;

	if (w->w_refcount == 0)
		depart(w);
	mtx_unlock_spin(&w_mtx);
}

#ifdef DDB
static void
witness_ddb_compute_levels(void)
{
	struct witness *w;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_ddb_level = -1;

	/*
	 * Look for locks with no parents and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {

		/* If the witness has ancestors (is not a root), skip it. */
		if (w->w_num_ancestors > 0)
			continue;
		witness_ddb_level_descendants(w, 0);
	}
}

static void
witness_ddb_level_descendants(struct witness *w, int l)
{
	int i;

	if (w->w_ddb_level >= l)
		return;

	w->w_ddb_level = l;
	l++;

	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_level_descendants(&w_data[i], l);
	}
}

static void
witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
    struct witness *w, int indent)
{
	int i;

	for (i = 0; i < indent; i++)
		prnt(" ");
	prnt("%s (type: %s, depth: %d, active refs: %d)",
	     w->w_name, w->w_class->lc_name,
	     w->w_ddb_level, w->w_refcount);
	if (w->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	w->w_displayed = 1;
	if (w->w_file != NULL && w->w_line != 0)
		prnt(" -- last acquired @ %s:%d\n", fixup_filename(w->w_file),
		    w->w_line);
	else
		prnt(" -- never acquired\n");
	indent++;
	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (db_pager_quit)
			return;
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_display_descendants(prnt, &w_data[i],
			    indent);
	}
}

static void
witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_ddb_level > 0)
			continue;

		/* This lock has no ancestors - display its descendants. */
		witness_ddb_display_descendants(prnt, w, 0);
		if (db_pager_quit)
			return;
	}
}

static void
witness_ddb_display(int(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	witness_ddb_compute_levels();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_ddb_display_list(prnt, &w_sleep);
	if (db_pager_quit)
		return;

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_ddb_display_list(prnt, &w_spin);
	if (db_pager_quit)
		return;

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s (type: %s, depth: %d)\n", w->w_name,
		    w->w_class->lc_name, w->w_ddb_level);
		if (db_pager_quit)
			return;
	}
}
#endif /* DDB */

int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == -1 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	mtx_assert(&w_mtx, MA_NOTOWNED);
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (witness_watch &&
	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
	itismychild(lock1->lo_witness, lock2->lo_witness);
	mtx_unlock_spin(&w_mtx);
	return (0);
}
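
/*
 * Illustrative use (the lock names are hypothetical): once both locks have
 * been initialized with witness, a subsystem can pin down an explicit
 * order; EDOOFUS comes back if witness already believes the reverse order:
 *
 *	error = witness_defineorder(&parent_mtx.lock_object,
 *	    &child_mtx.lock_object);
 */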

void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line, struct lock_object *interlock)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1, *lock2, *plock;
	struct lock_class *class, *iclass;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	w = lock->lo_witness;
	class = LOCK_CLASS(lock);
	td = curthread;

	if (class->lc_flags & LC_SLEEPLOCK) {

		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			kassert_panic("acquiring blockable sleep lock with "
			    "spinlock or critical section held (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		lock_list = td->td_sleeplocks;
		if (lock_list == NULL || lock_list->ll_count == 0)
			return;
	} else {

		/*
		 * If this is the first lock, just return, as no order
		 * checking is needed.  Pinning the thread avoids problems
		 * with migration while we check whether any spinlocks are
		 * held.  If at least one spinlock is held, the thread is
		 * on a safe path, so it may be unpinned.
		 */
		sched_pin();
		lock_list = PCPU_GET(spinlocks);
		if (lock_list == NULL || lock_list->ll_count == 0) {
			sched_unpin();
			return;
		}
		sched_unpin();
	}

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
			printf("while exclusively locked from %s:%d\n",
			    fixup_filename(lock1->li_file), lock1->li_line);
			kassert_panic("excl->share");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
			printf("while share locked from %s:%d\n",
			    fixup_filename(lock1->li_file), lock1->li_line);
			kassert_panic("share->excl");
		}
		return;
	}

	/* Warn if the interlock is not locked exactly once. */
	if (interlock != NULL) {
		iclass = LOCK_CLASS(interlock);
		lock1 = find_instance(lock_list, interlock);
		if (lock1 == NULL)
			kassert_panic("interlock (%s) %s not locked @ %s:%d",
			    iclass->lc_name, interlock->lo_name,
			    fixup_filename(file), line);
		else if ((lock1->li_flags & LI_RECURSEMASK) != 0)
			kassert_panic("interlock (%s) %s recursed @ %s:%d",
			    iclass->lc_name, interlock->lo_name,
			    fixup_filename(file), line);
	}

	/*
	 * Find the previously acquired lock, but ignore interlocks.
	 */
	plock = &lock_list->ll_children[lock_list->ll_count - 1];
	if (interlock != NULL && plock->li_lock == interlock) {
		if (lock_list->ll_count > 1)
			plock =
			    &lock_list->ll_children[lock_list->ll_count - 2];
		else {
			lle = lock_list->ll_next;

			/*
			 * The interlock is the only lock we hold, so
			 * simply return.
			 */
			if (lle == NULL)
				return;
			plock = &lle->ll_children[lle->ll_count - 1];
		}
	}

	/*
	 * Try to perform most checks without a lock.  If this succeeds we
	 * can skip acquiring the lock and return success.  Otherwise we redo
	 * the check with the lock held to handle races with concurrent updates.
	 */
	w1 = plock->li_lock->lo_witness;
	if (witness_lock_order_check(w1, w))
		return;

	mtx_lock_spin(&w_mtx);
	if (witness_lock_order_check(w1, w)) {
		mtx_unlock_spin(&w_mtx);
		return;
	}
	witness_lock_order_add(w1, w);

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	if (w1 == w) {
		i = w->w_index;
		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
			w_rmatrix[i][i] |= WITNESS_REVERSAL;
			w->w_reversed = 1;
			mtx_unlock_spin(&w_mtx);
			printf(
			    "acquiring duplicate lock of same type: \"%s\"\n",
			    w->w_name);
			printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
			    fixup_filename(plock->li_file), plock->li_line);
			printf(" 2nd %s @ %s:%d\n", lock->lo_name,
			    fixup_filename(file), line);
			witness_debugger(1);
		} else
			mtx_unlock_spin(&w_mtx);
		return;
	}
	mtx_assert(&w_mtx, MA_OWNED);

	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w))
		goto out;

	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];

			/*
			 * Ignore the interlock.
			 */
			if (interlock == lock1->li_lock)
				continue;

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			w1 = lock1->li_lock->lo_witness;
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}

			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;

			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.lock_object)
				goto reversal;

			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:

			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
#ifdef BLESSING

			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				goto out;
#endif

			/* Bail if this violation is known */
			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
				goto out;

			/* Record this as a violation */
			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
			w->w_reversed = w1->w_reversed = 1;
			witness_increment_graph_generation();
			mtx_unlock_spin(&w_mtx);

#ifdef WITNESS_NO_VNODE
			/*
			 * There are known LORs between VNODE locks. They are
			 * not an indication of a bug. VNODE locks are flagged
			 * as such (LO_IS_VNODE) and we don't yell if the LOR
			 * is between 2 VNODE locks.
			 */
			if ((lock->lo_flags & LO_IS_VNODE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_IS_VNODE) != 0)
				return;
#endif

			/*
			 * Ok, yell about it.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				printf(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
			    && lock == &Giant.lock_object)
				printf(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				printf("lock order reversal:\n");

			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, fixup_filename(lock1->li_file),
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name,
				    fixup_filename(file), line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_witness->w_name,
				    fixup_filename(lock2->li_file),
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, fixup_filename(lock1->li_file),
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name,
				    fixup_filename(file), line);
			}
			witness_debugger(1);
			return;
		}
	}

	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(plock->li_lock == &Giant.lock_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    w->w_name, plock->li_lock->lo_witness->w_name);
		itismychild(plock->li_lock->lo_witness, w);
	}
out:
	mtx_unlock_spin(&w_mtx);
}
1396
1397void
1398witness_lock(struct lock_object *lock, int flags, const char *file, int line)
1399{
1400	struct lock_list_entry **lock_list, *lle;
1401	struct lock_instance *instance;
1402	struct witness *w;
1403	struct thread *td;
1404
1405	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
1406	    panicstr != NULL)
1407		return;
1408	w = lock->lo_witness;
1409	td = curthread;
1410
1411	/* Determine lock list for this lock. */
1412	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
1413		lock_list = &td->td_sleeplocks;
1414	else
1415		lock_list = PCPU_PTR(spinlocks);
1416
1417	/* Check to see if we are recursing on a lock we already own. */
1418	instance = find_instance(*lock_list, lock);
1419	if (instance != NULL) {
1420		instance->li_flags++;
1421		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
1422		    td->td_proc->p_pid, lock->lo_name,
1423		    instance->li_flags & LI_RECURSEMASK);
1424		instance->li_file = file;
1425		instance->li_line = line;
1426		return;
1427	}
1428
1429	/* Update per-witness last file and line acquire. */
1430	w->w_file = file;
1431	w->w_line = line;
1432
1433	/* Find the next open lock instance in the list and fill it. */
1434	lle = *lock_list;
1435	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
1436		lle = witness_lock_list_get();
1437		if (lle == NULL)
1438			return;
1439		lle->ll_next = *lock_list;
1440		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
1441		    td->td_proc->p_pid, lle);
1442		*lock_list = lle;
1443	}
1444	instance = &lle->ll_children[lle->ll_count++];
1445	instance->li_lock = lock;
1446	instance->li_line = line;
1447	instance->li_file = file;
1448	if ((flags & LOP_EXCLUSIVE) != 0)
1449		instance->li_flags = LI_EXCLUSIVE;
1450	else
1451		instance->li_flags = 0;
1452	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
1453	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
1454}
1455
1456void
1457witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
1458{
1459	struct lock_instance *instance;
1460	struct lock_class *class;
1461
1462	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1463	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1464		return;
1465	class = LOCK_CLASS(lock);
1466	if (witness_watch) {
1467		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1468			kassert_panic(
1469			    "upgrade of non-upgradable lock (%s) %s @ %s:%d",
1470			    class->lc_name, lock->lo_name,
1471			    fixup_filename(file), line);
1472		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1473			kassert_panic(
1474			    "upgrade of non-sleep lock (%s) %s @ %s:%d",
1475			    class->lc_name, lock->lo_name,
1476			    fixup_filename(file), line);
1477	}
1478	instance = find_instance(curthread->td_sleeplocks, lock);
1479	if (instance == NULL) {
1480		kassert_panic("upgrade of unlocked lock (%s) %s @ %s:%d",
1481		    class->lc_name, lock->lo_name,
1482		    fixup_filename(file), line);
1483		return;
1484	}
1485	if (witness_watch) {
1486		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
1487			kassert_panic(
1488			    "upgrade of exclusive lock (%s) %s @ %s:%d",
1489			    class->lc_name, lock->lo_name,
1490			    fixup_filename(file), line);
1491		if ((instance->li_flags & LI_RECURSEMASK) != 0)
1492			kassert_panic(
1493			    "upgrade of recursed lock (%s) %s r=%d @ %s:%d",
1494			    class->lc_name, lock->lo_name,
1495			    instance->li_flags & LI_RECURSEMASK,
1496			    fixup_filename(file), line);
1497	}
1498	instance->li_flags |= LI_EXCLUSIVE;
1499}
1500
1501void
1502witness_downgrade(struct lock_object *lock, int flags, const char *file,
1503    int line)
1504{
1505	struct lock_instance *instance;
1506	struct lock_class *class;
1507
1508	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1509	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1510		return;
1511	class = LOCK_CLASS(lock);
1512	if (witness_watch) {
1513		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1514			kassert_panic(
1515			    "downgrade of non-upgradable lock (%s) %s @ %s:%d",
1516			    class->lc_name, lock->lo_name,
1517			    fixup_filename(file), line);
1518		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1519			kassert_panic(
1520			    "downgrade of non-sleep lock (%s) %s @ %s:%d",
1521			    class->lc_name, lock->lo_name,
1522			    fixup_filename(file), line);
1523	}
1524	instance = find_instance(curthread->td_sleeplocks, lock);
1525	if (instance == NULL) {
1526		kassert_panic("downgrade of unlocked lock (%s) %s @ %s:%d",
1527		    class->lc_name, lock->lo_name,
1528		    fixup_filename(file), line);
1529		return;
1530	}
1531	if (witness_watch) {
1532		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
1533			kassert_panic(
1534			    "downgrade of shared lock (%s) %s @ %s:%d",
1535			    class->lc_name, lock->lo_name,
1536			    fixup_filename(file), line);
1537		if ((instance->li_flags & LI_RECURSEMASK) != 0)
1538			kassert_panic(
1539			    "downgrade of recursed lock (%s) %s r=%d @ %s:%d",
1540			    class->lc_name, lock->lo_name,
1541			    instance->li_flags & LI_RECURSEMASK,
1542			    fixup_filename(file), line);
1543	}
1544	instance->li_flags &= ~LI_EXCLUSIVE;
1545}
1546
1547void
1548witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
1549{
1550	struct lock_list_entry **lock_list, *lle;
1551	struct lock_instance *instance;
1552	struct lock_class *class;
1553	struct thread *td;
1554	register_t s;
1555	int i, j;
1556
1557	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
1558		return;
1559	td = curthread;
1560	class = LOCK_CLASS(lock);
1561
1562	/* Find lock instance associated with this lock. */
1563	if (class->lc_flags & LC_SLEEPLOCK)
1564		lock_list = &td->td_sleeplocks;
1565	else
1566		lock_list = PCPU_PTR(spinlocks);
1567	lle = *lock_list;
1568	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
1569		for (i = 0; i < (*lock_list)->ll_count; i++) {
1570			instance = &(*lock_list)->ll_children[i];
1571			if (instance->li_lock == lock)
1572				goto found;
1573		}
1574
1575	/*
1576	 * When disabling WITNESS through witness_watch we could end up in
1577	 * having registered locks in the td_sleeplocks queue.
1578	 * We have to make sure we flush these queues, so just search for
1579	 * eventual register locks and remove them.
1580	 */
1581	if (witness_watch > 0) {
1582		kassert_panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
1583		    lock->lo_name, fixup_filename(file), line);
1584		return;
1585	} else {
1586		return;
1587	}
1588found:
1589
1590	/* First, check for shared/exclusive mismatches. */
1591	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
1592	    (flags & LOP_EXCLUSIVE) == 0) {
1593		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
1594		    lock->lo_name, fixup_filename(file), line);
1595		printf("while exclusively locked from %s:%d\n",
1596		    fixup_filename(instance->li_file), instance->li_line);
1597		kassert_panic("excl->ushare");
1598	}
1599	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
1600	    (flags & LOP_EXCLUSIVE) != 0) {
1601		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
1602		    lock->lo_name, fixup_filename(file), line);
1603		printf("while share locked from %s:%d\n",
1604		    fixup_filename(instance->li_file),
1605		    instance->li_line);
1606		kassert_panic("share->uexcl");
1607	}
1608	/* If we are recursed, unrecurse. */
1609	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
1610		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
1611		    td->td_proc->p_pid, instance->li_lock->lo_name,
1612		    instance->li_flags);
1613		instance->li_flags--;
1614		return;
1615	}
1616	/* The lock is now being dropped, check for NORELEASE flag */
1617	if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
1618		printf("forbidden unlock of (%s) %s @ %s:%d\n", class->lc_name,
1619		    lock->lo_name, fixup_filename(file), line);
1620		kassert_panic("lock marked norelease");
1621	}
1622
1623	/* Otherwise, remove this item from the list. */
1624	s = intr_disable();
1625	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
1626	    td->td_proc->p_pid, instance->li_lock->lo_name,
1627	    (*lock_list)->ll_count - 1);
1628	for (j = i; j < (*lock_list)->ll_count - 1; j++)
1629		(*lock_list)->ll_children[j] =
1630		    (*lock_list)->ll_children[j + 1];
1631	(*lock_list)->ll_count--;
1632	intr_restore(s);
1633
1634	/*
1635	 * In order to reduce contention on w_mtx, we want to keep always an
1636	 * head object into lists so that frequent allocation from the
1637	 * free witness pool (and subsequent locking) is avoided.
1638	 * In order to maintain the current code simple, when the head
1639	 * object is totally unloaded it means also that we do not have
1640	 * further objects in the list, so the list ownership needs to be
1641	 * hand over to another object if the current head needs to be freed.
1642	 */
1643	if ((*lock_list)->ll_count == 0) {
1644		if (*lock_list == lle) {
1645			if (lle->ll_next == NULL)
1646				return;
1647		} else
1648			lle = *lock_list;
1649		*lock_list = lle->ll_next;
1650		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
1651		    td->td_proc->p_pid, lle);
1652		witness_lock_list_free(lle);
1653	}
1654}
1655
1656void
1657witness_thread_exit(struct thread *td)
1658{
1659	struct lock_list_entry *lle;
1660	int i, n;
1661
1662	lle = td->td_sleeplocks;
1663	if (lle == NULL || panicstr != NULL)
1664		return;
1665	if (lle->ll_count != 0) {
1666		for (n = 0; lle != NULL; lle = lle->ll_next)
1667			for (i = lle->ll_count - 1; i >= 0; i--) {
1668				if (n == 0)
1669		printf("Thread %p exiting with the following locks held:\n",
1670					    td);
1671				n++;
1672				witness_list_lock(&lle->ll_children[i], printf);
1673
1674			}
1675		kassert_panic(
1676		    "Thread %p cannot exit while holding sleeplocks\n", td);
1677	}
1678	witness_lock_list_free(lle);
1679}
1680
1681/*
1682 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
1683 * exempt Giant and sleepable locks from the checks as well.  If any
1684 * non-exempt locks are held, then a supplied message is printed to the
1685 * console along with a list of the offending locks.  If indicated in the
1686 * flags then a failure results in a panic as well.
1687 */
1688int
1689witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1690{
1691	struct lock_list_entry *lock_list, *lle;
1692	struct lock_instance *lock1;
1693	struct thread *td;
1694	va_list ap;
1695	int i, n;
1696
1697	if (witness_cold || witness_watch < 1 || panicstr != NULL)
1698		return (0);
1699	n = 0;
1700	td = curthread;
1701	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1702		for (i = lle->ll_count - 1; i >= 0; i--) {
1703			lock1 = &lle->ll_children[i];
1704			if (lock1->li_lock == lock)
1705				continue;
1706			if (flags & WARN_GIANTOK &&
1707			    lock1->li_lock == &Giant.lock_object)
1708				continue;
1709			if (flags & WARN_SLEEPOK &&
1710			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
1711				continue;
1712			if (n == 0) {
1713				va_start(ap, fmt);
1714				vprintf(fmt, ap);
1715				va_end(ap);
1716				printf(" with the following");
1717				if (flags & WARN_SLEEPOK)
1718					printf(" non-sleepable");
1719				printf(" locks held:\n");
1720			}
1721			n++;
1722			witness_list_lock(lock1, printf);
1723		}
1724
1725	/*
1726	 * Pin the thread to avoid problems with thread migration.  Once
1727	 * all the checks of spin lock ownership have passed, the thread
1728	 * is on a safe path and can be unpinned.
1729	 */
1730	sched_pin();
1731	lock_list = PCPU_GET(spinlocks);
1732	if (lock_list != NULL && lock_list->ll_count != 0) {
1733		sched_unpin();
1734
1735		/*
1736		 * We should only have one spin lock and, since the
1737		 * exemption flags cannot apply to this lock class,
1738		 * check whether the first spin lock is the one that
1739		 * curthread is expected to hold.
1740		 */
1741		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
1742		if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
1743		    lock1->li_lock == lock && n == 0)
1744			return (0);
1745
1746		va_start(ap, fmt);
1747		vprintf(fmt, ap);
1748		va_end(ap);
1749		printf(" with the following");
1750		if (flags & WARN_SLEEPOK)
1751			printf(" non-sleepable");
1752		printf(" locks held:\n");
1753		n += witness_list_locks(&lock_list, printf);
1754	} else
1755		sched_unpin();
1756	if (flags & WARN_PANIC && n)
1757		kassert_panic("%s", __func__);
1758	else
1759		witness_debugger(n);
1760	return (n);
1761}
1762
1763const char *
1764witness_file(struct lock_object *lock)
1765{
1766	struct witness *w;
1767
1768	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1769		return ("?");
1770	w = lock->lo_witness;
1771	return (w->w_file);
1772}
1773
1774int
1775witness_line(struct lock_object *lock)
1776{
1777	struct witness *w;
1778
1779	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1780		return (0);
1781	w = lock->lo_witness;
1782	return (w->w_line);
1783}
1784
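/*
 * Find the witness for a lock with the given description, creating it
 * (and entering it into the name hash and the per-type list) if it does
 * not yet exist.  All locks with the same name share a single witness,
 * which is why the class must match the earlier enrollment.
 */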
1785static struct witness *
1786enroll(const char *description, struct lock_class *lock_class)
1787{
1788	struct witness *w;
1789	struct witness_list *typelist;
1790
1791	MPASS(description != NULL);
1792
1793	if (witness_watch == -1 || panicstr != NULL)
1794		return (NULL);
1795	if ((lock_class->lc_flags & LC_SPINLOCK)) {
1796		if (witness_skipspin)
1797			return (NULL);
1798		else
1799			typelist = &w_spin;
1800	} else if ((lock_class->lc_flags & LC_SLEEPLOCK)) {
1801		typelist = &w_sleep;
1802	} else {
1803		kassert_panic("lock class %s is not sleep or spin",
1804		    lock_class->lc_name);
1805		return (NULL);
1806	}
1807
1808	mtx_lock_spin(&w_mtx);
1809	w = witness_hash_get(description);
1810	if (w)
1811		goto found;
1812	if ((w = witness_get()) == NULL)
1813		return (NULL);
1814	MPASS(strlen(description) < MAX_W_NAME);
1815	strcpy(w->w_name, description);
1816	w->w_class = lock_class;
1817	w->w_refcount = 1;
1818	STAILQ_INSERT_HEAD(&w_all, w, w_list);
1819	if (lock_class->lc_flags & LC_SPINLOCK) {
1820		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1821		w_spin_cnt++;
1822	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1823		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1824		w_sleep_cnt++;
1825	}
1826
1827	/* Insert new witness into the hash */
1828	witness_hash_put(w);
1829	witness_increment_graph_generation();
1830	mtx_unlock_spin(&w_mtx);
1831	return (w);
1832found:
1833	w->w_refcount++;
1834	mtx_unlock_spin(&w_mtx);
1835	if (lock_class != w->w_class)
1836		kassert_panic(
1837			"lock (%s) %s does not match earlier (%s) lock",
1838			description, lock_class->lc_name,
1839			w->w_class->lc_name);
1840	return (w);
1841}
1842
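/*
 * Called once a witness's reference count drops to zero.  The witness
 * is not removed from the order graph; its accumulated ordering history
 * remains valid should a lock with the same name be enrolled again.
 * Only the per-type counter is decremented and the stale file/line
 * cleared.
 */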
1843static void
1844depart(struct witness *w)
1845{
1846	struct witness_list *list;
1847
1848	MPASS(w->w_refcount == 0);
1849	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
1850		list = &w_sleep;
1851		w_sleep_cnt--;
1852	} else {
1853		list = &w_spin;
1854		w_spin_cnt--;
1855	}
1856	/*
1857	 * Set file to NULL as it may point into a loadable module.
1858	 */
1859	w->w_file = NULL;
1860	w->w_line = 0;
1861	witness_increment_graph_generation();
1862}
1863
1865static void
1866adopt(struct witness *parent, struct witness *child)
1867{
1868	int pi, ci, i, j;
1869
1870	if (witness_cold == 0)
1871		mtx_assert(&w_mtx, MA_OWNED);
1872
1873	/* If the relationship is already known, there's no work to be done. */
1874	if (isitmychild(parent, child))
1875		return;
1876
1877	/* When the structure of the graph changes, bump up the generation. */
1878	witness_increment_graph_generation();
1879
1880	/*
1881	 * The hard part ... create the direct relationship, then propagate all
1882	 * indirect relationships.
1883	 */
1884	pi = parent->w_index;
1885	ci = child->w_index;
1886	WITNESS_INDEX_ASSERT(pi);
1887	WITNESS_INDEX_ASSERT(ci);
1888	MPASS(pi != ci);
1889	w_rmatrix[pi][ci] |= WITNESS_PARENT;
1890	w_rmatrix[ci][pi] |= WITNESS_CHILD;
1891
1892	/*
1893	 * If parent was not already an ancestor of child,
1894	 * then we increment the descendant and ancestor counters.
1895	 */
1896	if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
1897		parent->w_num_descendants++;
1898		child->w_num_ancestors++;
1899	}
1900
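	/*
	 * Worked example: if A -> B was already known and we are adding
	 * B -> C, the direct bits above record B -> C and the loops
	 * below additionally mark A as an ancestor of C, so w_rmatrix
	 * always holds the transitive closure of the order graph.
	 */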
1901	/*
1902	 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
1903	 * an ancestor of 'pi' during this loop.
1904	 */
1905	for (i = 1; i <= w_max_used_index; i++) {
1906		if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
1907		    (i != pi))
1908			continue;
1909
1910		/* Find each descendant of 'i' and mark it as a descendant. */
1911		for (j = 1; j <= w_max_used_index; j++) {
1912
1913			/*
1914			 * Skip children that are already marked as
1915			 * descendants of 'i'.
1916			 */
1917			if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
1918				continue;
1919
1920			/*
1921			 * We are only interested in descendants of 'ci'. Note
1922			 * that 'ci' itself is counted as a descendant of 'ci'.
1923			 */
1924			if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
1925			    (j != ci))
1926				continue;
1927			w_rmatrix[i][j] |= WITNESS_ANCESTOR;
1928			w_rmatrix[j][i] |= WITNESS_DESCENDANT;
1929			w_data[i].w_num_descendants++;
1930			w_data[j].w_num_ancestors++;
1931
1932			/*
1933			 * Make sure we aren't marking a node as both an
1934			 * ancestor and descendant. We should have caught
1935			 * this as a lock order reversal earlier.
1936			 */
1937			if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
1938			    (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
1939				printf("witness rmatrix paradox! [%d][%d]=%d "
1940				    "both ancestor and descendant\n",
1941				    i, j, w_rmatrix[i][j]);
1942				kdb_backtrace();
1943				printf("Witness disabled.\n");
1944				witness_watch = -1;
1945			}
1946			if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
1947			    (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
1948				printf("witness rmatrix paradox! [%d][%d]=%d "
1949				    "both ancestor and descendant\n",
1950				    j, i, w_rmatrix[j][i]);
1951				kdb_backtrace();
1952				printf("Witness disabled.\n");
1953				witness_watch = -1;
1954			}
1955		}
1956	}
1957}
1958
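/*
 * Record that 'parent' must be acquired before 'child', after checking
 * that both witnesses are of the same lock type (sleep vs. spin).
 */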
1959static void
1960itismychild(struct witness *parent, struct witness *child)
1961{
1962	int unlocked;
1963
1964	MPASS(child != NULL && parent != NULL);
1965	if (witness_cold == 0)
1966		mtx_assert(&w_mtx, MA_OWNED);
1967
1968	if (!witness_lock_type_equal(parent, child)) {
1969		if (witness_cold == 0) {
1970			unlocked = 1;
1971			mtx_unlock_spin(&w_mtx);
1972		} else {
1973			unlocked = 0;
1974		}
1975		kassert_panic(
1976		    "%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
1977		    "the same lock type", __func__, parent->w_name,
1978		    parent->w_class->lc_name, child->w_name,
1979		    child->w_class->lc_name);
1980		if (unlocked)
1981			mtx_lock_spin(&w_mtx);
1982	}
1983	adopt(parent, child);
1984}
1985
1986/*
1987 * Generic code for the isitmy*() functions. The rmask parameter is the
1988 * expected relationship of w1 to w2.
1989 */
1990static int
1991_isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
1992{
1993	unsigned char r1, r2;
1994	int i1, i2;
1995
1996	i1 = w1->w_index;
1997	i2 = w2->w_index;
1998	WITNESS_INDEX_ASSERT(i1);
1999	WITNESS_INDEX_ASSERT(i2);
2000	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
2001	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;
2002
2003	/* The flags on one better be the inverse of the flags on the other */
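	/*
	 * E.g. if w_rmatrix[i1][i2] holds WITNESS_ANCESTOR, then
	 * w_rmatrix[i2][i1] must hold WITNESS_DESCENDANT, and vice
	 * versa.
	 */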
2004	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
2005	    (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
2006		/* Don't squawk if we're potentially racing with an update. */
2007		if (!mtx_owned(&w_mtx))
2008			return (0);
2009		printf("%s: rmatrix mismatch between %s (index %d) and %s "
2010		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
2011		    "w_rmatrix[%d][%d] == %hhx\n",
2012		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
2013		    i2, i1, r2);
2014		kdb_backtrace();
2015		printf("Witness disabled.\n");
2016		witness_watch = -1;
2017	}
2018	return (r1 & rmask);
2019}
2020
2021/*
2022 * Checks if @child is a direct child of @parent.
2023 */
2024static int
2025isitmychild(struct witness *parent, struct witness *child)
2026{
2027
2028	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
2029}
2030
2031/*
2032 * Checks if @descendant is a direct or indirect descendant of @ancestor.
2033 */
2034static int
2035isitmydescendant(struct witness *ancestor, struct witness *descendant)
2036{
2037
2038	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
2039	    __func__));
2040}
2041
2042#ifdef BLESSING
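/*
 * Return non-zero if the pair (w1, w2) appears, in either order, in the
 * statically defined blessed_list of lock pairs whose order reversals
 * are considered harmless and should not be reported.
 */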
2043static int
2044blessed(struct witness *w1, struct witness *w2)
2045{
2046	int i;
2047	struct witness_blessed *b;
2048
2049	for (i = 0; i < blessed_count; i++) {
2050		b = &blessed_list[i];
2051		if (strcmp(w1->w_name, b->b_lock1) == 0) {
2052			if (strcmp(w2->w_name, b->b_lock2) == 0)
2053				return (1);
2054			continue;
2055		}
2056		if (strcmp(w1->w_name, b->b_lock2) == 0)
2057			if (strcmp(w2->w_name, b->b_lock1) == 0)
2058				return (1);
2059	}
2060	return (0);
2061}
2062#endif
2063
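/*
 * Allocate a witness from the fixed, statically allocated pool.  If the
 * pool is exhausted, witness turns itself off (witness_watch = -1)
 * rather than allocating, since it runs too early in boot, and under
 * spin locks, to use malloc(9).
 */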
2064static struct witness *
2065witness_get(void)
2066{
2067	struct witness *w;
2068	int index;
2069
2070	if (witness_cold == 0)
2071		mtx_assert(&w_mtx, MA_OWNED);
2072
2073	if (witness_watch == -1) {
2074		mtx_unlock_spin(&w_mtx);
2075		return (NULL);
2076	}
2077	if (STAILQ_EMPTY(&w_free)) {
2078		witness_watch = -1;
2079		mtx_unlock_spin(&w_mtx);
2080		printf("WITNESS: unable to allocate a new witness object\n");
2081		return (NULL);
2082	}
2083	w = STAILQ_FIRST(&w_free);
2084	STAILQ_REMOVE_HEAD(&w_free, w_list);
2085	w_free_cnt--;
2086	index = w->w_index;
2087	MPASS(index > 0 && index == w_max_used_index + 1 &&
2088	    index < WITNESS_COUNT);
2089	bzero(w, sizeof(*w));
2090	w->w_index = index;
2091	if (index > w_max_used_index)
2092		w_max_used_index = index;
2093	return (w);
2094}
2095
2096static void
2097witness_free(struct witness *w)
2098{
2099
2100	STAILQ_INSERT_HEAD(&w_free, w, w_list);
2101	w_free_cnt++;
2102}
2103
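/*
 * Grab a lock_list_entry from the global free list, turning witness off
 * if the pool is exhausted.  Entries are returned for reuse through
 * witness_lock_list_free() rather than being freed.
 */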
2104static struct lock_list_entry *
2105witness_lock_list_get(void)
2106{
2107	struct lock_list_entry *lle;
2108
2109	if (witness_watch == -1)
2110		return (NULL);
2111	mtx_lock_spin(&w_mtx);
2112	lle = w_lock_list_free;
2113	if (lle == NULL) {
2114		witness_watch = -1;
2115		mtx_unlock_spin(&w_mtx);
2116		printf("%s: witness exhausted\n", __func__);
2117		return (NULL);
2118	}
2119	w_lock_list_free = lle->ll_next;
2120	mtx_unlock_spin(&w_mtx);
2121	bzero(lle, sizeof(*lle));
2122	return (lle);
2123}
2124
2125static void
2126witness_lock_list_free(struct lock_list_entry *lle)
2127{
2128
2129	mtx_lock_spin(&w_mtx);
2130	lle->ll_next = w_lock_list_free;
2131	w_lock_list_free = lle;
2132	mtx_unlock_spin(&w_mtx);
2133}
2134
2135static struct lock_instance *
2136find_instance(struct lock_list_entry *list, const struct lock_object *lock)
2137{
2138	struct lock_list_entry *lle;
2139	struct lock_instance *instance;
2140	int i;
2141
2142	for (lle = list; lle != NULL; lle = lle->ll_next)
2143		for (i = lle->ll_count - 1; i >= 0; i--) {
2144			instance = &lle->ll_children[i];
2145			if (instance->li_lock == lock)
2146				return (instance);
2147		}
2148	return (NULL);
2149}
2150
2151static void
2152witness_list_lock(struct lock_instance *instance,
2153    int (*prnt)(const char *fmt, ...))
2154{
2155	struct lock_object *lock;
2156
2157	lock = instance->li_lock;
2158	prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2159	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
2160	if (lock->lo_witness->w_name != lock->lo_name)
2161		prnt(" (%s)", lock->lo_witness->w_name);
2162	prnt(" r = %d (%p) locked @ %s:%d\n",
2163	    instance->li_flags & LI_RECURSEMASK, lock,
2164	    fixup_filename(instance->li_file), instance->li_line);
2165}
2166
2167#ifdef DDB
2168static int
2169witness_thread_has_locks(struct thread *td)
2170{
2171
2172	if (td->td_sleeplocks == NULL)
2173		return (0);
2174	return (td->td_sleeplocks->ll_count != 0);
2175}
2176
2177static int
2178witness_proc_has_locks(struct proc *p)
2179{
2180	struct thread *td;
2181
2182	FOREACH_THREAD_IN_PROC(p, td) {
2183		if (witness_thread_has_locks(td))
2184			return (1);
2185	}
2186	return (0);
2187}
2188#endif
2189
2190int
2191witness_list_locks(struct lock_list_entry **lock_list,
2192    int (*prnt)(const char *fmt, ...))
2193{
2194	struct lock_list_entry *lle;
2195	int i, nheld;
2196
2197	nheld = 0;
2198	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
2199		for (i = lle->ll_count - 1; i >= 0; i--) {
2200			witness_list_lock(&lle->ll_children[i], prnt);
2201			nheld++;
2202		}
2203	return (nheld);
2204}
2205
2206/*
2207 * This is a bit risky at best.  We call this function when we have timed
2208 * out acquiring a spin lock, and we assume that the other CPU is stuck
2209 * with this lock held.  So, we go groveling around in the other CPU's
2210 * per-cpu data to try to find the lock instance for this spin lock to
2211 * see when it was last acquired.
2212 */
2213void
2214witness_display_spinlock(struct lock_object *lock, struct thread *owner,
2215    int (*prnt)(const char *fmt, ...))
2216{
2217	struct lock_instance *instance;
2218	struct pcpu *pc;
2219
2220	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
2221		return;
2222	pc = pcpu_find(owner->td_oncpu);
2223	instance = find_instance(pc->pc_spinlocks, lock);
2224	if (instance != NULL)
2225		witness_list_lock(instance, prnt);
2226}
2227
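/*
 * witness_save() and witness_restore() bracket code that temporarily
 * drops and re-acquires a lock, so that the recorded file/line of the
 * original acquisition survives the gap.  A hypothetical sketch:
 *
 *	const char *file;
 *	int line;
 *
 *	witness_save(&m->lock_object, &file, &line);
 *	(... drop and later re-acquire the lock 'm' ...)
 *	witness_restore(&m->lock_object, file, line);
 *
 * Callers normally use the WITNESS_SAVE()/WITNESS_RESTORE() macros with
 * a holder declared via WITNESS_SAVE_DECL().
 */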
2228void
2229witness_save(struct lock_object *lock, const char **filep, int *linep)
2230{
2231	struct lock_list_entry *lock_list;
2232	struct lock_instance *instance;
2233	struct lock_class *class;
2234
2235	/*
2236	 * This function is used independently in locking code to deal with
2237	 * Giant; the SCHEDULER_STOPPED() check can be removed here once
2238	 * Giant is gone.
2239	 */
2240	if (SCHEDULER_STOPPED())
2241		return;
2242	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2243	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2244		return;
2245	class = LOCK_CLASS(lock);
2246	if (class->lc_flags & LC_SLEEPLOCK)
2247		lock_list = curthread->td_sleeplocks;
2248	else {
2249		if (witness_skipspin)
2250			return;
2251		lock_list = PCPU_GET(spinlocks);
2252	}
2253	instance = find_instance(lock_list, lock);
2254	if (instance == NULL) {
2255		kassert_panic("%s: lock (%s) %s not locked", __func__,
2256		    class->lc_name, lock->lo_name);
2257		return;
2258	}
2259	*filep = instance->li_file;
2260	*linep = instance->li_line;
2261}
2262
2263void
2264witness_restore(struct lock_object *lock, const char *file, int line)
2265{
2266	struct lock_list_entry *lock_list;
2267	struct lock_instance *instance;
2268	struct lock_class *class;
2269
2270	/*
2271	 * This function is used independently in locking code to deal with
2272	 * Giant; the SCHEDULER_STOPPED() check can be removed here once
2273	 * Giant is gone.
2274	 */
2275	if (SCHEDULER_STOPPED())
2276		return;
2277	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2278	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2279		return;
2280	class = LOCK_CLASS(lock);
2281	if (class->lc_flags & LC_SLEEPLOCK)
2282		lock_list = curthread->td_sleeplocks;
2283	else {
2284		if (witness_skipspin)
2285			return;
2286		lock_list = PCPU_GET(spinlocks);
2287	}
2288	instance = find_instance(lock_list, lock);
2289	if (instance == NULL)
2290		kassert_panic("%s: lock (%s) %s not locked", __func__,
2291		    class->lc_name, lock->lo_name);
2292	lock->lo_witness->w_file = file;
2293	lock->lo_witness->w_line = line;
2294	if (instance == NULL)
2295		return;
2296	instance->li_file = file;
2297	instance->li_line = line;
2298}
2299
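/*
 * Assertion back-end for the lock classes (e.g. rw_assert() and
 * sx_assert() delegate here when WITNESS is compiled in); 'flags' is a
 * combination of the LA_* constants.
 */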
2300void
2301witness_assert(const struct lock_object *lock, int flags, const char *file,
2302    int line)
2303{
2304#ifdef INVARIANT_SUPPORT
2305	struct lock_instance *instance;
2306	struct lock_class *class;
2307
2308	if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
2309		return;
2310	class = LOCK_CLASS(lock);
2311	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
2312		instance = find_instance(curthread->td_sleeplocks, lock);
2313	else if ((class->lc_flags & LC_SPINLOCK) != 0)
2314		instance = find_instance(PCPU_GET(spinlocks), lock);
2315	else {
2316		kassert_panic("Lock (%s) %s is not sleep or spin!",
2317		    class->lc_name, lock->lo_name);
2318		return;
2319	}
2320	switch (flags) {
2321	case LA_UNLOCKED:
2322		if (instance != NULL)
2323			kassert_panic("Lock (%s) %s locked @ %s:%d.",
2324			    class->lc_name, lock->lo_name,
2325			    fixup_filename(file), line);
2326		break;
2327	case LA_LOCKED:
2328	case LA_LOCKED | LA_RECURSED:
2329	case LA_LOCKED | LA_NOTRECURSED:
2330	case LA_SLOCKED:
2331	case LA_SLOCKED | LA_RECURSED:
2332	case LA_SLOCKED | LA_NOTRECURSED:
2333	case LA_XLOCKED:
2334	case LA_XLOCKED | LA_RECURSED:
2335	case LA_XLOCKED | LA_NOTRECURSED:
2336		if (instance == NULL) {
2337			kassert_panic("Lock (%s) %s not locked @ %s:%d.",
2338			    class->lc_name, lock->lo_name,
2339			    fixup_filename(file), line);
2340			break;
2341		}
2342		if ((flags & LA_XLOCKED) != 0 &&
2343		    (instance->li_flags & LI_EXCLUSIVE) == 0)
2344			kassert_panic(
2345			    "Lock (%s) %s not exclusively locked @ %s:%d.",
2346			    class->lc_name, lock->lo_name,
2347			    fixup_filename(file), line);
2348		if ((flags & LA_SLOCKED) != 0 &&
2349		    (instance->li_flags & LI_EXCLUSIVE) != 0)
2350			kassert_panic(
2351			    "Lock (%s) %s exclusively locked @ %s:%d.",
2352			    class->lc_name, lock->lo_name,
2353			    fixup_filename(file), line);
2354		if ((flags & LA_RECURSED) != 0 &&
2355		    (instance->li_flags & LI_RECURSEMASK) == 0)
2356			kassert_panic("Lock (%s) %s not recursed @ %s:%d.",
2357			    class->lc_name, lock->lo_name,
2358			    fixup_filename(file), line);
2359		if ((flags & LA_NOTRECURSED) != 0 &&
2360		    (instance->li_flags & LI_RECURSEMASK) != 0)
2361			kassert_panic("Lock (%s) %s recursed @ %s:%d.",
2362			    class->lc_name, lock->lo_name,
2363			    fixup_filename(file), line);
2364		break;
2365	default:
2366		kassert_panic("Invalid lock assertion at %s:%d.",
2367		    fixup_filename(file), line);
2368
2369	}
2370#endif	/* INVARIANT_SUPPORT */
2371}
2372
2373static void
2374witness_setflag(struct lock_object *lock, int flag, int set)
2375{
2376	struct lock_list_entry *lock_list;
2377	struct lock_instance *instance;
2378	struct lock_class *class;
2379
2380	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2381		return;
2382	class = LOCK_CLASS(lock);
2383	if (class->lc_flags & LC_SLEEPLOCK)
2384		lock_list = curthread->td_sleeplocks;
2385	else {
2386		if (witness_skipspin)
2387			return;
2388		lock_list = PCPU_GET(spinlocks);
2389	}
2390	instance = find_instance(lock_list, lock);
2391	if (instance == NULL) {
2392		kassert_panic("%s: lock (%s) %s not locked", __func__,
2393		    class->lc_name, lock->lo_name);
2394		return;
2395	}
2396
2397	if (set)
2398		instance->li_flags |= flag;
2399	else
2400		instance->li_flags &= ~flag;
2401}
2402
2403void
2404witness_norelease(struct lock_object *lock)
2405{
2406
2407	witness_setflag(lock, LI_NORELEASE, 1);
2408}
2409
2410void
2411witness_releaseok(struct lock_object *lock)
2412{
2413
2414	witness_setflag(lock, LI_NORELEASE, 0);
2415}
2416
2417#ifdef DDB
2418static void
2419witness_ddb_list(struct thread *td)
2420{
2421
2422	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2423	KASSERT(kdb_active, ("%s: not in the debugger", __func__));
2424
2425	if (witness_watch < 1)
2426		return;
2427
2428	witness_list_locks(&td->td_sleeplocks, db_printf);
2429
2430	/*
2431	 * We only handle spinlocks if td == curthread.  This is somewhat broken
2432	 * if td is currently executing on some other CPU and holds spin locks
2433	 * as we won't display those locks.  If we had an MI way of getting
2434	 * the per-cpu data for a given cpu then we could use
2435	 * td->td_oncpu to get the list of spinlocks for this thread
2436	 * and "fix" this.
2437	 *
2438	 * That still wouldn't really fix this unless we locked the scheduler
2439	 * lock or stopped the other CPU to make sure it wasn't changing the
2440	 * list out from under us.  It is probably best to just not try to
2441	 * handle threads on other CPUs for now.
2442	 */
2443	if (td == curthread && PCPU_GET(spinlocks) != NULL)
2444		witness_list_locks(PCPU_PTR(spinlocks), db_printf);
2445}
2446
2447DB_SHOW_COMMAND(locks, db_witness_list)
2448{
2449	struct thread *td;
2450
2451	if (have_addr)
2452		td = db_lookup_thread(addr, TRUE);
2453	else
2454		td = kdb_thread;
2455	witness_ddb_list(td);
2456}
2457
2458DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
2459{
2460	struct thread *td;
2461	struct proc *p;
2462
2463	/*
2464	 * It would be nice to list only threads and processes that actually
2465	 * held sleep locks, but that information is currently not exported
2466	 * by WITNESS.
2467	 */
2468	FOREACH_PROC_IN_SYSTEM(p) {
2469		if (!witness_proc_has_locks(p))
2470			continue;
2471		FOREACH_THREAD_IN_PROC(p, td) {
2472			if (!witness_thread_has_locks(td))
2473				continue;
2474			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2475			    p->p_comm, td, td->td_tid);
2476			witness_ddb_list(td);
2477			if (db_pager_quit)
2478				return;
2479		}
2480	}
2481}
2482DB_SHOW_ALIAS(alllocks, db_witness_list_all)
2483
2484DB_SHOW_COMMAND(witness, db_witness_display)
2485{
2486
2487	witness_ddb_display(db_printf);
2488}
2489#endif
2490
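/*
 * Sysctl handler that walks every reversed pair recorded in the
 * relationship matrix and reports both first-seen stack traces.  The
 * w_generation snapshot is rechecked after each drop of w_mtx; if the
 * graph changed in the meantime, the report restarts from scratch.
 */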
2491static int
2492sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
2493{
2494	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
2495	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
2496	struct sbuf *sb;
2497	u_int w_rmatrix1, w_rmatrix2;
2498	int error, generation, i, j;
2499
2500	tmp_data1 = NULL;
2501	tmp_data2 = NULL;
2502	tmp_w1 = NULL;
2503	tmp_w2 = NULL;
2504	if (witness_watch < 1) {
2505		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2506		return (error);
2507	}
2508	if (witness_cold) {
2509		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2510		return (error);
2511	}
2512	error = 0;
2513	sb = sbuf_new(NULL, NULL, BADSTACK_SBUF_SIZE, SBUF_AUTOEXTEND);
2514	if (sb == NULL)
2515		return (ENOMEM);
2516
2517	/* Allocate and init temporary storage space. */
2518	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2519	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2520	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2521	    M_WAITOK | M_ZERO);
2522	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2523	    M_WAITOK | M_ZERO);
2524	stack_zero(&tmp_data1->wlod_stack);
2525	stack_zero(&tmp_data2->wlod_stack);
2526
2527restart:
2528	mtx_lock_spin(&w_mtx);
2529	generation = w_generation;
2530	mtx_unlock_spin(&w_mtx);
2531	sbuf_printf(sb, "Number of known direct relationships is %d\n",
2532	    w_lohash.wloh_count);
2533	for (i = 1; i < w_max_used_index; i++) {
2534		mtx_lock_spin(&w_mtx);
2535		if (generation != w_generation) {
2536			mtx_unlock_spin(&w_mtx);
2537
2538			/* The graph has changed, try again. */
2539			req->oldidx = 0;
2540			sbuf_clear(sb);
2541			goto restart;
2542		}
2543
2544		w1 = &w_data[i];
2545		if (w1->w_reversed == 0) {
2546			mtx_unlock_spin(&w_mtx);
2547			continue;
2548		}
2549
2550		/* Copy w1 locally so we can release the spin lock. */
2551		*tmp_w1 = *w1;
2552		mtx_unlock_spin(&w_mtx);
2553
2554		if (tmp_w1->w_reversed == 0)
2555			continue;
2556		for (j = 1; j < w_max_used_index; j++) {
2557			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
2558				continue;
2559
2560			mtx_lock_spin(&w_mtx);
2561			if (generation != w_generation) {
2562				mtx_unlock_spin(&w_mtx);
2563
2564				/* The graph has changed, try again. */
2565				req->oldidx = 0;
2566				sbuf_clear(sb);
2567				goto restart;
2568			}
2569
2570			w2 = &w_data[j];
2571			data1 = witness_lock_order_get(w1, w2);
2572			data2 = witness_lock_order_get(w2, w1);
2573
2574			/*
2575			 * Copy information locally so we can release the
2576			 * spin lock.
2577			 */
2578			*tmp_w2 = *w2;
2579			w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
2580			w_rmatrix2 = (unsigned int)w_rmatrix[j][i];
2581
2582			if (data1) {
2583				stack_zero(&tmp_data1->wlod_stack);
2584				stack_copy(&data1->wlod_stack,
2585				    &tmp_data1->wlod_stack);
2586			}
2587			if (data2 && data2 != data1) {
2588				stack_zero(&tmp_data2->wlod_stack);
2589				stack_copy(&data2->wlod_stack,
2590				    &tmp_data2->wlod_stack);
2591			}
2592			mtx_unlock_spin(&w_mtx);
2593
2594			sbuf_printf(sb,
2595	    "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
2596			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2597			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2598#if 0
2599 			sbuf_printf(sb,
2600			"w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n",
2601 			    tmp_w1->w_name, tmp_w2->w_name, w_rmatrix1,
2602 			    tmp_w2->w_name, tmp_w1->w_name, w_rmatrix2);
2603#endif
2604			if (data1) {
2605				sbuf_printf(sb,
2606			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2607				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2608				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2609				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
2610				sbuf_printf(sb, "\n");
2611			}
2612			if (data2 && data2 != data1) {
2613				sbuf_printf(sb,
2614			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2615				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
2616				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
2617				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
2618				sbuf_printf(sb, "\n");
2619			}
2620		}
2621	}
2622	mtx_lock_spin(&w_mtx);
2623	if (generation != w_generation) {
2624		mtx_unlock_spin(&w_mtx);
2625
2626		/*
2627		 * The graph changed while we were printing stack data,
2628		 * try again.
2629		 */
2630		req->oldidx = 0;
2631		sbuf_clear(sb);
2632		goto restart;
2633	}
2634	mtx_unlock_spin(&w_mtx);
2635
2636	/* Free temporary storage space. */
2637	free(tmp_data1, M_TEMP);
2638	free(tmp_data2, M_TEMP);
2639	free(tmp_w1, M_TEMP);
2640	free(tmp_w2, M_TEMP);
2641
2642	sbuf_finish(sb);
2643	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2644	sbuf_delete(sb);
2645
2646	return (error);
2647}
2648
2649static int
2650sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
2651{
2652	struct witness *w;
2653	struct sbuf *sb;
2654	int error;
2655
2656	if (witness_watch < 1) {
2657		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2658		return (error);
2659	}
2660	if (witness_cold) {
2661		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2662		return (error);
2663	}
2664	error = 0;
2665
2666	error = sysctl_wire_old_buffer(req, 0);
2667	if (error != 0)
2668		return (error);
2669	sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req);
2670	if (sb == NULL)
2671		return (ENOMEM);
2672	sbuf_printf(sb, "\n");
2673
2674	mtx_lock_spin(&w_mtx);
2675	STAILQ_FOREACH(w, &w_all, w_list)
2676		w->w_displayed = 0;
2677	STAILQ_FOREACH(w, &w_all, w_list)
2678		witness_add_fullgraph(sb, w);
2679	mtx_unlock_spin(&w_mtx);
2680
2681	/*
2682	 * Close the sbuf and return to userland.
2683	 */
2684	error = sbuf_finish(sb);
2685	sbuf_delete(sb);
2686
2687	return (error);
2688}
2689
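/*
 * Handler for the witness watch sysctl: only the values -1 (disabled
 * permanently), 0 (off) and 1 (on) are accepted, and once witness has
 * shut itself off (-1) it cannot be re-enabled.
 */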
2690static int
2691sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
2692{
2693	int error, value;
2694
2695	value = witness_watch;
2696	error = sysctl_handle_int(oidp, &value, 0, req);
2697	if (error != 0 || req->newptr == NULL)
2698		return (error);
2699	if (value > 1 || value < -1 ||
2700	    (witness_watch == -1 && value != witness_watch))
2701		return (EINVAL);
2702	witness_watch = value;
2703	return (0);
2704}
2705
2706static void
2707witness_add_fullgraph(struct sbuf *sb, struct witness *w)
2708{
2709	int i;
2710
2711	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
2712		return;
2713	w->w_displayed = 1;
2714
2715	WITNESS_INDEX_ASSERT(w->w_index);
2716	for (i = 1; i <= w_max_used_index; i++) {
2717		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
2718			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
2719			    w_data[i].w_name);
2720			witness_add_fullgraph(sb, &w_data[i]);
2721		}
2722	}
2723}
2724
2725/*
2726 * A simple hash function.  Takes a key pointer and a key size.  If
2727 * size == 0, interprets the key as a string and reads until the null
2728 * terminator.  Otherwise, reads the first size bytes.  Returns an
2729 * unsigned 32-bit hash value computed from the key.
2730 */
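/*
 * Worked example of h = h * 33 + c starting from 5381: for the string
 * "ab", h = 5381 * 33 + 'a' = 177670, then h = 177670 * 33 + 'b' =
 * 5863208, both still well within 32 bits.
 */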
2731static uint32_t
2732witness_hash_djb2(const uint8_t *key, uint32_t size)
2733{
2734	unsigned int hash = 5381;
2735	int i;
2736
2737	/* hash = hash * 33 + key[i] */
2738	if (size)
2739		for (i = 0; i < size; i++)
2740			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2741	else
2742		for (i = 0; key[i] != 0; i++)
2743			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2744
2745	return (hash);
2746}
2747
2749/*
2750 * Initializes the two witness hash tables. Called exactly once from
2751 * witness_initialize().
2752 */
2753static void
2754witness_init_hash_tables(void)
2755{
2756	int i;
2757
2758	MPASS(witness_cold);
2759
2760	/* Initialize the hash tables. */
2761	for (i = 0; i < WITNESS_HASH_SIZE; i++)
2762		w_hash.wh_array[i] = NULL;
2763
2764	w_hash.wh_size = WITNESS_HASH_SIZE;
2765	w_hash.wh_count = 0;
2766
2767	/* Initialize the lock order data hash. */
2768	w_lofree = NULL;
2769	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
2770		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
2771		w_lodata[i].wlod_next = w_lofree;
2772		w_lofree = &w_lodata[i];
2773	}
2774	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
2775	w_lohash.wloh_count = 0;
2776	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
2777		w_lohash.wloh_array[i] = NULL;
2778}
2779
2780static struct witness *
2781witness_hash_get(const char *key)
2782{
2783	struct witness *w;
2784	uint32_t hash;
2785
2786	MPASS(key != NULL);
2787	if (witness_cold == 0)
2788		mtx_assert(&w_mtx, MA_OWNED);
2789	hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
2790	w = w_hash.wh_array[hash];
2791	while (w != NULL) {
2792		if (strcmp(w->w_name, key) == 0)
2793			goto out;
2794		w = w->w_hash_next;
2795	}
2796
2797out:
2798	return (w);
2799}
2800
2801static void
2802witness_hash_put(struct witness *w)
2803{
2804	uint32_t hash;
2805
2806	MPASS(w != NULL);
2807	MPASS(w->w_name != NULL);
2808	if (witness_cold == 0)
2809		mtx_assert(&w_mtx, MA_OWNED);
2810	KASSERT(witness_hash_get(w->w_name) == NULL,
2811	    ("%s: trying to add a hash entry that already exists!", __func__));
2812	KASSERT(w->w_hash_next == NULL,
2813	    ("%s: w->w_hash_next != NULL", __func__));
2814
2815	hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
2816	w->w_hash_next = w_hash.wh_array[hash];
2817	w_hash.wh_array[hash] = w;
2818	w_hash.wh_count++;
2819}
2820
2822static struct witness_lock_order_data *
2823witness_lock_order_get(struct witness *parent, struct witness *child)
2824{
2825	struct witness_lock_order_data *data = NULL;
2826	struct witness_lock_order_key key;
2827	unsigned int hash;
2828
2829	MPASS(parent != NULL && child != NULL);
2830	key.from = parent->w_index;
2831	key.to = child->w_index;
2832	WITNESS_INDEX_ASSERT(key.from);
2833	WITNESS_INDEX_ASSERT(key.to);
2834	if ((w_rmatrix[parent->w_index][child->w_index]
2835	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
2836		goto out;
2837
2838	hash = witness_hash_djb2((const char*)&key,
2839	    sizeof(key)) % w_lohash.wloh_size;
2840	data = w_lohash.wloh_array[hash];
2841	while (data != NULL) {
2842		if (witness_lock_order_key_equal(&data->wlod_key, &key))
2843			break;
2844		data = data->wlod_next;
2845	}
2846
2847out:
2848	return (data);
2849}
2850
2851/*
2852 * Verify that parent and child have a known relationship, are not the same,
2853 * and child is actually a child of parent.  This is done without w_mtx
2854 * to avoid contention in the common case.
2855 */
2856static int
2857witness_lock_order_check(struct witness *parent, struct witness *child)
2858{
2859
2860	if (parent != child &&
2861	    w_rmatrix[parent->w_index][child->w_index]
2862	    & WITNESS_LOCK_ORDER_KNOWN &&
2863	    isitmychild(parent, child))
2864		return (1);
2865
2866	return (0);
2867}
2868
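/*
 * Record that the order parent -> child has been seen and save the
 * current stack trace as its first-seen point.  Returns 1 if the order
 * was already known or was recorded; returns 0 only when the fixed pool
 * of order-data entries is exhausted (the order is still marked known,
 * just without a saved stack).
 */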
2869static int
2870witness_lock_order_add(struct witness *parent, struct witness *child)
2871{
2872	struct witness_lock_order_data *data = NULL;
2873	struct witness_lock_order_key key;
2874	unsigned int hash;
2875
2876	MPASS(parent != NULL && child != NULL);
2877	key.from = parent->w_index;
2878	key.to = child->w_index;
2879	WITNESS_INDEX_ASSERT(key.from);
2880	WITNESS_INDEX_ASSERT(key.to);
2881	if (w_rmatrix[parent->w_index][child->w_index]
2882	    & WITNESS_LOCK_ORDER_KNOWN)
2883		return (1);
2884
2885	hash = witness_hash_djb2((const char*)&key,
2886	    sizeof(key)) % w_lohash.wloh_size;
2887	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
2888	data = w_lofree;
2889	if (data == NULL)
2890		return (0);
2891	w_lofree = data->wlod_next;
2892	data->wlod_next = w_lohash.wloh_array[hash];
2893	data->wlod_key = key;
2894	w_lohash.wloh_array[hash] = data;
2895	w_lohash.wloh_count++;
2896	stack_zero(&data->wlod_stack);
2897	stack_save(&data->wlod_stack);
2898	return (1);
2899}
2900
2901/* Call this whenever the structure of the witness graph changes. */
2902static void
2903witness_increment_graph_generation(void)
2904{
2905
2906	if (witness_cold == 0)
2907		mtx_assert(&w_mtx, MA_OWNED);
2908	w_generation++;
2909}
2910
2911#ifdef KDB
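/*
 * On a witness failure, optionally print a stack trace (if the
 * witness_trace tunable is set) and/or enter the debugger (if
 * witness_kdb is set).
 */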
2912static void
2913_witness_debugger(int cond, const char *msg)
2914{
2915
2916	if (witness_trace && cond)
2917		kdb_backtrace();
2918	if (witness_kdb && cond)
2919		kdb_enter(KDB_WHY_WITNESS, msg);
2920}
2921#endif
2922