/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009 Isilon Inc http://www.isilon.com/
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/**
 * @file
 *
 * fail(9) Facility.
 *
 * @ingroup failpoint_private
 */
/**
 * @defgroup failpoint fail(9) Facility
 *
 * Failpoints allow for injecting fake errors into running code on the fly,
 * without modifying code or recompiling with flags.  Failpoints are always
 * present, and are very efficient when disabled.  Failpoints are described
 * in man fail(9).
 */
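/**
 * A sketch of typical use, assuming only what this file provides (the
 * KFAIL_POINT_RETURN() macro exercised at the bottom of this file, and
 * the debug.fail_point sysctl tree declared there); the fail point name
 * "my_fail_point" is illustrative:
 *
 *	int
 *	some_kernel_function(void)
 *	{
 *		...
 *		KFAIL_POINT_RETURN(DEBUG_FP, my_fail_point);
 *		...
 *	}
 *
 * armed from userland with, e.g.:
 *
 *	sysctl debug.fail_point.my_fail_point='2%return(5)'
 */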
/**
 * @defgroup failpoint_private Private fail(9) Implementation functions
 *
 * Private implementations for the actual failpoint code.
 *
 * @ingroup failpoint
 */
/**
 * @addtogroup failpoint_private
 * @{
 */

#include <sys/cdefs.h>
#include "opt_stack.h"

#include <sys/ctype.h>
#include <sys/errno.h>
#include <sys/fail.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/types.h>

#include <machine/atomic.h>
#include <machine/stdarg.h>

#ifdef ILOG_DEFINE_FOR_FILE
ILOG_DEFINE_FOR_FILE(L_ISI_FAIL_POINT, L_ILOG, fail_point);
#endif

static MALLOC_DEFINE(M_FAIL_POINT, "Fail Points", "fail points system");
#define fp_free(ptr) free(ptr, M_FAIL_POINT)
#define fp_malloc(size, flags) malloc((size), M_FAIL_POINT, (flags))
#define fs_free(ptr) fp_free(ptr)
#define fs_malloc() fp_malloc(sizeof(struct fail_point_setting), \
    M_WAITOK | M_ZERO)
/**
 * These define the wchans that are used for sleeping and pausing,
 * respectively.  They are chosen arbitrarily, but must be distinct for
 * each failpoint and must distinguish the sleep channel from the pause
 * channel.
 */
#define FP_SLEEP_CHANNEL(fp) (void*)(fp)
#define FP_PAUSE_CHANNEL(fp) __DEVOLATILE(void*, &fp->fp_setting)

/**
 * Don't allow more than this many entries in a fail point set by sysctl.
 * The 99.99...% case is to have 1 entry.  I can't imagine having this many
 * entries, so it should not limit us.  Saves on re-mallocs while holding
 * a non-sleepable lock.
 */
#define FP_MAX_ENTRY_COUNT 20

/* Used to drain sbufs to the sysctl output */
int fail_sysctl_drain_func(void *, const char *, int);

/* Head of tailq of struct fail_point_entry */
TAILQ_HEAD(fail_point_entry_queue, fail_point_entry);

/**
 * fp entries garbage list; outstanding entries are cleaned up in the
 * garbage collector
 */
STAILQ_HEAD(fail_point_setting_garbage, fail_point_setting);
static struct fail_point_setting_garbage fp_setting_garbage =
        STAILQ_HEAD_INITIALIZER(fp_setting_garbage);
static struct mtx mtx_garbage_list;
MTX_SYSINIT(mtx_garbage_list, &mtx_garbage_list, "fail point garbage mtx",
        MTX_SPIN);

static struct sx sx_fp_set;
SX_SYSINIT(sx_fp_set, &sx_fp_set, "fail point set sx");

/**
 * Failpoint types.
 * Don't change these without changing fail_type_strings in fail.c.
 * @ingroup failpoint_private
 */
enum fail_point_t {
	FAIL_POINT_OFF,		/**< don't fail */
	FAIL_POINT_PANIC,	/**< panic */
	FAIL_POINT_RETURN,	/**< return an error code */
	FAIL_POINT_BREAK,	/**< break into the debugger */
	FAIL_POINT_PRINT,	/**< print a message */
	FAIL_POINT_SLEEP,	/**< sleep for some msecs */
	FAIL_POINT_PAUSE,	/**< sleep until failpoint is set to off */
	FAIL_POINT_YIELD,	/**< yield the cpu */
	FAIL_POINT_DELAY,	/**< busy wait the cpu */
	FAIL_POINT_NUMTYPES,
	FAIL_POINT_INVALID = -1
};

static struct {
	const char *name;
	int	nmlen;
} fail_type_strings[] = {
#define	FP_TYPE_NM_LEN(s)	{ s, sizeof(s) - 1 }
	[FAIL_POINT_OFF] =	FP_TYPE_NM_LEN("off"),
	[FAIL_POINT_PANIC] =	FP_TYPE_NM_LEN("panic"),
	[FAIL_POINT_RETURN] =	FP_TYPE_NM_LEN("return"),
	[FAIL_POINT_BREAK] =	FP_TYPE_NM_LEN("break"),
	[FAIL_POINT_PRINT] =	FP_TYPE_NM_LEN("print"),
	[FAIL_POINT_SLEEP] =	FP_TYPE_NM_LEN("sleep"),
	[FAIL_POINT_PAUSE] =	FP_TYPE_NM_LEN("pause"),
	[FAIL_POINT_YIELD] =	FP_TYPE_NM_LEN("yield"),
	[FAIL_POINT_DELAY] =	FP_TYPE_NM_LEN("delay"),
};

#define FE_COUNT_UNTRACKED (INT_MIN)

/**
 * Internal structure tracking a single term of a complete failpoint.
 * @ingroup failpoint_private
 */
struct fail_point_entry {
	volatile bool	fe_stale;
	enum fail_point_t	fe_type;	/**< type of entry */
	int		fe_arg;		/**< argument to type (e.g. return value) */
	int		fe_prob;	/**< likelihood of firing in millionths */
	int32_t		fe_count;	/**< times to fire, FE_COUNT_UNTRACKED means infinite */
	pid_t		fe_pid;		/**< only fail for this process */
	struct fail_point	*fe_parent;	/**< backpointer to fp */
	TAILQ_ENTRY(fail_point_entry)	fe_entries; /**< next entry ptr */
};

struct fail_point_setting {
	STAILQ_ENTRY(fail_point_setting) fs_garbage_link;
	struct fail_point_entry_queue fp_entry_queue;
	struct fail_point *fs_parent;
	struct mtx feq_mtx; /* Gives fail_point_pause something to do.  */
};

/**
 * Defines stating the equivalent of probability one (100%)
 */
enum {
	PROB_MAX = 1000000,	/* probability between zero and this number */
	PROB_DIGITS = 6		/* number of zeros in the above number */
};
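
/**
 * A worked example of this encoding, following parse_term() and
 * parse_number() below: the term "2.5%" parses to fe_prob == 25000,
 * i.e. 2 * (PROB_MAX / 100) for the units plus 5000 for the scaled
 * fraction, so it fires on about 25000 out of every PROB_MAX (1000000)
 * evaluations.  The smallest nonzero probability expressible is
 * "0.0001%", which parses to fe_prob == 1.
 */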

/* Get a ref on an fp's fp_setting */
static inline struct fail_point_setting *fail_point_setting_get_ref(
        struct fail_point *fp);
/* Release a ref on an fp_setting */
static inline void fail_point_setting_release_ref(struct fail_point *fp);
/* Allocate and initialize a struct fail_point_setting */
static struct fail_point_setting *fail_point_setting_new(struct
        fail_point *);
/* Free a struct fail_point_setting */
static void fail_point_setting_destroy(struct fail_point_setting *fp_setting);
/* Allocate and initialize a struct fail_point_entry */
static struct fail_point_entry *fail_point_entry_new(struct
        fail_point_setting *);
/* Free a struct fail_point_entry */
static void fail_point_entry_destroy(struct fail_point_entry *fp_entry);
/* Append fp setting to garbage list */
static inline void fail_point_setting_garbage_append(
        struct fail_point_setting *fp_setting);
/* Swap fp's setting with fp_setting_new */
static inline struct fail_point_setting *
        fail_point_swap_settings(struct fail_point *fp,
        struct fail_point_setting *fp_setting_new);
/* Free up any zero-ref setting in the garbage queue */
static void fail_point_garbage_collect(void);
/* If this fail point's settings are empty, swap them out to NULL. */
static inline void fail_point_eval_swap_out(struct fail_point *fp,
        struct fail_point_setting *fp_setting);

bool
fail_point_is_off(struct fail_point *fp)
{
	bool return_val;
	struct fail_point_setting *fp_setting;
	struct fail_point_entry *ent;

	return_val = true;

	fp_setting = fail_point_setting_get_ref(fp);
	if (fp_setting != NULL) {
		TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue,
		    fe_entries) {
			if (!ent->fe_stale) {
				return_val = false;
				break;
			}
		}
	}
	fail_point_setting_release_ref(fp);

	return (return_val);
}

/* Allocate and initialize a struct fail_point_setting */
static struct fail_point_setting *
fail_point_setting_new(struct fail_point *fp)
{
	struct fail_point_setting *fs_new;

	fs_new = fs_malloc();
	fs_new->fs_parent = fp;
	TAILQ_INIT(&fs_new->fp_entry_queue);
	mtx_init(&fs_new->feq_mtx, "fail point entries", NULL, MTX_SPIN);

	fail_point_setting_garbage_append(fs_new);

	return (fs_new);
}

/* Free a struct fail_point_setting */
static void
fail_point_setting_destroy(struct fail_point_setting *fp_setting)
{
	struct fail_point_entry *ent;

	while (!TAILQ_EMPTY(&fp_setting->fp_entry_queue)) {
		ent = TAILQ_FIRST(&fp_setting->fp_entry_queue);
		TAILQ_REMOVE(&fp_setting->fp_entry_queue, ent, fe_entries);
		fail_point_entry_destroy(ent);
	}

	fs_free(fp_setting);
}

/* Allocate and initialize a struct fail_point_entry */
static struct fail_point_entry *
fail_point_entry_new(struct fail_point_setting *fp_setting)
{
	struct fail_point_entry *fp_entry;

	fp_entry = fp_malloc(sizeof(struct fail_point_entry),
	        M_WAITOK | M_ZERO);
	fp_entry->fe_parent = fp_setting->fs_parent;
	fp_entry->fe_prob = PROB_MAX;
	fp_entry->fe_pid = NO_PID;
	fp_entry->fe_count = FE_COUNT_UNTRACKED;
	TAILQ_INSERT_TAIL(&fp_setting->fp_entry_queue, fp_entry,
	        fe_entries);

	return (fp_entry);
}

/* Free a struct fail_point_entry */
static void
fail_point_entry_destroy(struct fail_point_entry *fp_entry)
{

	fp_free(fp_entry);
}

/* Get a ref on an fp's fp_setting */
static inline struct fail_point_setting *
fail_point_setting_get_ref(struct fail_point *fp)
{
	struct fail_point_setting *fp_setting;

	/* Invariant: if we have a ref, our pointer to fp_setting is safe */
	atomic_add_acq_32(&fp->fp_ref_cnt, 1);
	fp_setting = fp->fp_setting;

	return (fp_setting);
}

/* Release a ref on an fp_setting */
static inline void
fail_point_setting_release_ref(struct fail_point *fp)
{

	KASSERT(fp->fp_ref_cnt > 0, ("Attempting to deref w/no refs"));
	atomic_subtract_rel_32(&fp->fp_ref_cnt, 1);
}

/* Append fp entries to fp garbage list */
static inline void
fail_point_setting_garbage_append(struct fail_point_setting *fp_setting)
{

	mtx_lock_spin(&mtx_garbage_list);
	STAILQ_INSERT_TAIL(&fp_setting_garbage, fp_setting,
	        fs_garbage_link);
	mtx_unlock_spin(&mtx_garbage_list);
}

/* Swap fp's entries with fp_setting_new */
static struct fail_point_setting *
fail_point_swap_settings(struct fail_point *fp,
        struct fail_point_setting *fp_setting_new)
{
	struct fail_point_setting *fp_setting_old;

	fp_setting_old = fp->fp_setting;
	fp->fp_setting = fp_setting_new;

	return (fp_setting_old);
}

static inline void
fail_point_eval_swap_out(struct fail_point *fp,
        struct fail_point_setting *fp_setting)
{

	/* We may have already been swapped out and replaced; ignore. */
	if (fp->fp_setting == fp_setting)
		fail_point_swap_settings(fp, NULL);
}

/* Free up any zero-ref entries in the garbage queue */
static void
fail_point_garbage_collect(void)
{
	struct fail_point_setting *fs_current, *fs_next;
	struct fail_point_setting_garbage fp_ents_free_list;

	/**
	 * We will transfer the entries to be freed to fp_ents_free_list
	 * while holding the spin mutex, then free them after we drop the
	 * lock.  This avoids triggering witness due to sleepable mutexes
	 * in the memory allocator.
	 */
	STAILQ_INIT(&fp_ents_free_list);

	mtx_lock_spin(&mtx_garbage_list);
	STAILQ_FOREACH_SAFE(fs_current, &fp_setting_garbage, fs_garbage_link,
	    fs_next) {
		if (fs_current->fs_parent->fp_setting != fs_current &&
		        fs_current->fs_parent->fp_ref_cnt == 0) {
			STAILQ_REMOVE(&fp_setting_garbage, fs_current,
			        fail_point_setting, fs_garbage_link);
			STAILQ_INSERT_HEAD(&fp_ents_free_list, fs_current,
			        fs_garbage_link);
		}
	}
	mtx_unlock_spin(&mtx_garbage_list);

	STAILQ_FOREACH_SAFE(fs_current, &fp_ents_free_list, fs_garbage_link,
	        fs_next)
		fail_point_setting_destroy(fs_current);
}

/* Drain out all refs from this fail point */
static inline void
fail_point_drain(struct fail_point *fp, int expected_ref)
{
	struct fail_point_setting *entries;

	entries = fail_point_swap_settings(fp, NULL);
	/**
	 * We have unpaused all threads; so we will wait no longer
	 * than the time taken for the longest remaining sleep, or
	 * the length of time of a long-running code block.
	 */
	while (fp->fp_ref_cnt > expected_ref) {
		wakeup(FP_PAUSE_CHANNEL(fp));
		tsleep(&fp, PWAIT, "fail_point_drain", hz / 100);
	}
	if (fp->fp_callout)
		callout_drain(fp->fp_callout);
	fail_point_swap_settings(fp, entries);
}

static inline void
fail_point_pause(struct fail_point *fp, enum fail_point_return_code *pret,
        struct mtx *mtx_sleep)
{

	if (fp->fp_pre_sleep_fn)
		fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

	msleep_spin(FP_PAUSE_CHANNEL(fp), mtx_sleep, "failpt", 0);

	if (fp->fp_post_sleep_fn)
		fp->fp_post_sleep_fn(fp->fp_post_sleep_arg);
}

static inline void
fail_point_sleep(struct fail_point *fp, int msecs,
        enum fail_point_return_code *pret)
{
	int timo;

	/* Convert from millisecs to ticks, rounding up */
	timo = howmany((int64_t)msecs * hz, 1000L);

	if (timo > 0) {
		if (!(fp->fp_flags & FAIL_POINT_USE_TIMEOUT_PATH)) {
			if (fp->fp_pre_sleep_fn)
				fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

			tsleep(FP_SLEEP_CHANNEL(fp), PWAIT, "failpt", timo);

			if (fp->fp_post_sleep_fn)
				fp->fp_post_sleep_fn(fp->fp_post_sleep_arg);
		} else {
			if (fp->fp_pre_sleep_fn)
				fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

			callout_reset(fp->fp_callout, timo,
			    fp->fp_post_sleep_fn, fp->fp_post_sleep_arg);
			*pret = FAIL_POINT_RC_QUEUED;
		}
	}
}
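
/**
 * A worked example of the msec-to-tick conversion above: with hz = 1000,
 * msecs = 7 gives timo = howmany(7 * 1000, 1000) = 7 ticks, while with
 * hz = 100, msecs = 5 gives timo = howmany(5 * 100, 1000) = 1 tick.
 * Because howmany() rounds up, any nonzero msecs yields at least one
 * tick rather than being silently dropped.
 */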

static char *parse_fail_point(struct fail_point_setting *, char *);
static char *parse_term(struct fail_point_setting *, char *);
static char *parse_number(int *out_units, int *out_decimal, char *);
static char *parse_type(struct fail_point_entry *, char *);

/**
 * Initialize a fail_point.  The name is formed in a printf-like fashion
 * from "fmt" and subsequent arguments.  This function is generally used
 * for custom failpoints located at odd places in the sysctl tree, and is
 * not explicitly needed for standard in-line-declared failpoints.
 *
 * @ingroup failpoint
 */
void
fail_point_init(struct fail_point *fp, const char *fmt, ...)
{
	va_list ap;
	char *name;
	int n;

	fp->fp_setting = NULL;
	fp->fp_flags = 0;

	/* Figure out the size of the name. */
	va_start(ap, fmt);
	n = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);

	/* Allocate the name and fill it in. */
	name = fp_malloc(n + 1, M_WAITOK);
	if (name != NULL) {
		va_start(ap, fmt);
		vsnprintf(name, n + 1, fmt, ap);
		va_end(ap);
	}
	fp->fp_name = name;
	fp->fp_location = "";
	fp->fp_flags |= FAIL_POINT_DYNAMIC_NAME;
	fp->fp_pre_sleep_fn = NULL;
	fp->fp_pre_sleep_arg = NULL;
	fp->fp_post_sleep_fn = NULL;
	fp->fp_post_sleep_arg = NULL;
}
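
/**
 * A hypothetical sketch of fail_point_init() use; the name format and
 * "unit" are illustrative, not taken from this file:
 *
 *	static struct fail_point fp;
 *
 *	fail_point_init(&fp, "dev_%d_io_error", unit);
 *	...
 *	fail_point_destroy(&fp);
 */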

void
fail_point_alloc_callout(struct fail_point *fp)
{

	/**
	 * This assumes that calls to fail_point_use_timeout_path()
	 * will not race.
	 */
	if (fp->fp_callout != NULL)
		return;
	fp->fp_callout = fp_malloc(sizeof(*fp->fp_callout), M_WAITOK);
	callout_init(fp->fp_callout, CALLOUT_MPSAFE);
}

/**
 * Free the resources held by a fail_point, and wake any paused threads.
 * Thou shalt not allow threads to hit this fail point after you enter this
 * function, nor shall you call this multiple times for a given fp.
 * @ingroup failpoint
 */
void
fail_point_destroy(struct fail_point *fp)
{

	fail_point_drain(fp, 0);

	if ((fp->fp_flags & FAIL_POINT_DYNAMIC_NAME) != 0) {
		fp_free(__DECONST(void *, fp->fp_name));
		fp->fp_name = NULL;
	}
	fp->fp_flags = 0;
	if (fp->fp_callout) {
		fp_free(fp->fp_callout);
		fp->fp_callout = NULL;
	}

	sx_xlock(&sx_fp_set);
	fail_point_garbage_collect();
	sx_xunlock(&sx_fp_set);
}

/**
 * This does the real work of evaluating a fail point.  If the fail point
 * tells us to return a value, this function returns FAIL_POINT_RC_RETURN
 * and fills in 'return_value' (return_value is allowed to be null).  If
 * the fail point tells us to panic, we never return.  Otherwise we just
 * return FAIL_POINT_RC_CONTINUE after doing some work, which means
 * "keep going".
 */
enum fail_point_return_code
fail_point_eval_nontrivial(struct fail_point *fp, int *return_value)
{
	bool execute = false;
	struct fail_point_entry *ent;
	struct fail_point_setting *fp_setting;
	enum fail_point_return_code ret;
	int cont;
	int count;
	int msecs;
	int usecs;

	ret = FAIL_POINT_RC_CONTINUE;
	cont = 0; /* don't continue by default */

	fp_setting = fail_point_setting_get_ref(fp);
	if (fp_setting == NULL)
		goto abort;

	TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) {
		if (ent->fe_stale)
			continue;

		if (ent->fe_prob < PROB_MAX &&
		    ent->fe_prob < random() % PROB_MAX)
			continue;

		if (ent->fe_pid != NO_PID && ent->fe_pid != curproc->p_pid)
			continue;

		if (ent->fe_count != FE_COUNT_UNTRACKED) {
			count = ent->fe_count;
			while (count > 0) {
				if (atomic_cmpset_32(&ent->fe_count, count, count - 1)) {
					count--;
					execute = true;
					break;
				}
				count = ent->fe_count;
			}
			if (!execute)
				/* We lost the race; consider the entry stale and bail now */
				continue;
			if (count == 0)
				ent->fe_stale = true;
		}

		switch (ent->fe_type) {
		case FAIL_POINT_PANIC:
			panic("fail point %s panicking", fp->fp_name);
			/* NOTREACHED */

		case FAIL_POINT_RETURN:
			if (return_value != NULL)
				*return_value = ent->fe_arg;
			ret = FAIL_POINT_RC_RETURN;
			break;

		case FAIL_POINT_BREAK:
			printf("fail point %s breaking to debugger\n",
			        fp->fp_name);
			breakpoint();
			break;

		case FAIL_POINT_PRINT:
			printf("fail point %s executing\n", fp->fp_name);
			cont = ent->fe_arg;
			break;

		case FAIL_POINT_SLEEP:
			msecs = ent->fe_arg;
			if (msecs)
				fail_point_sleep(fp, msecs, &ret);
			break;

		case FAIL_POINT_PAUSE:
			/**
			 * Pausing is inherently strange with multiple
			 * entries given our design.  That is because some
			 * entries could be unreachable, for instance in cases like:
			 * pause->return. We can never reach the return entry.
			 * The sysctl layer actually truncates all entries after
			 * a pause for this reason.
			 */
			mtx_lock_spin(&fp_setting->feq_mtx);
			fail_point_pause(fp, &ret, &fp_setting->feq_mtx);
			mtx_unlock_spin(&fp_setting->feq_mtx);
			break;

		case FAIL_POINT_YIELD:
			kern_yield(PRI_UNCHANGED);
			break;

		case FAIL_POINT_DELAY:
			usecs = ent->fe_arg;
			DELAY(usecs);
			break;

		default:
			break;
		}

		if (cont == 0)
			break;
	}

	if (fail_point_is_off(fp))
		fail_point_eval_swap_out(fp, fp_setting);

abort:
	fail_point_setting_release_ref(fp);

	return (ret);
}

/**
 * Translate internal fail_point structure into human-readable text.
 */
static void
fail_point_get(struct fail_point *fp, struct sbuf *sb,
        bool verbose)
{
	struct fail_point_entry *ent;
	struct fail_point_setting *fp_setting;
	struct fail_point_entry *fp_entry_cpy;
	int cnt_sleeping;
	int idx;
	int printed_entry_count;

	cnt_sleeping = 0;
	idx = 0;
	printed_entry_count = 0;

	fp_entry_cpy = fp_malloc(sizeof(struct fail_point_entry) *
	        (FP_MAX_ENTRY_COUNT + 1), M_WAITOK);

	fp_setting = fail_point_setting_get_ref(fp);

	if (fp_setting != NULL) {
		TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) {
			if (ent->fe_stale)
				continue;

			KASSERT(printed_entry_count < FP_MAX_ENTRY_COUNT,
			        ("FP entry list larger than allowed"));

			fp_entry_cpy[printed_entry_count] = *ent;
			++printed_entry_count;
		}
	}
	fail_point_setting_release_ref(fp);

	/* This is our equivalent of a NULL terminator */
	fp_entry_cpy[printed_entry_count].fe_type = FAIL_POINT_INVALID;

	while (idx < printed_entry_count) {
		ent = &fp_entry_cpy[idx];
		++idx;
		if (ent->fe_prob < PROB_MAX) {
			int decimal = ent->fe_prob % (PROB_MAX / 100);
			int units = ent->fe_prob / (PROB_MAX / 100);
			sbuf_printf(sb, "%d", units);
			if (decimal) {
				int digits = PROB_DIGITS - 2;
				while (!(decimal % 10)) {
					digits--;
					decimal /= 10;
				}
				sbuf_printf(sb, ".%0*d", digits, decimal);
			}
			sbuf_printf(sb, "%%");
		}
		if (ent->fe_count >= 0)
			sbuf_printf(sb, "%d*", ent->fe_count);
		sbuf_printf(sb, "%s", fail_type_strings[ent->fe_type].name);
		if (ent->fe_arg)
			sbuf_printf(sb, "(%d)", ent->fe_arg);
		if (ent->fe_pid != NO_PID)
			sbuf_printf(sb, "[pid %d]", ent->fe_pid);
		if (TAILQ_NEXT(ent, fe_entries))
			sbuf_cat(sb, "->");
	}
	if (!printed_entry_count)
		sbuf_cat(sb, "off");

	fp_free(fp_entry_cpy);
	if (verbose) {
#ifdef STACK
		/*
		 * Print number of sleeping threads.  queue=0 is the argument
		 * used by msleep when sending our threads to sleep.
		 */
		sbuf_cat(sb, "\nsleeping_thread_stacks = {\n");
		sleepq_sbuf_print_stacks(sb, FP_SLEEP_CHANNEL(fp), 0,
		        &cnt_sleeping);

		sbuf_cat(sb, "},\n");
#endif
		sbuf_printf(sb, "sleeping_thread_count = %d,\n",
		        cnt_sleeping);

#ifdef STACK
		sbuf_cat(sb, "paused_thread_stacks = {\n");
		sleepq_sbuf_print_stacks(sb, FP_PAUSE_CHANNEL(fp), 0,
		        &cnt_sleeping);

		sbuf_cat(sb, "},\n");
#endif
		sbuf_printf(sb, "paused_thread_count = %d\n",
		        cnt_sleeping);
	}
}

/**
 * Set an internal fail_point structure from a human-readable failpoint string
 * in a lock-safe manner.
 */
static int
fail_point_set(struct fail_point *fp, char *buf)
{
	struct fail_point_entry *ent, *ent_next;
	struct fail_point_setting *entries;
	bool should_wake_paused;
	bool should_truncate;
	int error;

	error = 0;
	should_wake_paused = false;
	should_truncate = false;

	/* Parse new entries. */
	/**
	 * ref protects our new malloc'd stuff from being garbage collected
	 * before we link it.
	 */
	fail_point_setting_get_ref(fp);
	entries = fail_point_setting_new(fp);
	if (parse_fail_point(entries, buf) == NULL) {
		STAILQ_REMOVE(&fp_setting_garbage, entries,
		        fail_point_setting, fs_garbage_link);
		fail_point_setting_destroy(entries);
		error = EINVAL;
		goto end;
	}

	/**
	 * Prune the parsed list of entries in place:
	 * get rid of useless zero-probability entries and entries with hit
	 * count 0.
	 * If 'off' is present and it has no hit count set, then all entries
	 * after it are discarded since they are unreachable.
	 */
	TAILQ_FOREACH_SAFE(ent, &entries->fp_entry_queue, fe_entries, ent_next) {
		if (ent->fe_prob == 0 || ent->fe_count == 0) {
			printf("Discarding entry which cannot execute %s\n",
			        fail_type_strings[ent->fe_type].name);
			TAILQ_REMOVE(&entries->fp_entry_queue, ent,
			        fe_entries);
			fp_free(ent);
			continue;
		} else if (should_truncate) {
			printf("Discarding unreachable entry %s\n",
			        fail_type_strings[ent->fe_type].name);
			TAILQ_REMOVE(&entries->fp_entry_queue, ent,
			        fe_entries);
			fp_free(ent);
			continue;
		}

		if (ent->fe_type == FAIL_POINT_OFF) {
			should_wake_paused = true;
			if (ent->fe_count == FE_COUNT_UNTRACKED) {
				should_truncate = true;
				TAILQ_REMOVE(&entries->fp_entry_queue, ent,
				        fe_entries);
				fp_free(ent);
			}
		} else if (ent->fe_type == FAIL_POINT_PAUSE) {
			should_truncate = true;
		} else if (ent->fe_type == FAIL_POINT_SLEEP && (fp->fp_flags &
		        FAIL_POINT_NONSLEEPABLE)) {
			/**
			 * If this fail point is annotated as being in a
			 * non-sleepable ctx, convert sleep to delay and
			 * convert the msec argument to usecs.
			 */
			printf("Sleep call request on fail point in "
			        "non-sleepable context; using delay instead "
			        "of sleep\n");
			ent->fe_type = FAIL_POINT_DELAY;
			ent->fe_arg *= 1000;
		}
	}

	if (TAILQ_EMPTY(&entries->fp_entry_queue)) {
		entries = fail_point_swap_settings(fp, NULL);
		if (entries != NULL)
			wakeup(FP_PAUSE_CHANNEL(fp));
	} else {
		if (should_wake_paused)
			wakeup(FP_PAUSE_CHANNEL(fp));
		fail_point_swap_settings(fp, entries);
	}

end:
#ifdef IWARNING
	if (error)
		IWARNING("Failed to set %s %s to %s",
		    fp->fp_name, fp->fp_location, buf);
	else
		INOTICE("Set %s %s to %s",
		    fp->fp_name, fp->fp_location, buf);
#endif /* IWARNING */

	fail_point_setting_release_ref(fp);
	return (error);
}

#define MAX_FAIL_POINT_BUF	1023

/**
 * Handle kernel failpoint set/get.
 */
int
fail_point_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct fail_point *fp;
	char *buf;
	struct sbuf sb, *sb_check;
	int error;

	buf = NULL;
	error = 0;
	fp = arg1;

	sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND);
	if (sb_check != &sb)
		return (ENOMEM);

	sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req);

	/* Setting */
	/**
	 * Lock protects any new entries from being garbage collected before we
	 * can link them to the fail point.
	 */
	sx_xlock(&sx_fp_set);
	if (req->newptr) {
		if (req->newlen > MAX_FAIL_POINT_BUF) {
			error = EINVAL;
			goto out;
		}

		buf = fp_malloc(req->newlen + 1, M_WAITOK);

		error = SYSCTL_IN(req, buf, req->newlen);
		if (error)
			goto out;
		buf[req->newlen] = '\0';

		error = fail_point_set(fp, buf);
	}

	fail_point_garbage_collect();

	/* Retrieving. */
	fail_point_get(fp, &sb, false);

out:
	/* The early-error paths above jump here with sx_fp_set still held. */
	sx_xunlock(&sx_fp_set);
	sbuf_finish(&sb);
	sbuf_delete(&sb);

	if (buf)
		fp_free(buf);

	return (error);
}

int
fail_point_sysctl_status(SYSCTL_HANDLER_ARGS)
{
	struct fail_point *fp;
	struct sbuf sb, *sb_check;

	fp = arg1;

	sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND);
	if (sb_check != &sb)
		return (ENOMEM);

	sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req);

	/* Retrieving. */
	fail_point_get(fp, &sb, true);

	sbuf_finish(&sb);
	sbuf_delete(&sb);

	/**
	 * Take the set lock to serialize with setters before collecting
	 * any garbage their settings may have left behind.
	 */
	sx_xlock(&sx_fp_set);
	fail_point_garbage_collect();
	sx_xunlock(&sx_fp_set);

	return (0);
}

int
fail_sysctl_drain_func(void *sysctl_args, const char *buf, int len)
{
	struct sysctl_req *sa;
	int error;

	sa = sysctl_args;

	error = SYSCTL_OUT(sa, buf, len);

	if (error == ENOMEM)
		return (-1);
	else
		return (len);
}

/**
 * Internal helper function to translate a human-readable failpoint string
 * into an internally-parsable fail_point structure.
 */
static char *
parse_fail_point(struct fail_point_setting *ents, char *p)
{
	/*  <fail_point> ::
	 *      <term> ( "->" <term> )*
	 */
	uint8_t term_count;

	term_count = 1;

	p = parse_term(ents, p);
	if (p == NULL)
		return (NULL);

	while (*p != '\0') {
		term_count++;
		if (p[0] != '-' || p[1] != '>' ||
		        (p = parse_term(ents, p+2)) == NULL ||
		        term_count > FP_MAX_ENTRY_COUNT)
			return (NULL);
	}
	return (p);
}
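
/**
 * Some example strings accepted by the grammar above, with their
 * meanings (the fail point names they would be written to are
 * immaterial here):
 *
 *	"off"				never fire
 *	"return(5)"			always fire, returning error 5
 *	"1.5%break"			fire with probability 1.5%
 *	"10*sleep(100)[pid 1234]"	fire 10 times, sleeping 100 msecs,
 *					but only for pid 1234
 *	"2%return(5)->print"		two terms chained with "->"
 */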

/**
 * Internal helper function to parse an individual term from a failpoint.
 */
static char *
parse_term(struct fail_point_setting *ents, char *p)
{
	struct fail_point_entry *ent;

	ent = fail_point_entry_new(ents);

	/*
	 * <term> ::
	 *     ( (<float> "%") | (<integer> "*" ) )*
	 *     <type>
	 *     [ "(" <integer> ")" ]
	 *     [ "[pid " <integer> "]" ]
	 */

	/* ( (<float> "%") | (<integer> "*" ) )* */
	while (isdigit(*p) || *p == '.') {
		int units, decimal;

		p = parse_number(&units, &decimal, p);
		if (p == NULL)
			return (NULL);

		if (*p == '%') {
			if (units > 100) /* prevent overflow early */
				units = 100;
			ent->fe_prob = units * (PROB_MAX / 100) + decimal;
			if (ent->fe_prob > PROB_MAX)
				ent->fe_prob = PROB_MAX;
		} else if (*p == '*') {
			if (units <= 0 || decimal)
				return (NULL);
			ent->fe_count = units;
		} else
			return (NULL);
		p++;
	}

	/* <type> */
	p = parse_type(ent, p);
	if (p == NULL)
		return (NULL);
	if (*p == '\0')
		return (p);

	/* [ "(" <integer> ")" ] */
	if (*p != '(')
		return (p);
	p++;
	if (!isdigit(*p) && *p != '-')
		return (NULL);
	ent->fe_arg = strtol(p, &p, 0);
	if (*p++ != ')')
		return (NULL);

	/* [ "[pid " <integer> "]" ] */
#define PID_STRING "[pid "
	if (strncmp(p, PID_STRING, sizeof(PID_STRING) - 1) != 0)
		return (p);
	p += sizeof(PID_STRING) - 1;
	if (!isdigit(*p))
		return (NULL);
	ent->fe_pid = strtol(p, &p, 0);
	if (*p++ != ']')
		return (NULL);

	return (p);
}

/**
 * Internal helper function to parse a number for a failpoint term.
 */
static char *
parse_number(int *out_units, int *out_decimal, char *p)
{
	char *old_p;

	/**
	 *  <number> ::
	 *      <integer> [ "." <integer> ] |
	 *      "." <integer>
	 */

	/* whole part */
	old_p = p;
	*out_units = strtol(p, &p, 10);
	if (p == old_p && *p != '.')
		return (NULL);

	/* fractional part */
	*out_decimal = 0;
	if (*p == '.') {
		int digits = 0;
		p++;
		while (isdigit(*p)) {
			int digit = *p - '0';
			if (digits < PROB_DIGITS - 2)
				*out_decimal = *out_decimal * 10 + digit;
			else if (digits == PROB_DIGITS - 2 && digit >= 5)
				(*out_decimal)++;
			digits++;
			p++;
		}
		if (!digits) /* need at least one digit after '.' */
			return (NULL);
		while (digits++ < PROB_DIGITS - 2) /* add implicit zeros */
			*out_decimal *= 10;
	}

	return (p); /* success */
}
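
/**
 * For example, parse_number() maps "2.5" to units == 2 and
 * decimal == 5000, since the fraction is scaled up to
 * PROB_DIGITS - 2 == 4 digits, while "0.00005" maps to units == 0 and
 * decimal == 1: the fifth fractional digit cannot be stored, so it only
 * rounds the scaled value up.
 */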

/**
 * Internal helper function to parse an individual type for a failpoint term.
 */
static char *
parse_type(struct fail_point_entry *ent, char *beg)
{
	enum fail_point_t type;
	int len;

	for (type = FAIL_POINT_OFF; type < FAIL_POINT_NUMTYPES; type++) {
		len = fail_type_strings[type].nmlen;
		if (strncmp(fail_type_strings[type].name, beg, len) == 0) {
			ent->fe_type = type;
			return (beg + len);
		}
	}
	return (NULL);
}

/* The fail point sysctl tree. */
SYSCTL_NODE(_debug, OID_AUTO, fail_point, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "fail points");

/* Debugging/testing stuff for fail point */
static int
sysctl_test_fail_point(SYSCTL_HANDLER_ARGS)
{

	KFAIL_POINT_RETURN(DEBUG_FP, test_fail_point);
	return (0);
}
SYSCTL_OID(_debug_fail_point, OID_AUTO, test_trigger_fail_point,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0,
    sysctl_test_fail_point, "A",
    "Trigger test fail points");

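/**
 * A sketch of exercising the test fail point from userland; the sysctl
 * names follow from the KFAIL_POINT_RETURN() and SYSCTL_OID()
 * declarations above:
 *
 *	# arm the fail point
 *	sysctl debug.fail_point.test_fail_point='return(5)'
 *	# evaluate it by reading the trigger OID
 *	sysctl debug.fail_point.test_trigger_fail_point
 *	# disarm it
 *	sysctl debug.fail_point.test_fail_point='off'
 */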