/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;

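/*
 * One of these is placed on tq_active for each task a queue is
 * currently executing; taskqueue_drain() and friends walk the list to
 * tell whether a given task is running.
 */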
struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

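/*
 * Initialize a timeout task: the embedded task itself plus a callout
 * protected by the queue's mutex.
 */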
void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex, 0);
	timeout_task->q = queue;
	timeout_task->f = 0;
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name __unused, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL)
		return (NULL);

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return (queue);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return (_taskqueue_create(name, mflags, enqueue, context,
	    MTX_DEF, "taskqueue"));
}

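/*
 * Example (illustrative only; "my_handler" and "sc" are hypothetical):
 * a consumer initializes a task once and enqueues it as events arrive.
 * Enqueues that happen before the handler runs are coalesced, and the
 * handler receives the coalesced count in its "pending" argument:
 *
 *	TASK_INIT(&sc->my_task, 0, my_handler, sc);
 *	...
 *	taskqueue_enqueue(taskqueue_thread, &sc->my_task);
 */
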
void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}

/*
 * Signal the taskqueue threads to terminate and wait until every
 * thread has exited and every armed timeout task has drained.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

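/*
 * Free a taskqueue: mark it inactive, wait for its threads and armed
 * callouts to drain, then release its resources.
 */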
void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

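/*
 * Insert a task into the queue in priority order, coalescing repeated
 * enqueues of the same task by bumping ta_pending.  Called with the
 * queue lock held.
 */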
static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (prev == NULL || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins != NULL;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev != NULL)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	return (0);
}
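
/*
 * Enqueue a single task, taking the queue lock around the insertion.
 */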
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	TQ_UNLOCK(queue);

	return (res);
}

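/*
 * Callout handler for timeout tasks.  The callout was set up with
 * callout_init_mtx(), so this runs with the queue lock already held.
 */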
static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
}

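/*
 * Schedule a timeout task to be enqueued after approximately "ticks"
 * clock ticks, or immediately if "ticks" is zero.  Returns the prior
 * pending count, which is non-zero if the task was already queued or
 * its callout already armed.
 */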
int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (ticks < 0)
				ticks = -ticks; /* Ignore overflow. */
		}
		if (ticks > 0) {
			callout_reset(&timeout_task->c, ticks,
			    taskqueue_timeout_func, timeout_task);
		}
	}
	TQ_UNLOCK(queue);
	return (res);
}

static void
taskqueue_drain_running(struct taskqueue *queue)
{

	while (!TAILQ_EMPTY(&queue->tq_active))
		TQ_SLEEP(queue, &queue->tq_active, &queue->tq_mutex,
		    PWAIT, "-", 0);
}

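/*
 * Block a taskqueue: tasks may still be enqueued, but the queue's
 * enqueue hook is not called again until taskqueue_unblock().
 */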
void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

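/*
 * Unblock a taskqueue, firing the enqueue hook if any enqueues arrived
 * while the queue was blocked.
 */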
void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}

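/*
 * Run all pending tasks with the queue lock held, dropping it around
 * each task function.  The taskqueue_busy record left on tq_active
 * lets drainers see which task is currently executing.
 */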
static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct task *task;
	int pending;

	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);
	}
	TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
	if (TAILQ_EMPTY(&queue->tq_active))
		wakeup(&queue->tq_active);
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!callout_stop(&timeout_task->c);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

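/*
 * Wait until a task is neither pending on the queue nor running on any
 * of its threads.
 */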
void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{
	struct task *task;

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	task = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (task != NULL)
		while (task->ta_pending != 0)
			TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	taskqueue_drain_running(queue);
	KASSERT(STAILQ_EMPTY(&queue->tq_queue),
	    ("taskqueue queue is not empty after draining"));
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
			const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN + 1];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/*
			 * Should be OK to continue; taskqueue_free() will
			 * do the right thing with partially created threads.
			 */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

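/*
 * Example (illustrative only; "my_tq" is hypothetical): the common
 * pattern is to create a queue driven by taskqueue_thread_enqueue()
 * and then start one or more service threads for it:
 *
 *	static struct taskqueue *my_tq;
 *
 *	my_tq = taskqueue_create("my_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &my_tq);
 *	taskqueue_start_threads(&my_tq, 1, PWAIT, "my taskq");
 */
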
static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run_locked() can drop tq_mutex, we
		 * need to recheck TQ_FLAGS_ACTIVE here: if it was cleared
		 * while the lock was dropped, we missed the wakeup and
		 * must not go back to sleep.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);

	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* Rendezvous with the thread that asked us to terminate. */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	TQ_ASSERT_LOCKED(tq);
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return (_taskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue"));
}


/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return (taskqueue_enqueue(queue, task));
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

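/*
 * Return non-zero if "td" is one of the queue's service threads.  The
 * tq_threads array may contain NULL holes left by failed kthread_add()
 * calls, so only non-NULL entries are counted against tq_tcount.
 */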
int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}
