1// SPDX-License-Identifier: GPL-2.0-only
2/* Kernel thread helper functions.
3 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
4 *   Copyright (C) 2009 Red Hat, Inc.
5 *
6 * Creation is done via kthreadd, so that we get a clean environment
7 * even if we're invoked from userspace (think modprobe, hotplug cpu,
8 * etc.).
9 */
10#include <uapi/linux/sched/types.h>
11#include <linux/mm.h>
12#include <linux/mmu_context.h>
13#include <linux/sched.h>
14#include <linux/sched/mm.h>
15#include <linux/sched/task.h>
16#include <linux/kthread.h>
17#include <linux/completion.h>
18#include <linux/err.h>
19#include <linux/cgroup.h>
20#include <linux/cpuset.h>
21#include <linux/unistd.h>
22#include <linux/file.h>
23#include <linux/export.h>
24#include <linux/mutex.h>
25#include <linux/slab.h>
26#include <linux/freezer.h>
27#include <linux/ptrace.h>
28#include <linux/uaccess.h>
29#include <linux/numa.h>
30#include <linux/sched/isolation.h>
31#include <trace/events/sched.h>
32
33
34static DEFINE_SPINLOCK(kthread_create_lock);
35static LIST_HEAD(kthread_create_list);
36struct task_struct *kthreadd_task;
37
38struct kthread_create_info
39{
40	/* Information passed to kthread() from kthreadd. */
41	char *full_name;
42	int (*threadfn)(void *data);
43	void *data;
44	int node;
45
46	/* Result passed back to kthread_create() from kthreadd. */
47	struct task_struct *result;
48	struct completion *done;
49
50	struct list_head list;
51};
52
53struct kthread {
54	unsigned long flags;
55	unsigned int cpu;
56	int result;
57	int (*threadfn)(void *);
58	void *data;
59	struct completion parked;
60	struct completion exited;
61#ifdef CONFIG_BLK_CGROUP
62	struct cgroup_subsys_state *blkcg_css;
63#endif
64	/* To store the full name if task comm is truncated. */
65	char *full_name;
66};
67
68enum KTHREAD_BITS {
69	KTHREAD_IS_PER_CPU = 0,
70	KTHREAD_SHOULD_STOP,
71	KTHREAD_SHOULD_PARK,
72};
73
74static inline struct kthread *to_kthread(struct task_struct *k)
75{
76	WARN_ON(!(k->flags & PF_KTHREAD));
77	return k->worker_private;
78}
79
80/*
81 * Variant of to_kthread() that doesn't assume @p is a kthread.
82 *
83 * Per construction; when:
84 *
85 *   (p->flags & PF_KTHREAD) && p->worker_private
86 *
87 * the task is both a kthread and struct kthread is persistent. However
 * PF_KTHREAD on its own is not, since kernel_thread() can exec() (See umh.c and
89 * begin_new_exec()).
90 */
91static inline struct kthread *__to_kthread(struct task_struct *p)
92{
93	void *kthread = p->worker_private;
94	if (kthread && !(p->flags & PF_KTHREAD))
95		kthread = NULL;
96	return kthread;
97}
98
99void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
100{
101	struct kthread *kthread = to_kthread(tsk);
102
103	if (!kthread || !kthread->full_name) {
104		__get_task_comm(buf, buf_size, tsk);
105		return;
106	}
107
108	strscpy_pad(buf, kthread->full_name, buf_size);
109}
110
111bool set_kthread_struct(struct task_struct *p)
112{
113	struct kthread *kthread;
114
115	if (WARN_ON_ONCE(to_kthread(p)))
116		return false;
117
118	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
119	if (!kthread)
120		return false;
121
122	init_completion(&kthread->exited);
123	init_completion(&kthread->parked);
124	p->vfork_done = &kthread->exited;
125
126	p->worker_private = kthread;
127	return true;
128}
129
130void free_kthread_struct(struct task_struct *k)
131{
132	struct kthread *kthread;
133
134	/*
	 * Can be NULL if kzalloc() in set_kthread_struct() failed.
136	 */
137	kthread = to_kthread(k);
138	if (!kthread)
139		return;
140
141#ifdef CONFIG_BLK_CGROUP
142	WARN_ON_ONCE(kthread->blkcg_css);
143#endif
144	k->worker_private = NULL;
145	kfree(kthread->full_name);
146	kfree(kthread);
147}
148
149/**
150 * kthread_should_stop - should this kthread return now?
151 *
152 * When someone calls kthread_stop() on your kthread, it will be woken
153 * and this will return true.  You should then return, and your return
154 * value will be passed through to kthread_stop().
155 */
156bool kthread_should_stop(void)
157{
158	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
159}
160EXPORT_SYMBOL(kthread_should_stop);
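
/*
 * Example (illustrative sketch, not part of this file): a minimal thread
 * function that runs until kthread_stop() is called; my_dev and
 * my_dev_poll() are hypothetical.
 *
 *	static int my_poll_thread(void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *
 *		while (!kthread_should_stop()) {
 *			my_dev_poll(dev);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */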
161
162static bool __kthread_should_park(struct task_struct *k)
163{
164	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
165}
166
167/**
168 * kthread_should_park - should this kthread park now?
169 *
170 * When someone calls kthread_park() on your kthread, it will be woken
171 * and this will return true.  You should then do the necessary
172 * cleanup and call kthread_parkme()
173 *
174 * Similar to kthread_should_stop(), but this keeps the thread alive
175 * and in a park position. kthread_unpark() "restarts" the thread and
176 * calls the thread function again.
177 */
178bool kthread_should_park(void)
179{
180	return __kthread_should_park(current);
181}
182EXPORT_SYMBOL_GPL(kthread_should_park);
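
/*
 * Example (illustrative sketch): a thread function that cooperates with
 * kthread_park()/kthread_unpark(). It parks itself when asked and resumes
 * afterwards; do_unit_of_work() is hypothetical.
 *
 *	static int my_parkable_thread(void *arg)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			else
 *				do_unit_of_work(arg);
 *		}
 *		return 0;
 *	}
 */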
183
184bool kthread_should_stop_or_park(void)
185{
186	struct kthread *kthread = __to_kthread(current);
187
188	if (!kthread)
189		return false;
190
191	return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
192}
193
194/**
195 * kthread_freezable_should_stop - should this freezable kthread return now?
196 * @was_frozen: optional out parameter, indicates whether %current was frozen
197 *
198 * kthread_should_stop() for freezable kthreads, which will enter
199 * refrigerator if necessary.  This function is safe from kthread_stop() /
200 * freezer deadlock and freezable kthreads should use this function instead
201 * of calling try_to_freeze() directly.
202 */
203bool kthread_freezable_should_stop(bool *was_frozen)
204{
205	bool frozen = false;
206
207	might_sleep();
208
209	if (unlikely(freezing(current)))
210		frozen = __refrigerator(true);
211
212	if (was_frozen)
213		*was_frozen = frozen;
214
215	return kthread_should_stop();
216}
217EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
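
/*
 * Example (illustrative sketch): the usual loop of a freezable kthread. The
 * thread marks itself freezable and then uses
 * kthread_freezable_should_stop() as its freeze/stop point;
 * process_one_item() is hypothetical.
 *
 *	static int my_freezable_thread(void *arg)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen)) {
 *			if (was_frozen)
 *				pr_debug("resumed after system freeze\n");
 *			process_one_item(arg);
 *		}
 *		return 0;
 *	}
 */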
218
219/**
220 * kthread_func - return the function specified on kthread creation
221 * @task: kthread task in question
222 *
223 * Returns NULL if the task is not a kthread.
224 */
225void *kthread_func(struct task_struct *task)
226{
227	struct kthread *kthread = __to_kthread(task);
228	if (kthread)
229		return kthread->threadfn;
230	return NULL;
231}
232EXPORT_SYMBOL_GPL(kthread_func);
233
234/**
235 * kthread_data - return data value specified on kthread creation
236 * @task: kthread task in question
237 *
238 * Return the data value specified when kthread @task was created.
239 * The caller is responsible for ensuring the validity of @task when
240 * calling this function.
241 */
242void *kthread_data(struct task_struct *task)
243{
244	return to_kthread(task)->data;
245}
246EXPORT_SYMBOL_GPL(kthread_data);
247
248/**
249 * kthread_probe_data - speculative version of kthread_data()
250 * @task: possible kthread task in question
251 *
252 * @task could be a kthread task.  Return the data value specified when it
253 * was created if accessible.  If @task isn't a kthread task or its data is
254 * inaccessible for any reason, %NULL is returned.  This function requires
255 * that @task itself is safe to dereference.
256 */
257void *kthread_probe_data(struct task_struct *task)
258{
259	struct kthread *kthread = __to_kthread(task);
260	void *data = NULL;
261
262	if (kthread)
263		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
264	return data;
265}
266
267static void __kthread_parkme(struct kthread *self)
268{
269	for (;;) {
270		/*
271		 * TASK_PARKED is a special state; we must serialize against
272		 * possible pending wakeups to avoid store-store collisions on
273		 * task->state.
274		 *
275		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
277		 * wait_task_inactive() in kthread_park().
278		 */
279		set_special_state(TASK_PARKED);
280		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
281			break;
282
283		/*
284		 * Thread is going to call schedule(), do not preempt it,
285		 * or the caller of kthread_park() may spend more time in
286		 * wait_task_inactive().
287		 */
288		preempt_disable();
289		complete(&self->parked);
290		schedule_preempt_disabled();
291		preempt_enable();
292	}
293	__set_current_state(TASK_RUNNING);
294}
295
296void kthread_parkme(void)
297{
298	__kthread_parkme(to_kthread(current));
299}
300EXPORT_SYMBOL_GPL(kthread_parkme);
301
302/**
 * kthread_exit - Cause the current kthread to return @result to kthread_stop().
304 * @result: The integer value to return to kthread_stop().
305 *
 * While kthread_exit() can be called directly, it exists so that
 * functions which do some additional work, such as
 * module_put_and_kthread_exit(), can be implemented in non-modular code.
309 *
310 * Does not return.
311 */
312void __noreturn kthread_exit(long result)
313{
314	struct kthread *kthread = to_kthread(current);
315	kthread->result = result;
316	do_exit(0);
317}
318EXPORT_SYMBOL(kthread_exit);
319
320/**
321 * kthread_complete_and_exit - Exit the current kthread.
322 * @comp: Completion to complete
323 * @code: The integer value to return to kthread_stop().
324 *
 * If present, complete @comp and then return @code to kthread_stop().
326 *
327 * A kernel thread whose module may be removed after the completion of
328 * @comp can use this function to exit safely.
329 *
330 * Does not return.
331 */
332void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
333{
334	if (comp)
335		complete(comp);
336
337	kthread_exit(code);
338}
339EXPORT_SYMBOL(kthread_complete_and_exit);
340
341static int kthread(void *_create)
342{
343	static const struct sched_param param = { .sched_priority = 0 };
344	/* Copy data: it's on kthread's stack */
345	struct kthread_create_info *create = _create;
346	int (*threadfn)(void *data) = create->threadfn;
347	void *data = create->data;
348	struct completion *done;
349	struct kthread *self;
350	int ret;
351
352	self = to_kthread(current);
353
354	/* Release the structure when caller killed by a fatal signal. */
355	done = xchg(&create->done, NULL);
356	if (!done) {
357		kfree(create->full_name);
358		kfree(create);
359		kthread_exit(-EINTR);
360	}
361
362	self->full_name = create->full_name;
363	self->threadfn = threadfn;
364	self->data = data;
365
366	/*
367	 * The new thread inherited kthreadd's priority and CPU mask. Reset
368	 * back to default in case they have been changed.
369	 */
370	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
371	set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));
372
373	/* OK, tell user we're spawned, wait for stop or wakeup */
374	__set_current_state(TASK_UNINTERRUPTIBLE);
375	create->result = current;
376	/*
377	 * Thread is going to call schedule(), do not preempt it,
378	 * or the creator may spend more time in wait_task_inactive().
379	 */
380	preempt_disable();
381	complete(done);
382	schedule_preempt_disabled();
383	preempt_enable();
384
385	ret = -EINTR;
386	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
387		cgroup_kthread_ready();
388		__kthread_parkme(self);
389		ret = threadfn(data);
390	}
391	kthread_exit(ret);
392}
393
/* Called from kernel_clone() to get node information for the task about to be created. */
395int tsk_fork_get_node(struct task_struct *tsk)
396{
397#ifdef CONFIG_NUMA
398	if (tsk == kthreadd_task)
399		return tsk->pref_node_fork;
400#endif
401	return NUMA_NO_NODE;
402}
403
404static void create_kthread(struct kthread_create_info *create)
405{
406	int pid;
407
408#ifdef CONFIG_NUMA
409	current->pref_node_fork = create->node;
410#endif
411	/* We want our own signal handler (we take no signals by default). */
412	pid = kernel_thread(kthread, create, create->full_name,
413			    CLONE_FS | CLONE_FILES | SIGCHLD);
414	if (pid < 0) {
415		/* Release the structure when caller killed by a fatal signal. */
416		struct completion *done = xchg(&create->done, NULL);
417
418		kfree(create->full_name);
419		if (!done) {
420			kfree(create);
421			return;
422		}
423		create->result = ERR_PTR(pid);
424		complete(done);
425	}
426}
427
428static __printf(4, 0)
429struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
430						    void *data, int node,
431						    const char namefmt[],
432						    va_list args)
433{
434	DECLARE_COMPLETION_ONSTACK(done);
435	struct task_struct *task;
436	struct kthread_create_info *create = kmalloc(sizeof(*create),
437						     GFP_KERNEL);
438
439	if (!create)
440		return ERR_PTR(-ENOMEM);
441	create->threadfn = threadfn;
442	create->data = data;
443	create->node = node;
444	create->done = &done;
445	create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
446	if (!create->full_name) {
447		task = ERR_PTR(-ENOMEM);
448		goto free_create;
449	}
450
451	spin_lock(&kthread_create_lock);
452	list_add_tail(&create->list, &kthread_create_list);
453	spin_unlock(&kthread_create_lock);
454
455	wake_up_process(kthreadd_task);
456	/*
457	 * Wait for completion in killable state, for I might be chosen by
458	 * the OOM killer while kthreadd is trying to allocate memory for
459	 * new kernel thread.
460	 */
461	if (unlikely(wait_for_completion_killable(&done))) {
462		/*
463		 * If I was killed by a fatal signal before kthreadd (or new
464		 * kernel thread) calls complete(), leave the cleanup of this
465		 * structure to that thread.
466		 */
467		if (xchg(&create->done, NULL))
468			return ERR_PTR(-EINTR);
469		/*
470		 * kthreadd (or new kernel thread) will call complete()
471		 * shortly.
472		 */
473		wait_for_completion(&done);
474	}
475	task = create->result;
476free_create:
477	kfree(create);
478	return task;
479}
480
481/**
482 * kthread_create_on_node - create a kthread.
483 * @threadfn: the function to run until signal_pending(current).
484 * @data: data ptr for @threadfn.
485 * @node: task and thread structures for the thread are allocated on this node
486 * @namefmt: printf-style name for the thread.
487 *
488 * Description: This helper function creates and names a kernel
489 * thread.  The thread will be stopped: use wake_up_process() to start
490 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
491 * is affine to all CPUs.
492 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give NUMA_NO_NODE.
495 * When woken, the thread will run @threadfn() with @data as its
496 * argument. @threadfn() can either return directly if it is a
497 * standalone thread for which no one will call kthread_stop(), or
498 * return when 'kthread_should_stop()' is true (which means
499 * kthread_stop() has been called).  The return value should be zero
500 * or a negative error number; it will be passed to kthread_stop().
501 *
502 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
503 */
504struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
505					   void *data, int node,
506					   const char namefmt[],
507					   ...)
508{
509	struct task_struct *task;
510	va_list args;
511
512	va_start(args, namefmt);
513	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
514	va_end(args);
515
516	return task;
517}
518EXPORT_SYMBOL(kthread_create_on_node);
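
/*
 * Example (illustrative sketch): typical creation and start-up. Passing
 * NUMA_NO_NODE is the common case; the kthread_run() macro from
 * <linux/kthread.h> combines the create and wake_up_process() steps.
 * my_thread_fn and my_dev are hypothetical.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_node(my_thread_fn, my_dev, NUMA_NO_NODE,
 *				     "my_worker/%d", my_dev->id);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 */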
519
520static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
521{
522	unsigned long flags;
523
524	if (!wait_task_inactive(p, state)) {
525		WARN_ON(1);
526		return;
527	}
528
529	/* It's safe because the task is inactive. */
530	raw_spin_lock_irqsave(&p->pi_lock, flags);
531	do_set_cpus_allowed(p, mask);
532	p->flags |= PF_NO_SETAFFINITY;
533	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
534}
535
536static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
537{
538	__kthread_bind_mask(p, cpumask_of(cpu), state);
539}
540
541void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
542{
543	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
544}
545
546/**
547 * kthread_bind - bind a just-created kthread to a cpu.
548 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
550 *
551 * Description: This function is equivalent to set_cpus_allowed(),
552 * except that @cpu doesn't need to be online, and the thread must be
553 * stopped (i.e., just returned from kthread_create()).
554 */
555void kthread_bind(struct task_struct *p, unsigned int cpu)
556{
557	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
558}
559EXPORT_SYMBOL(kthread_bind);
560
561/**
562 * kthread_create_on_cpu - Create a cpu bound kthread
563 * @threadfn: the function to run until signal_pending(current).
564 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread bound to @cpu.
570 */
571struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
572					  void *data, unsigned int cpu,
573					  const char *namefmt)
574{
575	struct task_struct *p;
576
577	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
578				   cpu);
579	if (IS_ERR(p))
580		return p;
581	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind it again when unparking the thread. */
583	to_kthread(p)->cpu = cpu;
584	return p;
585}
586EXPORT_SYMBOL(kthread_create_on_cpu);
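
/*
 * Example (illustrative sketch): create a CPU-bound thread for each online
 * CPU. The name format must contain "%u", which is filled in with the CPU
 * number; my_percpu_fn and my_tasks[] are hypothetical.
 *
 *	unsigned int cpu;
 *
 *	for_each_online_cpu(cpu) {
 *		struct task_struct *t;
 *
 *		t = kthread_create_on_cpu(my_percpu_fn, NULL, cpu,
 *					  "my_helper/%u");
 *		if (IS_ERR(t))
 *			break;
 *		my_tasks[cpu] = t;
 *		wake_up_process(t);
 *	}
 */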
587
588void kthread_set_per_cpu(struct task_struct *k, int cpu)
589{
590	struct kthread *kthread = to_kthread(k);
591	if (!kthread)
592		return;
593
594	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
595
596	if (cpu < 0) {
597		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
598		return;
599	}
600
601	kthread->cpu = cpu;
602	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
603}
604
605bool kthread_is_per_cpu(struct task_struct *p)
606{
607	struct kthread *kthread = __to_kthread(p);
608	if (!kthread)
609		return false;
610
611	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
612}
613
614/**
615 * kthread_unpark - unpark a thread created by kthread_create().
616 * @k:		thread created by kthread_create().
617 *
 * Sets kthread_should_park() for @k to return false and wakes it. If the
 * thread is marked percpu then it is bound to the cpu again before being
 * woken.
621 */
622void kthread_unpark(struct task_struct *k)
623{
624	struct kthread *kthread = to_kthread(k);
625
626	/*
627	 * Newly created kthread was parked when the CPU was offline.
628	 * The binding was lost and we need to set it again.
629	 */
630	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
631		__kthread_bind(k, kthread->cpu, TASK_PARKED);
632
633	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
634	/*
635	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
636	 */
637	wake_up_state(k, TASK_PARKED);
638}
639EXPORT_SYMBOL_GPL(kthread_unpark);
640
641/**
642 * kthread_park - park a thread created by kthread_create().
643 * @k: thread created by kthread_create().
644 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park. This can also be called after kthread_create()
647 * instead of calling wake_up_process(): the thread will park without
648 * calling threadfn().
649 *
650 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
651 * If called by the kthread itself just the park bit is set.
652 */
653int kthread_park(struct task_struct *k)
654{
655	struct kthread *kthread = to_kthread(k);
656
657	if (WARN_ON(k->flags & PF_EXITING))
658		return -ENOSYS;
659
660	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
661		return -EBUSY;
662
663	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
664	if (k != current) {
665		wake_up_process(k);
666		/*
667		 * Wait for __kthread_parkme() to complete(), this means we
668		 * _will_ have TASK_PARKED and are about to call schedule().
669		 */
670		wait_for_completion(&kthread->parked);
671		/*
672		 * Now wait for that schedule() to complete and the task to
673		 * get scheduled out.
674		 */
675		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
676	}
677
678	return 0;
679}
680EXPORT_SYMBOL_GPL(kthread_park);
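
/*
 * Example (illustrative sketch): pausing a thread around a reconfiguration.
 * The thread function must poll kthread_should_park() and call
 * kthread_parkme(), as in the sketch further above; my_task and
 * reprogram_hardware() are hypothetical.
 *
 *	int ret;
 *
 *	ret = kthread_park(my_task);
 *	if (ret)
 *		return ret;
 *	reprogram_hardware();
 *	kthread_unpark(my_task);
 */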
681
682/**
683 * kthread_stop - stop a thread created by kthread_create().
684 * @k: thread created by kthread_create().
685 *
686 * Sets kthread_should_stop() for @k to return true, wakes it, and
687 * waits for it to exit. This can also be called after kthread_create()
688 * instead of calling wake_up_process(): the thread will exit without
689 * calling threadfn().
690 *
691 * If threadfn() may call kthread_exit() itself, the caller must ensure
692 * task_struct can't go away.
693 *
694 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
695 * was never called.
696 */
697int kthread_stop(struct task_struct *k)
698{
699	struct kthread *kthread;
700	int ret;
701
702	trace_sched_kthread_stop(k);
703
704	get_task_struct(k);
705	kthread = to_kthread(k);
706	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
707	kthread_unpark(k);
708	set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
709	wake_up_process(k);
710	wait_for_completion(&kthread->exited);
711	ret = kthread->result;
712	put_task_struct(k);
713
714	trace_sched_kthread_stop_ret(ret);
715	return ret;
716}
717EXPORT_SYMBOL(kthread_stop);
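
/*
 * Example (illustrative sketch): tearing down a thread started with
 * kthread_run(). The value returned by the thread function is propagated
 * back to the caller of kthread_stop(); my_task is hypothetical.
 *
 *	int err;
 *
 *	err = kthread_stop(my_task);
 *	if (err && err != -EINTR)
 *		pr_warn("worker exited with error %d\n", err);
 */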
718
719/**
720 * kthread_stop_put - stop a thread and put its task struct
721 * @k: thread created by kthread_create().
722 *
 * Stops a thread created by kthread_create() and puts its task_struct.
724 * Only use when holding an extra task struct reference obtained by
725 * calling get_task_struct().
726 */
727int kthread_stop_put(struct task_struct *k)
728{
729	int ret;
730
731	ret = kthread_stop(k);
732	put_task_struct(k);
733	return ret;
734}
735EXPORT_SYMBOL(kthread_stop_put);
736
737int kthreadd(void *unused)
738{
739	struct task_struct *tsk = current;
740
741	/* Setup a clean context for our children to inherit. */
742	set_task_comm(tsk, "kthreadd");
743	ignore_signals(tsk);
744	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
745	set_mems_allowed(node_states[N_MEMORY]);
746
747	current->flags |= PF_NOFREEZE;
748	cgroup_init_kthreadd();
749
750	for (;;) {
751		set_current_state(TASK_INTERRUPTIBLE);
752		if (list_empty(&kthread_create_list))
753			schedule();
754		__set_current_state(TASK_RUNNING);
755
756		spin_lock(&kthread_create_lock);
757		while (!list_empty(&kthread_create_list)) {
758			struct kthread_create_info *create;
759
760			create = list_entry(kthread_create_list.next,
761					    struct kthread_create_info, list);
762			list_del_init(&create->list);
763			spin_unlock(&kthread_create_lock);
764
765			create_kthread(create);
766
767			spin_lock(&kthread_create_lock);
768		}
769		spin_unlock(&kthread_create_lock);
770	}
771
772	return 0;
773}
774
775void __kthread_init_worker(struct kthread_worker *worker,
776				const char *name,
777				struct lock_class_key *key)
778{
779	memset(worker, 0, sizeof(struct kthread_worker));
780	raw_spin_lock_init(&worker->lock);
781	lockdep_set_class_and_name(&worker->lock, key, name);
782	INIT_LIST_HEAD(&worker->work_list);
783	INIT_LIST_HEAD(&worker->delayed_work_list);
784}
785EXPORT_SYMBOL_GPL(__kthread_init_worker);
786
787/**
788 * kthread_worker_fn - kthread function to process kthread_worker
789 * @worker_ptr: pointer to initialized kthread_worker
790 *
791 * This function implements the main cycle of kthread worker. It processes
792 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
793 * is empty.
794 *
 * Work items must not hold any locks or leave preemption or interrupts
 * disabled when they finish. A safe point for freezing is provided after
 * one work item finishes and before the next one is started.
 *
 * Also, a work item must not be handled by more than one worker at the same
 * time; see also kthread_queue_work().
801 */
802int kthread_worker_fn(void *worker_ptr)
803{
804	struct kthread_worker *worker = worker_ptr;
805	struct kthread_work *work;
806
807	/*
808	 * FIXME: Update the check and remove the assignment when all kthread
809	 * worker users are created using kthread_create_worker*() functions.
810	 */
811	WARN_ON(worker->task && worker->task != current);
812	worker->task = current;
813
814	if (worker->flags & KTW_FREEZABLE)
815		set_freezable();
816
817repeat:
818	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
819
820	if (kthread_should_stop()) {
821		__set_current_state(TASK_RUNNING);
822		raw_spin_lock_irq(&worker->lock);
823		worker->task = NULL;
824		raw_spin_unlock_irq(&worker->lock);
825		return 0;
826	}
827
828	work = NULL;
829	raw_spin_lock_irq(&worker->lock);
830	if (!list_empty(&worker->work_list)) {
831		work = list_first_entry(&worker->work_list,
832					struct kthread_work, node);
833		list_del_init(&work->node);
834	}
835	worker->current_work = work;
836	raw_spin_unlock_irq(&worker->lock);
837
838	if (work) {
839		kthread_work_func_t func = work->func;
840		__set_current_state(TASK_RUNNING);
841		trace_sched_kthread_work_execute_start(work);
842		work->func(work);
843		/*
844		 * Avoid dereferencing work after this point.  The trace
845		 * event only cares about the address.
846		 */
847		trace_sched_kthread_work_execute_end(work, func);
848	} else if (!freezing(current))
849		schedule();
850
851	try_to_freeze();
852	cond_resched();
853	goto repeat;
854}
855EXPORT_SYMBOL_GPL(kthread_worker_fn);
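
/*
 * Example (illustrative sketch): the legacy pattern referred to in the FIXME
 * above, where the caller owns the struct kthread_worker and only the
 * processing thread is created here.
 *
 *	static DEFINE_KTHREAD_WORKER(my_worker);
 *
 *	struct task_struct *t;
 *
 *	t = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 */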
856
857static __printf(3, 0) struct kthread_worker *
858__kthread_create_worker(int cpu, unsigned int flags,
859			const char namefmt[], va_list args)
860{
861	struct kthread_worker *worker;
862	struct task_struct *task;
863	int node = NUMA_NO_NODE;
864
865	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
866	if (!worker)
867		return ERR_PTR(-ENOMEM);
868
869	kthread_init_worker(worker);
870
871	if (cpu >= 0)
872		node = cpu_to_node(cpu);
873
874	task = __kthread_create_on_node(kthread_worker_fn, worker,
875						node, namefmt, args);
876	if (IS_ERR(task))
877		goto fail_task;
878
879	if (cpu >= 0)
880		kthread_bind(task, cpu);
881
882	worker->flags = flags;
883	worker->task = task;
884	wake_up_process(task);
885	return worker;
886
887fail_task:
888	kfree(worker);
889	return ERR_CAST(task);
890}
891
892/**
893 * kthread_create_worker - create a kthread worker
894 * @flags: flags modifying the default behavior of the worker
895 * @namefmt: printf-style name for the kthread worker (task).
896 *
897 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
898 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
899 * when the caller was killed by a fatal signal.
900 */
901struct kthread_worker *
902kthread_create_worker(unsigned int flags, const char namefmt[], ...)
903{
904	struct kthread_worker *worker;
905	va_list args;
906
907	va_start(args, namefmt);
908	worker = __kthread_create_worker(-1, flags, namefmt, args);
909	va_end(args);
910
911	return worker;
912}
913EXPORT_SYMBOL(kthread_create_worker);
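
/*
 * Example (illustrative sketch): create a dedicated worker, queue a work
 * item on it, and tear it down again; my_work_fn() is hypothetical.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		pr_info("processed one item\n");
 *	}
 *
 * and in the setup/teardown path:
 *
 *	struct kthread_worker *worker;
 *	struct kthread_work work;
 *
 *	worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *
 *	kthread_init_work(&work, my_work_fn);
 *	kthread_queue_work(worker, &work);
 *	kthread_flush_work(&work);
 *	kthread_destroy_worker(worker);
 */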
914
915/**
916 * kthread_create_worker_on_cpu - create a kthread worker and bind it
917 *	to a given CPU and the associated NUMA node.
918 * @cpu: CPU number
919 * @flags: flags modifying the default behavior of the worker
920 * @namefmt: printf-style name for the kthread worker (task).
921 *
922 * Use a valid CPU number if you want to bind the kthread worker
923 * to the given CPU and the associated NUMA node.
924 *
925 * A good practice is to add the cpu number also into the worker name.
926 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
927 *
928 * CPU hotplug:
929 * The kthread worker API is simple and generic. It just provides a way
930 * to create, use, and destroy workers.
931 *
932 * It is up to the API user how to handle CPU hotplug. They have to decide
933 * how to handle pending work items, prevent queuing new ones, and
934 * restore the functionality when the CPU goes off and on. There are a
935 * few catches:
936 *
 *    - CPU affinity gets lost when the worker is scheduled on an offline CPU.
 *
 *    - The worker might not exist if the CPU was offline when the user
 *      created the workers.
941 *
942 * Good practice is to implement two CPU hotplug callbacks and to
943 * destroy/create the worker when the CPU goes down/up.
944 *
945 * Return:
946 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
947 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
948 * when the caller was killed by a fatal signal.
949 */
950struct kthread_worker *
951kthread_create_worker_on_cpu(int cpu, unsigned int flags,
952			     const char namefmt[], ...)
953{
954	struct kthread_worker *worker;
955	va_list args;
956
957	va_start(args, namefmt);
958	worker = __kthread_create_worker(cpu, flags, namefmt, args);
959	va_end(args);
960
961	return worker;
962}
963EXPORT_SYMBOL(kthread_create_worker_on_cpu);
964
965/*
966 * Returns true when the work could not be queued at the moment.
967 * It happens when it is already pending in a worker list
968 * or when it is being cancelled.
969 */
970static inline bool queuing_blocked(struct kthread_worker *worker,
971				   struct kthread_work *work)
972{
973	lockdep_assert_held(&worker->lock);
974
975	return !list_empty(&work->node) || work->canceling;
976}
977
978static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
979					     struct kthread_work *work)
980{
981	lockdep_assert_held(&worker->lock);
982	WARN_ON_ONCE(!list_empty(&work->node));
983	/* Do not use a work with >1 worker, see kthread_queue_work() */
984	WARN_ON_ONCE(work->worker && work->worker != worker);
985}
986
987/* insert @work before @pos in @worker */
988static void kthread_insert_work(struct kthread_worker *worker,
989				struct kthread_work *work,
990				struct list_head *pos)
991{
992	kthread_insert_work_sanity_check(worker, work);
993
994	trace_sched_kthread_work_queue_work(worker, work);
995
996	list_add_tail(&work->node, pos);
997	work->worker = worker;
998	if (!worker->current_work && likely(worker->task))
999		wake_up_process(worker->task);
1000}
1001
1002/**
1003 * kthread_queue_work - queue a kthread_work
1004 * @worker: target kthread_worker
1005 * @work: kthread_work to queue
1006 *
 * Queue @work for async execution by @worker.  @worker must have been
 * created with kthread_create_worker().  Returns %true if @work was
 * successfully queued, %false if it was already pending.
1010 *
1011 * Reinitialize the work if it needs to be used by another worker.
1012 * For example, when the worker was stopped and started again.
1013 */
1014bool kthread_queue_work(struct kthread_worker *worker,
1015			struct kthread_work *work)
1016{
1017	bool ret = false;
1018	unsigned long flags;
1019
1020	raw_spin_lock_irqsave(&worker->lock, flags);
1021	if (!queuing_blocked(worker, work)) {
1022		kthread_insert_work(worker, work, &worker->work_list);
1023		ret = true;
1024	}
1025	raw_spin_unlock_irqrestore(&worker->lock, flags);
1026	return ret;
1027}
1028EXPORT_SYMBOL_GPL(kthread_queue_work);
1029
1030/**
1031 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
1032 *	delayed work when the timer expires.
1033 * @t: pointer to the expired timer
1034 *
 * The prototype of the function is defined by struct timer_list.
 * It is called from an irq-safe timer with interrupts already disabled.
1037 */
1038void kthread_delayed_work_timer_fn(struct timer_list *t)
1039{
1040	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
1041	struct kthread_work *work = &dwork->work;
1042	struct kthread_worker *worker = work->worker;
1043	unsigned long flags;
1044
1045	/*
1046	 * This might happen when a pending work is reinitialized.
	 * It means that the work is being used in a wrong way.
1048	 */
1049	if (WARN_ON_ONCE(!worker))
1050		return;
1051
1052	raw_spin_lock_irqsave(&worker->lock, flags);
1053	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1054	WARN_ON_ONCE(work->worker != worker);
1055
1056	/* Move the work from worker->delayed_work_list. */
1057	WARN_ON_ONCE(list_empty(&work->node));
1058	list_del_init(&work->node);
1059	if (!work->canceling)
1060		kthread_insert_work(worker, work, &worker->work_list);
1061
1062	raw_spin_unlock_irqrestore(&worker->lock, flags);
1063}
1064EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
1065
1066static void __kthread_queue_delayed_work(struct kthread_worker *worker,
1067					 struct kthread_delayed_work *dwork,
1068					 unsigned long delay)
1069{
1070	struct timer_list *timer = &dwork->timer;
1071	struct kthread_work *work = &dwork->work;
1072
1073	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
1074
1075	/*
1076	 * If @delay is 0, queue @dwork->work immediately.  This is for
1077	 * both optimization and correctness.  The earliest @timer can
1078	 * expire is on the closest next tick and delayed_work users depend
1079	 * on that there's no such delay when @delay is 0.
1080	 */
1081	if (!delay) {
1082		kthread_insert_work(worker, work, &worker->work_list);
1083		return;
1084	}
1085
1086	/* Be paranoid and try to detect possible races already now. */
1087	kthread_insert_work_sanity_check(worker, work);
1088
1089	list_add(&work->node, &worker->delayed_work_list);
1090	work->worker = worker;
1091	timer->expires = jiffies + delay;
1092	add_timer(timer);
1093}
1094
1095/**
1096 * kthread_queue_delayed_work - queue the associated kthread work
1097 *	after a delay.
1098 * @worker: target kthread_worker
1099 * @dwork: kthread_delayed_work to queue
1100 * @delay: number of jiffies to wait before queuing
1101 *
 * If the work is not already pending, start a timer that will queue
 * the work after the given @delay. If @delay is zero, queue the
 * work immediately.
 *
 * Return: %false if @work was already pending, meaning that either its
 * timer was running or the work was already queued; %true otherwise.
1109 */
1110bool kthread_queue_delayed_work(struct kthread_worker *worker,
1111				struct kthread_delayed_work *dwork,
1112				unsigned long delay)
1113{
1114	struct kthread_work *work = &dwork->work;
1115	unsigned long flags;
1116	bool ret = false;
1117
1118	raw_spin_lock_irqsave(&worker->lock, flags);
1119
1120	if (!queuing_blocked(worker, work)) {
1121		__kthread_queue_delayed_work(worker, dwork, delay);
1122		ret = true;
1123	}
1124
1125	raw_spin_unlock_irqrestore(&worker->lock, flags);
1126	return ret;
1127}
1128EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
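
/*
 * Example (illustrative sketch): arm a delayed work item so that it runs on
 * the worker roughly 100ms from now. my_timeout_fn() is hypothetical and
 * worker is assumed to have been created as in the earlier example.
 *
 *	static struct kthread_delayed_work my_dwork;
 *
 *	kthread_init_delayed_work(&my_dwork, my_timeout_fn);
 *	kthread_queue_delayed_work(worker, &my_dwork, msecs_to_jiffies(100));
 */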
1129
1130struct kthread_flush_work {
1131	struct kthread_work	work;
1132	struct completion	done;
1133};
1134
1135static void kthread_flush_work_fn(struct kthread_work *work)
1136{
1137	struct kthread_flush_work *fwork =
1138		container_of(work, struct kthread_flush_work, work);
1139	complete(&fwork->done);
1140}
1141
1142/**
1143 * kthread_flush_work - flush a kthread_work
1144 * @work: work to flush
1145 *
1146 * If @work is queued or executing, wait for it to finish execution.
1147 */
1148void kthread_flush_work(struct kthread_work *work)
1149{
1150	struct kthread_flush_work fwork = {
1151		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1152		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1153	};
1154	struct kthread_worker *worker;
1155	bool noop = false;
1156
1157	worker = work->worker;
1158	if (!worker)
1159		return;
1160
1161	raw_spin_lock_irq(&worker->lock);
1162	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1163	WARN_ON_ONCE(work->worker != worker);
1164
1165	if (!list_empty(&work->node))
1166		kthread_insert_work(worker, &fwork.work, work->node.next);
1167	else if (worker->current_work == work)
1168		kthread_insert_work(worker, &fwork.work,
1169				    worker->work_list.next);
1170	else
1171		noop = true;
1172
1173	raw_spin_unlock_irq(&worker->lock);
1174
1175	if (!noop)
1176		wait_for_completion(&fwork.done);
1177}
1178EXPORT_SYMBOL_GPL(kthread_flush_work);
1179
1180/*
1181 * Make sure that the timer is neither set nor running and could
1182 * not manipulate the work list_head any longer.
1183 *
 * The function is called under worker->lock. The lock is temporarily
 * released but the timer can't be set again in the meantime.
1186 */
1187static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
1188					      unsigned long *flags)
1189{
1190	struct kthread_delayed_work *dwork =
1191		container_of(work, struct kthread_delayed_work, work);
1192	struct kthread_worker *worker = work->worker;
1193
1194	/*
1195	 * del_timer_sync() must be called to make sure that the timer
	 * callback is not running. The lock must be temporarily released
1197	 * to avoid a deadlock with the callback. In the meantime,
1198	 * any queuing is blocked by setting the canceling counter.
1199	 */
1200	work->canceling++;
1201	raw_spin_unlock_irqrestore(&worker->lock, *flags);
1202	del_timer_sync(&dwork->timer);
1203	raw_spin_lock_irqsave(&worker->lock, *flags);
1204	work->canceling--;
1205}
1206
1207/*
1208 * This function removes the work from the worker queue.
1209 *
1210 * It is called under worker->lock. The caller must make sure that
1211 * the timer used by delayed work is not running, e.g. by calling
1212 * kthread_cancel_delayed_work_timer().
1213 *
 * The work might still be in use when this function finishes: it may still
 * be the current_work being processed by the worker.
1216 *
1217 * Return: %true if @work was pending and successfully canceled,
1218 *	%false if @work was not pending
1219 */
1220static bool __kthread_cancel_work(struct kthread_work *work)
1221{
1222	/*
1223	 * Try to remove the work from a worker list. It might either
1224	 * be from worker->work_list or from worker->delayed_work_list.
1225	 */
1226	if (!list_empty(&work->node)) {
1227		list_del_init(&work->node);
1228		return true;
1229	}
1230
1231	return false;
1232}
1233
1234/**
1235 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1236 * @worker: kthread worker to use
1237 * @dwork: kthread delayed work to queue
1238 * @delay: number of jiffies to wait before queuing
1239 *
1240 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
1241 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
1242 * @work is guaranteed to be queued immediately.
1243 *
1244 * Return: %false if @dwork was idle and queued, %true otherwise.
1245 *
1246 * A special case is when the work is being canceled in parallel.
1247 * It might be caused either by the real kthread_cancel_delayed_work_sync()
1248 * or yet another kthread_mod_delayed_work() call. We let the other command
1249 * win and return %true here. The return value can be used for reference
1250 * counting and the number of queued works stays the same. Anyway, the caller
1251 * is supposed to synchronize these operations a reasonable way.
1252 *
1253 * This function is safe to call from any context including IRQ handler.
1254 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1255 * for details.
1256 */
1257bool kthread_mod_delayed_work(struct kthread_worker *worker,
1258			      struct kthread_delayed_work *dwork,
1259			      unsigned long delay)
1260{
1261	struct kthread_work *work = &dwork->work;
1262	unsigned long flags;
1263	int ret;
1264
1265	raw_spin_lock_irqsave(&worker->lock, flags);
1266
1267	/* Do not bother with canceling when never queued. */
1268	if (!work->worker) {
1269		ret = false;
1270		goto fast_queue;
1271	}
1272
1273	/* Work must not be used with >1 worker, see kthread_queue_work() */
1274	WARN_ON_ONCE(work->worker != worker);
1275
1276	/*
	 * Temporarily cancel the work but do not fight with another command
1278	 * that is canceling the work as well.
1279	 *
1280	 * It is a bit tricky because of possible races with another
1281	 * mod_delayed_work() and cancel_delayed_work() callers.
1282	 *
1283	 * The timer must be canceled first because worker->lock is released
1284	 * when doing so. But the work can be removed from the queue (list)
1285	 * only when it can be queued again so that the return value can
1286	 * be used for reference counting.
1287	 */
1288	kthread_cancel_delayed_work_timer(work, &flags);
1289	if (work->canceling) {
1290		/* The number of works in the queue does not change. */
1291		ret = true;
1292		goto out;
1293	}
1294	ret = __kthread_cancel_work(work);
1295
1296fast_queue:
1297	__kthread_queue_delayed_work(worker, dwork, delay);
1298out:
1299	raw_spin_unlock_irqrestore(&worker->lock, flags);
1300	return ret;
1301}
1302EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
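
/*
 * Example (illustrative sketch): a debounce pattern. Every event pushes the
 * deadline back, so the delayed work only runs once events have been quiet
 * for 50ms; my_dwork is assumed to be initialized as in the example above.
 *
 *	static void my_event(struct kthread_worker *worker)
 *	{
 *		kthread_mod_delayed_work(worker, &my_dwork,
 *					 msecs_to_jiffies(50));
 *	}
 */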
1303
1304static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1305{
1306	struct kthread_worker *worker = work->worker;
1307	unsigned long flags;
1308	int ret = false;
1309
1310	if (!worker)
1311		goto out;
1312
1313	raw_spin_lock_irqsave(&worker->lock, flags);
1314	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1315	WARN_ON_ONCE(work->worker != worker);
1316
1317	if (is_dwork)
1318		kthread_cancel_delayed_work_timer(work, &flags);
1319
1320	ret = __kthread_cancel_work(work);
1321
1322	if (worker->current_work != work)
1323		goto out_fast;
1324
1325	/*
1326	 * The work is in progress and we need to wait with the lock released.
1327	 * In the meantime, block any queuing by setting the canceling counter.
1328	 */
1329	work->canceling++;
1330	raw_spin_unlock_irqrestore(&worker->lock, flags);
1331	kthread_flush_work(work);
1332	raw_spin_lock_irqsave(&worker->lock, flags);
1333	work->canceling--;
1334
1335out_fast:
1336	raw_spin_unlock_irqrestore(&worker->lock, flags);
1337out:
1338	return ret;
1339}
1340
1341/**
1342 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1343 * @work: the kthread work to cancel
1344 *
1345 * Cancel @work and wait for its execution to finish.  This function
1346 * can be used even if the work re-queues itself. On return from this
1347 * function, @work is guaranteed to be not pending or executing on any CPU.
1348 *
1349 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1350 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
1351 *
1352 * The caller must ensure that the worker on which @work was last
1353 * queued can't be destroyed before this function returns.
1354 *
1355 * Return: %true if @work was pending, %false otherwise.
1356 */
1357bool kthread_cancel_work_sync(struct kthread_work *work)
1358{
1359	return __kthread_cancel_work_sync(work, false);
1360}
1361EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1362
1363/**
1364 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1365 *	wait for it to finish.
1366 * @dwork: the kthread delayed work to cancel
1367 *
1368 * This is kthread_cancel_work_sync() for delayed works.
1369 *
1370 * Return: %true if @dwork was pending, %false otherwise.
1371 */
1372bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1373{
1374	return __kthread_cancel_work_sync(&dwork->work, true);
1375}
1376EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1377
1378/**
1379 * kthread_flush_worker - flush all current works on a kthread_worker
1380 * @worker: worker to flush
1381 *
1382 * Wait until all currently executing or pending works on @worker are
1383 * finished.
1384 */
1385void kthread_flush_worker(struct kthread_worker *worker)
1386{
1387	struct kthread_flush_work fwork = {
1388		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1389		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1390	};
1391
1392	kthread_queue_work(worker, &fwork.work);
1393	wait_for_completion(&fwork.done);
1394}
1395EXPORT_SYMBOL_GPL(kthread_flush_worker);
1396
1397/**
1398 * kthread_destroy_worker - destroy a kthread worker
1399 * @worker: worker to be destroyed
1400 *
1401 * Flush and destroy @worker.  The simple flush is enough because the kthread
1402 * worker API is used only in trivial scenarios.  There are no multi-step state
1403 * machines needed.
1404 *
 * Note that this function is not responsible for handling delayed work, so
 * the caller should make sure all delayed work items are either queued or
 * canceled before invoking this function.
1408 */
1409void kthread_destroy_worker(struct kthread_worker *worker)
1410{
1411	struct task_struct *task;
1412
1413	task = worker->task;
1414	if (WARN_ON(!task))
1415		return;
1416
1417	kthread_flush_worker(worker);
1418	kthread_stop(task);
1419	WARN_ON(!list_empty(&worker->delayed_work_list));
1420	WARN_ON(!list_empty(&worker->work_list));
1421	kfree(worker);
1422}
1423EXPORT_SYMBOL(kthread_destroy_worker);
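
/*
 * Example (illustrative sketch): shutdown ordering. Delayed work is dealt
 * with before the worker is destroyed, per the note above; my_dwork and
 * my_work are hypothetical.
 *
 *	kthread_cancel_delayed_work_sync(&my_dwork);
 *	kthread_cancel_work_sync(&my_work);
 *	kthread_destroy_worker(worker);
 */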
1424
1425/**
1426 * kthread_use_mm - make the calling kthread operate on an address space
1427 * @mm: address space to operate on
1428 */
1429void kthread_use_mm(struct mm_struct *mm)
1430{
1431	struct mm_struct *active_mm;
1432	struct task_struct *tsk = current;
1433
1434	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1435	WARN_ON_ONCE(tsk->mm);
1436
1437	/*
1438	 * It is possible for mm to be the same as tsk->active_mm, but
1439	 * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
1440	 * because these references are not equivalent.
1441	 */
1442	mmgrab(mm);
1443
1444	task_lock(tsk);
1445	/* Hold off tlb flush IPIs while switching mm's */
1446	local_irq_disable();
1447	active_mm = tsk->active_mm;
1448	tsk->active_mm = mm;
1449	tsk->mm = mm;
1450	membarrier_update_current_mm(mm);
1451	switch_mm_irqs_off(active_mm, mm, tsk);
1452	local_irq_enable();
1453	task_unlock(tsk);
1454#ifdef finish_arch_post_lock_switch
1455	finish_arch_post_lock_switch();
1456#endif
1457
1458	/*
1459	 * When a kthread starts operating on an address space, the loop
1460	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm has been set, and not issue an IPI. Membarrier requires a
1462	 * memory barrier after storing to tsk->mm, before accessing
1463	 * user-space memory. A full memory barrier for membarrier
1464	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
1465	 * mmdrop_lazy_tlb().
1466	 */
1467	mmdrop_lazy_tlb(active_mm);
1468}
1469EXPORT_SYMBOL_GPL(kthread_use_mm);
1470
1471/**
1472 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1473 * @mm: address space to operate on
1474 */
1475void kthread_unuse_mm(struct mm_struct *mm)
1476{
1477	struct task_struct *tsk = current;
1478
1479	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1480	WARN_ON_ONCE(!tsk->mm);
1481
1482	task_lock(tsk);
1483	/*
1484	 * When a kthread stops operating on an address space, the loop
1485	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm has been cleared, and not issue an IPI. Membarrier requires a
1487	 * memory barrier after accessing user-space memory, before
1488	 * clearing tsk->mm.
1489	 */
1490	smp_mb__after_spinlock();
1491	local_irq_disable();
1492	tsk->mm = NULL;
1493	membarrier_update_current_mm(NULL);
1494	mmgrab_lazy_tlb(mm);
1495	/* active_mm is still 'mm' */
1496	enter_lazy_tlb(mm, tsk);
1497	local_irq_enable();
1498	task_unlock(tsk);
1499
1500	mmdrop(mm);
1501}
1502EXPORT_SYMBOL_GPL(kthread_unuse_mm);
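
/*
 * Example (illustrative sketch): a kthread temporarily adopting a user
 * address space to access user memory on behalf of another task, as
 * vhost-style offload threads do. mm is assumed to be a counted reference
 * obtained by the caller (e.g. via mmget_not_zero()); uaddr, val and ret
 * are hypothetical.
 *
 *	kthread_use_mm(mm);
 *	if (copy_from_user(&val, uaddr, sizeof(val)))
 *		ret = -EFAULT;
 *	kthread_unuse_mm(mm);
 */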
1503
1504#ifdef CONFIG_BLK_CGROUP
1505/**
1506 * kthread_associate_blkcg - associate blkcg to current kthread
1507 * @css: the cgroup info
1508 *
1509 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs to attach the cgroup info
 * of the original threads instead of that of the current thread. This
 * function stores the original thread's cgroup info in the current kthread
 * context for later retrieval.
1514 */
1515void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1516{
1517	struct kthread *kthread;
1518
1519	if (!(current->flags & PF_KTHREAD))
1520		return;
1521	kthread = to_kthread(current);
1522	if (!kthread)
1523		return;
1524
1525	if (kthread->blkcg_css) {
1526		css_put(kthread->blkcg_css);
1527		kthread->blkcg_css = NULL;
1528	}
1529	if (css) {
1530		css_get(css);
1531		kthread->blkcg_css = css;
1532	}
1533}
1534EXPORT_SYMBOL(kthread_associate_blkcg);
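
/*
 * Example (illustrative sketch): a loop-device style kthread charging I/O
 * to the cgroup of the original submitter; submitter_css is a hypothetical
 * cgroup_subsys_state saved when the request was queued.
 *
 *	kthread_associate_blkcg(submitter_css);
 *	submit_bio(bio);
 *	kthread_associate_blkcg(NULL);
 */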
1535
1536/**
1537 * kthread_blkcg - get associated blkcg css of current kthread
1538 *
1539 * Current thread must be a kthread.
1540 */
1541struct cgroup_subsys_state *kthread_blkcg(void)
1542{
1543	struct kthread *kthread;
1544
1545	if (current->flags & PF_KTHREAD) {
1546		kthread = to_kthread(current);
1547		if (kthread)
1548			return kthread->blkcg_css;
1549	}
1550	return NULL;
1551}
1552#endif
1553