// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "ftrace_internal.h"
#include "trace_output.h"
#include "trace_stat.h"

/* Flags that do not get reset */
#define FTRACE_NOCLEAR_FLAGS	(FTRACE_FL_DISABLED | FTRACE_FL_TOUCHED | \
				 FTRACE_FL_MODIFIED)

#define FTRACE_INVALID_FUNCTION		"__ftrace_invalid_address__"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#endif

enum {
	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
};

struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int __maybe_unused last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL || tr->function_no_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

DEFINE_MUTEX(ftrace_lock);

struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
struct ftrace_ops global_ops;

/* Defined by vmlinux.lds.h; see the comment above arch_ftrace_ops_list_func for details */
void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
/*
 * Stub used to invoke the list ops without requiring a separate trampoline.
 */
const struct ftrace_ops ftrace_list_ops = {
	.func	= ftrace_ops_list_func,
	.flags	= FTRACE_OPS_FL_STUB,
};

static void ftrace_ops_nop_func(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op,
				struct ftrace_regs *fregs)
{
	/* do nothing */
}

/*
 * Stub used when a call site is disabled. May be called transiently by threads
 * which have made it into ftrace_caller but haven't yet recovered the ops at
 * the point the call site is disabled.
 */
const struct ftrace_ops ftrace_nop_ops = {
	.func	= ftrace_ops_nop_func,
	.flags  = FTRACE_OPS_FL_STUB,
};
#endif

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	int pid;

	if (tr) {
		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
		if (pid == FTRACE_PID_IGNORE)
			return;
		if (pid != FTRACE_PID_TRACE &&
		    pid != current->pid)
			return;
	}

	op->saved_func(ip, parent_ip, op, fregs);
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic or RCU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
	    FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (rcu_dereference_protected(ftrace_ops_list->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	synchronize_rcu_tasks_rude();
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}
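
/*
 * Illustrative summary (editorial note, not kernel documentation): the
 * static-tracing update above relies on this ordering:
 *
 *   1. ftrace_trace_function = ftrace_ops_list_func  (safe with any op)
 *   2. synchronize_rcu_tasks_rude()                  (all CPUs see step 1)
 *   3. function_trace_op = set_function_trace_op
 *   4. smp_wmb() plus an IPI-forced smp_rmb()        (all CPUs see step 3)
 *   5. ftrace_trace_function = func                  (now safe to switch)
 *
 * A CPU that observes the final func from step 5 is therefore guaranteed
 * to also observe the matching function_trace_op from step 3.
 */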

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}
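
/*
 * A minimal sketch (illustrative only) of the lockless reader that the
 * publication order above protects; it mirrors the traversal done by
 * do_for_each_ftrace_op():
 *
 *	op = rcu_dereference_raw(ftrace_ops_list);
 *	while (op != &ftrace_list_end) {
 *		op->func(ip, parent_ip, op, fregs);
 *		op = rcu_dereference_raw(op->next);
 *	}
 *
 * Because ops->next is published before ops itself, a reader can never
 * follow a visible ops to an uninitialized next pointer.
 */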

static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
	if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
		return -EBUSY;

	if (!is_kernel_core_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}
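
/*
 * Caller-side sketch (illustrative only, not part of this file): users
 * normally reach this function through the public
 * register_ftrace_function(), along the lines of:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op,
 *				struct ftrace_regs *fregs)
 *	{
 *		// called at the entry of every traced function
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *
 * If such an ops lives in module memory, is_kernel_core_data() fails
 * above and FTRACE_OPS_FL_DYNAMIC is set automatically.
 */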

int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
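
/*
 * Rough capacity math (illustrative; exact sizes are arch and config
 * dependent): with 4096-byte pages, a 16-byte page header, and a
 * 48-byte ftrace_profile (when CONFIG_FUNCTION_GRAPH_TRACER is on),
 * each page holds (4096 - 16) / 48 = 85 records.
 */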

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* without function graph, compare against hit counts */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = div64_ul(rec->time, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample variance (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		stddev = div64_ul(stddev,
				  rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. From past experience,
	 * there are around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}
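
/*
 * The atomic_inc_return() guard above is a generic re-entrancy pattern:
 * an NMI that fires in the middle of this function and re-enters it on
 * the same CPU sees disabled != 1 and bails out instead of corrupting
 * stat->pages. A minimal sketch of the idea (illustrative only):
 *
 *	if (atomic_inc_return(&guard) != 1)
 *		goto out;	// someone (e.g. an NMI) is already inside
 *	... critical, non-reentrant work ...
 * out:
 *	atomic_dec(&guard);
 */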

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static bool fgraph_graph_time = true;

void ftrace_graph_graph_time_control(bool enable)
{
	fgraph_graph_time = enable;
}

static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	struct ftrace_ret_stack *ret_stack;

	function_profile_call(trace->func, 0, NULL, NULL);

	/* If function graph is shutting down, ret_stack can be NULL */
	if (!current->ret_stack)
		return 0;

	ret_stack = ftrace_graph_get_ret_stack(current, 0);
	if (ret_stack)
		ret_stack->subtime = 0;

	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_ret_stack *ret_stack;
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd, ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {

		/* Append this call time to the parent time to subtract */
		ret_stack = ftrace_graph_get_ret_stack(current, 1);
		if (ret_stack)
			ret_stack->subtime += calltime;

		ret_stack = ftrace_graph_get_ret_stack(current, 0);
		if (ret_stack && ret_stack->subtime < calltime)
			calltime -= ret_stack->subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}
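
/*
 * Worked example (illustrative): with fgraph_graph_time disabled, a
 * parent that ran for 10us total and called a child that ran for 4us
 * should be charged only 6us. The child's return handler above adds its
 * 4us calltime to the parent's ret_stack->subtime; when the parent
 * itself returns, the accumulated subtime is subtracted from its own
 * calltime before the record is updated.
 */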

static struct fgraph_ops fprofiler_ops = {
	.entryfunc = &profile_graph_entry,
	.retfunc = &profile_graph_return,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&fprofiler_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph(&fprofiler_ops);
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_rcu().
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	trace_create_file("function_profile_enabled",
			  TRACE_MODE_WRITE, d_tracer, NULL,
			  &ftrace_profile_fops);
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	return ftrace_ops_trampoline(addr) != NULL;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			order;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
	if (hash->size_bits > 0)
		return hash_long(ip, hash->size_bits);

	return 0;
}

/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	key = ftrace_hash_key(hash, ip);
	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns: the entry that holds the @ip if found. NULL otherwise.
 */
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	if (ftrace_hash_empty(hash))
		return NULL;

	return __ftrace_lookup_ip(hash, ip);
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	key = ftrace_hash_key(hash, entry->ip);
	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static struct ftrace_func_entry *
add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return entry;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
	list_del(&ftrace_mod->list);
	kfree(ftrace_mod->module);
	kfree(ftrace_mod->func);
	kfree(ftrace_mod);
}

static void clear_ftrace_mod_list(struct list_head *head)
{
	struct ftrace_mod_load *p, *n;

	/* stack tracer isn't supported yet */
	if (!head)
		return;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(p, n, head, list)
		free_ftrace_mod(p);
	mutex_unlock(&ftrace_lock);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}

/**
 * ftrace_free_filter - remove all filters for an ftrace_ops
 * @ops: the ops to remove the filters from
 */
void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}
EXPORT_SYMBOL_GPL(ftrace_free_filter);

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}


static int ftrace_add_mod(struct trace_array *tr,
			  const char *func, const char *module,
			  int enable)
{
	struct ftrace_mod_load *ftrace_mod;
	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;

	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
	if (!ftrace_mod)
		return -ENOMEM;

	INIT_LIST_HEAD(&ftrace_mod->list);
	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
	ftrace_mod->enable = enable;

	if (!ftrace_mod->func || !ftrace_mod->module)
		goto out_free;

	list_add(&ftrace_mod->list, mod_head);

	return 0;

 out_free:
	free_ftrace_mod(ftrace_mod);

	return -ENOMEM;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	if (hash)
		new_hash->flags = hash->flags;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			if (add_hash_entry(new_hash, entry->ip) == NULL)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_head *hhd;
	struct hlist_node *tn;
	int bits = 0;
	int i;

	/*
	 * Use around half the size (max bit of it), but
	 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
	 */
	bits = fls(size / 2);

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}
	return new_hash;
}
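
/*
 * Sizing example (illustrative): for size == 100, fls(100 / 2) ==
 * fls(50) == 6, so the new hash gets 1 << 6 == 64 buckets, i.e. about
 * two entries per bucket on average, capped at FTRACE_HASH_MAX_BITS.
 */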

static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	int size = src->count;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	return dup_hash(src, size);
}

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
}
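
/*
 * Truth table for the test above (illustrative):
 *
 *	filter_hash		notrace_hash		match?
 *	empty			empty			yes (trace all)
 *	contains ip		empty			yes
 *	lacks ip		(anything)		no
 *	empty or has ip		contains ip		no
 *	empty or has ip		lacks ip		yes
 */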

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu().
 */
int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for-loop. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}


static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}
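
/*
 * Note on the comparator above (editorial): the search key is a
 * dyn_ftrace whose ->ip holds the start of the range and whose ->flags
 * is overloaded to hold the (inclusive) end, as set up in lookup_rec()
 * below. A record matches when the ranges [key->ip, key->flags] and
 * [rec->ip, rec->ip + MCOUNT_INSN_SIZE) overlap.
 */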

static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec = NULL;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (pg->index == 0 ||
		    end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			break;
	}
	return rec;
}

/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns: rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct dyn_ftrace *rec;
	unsigned long ip = 0;

	rcu_read_lock();
	rec = lookup_rec(start, end);
	if (rec)
		ip = rec->ip;
	rcu_read_unlock();

	return ip;
}

/**
 * ftrace_location - return the ftrace location
 * @ip: the instruction pointer to check
 *
 * Returns:
 * * If @ip matches the ftrace location, return @ip.
 * * If @ip matches sym+0, return sym's ftrace location.
 * * Otherwise, return 0.
 */
unsigned long ftrace_location(unsigned long ip)
{
	unsigned long loc;
	unsigned long offset;
	unsigned long size;

	loc = ftrace_location_range(ip, ip);
	if (!loc) {
		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
			goto out;

		/* map sym+0 to __fentry__ */
		if (!offset)
			loc = ftrace_location_range(ip, ip + size - 1);
	}

out:
	return loc;
}
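
/*
 * Example (illustrative): the __fentry__ call site is usually at sym+0,
 * but on configurations where the prologue starts with other
 * instructions (e.g. an endbr on x86 with IBT) it sits a few bytes in.
 * Passing sym+0 here still finds it: kallsyms supplies the symbol's
 * size, and the whole range [ip, ip + size - 1] is searched for the
 * ftrace site.
 */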

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns: 1 if the range from @start to @end contains an ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);

static bool skip_record(struct dyn_ftrace *rec)
{
	/*
	 * At boot up, weak functions are set to be disabled. Function tracing
	 * can be enabled before they are, and they still need to be disabled now.
	 * If the record is disabled, still continue if it is marked as already
	 * enabled (this is needed to keep the accounting working).
	 */
	return rec->flags & FTRACE_FL_DISABLED &&
		!(rec->flags & FTRACE_FL_ENABLED);
}

static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = false;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = true;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (skip_record(rec))
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags |= FTRACE_FL_DIRECT;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * Only the internal direct_ops should have the
			 * DIRECT flag set. Thus, if it is removing a
			 * function, then that function should no longer
			 * be direct.
			 */
			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags &= ~FTRACE_FL_DIRECT;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * The TRAMP needs to be set only if rec count
			 * is decremented to one, and the ops that is
			 * left has a trampoline. As TRAMP can only be
			 * enabled if there is only a single ops attached
			 * to it.
			 */
			if (ftrace_rec_count(rec) == 1 &&
			    ftrace_find_tramp_ops_any_other(rec, ops))
				rec->flags |= FTRACE_FL_TRAMP;
			else
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}

		/*
		 * If the rec has a single associated ops, and ops->func can be
		 * called directly, allow the call site to call via the ops.
		 */
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) &&
		    ftrace_rec_count(rec) == 1 &&
		    ftrace_ops_get_func(ops) == ops->func)
			rec->flags |= FTRACE_FL_CALL_OPS;
		else
			rec->flags &= ~FTRACE_FL_CALL_OPS;

		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}

static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}

/*
 * Try to update the IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no update is needed, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have the following meanings:
 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 *  - If the hash is EMPTY_HASH, it hits nothing
 *  - Anything else hits the recs which match the hash entries.
 *
 * A DIRECT ops does not have the IPMODIFY flag, but we still need to check it
 * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call
 * ops_func(SHARE_IPMODIFY_SELF) to make sure the current ops can share with
 * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate
 * the return value to the caller and eventually to the owner of the DIRECT
 * ops.
 */
1942static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1943					 struct ftrace_hash *old_hash,
1944					 struct ftrace_hash *new_hash)
1945{
1946	struct ftrace_page *pg;
1947	struct dyn_ftrace *rec, *end = NULL;
1948	int in_old, in_new;
1949	bool is_ipmodify, is_direct;
1950
1951	/* Only update if the ops has been registered */
1952	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1953		return 0;
1954
1955	is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY;
1956	is_direct = ops->flags & FTRACE_OPS_FL_DIRECT;
1957
1958	/* neither IPMODIFY nor DIRECT, skip */
1959	if (!is_ipmodify && !is_direct)
1960		return 0;
1961
1962	if (WARN_ON_ONCE(is_ipmodify && is_direct))
1963		return 0;
1964
1965	/*
1966	 * Since the IPMODIFY and DIRECT are very address sensitive
1967	 * actions, we do not allow ftrace_ops to set all functions to new
1968	 * hash.
1969	 */
1970	if (!new_hash || !old_hash)
1971		return -EINVAL;
1972
1973	/* Update rec->flags */
1974	do_for_each_ftrace_rec(pg, rec) {
1975
1976		if (rec->flags & FTRACE_FL_DISABLED)
1977			continue;
1978
1979		/* We need to update only differences of filter_hash */
1980		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1981		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1982		if (in_old == in_new)
1983			continue;
1984
1985		if (in_new) {
1986			if (rec->flags & FTRACE_FL_IPMODIFY) {
1987				int ret;
1988
1989				/* Cannot have two ipmodify on same rec */
1990				if (is_ipmodify)
1991					goto rollback;
1992
1993				FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);
1994
1995				/*
1996				 * Another ops with IPMODIFY is already
1997				 * attached. We are now attaching a direct
1998				 * ops. Run SHARE_IPMODIFY_SELF, to check
1999				 * whether sharing is supported.
2000				 */
2001				if (!ops->ops_func)
2002					return -EBUSY;
2003				ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF);
2004				if (ret)
2005					return ret;
2006			} else if (is_ipmodify) {
2007				rec->flags |= FTRACE_FL_IPMODIFY;
2008			}
2009		} else if (is_ipmodify) {
2010			rec->flags &= ~FTRACE_FL_IPMODIFY;
2011		}
2012	} while_for_each_ftrace_rec();
2013
2014	return 0;
2015
2016rollback:
2017	end = rec;
2018
2019	/* Roll back what we did above */
2020	do_for_each_ftrace_rec(pg, rec) {
2021
2022		if (rec->flags & FTRACE_FL_DISABLED)
2023			continue;
2024
2025		if (rec == end)
2026			goto err_out;
2027
2028		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
2029		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
2030		if (in_old == in_new)
2031			continue;
2032
2033		if (in_new)
2034			rec->flags &= ~FTRACE_FL_IPMODIFY;
2035		else
2036			rec->flags |= FTRACE_FL_IPMODIFY;
2037	} while_for_each_ftrace_rec();
2038
2039err_out:
2040	return -EBUSY;
2041}
2042
2043static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
2044{
2045	struct ftrace_hash *hash = ops->func_hash->filter_hash;
2046
2047	if (ftrace_hash_empty(hash))
2048		hash = NULL;
2049
2050	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
2051}
2052
2053/* Disabling always succeeds */
2054static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
2055{
2056	struct ftrace_hash *hash = ops->func_hash->filter_hash;
2057
2058	if (ftrace_hash_empty(hash))
2059		hash = NULL;
2060
2061	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
2062}
2063
2064static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
2065				       struct ftrace_hash *new_hash)
2066{
2067	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
2068
2069	if (ftrace_hash_empty(old_hash))
2070		old_hash = NULL;
2071
2072	if (ftrace_hash_empty(new_hash))
2073		new_hash = NULL;
2074
2075	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
2076}
2077
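/*
 * Dump the instruction bytes at @p for an ftrace_bug() report, reading
 * them safely via copy_from_kernel_nofault() in case @p is bogus.
 */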
2078static void print_ip_ins(const char *fmt, const unsigned char *p)
2079{
2080	char ins[MCOUNT_INSN_SIZE];
2081
2082	if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
2083		printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
2084		return;
2085	}
2086
2087	printk(KERN_CONT "%s", fmt);
2088	pr_cont("%*phC", MCOUNT_INSN_SIZE, ins);
2089}
2090
2091enum ftrace_bug_type ftrace_bug_type;
2092const void *ftrace_expected;
2093
2094static void print_bug_type(void)
2095{
2096	switch (ftrace_bug_type) {
2097	case FTRACE_BUG_UNKNOWN:
2098		break;
2099	case FTRACE_BUG_INIT:
2100		pr_info("Initializing ftrace call sites\n");
2101		break;
2102	case FTRACE_BUG_NOP:
2103		pr_info("Setting ftrace call site to NOP\n");
2104		break;
2105	case FTRACE_BUG_CALL:
2106		pr_info("Setting ftrace call site to call ftrace function\n");
2107		break;
2108	case FTRACE_BUG_UPDATE:
2109		pr_info("Updating ftrace call site to call a different ftrace function\n");
2110		break;
2111	}
2112}
2113
2114/**
2115 * ftrace_bug - report and shutdown function tracer
2116 * @failed: The failed type (EFAULT, EINVAL, EPERM)
2117 * @rec: The record that failed
2118 *
2119 * The arch code that enables or disables the function tracing
2120 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of:
2122 * EFAULT - if the problem happens on reading the @ip address
2123 * EINVAL - if what is read at @ip is not what was expected
2124 * EPERM - if the problem happens on writing to the @ip address
2125 */
2126void ftrace_bug(int failed, struct dyn_ftrace *rec)
2127{
2128	unsigned long ip = rec ? rec->ip : 0;
2129
2130	pr_info("------------[ ftrace bug ]------------\n");
2131
2132	switch (failed) {
2133	case -EFAULT:
2134		pr_info("ftrace faulted on modifying ");
2135		print_ip_sym(KERN_INFO, ip);
2136		break;
2137	case -EINVAL:
2138		pr_info("ftrace failed to modify ");
2139		print_ip_sym(KERN_INFO, ip);
2140		print_ip_ins(" actual:   ", (unsigned char *)ip);
2141		pr_cont("\n");
2142		if (ftrace_expected) {
2143			print_ip_ins(" expected: ", ftrace_expected);
2144			pr_cont("\n");
2145		}
2146		break;
2147	case -EPERM:
2148		pr_info("ftrace faulted on writing ");
2149		print_ip_sym(KERN_INFO, ip);
2150		break;
2151	default:
2152		pr_info("ftrace faulted on unknown error ");
2153		print_ip_sym(KERN_INFO, ip);
2154	}
2155	print_bug_type();
2156	if (rec) {
2157		struct ftrace_ops *ops = NULL;
2158
2159		pr_info("ftrace record flags: %lx\n", rec->flags);
2160		pr_cont(" (%ld)%s%s", ftrace_rec_count(rec),
2161			rec->flags & FTRACE_FL_REGS ? " R" : "  ",
2162			rec->flags & FTRACE_FL_CALL_OPS ? " O" : "  ");
2163		if (rec->flags & FTRACE_FL_TRAMP_EN) {
2164			ops = ftrace_find_tramp_ops_any(rec);
2165			if (ops) {
2166				do {
2167					pr_cont("\ttramp: %pS (%pS)",
2168						(void *)ops->trampoline,
2169						(void *)ops->func);
2170					ops = ftrace_find_tramp_ops_next(rec, ops);
2171				} while (ops);
2172			} else
2173				pr_cont("\ttramp: ERROR!");
2174
2175		}
2176		ip = ftrace_get_addr_curr(rec);
2177		pr_cont("\n expected tramp: %lx\n", ip);
2178	}
2179
2180	FTRACE_WARN_ON_ONCE(1);
2181}
2182
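/*
 * Decide what to do with a call site:
 *   FTRACE_UPDATE_IGNORE      - the site is already in the desired state
 *   FTRACE_UPDATE_MAKE_CALL   - convert the nop to a call
 *   FTRACE_UPDATE_MAKE_NOP    - convert the call back to a nop
 *   FTRACE_UPDATE_MODIFY_CALL - keep it a call, but change what it calls
 * When @update is false (the ftrace_test_record() case), only compute the
 * result without touching rec->flags.
 */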
2183static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
2184{
2185	unsigned long flag = 0UL;
2186
2187	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2188
2189	if (skip_record(rec))
2190		return FTRACE_UPDATE_IGNORE;
2191
2192	/*
2193	 * If we are updating calls:
2194	 *
2195	 *   If the record has a ref count, then we need to enable it
2196	 *   because someone is using it.
2197	 *
	 *   Otherwise we make sure it is disabled.
2199	 *
2200	 * If we are disabling calls, then disable all records that
2201	 * are enabled.
2202	 */
2203	if (enable && ftrace_rec_count(rec))
2204		flag = FTRACE_FL_ENABLED;
2205
2206	/*
2207	 * If enabling and the REGS flag does not match the REGS_EN, or
2208	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2209	 * this record. Set flags to fail the compare against ENABLED.
2210	 * Same for direct calls.
2211	 */
2212	if (flag) {
2213		if (!(rec->flags & FTRACE_FL_REGS) !=
2214		    !(rec->flags & FTRACE_FL_REGS_EN))
2215			flag |= FTRACE_FL_REGS;
2216
2217		if (!(rec->flags & FTRACE_FL_TRAMP) !=
2218		    !(rec->flags & FTRACE_FL_TRAMP_EN))
2219			flag |= FTRACE_FL_TRAMP;
2220
		/*
		 * Direct calls are special, as count matters.
		 * We must test the record for DIRECT when the DIRECT and
		 * DIRECT_EN flags do not match, but only if the count is 1.
		 * That's because, if the count is anything other than one,
		 * we do not want the direct call enabled (it will be done
		 * via the direct helper). But if DIRECT_EN is set and the
		 * count is not one, we need to clear it.
		 */
2232		if (ftrace_rec_count(rec) == 1) {
2233			if (!(rec->flags & FTRACE_FL_DIRECT) !=
2234			    !(rec->flags & FTRACE_FL_DIRECT_EN))
2235				flag |= FTRACE_FL_DIRECT;
2236		} else if (rec->flags & FTRACE_FL_DIRECT_EN) {
2237			flag |= FTRACE_FL_DIRECT;
2238		}
2239
2240		/*
2241		 * Ops calls are special, as count matters.
2242		 * As with direct calls, they must only be enabled when count
2243		 * is one, otherwise they'll be handled via the list ops.
2244		 */
2245		if (ftrace_rec_count(rec) == 1) {
2246			if (!(rec->flags & FTRACE_FL_CALL_OPS) !=
2247			    !(rec->flags & FTRACE_FL_CALL_OPS_EN))
2248				flag |= FTRACE_FL_CALL_OPS;
2249		} else if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
2250			flag |= FTRACE_FL_CALL_OPS;
2251		}
2252	}
2253
2254	/* If the state of this record hasn't changed, then do nothing */
2255	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2256		return FTRACE_UPDATE_IGNORE;
2257
2258	if (flag) {
2259		/* Save off if rec is being enabled (for return value) */
2260		flag ^= rec->flags & FTRACE_FL_ENABLED;
2261
2262		if (update) {
2263			rec->flags |= FTRACE_FL_ENABLED | FTRACE_FL_TOUCHED;
2264			if (flag & FTRACE_FL_REGS) {
2265				if (rec->flags & FTRACE_FL_REGS)
2266					rec->flags |= FTRACE_FL_REGS_EN;
2267				else
2268					rec->flags &= ~FTRACE_FL_REGS_EN;
2269			}
2270			if (flag & FTRACE_FL_TRAMP) {
2271				if (rec->flags & FTRACE_FL_TRAMP)
2272					rec->flags |= FTRACE_FL_TRAMP_EN;
2273				else
2274					rec->flags &= ~FTRACE_FL_TRAMP_EN;
2275			}
2276
2277			/* Keep track of anything that modifies the function */
2278			if (rec->flags & (FTRACE_FL_DIRECT | FTRACE_FL_IPMODIFY))
2279				rec->flags |= FTRACE_FL_MODIFIED;
2280
2281			if (flag & FTRACE_FL_DIRECT) {
2282				/*
2283				 * If there's only one user (direct_ops helper)
2284				 * then we can call the direct function
2285				 * directly (no ftrace trampoline).
2286				 */
2287				if (ftrace_rec_count(rec) == 1) {
2288					if (rec->flags & FTRACE_FL_DIRECT)
2289						rec->flags |= FTRACE_FL_DIRECT_EN;
2290					else
2291						rec->flags &= ~FTRACE_FL_DIRECT_EN;
2292				} else {
2293					/*
2294					 * Can only call directly if there's
2295					 * only one callback to the function.
2296					 */
2297					rec->flags &= ~FTRACE_FL_DIRECT_EN;
2298				}
2299			}
2300
2301			if (flag & FTRACE_FL_CALL_OPS) {
2302				if (ftrace_rec_count(rec) == 1) {
2303					if (rec->flags & FTRACE_FL_CALL_OPS)
2304						rec->flags |= FTRACE_FL_CALL_OPS_EN;
2305					else
2306						rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
2307				} else {
2308					/*
2309					 * Can only call directly if there's
2310					 * only one set of associated ops.
2311					 */
2312					rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
2313				}
2314			}
2315		}
2316
2317		/*
2318		 * If this record is being updated from a nop, then
2319		 *   return UPDATE_MAKE_CALL.
2320		 * Otherwise,
2321		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
		 *   from a save-regs function to a non-save-regs function
		 *   or vice versa, or from a trampoline call.
2324		 */
2325		if (flag & FTRACE_FL_ENABLED) {
2326			ftrace_bug_type = FTRACE_BUG_CALL;
2327			return FTRACE_UPDATE_MAKE_CALL;
2328		}
2329
2330		ftrace_bug_type = FTRACE_BUG_UPDATE;
2331		return FTRACE_UPDATE_MODIFY_CALL;
2332	}
2333
2334	if (update) {
2335		/* If there's no more users, clear all flags */
2336		if (!ftrace_rec_count(rec))
2337			rec->flags &= FTRACE_NOCLEAR_FLAGS;
2338		else
2339			/*
2340			 * Just disable the record, but keep the ops TRAMP
2341			 * and REGS states. The _EN flags must be disabled though.
2342			 */
2343			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2344					FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN |
2345					FTRACE_FL_CALL_OPS_EN);
2346	}
2347
2348	ftrace_bug_type = FTRACE_BUG_NOP;
2349	return FTRACE_UPDATE_MAKE_NOP;
2350}
2351
2352/**
 * ftrace_update_record - set a record to be tracing or not
2354 * @rec: the record to update
2355 * @enable: set to true if the record is tracing, false to force disable
2356 *
2357 * The records that represent all functions that can be traced need
2358 * to be updated when tracing has been enabled.
2359 */
2360int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
2361{
2362	return ftrace_check_record(rec, enable, true);
2363}
2364
2365/**
2366 * ftrace_test_record - check if the record has been enabled or not
2367 * @rec: the record to test
2368 * @enable: set to true to check if enabled, false if it is disabled
2369 *
2370 * The arch code may need to test if a record is already set to
2371 * tracing to determine how to modify the function code that it
2372 * represents.
2373 */
2374int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
2375{
2376	return ftrace_check_record(rec, enable, false);
2377}
2378
2379static struct ftrace_ops *
2380ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2381{
2382	struct ftrace_ops *op;
2383	unsigned long ip = rec->ip;
2384
2385	do_for_each_ftrace_op(op, ftrace_ops_list) {
2386
2387		if (!op->trampoline)
2388			continue;
2389
2390		if (hash_contains_ip(ip, op->func_hash))
2391			return op;
2392	} while_for_each_ftrace_op(op);
2393
2394	return NULL;
2395}
2396
2397static struct ftrace_ops *
2398ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
2399{
2400	struct ftrace_ops *op;
2401	unsigned long ip = rec->ip;
2402
2403	do_for_each_ftrace_op(op, ftrace_ops_list) {
2404
2405		if (op == op_exclude || !op->trampoline)
2406			continue;
2407
2408		if (hash_contains_ip(ip, op->func_hash))
2409			return op;
2410	} while_for_each_ftrace_op(op);
2411
2412	return NULL;
2413}
2414
2415static struct ftrace_ops *
2416ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
2417			   struct ftrace_ops *op)
2418{
2419	unsigned long ip = rec->ip;
2420
2421	while_for_each_ftrace_op(op) {
2422
2423		if (!op->trampoline)
2424			continue;
2425
2426		if (hash_contains_ip(ip, op->func_hash))
2427			return op;
2428	}
2429
2430	return NULL;
2431}
2432
2433static struct ftrace_ops *
2434ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2435{
2436	struct ftrace_ops *op;
2437	unsigned long ip = rec->ip;
2438
2439	/*
2440	 * Need to check removed ops first.
2441	 * If they are being removed, and this rec has a tramp,
2442	 * and this rec is in the ops list, then it would be the
2443	 * one with the tramp.
2444	 */
2445	if (removed_ops) {
2446		if (hash_contains_ip(ip, &removed_ops->old_hash))
2447			return removed_ops;
2448	}
2449
2450	/*
2451	 * Need to find the current trampoline for a rec.
2452	 * Now, a trampoline is only attached to a rec if there
2453	 * was a single 'ops' attached to it. But this can be called
2454	 * when we are adding another op to the rec or removing the
2455	 * current one. Thus, if the op is being added, we can
2456	 * ignore it because it hasn't attached itself to the rec
2457	 * yet.
2458	 *
2459	 * If an ops is being modified (hooking to different functions)
2460	 * then we don't care about the new functions that are being
2461	 * added, just the old ones (that are probably being removed).
2462	 *
	 * If we are adding an ops to a function that already uses a
	 * trampoline, that trampoline needs to be removed (trampolines
	 * are only used when a single ops is attached), so an ops that
	 * is not being modified also needs to be checked.
	 */
2468	do_for_each_ftrace_op(op, ftrace_ops_list) {
2469
2470		if (!op->trampoline)
2471			continue;
2472
2473		/*
2474		 * If the ops is being added, it hasn't gotten to
2475		 * the point to be removed from this tree yet.
2476		 */
2477		if (op->flags & FTRACE_OPS_FL_ADDING)
2478			continue;
2479
2481		/*
2482		 * If the ops is being modified and is in the old
2483		 * hash, then it is probably being removed from this
2484		 * function.
2485		 */
2486		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2487		    hash_contains_ip(ip, &op->old_hash))
2488			return op;
2489		/*
2490		 * If the ops is not being added or modified, and it's
2491		 * in its normal filter hash, then this must be the one
2492		 * we want!
2493		 */
2494		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2495		    hash_contains_ip(ip, op->func_hash))
2496			return op;
2497
2498	} while_for_each_ftrace_op(op);
2499
2500	return NULL;
2501}
2502
2503static struct ftrace_ops *
2504ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2505{
2506	struct ftrace_ops *op;
2507	unsigned long ip = rec->ip;
2508
2509	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* The first ops whose hash contains the ip gets the new call */
2511		if (hash_contains_ip(ip, op->func_hash))
2512			return op;
2513	} while_for_each_ftrace_op(op);
2514
2515	return NULL;
2516}
2517
2518struct ftrace_ops *
2519ftrace_find_unique_ops(struct dyn_ftrace *rec)
2520{
2521	struct ftrace_ops *op, *found = NULL;
2522	unsigned long ip = rec->ip;
2523
2524	do_for_each_ftrace_op(op, ftrace_ops_list) {
2525
2526		if (hash_contains_ip(ip, op->func_hash)) {
2527			if (found)
2528				return NULL;
2529			found = op;
2530		}
2531
2532	} while_for_each_ftrace_op(op);
2533
2534	return found;
2535}
2536
2537#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
2538/* Protected by rcu_tasks for reading, and direct_mutex for writing */
2539static struct ftrace_hash __rcu *direct_functions = EMPTY_HASH;
2540static DEFINE_MUTEX(direct_mutex);
2541
2542/*
2543 * Search the direct_functions hash to see if the given instruction pointer
2544 * has a direct caller attached to it.
2545 */
2546unsigned long ftrace_find_rec_direct(unsigned long ip)
2547{
2548	struct ftrace_func_entry *entry;
2549
2550	entry = __ftrace_lookup_ip(direct_functions, ip);
2551	if (!entry)
2552		return 0;
2553
2554	return entry->direct;
2555}
2556
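/*
 * The ftrace_ops->func used for DIRECT ops: ask the arch code to divert
 * execution to ops->direct_call once this handler returns.
 */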
2557static void call_direct_funcs(unsigned long ip, unsigned long pip,
2558			      struct ftrace_ops *ops, struct ftrace_regs *fregs)
2559{
2560	unsigned long addr = READ_ONCE(ops->direct_call);
2561
2562	if (!addr)
2563		return;
2564
2565	arch_ftrace_set_direct_caller(fregs, addr);
2566}
2567#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
2568
2569/**
2570 * ftrace_get_addr_new - Get the call address to set to
2571 * @rec:  The ftrace record descriptor
2572 *
2573 * If the record has the FTRACE_FL_REGS set, that means that it
2574 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2575 * is not set, then it wants to convert to the normal callback.
2576 *
2577 * Returns: the address of the trampoline to set to
2578 */
2579unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2580{
2581	struct ftrace_ops *ops;
2582	unsigned long addr;
2583
2584	if ((rec->flags & FTRACE_FL_DIRECT) &&
2585	    (ftrace_rec_count(rec) == 1)) {
2586		addr = ftrace_find_rec_direct(rec->ip);
2587		if (addr)
2588			return addr;
2589		WARN_ON_ONCE(1);
2590	}
2591
2592	/* Trampolines take precedence over regs */
2593	if (rec->flags & FTRACE_FL_TRAMP) {
2594		ops = ftrace_find_tramp_ops_new(rec);
2595		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2596			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2597				(void *)rec->ip, (void *)rec->ip, rec->flags);
2598			/* Ftrace is shutting down, return anything */
2599			return (unsigned long)FTRACE_ADDR;
2600		}
2601		return ops->trampoline;
2602	}
2603
2604	if (rec->flags & FTRACE_FL_REGS)
2605		return (unsigned long)FTRACE_REGS_ADDR;
2606	else
2607		return (unsigned long)FTRACE_ADDR;
2608}
2609
2610/**
2611 * ftrace_get_addr_curr - Get the call address that is already there
2612 * @rec:  The ftrace record descriptor
2613 *
2614 * The FTRACE_FL_REGS_EN is set when the record already points to
2615 * a function that saves all the regs. Basically the '_EN' version
2616 * represents the current state of the function.
2617 *
2618 * Returns: the address of the trampoline that is currently being called
2619 */
2620unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2621{
2622	struct ftrace_ops *ops;
2623	unsigned long addr;
2624
2625	/* Direct calls take precedence over trampolines */
2626	if (rec->flags & FTRACE_FL_DIRECT_EN) {
2627		addr = ftrace_find_rec_direct(rec->ip);
2628		if (addr)
2629			return addr;
2630		WARN_ON_ONCE(1);
2631	}
2632
2633	/* Trampolines take precedence over regs */
2634	if (rec->flags & FTRACE_FL_TRAMP_EN) {
2635		ops = ftrace_find_tramp_ops_curr(rec);
2636		if (FTRACE_WARN_ON(!ops)) {
2637			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2638				(void *)rec->ip, (void *)rec->ip);
2639			/* Ftrace is shutting down, return anything */
2640			return (unsigned long)FTRACE_ADDR;
2641		}
2642		return ops->trampoline;
2643	}
2644
2645	if (rec->flags & FTRACE_FL_REGS_EN)
2646		return (unsigned long)FTRACE_REGS_ADDR;
2647	else
2648		return (unsigned long)FTRACE_ADDR;
2649}
2650
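/*
 * Update a single call site: work out the address it should call next and
 * the address it calls now, then have the arch code convert the site
 * (nop <-> call, or call -> a different call).
 */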
2651static int
2652__ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
2653{
2654	unsigned long ftrace_old_addr;
2655	unsigned long ftrace_addr;
2656	int ret;
2657
2658	ftrace_addr = ftrace_get_addr_new(rec);
2659
2660	/* This needs to be done before we call ftrace_update_record */
2661	ftrace_old_addr = ftrace_get_addr_curr(rec);
2662
2663	ret = ftrace_update_record(rec, enable);
2664
2665	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2666
2667	switch (ret) {
2668	case FTRACE_UPDATE_IGNORE:
2669		return 0;
2670
2671	case FTRACE_UPDATE_MAKE_CALL:
2672		ftrace_bug_type = FTRACE_BUG_CALL;
2673		return ftrace_make_call(rec, ftrace_addr);
2674
2675	case FTRACE_UPDATE_MAKE_NOP:
2676		ftrace_bug_type = FTRACE_BUG_NOP;
2677		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2678
2679	case FTRACE_UPDATE_MODIFY_CALL:
2680		ftrace_bug_type = FTRACE_BUG_UPDATE;
2681		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2682	}
2683
2684	return -1; /* unknown ftrace bug */
2685}
2686
2687void __weak ftrace_replace_code(int mod_flags)
2688{
2689	struct dyn_ftrace *rec;
2690	struct ftrace_page *pg;
2691	bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
2692	int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
2693	int failed;
2694
2695	if (unlikely(ftrace_disabled))
2696		return;
2697
2698	do_for_each_ftrace_rec(pg, rec) {
2699
2700		if (skip_record(rec))
2701			continue;
2702
2703		failed = __ftrace_replace_code(rec, enable);
2704		if (failed) {
2705			ftrace_bug(failed, rec);
2706			/* Stop processing */
2707			return;
2708		}
2709		if (schedulable)
2710			cond_resched();
2711	} while_for_each_ftrace_rec();
2712}
2713
2714struct ftrace_rec_iter {
2715	struct ftrace_page	*pg;
2716	int			index;
2717};
2718
2719/**
2720 * ftrace_rec_iter_start - start up iterating over traced functions
2721 *
2722 * Returns: an iterator handle that is used to iterate over all
2723 * the records that represent address locations where functions
2724 * are traced.
2725 *
2726 * May return NULL if no records are available.
2727 */
2728struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2729{
2730	/*
2731	 * We only use a single iterator.
2732	 * Protected by the ftrace_lock mutex.
2733	 */
2734	static struct ftrace_rec_iter ftrace_rec_iter;
2735	struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2736
2737	iter->pg = ftrace_pages_start;
2738	iter->index = 0;
2739
2740	/* Could have empty pages */
2741	while (iter->pg && !iter->pg->index)
2742		iter->pg = iter->pg->next;
2743
2744	if (!iter->pg)
2745		return NULL;
2746
2747	return iter;
2748}
2749
2750/**
2751 * ftrace_rec_iter_next - get the next record to process.
2752 * @iter: The handle to the iterator.
2753 *
2754 * Returns: the next iterator after the given iterator @iter.
2755 */
2756struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2757{
2758	iter->index++;
2759
2760	if (iter->index >= iter->pg->index) {
2761		iter->pg = iter->pg->next;
2762		iter->index = 0;
2763
2764		/* Could have empty pages */
2765		while (iter->pg && !iter->pg->index)
2766			iter->pg = iter->pg->next;
2767	}
2768
2769	if (!iter->pg)
2770		return NULL;
2771
2772	return iter;
2773}
2774
2775/**
2776 * ftrace_rec_iter_record - get the record at the iterator location
2777 * @iter: The current iterator location
2778 *
2779 * Returns: the record that the current @iter is at.
2780 */
2781struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2782{
2783	return &iter->pg->records[iter->index];
2784}
2785
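/*
 * Convert a record's mcount/fentry call site into a nop at boot or module
 * load time. Returns 1 on success, and 0 if the conversion failed (after
 * reporting via ftrace_bug()) or if ftrace is disabled.
 */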
2786static int
2787ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
2788{
2789	int ret;
2790
2791	if (unlikely(ftrace_disabled))
2792		return 0;
2793
2794	ret = ftrace_init_nop(mod, rec);
2795	if (ret) {
2796		ftrace_bug_type = FTRACE_BUG_INIT;
2797		ftrace_bug(ret, rec);
2798		return 0;
2799	}
2800	return 1;
2801}
2802
2803/*
2804 * archs can override this function if they must do something
2805 * before the modifying code is performed.
2806 */
2807void __weak ftrace_arch_code_modify_prepare(void)
2808{
2809}
2810
2811/*
2812 * archs can override this function if they must do something
2813 * after the modifying code is performed.
2814 */
2815void __weak ftrace_arch_code_modify_post_process(void)
2816{
2817}
2818
2819static int update_ftrace_func(ftrace_func_t func)
2820{
2821	static ftrace_func_t save_func;
2822
2823	/* Avoid updating if it hasn't changed */
2824	if (func == save_func)
2825		return 0;
2826
2827	save_func = func;
2828
2829	return ftrace_update_ftrace_func(func);
2830}
2831
2832void ftrace_modify_all_code(int command)
2833{
2834	int update = command & FTRACE_UPDATE_TRACE_FUNC;
2835	int mod_flags = 0;
2836	int err = 0;
2837
2838	if (command & FTRACE_MAY_SLEEP)
2839		mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
2840
2841	/*
2842	 * If the ftrace_caller calls a ftrace_ops func directly,
2843	 * we need to make sure that it only traces functions it
2844	 * expects to trace. When doing the switch of functions,
2845	 * we need to update to the ftrace_ops_list_func first
	 * before the transition between old and new calls is set,
	 * as the ftrace_ops_list_func checks the ops hashes
	 * to make sure each ops only traces the functions it
	 * expects to trace.
2850	 */
2851	if (update) {
2852		err = update_ftrace_func(ftrace_ops_list_func);
2853		if (FTRACE_WARN_ON(err))
2854			return;
2855	}
2856
2857	if (command & FTRACE_UPDATE_CALLS)
2858		ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
2859	else if (command & FTRACE_DISABLE_CALLS)
2860		ftrace_replace_code(mod_flags);
2861
2862	if (update && ftrace_trace_function != ftrace_ops_list_func) {
2863		function_trace_op = set_function_trace_op;
2864		smp_wmb();
2865		/* If irqs are disabled, we are in stop machine */
2866		if (!irqs_disabled())
2867			smp_call_function(ftrace_sync_ipi, NULL, 1);
2868		err = update_ftrace_func(ftrace_trace_function);
2869		if (FTRACE_WARN_ON(err))
2870			return;
2871	}
2872
2873	if (command & FTRACE_START_FUNC_RET)
2874		err = ftrace_enable_ftrace_graph_caller();
2875	else if (command & FTRACE_STOP_FUNC_RET)
2876		err = ftrace_disable_ftrace_graph_caller();
2877	FTRACE_WARN_ON(err);
2878}
2879
2880static int __ftrace_modify_code(void *data)
2881{
2882	int *command = data;
2883
2884	ftrace_modify_all_code(*command);
2885
2886	return 0;
2887}
2888
2889/**
2890 * ftrace_run_stop_machine - go back to the stop machine method
2891 * @command: The command to tell ftrace what to do
2892 *
 * If an arch needs to fall back to the stop machine method, then
 * it can call this function.
2895 */
2896void ftrace_run_stop_machine(int command)
2897{
2898	stop_machine(__ftrace_modify_code, &command, NULL);
2899}
2900
2901/**
2902 * arch_ftrace_update_code - modify the code to trace or not trace
2903 * @command: The command that needs to be done
2904 *
 * Archs can override this function if they do not need to
 * run stop_machine() to modify code.
2907 */
2908void __weak arch_ftrace_update_code(int command)
2909{
2910	ftrace_run_stop_machine(command);
2911}
2912
2913static void ftrace_run_update_code(int command)
2914{
2915	ftrace_arch_code_modify_prepare();
2916
2917	/*
2918	 * By default we use stop_machine() to modify the code.
	 * But archs can do whatever they want as long as it
	 * is safe. stop_machine() is the safest, but also
2921	 * produces the most overhead.
2922	 */
2923	arch_ftrace_update_code(command);
2924
2925	ftrace_arch_code_modify_post_process();
2926}
2927
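/*
 * Like ftrace_run_update_code(), but marks @ops as MODIFYING and publishes
 * its old hashes for the duration, so the trampoline logic can tell which
 * functions @ops used to trace while the transition is in flight.
 */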
2928static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2929				   struct ftrace_ops_hash *old_hash)
2930{
2931	ops->flags |= FTRACE_OPS_FL_MODIFYING;
2932	ops->old_hash.filter_hash = old_hash->filter_hash;
2933	ops->old_hash.notrace_hash = old_hash->notrace_hash;
2934	ftrace_run_update_code(command);
2935	ops->old_hash.filter_hash = NULL;
2936	ops->old_hash.notrace_hash = NULL;
2937	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2938}
2939
2940static ftrace_func_t saved_ftrace_func;
2941static int ftrace_start_up;
2942
2943void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2944{
2945}
2946
2947/* List of trace_ops that have allocated trampolines */
2948static LIST_HEAD(ftrace_ops_trampoline_list);
2949
2950static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
2951{
2952	lockdep_assert_held(&ftrace_lock);
2953	list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
2954}
2955
2956static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
2957{
2958	lockdep_assert_held(&ftrace_lock);
2959	list_del_rcu(&ops->list);
2960	synchronize_rcu();
2961}
2962
2963/*
2964 * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
2965 * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
2966 * not a module.
2967 */
2968#define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
2969#define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"
2970
2971static void ftrace_trampoline_free(struct ftrace_ops *ops)
2972{
2973	if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
2974	    ops->trampoline) {
2975		/*
2976		 * Record the text poke event before the ksymbol unregister
2977		 * event.
2978		 */
2979		perf_event_text_poke((void *)ops->trampoline,
2980				     (void *)ops->trampoline,
2981				     ops->trampoline_size, NULL, 0);
2982		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
2983				   ops->trampoline, ops->trampoline_size,
2984				   true, FTRACE_TRAMPOLINE_SYM);
2985		/* Remove from kallsyms after the perf events */
2986		ftrace_remove_trampoline_from_kallsyms(ops);
2987	}
2988
2989	arch_ftrace_trampoline_free(ops);
2990}
2991
2992static void ftrace_startup_enable(int command)
2993{
2994	if (saved_ftrace_func != ftrace_trace_function) {
2995		saved_ftrace_func = ftrace_trace_function;
2996		command |= FTRACE_UPDATE_TRACE_FUNC;
2997	}
2998
2999	if (!command || !ftrace_enabled)
3000		return;
3001
3002	ftrace_run_update_code(command);
3003}
3004
3005static void ftrace_startup_all(int command)
3006{
3007	update_all_ops = true;
3008	ftrace_startup_enable(command);
3009	update_all_ops = false;
3010}
3011
3012int ftrace_startup(struct ftrace_ops *ops, int command)
3013{
3014	int ret;
3015
3016	if (unlikely(ftrace_disabled))
3017		return -ENODEV;
3018
3019	ret = __register_ftrace_function(ops);
3020	if (ret)
3021		return ret;
3022
3023	ftrace_start_up++;
3024
3025	/*
	 * Note that ftrace probes use this to start up
3027	 * and modify functions it will probe. But we still
3028	 * set the ADDING flag for modification, as probes
3029	 * do not have trampolines. If they add them in the
3030	 * future, then the probes will need to distinguish
3031	 * between adding and updating probes.
3032	 */
3033	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
3034
3035	ret = ftrace_hash_ipmodify_enable(ops);
3036	if (ret < 0) {
3037		/* Rollback registration process */
3038		__unregister_ftrace_function(ops);
3039		ftrace_start_up--;
3040		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
3041		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
3042			ftrace_trampoline_free(ops);
3043		return ret;
3044	}
3045
3046	if (ftrace_hash_rec_enable(ops, 1))
3047		command |= FTRACE_UPDATE_CALLS;
3048
3049	ftrace_startup_enable(command);
3050
3051	/*
	 * If ftrace is in an undefined state, we just remove ops from the list
	 * to prevent a NULL pointer dereference, instead of totally rolling it
	 * back and freeing the trampoline, because those actions could cause
	 * further damage.
3055	 */
3056	if (unlikely(ftrace_disabled)) {
3057		__unregister_ftrace_function(ops);
3058		return -ENODEV;
3059	}
3060
3061	ops->flags &= ~FTRACE_OPS_FL_ADDING;
3062
3063	return 0;
3064}
3065
3066int ftrace_shutdown(struct ftrace_ops *ops, int command)
3067{
3068	int ret;
3069
3070	if (unlikely(ftrace_disabled))
3071		return -ENODEV;
3072
3073	ret = __unregister_ftrace_function(ops);
3074	if (ret)
3075		return ret;
3076
3077	ftrace_start_up--;
3078	/*
	 * Just warn in case of imbalance; no need to kill ftrace, it's not
	 * critical, but the ftrace_call callers may never be nopped again
	 * after further ftrace uses.
3082	 */
3083	WARN_ON_ONCE(ftrace_start_up < 0);
3084
3085	/* Disabling ipmodify never fails */
3086	ftrace_hash_ipmodify_disable(ops);
3087
3088	if (ftrace_hash_rec_disable(ops, 1))
3089		command |= FTRACE_UPDATE_CALLS;
3090
3091	ops->flags &= ~FTRACE_OPS_FL_ENABLED;
3092
3093	if (saved_ftrace_func != ftrace_trace_function) {
3094		saved_ftrace_func = ftrace_trace_function;
3095		command |= FTRACE_UPDATE_TRACE_FUNC;
3096	}
3097
3098	if (!command || !ftrace_enabled)
3099		goto out;
3100
3101	/*
3102	 * If the ops uses a trampoline, then it needs to be
3103	 * tested first on update.
3104	 */
3105	ops->flags |= FTRACE_OPS_FL_REMOVING;
3106	removed_ops = ops;
3107
3108	/* The trampoline logic checks the old hashes */
3109	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
3110	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
3111
3112	ftrace_run_update_code(command);
3113
3114	/*
	 * If there are no more ops registered with ftrace, run a
3116	 * sanity check to make sure all rec flags are cleared.
3117	 */
3118	if (rcu_dereference_protected(ftrace_ops_list,
3119			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
3120		struct ftrace_page *pg;
3121		struct dyn_ftrace *rec;
3122
3123		do_for_each_ftrace_rec(pg, rec) {
3124			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_NOCLEAR_FLAGS))
3125				pr_warn("  %pS flags:%lx\n",
3126					(void *)rec->ip, rec->flags);
3127		} while_for_each_ftrace_rec();
3128	}
3129
3130	ops->old_hash.filter_hash = NULL;
3131	ops->old_hash.notrace_hash = NULL;
3132
3133	removed_ops = NULL;
3134	ops->flags &= ~FTRACE_OPS_FL_REMOVING;
3135
3136out:
3137	/*
3138	 * Dynamic ops may be freed, we must make sure that all
3139	 * callers are done before leaving this function.
3140	 */
3141	if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
3142		/*
3143		 * We need to do a hard force of sched synchronization.
3144		 * This is because we use preempt_disable() to do RCU, but
3145		 * the function tracers can be called where RCU is not watching
		 * (like before user_exit()). We cannot rely on the RCU
3147		 * infrastructure to do the synchronization, thus we must do it
3148		 * ourselves.
3149		 */
3150		synchronize_rcu_tasks_rude();
3151
3152		/*
		 * When the kernel is preemptible, tasks can be preempted
3154		 * while on a ftrace trampoline. Just scheduling a task on
3155		 * a CPU is not good enough to flush them. Calling
3156		 * synchronize_rcu_tasks() will wait for those tasks to
3157		 * execute and either schedule voluntarily or enter user space.
3158		 */
3159		synchronize_rcu_tasks();
3160
3161		ftrace_trampoline_free(ops);
3162	}
3163
3164	return 0;
3165}
3166
3167static u64		ftrace_update_time;
3168unsigned long		ftrace_update_tot_cnt;
3169unsigned long		ftrace_number_of_pages;
3170unsigned long		ftrace_number_of_groups;
3171
3172static inline int ops_traces_mod(struct ftrace_ops *ops)
3173{
	/*
	 * An empty filter_hash defaults to tracing the whole module.
	 * But a non-empty notrace hash requires testing individual
	 * module functions.
	 */
3178	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
3179		ftrace_hash_empty(ops->func_hash->notrace_hash);
3180}
3181
3182static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
3183{
3184	bool init_nop = ftrace_need_init_nop();
3185	struct ftrace_page *pg;
3186	struct dyn_ftrace *p;
3187	u64 start, stop;
3188	unsigned long update_cnt = 0;
3189	unsigned long rec_flags = 0;
3190	int i;
3191
3192	start = ftrace_now(raw_smp_processor_id());
3193
3194	/*
3195	 * When a module is loaded, this function is called to convert
3196	 * the calls to mcount in its text to nops, and also to create
	 * an entry in the ftrace data. Now, if ftrace is activated
	 * after this call, but before the module sets its text to
	 * read-only, the code modification for enabling ftrace can
	 * fail if the text is made read-only while ftrace is still
	 * converting the calls.
3201	 * To prevent this, the module's records are set as disabled
3202	 * and will be enabled after the call to set the module's text
3203	 * to read-only.
3204	 */
3205	if (mod)
3206		rec_flags |= FTRACE_FL_DISABLED;
3207
3208	for (pg = new_pgs; pg; pg = pg->next) {
3209
3210		for (i = 0; i < pg->index; i++) {
3211
3212			/* If something went wrong, bail without enabling anything */
3213			if (unlikely(ftrace_disabled))
3214				return -1;
3215
3216			p = &pg->records[i];
3217			p->flags = rec_flags;
3218
3219			/*
3220			 * Do the initial record conversion from mcount jump
3221			 * to the NOP instructions.
3222			 */
3223			if (init_nop && !ftrace_nop_initialize(mod, p))
3224				break;
3225
3226			update_cnt++;
3227		}
3228	}
3229
3230	stop = ftrace_now(raw_smp_processor_id());
3231	ftrace_update_time = stop - start;
3232	ftrace_update_tot_cnt += update_cnt;
3233
3234	return 0;
3235}
3236
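/*
 * Allocate the records for one ftrace_page as a single power-of-two group
 * of pages. For example, if @count needs 6 pages, fls(6) - 1 gives order 2
 * (4 pages); the caller then loops, allocating further groups for the
 * remaining records. If an allocation fails, the order is reduced until
 * a single page is all that is asked for.
 */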
3237static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3238{
3239	int order;
3240	int pages;
3241	int cnt;
3242
3243	if (WARN_ON(!count))
3244		return -EINVAL;
3245
3246	/* We want to fill as much as possible, with no empty pages */
3247	pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
3248	order = fls(pages) - 1;
3249
3250 again:
3251	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3252
3253	if (!pg->records) {
3254		/* if we can't allocate this size, try something smaller */
3255		if (!order)
3256			return -ENOMEM;
3257		order--;
3258		goto again;
3259	}
3260
3261	ftrace_number_of_pages += 1 << order;
3262	ftrace_number_of_groups++;
3263
3264	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3265	pg->order = order;
3266
3267	if (cnt > count)
3268		cnt = count;
3269
3270	return cnt;
3271}
3272
3273static void ftrace_free_pages(struct ftrace_page *pages)
3274{
3275	struct ftrace_page *pg = pages;
3276
3277	while (pg) {
3278		if (pg->records) {
3279			free_pages((unsigned long)pg->records, pg->order);
3280			ftrace_number_of_pages -= 1 << pg->order;
3281		}
3282		pages = pg->next;
3283		kfree(pg);
3284		pg = pages;
3285		ftrace_number_of_groups--;
3286	}
3287}
3288
3289static struct ftrace_page *
3290ftrace_allocate_pages(unsigned long num_to_init)
3291{
3292	struct ftrace_page *start_pg;
3293	struct ftrace_page *pg;
3294	int cnt;
3295
3296	if (!num_to_init)
3297		return NULL;
3298
3299	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3300	if (!pg)
3301		return NULL;
3302
3303	/*
	 * Try to allocate as much as possible in one contiguous
3305	 * location that fills in all of the space. We want to
3306	 * waste as little space as possible.
3307	 */
3308	for (;;) {
3309		cnt = ftrace_allocate_records(pg, num_to_init);
3310		if (cnt < 0)
3311			goto free_pages;
3312
3313		num_to_init -= cnt;
3314		if (!num_to_init)
3315			break;
3316
3317		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3318		if (!pg->next)
3319			goto free_pages;
3320
3321		pg = pg->next;
3322	}
3323
3324	return start_pg;
3325
3326 free_pages:
3327	ftrace_free_pages(start_pg);
3328	pr_info("ftrace: FAILED to allocate memory for functions\n");
3329	return NULL;
3330}
3331
3332#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3333
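/*
 * Iterator state for the ftrace tracefs files. For set_ftrace_filter and
 * friends it can walk up to three phases in order: the function records
 * themselves, then any deferred module filters (":mod:" entries), then
 * the attached function probes.
 */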
3334struct ftrace_iterator {
3335	loff_t				pos;
3336	loff_t				func_pos;
3337	loff_t				mod_pos;
3338	struct ftrace_page		*pg;
3339	struct dyn_ftrace		*func;
3340	struct ftrace_func_probe	*probe;
3341	struct ftrace_func_entry	*probe_entry;
3342	struct trace_parser		parser;
3343	struct ftrace_hash		*hash;
3344	struct ftrace_ops		*ops;
3345	struct trace_array		*tr;
3346	struct list_head		*mod_list;
3347	int				pidx;
3348	int				idx;
3349	unsigned			flags;
3350};
3351
3352static void *
3353t_probe_next(struct seq_file *m, loff_t *pos)
3354{
3355	struct ftrace_iterator *iter = m->private;
3356	struct trace_array *tr = iter->ops->private;
3357	struct list_head *func_probes;
3358	struct ftrace_hash *hash;
3359	struct list_head *next;
3360	struct hlist_node *hnd = NULL;
3361	struct hlist_head *hhd;
3362	int size;
3363
3364	(*pos)++;
3365	iter->pos = *pos;
3366
3367	if (!tr)
3368		return NULL;
3369
3370	func_probes = &tr->func_probes;
3371	if (list_empty(func_probes))
3372		return NULL;
3373
3374	if (!iter->probe) {
3375		next = func_probes->next;
3376		iter->probe = list_entry(next, struct ftrace_func_probe, list);
3377	}
3378
3379	if (iter->probe_entry)
3380		hnd = &iter->probe_entry->hlist;
3381
3382	hash = iter->probe->ops.func_hash->filter_hash;
3383
3384	/*
3385	 * A probe being registered may temporarily have an empty hash
3386	 * and it's at the end of the func_probes list.
3387	 */
3388	if (!hash || hash == EMPTY_HASH)
3389		return NULL;
3390
3391	size = 1 << hash->size_bits;
3392
3393 retry:
3394	if (iter->pidx >= size) {
3395		if (iter->probe->list.next == func_probes)
3396			return NULL;
3397		next = iter->probe->list.next;
3398		iter->probe = list_entry(next, struct ftrace_func_probe, list);
3399		hash = iter->probe->ops.func_hash->filter_hash;
3400		size = 1 << hash->size_bits;
3401		iter->pidx = 0;
3402	}
3403
3404	hhd = &hash->buckets[iter->pidx];
3405
3406	if (hlist_empty(hhd)) {
3407		iter->pidx++;
3408		hnd = NULL;
3409		goto retry;
3410	}
3411
3412	if (!hnd)
3413		hnd = hhd->first;
3414	else {
3415		hnd = hnd->next;
3416		if (!hnd) {
3417			iter->pidx++;
3418			goto retry;
3419		}
3420	}
3421
3422	if (WARN_ON_ONCE(!hnd))
3423		return NULL;
3424
3425	iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3426
3427	return iter;
3428}
3429
3430static void *t_probe_start(struct seq_file *m, loff_t *pos)
3431{
3432	struct ftrace_iterator *iter = m->private;
3433	void *p = NULL;
3434	loff_t l;
3435
3436	if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3437		return NULL;
3438
3439	if (iter->mod_pos > *pos)
3440		return NULL;
3441
3442	iter->probe = NULL;
3443	iter->probe_entry = NULL;
3444	iter->pidx = 0;
3445	for (l = 0; l <= (*pos - iter->mod_pos); ) {
3446		p = t_probe_next(m, &l);
3447		if (!p)
3448			break;
3449	}
3450	if (!p)
3451		return NULL;
3452
3453	/* Only set this if we have an item */
3454	iter->flags |= FTRACE_ITER_PROBE;
3455
3456	return iter;
3457}
3458
3459static int
3460t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3461{
3462	struct ftrace_func_entry *probe_entry;
3463	struct ftrace_probe_ops *probe_ops;
3464	struct ftrace_func_probe *probe;
3465
3466	probe = iter->probe;
3467	probe_entry = iter->probe_entry;
3468
3469	if (WARN_ON_ONCE(!probe || !probe_entry))
3470		return -EIO;
3471
3472	probe_ops = probe->probe_ops;
3473
3474	if (probe_ops->print)
3475		return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
3476
3477	seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3478		   (void *)probe_ops->func);
3479
3480	return 0;
3481}
3482
3483static void *
3484t_mod_next(struct seq_file *m, loff_t *pos)
3485{
3486	struct ftrace_iterator *iter = m->private;
3487	struct trace_array *tr = iter->tr;
3488
3489	(*pos)++;
3490	iter->pos = *pos;
3491
3492	iter->mod_list = iter->mod_list->next;
3493
3494	if (iter->mod_list == &tr->mod_trace ||
3495	    iter->mod_list == &tr->mod_notrace) {
3496		iter->flags &= ~FTRACE_ITER_MOD;
3497		return NULL;
3498	}
3499
3500	iter->mod_pos = *pos;
3501
3502	return iter;
3503}
3504
3505static void *t_mod_start(struct seq_file *m, loff_t *pos)
3506{
3507	struct ftrace_iterator *iter = m->private;
3508	void *p = NULL;
3509	loff_t l;
3510
3511	if (iter->func_pos > *pos)
3512		return NULL;
3513
3514	iter->mod_pos = iter->func_pos;
3515
3516	/* probes are only available if tr is set */
3517	if (!iter->tr)
3518		return NULL;
3519
3520	for (l = 0; l <= (*pos - iter->func_pos); ) {
3521		p = t_mod_next(m, &l);
3522		if (!p)
3523			break;
3524	}
3525	if (!p) {
3526		iter->flags &= ~FTRACE_ITER_MOD;
3527		return t_probe_start(m, pos);
3528	}
3529
3530	/* Only set this if we have an item */
3531	iter->flags |= FTRACE_ITER_MOD;
3532
3533	return iter;
3534}
3535
3536static int
3537t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
3538{
3539	struct ftrace_mod_load *ftrace_mod;
3540	struct trace_array *tr = iter->tr;
3541
3542	if (WARN_ON_ONCE(!iter->mod_list) ||
3543			 iter->mod_list == &tr->mod_trace ||
3544			 iter->mod_list == &tr->mod_notrace)
3545		return -EIO;
3546
3547	ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
3548
3549	if (ftrace_mod->func)
3550		seq_printf(m, "%s", ftrace_mod->func);
3551	else
3552		seq_putc(m, '*');
3553
3554	seq_printf(m, ":mod:%s\n", ftrace_mod->module);
3555
3556	return 0;
3557}
3558
3559static void *
3560t_func_next(struct seq_file *m, loff_t *pos)
3561{
3562	struct ftrace_iterator *iter = m->private;
3563	struct dyn_ftrace *rec = NULL;
3564
3565	(*pos)++;
3566
3567 retry:
3568	if (iter->idx >= iter->pg->index) {
3569		if (iter->pg->next) {
3570			iter->pg = iter->pg->next;
3571			iter->idx = 0;
3572			goto retry;
3573		}
3574	} else {
3575		rec = &iter->pg->records[iter->idx++];
3576		if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3577		     !ftrace_lookup_ip(iter->hash, rec->ip)) ||
3578
3579		    ((iter->flags & FTRACE_ITER_ENABLED) &&
3580		     !(rec->flags & FTRACE_FL_ENABLED)) ||
3581
3582		    ((iter->flags & FTRACE_ITER_TOUCHED) &&
3583		     !(rec->flags & FTRACE_FL_TOUCHED))) {
3584
3585			rec = NULL;
3586			goto retry;
3587		}
3588	}
3589
3590	if (!rec)
3591		return NULL;
3592
3593	iter->pos = iter->func_pos = *pos;
3594	iter->func = rec;
3595
3596	return iter;
3597}
3598
3599static void *
3600t_next(struct seq_file *m, void *v, loff_t *pos)
3601{
3602	struct ftrace_iterator *iter = m->private;
3603	loff_t l = *pos; /* t_probe_start() must use original pos */
3604	void *ret;
3605
3606	if (unlikely(ftrace_disabled))
3607		return NULL;
3608
3609	if (iter->flags & FTRACE_ITER_PROBE)
3610		return t_probe_next(m, pos);
3611
3612	if (iter->flags & FTRACE_ITER_MOD)
3613		return t_mod_next(m, pos);
3614
3615	if (iter->flags & FTRACE_ITER_PRINTALL) {
3616		/* next must increment pos, and t_probe_start does not */
3617		(*pos)++;
3618		return t_mod_start(m, &l);
3619	}
3620
3621	ret = t_func_next(m, pos);
3622
3623	if (!ret)
3624		return t_mod_start(m, &l);
3625
3626	return ret;
3627}
3628
3629static void reset_iter_read(struct ftrace_iterator *iter)
3630{
3631	iter->pos = 0;
3632	iter->func_pos = 0;
3633	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
3634}
3635
3636static void *t_start(struct seq_file *m, loff_t *pos)
3637{
3638	struct ftrace_iterator *iter = m->private;
3639	void *p = NULL;
3640	loff_t l;
3641
3642	mutex_lock(&ftrace_lock);
3643
3644	if (unlikely(ftrace_disabled))
3645		return NULL;
3646
3647	/*
3648	 * If an lseek was done, then reset and start from beginning.
3649	 */
3650	if (*pos < iter->pos)
3651		reset_iter_read(iter);
3652
3653	/*
3654	 * For set_ftrace_filter reading, if we have the filter
3655	 * off, we can short cut and just print out that all
3656	 * functions are enabled.
3657	 */
3658	if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3659	    ftrace_hash_empty(iter->hash)) {
3660		iter->func_pos = 1; /* Account for the message */
3661		if (*pos > 0)
3662			return t_mod_start(m, pos);
3663		iter->flags |= FTRACE_ITER_PRINTALL;
3664		/* reset in case of seek/pread */
3665		iter->flags &= ~FTRACE_ITER_PROBE;
3666		return iter;
3667	}
3668
3669	if (iter->flags & FTRACE_ITER_MOD)
3670		return t_mod_start(m, pos);
3671
3672	/*
3673	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_lock. This is because
3675	 * those pointers can change without the lock.
3676	 */
3677	iter->pg = ftrace_pages_start;
3678	iter->idx = 0;
3679	for (l = 0; l <= *pos; ) {
3680		p = t_func_next(m, &l);
3681		if (!p)
3682			break;
3683	}
3684
3685	if (!p)
3686		return t_mod_start(m, pos);
3687
3688	return iter;
3689}
3690
3691static void t_stop(struct seq_file *m, void *p)
3692{
3693	mutex_unlock(&ftrace_lock);
3694}
3695
3696void * __weak
3697arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3698{
3699	return NULL;
3700}
3701
3702static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3703				struct dyn_ftrace *rec)
3704{
3705	void *ptr;
3706
3707	ptr = arch_ftrace_trampoline_func(ops, rec);
3708	if (ptr)
3709		seq_printf(m, " ->%pS", ptr);
3710}
3711
3712#ifdef FTRACE_MCOUNT_MAX_OFFSET
3713/*
3714 * Weak functions can still have an mcount/fentry that is saved in
3715 * the __mcount_loc section. These can be detected by having a
 * symbol offset greater than FTRACE_MCOUNT_MAX_OFFSET, as the
3717 * symbol found by kallsyms is not the function that the mcount/fentry
3718 * is part of. The offset is much greater in these cases.
3719 *
3720 * Test the record to make sure that the ip points to a valid kallsyms
3721 * and if not, mark it disabled.
3722 */
3723static int test_for_valid_rec(struct dyn_ftrace *rec)
3724{
3725	char str[KSYM_SYMBOL_LEN];
3726	unsigned long offset;
3727	const char *ret;
3728
3729	ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str);
3730
3731	/* Weak functions can cause invalid addresses */
3732	if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
3733		rec->flags |= FTRACE_FL_DISABLED;
3734		return 0;
3735	}
3736	return 1;
3737}
3738
3739static struct workqueue_struct *ftrace_check_wq __initdata;
3740static struct work_struct ftrace_check_work __initdata;
3741
3742/*
3743 * Scan all the mcount/fentry entries to make sure they are valid.
3744 */
3745static __init void ftrace_check_work_func(struct work_struct *work)
3746{
3747	struct ftrace_page *pg;
3748	struct dyn_ftrace *rec;
3749
3750	mutex_lock(&ftrace_lock);
3751	do_for_each_ftrace_rec(pg, rec) {
3752		test_for_valid_rec(rec);
3753	} while_for_each_ftrace_rec();
3754	mutex_unlock(&ftrace_lock);
3755}
3756
3757static int __init ftrace_check_for_weak_functions(void)
3758{
3759	INIT_WORK(&ftrace_check_work, ftrace_check_work_func);
3760
3761	ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0);
3762
3763	queue_work(ftrace_check_wq, &ftrace_check_work);
3764	return 0;
3765}
3766
3767static int __init ftrace_check_sync(void)
3768{
3769	/* Make sure the ftrace_check updates are finished */
3770	if (ftrace_check_wq)
3771		destroy_workqueue(ftrace_check_wq);
3772	return 0;
3773}
3774
3775late_initcall_sync(ftrace_check_sync);
3776subsys_initcall(ftrace_check_for_weak_functions);
3777
3778static int print_rec(struct seq_file *m, unsigned long ip)
3779{
3780	unsigned long offset;
3781	char str[KSYM_SYMBOL_LEN];
3782	char *modname;
3783	const char *ret;
3784
3785	ret = kallsyms_lookup(ip, NULL, &offset, &modname, str);
3786	/* Weak functions can cause invalid addresses */
3787	if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
3788		snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld",
3789			 FTRACE_INVALID_FUNCTION, offset);
3790		ret = NULL;
3791	}
3792
3793	seq_puts(m, str);
3794	if (modname)
3795		seq_printf(m, " [%s]", modname);
3796	return ret == NULL ? -1 : 0;
3797}
3798#else
3799static inline int test_for_valid_rec(struct dyn_ftrace *rec)
3800{
3801	return 1;
3802}
3803
3804static inline int print_rec(struct seq_file *m, unsigned long ip)
3805{
3806	seq_printf(m, "%ps", (void *)ip);
3807	return 0;
3808}
3809#endif
3810
3811static int t_show(struct seq_file *m, void *v)
3812{
3813	struct ftrace_iterator *iter = m->private;
3814	struct dyn_ftrace *rec;
3815
3816	if (iter->flags & FTRACE_ITER_PROBE)
3817		return t_probe_show(m, iter);
3818
3819	if (iter->flags & FTRACE_ITER_MOD)
3820		return t_mod_show(m, iter);
3821
3822	if (iter->flags & FTRACE_ITER_PRINTALL) {
3823		if (iter->flags & FTRACE_ITER_NOTRACE)
3824			seq_puts(m, "#### no functions disabled ####\n");
3825		else
3826			seq_puts(m, "#### all functions enabled ####\n");
3827		return 0;
3828	}
3829
3830	rec = iter->func;
3831
3832	if (!rec)
3833		return 0;
3834
3835	if (iter->flags & FTRACE_ITER_ADDRS)
3836		seq_printf(m, "%lx ", rec->ip);
3837
3838	if (print_rec(m, rec->ip)) {
3839		/* This should only happen when a rec is disabled */
3840		WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED));
3841		seq_putc(m, '\n');
3842		return 0;
3843	}
3844
3845	if (iter->flags & (FTRACE_ITER_ENABLED | FTRACE_ITER_TOUCHED)) {
3846		struct ftrace_ops *ops;
3847
3848		seq_printf(m, " (%ld)%s%s%s%s%s",
3849			   ftrace_rec_count(rec),
3850			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
3851			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ",
3852			   rec->flags & FTRACE_FL_DIRECT ? " D" : "  ",
3853			   rec->flags & FTRACE_FL_CALL_OPS ? " O" : "  ",
3854			   rec->flags & FTRACE_FL_MODIFIED ? " M " : "   ");
3855		if (rec->flags & FTRACE_FL_TRAMP_EN) {
3856			ops = ftrace_find_tramp_ops_any(rec);
3857			if (ops) {
3858				do {
3859					seq_printf(m, "\ttramp: %pS (%pS)",
3860						   (void *)ops->trampoline,
3861						   (void *)ops->func);
3862					add_trampoline_func(m, ops, rec);
3863					ops = ftrace_find_tramp_ops_next(rec, ops);
3864				} while (ops);
3865			} else
3866				seq_puts(m, "\ttramp: ERROR!");
3867		} else {
3868			add_trampoline_func(m, NULL, rec);
3869		}
3870		if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
3871			ops = ftrace_find_unique_ops(rec);
3872			if (ops) {
3873				seq_printf(m, "\tops: %pS (%pS)",
3874					   ops, ops->func);
3875			} else {
3876				seq_puts(m, "\tops: ERROR!");
3877			}
3878		}
3879		if (rec->flags & FTRACE_FL_DIRECT) {
3880			unsigned long direct;
3881
3882			direct = ftrace_find_rec_direct(rec->ip);
3883			if (direct)
3884				seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
3885		}
3886	}
3887
3888	seq_putc(m, '\n');
3889
3890	return 0;
3891}
3892
3893static const struct seq_operations show_ftrace_seq_ops = {
3894	.start = t_start,
3895	.next = t_next,
3896	.stop = t_stop,
3897	.show = t_show,
3898};
3899
3900static int
3901ftrace_avail_open(struct inode *inode, struct file *file)
3902{
3903	struct ftrace_iterator *iter;
3904	int ret;
3905
3906	ret = security_locked_down(LOCKDOWN_TRACEFS);
3907	if (ret)
3908		return ret;
3909
3910	if (unlikely(ftrace_disabled))
3911		return -ENODEV;
3912
3913	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3914	if (!iter)
3915		return -ENOMEM;
3916
3917	iter->pg = ftrace_pages_start;
3918	iter->ops = &global_ops;
3919
3920	return 0;
3921}
3922
3923static int
3924ftrace_enabled_open(struct inode *inode, struct file *file)
3925{
3926	struct ftrace_iterator *iter;
3927
3928	/*
3929	 * This shows us what functions are currently being
3930	 * traced and by what. Not sure if we want lockdown
3931	 * to hide such critical information for an admin.
	 * to hide such critical information from an admin.
3933	 * want people to see, but if something is tracing
3934	 * something, we probably want to know about it.
3935	 */
3936
3937	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3938	if (!iter)
3939		return -ENOMEM;
3940
3941	iter->pg = ftrace_pages_start;
3942	iter->flags = FTRACE_ITER_ENABLED;
3943	iter->ops = &global_ops;
3944
3945	return 0;
3946}
3947
3948static int
3949ftrace_touched_open(struct inode *inode, struct file *file)
3950{
3951	struct ftrace_iterator *iter;
3952
3953	/*
3954	 * This shows us what functions have ever been enabled
3955	 * (traced, direct, patched, etc). Not sure if we want lockdown
	 * to hide such critical information from an admin.
3957	 * Although, perhaps it can show information we don't
3958	 * want people to see, but if something had traced
3959	 * something, we probably want to know about it.
3960	 */
3961
3962	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3963	if (!iter)
3964		return -ENOMEM;
3965
3966	iter->pg = ftrace_pages_start;
3967	iter->flags = FTRACE_ITER_TOUCHED;
3968	iter->ops = &global_ops;
3969
3970	return 0;
3971}
3972
3973static int
3974ftrace_avail_addrs_open(struct inode *inode, struct file *file)
3975{
3976	struct ftrace_iterator *iter;
3977	int ret;
3978
3979	ret = security_locked_down(LOCKDOWN_TRACEFS);
3980	if (ret)
3981		return ret;
3982
3983	if (unlikely(ftrace_disabled))
3984		return -ENODEV;
3985
3986	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3987	if (!iter)
3988		return -ENOMEM;
3989
3990	iter->pg = ftrace_pages_start;
3991	iter->flags = FTRACE_ITER_ADDRS;
3992	iter->ops = &global_ops;
3993
3994	return 0;
3995}
3996
3997/**
3998 * ftrace_regex_open - initialize function tracer filter files
3999 * @ops: The ftrace_ops that hold the hash filters
4000 * @flag: The type of filter to process
4001 * @inode: The inode, usually passed in to your open routine
4002 * @file: The file, usually passed in to your open routine
4003 *
4004 * ftrace_regex_open() initializes the filter files for the
4005 * @ops. Depending on @flag it may process the filter hash or
4006 * the notrace hash of @ops. With this called from the open
4007 * routine, you can use ftrace_filter_write() for the write
4008 * routine if @flag has FTRACE_ITER_FILTER set, or
4009 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
4010 * tracing_lseek() should be used as the lseek routine, and
4011 * release must call ftrace_regex_release().
4012 *
4013 * Returns: 0 on success or a negative errno value on failure
4014 */
4015int
4016ftrace_regex_open(struct ftrace_ops *ops, int flag,
4017		  struct inode *inode, struct file *file)
4018{
4019	struct ftrace_iterator *iter;
4020	struct ftrace_hash *hash;
4021	struct list_head *mod_head;
4022	struct trace_array *tr = ops->private;
4023	int ret = -ENOMEM;
4024
4025	ftrace_ops_init(ops);
4026
4027	if (unlikely(ftrace_disabled))
4028		return -ENODEV;
4029
4030	if (tracing_check_open_get_tr(tr))
4031		return -ENODEV;
4032
4033	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4034	if (!iter)
4035		goto out;
4036
4037	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
4038		goto out;
4039
4040	iter->ops = ops;
4041	iter->flags = flag;
4042	iter->tr = tr;
4043
4044	mutex_lock(&ops->func_hash->regex_lock);
4045
4046	if (flag & FTRACE_ITER_NOTRACE) {
4047		hash = ops->func_hash->notrace_hash;
4048		mod_head = tr ? &tr->mod_notrace : NULL;
4049	} else {
4050		hash = ops->func_hash->filter_hash;
4051		mod_head = tr ? &tr->mod_trace : NULL;
4052	}
4053
4054	iter->mod_list = mod_head;
4055
4056	if (file->f_mode & FMODE_WRITE) {
4057		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
4058
4059		if (file->f_flags & O_TRUNC) {
4060			iter->hash = alloc_ftrace_hash(size_bits);
4061			clear_ftrace_mod_list(mod_head);
		} else {
4063			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
4064		}
4065
4066		if (!iter->hash) {
4067			trace_parser_put(&iter->parser);
4068			goto out_unlock;
4069		}
4070	} else
4071		iter->hash = hash;
4072
4073	ret = 0;
4074
4075	if (file->f_mode & FMODE_READ) {
4076		iter->pg = ftrace_pages_start;
4077
4078		ret = seq_open(file, &show_ftrace_seq_ops);
4079		if (!ret) {
4080			struct seq_file *m = file->private_data;
4081			m->private = iter;
4082		} else {
4083			/* Failed */
4084			free_ftrace_hash(iter->hash);
4085			trace_parser_put(&iter->parser);
4086		}
4087	} else
4088		file->private_data = iter;
4089
4090 out_unlock:
4091	mutex_unlock(&ops->func_hash->regex_lock);
4092
4093 out:
4094	if (ret) {
4095		kfree(iter);
4096		if (tr)
4097			trace_array_put(tr);
4098	}
4099
4100	return ret;
4101}
4102
4103static int
4104ftrace_filter_open(struct inode *inode, struct file *file)
4105{
4106	struct ftrace_ops *ops = inode->i_private;
4107
4108	/* Checks for tracefs lockdown */
4109	return ftrace_regex_open(ops,
4110			FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
4111			inode, file);
4112}
4113
4114static int
4115ftrace_notrace_open(struct inode *inode, struct file *file)
4116{
4117	struct ftrace_ops *ops = inode->i_private;
4118
4119	/* Checks for tracefs lockdown */
4120	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
4121				 inode, file);
4122}
4123
4124/* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
4125struct ftrace_glob {
4126	char *search;
4127	unsigned len;
4128	int type;
4129};
4130
4131/*
4132 * If symbols in an architecture don't correspond exactly to the user-visible
4133 * name of what they represent, it is possible to define this function to
4134 * perform the necessary adjustments.
4135 */
4136char * __weak arch_ftrace_match_adjust(char *str, const char *search)
4137{
4138	return str;
4139}
4140
4141static int ftrace_match(char *str, struct ftrace_glob *g)
4142{
4143	int matched = 0;
4144	int slen;
4145
4146	str = arch_ftrace_match_adjust(str, g->search);
4147
4148	switch (g->type) {
4149	case MATCH_FULL:
4150		if (strcmp(str, g->search) == 0)
4151			matched = 1;
4152		break;
4153	case MATCH_FRONT_ONLY:
4154		if (strncmp(str, g->search, g->len) == 0)
4155			matched = 1;
4156		break;
4157	case MATCH_MIDDLE_ONLY:
4158		if (strstr(str, g->search))
4159			matched = 1;
4160		break;
4161	case MATCH_END_ONLY:
4162		slen = strlen(str);
4163		if (slen >= g->len &&
4164		    memcmp(str + slen - g->len, g->search, g->len) == 0)
4165			matched = 1;
4166		break;
4167	case MATCH_GLOB:
4168		if (glob_match(g->search, str))
4169			matched = 1;
4170		break;
4171	}
4172
4173	return matched;
4174}
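
/*
 * A sketch of how filter_parse_regex() is expected to classify the
 * patterns that reach ftrace_match() (see trace_events_filter.c for the
 * authoritative parsing; the symbol names below are only examples):
 *
 *	"schedule"	-> MATCH_FULL        (exact strcmp())
 *	"sched*"	-> MATCH_FRONT_ONLY  (strncmp() on the prefix)
 *	"*timer"	-> MATCH_END_ONLY    (memcmp() on the suffix)
 *	"*lock*"	-> MATCH_MIDDLE_ONLY (strstr() substring search)
 *	"rcu_?ark"	-> MATCH_GLOB        (full glob_match())
 *	"42"		-> MATCH_INDEX       (handled by add_rec_by_index())
 */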
4175
4176static int
4177enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
4178{
4179	struct ftrace_func_entry *entry;
4180	int ret = 0;
4181
4182	entry = ftrace_lookup_ip(hash, rec->ip);
4183	if (clear_filter) {
4184		/* Do nothing if it doesn't exist */
4185		if (!entry)
4186			return 0;
4187
4188		free_hash_entry(hash, entry);
4189	} else {
4190		/* Do nothing if it exists */
4191		if (entry)
4192			return 0;
4193		if (add_hash_entry(hash, rec->ip) == NULL)
4194			ret = -ENOMEM;
4195	}
4196	return ret;
4197}
4198
4199static int
4200add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
4201		 int clear_filter)
4202{
4203	long index = simple_strtoul(func_g->search, NULL, 0);
4204	struct ftrace_page *pg;
4205	struct dyn_ftrace *rec;
4206
4207	/* The index starts at 1 */
4208	if (--index < 0)
4209		return 0;
4210
4211	do_for_each_ftrace_rec(pg, rec) {
4212		if (pg->index <= index) {
4213			index -= pg->index;
4214			/* this is a double loop, break goes to the next page */
4215			break;
4216		}
4217		rec = &pg->records[index];
4218		enter_record(hash, rec, clear_filter);
4219		return 1;
4220	} while_for_each_ftrace_rec();
4221	return 0;
4222}
4223
4224#ifdef FTRACE_MCOUNT_MAX_OFFSET
4225static int lookup_ip(unsigned long ip, char **modname, char *str)
4226{
4227	unsigned long offset;
4228
4229	kallsyms_lookup(ip, NULL, &offset, modname, str);
4230	if (offset > FTRACE_MCOUNT_MAX_OFFSET)
4231		return -1;
4232	return 0;
4233}
4234#else
4235static int lookup_ip(unsigned long ip, char **modname, char *str)
4236{
4237	kallsyms_lookup(ip, NULL, NULL, modname, str);
4238	return 0;
4239}
4240#endif
4241
4242static int
4243ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
4244		struct ftrace_glob *mod_g, int exclude_mod)
4245{
4246	char str[KSYM_SYMBOL_LEN];
4247	char *modname;
4248
4249	if (lookup_ip(rec->ip, &modname, str)) {
4250		/* This should only happen when a rec is disabled */
4251		WARN_ON_ONCE(system_state == SYSTEM_RUNNING &&
4252			     !(rec->flags & FTRACE_FL_DISABLED));
4253		return 0;
4254	}
4255
4256	if (mod_g) {
4257		int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
4258
4259		/* blank module name to match all modules */
4260		if (!mod_g->len) {
4261			/* blank module globbing: modname xor exclude_mod */
4262			if (!exclude_mod != !modname)
4263				goto func_match;
4264			return 0;
4265		}
4266
4267		/*
4268		 * exclude_mod is set to trace everything but the given
4269		 * module. If it is set and the module matches, then
4270		 * return 0. If it is not set and the module doesn't match,
4271		 * also return 0. Otherwise, check the function to see if
4272		 * that matches.
4273		 */
4274		if (!mod_matches == !exclude_mod)
4275			return 0;
4276func_match:
4277		/* blank search means to match all funcs in the mod */
4278		if (!func_g->len)
4279			return 1;
4280	}
4281
4282	return ftrace_match(str, func_g);
4283}
4284
4285static int
4286match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
4287{
4288	struct ftrace_page *pg;
4289	struct dyn_ftrace *rec;
4290	struct ftrace_glob func_g = { .type = MATCH_FULL };
4291	struct ftrace_glob mod_g = { .type = MATCH_FULL };
4292	struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
4293	int exclude_mod = 0;
4294	int found = 0;
4295	int ret;
4296	int clear_filter = 0;
4297
4298	if (func) {
4299		func_g.type = filter_parse_regex(func, len, &func_g.search,
4300						 &clear_filter);
4301		func_g.len = strlen(func_g.search);
4302	}
4303
4304	if (mod) {
4305		mod_g.type = filter_parse_regex(mod, strlen(mod),
4306				&mod_g.search, &exclude_mod);
4307		mod_g.len = strlen(mod_g.search);
4308	}
4309
4310	mutex_lock(&ftrace_lock);
4311
4312	if (unlikely(ftrace_disabled))
4313		goto out_unlock;
4314
4315	if (func_g.type == MATCH_INDEX) {
4316		found = add_rec_by_index(hash, &func_g, clear_filter);
4317		goto out_unlock;
4318	}
4319
4320	do_for_each_ftrace_rec(pg, rec) {
4321
4322		if (rec->flags & FTRACE_FL_DISABLED)
4323			continue;
4324
4325		if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
4326			ret = enter_record(hash, rec, clear_filter);
4327			if (ret < 0) {
4328				found = ret;
4329				goto out_unlock;
4330			}
4331			found = 1;
4332		}
4333		cond_resched();
4334	} while_for_each_ftrace_rec();
4335 out_unlock:
4336	mutex_unlock(&ftrace_lock);
4337
4338	return found;
4339}
4340
4341static int
4342ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
4343{
4344	return match_records(hash, buff, len, NULL);
4345}
4346
4347static void ftrace_ops_update_code(struct ftrace_ops *ops,
4348				   struct ftrace_ops_hash *old_hash)
4349{
4350	struct ftrace_ops *op;
4351
4352	if (!ftrace_enabled)
4353		return;
4354
4355	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4356		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4357		return;
4358	}
4359
4360	/*
4361	 * If this is the shared global_ops filter, then we need to
4362	 * check if another ops that shares it is enabled.
4363	 * If so, we still need to run the modify code.
4364	 */
4365	if (ops->func_hash != &global_ops.local_hash)
4366		return;
4367
4368	do_for_each_ftrace_op(op, ftrace_ops_list) {
4369		if (op->func_hash == &global_ops.local_hash &&
4370		    op->flags & FTRACE_OPS_FL_ENABLED) {
4371			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4372			/* Only need to do this once */
4373			return;
4374		}
4375	} while_for_each_ftrace_op(op);
4376}
4377
4378static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
4379					   struct ftrace_hash **orig_hash,
4380					   struct ftrace_hash *hash,
4381					   int enable)
4382{
4383	struct ftrace_ops_hash old_hash_ops;
4384	struct ftrace_hash *old_hash;
4385	int ret;
4386
4387	old_hash = *orig_hash;
4388	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4389	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
4390	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4391	if (!ret) {
4392		ftrace_ops_update_code(ops, &old_hash_ops);
4393		free_ftrace_hash_rcu(old_hash);
4394	}
4395	return ret;
4396}
4397
4398static bool module_exists(const char *module)
4399{
4400	/* All modules have the symbol __this_module */
4401	static const char this_mod[] = "__this_module";
4402	char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
4403	unsigned long val;
4404	int n;
4405
4406	n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
4407
4408	if (n > sizeof(modname) - 1)
4409		return false;
4410
4411	val = module_kallsyms_lookup_name(modname);
4412	return val != 0;
4413}
4414
4415static int cache_mod(struct trace_array *tr,
4416		     const char *func, char *module, int enable)
4417{
4418	struct ftrace_mod_load *ftrace_mod, *n;
4419	struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
4420	int ret;
4421
4422	mutex_lock(&ftrace_lock);
4423
4424	/* We do not cache inverse filters */
4425	if (func[0] == '!') {
4426		func++;
4427		ret = -EINVAL;
4428
4429		/* Look to remove this hash */
4430		list_for_each_entry_safe(ftrace_mod, n, head, list) {
4431			if (strcmp(ftrace_mod->module, module) != 0)
4432				continue;
4433
4434			/* no func matches all */
4435			if (strcmp(func, "*") == 0 ||
4436			    (ftrace_mod->func &&
4437			     strcmp(ftrace_mod->func, func) == 0)) {
4438				ret = 0;
4439				free_ftrace_mod(ftrace_mod);
4440				continue;
4441			}
4442		}
4443		goto out;
4444	}
4445
4446	ret = -EINVAL;
4447	/* We only care about modules that have not been loaded yet */
4448	if (module_exists(module))
4449		goto out;
4450
4451	/* Save this string off, and execute it when the module is loaded */
4452	ret = ftrace_add_mod(tr, func, module, enable);
4453 out:
4454	mutex_unlock(&ftrace_lock);
4455
4456	return ret;
4457}
4458
4459static int
4460ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4461		 int reset, int enable);
4462
4463#ifdef CONFIG_MODULES
4464static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
4465			     char *mod, bool enable)
4466{
4467	struct ftrace_mod_load *ftrace_mod, *n;
4468	struct ftrace_hash **orig_hash, *new_hash;
4469	LIST_HEAD(process_mods);
4470	char *func;
4471
4472	mutex_lock(&ops->func_hash->regex_lock);
4473
4474	if (enable)
4475		orig_hash = &ops->func_hash->filter_hash;
4476	else
4477		orig_hash = &ops->func_hash->notrace_hash;
4478
4479	new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
4480					      *orig_hash);
4481	if (!new_hash)
4482		goto out; /* warn? */
4483
4484	mutex_lock(&ftrace_lock);
4485
4486	list_for_each_entry_safe(ftrace_mod, n, head, list) {
4487
4488		if (strcmp(ftrace_mod->module, mod) != 0)
4489			continue;
4490
4491		if (ftrace_mod->func)
4492			func = kstrdup(ftrace_mod->func, GFP_KERNEL);
4493		else
4494			func = kstrdup("*", GFP_KERNEL);
4495
4496		if (!func) /* warn? */
4497			continue;
4498
4499		list_move(&ftrace_mod->list, &process_mods);
4500
4501		/* Use the newly allocated func, as it may be "*" */
4502		kfree(ftrace_mod->func);
4503		ftrace_mod->func = func;
4504	}
4505
4506	mutex_unlock(&ftrace_lock);
4507
4508	list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
4509
4510		func = ftrace_mod->func;
4511
4512		/* Grabs ftrace_lock, which is why we have this extra step */
4513		match_records(new_hash, func, strlen(func), mod);
4514		free_ftrace_mod(ftrace_mod);
4515	}
4516
4517	if (enable && list_empty(head))
4518		new_hash->flags &= ~FTRACE_HASH_FL_MOD;
4519
4520	mutex_lock(&ftrace_lock);
4521
4522	ftrace_hash_move_and_update_ops(ops, orig_hash,
4523					      new_hash, enable);
4524	mutex_unlock(&ftrace_lock);
4525
4526 out:
4527	mutex_unlock(&ops->func_hash->regex_lock);
4528
4529	free_ftrace_hash(new_hash);
4530}
4531
4532static void process_cached_mods(const char *mod_name)
4533{
4534	struct trace_array *tr;
4535	char *mod;
4536
4537	mod = kstrdup(mod_name, GFP_KERNEL);
4538	if (!mod)
4539		return;
4540
4541	mutex_lock(&trace_types_lock);
4542	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4543		if (!list_empty(&tr->mod_trace))
4544			process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4545		if (!list_empty(&tr->mod_notrace))
4546			process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4547	}
4548	mutex_unlock(&trace_types_lock);
4549
4550	kfree(mod);
4551}
4552#endif
4553
4554/*
4555 * We register the module command as a template to show others how
4556 * to register a command as well.
4557 */
4558
4559static int
4560ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
4561		    char *func_orig, char *cmd, char *module, int enable)
4562{
4563	char *func;
4564	int ret;
4565
4566	/* match_records() modifies func, and we need the original */
4567	func = kstrdup(func_orig, GFP_KERNEL);
4568	if (!func)
4569		return -ENOMEM;
4570
4571	/*
4572	 * cmd == 'mod' because we only registered this func
4573	 * for the 'mod' ftrace_func_command.
4574	 * But if you register one func with multiple commands,
4575	 * you can tell which command was used by the cmd
4576	 * parameter.
4577	 */
4578	ret = match_records(hash, func, strlen(func), module);
4579	kfree(func);
4580
4581	if (!ret)
4582		return cache_mod(tr, func_orig, module, enable);
4583	if (ret < 0)
4584		return ret;
4585	return 0;
4586}
4587
4588static struct ftrace_func_command ftrace_mod_cmd = {
4589	.name			= "mod",
4590	.func			= ftrace_mod_callback,
4591};
4592
4593static int __init ftrace_mod_cmd_init(void)
4594{
4595	return register_ftrace_command(&ftrace_mod_cmd);
4596}
4597core_initcall(ftrace_mod_cmd_init);
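
/*
 * Example usage of the "mod" command from user space (a sketch; the
 * module names are only examples, and tracefs is assumed to be mounted
 * at /sys/kernel/tracing):
 *
 *	# trace all functions of an already loaded module
 *	echo '*:mod:ext4' > /sys/kernel/tracing/set_ftrace_filter
 *
 *	# for a module that is not loaded yet, cache_mod() stores the
 *	# filter and process_cached_mods() applies it at load time
 *	echo 'igb_*:mod:igb' > /sys/kernel/tracing/set_ftrace_filter
 */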
4598
4599static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
4600				      struct ftrace_ops *op, struct ftrace_regs *fregs)
4601{
4602	struct ftrace_probe_ops *probe_ops;
4603	struct ftrace_func_probe *probe;
4604
4605	probe = container_of(op, struct ftrace_func_probe, ops);
4606	probe_ops = probe->probe_ops;
4607
4608	/*
4609	 * Disable preemption for these calls to prevent an RCU grace
4610	 * period. This syncs the hash iteration and freeing of items
4611	 * on the hash. rcu_read_lock is too dangerous here.
4612	 */
4613	preempt_disable_notrace();
4614	probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
4615	preempt_enable_notrace();
4616}
4617
4618struct ftrace_func_map {
4619	struct ftrace_func_entry	entry;
4620	void				*data;
4621};
4622
4623struct ftrace_func_mapper {
4624	struct ftrace_hash		hash;
4625};
4626
4627/**
4628 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
4629 *
4630 * Returns: a ftrace_func_mapper descriptor that can be used to map ips to data.
4631 */
4632struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
4633{
4634	struct ftrace_hash *hash;
4635
4636	/*
4637	 * The mapper is simply a ftrace_hash, but since the entries
4638	 * in the hash are not ftrace_func_entry type, we define it
4639	 * as a separate structure.
4640	 */
4641	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4642	return (struct ftrace_func_mapper *)hash;
4643}
4644
4645/**
4646 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
4647 * @mapper: The mapper that has the ip maps
4648 * @ip: the instruction pointer to find the data for
4649 *
4650 * Returns: the data mapped to @ip if found, otherwise NULL. The return
4651 * is actually the address of the mapper data pointer. The address is
4652 * returned for use cases where the data is no bigger than a long, and
4653 * the user can use the data pointer as its data instead of having to
4654 * allocate more memory for the reference.
4655 */
4656void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
4657				  unsigned long ip)
4658{
4659	struct ftrace_func_entry *entry;
4660	struct ftrace_func_map *map;
4661
4662	entry = ftrace_lookup_ip(&mapper->hash, ip);
4663	if (!entry)
4664		return NULL;
4665
4666	map = (struct ftrace_func_map *)entry;
4667	return &map->data;
4668}
4669
4670/**
4671 * ftrace_func_mapper_add_ip - Map some data to an ip
4672 * @mapper: The mapper that has the ip maps
4673 * @ip: The instruction pointer address to map @data to
4674 * @data: The data to map to @ip
4675 *
4676 * Returns: 0 on success otherwise an error.
4677 */
4678int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
4679			      unsigned long ip, void *data)
4680{
4681	struct ftrace_func_entry *entry;
4682	struct ftrace_func_map *map;
4683
4684	entry = ftrace_lookup_ip(&mapper->hash, ip);
4685	if (entry)
4686		return -EBUSY;
4687
4688	map = kmalloc(sizeof(*map), GFP_KERNEL);
4689	if (!map)
4690		return -ENOMEM;
4691
4692	map->entry.ip = ip;
4693	map->data = data;
4694
4695	__add_hash_entry(&mapper->hash, &map->entry);
4696
4697	return 0;
4698}
4699
4700/**
4701 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
4702 * @mapper: The mapper that has the ip maps
4703 * @ip: The instruction pointer address to remove the data from
4704 *
4705 * Returns: the data if it is found, otherwise NULL.
4706 * Note, if the data pointer is used as the data itself (see
4707 * ftrace_func_mapper_find_ip()), then the return value may be
4708 * meaningless if the data pointer was set to zero.
4709 */
4710void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
4711				   unsigned long ip)
4712{
4713	struct ftrace_func_entry *entry;
4714	struct ftrace_func_map *map;
4715	void *data;
4716
4717	entry = ftrace_lookup_ip(&mapper->hash, ip);
4718	if (!entry)
4719		return NULL;
4720
4721	map = (struct ftrace_func_map *)entry;
4722	data = map->data;
4723
4724	remove_hash_entry(&mapper->hash, entry);
4725	kfree(entry);
4726
4727	return data;
4728}
4729
4730/**
4731 * free_ftrace_func_mapper - free a mapping of ips and data
4732 * @mapper: The mapper that has the ip maps
4733 * @free_func: A function to be called on each data item.
4734 *
4735 * This is used to free the function mapper. The @free_func is optional
4736 * and can be used if the data needs to be freed as well.
4737 */
4738void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
4739			     ftrace_mapper_func free_func)
4740{
4741	struct ftrace_func_entry *entry;
4742	struct ftrace_func_map *map;
4743	struct hlist_head *hhd;
4744	int size, i;
4745
4746	if (!mapper)
4747		return;
4748
4749	if (free_func && mapper->hash.count) {
4750		size = 1 << mapper->hash.size_bits;
4751		for (i = 0; i < size; i++) {
4752			hhd = &mapper->hash.buckets[i];
4753			hlist_for_each_entry(entry, hhd, hlist) {
4754				map = (struct ftrace_func_map *)entry;
4755				free_func(map);
4756			}
4757		}
4758	}
4759	free_ftrace_hash(&mapper->hash);
4760}
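
/*
 * A minimal usage sketch of the mapper API above (the ip value is
 * assumed to come from the caller, e.g. a probe callback). Because the
 * data here is no bigger than a long, it is stored in the data pointer
 * itself, and no free_func is needed:
 *
 *	struct ftrace_func_mapper *mapper;
 *	void **data;
 *
 *	mapper = allocate_ftrace_func_mapper();
 *	if (!mapper)
 *		return -ENOMEM;
 *
 *	ftrace_func_mapper_add_ip(mapper, ip, (void *)1UL);
 *
 *	data = ftrace_func_mapper_find_ip(mapper, ip);
 *	if (data)
 *		(*(unsigned long *)data)++;
 *
 *	free_ftrace_func_mapper(mapper, NULL);
 */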
4761
4762static void release_probe(struct ftrace_func_probe *probe)
4763{
4764	struct ftrace_probe_ops *probe_ops;
4765
4766	mutex_lock(&ftrace_lock);
4767
4768	WARN_ON(probe->ref <= 0);
4769
4770	/* Subtract the ref that was used to protect this instance */
4771	probe->ref--;
4772
4773	if (!probe->ref) {
4774		probe_ops = probe->probe_ops;
4775		/*
4776		 * Sending zero as ip tells probe_ops to free
4777		 * the probe->data itself
4778		 */
4779		if (probe_ops->free)
4780			probe_ops->free(probe_ops, probe->tr, 0, probe->data);
4781		list_del(&probe->list);
4782		kfree(probe);
4783	}
4784	mutex_unlock(&ftrace_lock);
4785}
4786
4787static void acquire_probe_locked(struct ftrace_func_probe *probe)
4788{
4789	/*
4790	 * Add one ref to keep it from being freed when releasing the
4791	 * ftrace_lock mutex.
4792	 */
4793	probe->ref++;
4794}
4795
4796int
4797register_ftrace_function_probe(char *glob, struct trace_array *tr,
4798			       struct ftrace_probe_ops *probe_ops,
4799			       void *data)
4800{
4801	struct ftrace_func_probe *probe = NULL, *iter;
4802	struct ftrace_func_entry *entry;
4803	struct ftrace_hash **orig_hash;
4804	struct ftrace_hash *old_hash;
4805	struct ftrace_hash *hash;
4806	int count = 0;
4807	int size;
4808	int ret;
4809	int i;
4810
4811	if (WARN_ON(!tr))
4812		return -EINVAL;
4813
4814	/* We do not support '!' for function probes */
4815	if (WARN_ON(glob[0] == '!'))
4816		return -EINVAL;
4817
4818
4819	mutex_lock(&ftrace_lock);
4820	/* Check if the probe_ops is already registered */
4821	list_for_each_entry(iter, &tr->func_probes, list) {
4822		if (iter->probe_ops == probe_ops) {
4823			probe = iter;
4824			break;
4825		}
4826	}
4827	if (!probe) {
4828		probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4829		if (!probe) {
4830			mutex_unlock(&ftrace_lock);
4831			return -ENOMEM;
4832		}
4833		probe->probe_ops = probe_ops;
4834		probe->ops.func = function_trace_probe_call;
4835		probe->tr = tr;
4836		ftrace_ops_init(&probe->ops);
4837		list_add(&probe->list, &tr->func_probes);
4838	}
4839
4840	acquire_probe_locked(probe);
4841
4842	mutex_unlock(&ftrace_lock);
4843
4844	/*
4845	 * Note, there's a small window here in which func_hash->filter_hash
4846	 * may be NULL or empty. Need to be careful when reading the loop.
4847	 */
4848	mutex_lock(&probe->ops.func_hash->regex_lock);
4849
4850	orig_hash = &probe->ops.func_hash->filter_hash;
4851	old_hash = *orig_hash;
4852	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4853
4854	if (!hash) {
4855		ret = -ENOMEM;
4856		goto out;
4857	}
4858
4859	ret = ftrace_match_records(hash, glob, strlen(glob));
4860
4861	/* Nothing found? */
4862	if (!ret)
4863		ret = -EINVAL;
4864
4865	if (ret < 0)
4866		goto out;
4867
4868	size = 1 << hash->size_bits;
4869	for (i = 0; i < size; i++) {
4870		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4871			if (ftrace_lookup_ip(old_hash, entry->ip))
4872				continue;
4873			/*
4874			 * The caller might want to do something special
4875			 * for each function we find. We call the callback
4876			 * to give the caller an opportunity to do so.
4877			 */
4878			if (probe_ops->init) {
4879				ret = probe_ops->init(probe_ops, tr,
4880						      entry->ip, data,
4881						      &probe->data);
4882				if (ret < 0) {
4883					if (probe_ops->free && count)
4884						probe_ops->free(probe_ops, tr,
4885								0, probe->data);
4886					probe->data = NULL;
4887					goto out;
4888				}
4889			}
4890			count++;
4891		}
4892	}
4893
4894	mutex_lock(&ftrace_lock);
4895
4896	if (!count) {
4897		/* Nothing was added? */
4898		ret = -EINVAL;
4899		goto out_unlock;
4900	}
4901
4902	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4903					      hash, 1);
4904	if (ret < 0)
4905		goto err_unlock;
4906
4907	/* One ref for each new function traced */
4908	probe->ref += count;
4909
4910	if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4911		ret = ftrace_startup(&probe->ops, 0);
4912
4913 out_unlock:
4914	mutex_unlock(&ftrace_lock);
4915
4916	if (!ret)
4917		ret = count;
4918 out:
4919	mutex_unlock(&probe->ops.func_hash->regex_lock);
4920	free_ftrace_hash(hash);
4921
4922	release_probe(probe);
4923
4924	return ret;
4925
4926 err_unlock:
4927	if (!probe_ops->free || !count)
4928		goto out_unlock;
4929
4930	/* Failed to do the move, need to call the free functions */
4931	for (i = 0; i < size; i++) {
4932		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4933			if (ftrace_lookup_ip(old_hash, entry->ip))
4934				continue;
4935			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4936		}
4937	}
4938	goto out_unlock;
4939}
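
/*
 * A sketch of a caller of register_ftrace_function_probe() (all names
 * are hypothetical). The .func callback fires from
 * function_trace_probe_call() for every function matching the glob:
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  struct trace_array *tr,
 *				  struct ftrace_probe_ops *ops, void *data)
 *	{
 *		// react to the traced function here
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe_func,
 *	};
 *
 *	ret = register_ftrace_function_probe("sched*", tr, &my_probe_ops, NULL);
 *	// on success, ret is the number of functions attached
 */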
4940
4941int
4942unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4943				      struct ftrace_probe_ops *probe_ops)
4944{
4945	struct ftrace_func_probe *probe = NULL, *iter;
4946	struct ftrace_ops_hash old_hash_ops;
4947	struct ftrace_func_entry *entry;
4948	struct ftrace_glob func_g;
4949	struct ftrace_hash **orig_hash;
4950	struct ftrace_hash *old_hash;
4951	struct ftrace_hash *hash = NULL;
4952	struct hlist_node *tmp;
4953	struct hlist_head hhd;
4954	char str[KSYM_SYMBOL_LEN];
4955	int count = 0;
4956	int i, ret = -ENODEV;
4957	int size;
4958
4959	if (!glob || !strlen(glob) || !strcmp(glob, "*"))
4960		func_g.search = NULL;
4961	else {
4962		int not;
4963
4964		func_g.type = filter_parse_regex(glob, strlen(glob),
4965						 &func_g.search, &not);
4966		func_g.len = strlen(func_g.search);
4967
4968		/* we do not support '!' for function probes */
4969		if (WARN_ON(not))
4970			return -EINVAL;
4971	}
4972
4973	mutex_lock(&ftrace_lock);
4974	/* Check if the probe_ops is already registered */
4975	list_for_each_entry(iter, &tr->func_probes, list) {
4976		if (iter->probe_ops == probe_ops) {
4977			probe = iter;
4978			break;
4979		}
4980	}
4981	if (!probe)
4982		goto err_unlock_ftrace;
4983
4984	ret = -EINVAL;
4985	if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4986		goto err_unlock_ftrace;
4987
4988	acquire_probe_locked(probe);
4989
4990	mutex_unlock(&ftrace_lock);
4991
4992	mutex_lock(&probe->ops.func_hash->regex_lock);
4993
4994	orig_hash = &probe->ops.func_hash->filter_hash;
4995	old_hash = *orig_hash;
4996
4997	if (ftrace_hash_empty(old_hash))
4998		goto out_unlock;
4999
5000	old_hash_ops.filter_hash = old_hash;
5001	/* Probes only have filters */
5002	old_hash_ops.notrace_hash = NULL;
5003
5004	ret = -ENOMEM;
5005	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
5006	if (!hash)
5007		goto out_unlock;
5008
5009	INIT_HLIST_HEAD(&hhd);
5010
5011	size = 1 << hash->size_bits;
5012	for (i = 0; i < size; i++) {
5013		hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
5014
5015			if (func_g.search) {
5016				kallsyms_lookup(entry->ip, NULL, NULL,
5017						NULL, str);
5018				if (!ftrace_match(str, &func_g))
5019					continue;
5020			}
5021			count++;
5022			remove_hash_entry(hash, entry);
5023			hlist_add_head(&entry->hlist, &hhd);
5024		}
5025	}
5026
5027	/* Nothing found? */
5028	if (!count) {
5029		ret = -EINVAL;
5030		goto out_unlock;
5031	}
5032
5033	mutex_lock(&ftrace_lock);
5034
5035	WARN_ON(probe->ref < count);
5036
5037	probe->ref -= count;
5038
5039	if (ftrace_hash_empty(hash))
5040		ftrace_shutdown(&probe->ops, 0);
5041
5042	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
5043					      hash, 1);
5044
5045	/* still need to update the function call sites */
5046	if (ftrace_enabled && !ftrace_hash_empty(hash))
5047		ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
5048				       &old_hash_ops);
5049	synchronize_rcu();
5050
5051	hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
5052		hlist_del(&entry->hlist);
5053		if (probe_ops->free)
5054			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
5055		kfree(entry);
5056	}
5057	mutex_unlock(&ftrace_lock);
5058
5059 out_unlock:
5060	mutex_unlock(&probe->ops.func_hash->regex_lock);
5061	free_ftrace_hash(hash);
5062
5063	release_probe(probe);
5064
5065	return ret;
5066
5067 err_unlock_ftrace:
5068	mutex_unlock(&ftrace_lock);
5069	return ret;
5070}
5071
5072void clear_ftrace_function_probes(struct trace_array *tr)
5073{
5074	struct ftrace_func_probe *probe, *n;
5075
5076	list_for_each_entry_safe(probe, n, &tr->func_probes, list)
5077		unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
5078}
5079
5080static LIST_HEAD(ftrace_commands);
5081static DEFINE_MUTEX(ftrace_cmd_mutex);
5082
5083/*
5084 * Currently we only register ftrace commands from __init, so mark this
5085 * __init too.
5086 */
5087__init int register_ftrace_command(struct ftrace_func_command *cmd)
5088{
5089	struct ftrace_func_command *p;
5090	int ret = 0;
5091
5092	mutex_lock(&ftrace_cmd_mutex);
5093	list_for_each_entry(p, &ftrace_commands, list) {
5094		if (strcmp(cmd->name, p->name) == 0) {
5095			ret = -EBUSY;
5096			goto out_unlock;
5097		}
5098	}
5099	list_add(&cmd->list, &ftrace_commands);
5100 out_unlock:
5101	mutex_unlock(&ftrace_cmd_mutex);
5102
5103	return ret;
5104}
5105
5106/*
5107 * Currently we only unregister ftrace commands from __init, so mark
5108 * this __init too.
5109 */
5110__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
5111{
5112	struct ftrace_func_command *p, *n;
5113	int ret = -ENODEV;
5114
5115	mutex_lock(&ftrace_cmd_mutex);
5116	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
5117		if (strcmp(cmd->name, p->name) == 0) {
5118			ret = 0;
5119			list_del_init(&p->list);
5120			goto out_unlock;
5121		}
5122	}
5123 out_unlock:
5124	mutex_unlock(&ftrace_cmd_mutex);
5125
5126	return ret;
5127}
5128
5129static int ftrace_process_regex(struct ftrace_iterator *iter,
5130				char *buff, int len, int enable)
5131{
5132	struct ftrace_hash *hash = iter->hash;
5133	struct trace_array *tr = iter->ops->private;
5134	char *func, *command, *next = buff;
5135	struct ftrace_func_command *p;
5136	int ret = -EINVAL;
5137
5138	func = strsep(&next, ":");
5139
5140	if (!next) {
5141		ret = ftrace_match_records(hash, func, len);
5142		if (!ret)
5143			ret = -EINVAL;
5144		if (ret < 0)
5145			return ret;
5146		return 0;
5147	}
5148
5149	/* command found */
5150
5151	command = strsep(&next, ":");
5152
5153	mutex_lock(&ftrace_cmd_mutex);
5154	list_for_each_entry(p, &ftrace_commands, list) {
5155		if (strcmp(p->name, command) == 0) {
5156			ret = p->func(tr, hash, func, command, next, enable);
5157			goto out_unlock;
5158		}
5159	}
5160 out_unlock:
5161	mutex_unlock(&ftrace_cmd_mutex);
5162
5163	return ret;
5164}
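
/*
 * The strings parsed above come from set_ftrace_filter writes and have
 * the form "func[:command[:arg]]". Some examples using documented
 * commands (a sketch; run from the tracefs mount point):
 *
 *	echo 'schedule:traceoff' > set_ftrace_filter
 *	echo 'vfs_read:snapshot' > set_ftrace_filter
 *	echo '*:mod:ext4' > set_ftrace_filter
 */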
5165
5166static ssize_t
5167ftrace_regex_write(struct file *file, const char __user *ubuf,
5168		   size_t cnt, loff_t *ppos, int enable)
5169{
5170	struct ftrace_iterator *iter;
5171	struct trace_parser *parser;
5172	ssize_t ret, read;
5173
5174	if (!cnt)
5175		return 0;
5176
5177	if (file->f_mode & FMODE_READ) {
5178		struct seq_file *m = file->private_data;
5179		iter = m->private;
5180	} else
5181		iter = file->private_data;
5182
5183	if (unlikely(ftrace_disabled))
5184		return -ENODEV;
5185
5186	/* iter->hash is a local copy, so we don't need regex_lock */
5187
5188	parser = &iter->parser;
5189	read = trace_get_user(parser, ubuf, cnt, ppos);
5190
5191	if (read >= 0 && trace_parser_loaded(parser) &&
5192	    !trace_parser_cont(parser)) {
5193		ret = ftrace_process_regex(iter, parser->buffer,
5194					   parser->idx, enable);
5195		trace_parser_clear(parser);
5196		if (ret < 0)
5197			goto out;
5198	}
5199
5200	ret = read;
5201 out:
5202	return ret;
5203}
5204
5205ssize_t
5206ftrace_filter_write(struct file *file, const char __user *ubuf,
5207		    size_t cnt, loff_t *ppos)
5208{
5209	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
5210}
5211
5212ssize_t
5213ftrace_notrace_write(struct file *file, const char __user *ubuf,
5214		     size_t cnt, loff_t *ppos)
5215{
5216	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
5217}
5218
5219static int
5220__ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
5221{
5222	struct ftrace_func_entry *entry;
5223
5224	ip = ftrace_location(ip);
5225	if (!ip)
5226		return -EINVAL;
5227
5228	if (remove) {
5229		entry = ftrace_lookup_ip(hash, ip);
5230		if (!entry)
5231			return -ENOENT;
5232		free_hash_entry(hash, entry);
5233		return 0;
5234	}
5235
5236	entry = add_hash_entry(hash, ip);
5237	return entry ? 0 : -ENOMEM;
5238}
5239
5240static int
5241ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips,
5242		  unsigned int cnt, int remove)
5243{
5244	unsigned int i;
5245	int err;
5246
5247	for (i = 0; i < cnt; i++) {
5248		err = __ftrace_match_addr(hash, ips[i], remove);
5249		if (err) {
5250			/*
5251			 * This expects @hash to be a temporary hash, and if this
5252			 * fails the caller must free @hash.
5253			 */
5254			return err;
5255		}
5256	}
5257	return 0;
5258}
5259
5260static int
5261ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
5262		unsigned long *ips, unsigned int cnt,
5263		int remove, int reset, int enable)
5264{
5265	struct ftrace_hash **orig_hash;
5266	struct ftrace_hash *hash;
5267	int ret;
5268
5269	if (unlikely(ftrace_disabled))
5270		return -ENODEV;
5271
5272	mutex_lock(&ops->func_hash->regex_lock);
5273
5274	if (enable)
5275		orig_hash = &ops->func_hash->filter_hash;
5276	else
5277		orig_hash = &ops->func_hash->notrace_hash;
5278
5279	if (reset)
5280		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5281	else
5282		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
5283
5284	if (!hash) {
5285		ret = -ENOMEM;
5286		goto out_regex_unlock;
5287	}
5288
5289	if (buf && !ftrace_match_records(hash, buf, len)) {
5290		ret = -EINVAL;
5291		goto out_regex_unlock;
5292	}
5293	if (ips) {
5294		ret = ftrace_match_addr(hash, ips, cnt, remove);
5295		if (ret < 0)
5296			goto out_regex_unlock;
5297	}
5298
5299	mutex_lock(&ftrace_lock);
5300	ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
5301	mutex_unlock(&ftrace_lock);
5302
5303 out_regex_unlock:
5304	mutex_unlock(&ops->func_hash->regex_lock);
5305
5306	free_ftrace_hash(hash);
5307	return ret;
5308}
5309
5310static int
5311ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt,
5312		int remove, int reset, int enable)
5313{
5314	return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable);
5315}
5316
5317#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
5318
5319static int register_ftrace_function_nolock(struct ftrace_ops *ops);
5320
5321/*
5322 * If there are multiple ftrace_ops, use SAVE_REGS by default, so that the
5323 * direct call will be jumped to from ftrace_regs_caller. Only if the
5324 * architecture does not support ftrace_regs_caller but does support direct
5325 * calls, use SAVE_ARGS so that the jump comes from ftrace_caller instead.
5326 */
5327#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
5328#define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS)
5329#else
5330#define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS)
5331#endif
5332
5333static int check_direct_multi(struct ftrace_ops *ops)
5334{
5335	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5336		return -EINVAL;
5337	if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS)
5338		return -EINVAL;
5339	return 0;
5340}
5341
5342static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr)
5343{
5344	struct ftrace_func_entry *entry, *del;
5345	int size, i;
5346
5347	size = 1 << hash->size_bits;
5348	for (i = 0; i < size; i++) {
5349		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5350			del = __ftrace_lookup_ip(direct_functions, entry->ip);
5351			if (del && del->direct == addr) {
5352				remove_hash_entry(direct_functions, del);
5353				kfree(del);
5354			}
5355		}
5356	}
5357}
5358
5359static void register_ftrace_direct_cb(struct rcu_head *rhp)
5360{
5361	struct ftrace_hash *fhp = container_of(rhp, struct ftrace_hash, rcu);
5362
5363	free_ftrace_hash(fhp);
5364}
5365
5366/**
5367 * register_ftrace_direct - Call a custom trampoline directly
5368 * for multiple functions registered in @ops
5369 * @ops: The address of the struct ftrace_ops object
5370 * @addr: The address of the trampoline to call at @ops functions
5371 *
5372 * This is used to connect direct calls to @addr from the nop locations
5373 * of the functions registered in @ops (set by the ftrace_set_filter_ip()
5374 * function).
5375 *
5376 * The location that it calls (@addr) must be able to handle a direct call,
5377 * and save the parameters of the function being traced, and restore them
5378 * (or inject new ones if needed), before returning.
5379 *
5380 * Returns:
5381 *  0 on success
5382 *  -EINVAL  - The @ops object was already registered with this call or
5383 *             when there are no functions in @ops object.
5384 *  -EBUSY   - Another direct function is already attached (there can be only one)
5385 *  -ENODEV  - an entry in @ops does not point to a ftrace nop location (or not supported)
5386 *  -ENOMEM  - There was an allocation failure.
5387 */
5388int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
5389{
5390	struct ftrace_hash *hash, *new_hash = NULL, *free_hash = NULL;
5391	struct ftrace_func_entry *entry, *new;
5392	int err = -EBUSY, size, i;
5393
5394	if (ops->func || ops->trampoline)
5395		return -EINVAL;
5396	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5397		return -EINVAL;
5398	if (ops->flags & FTRACE_OPS_FL_ENABLED)
5399		return -EINVAL;
5400
5401	hash = ops->func_hash->filter_hash;
5402	if (ftrace_hash_empty(hash))
5403		return -EINVAL;
5404
5405	mutex_lock(&direct_mutex);
5406
5407	/* Make sure requested entries are not already registered. */
5408	size = 1 << hash->size_bits;
5409	for (i = 0; i < size; i++) {
5410		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5411			if (ftrace_find_rec_direct(entry->ip))
5412				goto out_unlock;
5413		}
5414	}
5415
5416	err = -ENOMEM;
5417
5418	/* Make a copy of the hash to place the new and the old entries in */
5419	size = hash->count + direct_functions->count;
5420	if (size > 32)
5421		size = 32;
5422	new_hash = alloc_ftrace_hash(fls(size));
5423	if (!new_hash)
5424		goto out_unlock;
5425
5426	/* Now copy over the existing direct entries */
5427	size = 1 << direct_functions->size_bits;
5428	for (i = 0; i < size; i++) {
5429		hlist_for_each_entry(entry, &direct_functions->buckets[i], hlist) {
5430			new = add_hash_entry(new_hash, entry->ip);
5431			if (!new)
5432				goto out_unlock;
5433			new->direct = entry->direct;
5434		}
5435	}
5436
5437	/* ... and add the new entries */
5438	size = 1 << hash->size_bits;
5439	for (i = 0; i < size; i++) {
5440		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5441			new = add_hash_entry(new_hash, entry->ip);
5442			if (!new)
5443				goto out_unlock;
5444			/* Update both the copy and the hash entry */
5445			new->direct = addr;
5446			entry->direct = addr;
5447		}
5448	}
5449
5450	free_hash = direct_functions;
5451	rcu_assign_pointer(direct_functions, new_hash);
5452	new_hash = NULL;
5453
5454	ops->func = call_direct_funcs;
5455	ops->flags = MULTI_FLAGS;
5456	ops->trampoline = FTRACE_REGS_ADDR;
5457	ops->direct_call = addr;
5458
5459	err = register_ftrace_function_nolock(ops);
5460
5461 out_unlock:
5462	mutex_unlock(&direct_mutex);
5463
5464	if (free_hash && free_hash != EMPTY_HASH)
5465		call_rcu_tasks(&free_hash->rcu, register_ftrace_direct_cb);
5466
5467	if (new_hash)
5468		free_ftrace_hash(new_hash);
5469
5470	return err;
5471}
5472EXPORT_SYMBOL_GPL(register_ftrace_direct);
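
/*
 * A sketch of registering a direct trampoline (my_func and my_tramp
 * are hypothetical; the trampoline itself is architecture-specific
 * assembly that must preserve the traced function's arguments, see
 * samples/ftrace/ for working examples):
 *
 *	static struct ftrace_ops direct_ops;
 *
 *	ret = ftrace_set_filter_ip(&direct_ops, (unsigned long)my_func, 0, 0);
 *	if (!ret)
 *		ret = register_ftrace_direct(&direct_ops,
 *					     (unsigned long)my_tramp);
 */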
5473
5474/**
5475 * unregister_ftrace_direct - Remove calls to custom trampoline
5476 * previously registered by register_ftrace_direct for @ops object.
5477 * @ops: The address of the struct ftrace_ops object
5478 * @addr: The address of the direct trampoline that was called by the @ops functions
5479 * @free_filters: Set to true to also free all the filters of @ops
5480 *
5481 * This is used to remove direct calls to @addr from the nop locations
5482 * of the functions registered in @ops (set by the ftrace_set_filter_ip()
5483 * function).
5482 *
5483 * Returns:
5484 *  0 on success
5485 *  -EINVAL - The @ops object was not properly registered.
5486 */
5487int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
5488			     bool free_filters)
5489{
5490	struct ftrace_hash *hash = ops->func_hash->filter_hash;
5491	int err;
5492
5493	if (check_direct_multi(ops))
5494		return -EINVAL;
5495	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5496		return -EINVAL;
5497
5498	mutex_lock(&direct_mutex);
5499	err = unregister_ftrace_function(ops);
5500	remove_direct_functions_hash(hash, addr);
5501	mutex_unlock(&direct_mutex);
5502
5503	/* clean up for a possible later register call */
5504	ops->func = NULL;
5505	ops->trampoline = 0;
5506
5507	if (free_filters)
5508		ftrace_free_filter(ops);
5509	return err;
5510}
5511EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
5512
5513static int
5514__modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
5515{
5516	struct ftrace_hash *hash;
5517	struct ftrace_func_entry *entry, *iter;
5518	static struct ftrace_ops tmp_ops = {
5519		.func		= ftrace_stub,
5520		.flags		= FTRACE_OPS_FL_STUB,
5521	};
5522	int i, size;
5523	int err;
5524
5525	lockdep_assert_held_once(&direct_mutex);
5526
5527	/* Enable the tmp_ops to have the same functions as the direct ops */
5528	ftrace_ops_init(&tmp_ops);
5529	tmp_ops.func_hash = ops->func_hash;
5530	tmp_ops.direct_call = addr;
5531
5532	err = register_ftrace_function_nolock(&tmp_ops);
5533	if (err)
5534		return err;
5535
5536	/*
5537	 * Now ftrace_ops_list_func() is called to handle the direct callers.
5538	 * We can safely change the direct functions attached to each entry.
5539	 */
5540	mutex_lock(&ftrace_lock);
5541
5542	hash = ops->func_hash->filter_hash;
5543	size = 1 << hash->size_bits;
5544	for (i = 0; i < size; i++) {
5545		hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
5546			entry = __ftrace_lookup_ip(direct_functions, iter->ip);
5547			if (!entry)
5548				continue;
5549			entry->direct = addr;
5550		}
5551	}
5552	/* Prevent store tearing if a trampoline concurrently accesses the value */
5553	WRITE_ONCE(ops->direct_call, addr);
5554
5555	mutex_unlock(&ftrace_lock);
5556
5557	/* Removing the tmp_ops will add the updated direct callers to the functions */
5558	unregister_ftrace_function(&tmp_ops);
5559
5560	return err;
5561}
5562
5563/**
5564 * modify_ftrace_direct_nolock - Modify an existing direct 'multi' call
5565 * to call something else
5566 * @ops: The address of the struct ftrace_ops object
5567 * @addr: The address of the new trampoline to call at @ops functions
5568 *
5569 * This is used to unregister the currently registered direct caller and
5570 * register a new one, @addr, on the functions registered in the @ops object.
5571 *
5572 * Note there's a window between the ftrace_shutdown and ftrace_startup
5573 * calls where no callbacks will be called.
5574 *
5575 * Caller should already have direct_mutex locked, so we don't lock
5576 * direct_mutex here.
5577 *
5578 * Returns: zero on success. Non zero on error, which includes:
5579 *  -EINVAL - The @ops object was not properly registered.
5580 */
5581int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
5582{
5583	if (check_direct_multi(ops))
5584		return -EINVAL;
5585	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5586		return -EINVAL;
5587
5588	return __modify_ftrace_direct(ops, addr);
5589}
5590EXPORT_SYMBOL_GPL(modify_ftrace_direct_nolock);
5591
5592/**
5593 * modify_ftrace_direct - Modify an existing direct 'multi' call
5594 * to call something else
5595 * @ops: The address of the struct ftrace_ops object
5596 * @addr: The address of the new trampoline to call at @ops functions
5597 *
5598 * This is used to unregister the currently registered direct caller and
5599 * register a new one, @addr, on the functions registered in the @ops object.
5600 *
5601 * Note there's a window between the ftrace_shutdown and ftrace_startup
5602 * calls where no callbacks will be called.
5603 *
5604 * Returns: zero on success. Non zero on error, which includes:
5605 *  -EINVAL - The @ops object was not properly registered.
5606 */
5607int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
5608{
5609	int err;
5610
5611	if (check_direct_multi(ops))
5612		return -EINVAL;
5613	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5614		return -EINVAL;
5615
5616	mutex_lock(&direct_mutex);
5617	err = __modify_ftrace_direct(ops, addr);
5618	mutex_unlock(&direct_mutex);
5619	return err;
5620}
5621EXPORT_SYMBOL_GPL(modify_ftrace_direct);
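
/*
 * Continuing the register_ftrace_direct() sketch above: all functions
 * attached to direct_ops can later be retargeted to a second
 * hypothetical trampoline without unregistering:
 *
 *	ret = modify_ftrace_direct(&direct_ops, (unsigned long)my_tramp2);
 */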
5622#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
5623
5624/**
5625 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
5626 * @ops: the ops to set the filter with
5627 * @ip: the address to add to or remove from the filter.
5628 * @remove: non zero to remove the ip from the filter
5629 * @reset: non zero to reset all filters before applying this filter.
5630 *
5631 * Filters denote which functions should be enabled when tracing is enabled.
5632 * If @ip is NULL, it fails to update the filter.
5633 *
5634 * This can allocate memory which must be freed before @ops can be freed,
5635 * either by removing each filtered addr or by using
5636 * ftrace_free_filter(@ops).
5637 */
5638int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
5639			 int remove, int reset)
5640{
5641	ftrace_ops_init(ops);
5642	return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
5643}
5644EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
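
/*
 * A minimal sketch of using ftrace_set_filter_ip() together with
 * register_ftrace_function() (my_callback is hypothetical; @ip would
 * typically come from kallsyms_lookup_name() or a kprobe):
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *	};
 *
 *	ret = ftrace_set_filter_ip(&my_ops, ip, 0, 0);
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 */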
5645
5646/**
5647 * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
5648 * @ops: the ops to set the filter with
5649 * @ips: the array of addresses to add to or remove from the filter.
5650 * @cnt: the number of addresses in @ips
5651 * @remove: non zero to remove ips from the filter
5652 * @reset: non zero to reset all filters before applying this filter.
5653 *
5654 * Filters denote which functions should be enabled when tracing is enabled.
5655 * If the @ips array or any ip specified within is NULL, it fails to update the filter.
5656 *
5657 * This can allocate memory which must be freed before @ops can be freed,
5658 * either by removing each filtered addr or by using
5659 * ftrace_free_filter(@ops).
5660 */
5661int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
5662			  unsigned int cnt, int remove, int reset)
5663{
5664	ftrace_ops_init(ops);
5665	return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
5666}
5667EXPORT_SYMBOL_GPL(ftrace_set_filter_ips);
5668
5669/**
5670 * ftrace_ops_set_global_filter - setup ops to use global filters
5671 * @ops: the ops which will use the global filters
5672 *
5673 * ftrace users who need global function trace filtering should call this.
5674 * It can set the global filter only if ops were not initialized before.
5675 */
5676void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
5677{
5678	if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
5679		return;
5680
5681	ftrace_ops_init(ops);
5682	ops->func_hash = &global_ops.local_hash;
5683}
5684EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
5685
5686static int
5687ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
5688		 int reset, int enable)
5689{
5690	return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable);
5691}
5692
5693/**
5694 * ftrace_set_filter - set a function to filter on in ftrace
5695 * @ops: the ops to set the filter with
5696 * @buf: the string that holds the function filter text.
5697 * @len: the length of the string.
5698 * @reset: non-zero to reset all filters before applying this filter.
5699 *
5700 * Filters denote which functions should be enabled when tracing is enabled.
5701 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
5702 *
5703 * This can allocate memory which must be freed before @ops can be freed,
5704 * either by removing each filtered addr or by using
5705 * ftrace_free_filter(@ops).
5706 */
5707int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
5708		       int len, int reset)
5709{
5710	ftrace_ops_init(ops);
5711	return ftrace_set_regex(ops, buf, len, reset, 1);
5712}
5713EXPORT_SYMBOL_GPL(ftrace_set_filter);
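
/*
 * Example (a sketch): restrict my_ops to all functions whose names
 * start with "vfs_", dropping any previously set filter. Note the
 * buffer must be writable, as the glob is parsed in place:
 *
 *	char buf[] = "vfs_*";
 *
 *	ret = ftrace_set_filter(&my_ops, buf, strlen(buf), 1);
 */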
5714
5715/**
5716 * ftrace_set_notrace - set a function to not trace in ftrace
5717 * @ops: the ops to set the notrace filter with
5718 * @buf: the string that holds the function notrace text.
5719 * @len: the length of the string.
5720 * @reset: non-zero to reset all filters before applying this filter.
5721 *
5722 * Notrace Filters denote which functions should not be enabled when tracing
5723 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5724 * for tracing.
5725 *
5726 * This can allocate memory which must be freed before @ops can be freed,
5727 * either by removing each filtered addr or by using
5728 * ftrace_free_filter(@ops).
5729 */
5730int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
5731			int len, int reset)
5732{
5733	ftrace_ops_init(ops);
5734	return ftrace_set_regex(ops, buf, len, reset, 0);
5735}
5736EXPORT_SYMBOL_GPL(ftrace_set_notrace);

5737/**
5738 * ftrace_set_global_filter - set a function to filter on with global tracers
5739 * @buf: the string that holds the function filter text.
5740 * @len: the length of the string.
5741 * @reset: non-zero to reset all filters before applying this filter.
5742 *
5743 * Filters denote which functions should be enabled when tracing is enabled.
5744 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
5745 */
5746void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
5747{
5748	ftrace_set_regex(&global_ops, buf, len, reset, 1);
5749}
5750EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
5751
5752/**
5753 * ftrace_set_global_notrace - set a function to not trace with global tracers
5754 * @buf: the string that holds the function notrace text.
5755 * @len: the length of the string.
5756 * @reset: non-zero to reset all filters before applying this filter.
5757 *
5758 * Notrace Filters denote which functions should not be enabled when tracing
5759 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5760 * for tracing.
5761 */
5762void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
5763{
5764	ftrace_set_regex(&global_ops, buf, len, reset, 0);
5765}
5766EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
5767
5768/*
5769 * command line interface to allow users to set filters on boot up.
5770 */
5771#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
5772static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5773static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
5774
5775/* Used by the function selftest to avoid testing when a filter is set */
5776bool ftrace_filter_param __initdata;
5777
5778static int __init set_ftrace_notrace(char *str)
5779{
5780	ftrace_filter_param = true;
5781	strscpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
5782	return 1;
5783}
5784__setup("ftrace_notrace=", set_ftrace_notrace);
5785
5786static int __init set_ftrace_filter(char *str)
5787{
5788	ftrace_filter_param = true;
5789	strscpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
5790	return 1;
5791}
5792__setup("ftrace_filter=", set_ftrace_filter);
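
/*
 * Example kernel command line using the parameters above (both take
 * comma-separated patterns; see
 * Documentation/admin-guide/kernel-parameters.txt):
 *
 *	ftrace_filter=vfs_read,vfs_write ftrace_notrace=*spin_lock*
 */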
5793
5794#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5795static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
5796static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5797static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
5798
5799static int __init set_graph_function(char *str)
5800{
5801	strscpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
5802	return 1;
5803}
5804__setup("ftrace_graph_filter=", set_graph_function);
5805
5806static int __init set_graph_notrace_function(char *str)
5807{
5808	strscpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
5809	return 1;
5810}
5811__setup("ftrace_graph_notrace=", set_graph_notrace_function);
5812
5813static int __init set_graph_max_depth_function(char *str)
5814{
5815	if (!str || kstrtouint(str, 0, &fgraph_max_depth))
5816		return 0;
5817	return 1;
5818}
5819__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
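
/*
 * Example kernel command line for the graph tracer parameters above:
 *
 *	ftrace_graph_filter=kmem_cache_alloc ftrace_graph_max_depth=4
 */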
5820
5821static void __init set_ftrace_early_graph(char *buf, int enable)
5822{
5823	int ret;
5824	char *func;
5825	struct ftrace_hash *hash;
5826
5827	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5828	if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
5829		return;
5830
5831	while (buf) {
5832		func = strsep(&buf, ",");
5833		/* we allow only one expression at a time */
5834		ret = ftrace_graph_set_hash(hash, func);
5835		if (ret)
5836			printk(KERN_DEBUG "ftrace: function %s not traceable\n",
5837			       func);
5838	}
5839
5840	if (enable)
5841		ftrace_graph_hash = hash;
5842	else
5843		ftrace_graph_notrace_hash = hash;
5844}
5845#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5846
5847void __init
5848ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
5849{
5850	char *func;
5851
5852	ftrace_ops_init(ops);
5853
5854	while (buf) {
5855		func = strsep(&buf, ",");
5856		ftrace_set_regex(ops, func, strlen(func), 0, enable);
5857	}
5858}
5859
5860static void __init set_ftrace_early_filters(void)
5861{
5862	if (ftrace_filter_buf[0])
5863		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
5864	if (ftrace_notrace_buf[0])
5865		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
5866#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5867	if (ftrace_graph_buf[0])
5868		set_ftrace_early_graph(ftrace_graph_buf, 1);
5869	if (ftrace_graph_notrace_buf[0])
5870		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
5871#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5872}
5873
5874int ftrace_regex_release(struct inode *inode, struct file *file)
5875{
5876	struct seq_file *m = (struct seq_file *)file->private_data;
5877	struct ftrace_iterator *iter;
5878	struct ftrace_hash **orig_hash;
5879	struct trace_parser *parser;
5880	int filter_hash;
5881
5882	if (file->f_mode & FMODE_READ) {
5883		iter = m->private;
5884		seq_release(inode, file);
5885	} else
5886		iter = file->private_data;
5887
5888	parser = &iter->parser;
5889	if (trace_parser_loaded(parser)) {
5890		int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
5891
5892		ftrace_process_regex(iter, parser->buffer,
5893				     parser->idx, enable);
5894	}
5895
5896	trace_parser_put(parser);
5897
5898	mutex_lock(&iter->ops->func_hash->regex_lock);
5899
5900	if (file->f_mode & FMODE_WRITE) {
5901		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
5902
5903		if (filter_hash) {
5904			orig_hash = &iter->ops->func_hash->filter_hash;
5905			if (iter->tr) {
5906				if (list_empty(&iter->tr->mod_trace))
5907					iter->hash->flags &= ~FTRACE_HASH_FL_MOD;
5908				else
5909					iter->hash->flags |= FTRACE_HASH_FL_MOD;
5910			}
5911		} else
5912			orig_hash = &iter->ops->func_hash->notrace_hash;
5913
5914		mutex_lock(&ftrace_lock);
5915		ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
5916						      iter->hash, filter_hash);
5917		mutex_unlock(&ftrace_lock);
5918	} else {
5919		/* For read only, the hash is the ops hash */
5920		iter->hash = NULL;
5921	}
5922
5923	mutex_unlock(&iter->ops->func_hash->regex_lock);
5924	free_ftrace_hash(iter->hash);
5925	if (iter->tr)
5926		trace_array_put(iter->tr);
5927	kfree(iter);
5928
5929	return 0;
5930}
5931
5932static const struct file_operations ftrace_avail_fops = {
5933	.open = ftrace_avail_open,
5934	.read = seq_read,
5935	.llseek = seq_lseek,
5936	.release = seq_release_private,
5937};
5938
5939static const struct file_operations ftrace_enabled_fops = {
5940	.open = ftrace_enabled_open,
5941	.read = seq_read,
5942	.llseek = seq_lseek,
5943	.release = seq_release_private,
5944};
5945
5946static const struct file_operations ftrace_touched_fops = {
5947	.open = ftrace_touched_open,
5948	.read = seq_read,
5949	.llseek = seq_lseek,
5950	.release = seq_release_private,
5951};
5952
5953static const struct file_operations ftrace_avail_addrs_fops = {
5954	.open = ftrace_avail_addrs_open,
5955	.read = seq_read,
5956	.llseek = seq_lseek,
5957	.release = seq_release_private,
5958};
5959
5960static const struct file_operations ftrace_filter_fops = {
5961	.open = ftrace_filter_open,
5962	.read = seq_read,
5963	.write = ftrace_filter_write,
5964	.llseek = tracing_lseek,
5965	.release = ftrace_regex_release,
5966};
5967
5968static const struct file_operations ftrace_notrace_fops = {
5969	.open = ftrace_notrace_open,
5970	.read = seq_read,
5971	.write = ftrace_notrace_write,
5972	.llseek = tracing_lseek,
5973	.release = ftrace_regex_release,
5974};
5975
5976#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5977
5978static DEFINE_MUTEX(graph_lock);
5979
5980struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
5981struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
5982
5983enum graph_filter_type {
5984	GRAPH_FILTER_NOTRACE	= 0,
5985	GRAPH_FILTER_FUNCTION,
5986};
5987
5988#define FTRACE_GRAPH_EMPTY	((void *)1)
5989
5990struct ftrace_graph_data {
5991	struct ftrace_hash		*hash;
5992	struct ftrace_func_entry	*entry;
5993	int				idx;   /* for hash table iteration */
5994	enum graph_filter_type		type;
5995	struct ftrace_hash		*new_hash;
5996	const struct seq_operations	*seq_ops;
5997	struct trace_parser		parser;
5998};
5999
6000static void *
6001__g_next(struct seq_file *m, loff_t *pos)
6002{
6003	struct ftrace_graph_data *fgd = m->private;
6004	struct ftrace_func_entry *entry = fgd->entry;
6005	struct hlist_head *head;
6006	int i, idx = fgd->idx;
6007
6008	if (*pos >= fgd->hash->count)
6009		return NULL;
6010
6011	if (entry) {
6012		hlist_for_each_entry_continue(entry, hlist) {
6013			fgd->entry = entry;
6014			return entry;
6015		}
6016
6017		idx++;
6018	}
6019
6020	for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
6021		head = &fgd->hash->buckets[i];
6022		hlist_for_each_entry(entry, head, hlist) {
6023			fgd->entry = entry;
6024			fgd->idx = i;
6025			return entry;
6026		}
6027	}
6028	return NULL;
6029}
6030
6031static void *
6032g_next(struct seq_file *m, void *v, loff_t *pos)
6033{
6034	(*pos)++;
6035	return __g_next(m, pos);
6036}
6037
6038static void *g_start(struct seq_file *m, loff_t *pos)
6039{
6040	struct ftrace_graph_data *fgd = m->private;
6041
6042	mutex_lock(&graph_lock);
6043
6044	if (fgd->type == GRAPH_FILTER_FUNCTION)
6045		fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6046					lockdep_is_held(&graph_lock));
6047	else
6048		fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6049					lockdep_is_held(&graph_lock));
6050
6051	/* Nothing in the hash, tell g_show to print that all functions are enabled */
6052	if (ftrace_hash_empty(fgd->hash) && !*pos)
6053		return FTRACE_GRAPH_EMPTY;
6054
6055	fgd->idx = 0;
6056	fgd->entry = NULL;
6057	return __g_next(m, pos);
6058}
6059
6060static void g_stop(struct seq_file *m, void *p)
6061{
6062	mutex_unlock(&graph_lock);
6063}
6064
6065static int g_show(struct seq_file *m, void *v)
6066{
6067	struct ftrace_func_entry *entry = v;
6068
6069	if (!entry)
6070		return 0;
6071
6072	if (entry == FTRACE_GRAPH_EMPTY) {
6073		struct ftrace_graph_data *fgd = m->private;
6074
6075		if (fgd->type == GRAPH_FILTER_FUNCTION)
6076			seq_puts(m, "#### all functions enabled ####\n");
6077		else
6078			seq_puts(m, "#### no functions disabled ####\n");
6079		return 0;
6080	}
6081
6082	seq_printf(m, "%ps\n", (void *)entry->ip);
6083
6084	return 0;
6085}
6086
6087static const struct seq_operations ftrace_graph_seq_ops = {
6088	.start = g_start,
6089	.next = g_next,
6090	.stop = g_stop,
6091	.show = g_show,
6092};
6093
6094static int
6095__ftrace_graph_open(struct inode *inode, struct file *file,
6096		    struct ftrace_graph_data *fgd)
6097{
6098	int ret;
6099	struct ftrace_hash *new_hash = NULL;
6100
6101	ret = security_locked_down(LOCKDOWN_TRACEFS);
6102	if (ret)
6103		return ret;
6104
6105	if (file->f_mode & FMODE_WRITE) {
6106		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
6107
6108		if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
6109			return -ENOMEM;
6110
6111		if (file->f_flags & O_TRUNC)
6112			new_hash = alloc_ftrace_hash(size_bits);
6113		else
6114			new_hash = alloc_and_copy_ftrace_hash(size_bits,
6115							      fgd->hash);
6116		if (!new_hash) {
6117			ret = -ENOMEM;
6118			goto out;
6119		}
6120	}
6121
6122	if (file->f_mode & FMODE_READ) {
6123		ret = seq_open(file, &ftrace_graph_seq_ops);
6124		if (!ret) {
6125			struct seq_file *m = file->private_data;
6126			m->private = fgd;
6127		} else {
6128			/* Failed */
6129			free_ftrace_hash(new_hash);
6130			new_hash = NULL;
6131		}
6132	} else
6133		file->private_data = fgd;
6134
6135out:
6136	if (ret < 0 && file->f_mode & FMODE_WRITE)
6137		trace_parser_put(&fgd->parser);
6138
6139	fgd->new_hash = new_hash;
6140
6141	/*
6142	 * All uses of fgd->hash must be taken with the graph_lock
6143	 * held. The graph_lock is going to be released, so force
6144	 * fgd->hash to be reinitialized when it is taken again.
6145	 */
6146	fgd->hash = NULL;
6147
6148	return ret;
6149}
6150
6151static int
6152ftrace_graph_open(struct inode *inode, struct file *file)
6153{
6154	struct ftrace_graph_data *fgd;
6155	int ret;
6156
6157	if (unlikely(ftrace_disabled))
6158		return -ENODEV;
6159
6160	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6161	if (fgd == NULL)
6162		return -ENOMEM;
6163
6164	mutex_lock(&graph_lock);
6165
6166	fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6167					lockdep_is_held(&graph_lock));
6168	fgd->type = GRAPH_FILTER_FUNCTION;
6169	fgd->seq_ops = &ftrace_graph_seq_ops;
6170
6171	ret = __ftrace_graph_open(inode, file, fgd);
6172	if (ret < 0)
6173		kfree(fgd);
6174
6175	mutex_unlock(&graph_lock);
6176	return ret;
6177}
6178
6179static int
6180ftrace_graph_notrace_open(struct inode *inode, struct file *file)
6181{
6182	struct ftrace_graph_data *fgd;
6183	int ret;
6184
6185	if (unlikely(ftrace_disabled))
6186		return -ENODEV;
6187
6188	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6189	if (fgd == NULL)
6190		return -ENOMEM;
6191
6192	mutex_lock(&graph_lock);
6193
6194	fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6195					lockdep_is_held(&graph_lock));
6196	fgd->type = GRAPH_FILTER_NOTRACE;
6197	fgd->seq_ops = &ftrace_graph_seq_ops;
6198
6199	ret = __ftrace_graph_open(inode, file, fgd);
6200	if (ret < 0)
6201		kfree(fgd);
6202
6203	mutex_unlock(&graph_lock);
6204	return ret;
6205}
6206
6207static int
6208ftrace_graph_release(struct inode *inode, struct file *file)
6209{
6210	struct ftrace_graph_data *fgd;
6211	struct ftrace_hash *old_hash, *new_hash;
6212	struct trace_parser *parser;
6213	int ret = 0;
6214
6215	if (file->f_mode & FMODE_READ) {
6216		struct seq_file *m = file->private_data;
6217
6218		fgd = m->private;
6219		seq_release(inode, file);
6220	} else {
6221		fgd = file->private_data;
6222	}
6223
6224
6225	if (file->f_mode & FMODE_WRITE) {
6226
6227		parser = &fgd->parser;
6228
6229		if (trace_parser_loaded(parser)) {
6230			ret = ftrace_graph_set_hash(fgd->new_hash,
6231						    parser->buffer);
6232		}
6233
6234		trace_parser_put(parser);
6235
6236		new_hash = __ftrace_hash_move(fgd->new_hash);
6237		if (!new_hash) {
6238			ret = -ENOMEM;
6239			goto out;
6240		}
6241
6242		mutex_lock(&graph_lock);
6243
6244		if (fgd->type == GRAPH_FILTER_FUNCTION) {
6245			old_hash = rcu_dereference_protected(ftrace_graph_hash,
6246					lockdep_is_held(&graph_lock));
6247			rcu_assign_pointer(ftrace_graph_hash, new_hash);
6248		} else {
6249			old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6250					lockdep_is_held(&graph_lock));
6251			rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
6252		}
6253
6254		mutex_unlock(&graph_lock);
6255
6256		/*
6257		 * We need to do a hard force of sched synchronization.
6258		 * This is because we use preempt_disable() to do RCU, but
6259		 * the function tracers can be called where RCU is not watching
6260		 * (like before user_exit()). We cannot rely on the RCU
6261		 * infrastructure to do the synchronization, thus we must do it
6262		 * ourselves.
6263		 */
6264		if (old_hash != EMPTY_HASH)
6265			synchronize_rcu_tasks_rude();
6266
6267		free_ftrace_hash(old_hash);
6268	}
6269
6270 out:
6271	free_ftrace_hash(fgd->new_hash);
6272	kfree(fgd);
6273
6274	return ret;
6275}
6276
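/*
 * Apply one parsed glob from the set_graph_* files to @hash: a plain
 * pattern adds every matching function, while a leading '!' (consumed
 * by filter_parse_regex(), which sets "not") removes the matching
 * entries instead.
 */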
6277static int
6278ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
6279{
6280	struct ftrace_glob func_g;
6281	struct dyn_ftrace *rec;
6282	struct ftrace_page *pg;
6283	struct ftrace_func_entry *entry;
6284	int fail = 1;
6285	int not;
6286
6287	/* decode regex */
6288	func_g.type = filter_parse_regex(buffer, strlen(buffer),
6289					 &func_g.search, &not);
6290
6291	func_g.len = strlen(func_g.search);
6292
6293	mutex_lock(&ftrace_lock);
6294
6295	if (unlikely(ftrace_disabled)) {
6296		mutex_unlock(&ftrace_lock);
6297		return -ENODEV;
6298	}
6299
6300	do_for_each_ftrace_rec(pg, rec) {
6301
6302		if (rec->flags & FTRACE_FL_DISABLED)
6303			continue;
6304
6305		if (ftrace_match_record(rec, &func_g, NULL, 0)) {
6306			entry = ftrace_lookup_ip(hash, rec->ip);
6307
6308			if (!not) {
6309				fail = 0;
6310
6311				if (entry)
6312					continue;
6313				if (add_hash_entry(hash, rec->ip) == NULL)
6314					goto out;
6315			} else {
6316				if (entry) {
6317					free_hash_entry(hash, entry);
6318					fail = 0;
6319				}
6320			}
6321		}
6322	} while_for_each_ftrace_rec();
6323out:
6324	mutex_unlock(&ftrace_lock);
6325
6326	if (fail)
6327		return -EINVAL;
6328
6329	return 0;
6330}
6331
6332static ssize_t
6333ftrace_graph_write(struct file *file, const char __user *ubuf,
6334		   size_t cnt, loff_t *ppos)
6335{
6336	ssize_t read, ret = 0;
6337	struct ftrace_graph_data *fgd = file->private_data;
6338	struct trace_parser *parser;
6339
6340	if (!cnt)
6341		return 0;
6342
6343	/* Read mode uses seq functions */
6344	if (file->f_mode & FMODE_READ) {
6345		struct seq_file *m = file->private_data;
6346		fgd = m->private;
6347	}
6348
6349	parser = &fgd->parser;
6350
6351	read = trace_get_user(parser, ubuf, cnt, ppos);
6352
6353	if (read >= 0 && trace_parser_loaded(parser) &&
6354	    !trace_parser_cont(parser)) {
6355
6356		ret = ftrace_graph_set_hash(fgd->new_hash,
6357					    parser->buffer);
6358		trace_parser_clear(parser);
6359	}
6360
6361	if (!ret)
6362		ret = read;
6363
6364	return ret;
6365}
6366
6367static const struct file_operations ftrace_graph_fops = {
6368	.open		= ftrace_graph_open,
6369	.read		= seq_read,
6370	.write		= ftrace_graph_write,
6371	.llseek		= tracing_lseek,
6372	.release	= ftrace_graph_release,
6373};
6374
6375static const struct file_operations ftrace_graph_notrace_fops = {
6376	.open		= ftrace_graph_notrace_open,
6377	.read		= seq_read,
6378	.write		= ftrace_graph_write,
6379	.llseek		= tracing_lseek,
6380	.release	= ftrace_graph_release,
6381};
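
/*
 * Example usage (a sketch; the function names are only illustrative):
 *
 *	# Graph-trace only the call tree below vfs_read():
 *	echo vfs_read > /sys/kernel/tracing/set_graph_function
 *	# But do not descend into kfree():
 *	echo kfree > /sys/kernel/tracing/set_graph_notrace
 *	# Remove the filter again:
 *	echo '!vfs_read' > /sys/kernel/tracing/set_graph_function
 */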
6382#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6383
6384void ftrace_create_filter_files(struct ftrace_ops *ops,
6385				struct dentry *parent)
6386{
6387
6388	trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent,
6389			  ops, &ftrace_filter_fops);
6390
6391	trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent,
6392			  ops, &ftrace_notrace_fops);
6393}
6394
6395/*
6396 * The name "destroy_filter_files" is really a misnomer. Although
6397 * it may actually delete the files in the future, it is currently
6398 * intended only to make sure the ops passed in are disabled
6399 * and that when this function returns, the caller is free to
6400 * free the ops.
6401 *
6402 * The "destroy" name is only to match the "create" name that this
6403 * should be paired with.
6404 */
6405void ftrace_destroy_filter_files(struct ftrace_ops *ops)
6406{
6407	mutex_lock(&ftrace_lock);
6408	if (ops->flags & FTRACE_OPS_FL_ENABLED)
6409		ftrace_shutdown(ops, 0);
6410	ops->flags |= FTRACE_OPS_FL_DELETED;
6411	ftrace_free_filter(ops);
6412	mutex_unlock(&ftrace_lock);
6413}
6414
6415static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
6416{
6417
6418	trace_create_file("available_filter_functions", TRACE_MODE_READ,
6419			d_tracer, NULL, &ftrace_avail_fops);
6420
6421	trace_create_file("available_filter_functions_addrs", TRACE_MODE_READ,
6422			d_tracer, NULL, &ftrace_avail_addrs_fops);
6423
6424	trace_create_file("enabled_functions", TRACE_MODE_READ,
6425			d_tracer, NULL, &ftrace_enabled_fops);
6426
6427	trace_create_file("touched_functions", TRACE_MODE_READ,
6428			d_tracer, NULL, &ftrace_touched_fops);
6429
6430	ftrace_create_filter_files(&global_ops, d_tracer);
6431
6432#ifdef CONFIG_FUNCTION_GRAPH_TRACER
6433	trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer,
6434				    NULL,
6435				    &ftrace_graph_fops);
6436	trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer,
6437				    NULL,
6438				    &ftrace_graph_notrace_fops);
6439#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6440
6441	return 0;
6442}
6443
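/*
 * sort() comparator for the mcount_loc addresses. Explicit compares
 * are used instead of returning "*ipa - *ipb", since the values are
 * full kernel addresses and their difference can overflow the int
 * return value.
 */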
6444static int ftrace_cmp_ips(const void *a, const void *b)
6445{
6446	const unsigned long *ipa = a;
6447	const unsigned long *ipb = b;
6448
6449	if (*ipa > *ipb)
6450		return 1;
6451	if (*ipa < *ipb)
6452		return -1;
6453	return 0;
6454}
6455
6456#ifdef CONFIG_FTRACE_SORT_STARTUP_TEST
6457static void test_is_sorted(unsigned long *start, unsigned long count)
6458{
6459	int i;
6460
6461	for (i = 1; i < count; i++) {
6462		if (WARN(start[i - 1] > start[i],
6463			 "[%d] %pS at %lx is not sorted with %pS at %lx\n", i,
6464			 (void *)start[i - 1], start[i - 1],
6465			 (void *)start[i], start[i]))
6466			break;
6467	}
6468	if (i == count)
6469		pr_info("ftrace section at %px sorted properly\n", start);
6470}
6471#else
6472static void test_is_sorted(unsigned long *start, unsigned long count)
6473{
6474}
6475#endif
6476
6477static int ftrace_process_locs(struct module *mod,
6478			       unsigned long *start,
6479			       unsigned long *end)
6480{
6481	struct ftrace_page *pg_unuse = NULL;
6482	struct ftrace_page *start_pg;
6483	struct ftrace_page *pg;
6484	struct dyn_ftrace *rec;
6485	unsigned long skipped = 0;
6486	unsigned long count;
6487	unsigned long *p;
6488	unsigned long addr;
6489	unsigned long flags = 0; /* Shut up gcc */
6490	int ret = -ENOMEM;
6491
6492	count = end - start;
6493
6494	if (!count)
6495		return 0;
6496
6497	/*
6498	 * Sorting mcount in vmlinux at build time depends on
6499	 * CONFIG_BUILDTIME_MCOUNT_SORT, while the mcount loc in
6500	 * modules cannot be sorted at build time.
6501	 */
6502	if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) {
6503		sort(start, count, sizeof(*start),
6504		     ftrace_cmp_ips, NULL);
6505	} else {
6506		test_is_sorted(start, count);
6507	}
6508
6509	start_pg = ftrace_allocate_pages(count);
6510	if (!start_pg)
6511		return -ENOMEM;
6512
6513	mutex_lock(&ftrace_lock);
6514
6515	/*
6516	 * The core kernel and each module need their own pages, as
6517	 * modules will free them when they are removed.
6518	 * Force a new page to be allocated for modules.
6519	 */
6520	if (!mod) {
6521		WARN_ON(ftrace_pages || ftrace_pages_start);
6522		/* First initialization */
6523		ftrace_pages = ftrace_pages_start = start_pg;
6524	} else {
6525		if (!ftrace_pages)
6526			goto out;
6527
6528		if (WARN_ON(ftrace_pages->next)) {
6529			/* Hmm, we have free pages? */
6530			while (ftrace_pages->next)
6531				ftrace_pages = ftrace_pages->next;
6532		}
6533
6534		ftrace_pages->next = start_pg;
6535	}
6536
6537	p = start;
6538	pg = start_pg;
6539	while (p < end) {
6540		unsigned long end_offset;
6541		addr = ftrace_call_adjust(*p++);
6542		/*
6543		 * Some architecture linkers will pad between
6544		 * the different mcount_loc sections of different
6545		 * object files to satisfy alignments.
6546		 * Skip any NULL pointers.
6547		 */
6548		if (!addr) {
6549			skipped++;
6550			continue;
6551		}
6552
6553		end_offset = (pg->index+1) * sizeof(pg->records[0]);
6554		if (end_offset > PAGE_SIZE << pg->order) {
6555			/* We should have allocated enough */
6556			if (WARN_ON(!pg->next))
6557				break;
6558			pg = pg->next;
6559		}
6560
6561		rec = &pg->records[pg->index++];
6562		rec->ip = addr;
6563	}
6564
6565	if (pg->next) {
6566		pg_unuse = pg->next;
6567		pg->next = NULL;
6568	}
6569
6570	/* Assign the last page to ftrace_pages */
6571	ftrace_pages = pg;
6572
6573	/*
6574	 * We only need to disable interrupts at startup
6575	 * because we are modifying code that an interrupt
6576	 * may execute, and the modification is not atomic.
6577	 * But for modules, nothing runs the code we modify
6578	 * until we are finished with it, and there's no
6579	 * reason to cause large interrupt latencies while we do it.
6580	 */
6581	if (!mod)
6582		local_irq_save(flags);
6583	ftrace_update_code(mod, start_pg);
6584	if (!mod)
6585		local_irq_restore(flags);
6586	ret = 0;
6587 out:
6588	mutex_unlock(&ftrace_lock);
6589
6590	/* We should have used all pages unless we skipped some */
6591	if (pg_unuse) {
6592		WARN_ON(!skipped);
6593		/* Need to synchronize with ftrace_location_range() */
6594		synchronize_rcu();
6595		ftrace_free_pages(pg_unuse);
6596	}
6597	return ret;
6598}
6599
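/*
 * When a module's init text is freed, the mcount records covering it
 * are removed, but the symbols may still be needed to decode addresses
 * sitting in existing trace buffers. These structures save just enough
 * of the symbol information (name, address, size) to keep such
 * kallsyms lookups working.
 */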
6600struct ftrace_mod_func {
6601	struct list_head	list;
6602	char			*name;
6603	unsigned long		ip;
6604	unsigned int		size;
6605};
6606
6607struct ftrace_mod_map {
6608	struct rcu_head		rcu;
6609	struct list_head	list;
6610	struct module		*mod;
6611	unsigned long		start_addr;
6612	unsigned long		end_addr;
6613	struct list_head	funcs;
6614	unsigned int		num_funcs;
6615};
6616
6617static int ftrace_get_trampoline_kallsym(unsigned int symnum,
6618					 unsigned long *value, char *type,
6619					 char *name, char *module_name,
6620					 int *exported)
6621{
6622	struct ftrace_ops *op;
6623
6624	list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
6625		if (!op->trampoline || symnum--)
6626			continue;
6627		*value = op->trampoline;
6628		*type = 't';
6629		strscpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
6630		strscpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
6631		*exported = 0;
6632		return 0;
6633	}
6634
6635	return -ERANGE;
6636}
6637
6638#if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES)
6639/*
6640 * Check if the current ops references the given ip.
6641 *
6642 * If the ops traces all functions, then it was already accounted for.
6643 * If the ops does not trace the current record function, skip it.
6644 * If the ops ignores the function via notrace filter, skip it.
6645 */
6646static bool
6647ops_references_ip(struct ftrace_ops *ops, unsigned long ip)
6648{
6649	/* If ops isn't enabled, ignore it */
6650	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
6651		return false;
6652
6653	/* If ops traces all then it includes this function */
6654	if (ops_traces_mod(ops))
6655		return true;
6656
6657	/* The function must be in the filter */
6658	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
6659	    !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip))
6660		return false;
6661
6662	/* If in notrace hash, we ignore it too */
6663	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip))
6664		return false;
6665
6666	return true;
6667}
6668#endif
6669
6670#ifdef CONFIG_MODULES
6671
6672#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
6673
6674static LIST_HEAD(ftrace_mod_maps);
6675
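/*
 * Count how many currently enabled ftrace_ops trace rec->ip, updating
 * the record's REGS and TRAMP flags to match. Used at module load time
 * to initialize the reference count of each of the module's records.
 */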
6676static int referenced_filters(struct dyn_ftrace *rec)
6677{
6678	struct ftrace_ops *ops;
6679	int cnt = 0;
6680
6681	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
6682		if (ops_references_ip(ops, rec->ip)) {
6683			if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
6684				continue;
6685			if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
6686				continue;
6687			cnt++;
6688			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
6689				rec->flags |= FTRACE_FL_REGS;
6690			if (cnt == 1 && ops->trampoline)
6691				rec->flags |= FTRACE_FL_TRAMP;
6692			else
6693				rec->flags &= ~FTRACE_FL_TRAMP;
6694		}
6695	}
6696
6697	return cnt;
6698}
6699
6700static void
6701clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
6702{
6703	struct ftrace_func_entry *entry;
6704	struct dyn_ftrace *rec;
6705	int i;
6706
6707	if (ftrace_hash_empty(hash))
6708		return;
6709
6710	for (i = 0; i < pg->index; i++) {
6711		rec = &pg->records[i];
6712		entry = __ftrace_lookup_ip(hash, rec->ip);
6713		/*
6714		 * Do not allow this rec to match again.
6715		 * Yeah, it may waste some memory, but will be removed
6716		 * if/when the hash is modified again.
6717		 */
6718		if (entry)
6719			entry->ip = 0;
6720	}
6721}
6722
6723/* Clear any records from hashes */
6724static void clear_mod_from_hashes(struct ftrace_page *pg)
6725{
6726	struct trace_array *tr;
6727
6728	mutex_lock(&trace_types_lock);
6729	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6730		if (!tr->ops || !tr->ops->func_hash)
6731			continue;
6732		mutex_lock(&tr->ops->func_hash->regex_lock);
6733		clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
6734		clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
6735		mutex_unlock(&tr->ops->func_hash->regex_lock);
6736	}
6737	mutex_unlock(&trace_types_lock);
6738}
6739
6740static void ftrace_free_mod_map(struct rcu_head *rcu)
6741{
6742	struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
6743	struct ftrace_mod_func *mod_func;
6744	struct ftrace_mod_func *n;
6745
6746	/* All the contents of mod_map are no longer visible to readers */
6747	list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
6748		kfree(mod_func->name);
6749		list_del(&mod_func->list);
6750		kfree(mod_func);
6751	}
6752
6753	kfree(mod_map);
6754}
6755
6756void ftrace_release_mod(struct module *mod)
6757{
6758	struct ftrace_mod_map *mod_map;
6759	struct ftrace_mod_map *n;
6760	struct dyn_ftrace *rec;
6761	struct ftrace_page **last_pg;
6762	struct ftrace_page *tmp_page = NULL;
6763	struct ftrace_page *pg;
6764
6765	mutex_lock(&ftrace_lock);
6766
6767	if (ftrace_disabled)
6768		goto out_unlock;
6769
6770	list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
6771		if (mod_map->mod == mod) {
6772			list_del_rcu(&mod_map->list);
6773			call_rcu(&mod_map->rcu, ftrace_free_mod_map);
6774			break;
6775		}
6776	}
6777
6778	/*
6779	 * Each module has its own ftrace_pages, remove
6780	 * them from the list.
6781	 */
6782	last_pg = &ftrace_pages_start;
6783	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
6784		rec = &pg->records[0];
6785		if (within_module(rec->ip, mod)) {
6786			/*
6787			 * As core pages are first, the first
6788			 * page should never be a module page.
6789			 */
6790			if (WARN_ON(pg == ftrace_pages_start))
6791				goto out_unlock;
6792
6793			/* Check if we are deleting the last page */
6794			if (pg == ftrace_pages)
6795				ftrace_pages = next_to_ftrace_page(last_pg);
6796
6797			ftrace_update_tot_cnt -= pg->index;
6798			*last_pg = pg->next;
6799
6800			pg->next = tmp_page;
6801			tmp_page = pg;
6802		} else
6803			last_pg = &pg->next;
6804	}
6805 out_unlock:
6806	mutex_unlock(&ftrace_lock);
6807
6808	/* Need to synchronize with ftrace_location_range() */
6809	if (tmp_page)
6810		synchronize_rcu();
6811	for (pg = tmp_page; pg; pg = tmp_page) {
6812
6813		/* Needs to be called outside of ftrace_lock */
6814		clear_mod_from_hashes(pg);
6815
6816		if (pg->records) {
6817			free_pages((unsigned long)pg->records, pg->order);
6818			ftrace_number_of_pages -= 1 << pg->order;
6819		}
6820		tmp_page = pg->next;
6821		kfree(pg);
6822		ftrace_number_of_groups--;
6823	}
6824}
6825
6826void ftrace_module_enable(struct module *mod)
6827{
6828	struct dyn_ftrace *rec;
6829	struct ftrace_page *pg;
6830
6831	mutex_lock(&ftrace_lock);
6832
6833	if (ftrace_disabled)
6834		goto out_unlock;
6835
6836	/*
6837	 * If tracing is enabled, go ahead and enable the record.
6838	 *
6839	 * The reason not to enable the record immediately is the
6840	 * inherent check of ftrace_make_nop/ftrace_make_call for
6841	 * correct previous instructions.  Doing the NOP conversion
6842	 * first puts the module into the correct state, thus
6843	 * passing the ftrace_make_call check.
6844	 *
6845	 * We also delay this until after the module code has set the
6846	 * text to read-only, as we now need to set it back to read-write
6847	 * so that we can modify the text.
6848	 */
6849	if (ftrace_start_up)
6850		ftrace_arch_code_modify_prepare();
6851
6852	do_for_each_ftrace_rec(pg, rec) {
6853		int cnt;
6854		/*
6855		 * do_for_each_ftrace_rec() is a double loop.
6856		 * Module text shares the pg. If a record is
6857		 * not part of this module, then skip this pg,
6858		 * which is what the "break" does.
6859		 */
6860		if (!within_module(rec->ip, mod))
6861			break;
6862
6863		/* Weak functions should still be ignored */
6864		if (!test_for_valid_rec(rec)) {
6865			/* Clear all other flags. Should not be enabled anyway */
6866			rec->flags = FTRACE_FL_DISABLED;
6867			continue;
6868		}
6869
6870		cnt = 0;
6871
6872		/*
6873		 * When adding a module, we need to check if tracers are
6874		 * currently enabled and if they are, and can trace this record,
6875		 * we need to enable the module functions as well as update the
6876		 * reference counts for those function records.
6877		 */
6878		if (ftrace_start_up)
6879			cnt += referenced_filters(rec);
6880
6881		rec->flags &= ~FTRACE_FL_DISABLED;
6882		rec->flags += cnt;
6883
6884		if (ftrace_start_up && cnt) {
6885			int failed = __ftrace_replace_code(rec, 1);
6886			if (failed) {
6887				ftrace_bug(failed, rec);
6888				goto out_loop;
6889			}
6890		}
6891
6892	} while_for_each_ftrace_rec();
6893
6894 out_loop:
6895	if (ftrace_start_up)
6896		ftrace_arch_code_modify_post_process();
6897
6898 out_unlock:
6899	mutex_unlock(&ftrace_lock);
6900
6901	process_cached_mods(mod->name);
6902}
6903
6904void ftrace_module_init(struct module *mod)
6905{
6906	int ret;
6907
6908	if (ftrace_disabled || !mod->num_ftrace_callsites)
6909		return;
6910
6911	ret = ftrace_process_locs(mod, mod->ftrace_callsites,
6912				  mod->ftrace_callsites + mod->num_ftrace_callsites);
6913	if (ret)
6914		pr_warn("ftrace: failed to allocate entries for module '%s' functions\n",
6915			mod->name);
6916}
6917
6918static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6919				struct dyn_ftrace *rec)
6920{
6921	struct ftrace_mod_func *mod_func;
6922	unsigned long symsize;
6923	unsigned long offset;
6924	char str[KSYM_SYMBOL_LEN];
6925	char *modname;
6926	const char *ret;
6927
6928	ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
6929	if (!ret)
6930		return;
6931
6932	mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
6933	if (!mod_func)
6934		return;
6935
6936	mod_func->name = kstrdup(str, GFP_KERNEL);
6937	if (!mod_func->name) {
6938		kfree(mod_func);
6939		return;
6940	}
6941
6942	mod_func->ip = rec->ip - offset;
6943	mod_func->size = symsize;
6944
6945	mod_map->num_funcs++;
6946
6947	list_add_rcu(&mod_func->list, &mod_map->funcs);
6948}
6949
6950static struct ftrace_mod_map *
6951allocate_ftrace_mod_map(struct module *mod,
6952			unsigned long start, unsigned long end)
6953{
6954	struct ftrace_mod_map *mod_map;
6955
6956	mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
6957	if (!mod_map)
6958		return NULL;
6959
6960	mod_map->mod = mod;
6961	mod_map->start_addr = start;
6962	mod_map->end_addr = end;
6963	mod_map->num_funcs = 0;
6964
6965	INIT_LIST_HEAD_RCU(&mod_map->funcs);
6966
6967	list_add_rcu(&mod_map->list, &ftrace_mod_maps);
6968
6969	return mod_map;
6970}
6971
6972static const char *
6973ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
6974			   unsigned long addr, unsigned long *size,
6975			   unsigned long *off, char *sym)
6976{
6977	struct ftrace_mod_func *found_func = NULL;
6978	struct ftrace_mod_func *mod_func;
6979
6980	list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
6981		if (addr >= mod_func->ip &&
6982		    addr < mod_func->ip + mod_func->size) {
6983			found_func = mod_func;
6984			break;
6985		}
6986	}
6987
6988	if (found_func) {
6989		if (size)
6990			*size = found_func->size;
6991		if (off)
6992			*off = addr - found_func->ip;
6993		if (sym)
6994			strscpy(sym, found_func->name, KSYM_NAME_LEN);
6995
6996		return found_func->name;
6997	}
6998
6999	return NULL;
7000}
7001
7002const char *
7003ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
7004		   unsigned long *off, char **modname, char *sym)
7005{
7006	struct ftrace_mod_map *mod_map;
7007	const char *ret = NULL;
7008
7009	/* mod_map is freed via call_rcu() */
7010	preempt_disable();
7011	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7012		ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
7013		if (ret) {
7014			if (modname)
7015				*modname = mod_map->mod->name;
7016			break;
7017		}
7018	}
7019	preempt_enable();
7020
7021	return ret;
7022}
7023
7024int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7025			   char *type, char *name,
7026			   char *module_name, int *exported)
7027{
7028	struct ftrace_mod_map *mod_map;
7029	struct ftrace_mod_func *mod_func;
7030	int ret;
7031
7032	preempt_disable();
7033	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7034
7035		if (symnum >= mod_map->num_funcs) {
7036			symnum -= mod_map->num_funcs;
7037			continue;
7038		}
7039
7040		list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7041			if (symnum > 1) {
7042				symnum--;
7043				continue;
7044			}
7045
7046			*value = mod_func->ip;
7047			*type = 'T';
7048			strscpy(name, mod_func->name, KSYM_NAME_LEN);
7049			strscpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
7050			*exported = 1;
7051			preempt_enable();
7052			return 0;
7053		}
7054		WARN_ON(1);
7055		break;
7056	}
7057	ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7058					    module_name, exported);
7059	preempt_enable();
7060	return ret;
7061}
7062
7063#else
7064static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
7065				struct dyn_ftrace *rec) { }
7066static inline struct ftrace_mod_map *
7067allocate_ftrace_mod_map(struct module *mod,
7068			unsigned long start, unsigned long end)
7069{
7070	return NULL;
7071}
7072int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7073			   char *type, char *name, char *module_name,
7074			   int *exported)
7075{
7076	int ret;
7077
7078	preempt_disable();
7079	ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7080					    module_name, exported);
7081	preempt_enable();
7082	return ret;
7083}
7084#endif /* CONFIG_MODULES */
7085
7086struct ftrace_init_func {
7087	struct list_head list;
7088	unsigned long ip;
7089};
7090
7091/* Clear any init ips from hashes */
7092static void
7093clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
7094{
7095	struct ftrace_func_entry *entry;
7096
7097	entry = ftrace_lookup_ip(hash, func->ip);
7098	/*
7099	 * Do not allow this rec to match again.
7100	 * Yeah, it may waste some memory, but will be removed
7101	 * if/when the hash is modified again.
7102	 */
7103	if (entry)
7104		entry->ip = 0;
7105}
7106
7107static void
7108clear_func_from_hashes(struct ftrace_init_func *func)
7109{
7110	struct trace_array *tr;
7111
7112	mutex_lock(&trace_types_lock);
7113	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7114		if (!tr->ops || !tr->ops->func_hash)
7115			continue;
7116		mutex_lock(&tr->ops->func_hash->regex_lock);
7117		clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
7118		clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
7119		mutex_unlock(&tr->ops->func_hash->regex_lock);
7120	}
7121	mutex_unlock(&trace_types_lock);
7122}
7123
7124static void add_to_clear_hash_list(struct list_head *clear_list,
7125				   struct dyn_ftrace *rec)
7126{
7127	struct ftrace_init_func *func;
7128
7129	func = kmalloc(sizeof(*func), GFP_KERNEL);
7130	if (!func) {
7131		MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
7132		return;
7133	}
7134
7135	func->ip = rec->ip;
7136	list_add(&func->list, clear_list);
7137}
7138
7139void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
7140{
7141	unsigned long start = (unsigned long)(start_ptr);
7142	unsigned long end = (unsigned long)(end_ptr);
7143	struct ftrace_page **last_pg = &ftrace_pages_start;
7144	struct ftrace_page *tmp_page = NULL;
7145	struct ftrace_page *pg;
7146	struct dyn_ftrace *rec;
7147	struct dyn_ftrace key;
7148	struct ftrace_mod_map *mod_map = NULL;
7149	struct ftrace_init_func *func, *func_next;
7150	LIST_HEAD(clear_hash);
7151
7152	key.ip = start;
7153	key.flags = end;	/* overload flags, as it is unsigned long */
7154
7155	mutex_lock(&ftrace_lock);
7156
7157	/*
7158	 * If we are freeing module init memory, then check if
7159	 * any tracer is active. If so, we need to save a mapping of
7160	 * the module functions being freed with the address.
7161	 */
7162	if (mod && ftrace_ops_list != &ftrace_list_end)
7163		mod_map = allocate_ftrace_mod_map(mod, start, end);
7164
7165	for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
7166		if (end < pg->records[0].ip ||
7167		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
7168			continue;
7169 again:
7170		rec = bsearch(&key, pg->records, pg->index,
7171			      sizeof(struct dyn_ftrace),
7172			      ftrace_cmp_recs);
7173		if (!rec)
7174			continue;
7175
7176		/* rec will be cleared from hashes after ftrace_lock unlock */
7177		add_to_clear_hash_list(&clear_hash, rec);
7178
7179		if (mod_map)
7180			save_ftrace_mod_rec(mod_map, rec);
7181
7182		pg->index--;
7183		ftrace_update_tot_cnt--;
7184		if (!pg->index) {
7185			*last_pg = pg->next;
7186			pg->next = tmp_page;
7187			tmp_page = pg;
7188			pg = container_of(last_pg, struct ftrace_page, next);
7189			if (!(*last_pg))
7190				ftrace_pages = pg;
7191			continue;
7192		}
7193		memmove(rec, rec + 1,
7194			(pg->index - (rec - pg->records)) * sizeof(*rec));
7195		/* More than one function may be in this block */
7196		goto again;
7197	}
7198	mutex_unlock(&ftrace_lock);
7199
7200	list_for_each_entry_safe(func, func_next, &clear_hash, list) {
7201		clear_func_from_hashes(func);
7202		kfree(func);
7203	}
7204	/* Need to synchronize with ftrace_location_range() */
7205	if (tmp_page) {
7206		synchronize_rcu();
7207		ftrace_free_pages(tmp_page);
7208	}
7209}
7210
7211void __init ftrace_free_init_mem(void)
7212{
7213	void *start = (void *)(&__init_begin);
7214	void *end = (void *)(&__init_end);
7215
7216	ftrace_boot_snapshot();
7217
7218	ftrace_free_mem(NULL, start, end);
7219}
7220
7221int __init __weak ftrace_dyn_arch_init(void)
7222{
7223	return 0;
7224}
7225
7226void __init ftrace_init(void)
7227{
7228	extern unsigned long __start_mcount_loc[];
7229	extern unsigned long __stop_mcount_loc[];
7230	unsigned long count, flags;
7231	int ret;
7232
7233	local_irq_save(flags);
7234	ret = ftrace_dyn_arch_init();
7235	local_irq_restore(flags);
7236	if (ret)
7237		goto failed;
7238
7239	count = __stop_mcount_loc - __start_mcount_loc;
7240	if (!count) {
7241		pr_info("ftrace: No functions to be traced?\n");
7242		goto failed;
7243	}
7244
7245	pr_info("ftrace: allocating %ld entries in %ld pages\n",
7246		count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
7247
7248	ret = ftrace_process_locs(NULL,
7249				  __start_mcount_loc,
7250				  __stop_mcount_loc);
7251	if (ret) {
7252		pr_warn("ftrace: failed to allocate entries for functions\n");
7253		goto failed;
7254	}
7255
7256	pr_info("ftrace: allocated %ld pages with %ld groups\n",
7257		ftrace_number_of_pages, ftrace_number_of_groups);
7258
7259	last_ftrace_enabled = ftrace_enabled = 1;
7260
7261	set_ftrace_early_filters();
7262
7263	return;
7264 failed:
7265	ftrace_disabled = 1;
7266}
7267
7268/* Do nothing if arch does not support this */
7269void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
7270{
7271}
7272
7273static void ftrace_update_trampoline(struct ftrace_ops *ops)
7274{
7275	unsigned long trampoline = ops->trampoline;
7276
7277	arch_ftrace_update_trampoline(ops);
7278	if (ops->trampoline && ops->trampoline != trampoline &&
7279	    (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
7280		/* Add to kallsyms before the perf events */
7281		ftrace_add_trampoline_to_kallsyms(ops);
7282		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
7283				   ops->trampoline, ops->trampoline_size, false,
7284				   FTRACE_TRAMPOLINE_SYM);
7285		/*
7286		 * Record the perf text poke event after the ksymbol register
7287		 * event.
7288		 */
7289		perf_event_text_poke((void *)ops->trampoline, NULL, 0,
7290				     (void *)ops->trampoline,
7291				     ops->trampoline_size);
7292	}
7293}
7294
7295void ftrace_init_trace_array(struct trace_array *tr)
7296{
7297	INIT_LIST_HEAD(&tr->func_probes);
7298	INIT_LIST_HEAD(&tr->mod_trace);
7299	INIT_LIST_HEAD(&tr->mod_notrace);
7300}
7301#else
7302
7303struct ftrace_ops global_ops = {
7304	.func			= ftrace_stub,
7305	.flags			= FTRACE_OPS_FL_INITIALIZED |
7306				  FTRACE_OPS_FL_PID,
7307};
7308
7309static int __init ftrace_nodyn_init(void)
7310{
7311	ftrace_enabled = 1;
7312	return 0;
7313}
7314core_initcall(ftrace_nodyn_init);
7315
7316static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
7317static inline void ftrace_startup_all(int command) { }
7318
7319static void ftrace_update_trampoline(struct ftrace_ops *ops)
7320{
7321}
7322
7323#endif /* CONFIG_DYNAMIC_FTRACE */
7324
7325__init void ftrace_init_global_array_ops(struct trace_array *tr)
7326{
7327	tr->ops = &global_ops;
7328	tr->ops->private = tr;
7329	ftrace_init_trace_array(tr);
7330}
7331
7332void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
7333{
7334	/* If we filter on pids, update to use the pid function */
7335	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
7336		if (WARN_ON(tr->ops->func != ftrace_stub))
7337			printk("ftrace ops had %pS for function\n",
7338			       tr->ops->func);
7339	}
7340	tr->ops->func = func;
7341	tr->ops->private = tr;
7342}
7343
7344void ftrace_reset_array_ops(struct trace_array *tr)
7345{
7346	tr->ops->func = ftrace_stub;
7347}
7348
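/*
 * The "list function": called from the trampoline when the call cannot
 * be dispatched to a single ops directly (e.g. more than one ftrace_ops
 * is registered). It walks ftrace_ops_list and hands the hit to every
 * ops whose filters match @ip.
 */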
7349static nokprobe_inline void
7350__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7351		       struct ftrace_ops *ignored, struct ftrace_regs *fregs)
7352{
7353	struct pt_regs *regs = ftrace_get_regs(fregs);
7354	struct ftrace_ops *op;
7355	int bit;
7356
7357	/*
7358	 * trace_test_and_set_recursion() will disable preemption,
7359	 * which is required since some of the ops may be dynamically
7360	 * allocated; they must be freed after a synchronize_rcu().
7361	 */
7362	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7363	if (bit < 0)
7364		return;
7365
7366	do_for_each_ftrace_op(op, ftrace_ops_list) {
7367		/* Stub functions don't need to be called nor tested */
7368		if (op->flags & FTRACE_OPS_FL_STUB)
7369			continue;
7370		/*
7371		 * Check the following for each ops before calling their func:
7372		 *  if RCU flag is set, then rcu_is_watching() must be true
7373		 *  Otherwise test if the ip matches the ops filter
7374		 *
7375		 * If any of the above fails then the op->func() is not executed.
7376		 */
7377		if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
7378		    ftrace_ops_test(op, ip, regs)) {
7379			if (FTRACE_WARN_ON(!op->func)) {
7380				pr_warn("op=%p %pS\n", op, op);
7381				goto out;
7382			}
7383			op->func(ip, parent_ip, op, fregs);
7384		}
7385	} while_for_each_ftrace_op(op);
7386out:
7387	trace_clear_recursion(bit);
7388}
7389
7390/*
7391 * Some archs only support passing ip and parent_ip. Even though
7392 * the list function ignores the op parameter, we do not want any
7393 * C side effects, where a function is called without the caller
7394 * sending a third parameter.
7395 * Archs are expected to support both regs and ftrace_ops at the same time.
7396 * If they support ftrace_ops, it is assumed they support regs.
7397 * If callbacks want to use regs, they must either check for regs
7398 * being NULL, or check for CONFIG_DYNAMIC_FTRACE_WITH_REGS.
7399 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
7400 * An architecture can pass partial regs with ftrace_ops and still
7401 * set the ARCH_SUPPORTS_FTRACE_OPS.
7402 *
7403 * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be
7404 * arch_ftrace_ops_list_func.
7405 */
7406#if ARCH_SUPPORTS_FTRACE_OPS
7407void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7408			       struct ftrace_ops *op, struct ftrace_regs *fregs)
7409{
7410	__ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
7411}
7412#else
7413void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
7414{
7415	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
7416}
7417#endif
7418NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);
7419
7420/*
7421 * If there's only one function registered but it does not support
7422 * recursion or needs RCU protection, then this function will be
7423 * called by the mcount trampoline.
7424 */
7425static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
7426				   struct ftrace_ops *op, struct ftrace_regs *fregs)
7427{
7428	int bit;
7429
7430	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7431	if (bit < 0)
7432		return;
7433
7434	if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
7435		op->func(ip, parent_ip, op, fregs);
7436
7437	trace_clear_recursion(bit);
7438}
7439NOKPROBE_SYMBOL(ftrace_ops_assist_func);
7440
7441/**
7442 * ftrace_ops_get_func - get the function a trampoline should call
7443 * @ops: the ops to get the function for
7444 *
7445 * Normally the mcount trampoline will call the ops->func, but there
7446 * are times that it should not. For example, if the ops does not
7447 * have its own recursion protection, then it should call the
7448 * ftrace_ops_assist_func() instead.
7449 *
7450 * Returns: the function that the trampoline should call for @ops.
7451 */
7452ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
7453{
7454	/*
7455	 * If the function does not handle recursion or needs to be RCU safe,
7456	 * then we need to call the assist handler.
7457	 */
7458	if (ops->flags & (FTRACE_OPS_FL_RECURSION |
7459			  FTRACE_OPS_FL_RCU))
7460		return ftrace_ops_assist_func;
7461
7462	return ops->func;
7463}
7464
7465static void
7466ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
7467				     struct task_struct *prev,
7468				     struct task_struct *next,
7469				     unsigned int prev_state)
7470{
7471	struct trace_array *tr = data;
7472	struct trace_pid_list *pid_list;
7473	struct trace_pid_list *no_pid_list;
7474
7475	pid_list = rcu_dereference_sched(tr->function_pids);
7476	no_pid_list = rcu_dereference_sched(tr->function_no_pids);
7477
7478	if (trace_ignore_this_task(pid_list, no_pid_list, next))
7479		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7480			       FTRACE_PID_IGNORE);
7481	else
7482		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7483			       next->pid);
7484}
7485
7486static void
7487ftrace_pid_follow_sched_process_fork(void *data,
7488				     struct task_struct *self,
7489				     struct task_struct *task)
7490{
7491	struct trace_pid_list *pid_list;
7492	struct trace_array *tr = data;
7493
7494	pid_list = rcu_dereference_sched(tr->function_pids);
7495	trace_filter_add_remove_task(pid_list, self, task);
7496
7497	pid_list = rcu_dereference_sched(tr->function_no_pids);
7498	trace_filter_add_remove_task(pid_list, self, task);
7499}
7500
7501static void
7502ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
7503{
7504	struct trace_pid_list *pid_list;
7505	struct trace_array *tr = data;
7506
7507	pid_list = rcu_dereference_sched(tr->function_pids);
7508	trace_filter_add_remove_task(pid_list, NULL, task);
7509
7510	pid_list = rcu_dereference_sched(tr->function_no_pids);
7511	trace_filter_add_remove_task(pid_list, NULL, task);
7512}
7513
7514void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
7515{
7516	if (enable) {
7517		register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7518						  tr);
7519		register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
7520						  tr);
7521	} else {
7522		unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7523						    tr);
7524		unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
7525						    tr);
7526	}
7527}
7528
7529static void clear_ftrace_pids(struct trace_array *tr, int type)
7530{
7531	struct trace_pid_list *pid_list;
7532	struct trace_pid_list *no_pid_list;
7533	int cpu;
7534
7535	pid_list = rcu_dereference_protected(tr->function_pids,
7536					     lockdep_is_held(&ftrace_lock));
7537	no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7538						lockdep_is_held(&ftrace_lock));
7539
7540	/* Make sure there's something to do */
7541	if (!pid_type_enabled(type, pid_list, no_pid_list))
7542		return;
7543
7544	/* See if the pids still need to be checked after this */
7545	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
7546		unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7547		for_each_possible_cpu(cpu)
7548			per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
7549	}
7550
7551	if (type & TRACE_PIDS)
7552		rcu_assign_pointer(tr->function_pids, NULL);
7553
7554	if (type & TRACE_NO_PIDS)
7555		rcu_assign_pointer(tr->function_no_pids, NULL);
7556
7557	/* Wait till all users are no longer using pid filtering */
7558	synchronize_rcu();
7559
7560	if ((type & TRACE_PIDS) && pid_list)
7561		trace_pid_list_free(pid_list);
7562
7563	if ((type & TRACE_NO_PIDS) && no_pid_list)
7564		trace_pid_list_free(no_pid_list);
7565}
7566
7567void ftrace_clear_pids(struct trace_array *tr)
7568{
7569	mutex_lock(&ftrace_lock);
7570
7571	clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
7572
7573	mutex_unlock(&ftrace_lock);
7574}
7575
7576static void ftrace_pid_reset(struct trace_array *tr, int type)
7577{
7578	mutex_lock(&ftrace_lock);
7579	clear_ftrace_pids(tr, type);
7580
7581	ftrace_update_pid_func();
7582	ftrace_startup_all(0);
7583
7584	mutex_unlock(&ftrace_lock);
7585}
7586
7587/* Greater than any max PID */
7588#define FTRACE_NO_PIDS		(void *)(PID_MAX_LIMIT + 1)
7589
7590static void *fpid_start(struct seq_file *m, loff_t *pos)
7591	__acquires(RCU)
7592{
7593	struct trace_pid_list *pid_list;
7594	struct trace_array *tr = m->private;
7595
7596	mutex_lock(&ftrace_lock);
7597	rcu_read_lock_sched();
7598
7599	pid_list = rcu_dereference_sched(tr->function_pids);
7600
7601	if (!pid_list)
7602		return !(*pos) ? FTRACE_NO_PIDS : NULL;
7603
7604	return trace_pid_start(pid_list, pos);
7605}
7606
7607static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
7608{
7609	struct trace_array *tr = m->private;
7610	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
7611
7612	if (v == FTRACE_NO_PIDS) {
7613		(*pos)++;
7614		return NULL;
7615	}
7616	return trace_pid_next(pid_list, v, pos);
7617}
7618
7619static void fpid_stop(struct seq_file *m, void *p)
7620	__releases(RCU)
7621{
7622	rcu_read_unlock_sched();
7623	mutex_unlock(&ftrace_lock);
7624}
7625
7626static int fpid_show(struct seq_file *m, void *v)
7627{
7628	if (v == FTRACE_NO_PIDS) {
7629		seq_puts(m, "no pid\n");
7630		return 0;
7631	}
7632
7633	return trace_pid_show(m, v);
7634}
7635
7636static const struct seq_operations ftrace_pid_sops = {
7637	.start = fpid_start,
7638	.next = fpid_next,
7639	.stop = fpid_stop,
7640	.show = fpid_show,
7641};
7642
7643static void *fnpid_start(struct seq_file *m, loff_t *pos)
7644	__acquires(RCU)
7645{
7646	struct trace_pid_list *pid_list;
7647	struct trace_array *tr = m->private;
7648
7649	mutex_lock(&ftrace_lock);
7650	rcu_read_lock_sched();
7651
7652	pid_list = rcu_dereference_sched(tr->function_no_pids);
7653
7654	if (!pid_list)
7655		return !(*pos) ? FTRACE_NO_PIDS : NULL;
7656
7657	return trace_pid_start(pid_list, pos);
7658}
7659
7660static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
7661{
7662	struct trace_array *tr = m->private;
7663	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
7664
7665	if (v == FTRACE_NO_PIDS) {
7666		(*pos)++;
7667		return NULL;
7668	}
7669	return trace_pid_next(pid_list, v, pos);
7670}
7671
7672static const struct seq_operations ftrace_no_pid_sops = {
7673	.start = fnpid_start,
7674	.next = fnpid_next,
7675	.stop = fpid_stop,
7676	.show = fpid_show,
7677};
7678
7679static int pid_open(struct inode *inode, struct file *file, int type)
7680{
7681	const struct seq_operations *seq_ops;
7682	struct trace_array *tr = inode->i_private;
7683	struct seq_file *m;
7684	int ret = 0;
7685
7686	ret = tracing_check_open_get_tr(tr);
7687	if (ret)
7688		return ret;
7689
7690	if ((file->f_mode & FMODE_WRITE) &&
7691	    (file->f_flags & O_TRUNC))
7692		ftrace_pid_reset(tr, type);
7693
7694	switch (type) {
7695	case TRACE_PIDS:
7696		seq_ops = &ftrace_pid_sops;
7697		break;
7698	case TRACE_NO_PIDS:
7699		seq_ops = &ftrace_no_pid_sops;
7700		break;
7701	default:
7702		trace_array_put(tr);
7703		WARN_ON_ONCE(1);
7704		return -EINVAL;
7705	}
7706
7707	ret = seq_open(file, seq_ops);
7708	if (ret < 0) {
7709		trace_array_put(tr);
7710	} else {
7711		m = file->private_data;
7712		/* copy tr over to seq ops */
7713		m->private = tr;
7714	}
7715
7716	return ret;
7717}
7718
7719static int
7720ftrace_pid_open(struct inode *inode, struct file *file)
7721{
7722	return pid_open(inode, file, TRACE_PIDS);
7723}
7724
7725static int
7726ftrace_no_pid_open(struct inode *inode, struct file *file)
7727{
7728	return pid_open(inode, file, TRACE_NO_PIDS);
7729}
7730
7731static void ignore_task_cpu(void *data)
7732{
7733	struct trace_array *tr = data;
7734	struct trace_pid_list *pid_list;
7735	struct trace_pid_list *no_pid_list;
7736
7737	/*
7738	 * This function is called by on_each_cpu() while
7739	 * ftrace_lock is held.
7740	 */
7741	pid_list = rcu_dereference_protected(tr->function_pids,
7742					     mutex_is_locked(&ftrace_lock));
7743	no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7744						mutex_is_locked(&ftrace_lock));
7745
7746	if (trace_ignore_this_task(pid_list, no_pid_list, current))
7747		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7748			       FTRACE_PID_IGNORE);
7749	else
7750		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7751			       current->pid);
7752}
7753
7754static ssize_t
7755pid_write(struct file *filp, const char __user *ubuf,
7756	  size_t cnt, loff_t *ppos, int type)
7757{
7758	struct seq_file *m = filp->private_data;
7759	struct trace_array *tr = m->private;
7760	struct trace_pid_list *filtered_pids;
7761	struct trace_pid_list *other_pids;
7762	struct trace_pid_list *pid_list;
7763	ssize_t ret;
7764
7765	if (!cnt)
7766		return 0;
7767
7768	mutex_lock(&ftrace_lock);
7769
7770	switch (type) {
7771	case TRACE_PIDS:
7772		filtered_pids = rcu_dereference_protected(tr->function_pids,
7773					     lockdep_is_held(&ftrace_lock));
7774		other_pids = rcu_dereference_protected(tr->function_no_pids,
7775					     lockdep_is_held(&ftrace_lock));
7776		break;
7777	case TRACE_NO_PIDS:
7778		filtered_pids = rcu_dereference_protected(tr->function_no_pids,
7779					     lockdep_is_held(&ftrace_lock));
7780		other_pids = rcu_dereference_protected(tr->function_pids,
7781					     lockdep_is_held(&ftrace_lock));
7782		break;
7783	default:
7784		ret = -EINVAL;
7785		WARN_ON_ONCE(1);
7786		goto out;
7787	}
7788
7789	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
7790	if (ret < 0)
7791		goto out;
7792
7793	switch (type) {
7794	case TRACE_PIDS:
7795		rcu_assign_pointer(tr->function_pids, pid_list);
7796		break;
7797	case TRACE_NO_PIDS:
7798		rcu_assign_pointer(tr->function_no_pids, pid_list);
7799		break;
7800	}
7801
7802
7803	if (filtered_pids) {
7804		synchronize_rcu();
7805		trace_pid_list_free(filtered_pids);
7806	} else if (pid_list && !other_pids) {
7807		/* Register a probe to set whether to ignore the tracing of a task */
7808		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7809	}
7810
7811	/*
7812	 * Ignoring of pids is done at task switch. But we have to
7813	 * check for those tasks that are currently running.
7814	 * Always do this in case a pid was appended or removed.
7815	 */
7816	on_each_cpu(ignore_task_cpu, tr, 1);
7817
7818	ftrace_update_pid_func();
7819	ftrace_startup_all(0);
7820 out:
7821	mutex_unlock(&ftrace_lock);
7822
7823	if (ret > 0)
7824		*ppos += ret;
7825
7826	return ret;
7827}
7828
7829static ssize_t
7830ftrace_pid_write(struct file *filp, const char __user *ubuf,
7831		 size_t cnt, loff_t *ppos)
7832{
7833	return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
7834}
7835
7836static ssize_t
7837ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
7838		    size_t cnt, loff_t *ppos)
7839{
7840	return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
7841}
7842
7843static int
7844ftrace_pid_release(struct inode *inode, struct file *file)
7845{
7846	struct trace_array *tr = inode->i_private;
7847
7848	trace_array_put(tr);
7849
7850	return seq_release(inode, file);
7851}
7852
7853static const struct file_operations ftrace_pid_fops = {
7854	.open		= ftrace_pid_open,
7855	.write		= ftrace_pid_write,
7856	.read		= seq_read,
7857	.llseek		= tracing_lseek,
7858	.release	= ftrace_pid_release,
7859};
7860
7861static const struct file_operations ftrace_no_pid_fops = {
7862	.open		= ftrace_no_pid_open,
7863	.write		= ftrace_no_pid_write,
7864	.read		= seq_read,
7865	.llseek		= tracing_lseek,
7866	.release	= ftrace_pid_release,
7867};
7868
7869void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
7870{
7871	trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
7872			    tr, &ftrace_pid_fops);
7873	trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
7874			  d_tracer, tr, &ftrace_no_pid_fops);
7875}
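
/*
 * Example usage (a sketch): restrict function tracing to one task, or
 * exclude one; as with the other filter files, '>' resets the list and
 * '>>' appends to it:
 *
 *	echo 1234 > /sys/kernel/tracing/set_ftrace_pid
 *	echo 5678 > /sys/kernel/tracing/set_ftrace_notrace_pid
 */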
7876
7877void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
7878					 struct dentry *d_tracer)
7879{
7880	/* Only the top level directory has the dyn_tracefs and profile */
7881	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
7882
7883	ftrace_init_dyn_tracefs(d_tracer);
7884	ftrace_profile_tracefs(d_tracer);
7885}
7886
7887/**
7888 * ftrace_kill - kill ftrace
7889 *
7890 * This function should be used by panic code. It stops ftrace
7891 * but in a not-so-nice way, and is safe to call even from an
7892 * atomic section.
7893 */
7894void ftrace_kill(void)
7895{
7896	ftrace_disabled = 1;
7897	ftrace_enabled = 0;
7898	ftrace_trace_function = ftrace_stub;
7899	kprobe_ftrace_kill();
7900}
7901
7902/**
7903 * ftrace_is_dead - Test if ftrace is dead or not.
7904 *
7905 * Returns: 1 if ftrace is "dead", zero otherwise.
7906 */
7907int ftrace_is_dead(void)
7908{
7909	return ftrace_disabled;
7910}
7911
7912#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
7913/*
7914 * When registering ftrace_ops with IPMODIFY, it is necessary to make sure
7915 * it doesn't conflict with any direct ftrace_ops. If there is an existing
7916 * direct ftrace_ops on a kernel function being patched, call
7917 * FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER on it to enable sharing.
7918 *
7919 * @ops:     ftrace_ops being registered.
7920 *
7921 * Returns:
7922 *         0 on success;
7923 *         Negative on failure.
7924 */
7925static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
7926{
7927	struct ftrace_func_entry *entry;
7928	struct ftrace_hash *hash;
7929	struct ftrace_ops *op;
7930	int size, i, ret;
7931
7932	lockdep_assert_held_once(&direct_mutex);
7933
7934	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
7935		return 0;
7936
7937	hash = ops->func_hash->filter_hash;
7938	size = 1 << hash->size_bits;
7939	for (i = 0; i < size; i++) {
7940		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
7941			unsigned long ip = entry->ip;
7942			bool found_op = false;
7943
7944			mutex_lock(&ftrace_lock);
7945			do_for_each_ftrace_op(op, ftrace_ops_list) {
7946				if (!(op->flags & FTRACE_OPS_FL_DIRECT))
7947					continue;
7948				if (ops_references_ip(op, ip)) {
7949					found_op = true;
7950					break;
7951				}
7952			} while_for_each_ftrace_op(op);
7953			mutex_unlock(&ftrace_lock);
7954
7955			if (found_op) {
7956				if (!op->ops_func)
7957					return -EBUSY;
7958
7959				ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER);
7960				if (ret)
7961					return ret;
7962			}
7963		}
7964	}
7965
7966	return 0;
7967}
7968
7969/*
7970 * Similar to prepare_direct_functions_for_ipmodify, clean up after an ops
7971 * with IPMODIFY is unregistered. The cleanup is optional for most DIRECT
7972 * ops.
7973 */
7974static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
7975{
7976	struct ftrace_func_entry *entry;
7977	struct ftrace_hash *hash;
7978	struct ftrace_ops *op;
7979	int size, i;
7980
7981	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
7982		return;
7983
7984	mutex_lock(&direct_mutex);
7985
7986	hash = ops->func_hash->filter_hash;
7987	size = 1 << hash->size_bits;
7988	for (i = 0; i < size; i++) {
7989		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
7990			unsigned long ip = entry->ip;
7991			bool found_op = false;
7992
7993			mutex_lock(&ftrace_lock);
7994			do_for_each_ftrace_op(op, ftrace_ops_list) {
7995				if (!(op->flags & FTRACE_OPS_FL_DIRECT))
7996					continue;
7997				if (ops_references_ip(op, ip)) {
7998					found_op = true;
7999					break;
8000				}
8001			} while_for_each_ftrace_op(op);
8002			mutex_unlock(&ftrace_lock);
8003
8004			/* The cleanup is optional, ignore any errors */
8005			if (found_op && op->ops_func)
8006				op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER);
8007		}
8008	}
8009	mutex_unlock(&direct_mutex);
8010}
8011
8012#define lock_direct_mutex()	mutex_lock(&direct_mutex)
8013#define unlock_direct_mutex()	mutex_unlock(&direct_mutex)
8014
8015#else  /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
8016
8017static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
8018{
8019	return 0;
8020}
8021
8022static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
8023{
8024}
8025
8026#define lock_direct_mutex()	do { } while (0)
8027#define unlock_direct_mutex()	do { } while (0)
8028
8029#endif  /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
8030
8031/*
8032 * Similar to register_ftrace_function, except we don't lock direct_mutex.
8033 */
8034static int register_ftrace_function_nolock(struct ftrace_ops *ops)
8035{
8036	int ret;
8037
8038	ftrace_ops_init(ops);
8039
8040	mutex_lock(&ftrace_lock);
8041
8042	ret = ftrace_startup(ops, 0);
8043
8044	mutex_unlock(&ftrace_lock);
8045
8046	return ret;
8047}
8048
8049/**
8050 * register_ftrace_function - register a function for profiling
8051 * @ops:	ops structure that holds the function for profiling.
8052 *
8053 * Register a function to be called by all functions in the
8054 * kernel.
8055 *
8056 * Note: @ops->func and all the functions it calls must be labeled
8057 *       with "notrace", otherwise it will go into a
8058 *       recursive loop.
8059 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	lock_direct_mutex();
	ret = prepare_direct_functions_for_ipmodify(ops);
	if (ret < 0)
		goto out_unlock;

	ret = register_ftrace_function_nolock(ops);

out_unlock:
	unlock_direct_mutex();
	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);
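
/*
 * Illustrative sketch (not part of this file): a minimal user of the API
 * above. The names trace_hits, my_callback and my_ops are hypothetical.
 * Note that the callback is marked notrace, as required by the kernel-doc
 * on register_ftrace_function(), and does only notrace-safe work.
 *
 *	static atomic_long_t trace_hits;
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip,
 *					struct ftrace_ops *op,
 *					struct ftrace_regs *fregs)
 *	{
 *		atomic_long_inc(&trace_hits);
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	ret = register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */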

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops:	ops structure that holds the function to unregister
 *
 * Unregister a function that was previously registered with
 * register_ftrace_function().
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);

	cleanup_direct_functions_after_ipmodify(ops);
	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);

/* bsearch()/sort() comparator for an array of symbol name pointers */
static int symbols_cmp(const void *a, const void *b)
{
	const char **str_a = (const char **) a;
	const char **str_b = (const char **) b;

	return strcmp(*str_a, *str_b);
}

struct kallsyms_data {
	unsigned long *addrs;	/* results: resolved addresses, 0 if unresolved */
	const char **syms;	/* sorted array of symbol names to resolve */
	size_t cnt;		/* number of entries in syms/addrs */
	size_t found;		/* how many symbols have been resolved so far */
};

/*
 * This function gets called for all kernel and module symbols and returns
 * 1 once all the requested symbols have been resolved, 0 otherwise.
 */
static int kallsyms_callback(void *data, const char *name, unsigned long addr)
{
	struct kallsyms_data *args = data;
	const char **sym;
	int idx;

	/* &name is the key: symbols_cmp() compares const char ** on both sides */
	sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
	if (!sym)
		return 0;

	idx = sym - args->syms;
	/* Only the first address found for a symbol is kept */
	if (args->addrs[idx])
		return 0;

	/* Only symbols with an ftrace callsite are of interest */
	if (!ftrace_location(addr))
		return 0;

	args->addrs[idx] = addr;
	args->found++;
	return args->found == args->cnt ? 1 : 0;
}

/**
 * ftrace_lookup_symbols - Lookup addresses for array of symbols
 *
 * @sorted_syms: array of pointers to symbol names to resolve,
 * must be alphabetically sorted
 * @cnt: number of symbols/addresses in @sorted_syms/@addrs arrays
 * @addrs: array for storing resulting addresses
 *
 * This function looks up addresses for the symbols provided in the
 * @sorted_syms array (which must be alphabetically sorted) and stores
 * them in the @addrs array, which needs to be big enough to store at
 * least @cnt addresses.
 *
 * Returns: 0 if all provided symbols are found, -ESRCH otherwise.
 */
int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
{
	struct kallsyms_data args;
	int found_all;

	/* Zero @addrs so the callback can tell resolved entries apart */
	memset(addrs, 0, sizeof(*addrs) * cnt);
	args.addrs = addrs;
	args.syms = sorted_syms;
	args.cnt = cnt;
	args.found = 0;

	found_all = kallsyms_on_each_symbol(kallsyms_callback, &args);
	if (found_all)
		return 0;
	found_all = module_kallsyms_on_each_symbol(NULL, kallsyms_callback, &args);
	return found_all ? 0 : -ESRCH;
}
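
/*
 * Illustrative sketch (not part of this file): resolving a couple of
 * symbols. The array contents are hypothetical; what matters is that the
 * array is sorted (e.g. with sort() and a strcmp-based comparator such as
 * symbols_cmp() above) before the call.
 *
 *	const char *syms[] = { "schedule", "vfs_read" };
 *	unsigned long addrs[ARRAY_SIZE(syms)];
 *	int err;
 *
 *	sort(syms, ARRAY_SIZE(syms), sizeof(*syms), symbols_cmp, NULL);
 *	err = ftrace_lookup_symbols(syms, ARRAY_SIZE(syms), addrs);
 *	if (!err)
 *		... addrs[i] now holds the address for syms[i] ...
 */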

#ifdef CONFIG_SYSCTL

#ifdef CONFIG_DYNAMIC_FTRACE
static void ftrace_startup_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up) {
		command = FTRACE_UPDATE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_START_FUNC_RET;
		ftrace_startup_enable(command);
	}
}

static void ftrace_shutdown_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up) {
		command = FTRACE_DISABLE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_STOP_FUNC_RET;
		ftrace_run_update_code(command);
	}
}
#else
# define ftrace_startup_sysctl()       do { } while (0)
# define ftrace_shutdown_sysctl()      do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static bool is_permanent_ops_registered(void)
{
	struct ftrace_ops *op;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PERMANENT)
			return true;
	} while_for_each_ftrace_op(op);

	return false;
}

static int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	if (ftrace_enabled) {

		/* we are starting ftrace again */
		if (rcu_dereference_protected(ftrace_ops_list,
			lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
			update_ftrace_function();

		ftrace_startup_sysctl();

	} else {
		/* Permanent ops must not be disabled from here; restore the flag */
		if (is_permanent_ops_registered()) {
			ftrace_enabled = true;
			ret = -EBUSY;
			goto out;
		}

		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

	last_ftrace_enabled = !!ftrace_enabled;
 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}

static struct ctl_table ftrace_sysctls[] = {
	{
		.procname       = "ftrace_enabled",
		.data           = &ftrace_enabled,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = ftrace_enable_sysctl,
	},
};

static int __init ftrace_sysctl_init(void)
{
	register_sysctl_init("kernel", ftrace_sysctls);
	return 0;
}
late_initcall(ftrace_sysctl_init);
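
/*
 * Illustrative sketch (userspace, not part of this file): the knob
 * registered above is exposed as /proc/sys/kernel/ftrace_enabled and can
 * be flipped with sysctl(8) or a plain write:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/proc/sys/kernel/ftrace_enabled", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "0", 1);
 *		close(fd);
 *	}
 *
 * The write fails with EBUSY when a PERMANENT ops is registered, matching
 * the is_permanent_ops_registered() check in ftrace_enable_sysctl().
 */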
#endif /* CONFIG_SYSCTL */
