// SPDX-License-Identifier: GPL-2.0
/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 */
#define pr_fmt(fmt)	"trace_kprobe: " fmt

#include <linux/bpf-cgroup.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>
#include <linux/error-injection.h>

#include <asm/setup.h>  /* for COMMAND_LINE_SIZE */

#include "trace_dynevent.h"
#include "trace_kprobe_selftest.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"
#include "trace_probe_kernel.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096

/* Kprobe early definition from command line */
static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;

static int __init set_kprobe_boot_events(char *str)
{
	strscpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
	disable_tracing_selftest("running kprobe events");

	return 1;
}
__setup("kprobe_event=", set_kprobe_boot_events);
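
/*
 * The boot-time syntax matches the kprobe_events interface below, except
 * that ',' and ';' stand in for ' ' and '\n' since the value is a single
 * kernel parameter. An illustrative example:
 *
 *	kprobe_event=p,vfs_read,$arg1,$arg2;r,vfs_read,$retval
 *
 * (setup_boot_kprobe_events() undoes this substitution before parsing.)
 */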

static int trace_kprobe_create(const char *raw_command);
static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_kprobe_release(struct dyn_event *ev);
static bool trace_kprobe_is_busy(struct dyn_event *ev);
static bool trace_kprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_kprobe_ops = {
	.create = trace_kprobe_create,
	.show = trace_kprobe_show,
	.is_busy = trace_kprobe_is_busy,
	.free = trace_kprobe_release,
	.match = trace_kprobe_match,
};

/*
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct dyn_event	devent;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

static bool is_trace_kprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_kprobe_ops;
}

static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_kprobe, devent);
}

/**
 * for_each_trace_kprobe - iterate over the trace_kprobe list
 * @pos:	the struct trace_kprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_kprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return kprobe_gone(&tk->rp.kp);
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
						 struct module *mod)
{
	int len = strlen(module_name(mod));
	const char *name = trace_kprobe_symbol(tk);

	return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
}

#ifdef CONFIG_MODULES
static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
{
	char *p;
	bool ret;

	if (!tk->symbol)
		return false;
	p = strchr(tk->symbol, ':');
	if (!p)
		return true;
	*p = '\0';
	rcu_read_lock_sched();
	ret = !!find_module(tk->symbol);
	rcu_read_unlock_sched();
	*p = ':';

	return ret;
}
#else
static inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
{
	return false;
}
#endif

static bool trace_kprobe_is_busy(struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);

	return trace_probe_is_enabled(&tk->tp);
}

static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];

	if (!argc)
		return true;

	if (!tk->symbol)
		snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		snprintf(buf, sizeof(buf), "%s+%u",
			 trace_kprobe_symbol(tk), tk->rp.kp.offset);
	else
		snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
	if (strcmp(buf, argv[0]))
		return false;
	argc--; argv++;

	return trace_probe_match_command_args(&tk->tp, argc, argv);
}

static bool trace_kprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);

	return (event[0] == '\0' ||
		strcmp(trace_probe_name(&tk->tp), event) == 0) &&
	    (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
	    trace_kprobe_match_command_head(tk, argc, argv);
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	return nhit;
}

static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
{
	return !(list_empty(&tk->rp.kp.list) &&
		 hlist_unhashed(&tk->rp.kp.hlist));
}

/* Return 0 if it fails to find the symbol address */
static nokprobe_inline
unsigned long trace_kprobe_address(struct trace_kprobe *tk)
{
	unsigned long addr;

	if (tk->symbol) {
		addr = (unsigned long)
			kallsyms_lookup_name(trace_kprobe_symbol(tk));
		if (addr)
			addr += tk->rp.kp.offset;
	} else {
		addr = (unsigned long)tk->rp.kp.addr;
	}
	return addr;
}

static nokprobe_inline struct trace_kprobe *
trace_kprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_kprobe, tp);
}

bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);

	return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
			tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
}

bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);

	return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
	       false;
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

static void free_trace_kprobe(struct trace_kprobe *tk)
{
	if (tk) {
		trace_probe_cleanup(&tk->tp);
		kfree(tk->symbol);
		free_percpu(tk->nhit);
		kfree(tk);
	}
}

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int maxactive,
					     int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	tk->rp.maxactive = maxactive;
	INIT_HLIST_NODE(&tk->rp.kp.hlist);
	INIT_LIST_HEAD(&tk->rp.kp.list);

	ret = trace_probe_init(&tk->tp, event, group, false, nargs);
	if (ret < 0)
		goto error;

	dyn_event_init(&tk->devent, &trace_kprobe_ops);
	return tk;
error:
	free_trace_kprobe(tk);
	return ERR_PTR(ret);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct dyn_event *pos;
	struct trace_kprobe *tk;

	for_each_trace_kprobe(tk, pos)
		if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tk->tp), group) == 0)
			return tk;
	return NULL;
}

static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
{
	int ret = 0;

	if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}

	return ret;
}

static void __disable_trace_kprobe(struct trace_probe *tp)
{
	struct trace_kprobe *tk;

	list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
		if (!trace_kprobe_is_registered(tk))
			continue;
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
	}
}

/*
 * Enable trace_probe.
 * If the @file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler.
 */
static int enable_trace_kprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;
	struct trace_kprobe *tk;
	bool enabled;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This also changes "enabled" state */
	if (file) {
		ret = trace_probe_add_file(tp, file);
		if (ret)
			return ret;
	} else
		trace_probe_set_flag(tp, TP_FLAG_PROFILE);

	if (enabled)
		return 0;

	list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
		if (trace_kprobe_has_gone(tk))
			continue;
		ret = __enable_trace_kprobe(tk);
		if (ret)
			break;
		enabled = true;
	}

	if (ret) {
		/* Failed to enable one of them. Roll back all */
		if (enabled)
			__disable_trace_kprobe(tp);
		if (file)
			trace_probe_remove_file(tp, file);
		else
			trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
	}

	return ret;
}

/*
 * Disable trace_probe.
 * If the @file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler.
 */
static int disable_trace_kprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (file) {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	if (!trace_probe_is_enabled(tp))
		__disable_trace_kprobe(tp);

 out:
	if (file)
		/*
		 * Synchronization is done in the function below. For perf
		 * events, file == NULL and perf_trace_event_unreg() calls
		 * tracepoint_synchronize_unregister() to ensure the event is
		 * synchronized, so we don't need to care about it here.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}

#if defined(CONFIG_DYNAMIC_FTRACE) && \
	!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
static bool __within_notrace_func(unsigned long addr)
{
	unsigned long offset, size;

	if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
		return false;

	/* Get the entry address of the target function */
	addr -= offset;

	/*
	 * Since ftrace_location_range() does inclusive range check, we need
	 * to subtract 1 byte from the end address.
	 */
	return !ftrace_location_range(addr, addr + size - 1);
}

static bool within_notrace_func(struct trace_kprobe *tk)
{
	unsigned long addr = trace_kprobe_address(tk);
	char symname[KSYM_NAME_LEN], *p;

	if (!__within_notrace_func(addr))
		return false;

	/* Check if the address is on a suffixed-symbol */
	if (!lookup_symbol_name(addr, symname)) {
		p = strchr(symname, '.');
		if (!p)
			return true;
		*p = '\0';
		addr = (unsigned long)kprobe_lookup_name(symname, 0);
		if (addr)
			return __within_notrace_func(addr);
	}

	return true;
}
#else
#define within_notrace_func(tk)	(false)
#endif

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	ret = security_locked_down(LOCKDOWN_KPROBES);
	if (ret)
		return ret;

	if (trace_kprobe_is_registered(tk))
		return -EINVAL;

	if (within_notrace_func(tk)) {
		pr_warn("Could not probe notrace function %ps\n",
			(void *)trace_kprobe_address(tk));
		return -EINVAL;
	}

	for (i = 0; i < tk->tp.nr_args; i++) {
		ret = traceprobe_update_arg(&tk->tp.args[i]);
		if (ret)
			return ret;
	}

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_kprobe_is_registered(tk)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		/* Cleanup kprobe for reuse and mark it unregistered */
		INIT_HLIST_NODE(&tk->rp.kp.hlist);
		INIT_LIST_HEAD(&tk->rp.kp.list);
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* If other probes are on the event, just unregister kprobe */
	if (trace_probe_has_sibling(&tk->tp))
		goto unreg;

	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* If there's a reference to the dynamic event */
	if (trace_event_dyn_busy(trace_probe_event_call(&tk->tp)))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

unreg:
	__unregister_trace_kprobe(tk);
	dyn_event_remove(&tk->devent);
	trace_probe_unlink(&tk->tp);

	return 0;
}

static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
					 struct trace_kprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	int i;

	list_for_each_entry(orig, &tpe->probes, tp.list) {
		if (strcmp(trace_kprobe_symbol(orig),
			   trace_kprobe_symbol(comp)) ||
		    trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are the same. Let's compare
		 * comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}

static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
	if (ret) {
		/* Note that arguments start at index 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_kprobe_has_same_kprobe(to, tk)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tk->tp, &to->tp);
	if (ret)
		return ret;

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
		ret = 0;
	}

	if (ret)
		trace_probe_unlink(&tk->tp);
	else
		dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));

	return ret;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&event_mutex);

	old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
				   trace_probe_group_name(&tk->tp));
	if (old_tk) {
		if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_kprobe(tk, old_tk);
		}
		goto end;
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		} else
			pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
		ret = 0;
	}

	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));

end:
	mutex_unlock(&event_mutex);
	return ret;
}

#ifdef CONFIG_MODULES
/* Module notifier call back, checking event on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct dyn_event *pos;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&event_mutex);
	for_each_trace_kprobe(tk, pos) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_probe_name(&tk->tp),
					module_name(mod), ret);
		}
	}
	mutex_unlock(&event_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};
static int trace_kprobe_register_module_notifier(void)
{
	return register_module_notifier(&trace_kprobe_module_nb);
}
#else
static int trace_kprobe_register_module_notifier(void)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static int count_symbols(void *data, unsigned long unused)
{
	unsigned int *count = data;

	(*count)++;

	return 0;
}

struct sym_count_ctx {
	unsigned int count;
	const char *name;
};

static int count_mod_symbols(void *data, const char *name, unsigned long unused)
{
	struct sym_count_ctx *ctx = data;

	if (strcmp(name, ctx->name) == 0)
		ctx->count++;

	return 0;
}

static unsigned int number_of_same_symbols(char *func_name)
{
	struct sym_count_ctx ctx = { .count = 0, .name = func_name };

	kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count);

	module_kallsyms_on_each_symbol(NULL, count_mod_symbols, &ctx);

	return ctx.count;
}

static int trace_kprobe_entry_handler(struct kretprobe_instance *ri,
				      struct pt_regs *regs);

static int __trace_kprobe_create(int argc, const char *argv[])
{
	/*
	 * Argument syntax:
	 *  - Add kprobe:
	 *      p[:[GRP/][EVENT]] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/][EVENT]] [MOD:]KSYM[+0] [FETCHARGS]
	 *    Or
	 *      p[:[GRP/][EVENT]] [MOD:]KSYM[+0]%return [FETCHARGS]
	 *
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  $comm       : fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
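	 *
	 * For example (illustrative; see Documentation/trace/kprobetrace.rst
	 * for the full grammar):
	 *  p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)
	 *  r:myretprobe do_sys_open $retval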
	 */
	struct trace_kprobe *tk = NULL;
	int i, len, new_argc = 0, ret = 0;
	bool is_return = false;
	char *symbol = NULL, *tmp = NULL;
	const char **new_argv = NULL;
	const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
	enum probe_print_type ptype;
	int maxactive = 0;
	long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];
	char gbuf[MAX_EVENT_NAME_LEN];
	char abuf[MAX_BTF_ARGS_LEN];
	char *dbuf = NULL;
	struct traceprobe_parse_context ctx = { .flags = TPARG_FL_KERNEL };

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}
	if (argc < 2)
		return -ECANCELED;

	trace_probe_log_init("trace_kprobe", argc, argv);

	event = strchr(&argv[0][1], ':');
	if (event)
		event++;

	if (isdigit(argv[0][1])) {
		if (!is_return) {
			trace_probe_log_err(1, BAD_MAXACT_TYPE);
			goto parse_error;
		}
		if (event)
			len = event - &argv[0][1] - 1;
		else
			len = strlen(&argv[0][1]);
		if (len > MAX_EVENT_NAME_LEN - 1) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		memcpy(buf, &argv[0][1], len);
		buf[len] = '\0';
		ret = kstrtouint(buf, 0, &maxactive);
		if (ret || !maxactive) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		/*
		 * kretprobe instances are iterated over via a list; the
		 * maximum should stay reasonable.
		 */
		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
			trace_probe_log_err(1, MAXACT_TOO_BIG);
			goto parse_error;
		}
	}

	/*
	 * Try to parse an address. If that fails, try to read the
	 * input as a symbol.
	 */
	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
		trace_probe_log_set_index(1);
		/* Check whether uprobe event specified */
		if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
			ret = -ECANCELED;
			goto error;
		}
		/* a symbol specified */
		symbol = kstrdup(argv[1], GFP_KERNEL);
		if (!symbol)
			return -ENOMEM;

		tmp = strchr(symbol, '%');
		if (tmp) {
			if (!strcmp(tmp, "%return")) {
				*tmp = '\0';
				is_return = true;
			} else {
				trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
				goto parse_error;
			}
		}

		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret || offset < 0 || offset > UINT_MAX) {
			trace_probe_log_err(0, BAD_PROBE_ADDR);
			goto parse_error;
		}
		if (is_return)
			ctx.flags |= TPARG_FL_RETURN;
		ret = kprobe_on_func_entry(NULL, symbol, offset);
		if (ret == 0 && !is_return)
			ctx.flags |= TPARG_FL_FENTRY;
		/* Defer the ENOENT case until registering the kprobe */
		if (ret == -EINVAL && is_return) {
			trace_probe_log_err(0, BAD_RETPROBE);
			goto parse_error;
		}
	}

	if (symbol && !strchr(symbol, ':')) {
		unsigned int count;

		count = number_of_same_symbols(symbol);
		if (count > 1) {
			/*
			 * Users should use ADDR to remove the ambiguity of
			 * using KSYM only.
			 */
			trace_probe_log_err(0, NON_UNIQ_SYMBOL);
			ret = -EADDRNOTAVAIL;

			goto error;
		} else if (count == 0) {
			/*
			 * We can return ENOENT earlier than when registering
			 * the kprobe.
			 */
			trace_probe_log_err(0, BAD_PROBE_ADDR);
			ret = -ENOENT;

			goto error;
		}
	}

	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, gbuf,
						  event - argv[0]);
		if (ret)
			goto parse_error;
	}

	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		sanitize_event_name(buf);
		event = buf;
	}

	argc -= 2; argv += 2;
	ctx.funcname = symbol;
	new_argv = traceprobe_expand_meta_args(argc, argv, &new_argc,
					       abuf, MAX_BTF_ARGS_LEN, &ctx);
	if (IS_ERR(new_argv)) {
		ret = PTR_ERR(new_argv);
		new_argv = NULL;
		goto out;
	}
	if (new_argv) {
		argc = new_argc;
		argv = new_argv;
	}

	ret = traceprobe_expand_dentry_args(argc, argv, &dbuf);
	if (ret)
		goto out;

	/* setup a probe */
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
				argc, is_return);
	if (IS_ERR(tk)) {
		ret = PTR_ERR(tk);
		/* This must return -ENOMEM, else there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto out;	/* We know tk is not allocated */
	}

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		trace_probe_log_set_index(i + 2);
		ctx.offset = 0;
		ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], &ctx);
		if (ret)
			goto error;	/* This can be -ENOMEM */
	}
	/* entry handler for kretprobe */
	if (is_return && tk->tp.entry_arg) {
		tk->rp.entry_handler = trace_kprobe_entry_handler;
		tk->rp.data_size = traceprobe_get_entry_data_size(&tk->tp);
	}

	ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
	ret = traceprobe_set_print_fmt(&tk->tp, ptype);
	if (ret < 0)
		goto error;

	ret = register_trace_kprobe(tk);
	if (ret) {
		trace_probe_log_set_index(1);
		if (ret == -EILSEQ)
			trace_probe_log_err(0, BAD_INSN_BNDRY);
		else if (ret == -ENOENT)
			trace_probe_log_err(0, BAD_PROBE_ADDR);
		else if (ret != -ENOMEM && ret != -EEXIST)
			trace_probe_log_err(0, FAIL_REG_PROBE);
		goto error;
	}

out:
	traceprobe_finish_parse(&ctx);
	trace_probe_log_clear();
	kfree(new_argv);
	kfree(symbol);
	kfree(dbuf);
	return ret;

parse_error:
	ret = -EINVAL;
error:
	free_trace_kprobe(tk);
	goto out;
}

static int trace_kprobe_create(const char *raw_command)
{
	return trace_probe_create(raw_command, __trace_kprobe_create);
}

static int create_or_delete_trace_kprobe(const char *raw_command)
{
	int ret;

	if (raw_command[0] == '-')
		return dyn_event_release(raw_command, &trace_kprobe_ops);

	ret = trace_kprobe_create(raw_command);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
{
	return create_or_delete_trace_kprobe(cmd->seq.buffer);
}

/**
 * kprobe_event_cmd_init - Initialize a kprobe event command object
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @buf: A pointer to the buffer used to build the command
 * @maxlen: The length of the buffer passed in @buf
 *
 * Initialize a kprobe event command object.  Use this before
 * calling any of the other kprobe_event functions.
 */
void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
{
	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
			  trace_kprobe_run_command);
}
EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);

/**
 * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @kretprobe: Is this a return probe?
 * @name: The name of the kprobe event
 * @loc: The location of the kprobe event
 * @...: Variable number of arg (pairs), one pair for each field
 *
 * NOTE: Users normally won't want to call this function directly, but
 * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
 * adds a NULL to the end of the arg list.  If this function is used
 * directly, make sure the last arg in the variable arg list is NULL.
 *
 * Generate a kprobe event command to be executed by
 * kprobe_event_gen_cmd_end().  This function can be used to generate the
 * complete command or only the first part of it; in the latter case,
 * kprobe_event_add_fields() can be used to add more fields following this.
 *
 * Unlike synth_event_gen_cmd_start(), @loc must be specified. This
 * function returns -EINVAL if @loc == NULL.
 *
 * Return: 0 if successful, error otherwise.
 */
int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
				 const char *name, const char *loc, ...)
{
	char buf[MAX_EVENT_NAME_LEN];
	struct dynevent_arg arg;
	va_list args;
	int ret;

	if (cmd->type != DYNEVENT_TYPE_KPROBE)
		return -EINVAL;

	if (!loc)
		return -EINVAL;

	if (kretprobe)
		snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
	else
		snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);

	ret = dynevent_str_add(cmd, buf);
	if (ret)
		return ret;

	dynevent_arg_init(&arg, 0);
	arg.str = loc;
	ret = dynevent_arg_add(cmd, &arg, NULL);
	if (ret)
		return ret;

	va_start(args, loc);
	for (;;) {
		const char *field;

		field = va_arg(args, const char *);
		if (!field)
			break;

		if (++cmd->n_fields > MAX_TRACE_ARGS) {
			ret = -EINVAL;
			break;
		}

		arg.str = field;
		ret = dynevent_arg_add(cmd, &arg, NULL);
		if (ret)
			break;
	}
	va_end(args);

	return ret;
}
EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
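
/*
 * An illustrative sketch of the command-based API (the event, symbol and
 * field names below are hypothetical), following the pattern documented in
 * Documentation/trace/events.rst:
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
 *					 "do_sys_open",
 *					 "dfd=%ax", "filename=%dx");
 *	if (!ret)
 *		ret = kprobe_event_gen_cmd_end(&cmd);
 */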

/**
 * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @...: Variable number of arg (pairs), one pair for each field
 *
 * NOTE: Users normally won't want to call this function directly, but
 * rather use the kprobe_event_add_fields() wrapper, which
 * automatically adds a NULL to the end of the arg list.  If this
 * function is used directly, make sure the last arg in the variable
 * arg list is NULL.
 *
 * Add probe fields to an existing kprobe command using a variable
 * list of args.  Fields are added in the same order they're listed.
 *
 * Return: 0 if successful, error otherwise.
 */
int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
{
	struct dynevent_arg arg;
	va_list args;
	int ret = 0;

	if (cmd->type != DYNEVENT_TYPE_KPROBE)
		return -EINVAL;

	dynevent_arg_init(&arg, 0);

	va_start(args, cmd);
	for (;;) {
		const char *field;

		field = va_arg(args, const char *);
		if (!field)
			break;

		if (++cmd->n_fields > MAX_TRACE_ARGS) {
			ret = -EINVAL;
			break;
		}

		arg.str = field;
		ret = dynevent_arg_add(cmd, &arg, NULL);
		if (ret)
			break;
	}
	va_end(args);

	return ret;
}
EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
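
/*
 * Continuing the illustrative sketch above, further (hypothetical) fields
 * could be appended to the still-open command before executing it, e.g.
 *
 *	ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
 */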

/**
 * kprobe_event_delete - Delete a kprobe event
 * @name: The name of the kprobe event to delete
 *
 * Delete a kprobe event with the given @name from kernel code rather
 * than directly from the command line.
 *
 * Return: 0 if successful, error otherwise.
 */
int kprobe_event_delete(const char *name)
{
	char buf[MAX_EVENT_NAME_LEN];

	snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);

	return create_or_delete_trace_kprobe(buf);
}
EXPORT_SYMBOL_GPL(kprobe_event_delete);
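
/*
 * For instance, the hypothetical event generated in the sketch above would
 * be torn down with:
 *
 *	ret = kprobe_event_delete("gen_kprobe_test");
 */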

static int trace_kprobe_release(struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);
	int ret = unregister_trace_kprobe(tk);

	if (!ret)
		free_trace_kprobe(tk);
	return ret;
}

static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
		seq_printf(m, "%d", tk->rp.maxactive);
	seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
				trace_probe_name(&tk->tp));

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}
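
/*
 * The line printed above round-trips through __trace_kprobe_create(): an
 * entry in kprobe_events reads back in the same syntax it was created
 * with, e.g. (illustrative)
 *
 *	p:kprobes/myprobe do_sys_open dfd=%ax filename=%dx
 */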

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_kprobe(ev))
		return 0;

	return trace_kprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_kprobe_ops);
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

static unsigned long trace_kprobe_missed(struct trace_kprobe *tk)
{
	return trace_kprobe_is_return(tk) ?
		tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed;
}

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_kprobe *tk;
	unsigned long nmissed;

	if (!is_trace_kprobe(ev))
		return 0;

	tk = to_trace_kprobe(ev);
	nmissed = trace_kprobe_missed(tk);
	seq_printf(m, "  %-44s %15lu %15lu\n",
		   trace_probe_name(&tk->tp),
		   trace_kprobe_nhit(tk),
		   nmissed);

	return 0;
}
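
/*
 * Each kprobe_profile line is thus "EVENT HITS MISSED"; an event that
 * fired once with no missed handlers would read roughly (illustrative):
 *
 *	  myprobe                                            1               0
 */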

static const struct seq_operations profile_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
		   void *dest, void *base)
{
	struct pt_regs *regs = rec;
	unsigned long val;
	int ret;

retry:
	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = regs_get_kernel_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = kernel_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	case FETCH_OP_ARG:
		val = regs_get_kernel_argument(regs, code->param);
		break;
	case FETCH_OP_EDATA:
		val = *(unsigned long *)((unsigned long)edata + code->offset);
		break;
#endif
	case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
		code++;
		goto retry;
	default:
		ret = process_common_fetch_insn(code, &val);
		if (ret < 0)
			return ret;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn);

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
	struct trace_event_buffer fbuffer;
	int dsize;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	dsize = __get_data_size(&tk->tp, regs, NULL);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + tk->tp.size + dsize);
	if (!entry)
		return;

	fbuffer.regs = regs;
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(&entry[1], &tk->tp, regs, NULL, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tk->tp)
		__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */

static int trace_kprobe_entry_handler(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	struct kretprobe *rp = get_kretprobe(ri);
	struct trace_kprobe *tk;

	/*
	 * There is a small chance that get_kretprobe(ri) returns NULL when
	 * the kretprobe is unregistered on another CPU between kretprobe's
	 * trampoline_handler and this function.
	 */
	if (unlikely(!rp))
		return -ENOENT;

	tk = container_of(rp, struct trace_kprobe, rp);

	/* store argument values into ri->data as entry data */
	if (tk->tp.entry_arg)
		store_trace_entry_data(ri->data, &tk->tp, regs);

	return 0;
}


static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct trace_event_buffer fbuffer;
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
	int dsize;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	dsize = __get_data_size(&tk->tp, regs, ri->data);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + tk->tp.size + dsize);
	if (!entry)
		return;

	fbuffer.regs = regs;
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = get_kretprobe_retaddr(ri);
	store_trace_args(&entry[1], &tk->tp, regs, ri->data, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tk->tp)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (trace_probe_print_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (trace_probe_print_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}


static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct kprobe_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct kretprobe_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static int
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call)) {
		unsigned long orig_ip = instruction_pointer(regs);
		int ret;

		ret = trace_call_bpf(call, regs);

		/*
		 * We need to check and see if we modified the pc of the
		 * pt_regs, and if so return 1 so that we don't do the
		 * single stepping.
		 */
		if (orig_ip != instruction_pointer(regs))
			return 1;
		if (!ret)
			return 0;
	}

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return 0;

	dsize = __get_data_size(&tk->tp, regs, NULL);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return 0;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(&entry[1], &tk->tp, regs, NULL, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
	return 0;
}
NOKPROBE_SYMBOL(kprobe_perf_func);

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs, ri->data);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = get_kretprobe_retaddr(ri);
	store_trace_args(&entry[1], &tk->tp, regs, ri->data, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);

int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **symbol, u64 *probe_offset,
			u64 *probe_addr, unsigned long *missed,
			bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_kprobe *tk;

	if (perf_type_tracepoint)
		tk = find_trace_kprobe(pevent, group);
	else
		tk = trace_kprobe_primary_from_call(event->tp_event);
	if (!tk)
		return -EINVAL;

	*fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
					      : BPF_FD_TYPE_KPROBE;
	*probe_offset = tk->rp.kp.offset;
	*probe_addr = kallsyms_show_value(current_cred()) ?
		      (unsigned long)tk->rp.kp.addr : 0;
	*symbol = tk->symbol;
	if (missed)
		*missed = trace_kprobe_missed(tk);
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */

/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_kprobe(event, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_kprobe(event, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_kprobe(event, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_kprobe(event, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
	int ret = 0;

	raw_cpu_inc(*tk->nhit);

	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
		ret = kprobe_perf_func(tk, regs);
#endif
	return ret;
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct kretprobe *rp = get_kretprobe(ri);
	struct trace_kprobe *tk;

	/*
	 * There is a small chance that get_kretprobe(ri) returns NULL when
	 * the kretprobe is unregistered on another CPU between kretprobe's
	 * trampoline_handler and this function.
	 */
	if (unlikely(!rp))
		return 0;

	tk = container_of(rp, struct trace_kprobe, rp);
	raw_cpu_inc(*tk->nhit);

	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
		kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
		kretprobe_perf_func(tk, ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static struct trace_event_fields kretprobe_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = kretprobe_event_define_fields },
	{}
};

static struct trace_event_fields kprobe_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = kprobe_event_define_fields },
	{}
};

static inline void init_trace_event_call(struct trace_kprobe *tk)
{
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);

	if (trace_kprobe_is_return(tk)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->fields_array = kretprobe_fields_array;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->fields_array = kprobe_fields_array;
	}

	call->flags = TRACE_EVENT_FL_KPROBE;
	call->class->reg = kprobe_register;
}

static int register_kprobe_event(struct trace_kprobe *tk)
{
	init_trace_event_call(tk);

	return trace_probe_register_event_call(&tk->tp);
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
	return trace_probe_unregister_event_call(&tk->tp);
}

#ifdef CONFIG_PERF_EVENTS

/* create a trace_kprobe, but don't add it to global lists */
struct trace_event_call *
create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
			  bool is_return)
{
	enum probe_print_type ptype;
	struct trace_kprobe *tk;
	int ret;
	char *event;

	if (func) {
		unsigned int count;

		count = number_of_same_symbols(func);
		if (count > 1)
			/*
			 * Users should use addr to remove the ambiguity of
			 * using func only.
			 */
			return ERR_PTR(-EADDRNOTAVAIL);
		else if (count == 0)
			/*
			 * We can return ENOENT earlier than when registering
			 * the kprobe.
			 */
			return ERR_PTR(-ENOENT);
	}

	/*
	 * Local trace_kprobes are not added to dyn_event, so they are never
	 * searched by find_trace_kprobe(). Therefore, there is no concern
	 * about duplicated names here.
	 */
	event = func ? func : "DUMMY_EVENT";

	tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
				offs, 0 /* maxactive */, 0 /* nargs */,
				is_return);

	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return ERR_CAST(tk);
	}

	init_trace_event_call(tk);

	ptype = trace_kprobe_is_return(tk) ?
		PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
	if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		goto error;

	return trace_probe_event_call(&tk->tp);
error:
	free_trace_kprobe(tk);
	return ERR_PTR(ret);
}

void destroy_local_trace_kprobe(struct trace_event_call *event_call)
{
	struct trace_kprobe *tk;

	tk = trace_kprobe_primary_from_call(event_call);
	if (unlikely(!tk))
		return;

	if (trace_probe_is_enabled(&tk->tp)) {
		WARN_ON(1);
		return;
	}

	__unregister_trace_kprobe(tk);

	free_trace_kprobe(tk);
}
#endif /* CONFIG_PERF_EVENTS */

static __init void enable_boot_kprobe_events(void)
{
	struct trace_array *tr = top_trace_array();
	struct trace_event_file *file;
	struct trace_kprobe *tk;
	struct dyn_event *pos;

	mutex_lock(&event_mutex);
	for_each_trace_kprobe(tk, pos) {
		list_for_each_entry(file, &tr->events, list)
			if (file->event_call == trace_probe_event_call(&tk->tp))
				trace_event_enable_disable(file, 1, 0);
	}
	mutex_unlock(&event_mutex);
}

static __init void setup_boot_kprobe_events(void)
{
	char *p, *cmd = kprobe_boot_events_buf;
	int ret;

	strreplace(kprobe_boot_events_buf, ',', ' ');

	while (cmd && *cmd != '\0') {
		p = strchr(cmd, ';');
		if (p)
			*p++ = '\0';

		ret = create_or_delete_trace_kprobe(cmd);
		if (ret)
			pr_warn("Failed to add event(%d): %s\n", ret, cmd);

		cmd = p;
	}

	enable_boot_kprobe_events();
}

/*
 * Register dynevent at core_initcall. This allows the kernel to set up
 * kprobe events in postcore_initcall without tracefs.
 */
static __init int init_kprobe_trace_early(void)
{
	int ret;

	ret = dyn_event_register(&trace_kprobe_ops);
	if (ret)
		return ret;

	if (trace_kprobe_register_module_notifier())
		return -EINVAL;

	return 0;
}
core_initcall(init_kprobe_trace_early);

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	/* Event list interface */
	trace_create_file("kprobe_events", TRACE_MODE_WRITE,
			  NULL, NULL, &kprobe_events_ops);

	/* Profile interface */
	trace_create_file("kprobe_profile", TRACE_MODE_READ,
			  NULL, NULL, &kprobe_profile_ops);

	setup_boot_kprobe_events();

	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST
static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == trace_probe_event_call(&tk->tp))
			return file;

	return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this locklessly.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_kprobe *tk;
	struct trace_event_file *file;

	if (tracing_is_disabled())
		return -ENODEV;

	if (tracing_selftest_disabled)
		return 0;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)");
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(
					trace_probe_event_call(&tk->tp), file);
		}
	}

	ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval");
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(
					trace_probe_event_call(&tk->tp), file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/*
	 * Not expecting an error here, the check is only to prevent the
	 * optimizer from removing the call to target() as otherwise there
	 * are no side-effects and the call is never performed.
	 */
	if (ret != 21)
		warn++;

	/* Disable trace points before removing them */
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(
				trace_probe_event_call(&tk->tp), file);
	}

	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe2 hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(
				trace_probe_event_call(&tk->tp), file);
	}

	ret = create_or_delete_trace_kprobe("-:testprobe");
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = create_or_delete_trace_kprobe("-:testprobe2");
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	ret = dyn_events_release_all(&trace_kprobe_ops);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on cleaning up probes.\n");
		warn++;
	}
	/*
	 * Wait for the optimizer work to finish. Otherwise it might fiddle
	 * with probes in already freed __init text.
	 */
	wait_for_kprobe_optimizer();
	if (warn)
		pr_cont("NG: Some tests failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif
