// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/blk-cgroup.h>

#include "../../block/blk.h"

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(running_trace_lock);
/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1
#define TRACE_BLK_OPT_CGROUP	0x2
#define TRACE_BLK_OPT_CGNAME	0x4

static struct tracer_opt blk_tracer_opts[] = {
	/* Disable the minimalistic output by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
#ifdef CONFIG_BLK_CGROUP
	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
#endif
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static int blk_probes_ref;

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message, either into the ftrace ring buffer (when the
 * "blk" tracer is enabled) or into the relay channel (classic blktrace).
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len, u64 cgid)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	unsigned int trace_ctx = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	if (blk_tracer) {
		buffer = blk_tr->array_buffer.buffer;
		trace_ctx = tracing_gen_ctx_flags(0);
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len + cgid_len,
						  trace_ctx);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len + cgid_len;
		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		memcpy((void *)t + sizeof(*t) + cgid_len, data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct task_struct *tsk)
{
	unsigned long flags;
	struct blk_trace *bt;

	tsk->btrace_seq = blktrace_seq;
	raw_spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm), 0);
	}
	raw_spin_unlock_irqrestore(&running_trace_lock, flags);
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec64 now;
	unsigned long flags;
	u32 words[2];

	/* need to check user space to see if this breaks in y2038 or y2106 */
	ktime_get_real_ts64(&now);
	words[0] = (u32)now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
	local_irq_restore(flags);
}

void __blk_trace_note_message(struct blk_trace *bt,
		struct cgroup_subsys_state *css, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;
	u64 cgid = 0;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = this_cpu_ptr(bt->msg_data);
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

#ifdef CONFIG_BLK_CGROUP
	if (css && (blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		cgid = cgroup_id(css->cgroup);
	else
		cgid = 1;
#endif
	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, cgid);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__blk_trace_note_message);

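/*
 * Return 1 if the event should be filtered out: its action class is masked
 * off in bt->act_mask, its sector lies outside the bt->start_lba..end_lba
 * range, or a pid filter is set and does not match. Return 0 to log it.
 */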
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD
#define BLK_TC_PREFLUSH		BLK_TC_FLUSH

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((__force u32)(rw & REQ_ ## __name) <<	\
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
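
/*
 * For example, MASK_TC_BIT(opf, SYNC) isolates the REQ_SYNC bit in @opf and
 * shifts it from bit position __REQ_SYNC up to position
 * ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT, i.e. onto BLK_TC_ACT(BLK_TC_SYNC) in
 * the action word.
 */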

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    const blk_opf_t opf, u32 what, int error,
			    int pdu_len, void *pdu_data, u64 cgid)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	unsigned int trace_ctx = 0;
	pid_t pid;
	int cpu;
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
	const enum req_op op = opf & REQ_OP_MASK;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[op_is_write(op) ? WRITE : READ];
	what |= MASK_TC_BIT(opf, SYNC);
	what |= MASK_TC_BIT(opf, RAHEAD);
	what |= MASK_TC_BIT(opf, META);
	what |= MASK_TC_BIT(opf, PREFLUSH);
	what |= MASK_TC_BIT(opf, FUA);
	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
		what |= BLK_TC_ACT(BLK_TC_DISCARD);
	if (op == REQ_OP_FLUSH)
		what |= BLK_TC_ACT(BLK_TC_FLUSH);
	if (cgid)
		what |= __BLK_TA_CGROUP;

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->array_buffer.buffer;
		trace_ctx = tracing_gen_ctx_flags(0);
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len + cgid_len,
						  trace_ctx);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(tsk);

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);
	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len + cgid_len;

		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		if (pdu_len)
			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
			return;
		}
	}

	local_irq_restore(flags);
}

static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
{
	relay_close(bt->rchan);

	/*
	 * If 'bt->dir' is not set, both 'dropped' and 'msg' were created
	 * under 'q->debugfs_dir', so look them up there and remove them.
	 */
	if (!bt->dir) {
		debugfs_lookup_and_remove("dropped", q->debugfs_dir);
		debugfs_lookup_and_remove("msg", q->debugfs_dir);
	} else {
		debugfs_remove(bt->dir);
	}
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

static void get_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (++blk_probes_ref == 1)
		blk_register_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void put_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (!--blk_probes_ref)
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static int blk_trace_start(struct blk_trace *bt)
{
	if (bt->trace_state != Blktrace_setup &&
	    bt->trace_state != Blktrace_stopped)
		return -EINVAL;

	blktrace_seq++;
	smp_mb();
	bt->trace_state = Blktrace_running;
	raw_spin_lock_irq(&running_trace_lock);
	list_add(&bt->running_list, &running_trace_list);
	raw_spin_unlock_irq(&running_trace_lock);
	trace_note_time(bt);

	return 0;
}

static int blk_trace_stop(struct blk_trace *bt)
{
	if (bt->trace_state != Blktrace_running)
		return -EINVAL;

	bt->trace_state = Blktrace_stopped;
	raw_spin_lock_irq(&running_trace_lock);
	list_del_init(&bt->running_list);
	raw_spin_unlock_irq(&running_trace_lock);
	relay_flush(bt->rchan);

	return 0;
}

static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
{
	blk_trace_stop(bt);
	synchronize_rcu();
	blk_trace_free(q, bt);
	put_probe_ref();
}

static int __blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	if (!bt)
		return -EINVAL;

	blk_trace_cleanup(q, bt);

	return 0;
}

int blk_trace_remove(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_remove(q);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.read =		blk_dropped_read,
	.llseek =	default_llseek,
};

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = memdup_user_nul(buffer, count);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	bt = filp->private_data;
	__blk_trace_note_message(bt, NULL, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.write =	blk_msg_write,
	.llseek =	noop_llseek,
};

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
					&relay_file_operations);
}

static const struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	if (bdev) {
		bt->start_lba = bdev->bd_start_sect;
		bt->end_lba = bdev->bd_start_sect + bdev_nr_sectors(bdev);
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

/*
 * Set up everything required to start tracing
 */
static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			      struct block_device *bdev,
			      struct blk_user_trace_setup *buts)
{
	struct blk_trace *bt = NULL;
	struct dentry *dir = NULL;
	int ret;

	lockdep_assert_held(&q->debugfs_mutex);

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strscpy_pad(buts->name, name, BLKTRACE_BDEV_SIZE);

	/*
	 * Some device names contain slashes (e.g. "cciss/c0d0"); convert
	 * them to underscores so the name works as a debugfs entry.
	 */
	strreplace(buts->name, '/', '_');

	/*
	 * bdev can be NULL, as with scsi-generic; this is as helpful as
	 * we can be.
	 */
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex))) {
		pr_warn("Concurrent blktraces are not allowed on %s\n",
			buts->name);
		return -EBUSY;
	}

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	/*
	 * When tracing a whole disk, reuse the existing debugfs directory
	 * created by the block layer on init. For partitions and for
	 * scsi-generic block devices, create a temporary debugfs directory
	 * that will be removed once the trace ends.
	 */
	if (bdev && !bdev_is_partition(bdev))
		dir = q->debugfs_dir;
	else
		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);

	/*
	 * As blktrace relies on debugfs for its interface the debugfs directory
	 * is required, contrary to the usual mantra of not checking for debugfs
	 * files or directories.
	 */
	if (IS_ERR_OR_NULL(dir)) {
		pr_warn("debugfs_dir not present for %s so skipping\n",
			buts->name);
		ret = -ENOENT;
		goto err;
	}

	bt->dev = dev;
	atomic_set(&bt->dropped, 0);
	INIT_LIST_HEAD(&bt->running_list);

	ret = -EIO;
	debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
	debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);

	bt->rchan = relay_open("trace", dir, buts->buf_size,
				buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();

	ret = 0;
err:
	if (ret)
		blk_trace_free(q, bt);
	return ret;
}

static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			     struct block_device *bdev, char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_setup(q, name, dev, bdev, arg);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);

#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct compat_blk_user_trace_setup cbuts;
	int ret;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	buts = (struct blk_user_trace_setup) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif

static int __blk_trace_startstop(struct request_queue *q, int start)
{
	struct blk_trace *bt;

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (bt == NULL)
		return -EINVAL;

	if (start)
		return blk_trace_start(bt);
	else
		return blk_trace_stop(bt);
}

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_startstop(q, start);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/*
 * When reading or writing the blktrace sysfs files, the references to the
 * opened sysfs or device files should prevent the underlying block device
 * from being removed. So no further delete protection is really needed.
 */

/**
 * blk_trace_ioctl - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	mutex_lock(&q->debugfs_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		snprintf(b, sizeof(b), "%pg", bdev);
		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		snprintf(b, sizeof(b), "%pg", bdev);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
		fallthrough;
	case BLKTRACESTOP:
		ret = __blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = __blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&q->debugfs_mutex);
	return ret;
}

/**
 * blk_trace_shutdown - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex)))
		__blk_trace_remove(q);
}

#ifdef CONFIG_BLK_CGROUP
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	struct cgroup_subsys_state *blkcg_css;
	struct blk_trace *bt;

	/* We don't use the 'bt' value here except as an optimization... */
	bt = rcu_dereference_protected(q->blk_trace, 1);
	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		return 0;

	blkcg_css = bio_blkcg_css(bio);
	if (!blkcg_css)
		return 0;
	return cgroup_id(blkcg_css->cgroup);
}
#else
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	return 0;
}
#endif

static u64
blk_trace_request_get_cgid(struct request *rq)
{
	if (!rq->bio)
		return 0;
	/* Use the first bio */
	return blk_trace_bio_get_cgid(rq->q, rq->bio);
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @rq:		the source request
 * @error:	return status to log
 * @nr_bytes:	number of completed bytes
 * @what:	the action
 * @cgid:	the cgroup info
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request *rq, blk_status_t error,
			     unsigned int nr_bytes, u32 what, u64 cgid)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	if (blk_rq_is_passthrough(rq))
		what |= BLK_TC_ACT(BLK_TC_PC);
	else
		what |= BLK_TC_ACT(BLK_TC_FS);

	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, rq->cmd_flags,
			what, blk_status_to_errno(error), 0, NULL, cgid);
	rcu_read_unlock();
}

static void blk_add_trace_rq_insert(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_issue(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_merge(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_requeue(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
			blk_status_t error, unsigned int nr_bytes)
{
	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
			 blk_trace_request_get_cgid(rq));
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what, int error)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio->bi_opf, what, error, 0, NULL,
			blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

static void blk_add_trace_bio_bounce(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BOUNCE, 0);
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
			  blk_status_to_errno(bio->bi_status));
}

static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE,
			0);
}

static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE,
			0);
}

static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0);
}

static void blk_add_trace_getrq(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0);
}

static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
	rcu_read_unlock();
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				    unsigned int depth, bool explicit)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u32 what;

		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
	}
	rcu_read_unlock();
}

static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_iter.bi_sector,
				bio->bi_iter.bi_size, bio->bi_opf, BLK_TA_SPLIT,
				blk_status_to_errno(bio->bi_status),
				sizeof(rpdu), &rpdu,
				blk_trace_bio_get_cgid(q, bio));
	}
	rcu_read_unlock();
}

/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @bio:	the source bio
 * @dev:	source device
 * @from:	source sector
 *
 * Called after a bio is remapped to a different device and/or sector.
 **/
static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
				    sector_t from)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio_dev(bio));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio->bi_opf, BLK_TA_REMAP,
			blk_status_to_errno(bio->bi_status),
			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps request to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->q->disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq->cmd_flags, BLK_TA_REMAP, 0,
			sizeof(r), &r, blk_trace_request_get_cgid(rq));
	rcu_read_unlock();
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request *rq, void *data, size_t len)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0,
				BLK_TA_DRV_DATA, 0, len, data,
				blk_trace_request_get_cgid(rq));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);

static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);

	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

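/*
 * Build the "rwbs" action string for one event, e.g. a sync write is
 * rendered as "WS", a discard as "D", and a notify message as "N".
 */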
static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_FLUSH)
		rwbs[i++] = 'F';

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_FUA)
		rwbs[i++] = 'F';
	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}

static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
{
	return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
}

static inline u64 t_cgid(const struct trace_entry *ent)
{
	return *(u64 *)(te_blk_io_trace(ent) + 1);
}

static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
{
	return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

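/* Scalar pdu payloads (unplug depth, split sector) are stored big-endian */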
static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
{
	const __be64 *val = pdu_start(ent, has_cg);
	return be64_to_cpu(*val);
}

typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
	bool has_cg);

static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
	bool has_cg)
{
	char rwbs[RWBS_LEN];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	trace_seq_printf(&iter->seq,
			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), iter->cpu,
			 secs, nsec_rem, iter->ent->pid, act, rwbs);
}

static void blk_log_action(struct trace_iterator *iter, const char *act,
	bool has_cg)
{
	char rwbs[RWBS_LEN];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	if (has_cg) {
		u64 id = t_cgid(iter->ent);

		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
			char blkcg_name_buf[NAME_MAX + 1] = "<...>";

			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
				sizeof(blkcg_name_buf));
			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 blkcg_name_buf, act, rwbs);
		} else {
			/*
			 * The cgid portion used to be "INO,GEN".  Userland
			 * builds a FILEID_INO32_GEN fid out of them and
			 * opens the cgroup using open_by_handle_at(2).
			 * While 32bit ino setups are still the same, 64bit
			 * ones now use the 64bit ino as the whole ID and
			 * no longer use generation.
			 *
			 * Regardless of the content, always output
			 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
			 * be mapped back to @id on both 64 and 32bit ino
			 * setups.  See __kernfs_fh_to_dentry().
			 */
			trace_seq_printf(&iter->seq,
				 "%3d,%-3d %llx,%-llx %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 id & U32_MAX, id >> 32, act, rwbs);
		}
	} else
		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				 MAJOR(t->device), MINOR(t->device), act, rwbs);
}

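/*
 * Dump the pdu as hex bytes, e.g. "(3a 00 12 ..) "; after the last non-zero
 * byte, a single zero is printed and the remaining zeros are elided with
 * "..".
 */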
static void blk_log_dump_pdu(struct trace_seq *s,
	const struct trace_entry *ent, bool has_cg)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end;

	pdu_buf = pdu_start(ent, has_cg);
	pdu_len = pdu_real_len(ent, has_cg);

	if (!pdu_len)
		return;

	/* find where the trailing zeros start; one zero is still printed */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	trace_seq_putc(s, '(');

	for (i = 0; i < pdu_len; i++) {
		trace_seq_printf(s, "%s%02x",
				 i == 0 ? "" : " ", pdu_buf[i]);

		/*
		 * stop when the rest is just zeros and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1) {
			trace_seq_puts(s, " ..) ");
			return;
		}
	}

	trace_seq_puts(s, ") ");
}

static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		trace_seq_printf(s, "%u ", t_bytes(ent));
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%s]\n",
						t_sector(ent), t_sec(ent), cmd);
		else
			trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static void blk_log_with_error(struct trace_seq *s,
			      const struct trace_entry *ent, bool has_cg)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%d]\n", t_error(ent));
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%d]\n",
					 t_sector(ent),
					 t_sec(ent), t_error(ent));
		else
			trace_seq_printf(s, "%llu [%d]\n",
					 t_sector(ent), t_error(ent));
	}
}

static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);

	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
			 t_sector(ent), t_sec(ent),
			 MAJOR(be32_to_cpu(__r->device_from)),
			 MINOR(be32_to_cpu(__r->device_from)),
			 be64_to_cpu(__r->sector_from));
}

static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s]\n", cmd);
}

static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
}

static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
			 get_pdu_int(ent, has_cg), cmd);
}

static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
			bool has_cg)
{
	trace_seq_putmem(s, pdu_start(ent, has_cg),
		pdu_real_len(ent, has_cg));
	trace_seq_putc(s, '\n');
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

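/*
 * Map each __BLK_TA_* action to its printed name and body formatter;
 * act[0] is the terse tag, act[1] the long name used when the "verbose"
 * trace option (TRACE_ITER_VERBOSE) is set.
 */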
static const struct {
	const char *act[2];
	void	   (*print)(struct trace_seq *s, const struct trace_entry *ent,
			    bool has_cg);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
};

static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	bool long_act;
	blk_log_action_t *log_action;
	bool has_cg;

	t	   = te_blk_io_trace(iter->ent);
	what	   = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
	long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;
	has_cg	   = t->action & __BLK_TA_CGROUP;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		log_action(iter, long_act ? "message" : "m", has_cg);
		blk_log_msg(s, iter->ent, has_cg);
		return trace_handle_return(s);
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		log_action(iter, what2act[what].act[long_act], has_cg);
		what2act[what].print(s, iter->ent, has_cg);
	}

	return trace_handle_return(s);
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags, struct trace_event *event)
{
	return print_one_line(iter, false);
}

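/*
 * Rebuild a classic binary blk_io_trace record from the ftrace entry:
 * synthesize the leading fields (magic and version, time from the iterator
 * timestamp, sequence left zero), then copy everything from ->sector
 * onwards, including the variable-length pdu, verbatim.
 */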
static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	trace_seq_putmem(s, &old, offset);
	trace_seq_putmem(s, &t->sector,
			 sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
			     struct trace_event *event)
{
	blk_trace_synthesize_old_trace(iter);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if ((iter->ent->type != TRACE_BLK) ||
	    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int
blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.funcs		= &trace_blk_event_funcs,
};

static int __init init_blk_tracer(void)
{
	if (!register_trace_event(&trace_blk_event)) {
		pr_warn("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warn("Warning: could not register the block tracer\n");
		unregister_trace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	if (bt == NULL)
		return -EINVAL;

	blk_trace_stop(bt);

	put_probe_ref();
	synchronize_rcu();
	blk_trace_free(q, bt);
	return 0;
}

/*
 * Set up everything required to start tracing via the sysfs "enable"
 * attribute; unlike do_blk_trace_setup() there is no relay channel, so
 * events are consumed through the ftrace "blk" tracer instead.
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();
	return 0;

free_bt:
	blk_trace_free(q, bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};

static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_FLUSH,		"flush"		},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_NOTIFY,	"notify"	},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
	{ BLK_TC_FUA,		"fua"		},
};

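/*
 * Parse a comma-separated list of category names into an act_mask, e.g.
 * "read,write" yields BLK_TC_READ | BLK_TC_WRITE. An unknown token makes
 * the whole string parse to -EINVAL.
 */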
static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}

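/*
 * Format an act_mask as a comma-separated list, e.g. BLK_TC_READ |
 * BLK_TC_WRITE becomes "read,write\n". Returns the length of the resulting
 * string, including the trailing newline.
 */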
static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev_get_queue(bdev);
	struct blk_trace *bt;
	ssize_t ret = -ENXIO;

	mutex_lock(&q->debugfs_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!bt);
		goto out_unlock_bdev;
	}

	if (bt == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, bt->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", bt->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", bt->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", bt->end_lba);

out_unlock_bdev:
	mutex_unlock(&q->debugfs_mutex);
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev_get_queue(bdev);
	struct blk_trace *bt;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (kstrtoull(buf, 0, &value)) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else {
		if (kstrtoull(buf, 0, &value))
			goto out;
	}

	mutex_lock(&q->debugfs_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (attr == &dev_attr_enable) {
		if (!!value == !!bt) {
			ret = 0;
			goto out_unlock_bdev;
		}
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (bt == NULL) {
		ret = blk_trace_setup_queue(q, bdev);
		bt = rcu_dereference_protected(q->blk_trace,
				lockdep_is_held(&q->debugfs_mutex));
	}

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			bt->act_mask = value;
		else if (attr == &dev_attr_pid)
			bt->pid = value;
		else if (attr == &dev_attr_start_lba)
			bt->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			bt->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&q->debugfs_mutex);
out:
	return ret ? ret : count;
}
#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

/**
 * blk_fill_rwbs - Fill the buffer rwbs by mapping op to character string.
 * @rwbs:	buffer to be filled
 * @opf:	request operation type (REQ_OP_XXX) and flags for the tracepoint
 *
 * Description:
 *     Maps each request operation and flag to a single character and fills the
 *     buffer provided by the caller with the resulting string.
 *
 **/
void blk_fill_rwbs(char *rwbs, blk_opf_t opf)
{
	int i = 0;

	if (opf & REQ_PREFLUSH)
		rwbs[i++] = 'F';

	switch (opf & REQ_OP_MASK) {
	case REQ_OP_WRITE:
		rwbs[i++] = 'W';
		break;
	case REQ_OP_DISCARD:
		rwbs[i++] = 'D';
		break;
	case REQ_OP_SECURE_ERASE:
		rwbs[i++] = 'D';
		rwbs[i++] = 'E';
		break;
	case REQ_OP_FLUSH:
		rwbs[i++] = 'F';
		break;
	case REQ_OP_READ:
		rwbs[i++] = 'R';
		break;
	default:
		rwbs[i++] = 'N';
	}

	if (opf & REQ_FUA)
		rwbs[i++] = 'F';
	if (opf & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (opf & REQ_SYNC)
		rwbs[i++] = 'S';
	if (opf & REQ_META)
		rwbs[i++] = 'M';

	rwbs[i] = '\0';
}
EXPORT_SYMBOL_GPL(blk_fill_rwbs);
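
/*
 * Example: REQ_OP_WRITE | REQ_SYNC fills "WS", REQ_OP_SECURE_ERASE fills
 * "DE", and REQ_PREFLUSH | REQ_OP_WRITE fills "FW".
 */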

#endif /* CONFIG_EVENT_TRACING */