// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "cancel.h"
#include "timeout.h"

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	u32				repeats;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
	/* for linked completions */
	struct io_kiocb			*prev;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
	bool				ltimeout;
};

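/*
 * "noseq" timeouts are not tied to the CQ sequence: either no completion
 * count was requested (off == 0) or the timeout is multishot. The
 * sequence-based flush in io_flush_timeouts() stops at such entries.
 */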
static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;

	return !timeout->off || data->flags & IORING_TIMEOUT_MULTISHOT;
}

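/* Drop a reference; the final put queues the next linked request and frees @req. */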
static inline void io_put_req(struct io_kiocb *req)
{
	if (req_ref_put_and_test(req)) {
		io_queue_next(req);
		io_free_req(req);
	}
}

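/*
 * Decide whether a fired timeout is done. Single-shot timeouts always
 * finish; multishot timeouts keep going while they are offset-less or
 * still have repeats left.
 */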
static inline bool io_timeout_finish(struct io_timeout *timeout,
				     struct io_timeout_data *data)
{
	if (!(data->flags & IORING_TIMEOUT_MULTISHOT))
		return true;

	if (!timeout->off || (timeout->repeats && --timeout->repeats))
		return false;

	return true;
}

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer);

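/*
 * Task-work completion handler for a fired timeout. A multishot timeout
 * that isn't finished posts a CQE with IORING_CQE_F_MORE and re-arms its
 * timer; otherwise the request is completed for good.
 */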
static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;

	if (!io_timeout_finish(timeout, data)) {
		if (io_req_post_cqe(req, -ETIME, IORING_CQE_F_MORE)) {
			/* re-arm timer */
			spin_lock_irq(&ctx->timeout_lock);
			list_add(&timeout->list, ctx->timeout_list.prev);
			data->timer.function = io_timeout_fn;
			hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
			spin_unlock_irq(&ctx->timeout_lock);
			return;
		}
	}

	io_req_task_complete(req, ts);
}

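/*
 * Try to cancel a pending timeout. Only succeeds if the hrtimer callback
 * isn't currently running; in that case the timeout is accounted in
 * cq_timeouts, taken off the list and completed with @status via task work.
 */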
static bool io_kill_timeout(struct io_kiocb *req, int status)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = req->async_data;

	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);

		if (status)
			req_set_fail(req);
		atomic_set(&req->ctx->cq_timeouts,
			atomic_read(&req->ctx->cq_timeouts) + 1);
		list_del_init(&timeout->list);
		io_req_queue_tw_complete(req, status);
		return true;
	}
	return false;
}

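/*
 * Complete every sequence-based timeout whose target CQ sequence has been
 * reached. The list is ordered by target, so the walk stops at the first
 * entry that is still in the future or that isn't sequence based.
 */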
__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	u32 seq;
	struct io_timeout *timeout, *tmp;

	spin_lock_irq(&ctx->timeout_lock);
	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
		u32 events_needed, events_got;

		if (io_is_timeout_noseq(req))
			break;

		/*
		 * Since seq can easily wrap around over time, subtract
		 * the last seq at which timeouts were flushed before comparing.
		 * Assuming not more than 2^31-1 events have happened since,
		 * these subtractions won't have wrapped, so we can check if
		 * target is in [last_seq, current_seq] by comparing the two.
		 */
		events_needed = timeout->target_seq - ctx->cq_last_tm_flush;
		events_got = seq - ctx->cq_last_tm_flush;
		if (events_got < events_needed)
			break;

		io_kill_timeout(req, 0);
	}
	ctx->cq_last_tm_flush = seq;
	spin_unlock_irq(&ctx->timeout_lock);
}

static void io_req_tw_fail_links(struct io_kiocb *link, struct io_tw_state *ts)
{
	io_tw_lock(link->ctx, ts);
	while (link) {
		struct io_kiocb *nxt = link->link;
		long res = -ECANCELED;

		if (link->flags & REQ_F_FAIL)
			res = link->cqe.res;
		link->link = NULL;
		io_req_set_res(link, res, 0);
		io_req_task_complete(link, ts);
		link = nxt;
	}
}

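/*
 * Fail the link chain hanging off @req: propagate the CQE-skip state per
 * REQ_F_SKIP_LINK_CQES, then hand the chain to task work, where each link
 * is completed with -ECANCELED (or its already-set failure result).
 */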
static void io_fail_links(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = req->link;
	bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;

	if (!link)
		return;

	while (link) {
		if (ignore_cqes)
			link->flags |= REQ_F_CQE_SKIP;
		else
			link->flags &= ~REQ_F_CQE_SKIP;
		trace_io_uring_fail_link(req, link);
		link = link->link;
	}

	link = req->link;
	link->io_task_work.func = io_req_tw_fail_links;
	io_req_task_work_add(link);
	req->link = NULL;
}

static inline void io_remove_next_linked(struct io_kiocb *req)
{
	struct io_kiocb *nxt = req->link;

	req->link = nxt->link;
	nxt->link = NULL;
}

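/*
 * Disarm any (linked) timeout attached to @req and, if the request failed
 * and isn't a hard link, cancel the rest of its link chain.
 */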
void io_disarm_next(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = NULL;

	if (req->flags & REQ_F_ARM_LTIMEOUT) {
		link = req->link;
		req->flags &= ~REQ_F_ARM_LTIMEOUT;
		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
			io_remove_next_linked(req);
			io_req_queue_tw_complete(link, -ECANCELED);
		}
	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock_irq(&ctx->timeout_lock);
		link = io_disarm_linked_timeout(req);
		spin_unlock_irq(&ctx->timeout_lock);
		if (link)
			io_req_queue_tw_complete(link, -ECANCELED);
	}
	if (unlikely((req->flags & REQ_F_FAIL) &&
		     !(req->flags & REQ_F_HARDLINK)))
		io_fail_links(req);
}

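/*
 * Detach the armed linked timeout @link from @req. Returns @link if its
 * hrtimer could still be cancelled (so the caller must complete it), or
 * NULL if the timer callback is already running.
 */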
struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
					    struct io_kiocb *link)
	__must_hold(&req->ctx->completion_lock)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = link->async_data;
	struct io_timeout *timeout = io_kiocb_to_cmd(link, struct io_timeout);

	io_remove_next_linked(req);
	timeout->head = NULL;
	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		list_del(&timeout->list);
		return link;
	}

	return NULL;
}

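/*
 * hrtimer callback for a regular timeout: drop it from the timeout list,
 * account it in cq_timeouts and punt the -ETIME completion to task work.
 * The request is marked failed unless IORING_TIMEOUT_ETIME_SUCCESS was set.
 */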
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	list_del_init(&timeout->list);
	atomic_set(&req->ctx->cq_timeouts,
		atomic_read(&req->ctx->cq_timeouts) + 1);
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
		req_set_fail(req);

	io_req_set_res(req, -ETIME, 0);
	req->io_task_work.func = io_timeout_complete;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}

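/*
 * Find a pending timeout matching @cd, cancel its timer and unlink it.
 * Returns the request on success, or an ERR_PTR: -ENOENT if no match was
 * found, -EALREADY if the timer callback is already running.
 */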
static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
					   struct io_cancel_data *cd)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout *timeout;
	struct io_timeout_data *io;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->timeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (io_cancel_req_match(tmp, cd)) {
			req = tmp;
			break;
		}
	}
	if (!req)
		return ERR_PTR(-ENOENT);

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return ERR_PTR(-EALREADY);
	timeout = io_kiocb_to_cmd(req, struct io_timeout);
	list_del_init(&timeout->list);
	return req;
}

int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
	__must_hold(&ctx->completion_lock)
{
	struct io_kiocb *req;

	spin_lock_irq(&ctx->timeout_lock);
	req = io_timeout_extract(ctx, cd);
	spin_unlock_irq(&ctx->timeout_lock);

	if (IS_ERR(req))
		return PTR_ERR(req);
	io_req_task_queue_fail(req, -ECANCELED);
	return 0;
}

static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_kiocb *prev = timeout->prev;
	int ret = -ENOENT;

	if (prev) {
		if (!(req->task->flags & PF_EXITING)) {
			struct io_cancel_data cd = {
				.ctx		= req->ctx,
				.data		= prev->cqe.user_data,
			};

			ret = io_try_cancel(req->task->io_uring, &cd, 0);
		}
		io_req_set_res(req, ret ?: -ETIME, 0);
		io_req_task_complete(req, ts);
		io_put_req(prev);
	} else {
		io_req_set_res(req, -ETIME, 0);
		io_req_task_complete(req, ts);
	}
}

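/*
 * hrtimer callback for a linked timeout: detach the request it is guarding
 * (if that hasn't completed yet), grab a reference to it and punt the
 * actual cancellation attempt to task work (io_req_task_link_timeout).
 */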
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *prev, *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	prev = timeout->head;
	timeout->head = NULL;

	/*
	 * We don't expect the list to be empty, that will only happen if we
	 * race with the completion of the linked work.
	 */
	if (prev) {
		io_remove_next_linked(prev);
		if (!req_ref_inc_not_zero(prev))
			prev = NULL;
	}
	list_del(&timeout->list);
	timeout->prev = prev;
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	req->io_task_work.func = io_req_task_link_timeout;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}

static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
{
	switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
	case IORING_TIMEOUT_BOOTTIME:
		return CLOCK_BOOTTIME;
	case IORING_TIMEOUT_REALTIME:
		return CLOCK_REALTIME;
	default:
		/* can't happen, vetted at prep time */
		WARN_ON_ONCE(1);
		fallthrough;
	case 0:
		return CLOCK_MONOTONIC;
	}
}

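/*
 * IORING_TIMEOUT_UPDATE for a linked timeout: look it up by user_data on
 * ->ltimeout_list and, if the timer hasn't fired yet, re-initialise and
 * restart it with the new expiry.
 */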
static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
				    struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout_data *io;
	struct io_timeout *timeout;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->ltimeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (user_data == tmp->cqe.user_data) {
			req = tmp;
			break;
		}
	}
	if (!req)
		return -ENOENT;

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return -EALREADY;
	hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
	io->timer.function = io_link_timeout_fn;
	hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}

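/*
 * IORING_TIMEOUT_UPDATE for a regular timeout: extract the matching entry,
 * turn it into a pure (offset-less) timeout, put it back at the tail of the
 * timeout list and re-arm it with the new expiry.
 */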
static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
			     struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_cancel_data cd = { .ctx = ctx, .data = user_data, };
	struct io_kiocb *req = io_timeout_extract(ctx, &cd);
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;

	if (IS_ERR(req))
		return PTR_ERR(req);

	timeout->off = 0; /* noseq */
	data = req->async_data;
	list_add_tail(&timeout->list, &ctx->timeout_list);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}

int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	tr->ltimeout = false;
	tr->addr = READ_ONCE(sqe->addr);
	tr->flags = READ_ONCE(sqe->timeout_flags);
	if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
		if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
			return -EINVAL;
		if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
			tr->ltimeout = true;
		if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
			return -EINVAL;
		if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
			return -EFAULT;
		if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
			return -EINVAL;
	} else if (tr->flags) {
		/* timeout removal doesn't support flags */
		return -EINVAL;
	}

	return 0;
}

static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
{
	return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
					    : HRTIMER_MODE_REL;
}

/*
 * Remove or update an existing timeout command
 */
int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	if (!(tr->flags & IORING_TIMEOUT_UPDATE)) {
		struct io_cancel_data cd = { .ctx = ctx, .data = tr->addr, };

		spin_lock(&ctx->completion_lock);
		ret = io_timeout_cancel(ctx, &cd);
		spin_unlock(&ctx->completion_lock);
	} else {
		enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);

		spin_lock_irq(&ctx->timeout_lock);
		if (tr->ltimeout)
			ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
		else
			ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
		spin_unlock_irq(&ctx->timeout_lock);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

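/*
 * Common prep for IORING_OP_TIMEOUT and IORING_OP_LINK_TIMEOUT: validate
 * the SQE and flags, allocate the async timeout data, copy in the timespec
 * and pick the clock/mode. For link timeouts, the timeout is attached to
 * the last request of the current submission link, which is flagged with
 * REQ_F_ARM_LTIMEOUT.
 */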
static int __io_timeout_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe,
			     bool is_timeout_link)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;
	unsigned flags;
	u32 off = READ_ONCE(sqe->off);

	if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
		return -EINVAL;
	if (off && is_timeout_link)
		return -EINVAL;
	flags = READ_ONCE(sqe->timeout_flags);
	if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
		      IORING_TIMEOUT_ETIME_SUCCESS |
		      IORING_TIMEOUT_MULTISHOT))
		return -EINVAL;
	/* more than one clock specified is invalid, obviously */
	if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
		return -EINVAL;
	/* multishot requests only make sense with rel values */
	if (!(~flags & (IORING_TIMEOUT_MULTISHOT | IORING_TIMEOUT_ABS)))
		return -EINVAL;

	INIT_LIST_HEAD(&timeout->list);
	timeout->off = off;
	if (unlikely(off && !req->ctx->off_timeout_used))
		req->ctx->off_timeout_used = true;
	/*
	 * for multishot reqs w/ fixed nr of repeats, repeats tracks the
	 * remaining nr
	 */
	timeout->repeats = 0;
	if ((flags & IORING_TIMEOUT_MULTISHOT) && off > 0)
		timeout->repeats = off;

	if (WARN_ON_ONCE(req_has_async_data(req)))
		return -EFAULT;
	if (io_alloc_async_data(req))
		return -ENOMEM;

	data = req->async_data;
	data->req = req;
	data->flags = flags;

	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
		return -EFAULT;

	if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
		return -EINVAL;

	data->mode = io_translate_timeout_mode(flags);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);

	if (is_timeout_link) {
		struct io_submit_link *link = &req->ctx->submit_state.link;

		if (!link->head)
			return -EINVAL;
		if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
			return -EINVAL;
		timeout->head = link->last;
		link->last->flags |= REQ_F_ARM_LTIMEOUT;
	}
	return 0;
}

int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, false);
}

int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, true);
}

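/*
 * Issue a timeout request: compute its target CQ sequence (if it has an
 * offset), insert it into the sorted timeout list and start the hrtimer.
 * The completion is posted later from the timer/task-work path.
 */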
int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_timeout_data *data = req->async_data;
	struct list_head *entry;
	u32 tail, off = timeout->off;

	spin_lock_irq(&ctx->timeout_lock);

	/*
	 * sqe->off holds how many events need to occur for this timeout
	 * event to be satisfied. If it isn't set, then this is a pure
	 * timeout request, sequence isn't used.
	 */
	if (io_is_timeout_noseq(req)) {
		entry = ctx->timeout_list.prev;
		goto add;
	}

	tail = data_race(ctx->cached_cq_tail) - atomic_read(&ctx->cq_timeouts);
	timeout->target_seq = tail + off;

	/*
	 * Update the last seq here in case io_flush_timeouts() hasn't.
	 * This is safe because ->completion_lock is held, and submissions
	 * and completions are never mixed in the same ->completion_lock section.
	 */
	ctx->cq_last_tm_flush = tail;

	/*
	 * Insertion sort, ensuring the first entry in the list is always
	 * the one we need first.
	 */
	list_for_each_prev(entry, &ctx->timeout_list) {
		struct io_timeout *nextt = list_entry(entry, struct io_timeout, list);
		struct io_kiocb *nxt = cmd_to_io_kiocb(nextt);

		if (io_is_timeout_noseq(nxt))
			continue;
		/* nxt.seq is behind @tail, otherwise would've been completed */
		if (off >= nextt->target_seq - tail)
			break;
	}
add:
	list_add(&timeout->list, entry);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
	spin_unlock_irq(&ctx->timeout_lock);
	return IOU_ISSUE_SKIP_COMPLETE;
}

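/*
 * Arm a previously prepared linked timeout. If the request it guards hasn't
 * already completed (timeout->head is still set), start the timer and track
 * the timeout on ->ltimeout_list; the submission reference is dropped in
 * either case.
 */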
void io_queue_linked_timeout(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock_irq(&ctx->timeout_lock);
	/*
	 * If the back reference is NULL, then our linked request finished
	 * before we got a chance to set up the timer
	 */
	if (timeout->head) {
		struct io_timeout_data *data = req->async_data;

		data->timer.function = io_link_timeout_fn;
		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
				data->mode);
		list_add_tail(&timeout->list, &ctx->ltimeout_list);
	}
	spin_unlock_irq(&ctx->timeout_lock);
	/* drop submission reference */
	io_put_req(req);
}

static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
			  bool cancel_all)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_kiocb *req;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

/* Returns true if we found and killed one or more timeouts */
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
			     bool cancel_all)
{
	struct io_timeout *timeout, *tmp;
	int canceled = 0;
	/*
	 * completion_lock is needed for io_match_task(). Take it before
	 * timeout_lock to keep the locking order.
	 */
	spin_lock(&ctx->completion_lock);
	spin_lock_irq(&ctx->timeout_lock);
	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);

		if (io_match_task(req, tsk, cancel_all) &&
		    io_kill_timeout(req, -ECANCELED))
			canceled++;
	}
	spin_unlock_irq(&ctx->timeout_lock);
	spin_unlock(&ctx->completion_lock);
	return canceled != 0;
}