// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "openclose.h"
#include "rsrc.h"
#include "memmap.h"

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage);

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)

static const struct io_mapped_ubuf dummy_ubuf = {
	/* set an invalid range so io_import_fixed() rejects this entry */
	.ubuf = -1UL,
	.ubuf_end = 0,
};

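/*
 * Charge @nr_pages against the user's RLIMIT_MEMLOCK. Lockless: retry the
 * cmpxchg on ->locked_vm until it succeeds, failing with -ENOMEM if the
 * addition would exceed the limit.
 */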
int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}

static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

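/*
 * Copy the @index'th iovec of the user supplied array at @arg into @dst,
 * using the compat_iovec layout if the ring was created by a compat task.
 */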
static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}

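/*
 * Basic sanity checks on a buffer registration entry: a NULL base is only
 * valid for a zero-length (sparse) entry, the length is capped at 1GiB, and
 * the page-rounded range must not wrap the address space.
 */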
static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer layout here;
	 * we'll fail with -EINVAL at submission time if they turn out to
	 * be wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}

static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	if (imu != &dummy_ubuf) {
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
	*slot = NULL;
}

static void io_rsrc_put_work(struct io_rsrc_node *node)
{
	struct io_rsrc_put *prsrc = &node->item;

	if (prsrc->tag)
		io_post_aux_cqe(node->ctx, prsrc->tag, 0, 0);

	switch (node->type) {
	case IORING_RSRC_FILE:
		fput(prsrc->file);
		break;
	case IORING_RSRC_BUFFER:
		io_rsrc_buf_put(node->ctx, prsrc);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	if (!io_alloc_cache_put(&ctx->rsrc_node_cache, node))
		kfree(node);
}

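/*
 * Called under ->uring_lock when a node's refcount drops to zero. Flush
 * completed nodes from the head of ->rsrc_ref_list in order: post the tag
 * CQE and put the resource for each, recycle the node, and wake quiesce
 * waiters once the list has drained.
 */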
void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
	__must_hold(&node->ctx->uring_lock)
{
	struct io_ring_ctx *ctx = node->ctx;

	while (!list_empty(&ctx->rsrc_ref_list)) {
		node = list_first_entry(&ctx->rsrc_ref_list,
					    struct io_rsrc_node, node);
		/* recycle ref nodes in order */
		if (node->refs)
			break;
		list_del(&node->node);

		if (likely(!node->empty))
			io_rsrc_put_work(node);
		io_rsrc_node_destroy(ctx, node);
	}
	if (list_empty(&ctx->rsrc_ref_list) && unlikely(ctx->rsrc_quiesce))
		wake_up_all(&ctx->rsrc_quiesce_wq);
}

struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *ref_node;

	ref_node = io_alloc_cache_get(&ctx->rsrc_node_cache);
	if (!ref_node) {
		ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
		if (!ref_node)
			return NULL;
	}

	ref_node->ctx = ctx;
	ref_node->empty = 0;
	ref_node->refs = 1;
	return ref_node;
}

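/*
 * Wait until all resources previously queued for removal have actually been
 * put. Parks the current rsrc node on ->rsrc_ref_list, switches the ctx to a
 * freshly allocated node, and sleeps (dropping ->uring_lock) until the list
 * drains. Returns -ENXIO if another quiesce is already in progress.
 */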
__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
				      struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *backup;
	DEFINE_WAIT(we);
	int ret;

	/* As we may drop ->uring_lock, another task may have started a quiesce */
	if (data->quiesce)
		return -ENXIO;

	backup = io_rsrc_node_alloc(ctx);
	if (!backup)
		return -ENOMEM;
	ctx->rsrc_node->empty = true;
	ctx->rsrc_node->type = -1;
	list_add_tail(&ctx->rsrc_node->node, &ctx->rsrc_ref_list);
	io_put_rsrc_node(ctx, ctx->rsrc_node);
	ctx->rsrc_node = backup;

	if (list_empty(&ctx->rsrc_ref_list))
		return 0;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 1);
		smp_mb();
	}

	ctx->rsrc_quiesce++;
	data->quiesce = true;
	do {
		prepare_to_wait(&ctx->rsrc_quiesce_wq, &we, TASK_INTERRUPTIBLE);
		mutex_unlock(&ctx->uring_lock);

		ret = io_run_task_work_sig(ctx);
		if (ret < 0) {
			mutex_lock(&ctx->uring_lock);
			if (list_empty(&ctx->rsrc_ref_list))
				ret = 0;
			break;
		}

		schedule();
		__set_current_state(TASK_RUNNING);
		mutex_lock(&ctx->uring_lock);
		ret = 0;
	} while (!list_empty(&ctx->rsrc_ref_list));

	finish_wait(&ctx->rsrc_quiesce_wq, &we);
	data->quiesce = false;
	ctx->rsrc_quiesce--;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 0);
		smp_mb();
	}
	return ret;
}

static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
	size_t size = data->nr * sizeof(data->tags[0][0]);

	if (data->tags)
		io_free_page_table((void **)data->tags, size);
	kfree(data);
}

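/*
 * Allocate @size bytes split into PAGE_SIZE-sized chunks, so large tag
 * tables don't need a single high-order allocation. Returns an array of
 * chunk pointers, or NULL on failure.
 */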
static __cold void **io_alloc_page_table(size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
	size_t init_size = size;
	void **table;

	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
	if (!table)
		return NULL;

	for (i = 0; i < nr_tables; i++) {
		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
		if (!table[i]) {
			io_free_page_table(table, init_size);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}

__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, int type,
				     u64 __user *utags,
				     unsigned nr, struct io_rsrc_data **pdata)
{
	struct io_rsrc_data *data;
	int ret = 0;
	unsigned i;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
	if (!data->tags) {
		kfree(data);
		return -ENOMEM;
	}

	data->nr = nr;
	data->ctx = ctx;
	data->rsrc_type = type;
	if (utags) {
		ret = -EFAULT;
		for (i = 0; i < nr; i++) {
			u64 *tag_slot = io_get_tag_slot(data, i);

			if (copy_from_user(tag_slot, &utags[i],
					   sizeof(*tag_slot)))
				goto fail;
		}
	}
	*pdata = data;
	return 0;
fail:
	io_rsrc_data_free(data);
	return ret;
}

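/*
 * Update already-registered file slots in the range [up->offset,
 * up->offset + nr_args). For each entry, the old file (if any) is queued
 * for deferred put, then the new fd is installed unless it is -1 (clear the
 * slot) or IORING_REGISTER_FILES_SKIP (leave it untouched). Returns the
 * number of entries processed, or an error if none were.
 */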
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	struct io_rsrc_data *data = ctx->file_data;
	struct io_fixed_file *file_slot;
	int fd, i, err = 0;
	unsigned int done;

	if (!ctx->file_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_files)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(&ctx->file_table, i);

		if (file_slot->file_ptr) {
			err = io_queue_rsrc_removal(data, i,
						    io_slot_file(file_slot));
			if (err)
				break;
			file_slot->file_ptr = 0;
			io_file_bitmap_clear(&ctx->file_table, i);
		}
		if (fd != -1) {
			struct file *file = fget(fd);

			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			*io_get_tag_slot(data, i) = tag;
			io_fixed_file_set(file_slot, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}
	return done ? done : err;
}

static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
	struct page *last_hpage = NULL;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		u64 tag = 0;

		err = io_copy_iov(ctx, &iov, iovs, done);
		if (err)
			break;
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(&iov);
		if (err)
			break;
		if (!iov.iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != &dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, i,
						    ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = (struct io_mapped_ubuf *)&dummy_ubuf;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, i) = tag;
	}
	return done ? done : err;
}

static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;

	lockdep_assert_held(&ctx->uring_lock);

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}

int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}

int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}

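/*
 * IORING_FILE_INDEX_ALLOC variant of a files update: each fd is installed
 * into any free fixed-file slot and the chosen slot index is copied back to
 * userspace. If copying the index back fails, the slot is closed again.
 */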
static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_data)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

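/*
 * Queue a resource (file or buffer) for deferred put: stash it and its tag
 * in the current rsrc node, park the node on ->rsrc_ref_list and switch the
 * ctx to a fresh node. The resource is only released once every request
 * referencing the parked node has completed.
 */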
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc)
{
	struct io_ring_ctx *ctx = data->ctx;
	struct io_rsrc_node *node = ctx->rsrc_node;
	u64 *tag_slot = io_get_tag_slot(data, idx);

	ctx->rsrc_node = io_rsrc_node_alloc(ctx);
	if (unlikely(!ctx->rsrc_node)) {
		ctx->rsrc_node = node;
		return -ENOMEM;
	}

	node->item.rsrc = rsrc;
	node->type = data->rsrc_type;
	node->item.tag = *tag_slot;
	*tag_slot = 0;
	list_add_tail(&node->node, &ctx->rsrc_ref_list);
	io_put_rsrc_node(ctx, node);
	return 0;
}

void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file = io_file_from_index(&ctx->file_table, i);

		if (!file)
			continue;
		io_file_bitmap_clear(&ctx->file_table, i);
		fput(file);
	}

	io_free_file_tables(&ctx->file_table);
	io_file_table_set_alloc_range(ctx, 0, 0);
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
}

int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_files;
	int ret;

	if (!ctx->file_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock; while it's not held, prevent new
	 * requests from using the table.
	 */
	ctx->nr_user_files = 0;
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	ctx->nr_user_files = nr;
	if (!ret)
		__io_sqe_files_unregister(ctx);
	return ret;
}

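/*
 * Register an array of file descriptors as the ring's fixed file table,
 * reached via io_uring_register(2) with IORING_REGISTER_FILES (or
 * IORING_REGISTER_FILES2 when tags are used). Entries of -1, or a NULL
 * array, create sparse slots that can be filled later.
 */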
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_FILE, tags, nr_args,
				 &ctx->file_data);
	if (ret)
		return ret;

	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
		io_rsrc_data_free(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct io_fixed_file *file_slot;

		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto fail;
		}
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		file_slot = io_fixed_file_slot(&ctx->file_table, i);
		io_fixed_file_set(file_slot, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
	return 0;
fail:
	__io_sqe_files_unregister(ctx);
	return ret;
}

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	io_buffer_unmap(ctx, &prsrc->buf);
	prsrc->buf = NULL;
}

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_user_bufs; i++)
		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
	kfree(ctx->user_bufs);
	io_rsrc_data_free(ctx->buf_data);
	ctx->user_bufs = NULL;
	ctx->buf_data = NULL;
	ctx->nr_user_bufs = 0;
}

int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_bufs;
	int ret;

	if (!ctx->buf_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock; while it's not held, prevent new
	 * requests from using the table.
	 */
	ctx->nr_user_bufs = 0;
	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
	ctx->nr_user_bufs = nr;
	if (!ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

/*
 * Not super efficient, but this is just registration time. And we do cache
 * the last compound head, so generally we'll only do a full search if we
 * don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of
 * the page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

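/*
 * Count how many pages of the just-pinned range need to be charged against
 * locked memory. Compound (huge) pages are charged once for their full size;
 * *last_hpage and headpage_already_acct() keep the same head page from being
 * charged twice. The total is then charged via io_account_mem().
 */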
static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}

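/*
 * Pin the user pages backing @iov and build an io_mapped_ubuf describing
 * them. A NULL iov_base installs the dummy (sparse) entry. If all pages sit
 * consecutively in one folio, they are collapsed into a single bvec and the
 * extra page references are dropped.
 */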
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;
	struct folio *folio = NULL;

	*pimu = (struct io_mapped_ubuf *)&dummy_ubuf;
	if (!iov->iov_base)
		return 0;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
				&nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	/* If it's a huge page, try to coalesce the pages into a single bvec entry */
	if (nr_pages > 1) {
		folio = page_folio(pages[0]);
		for (i = 1; i < nr_pages; i++) {
			/*
			 * Pages must be consecutive and on the same folio for
			 * this to work
			 */
			if (page_folio(pages[i]) != folio ||
			    pages[i] != pages[i - 1] + 1) {
				folio = NULL;
				break;
			}
		}
		if (folio) {
			/*
			 * The pages are bound to the folio; this doesn't
			 * actually unpin them, it just drops all but one
			 * reference, which is later put by io_buffer_unmap().
			 * Note: needs a better helper.
			 */
			unpin_user_pages(&pages[1], nr_pages - 1);
			nr_pages = 1;
		}
	}

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
	size = iov->iov_len;
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->ubuf_end = imu->ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;
	ret = 0;

	if (folio) {
		bvec_set_page(&imu->bvec[0], pages[0], size, off);
		goto done;
	}
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}

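/*
 * Register a set of fixed buffers, reached via io_uring_register(2) with
 * IORING_REGISTER_BUFFERS (or IORING_REGISTER_BUFFERS2 when tags are used).
 * Roughly, from userspace:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	io_uring_register(ring_fd, IORING_REGISTER_BUFFERS, &iov, 1);
 *
 * Each iovec is pinned and mapped once here, so per-request imports via
 * io_import_fixed() don't have to pin pages again.
 */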
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	int i, ret;
	struct iovec iov;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_BUFFER, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		if (arg) {
			ret = io_copy_iov(ctx, &iov, arg, i);
			if (ret)
				break;
			ret = io_buffer_validate(&iov);
			if (ret)
				break;
		} else {
			memset(&iov, 0, sizeof(iov));
		}

		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

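/*
 * Set up @iter to cover [buf_addr, buf_addr + len) within a registered
 * buffer. Fails with -EFAULT if the range isn't fully inside the mapped
 * region. For offsets past the first bvec, whole segments are skipped
 * directly instead of paying for iov_iter_advance().
 */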
int io_import_fixed(int ddir, struct iov_iter *iter,
			   struct io_mapped_ubuf *imu,
			   u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * Might not be the start of the buffer; set the size appropriately
	 * and advance to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow when
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or is the whole
		 * first bvec), just use iov_iter_advance(). This makes it
		 * easier since we can just skip the first segment, which may
		 * not be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset < bvec->bv_len) {
			/*
			 * Note: huge page buffers consist of one large bvec
			 * entry and should always go this way. The other
			 * branch doesn't expect non PAGE_SIZE'd chunks.
			 */
			iter->bvec = bvec;
			iter->nr_segs = bvec->bv_len;
			iter->count -= offset;
			iter->iov_offset = offset;
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}