// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>
#include <linux/ktime.h>
#include <linux/netfs.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "metric.h"
#include "crypto.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty. (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and write the most recently dirtied pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */

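/*
 * Writeback congestion thresholds: congestion_kb is converted to a page
 * count (PAGE_SHIFT - 10 converts KiB to pages).  Writeback is flagged
 * congested once the in-flight writeback page count passes the "on"
 * threshold, and unflagged again when it drops below 3/4 of it.
 */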
#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))

static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct folio **foliop, void **_fsdata);

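/*
 * Return the snap context attached to a dirty page, or NULL if the page
 * carries no private data.  No extra reference is taken.
 */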
static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;

	if (folio_test_dirty(folio)) {
		doutc(cl, "%llx.%llx %p idx %lu -- already dirty\n",
		      ceph_vinop(inode), folio, folio->index);
		VM_BUG_ON_FOLIO(!folio_test_private(folio), folio);
		return false;
	}

	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
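	/* Pin the inode while it has any dirty pages outstanding. */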
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	doutc(cl, "%llx.%llx %p idx %lu head %d/%d -> %d/%d "
	      "snapc %p seq %lld (%d snaps)\n",
	      ceph_vinop(inode), folio, folio->index,
	      ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	      ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	      snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in folio->private.  Also set
	 * PagePrivate so that we get invalidate_folio callback.
	 */
	VM_WARN_ON_FOLIO(folio->private, folio);
	folio_attach_private(folio, snapc);

	return ceph_fscache_dirty_folio(mapping, folio);
}

/*
 * If we are truncating the full folio (i.e. offset == 0 and length ==
 * folio_size(folio)), adjust the dirty folio counters appropriately.
 * Only called if there is private data on the folio.
 */
static void ceph_invalidate_folio(struct folio *folio, size_t offset,
				size_t length)
{
	struct inode *inode = folio->mapping->host;
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;

	if (offset != 0 || length != folio_size(folio)) {
		doutc(cl, "%llx.%llx idx %lu partial dirty page %zu~%zu\n",
		      ceph_vinop(inode), folio->index, offset, length);
		return;
	}

	WARN_ON(!folio_test_locked(folio));
	if (folio_test_private(folio)) {
		doutc(cl, "%llx.%llx idx %lu full dirty page\n",
		      ceph_vinop(inode), folio->index);

		snapc = folio_detach_private(folio);
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
		ceph_put_snap_context(snapc);
	}

	netfs_invalidate_folio(folio, offset, length);
}

static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
{
	struct inode *inode = rreq->inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_layout *lo = &ci->i_layout;
	unsigned long max_pages = inode->i_sb->s_bdi->ra_pages;
	loff_t end = rreq->start + rreq->len, new_end;
	struct ceph_netfs_request_data *priv = rreq->netfs_priv;
	unsigned long max_len;
	u32 blockoff;

	if (priv) {
		/* Readahead is disabled by posix_fadvise POSIX_FADV_RANDOM */
		if (priv->file_ra_disabled)
			max_pages = 0;
		else
			max_pages = priv->file_ra_pages;
	}

	/* Readahead is disabled */
	if (!max_pages)
		return;

	max_len = max_pages << PAGE_SHIFT;

	/*
	 * Try to expand the length forward by rounding it up to the next
	 * block, but do not exceed the file size, unless the original
	 * request already exceeds it.
	 */
	new_end = umin(round_up(end, lo->stripe_unit), rreq->i_size);
	if (new_end > end && new_end <= rreq->start + max_len)
		rreq->len = new_end - rreq->start;

	/* Try to expand the start downward */
	div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
	if (rreq->len + blockoff <= max_len) {
		rreq->start -= blockoff;
		rreq->len += blockoff;
	}
}

static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
{
	struct inode *inode = subreq->rreq->inode;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 objno, objoff;
	u32 xlen;

	/* Truncate the extent at the end of the current block */
	ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len,
				      &objno, &objoff, &xlen);
	subreq->len = min(xlen, fsc->mount_options->rsize);
	return true;
}

static void finish_netfs_read(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	struct netfs_io_subrequest *subreq = req->r_priv;
	struct ceph_osd_req_op *op = &req->r_ops[0];
	int err = req->r_result;
	bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);

	ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
				 req->r_end_latency, osd_data->length, err);

	doutc(cl, "result %d subreq->len=%zu i_size=%lld\n", req->r_result,
	      subreq->len, i_size_read(req->r_inode));

	/* no object means success but no data */
	if (err == -ENOENT)
		err = 0;
	else if (err == -EBLOCKLISTED)
		fsc->blocklisted = true;

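	/*
	 * On success a sparse read reports its length via the extent map;
	 * trim the result to the subrequest and, for encrypted inodes,
	 * decrypt the returned extents in place.
	 */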
	if (err >= 0) {
		if (sparse && err > 0)
			err = ceph_sparse_ext_map_end(op);
		if (err < subreq->len)
			__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
		if (IS_ENCRYPTED(inode) && err > 0) {
			err = ceph_fscrypt_decrypt_extents(inode,
					osd_data->pages, subreq->start,
					op->extent.sparse_ext,
					op->extent.sparse_ext_cnt);
			if (err > subreq->len)
				err = subreq->len;
		}
	}

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		ceph_put_page_vector(osd_data->pages,
				     calc_pages_for(osd_data->alignment,
					osd_data->length), false);
	}
	netfs_subreq_terminated(subreq, err, false);
	iput(req->r_inode);
	ceph_dec_osd_stopping_blocker(fsc->mdsc);
}

static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct inode *inode = rreq->inode;
	struct ceph_mds_reply_info_parsed *rinfo;
	struct ceph_mds_reply_info_in *iinfo;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct iov_iter iter;
	ssize_t err = 0;
	size_t len;
	int mode;

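	/*
	 * Anything we cannot fill below is zeroed by netfs (CLEAR_TAIL),
	 * and inline data is never a copy-to-cache candidate.
	 */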
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);

	if (subreq->start >= inode->i_size)
		goto out;

	/* We need to fetch the inline data. */
	mode = ceph_try_to_choose_auth_mds(inode, CEPH_STAT_CAP_INLINE_DATA);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_ino1 = ci->i_vino;
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INLINE_DATA);
	req->r_num_caps = 2;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (err < 0)
		goto out;

	rinfo = &req->r_reply_info;
	iinfo = &rinfo->targeti;
	if (iinfo->inline_version == CEPH_INLINE_NONE) {
		/* The data got uninlined */
		ceph_mdsc_put_request(req);
		return false;
	}

	len = min_t(size_t, iinfo->inline_len - subreq->start, subreq->len);
	iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
	err = copy_to_iter(iinfo->inline_data + subreq->start, len, &iter);
	if (err == 0)
		err = -EFAULT;

	ceph_mdsc_put_request(req);
out:
	netfs_subreq_terminated(subreq, err, false);
	return true;
}

static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct inode *inode = rreq->inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_osd_request *req = NULL;
	struct ceph_vino vino = ceph_vino(inode);
	struct iov_iter iter;
	int err = 0;
	u64 len = subreq->len;
	bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
	u64 off = subreq->start;
	int extent_cnt;

	if (ceph_inode_is_shutdown(inode)) {
		err = -EIO;
		goto out;
	}

	if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
		return;

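	/* For encrypted inodes, expand off/len to crypto block boundaries */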
	ceph_fscrypt_adjust_off_and_len(inode, &off, &len);

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino,
			off, &len, 0, 1, sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ,
			CEPH_OSD_FLAG_READ, NULL, ci->i_truncate_seq,
			ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}

	if (sparse) {
		extent_cnt = __ceph_sparse_read_ext_count(inode, len);
		err = ceph_alloc_sparse_ext_map(&req->r_ops[0], extent_cnt);
		if (err)
			goto out;
	}

	doutc(cl, "%llx.%llx pos=%llu orig_len=%zu len=%llu\n",
	      ceph_vinop(inode), subreq->start, subreq->len, len);

	iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);

	/*
	 * FIXME: For now, use CEPH_OSD_DATA_TYPE_PAGES instead of _ITER for
	 * encrypted inodes. We'd need infrastructure that handles an iov_iter
	 * instead of page arrays, and we don't have that as of yet. Once the
	 * dust settles on the write helpers and encrypt/decrypt routines for
	 * netfs, we should be able to rework this.
	 */
	if (IS_ENCRYPTED(inode)) {
		struct page **pages;
		size_t page_off;

		err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off);
		if (err < 0) {
			doutc(cl, "%llx.%llx failed to allocate pages, %d\n",
			      ceph_vinop(inode), err);
			goto out;
		}

		/* should always give us a page-aligned read */
		WARN_ON_ONCE(page_off);
		len = err;
		err = 0;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false,
						 false);
	} else {
		osd_req_op_extent_osd_iter(req, 0, &iter);
	}
	if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
		err = -EIO;
		goto out;
	}
	req->r_callback = finish_netfs_read;
	req->r_priv = subreq;
	req->r_inode = inode;
	ihold(inode);

	ceph_osdc_start_request(req->r_osdc, req);
out:
	ceph_osdc_put_request(req);
	if (err)
		netfs_subreq_terminated(subreq, err, false);
	doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err);
}

static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct inode *inode = rreq->inode;
	struct ceph_client *cl = ceph_inode_to_client(inode);
	int got = 0, want = CEPH_CAP_FILE_CACHE;
	struct ceph_netfs_request_data *priv;
	int ret = 0;

	if (rreq->origin != NETFS_READAHEAD)
		return 0;

	priv = kzalloc(sizeof(*priv), GFP_NOFS);
	if (!priv)
		return -ENOMEM;

	if (file) {
		struct ceph_rw_context *rw_ctx;
		struct ceph_file_info *fi = file->private_data;

		priv->file_ra_pages = file->f_ra.ra_pages;
		priv->file_ra_disabled = file->f_mode & FMODE_RANDOM;

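		/*
		 * An active rw context means the caller already holds the
		 * cap references needed for this read.
		 */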
		rw_ctx = ceph_find_rw_context(fi);
		if (rw_ctx) {
			rreq->netfs_priv = priv;
			return 0;
		}
	}

	/*
	 * readahead callers do not necessarily hold Fcb caps
	 * (e.g. fadvise, madvise).
	 */
	ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
	if (ret < 0) {
		doutc(cl, "%llx.%llx, error getting cap\n", ceph_vinop(inode));
		goto out;
	}

	if (!(got & want)) {
		doutc(cl, "%llx.%llx, no cache cap\n", ceph_vinop(inode));
		ret = -EACCES;
		goto out;
	}
	if (ret == 0) {
		ret = -EACCES;
		goto out;
	}

	priv->caps = got;
	rreq->netfs_priv = priv;

out:
	if (ret < 0)
		kfree(priv);

	return ret;
}

static void ceph_netfs_free_request(struct netfs_io_request *rreq)
{
	struct ceph_netfs_request_data *priv = rreq->netfs_priv;

	if (!priv)
		return;

	if (priv->caps)
		ceph_put_cap_refs(ceph_inode(rreq->inode), priv->caps);
	kfree(priv);
	rreq->netfs_priv = NULL;
}

const struct netfs_request_ops ceph_netfs_ops = {
	.init_request		= ceph_init_request,
	.free_request		= ceph_netfs_free_request,
	.issue_read		= ceph_netfs_issue_read,
	.expand_readahead	= ceph_netfs_expand_readahead,
	.clamp_length		= ceph_netfs_clamp_length,
	.check_write_begin	= ceph_netfs_check_write_begin,
};

#ifdef CONFIG_CEPH_FSCACHE
static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
{
	struct inode *inode = priv;

	if (IS_ERR_VALUE(error) && error != -ENOBUFS)
		ceph_fscache_invalidate(inode, false);
}

static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct fscache_cookie *cookie = ceph_fscache_cookie(ci);

	fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode),
			       ceph_fscache_write_terminated, inode, true, caching);
}
#else
static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
}
#endif /* CONFIG_CEPH_FSCACHE */

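/*
 * Snapshot of inode state (size and truncate info) that a writeback pass
 * should use, captured under i_ceph_lock by get_oldest_context().
 */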
struct ceph_writeback_ctl
{
	loff_t i_size;
	u64 truncate_size;
	u32 truncate_seq;
	bool size_stable;
	bool head_snapc;
};

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
		   struct ceph_snap_context *page_snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		doutc(cl, " capsnap %p snapc %p has %d dirty pages\n",
		      capsnap, capsnap->context, capsnap->dirty_pages);
		if (!capsnap->dirty_pages)
			continue;

		/* get i_size, truncate_{seq,size} for page_snapc? */
		if (snapc && capsnap->context != page_snapc)
			continue;

		if (ctl) {
			if (capsnap->writing) {
				ctl->i_size = i_size_read(inode);
				ctl->size_stable = false;
			} else {
				ctl->i_size = capsnap->size;
				ctl->size_stable = true;
			}
			ctl->truncate_size = capsnap->truncate_size;
			ctl->truncate_seq = capsnap->truncate_seq;
			ctl->head_snapc = false;
		}

		if (snapc)
			break;

		snapc = ceph_get_snap_context(capsnap->context);
		if (!page_snapc ||
		    page_snapc == snapc ||
		    page_snapc->seq > snapc->seq)
			break;
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		doutc(cl, " head snapc %p has %d dirty pages\n", snapc,
		      ci->i_wrbuffer_ref_head);
		if (ctl) {
			ctl->i_size = i_size_read(inode);
			ctl->truncate_size = ci->i_truncate_size;
			ctl->truncate_seq = ci->i_truncate_seq;
			ctl->size_stable = false;
			ctl->head_snapc = true;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}

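/*
 * Work out how many bytes of @page from @start actually need writing,
 * capping at the size recorded for the page's snap context (or i_size for
 * the head context) and at the end of the page itself.
 */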
static u64 get_writepages_data_length(struct inode *inode,
				      struct page *page, u64 start)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_cap_snap *capsnap = NULL;
	u64 end = i_size_read(inode);
	u64 ret;

	snapc = page_snap_context(ceph_fscrypt_pagecache_page(page));
	if (snapc != ci->i_head_snapc) {
		bool found = false;

		spin_lock(&ci->i_ceph_lock);
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				if (!capsnap->writing)
					end = capsnap->size;
				found = true;
				break;
			}
		}
		spin_unlock(&ci->i_ceph_lock);
		WARN_ON(!found);
	}
	if (end > ceph_fscrypt_page_offset(page) + thp_size(page))
		end = ceph_fscrypt_page_offset(page) + thp_size(page);
	ret = end > start ? end - start : 0;
	if (ret && fscrypt_is_bounce_page(page))
		ret = round_up(ret, CEPH_FSCRYPT_BLOCK_SIZE);
	return ret;
}

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, mark the mapping for error, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = page_offset(page);
	int err;
	loff_t len = thp_size(page);
	loff_t wlen;
	struct ceph_writeback_ctl ceph_wbc;
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_osd_request *req;
	bool caching = ceph_is_cache_enabled(inode);
	struct page *bounce_page = NULL;

	doutc(cl, "%llx.%llx page %p idx %lu\n", ceph_vinop(inode), page,
	      page->index);

	if (ceph_inode_is_shutdown(inode))
		return -EIO;

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (!snapc) {
		doutc(cl, "%llx.%llx page %p not dirty?\n", ceph_vinop(inode),
		      page);
		return 0;
	}
	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
	if (snapc->seq > oldest->seq) {
		doutc(cl, "%llx.%llx page %p snapc %p not writeable - noop\n",
		      ceph_vinop(inode), page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON(!(current->flags & PF_MEMALLOC));
		ceph_put_snap_context(oldest);
		redirty_page_for_writepage(wbc, page);
		return 0;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (page_off >= ceph_wbc.i_size) {
		doutc(cl, "%llx.%llx folio at %lu beyond eof %llu\n",
		      ceph_vinop(inode), folio->index, ceph_wbc.i_size);
		folio_invalidate(folio, 0, folio_size(folio));
		return 0;
	}

	if (ceph_wbc.i_size < page_off + len)
		len = ceph_wbc.i_size - page_off;

	wlen = IS_ENCRYPTED(inode) ? round_up(len, CEPH_FSCRYPT_BLOCK_SIZE) : len;
	doutc(cl, "%llx.%llx page %p index %lu on %llu~%llu snapc %p seq %lld\n",
	      ceph_vinop(inode), page, page->index, page_off, wlen, snapc,
	      snapc->seq);

	if (atomic_long_inc_return(&fsc->writeback_count) >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		fsc->write_congested = true;

	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode),
				    page_off, &wlen, 0, 1, CEPH_OSD_OP_WRITE,
				    CEPH_OSD_FLAG_WRITE, snapc,
				    ceph_wbc.truncate_seq,
				    ceph_wbc.truncate_size, true);
	if (IS_ERR(req)) {
		redirty_page_for_writepage(wbc, page);
		return PTR_ERR(req);
	}

	if (wlen < len)
		len = wlen;

	set_page_writeback(page);
	ceph_fscache_write_to_cache(inode, page_off, len, caching);

	if (IS_ENCRYPTED(inode)) {
		bounce_page = fscrypt_encrypt_pagecache_blocks(page,
						    CEPH_FSCRYPT_BLOCK_SIZE, 0,
						    GFP_NOFS);
		if (IS_ERR(bounce_page)) {
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			ceph_osdc_put_request(req);
			return PTR_ERR(bounce_page);
		}
	}

	/* it may be a short write due to an object boundary */
	WARN_ON_ONCE(len > thp_size(page));
	osd_req_op_extent_osd_data_pages(req, 0,
			bounce_page ? &bounce_page : &page, wlen, 0,
			false, false);
	doutc(cl, "%llx.%llx %llu~%llu (%llu bytes, %sencrypted)\n",
	      ceph_vinop(inode), page_off, len, wlen,
	      IS_ENCRYPTED(inode) ? "" : "not ");

	req->r_mtime = inode_get_mtime(inode);
	ceph_osdc_start_request(osdc, req);
	err = ceph_osdc_wait_request(osdc, req);

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, err);
	fscrypt_free_bounce_page(bounce_page);
	ceph_osdc_put_request(req);
	if (err == 0)
		err = len;

	if (err < 0) {
		struct writeback_control tmp_wbc;

		if (!wbc)
			wbc = &tmp_wbc;
		if (err == -ERESTARTSYS) {
			/* killed by SIGKILL */
			doutc(cl, "%llx.%llx interrupted page %p\n",
			      ceph_vinop(inode), page);
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			return err;
		}
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		doutc(cl, "%llx.%llx setting page/mapping error %d %p\n",
		      ceph_vinop(inode), err, page);
		mapping_set_error(&inode->i_data, err);
		wbc->pages_skipped++;
	} else {
		doutc(cl, "%llx.%llx cleaned page %p\n",
		      ceph_vinop(inode), page);
		err = 0;  /* vfs expects us to return 0 */
	}
	oldest = detach_page_private(page);
	WARN_ON_ONCE(oldest != snapc);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);  /* page's reference */

	if (atomic_long_dec_return(&fsc->writeback_count) <
	    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
		fsc->write_congested = false;

	return err;
}

static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;

	BUG_ON(!inode);
	ihold(inode);

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    ceph_inode_to_fs_client(inode)->write_congested) {
		redirty_page_for_writepage(wbc, page);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	err = writepage_nounlock(page, wbc);
	if (err == -ERESTARTSYS) {
		/* direct memory reclaimer was killed by SIGKILL. return 0
		 * to prevent caller from setting mapping/page error */
		err = 0;
	}
	unlock_page(page);
	iput(inode);
	return err;
}

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_osd_data *osd_data;
	struct page *page;
	int num_pages, total_pages = 0;
	int i, j;
	int rc = req->r_result;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	unsigned int len = 0;
	bool remove_page;

	doutc(cl, "%llx.%llx rc %d\n", ceph_vinop(inode), rc);
	if (rc < 0) {
		mapping_set_error(mapping, rc);
		ceph_set_error_write(ci);
		if (rc == -EBLOCKLISTED)
			fsc->blocklisted = true;
	} else {
		ceph_clear_error_write(ci);
	}

	/*
	 * We lost the cache cap, need to truncate the page before
	 * it is unlocked, otherwise we'd truncate it later in the
	 * page truncation thread, possibly losing some data that
	 * raced its way in
	 */
	remove_page = !(ceph_caps_issued(ci) &
			(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));

	/* clean all pages */
	for (i = 0; i < req->r_num_ops; i++) {
		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) {
			pr_warn_client(cl,
				"%llx.%llx incorrect op %d req %p index %d tid %llu\n",
				ceph_vinop(inode), req->r_ops[i].op, req, i,
				req->r_tid);
			break;
		}

		osd_data = osd_req_op_extent_osd_data(req, i);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
		len += osd_data->length;
		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		total_pages += num_pages;
		for (j = 0; j < num_pages; j++) {
			page = osd_data->pages[j];
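			/* An encrypted write went out via a bounce page;
			 * swap the pagecache page back in and free the
			 * bounce page now that the data is on the OSD. */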
			if (fscrypt_is_bounce_page(page)) {
				page = fscrypt_pagecache_page(page);
				fscrypt_free_bounce_page(osd_data->pages[j]);
				osd_data->pages[j] = page;
			}
			BUG_ON(!page);
			WARN_ON(!PageUptodate(page));

			if (atomic_long_dec_return(&fsc->writeback_count) <
			     CONGESTION_OFF_THRESH(
					fsc->mount_options->congestion_kb))
				fsc->write_congested = false;

			ceph_put_snap_context(detach_page_private(page));
			end_page_writeback(page);
			doutc(cl, "unlocking %p\n", page);

			if (remove_page)
				generic_error_remove_folio(inode->i_mapping,
							  page_folio(page));

			unlock_page(page);
		}
		doutc(cl, "%llx.%llx wrote %llu bytes cleaned %d pages\n",
		      ceph_vinop(inode), osd_data->length,
		      rc >= 0 ? num_pages : 0);

		release_pages(osd_data->pages, num_pages);
	}

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, rc);

	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
	ceph_dec_osd_stopping_blocker(fsc->mdsc);
}

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_vino vino = ceph_vino(inode);
	pgoff_t index, start_index, end = -1;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct folio_batch fbatch;
	int rc = 0;
	unsigned int wsize = i_blocksize(inode);
	struct ceph_osd_request *req = NULL;
	struct ceph_writeback_ctl ceph_wbc;
	bool should_loop, range_whole = false;
	bool done = false;
	bool caching = ceph_is_cache_enabled(inode);
	xa_mark_t tag;

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    fsc->write_congested)
		return 0;

	doutc(cl, "%llx.%llx (mode=%s)\n", ceph_vinop(inode),
	      wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	      (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	if (ceph_inode_is_shutdown(inode)) {
		if (ci->i_wrbuffer_ref > 0) {
			pr_warn_ratelimited_client(cl,
				"%llx.%llx %lld forced umount\n",
				ceph_vinop(inode), ceph_ino(inode));
		}
		mapping_set_error(mapping, -EIO);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;

	folio_batch_init(&fbatch);

	start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
	index = start_index;

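	/*
	 * Integrity syncs walk pages tagged TOWRITE (set below by
	 * tag_pages_for_writeback()); background writeback just chases
	 * DIRTY pages.
	 */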
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	/* find oldest snap context with dirty data */
	snapc = get_oldest_context(inode, &ceph_wbc, NULL);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		doutc(cl, " no snap context with dirty data?\n");
		goto out;
	}
	doutc(cl, " oldest snapc is %p seq %lld (%d snaps)\n", snapc,
	      snapc->seq, snapc->num_snaps);

	should_loop = false;
	if (ceph_wbc.head_snapc && snapc != last_snapc) {
		/* where to start/end? */
		if (wbc->range_cyclic) {
			index = start_index;
			end = -1;
			if (index > 0)
				should_loop = true;
			doutc(cl, " cyclic, start at %lu\n", index);
		} else {
			index = wbc->range_start >> PAGE_SHIFT;
			end = wbc->range_end >> PAGE_SHIFT;
			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
				range_whole = true;
			doutc(cl, " not cyclic, %lu to %lu\n", index, end);
		}
	} else if (!ceph_wbc.head_snapc) {
		/* Do not respect wbc->range_{start,end}. Dirty pages
		 * in that range can be associated with a newer snapc.
		 * They are not writeable until all dirty pages
		 * associated with 'snapc' get written */
		if (index > 0)
			should_loop = true;
		doutc(cl, " non-head snapc, range whole\n");
	}

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);

	ceph_put_snap_context(last_snapc);
	last_snapc = snapc;

	while (!done && index <= end) {
		int num_ops = 0, op_idx;
		unsigned i, nr_folios, max_pages, locked_pages = 0;
		struct page **pages = NULL, **data_pages;
		struct page *page;
		pgoff_t strip_unit_end = 0;
		u64 offset = 0, len = 0;
		bool from_pool = false;

		max_pages = wsize >> PAGE_SHIFT;

get_more_pages:
		nr_folios = filemap_get_folios_tag(mapping, &index,
						   end, tag, &fbatch);
		doutc(cl, "pagevec_lookup_range_tag got %d\n", nr_folios);
		if (!nr_folios && !locked_pages)
			break;
		for (i = 0; i < nr_folios && locked_pages < max_pages; i++) {
			page = &fbatch.folios[i]->page;
			doutc(cl, "? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				doutc(cl, "!dirty or !mapping %p\n", page);
				unlock_page(page);
				continue;
			}
			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc != snapc) {
				doutc(cl, "page snapc %p %lld != oldest %p %lld\n",
				      pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				if (!should_loop &&
				    !ceph_wbc.head_snapc &&
				    wbc->sync_mode != WB_SYNC_NONE)
					should_loop = true;
				unlock_page(page);
				continue;
			}
			if (page_offset(page) >= ceph_wbc.i_size) {
				struct folio *folio = page_folio(page);

				doutc(cl, "folio at %lu beyond eof %llu\n",
				      folio->index, ceph_wbc.i_size);
				if ((ceph_wbc.size_stable ||
				    folio_pos(folio) >= i_size_read(inode)) &&
				    folio_clear_dirty_for_io(folio))
					folio_invalidate(folio, 0,
							folio_size(folio));
				folio_unlock(folio);
				continue;
			}
			if (strip_unit_end && (page->index > strip_unit_end)) {
				doutc(cl, "end of strip unit %p\n", page);
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				if (wbc->sync_mode == WB_SYNC_NONE) {
					doutc(cl, "%p under writeback\n", page);
					unlock_page(page);
					continue;
				}
				doutc(cl, "waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}

			if (!clear_page_dirty_for_io(page)) {
				doutc(cl, "%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				continue;
			}

			/*
			 * We have something to write.  If this is
			 * the first locked page this time through,
			 * calculate max possible write size and
			 * allocate a page array
			 */
			if (locked_pages == 0) {
				u64 objnum;
				u64 objoff;
				u32 xlen;

				/* prepare async write request */
				offset = (u64)page_offset(page);
				ceph_calc_file_object_mapping(&ci->i_layout,
							      offset, wsize,
							      &objnum, &objoff,
							      &xlen);
				len = xlen;

				num_ops = 1;
				strip_unit_end = page->index +
					((len - 1) >> PAGE_SHIFT);

				BUG_ON(pages);
				max_pages = calc_pages_for(0, (u64)len);
				pages = kmalloc_array(max_pages,
						      sizeof(*pages),
						      GFP_NOFS);
				if (!pages) {
					from_pool = true;
					pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
					BUG_ON(!pages);
				}

				len = 0;
			} else if (page->index !=
				   (offset + len) >> PAGE_SHIFT) {
				if (num_ops >= (from_pool ?  CEPH_OSD_SLAB_OPS :
							     CEPH_OSD_MAX_OPS)) {
					redirty_page_for_writepage(wbc, page);
					unlock_page(page);
					break;
				}

				num_ops++;
				offset = (u64)page_offset(page);
				len = 0;
			}

			/* note position of first page in fbatch */
			doutc(cl, "%llx.%llx will write page %p idx %lu\n",
			      ceph_vinop(inode), page, page->index);

			if (atomic_long_inc_return(&fsc->writeback_count) >
			    CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb))
				fsc->write_congested = true;

			if (IS_ENCRYPTED(inode)) {
				pages[locked_pages] =
					fscrypt_encrypt_pagecache_blocks(page,
						PAGE_SIZE, 0,
						locked_pages ? GFP_NOWAIT : GFP_NOFS);
				if (IS_ERR(pages[locked_pages])) {
					if (PTR_ERR(pages[locked_pages]) == -EINVAL)
						pr_err_client(cl,
							"inode->i_blkbits=%hhu\n",
							inode->i_blkbits);
					/* better not fail on first page! */
					BUG_ON(locked_pages == 0);
					pages[locked_pages] = NULL;
					redirty_page_for_writepage(wbc, page);
					unlock_page(page);
					break;
				}
				++locked_pages;
			} else {
				pages[locked_pages++] = page;
			}

			fbatch.folios[i] = NULL;
			len += thp_size(page);
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_folios;
		if (i) {
			unsigned j, n = 0;

			/* shift unused page to beginning of fbatch */
			for (j = 0; j < nr_folios; j++) {
				if (!fbatch.folios[j])
					continue;
				if (n < j)
					fbatch.folios[n] = fbatch.folios[j];
				n++;
			}
			fbatch.nr = n;

			if (nr_folios && i == nr_folios &&
			    locked_pages < max_pages) {
				doutc(cl, "reached end fbatch, trying for more\n");
				folio_batch_release(&fbatch);
				goto get_more_pages;
			}
		}

new_request:
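		/*
		 * Size the request optimistically at a full write unit;
		 * ceph_osdc_new_request() trims len to the object boundary,
		 * and the pool-backed fallback caps the number of ops.
		 */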
		offset = ceph_fscrypt_page_offset(pages[0]);
		len = wsize;

		req = ceph_osdc_new_request(&fsc->client->osdc,
					&ci->i_layout, vino,
					offset, &len, 0, num_ops,
					CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
					snapc, ceph_wbc.truncate_seq,
					ceph_wbc.truncate_size, false);
		if (IS_ERR(req)) {
			req = ceph_osdc_new_request(&fsc->client->osdc,
						&ci->i_layout, vino,
						offset, &len, 0,
						min(num_ops,
						    CEPH_OSD_SLAB_OPS),
						CEPH_OSD_OP_WRITE,
						CEPH_OSD_FLAG_WRITE,
						snapc, ceph_wbc.truncate_seq,
						ceph_wbc.truncate_size, true);
			BUG_ON(IS_ERR(req));
		}
		BUG_ON(len < ceph_fscrypt_page_offset(pages[locked_pages - 1]) +
			     thp_size(pages[locked_pages - 1]) - offset);

		if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
			rc = -EIO;
			goto release_folios;
		}
		req->r_callback = writepages_finish;
		req->r_inode = inode;

		/* Format the osd request message and submit the write */
		len = 0;
		data_pages = pages;
		op_idx = 0;
		for (i = 0; i < locked_pages; i++) {
			struct page *page = ceph_fscrypt_pagecache_page(pages[i]);
			u64 cur_offset = page_offset(page);

			/*
			 * Discontinuity in page range? Ceph can handle that by just passing
			 * multiple extents in the write op.
			 */
			if (offset + len != cur_offset) {
				/* If it's full, stop here */
				if (op_idx + 1 == req->r_num_ops)
					break;

				/* Kick off an fscache write with what we have so far. */
				ceph_fscache_write_to_cache(inode, offset, len, caching);

				/* Start a new extent */
				osd_req_op_extent_dup_last(req, op_idx,
							   cur_offset - offset);
				doutc(cl, "got pages at %llu~%llu\n", offset,
				      len);
				osd_req_op_extent_osd_data_pages(req, op_idx,
							data_pages, len, 0,
							from_pool, false);
				osd_req_op_extent_update(req, op_idx, len);

				len = 0;
				offset = cur_offset;
				data_pages = pages + i;
				op_idx++;
			}

			set_page_writeback(page);
			len += thp_size(page);
		}
		ceph_fscache_write_to_cache(inode, offset, len, caching);

		if (ceph_wbc.size_stable) {
			len = min(len, ceph_wbc.i_size - offset);
		} else if (i == locked_pages) {
			/* writepages_finish() clears writeback pages
			 * according to the data length, so make sure
			 * data length covers all locked pages */
			u64 min_len = len + 1 - thp_size(page);

			len = get_writepages_data_length(inode, pages[i - 1],
							 offset);
			len = max(len, min_len);
		}
		if (IS_ENCRYPTED(inode))
			len = round_up(len, CEPH_FSCRYPT_BLOCK_SIZE);

		doutc(cl, "got pages at %llu~%llu\n", offset, len);

		if (IS_ENCRYPTED(inode) &&
		    ((offset | len) & ~CEPH_FSCRYPT_BLOCK_MASK))
			pr_warn_client(cl,
				"bad encrypted write offset=%lld len=%llu\n",
				offset, len);

		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
						 0, from_pool, false);
		osd_req_op_extent_update(req, op_idx, len);

		BUG_ON(op_idx + 1 != req->r_num_ops);

		from_pool = false;
		if (i < locked_pages) {
			BUG_ON(num_ops <= req->r_num_ops);
			num_ops -= req->r_num_ops;
			locked_pages -= i;

			/* allocate new pages array for next request */
			data_pages = pages;
			pages = kmalloc_array(locked_pages, sizeof(*pages),
					      GFP_NOFS);
			if (!pages) {
				from_pool = true;
				pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
				BUG_ON(!pages);
			}
			memcpy(pages, data_pages + i,
			       locked_pages * sizeof(*pages));
			memset(data_pages + i, 0,
			       locked_pages * sizeof(*pages));
		} else {
			BUG_ON(num_ops != req->r_num_ops);
			index = pages[i - 1]->index + 1;
			/* request message now owns the pages array */
			pages = NULL;
		}

		req->r_mtime = inode_get_mtime(inode);
		ceph_osdc_start_request(&fsc->client->osdc, req);
		req = NULL;

		wbc->nr_to_write -= i;
		if (pages)
			goto new_request;

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
			done = true;

release_folios:
		doutc(cl, "folio_batch release on %d folios (%p)\n",
		      (int)fbatch.nr, fbatch.nr ? fbatch.folios[0] : NULL);
		folio_batch_release(&fbatch);
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		doutc(cl, "looping back to beginning of file\n");
		end = start_index - 1; /* OK even when start_index == 0 */

		/* to write dirty pages associated with next snapc,
		 * we need to wait until current writes complete */
		if (wbc->sync_mode != WB_SYNC_NONE &&
		    start_index == 0 && /* all dirty pages were checked */
		    !ceph_wbc.head_snapc) {
			struct page *page;
			unsigned i, nr;

			index = 0;
			while ((index <= end) &&
			       (nr = filemap_get_folios_tag(mapping, &index,
						(pgoff_t)-1,
						PAGECACHE_TAG_WRITEBACK,
						&fbatch))) {
				for (i = 0; i < nr; i++) {
					page = &fbatch.folios[i]->page;
					if (page_snap_context(page) != snapc)
						continue;
					wait_on_page_writeback(page);
				}
				folio_batch_release(&fbatch);
				cond_resched();
			}
		}

		start_index = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	ceph_osdc_put_request(req);
	ceph_put_snap_context(last_snapc);
	doutc(cl, "%llx.%llx dend - startone, rc = %d\n", ceph_vinop(inode),
	      rc);
	return rc;
}

/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}

/**
 * ceph_find_incompatible - find an incompatible context and return it
 * @page: page being dirtied
 *
 * We are only allowed to write into/dirty a page if the page is
 * clean, or already dirty within the same snap context. Returns a
 * conflicting context if there is one, NULL if there isn't, or a
 * negative error code on other errors.
 *
 * Must be called with page lock held.
 */
static struct ceph_snap_context *
ceph_find_incompatible(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (ceph_inode_is_shutdown(inode)) {
		doutc(cl, " %llx.%llx page %p is shutdown\n",
		      ceph_vinop(inode), page);
		return ERR_PTR(-ESTALE);
	}

	for (;;) {
		struct ceph_snap_context *snapc, *oldest;

		wait_on_page_writeback(page);

		snapc = page_snap_context(page);
		if (!snapc || snapc == ci->i_head_snapc)
			break;

		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL, NULL);
		if (snapc->seq > oldest->seq) {
			/* not writeable -- return it for the caller to deal with */
			ceph_put_snap_context(oldest);
			doutc(cl, " %llx.%llx page %p snapc %p not current or oldest\n",
			      ceph_vinop(inode), page, snapc);
			return ceph_get_snap_context(snapc);
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		doutc(cl, " %llx.%llx page %p snapc %p not current, but oldest\n",
		      ceph_vinop(inode), page, snapc);
		if (clear_page_dirty_for_io(page)) {
			int r = writepage_nounlock(page, NULL);

			if (r < 0)
				return ERR_PTR(r);
		}
	}
	return NULL;
}

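/*
 * netfs write_begin hook: if the folio is dirty in an older snap context
 * that is not yet writeable, drop the folio, kick off writeback and wait
 * for that context to drain, then tell the caller to retry.
 */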
static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct folio **foliop, void **_fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;

	snapc = ceph_find_incompatible(folio_page(*foliop, 0));
	if (snapc) {
		int r;

		folio_unlock(*foliop);
		folio_put(*foliop);
		*foliop = NULL;
		if (IS_ERR(snapc))
			return PTR_ERR(snapc);

		ceph_queue_writeback(inode);
		r = wait_event_killable(ci->i_cap_wq,
					context_is_writeable_or_written(inode, snapc));
		ceph_put_snap_context(snapc);
		return r == 0 ? -EAGAIN : r;
	}
	return 0;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct folio *folio = NULL;
	int r;

	r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, &folio, NULL);
	if (r < 0)
		return r;

	folio_wait_private_2(folio); /* [DEPRECATED] */
	WARN_ON_ONCE(!folio_test_locked(folio));
	*pagep = &folio->page;
	return 0;
}

/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *subpage, void *fsdata)
{
	struct folio *folio = page_folio(subpage);
	struct inode *inode = file_inode(file);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	bool check_cap = false;

	doutc(cl, "%llx.%llx file %p folio %p %d~%d (%d)\n", ceph_vinop(inode),
	      file, folio, (int)pos, (int)copied, (int)len);

	if (!folio_test_uptodate(folio)) {
		/* just return that nothing was copied on a short copy */
		if (copied < len) {
			copied = 0;
			goto out;
		}
		folio_mark_uptodate(folio);
	}

	/* did file size increase? */
	if (pos+copied > i_size_read(inode))
		check_cap = ceph_inode_set_size(inode, pos+copied);

	folio_mark_dirty(folio);

out:
	folio_unlock(folio);
	folio_put(folio);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY);

	return copied;
}

const struct address_space_operations ceph_aops = {
	.read_folio = netfs_read_folio,
	.readahead = netfs_readahead,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.dirty_folio = ceph_dirty_folio,
	.invalidate_folio = ceph_invalidate_folio,
	.release_folio = netfs_release_folio,
	.direct_IO = noop_direct_IO,
};

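/*
 * Block all signals except SIGKILL around a page fault so that cap
 * acquisition can only be interrupted by a fatal signal.
 */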
static void ceph_block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void ceph_restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

/*
 * vm ops
 */
static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	if (ceph_inode_is_shutdown(inode))
		return ret;

	ceph_block_sigs(&oldset);

	doutc(cl, "%llx.%llx %llu trying to get caps\n",
	      ceph_vinop(inode), off);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1, &got);
	if (err < 0)
		goto out_restore;

	doutc(cl, "%llx.%llx %llu got cap refs on %s\n", ceph_vinop(inode),
	      off, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
	    !ceph_has_inline_data(ci)) {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		ceph_add_rw_context(fi, &rw_ctx);
		ret = filemap_fault(vmf);
		ceph_del_rw_context(fi, &rw_ctx);
		doutc(cl, "%llx.%llx %llu drop cap refs %s ret %x\n",
		      ceph_vinop(inode), off, ceph_cap_string(got), ret);
	} else
		err = -EAGAIN;

	ceph_put_cap_refs(ci, got);

	if (err != -EAGAIN)
		goto out_restore;

	/* read inline data */
	if (off >= PAGE_SIZE) {
		/* does not support inline data > PAGE_SIZE */
		ret = VM_FAULT_SIGBUS;
	} else {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;

		filemap_invalidate_lock_shared(mapping);
		page = find_or_create_page(mapping, 0,
				mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out_inline;
		}
		err = __ceph_do_getattr(inode, page,
					 CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0 || off >= i_size_read(inode)) {
			unlock_page(page);
			put_page(page);
			ret = vmf_error(err);
			goto out_inline;
		}
		if (err < PAGE_SIZE)
			zero_user_segment(page, err, PAGE_SIZE);
		else
			flush_dcache_page(page);
		SetPageUptodate(page);
		vmf->page = page;
		ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
		filemap_invalidate_unlock_shared(mapping);
		doutc(cl, "%llx.%llx %llu read inline data ret %x\n",
		      ceph_vinop(inode), off, ret);
	}
out_restore:
	ceph_restore_sigs(&oldset);
	if (err < 0)
		ret = vmf_error(err);

	return ret;
}

static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct ceph_cap_flush *prealloc_cf;
	struct page *page = vmf->page;
	loff_t off = page_offset(page);
	loff_t size = i_size_read(inode);
	size_t len;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	if (ceph_inode_is_shutdown(inode))
		return ret;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return VM_FAULT_OOM;

	sb_start_pagefault(inode->i_sb);
	ceph_block_sigs(&oldset);

	if (off + thp_size(page) <= size)
		len = thp_size(page);
	else
		len = offset_in_thp(page, size);

	doutc(cl, "%llx.%llx %llu~%zd getting caps i_size %llu\n",
	      ceph_vinop(inode), off, len, size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, &got);
	if (err < 0)
		goto out_free;

	doutc(cl, "%llx.%llx %llu~%zd got cap refs on %s\n", ceph_vinop(inode),
	      off, len, ceph_cap_string(got));

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);
	inode_inc_iversion_raw(inode);

	do {
		struct ceph_snap_context *snapc;

		lock_page(page);

		if (page_mkwrite_check_truncate(page, inode) < 0) {
			unlock_page(page);
			ret = VM_FAULT_NOPAGE;
			break;
		}

		snapc = ceph_find_incompatible(page);
		if (!snapc) {
			/* success.  we'll keep the page locked. */
			set_page_dirty(page);
			ret = VM_FAULT_LOCKED;
			break;
		}

		unlock_page(page);

		if (IS_ERR(snapc)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}

		ceph_queue_writeback(inode);
		err = wait_event_killable(ci->i_cap_wq,
				context_is_writeable_or_written(inode, snapc));
		ceph_put_snap_context(snapc);
	} while (err == 0);

	if (ret == VM_FAULT_LOCKED) {
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	doutc(cl, "%llx.%llx %llu~%zd dropping cap refs on %s ret %x\n",
	      ceph_vinop(inode), off, len, ceph_cap_string(got), ret);
	ceph_put_cap_refs_async(ci, got);
out_free:
	ceph_restore_sigs(&oldset);
	sb_end_pagefault(inode->i_sb);
	ceph_free_cap_flush(prealloc_cf);
	if (err < 0)
		ret = vmf_error(err);
	return ret;
}

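/*
 * Populate the first page of the mapping with inline data fetched from the
 * MDS.  If @locked_page is given it is used and left locked; otherwise a
 * page is looked up or created, filled and marked uptodate.
 */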
void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
			   char *data, size_t len)
{
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (locked_page) {
		page = locked_page;
	} else {
		if (i_size_read(inode) == 0)
			return;
		page = find_or_create_page(mapping, 0,
					   mapping_gfp_constraint(mapping,
					   ~__GFP_FS));
		if (!page)
			return;
		if (PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return;
		}
	}

	doutc(cl, "%p %llx.%llx len %zu locked_page %p\n", inode,
	      ceph_vinop(inode), len, locked_page);

	if (len > 0) {
		void *kaddr = kmap_atomic(page);
		memcpy(kaddr, data, len);
		kunmap_atomic(kaddr);
	}

	if (page != locked_page) {
		if (len < PAGE_SIZE)
			zero_user_segment(page, len, PAGE_SIZE);
		else
			flush_dcache_page(page);

		SetPageUptodate(page);
		unlock_page(page);
		put_page(page);
	}
}

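/*
 * Migrate an inode's inline data out to its first RADOS object.  The
 * inline payload (if any) is read into the page cache, the object is
 * created, and the data is written back guarded by a cmpxattr on
 * "inline_version" so that a racing uninline cannot clobber newer data
 * (the loser's write fails with -ECANCELED, which is treated as success
 * below).  Finally i_inline_version is set to CEPH_INLINE_NONE and the
 * Fw cap is dirtied.
 */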
int ceph_uninline_data(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_osd_request *req = NULL;
	struct ceph_cap_flush *prealloc_cf = NULL;
	struct folio *folio = NULL;
	u64 inline_version = CEPH_INLINE_NONE;
	struct page *pages[1];
	int err = 0;
	u64 len;

	spin_lock(&ci->i_ceph_lock);
	inline_version = ci->i_inline_version;
	spin_unlock(&ci->i_ceph_lock);

	doutc(cl, "%llx.%llx inline_version %llu\n", ceph_vinop(inode),
	      inline_version);

	if (ceph_inode_is_shutdown(inode)) {
		err = -EIO;
		goto out;
	}

	if (inline_version == CEPH_INLINE_NONE)
		return 0;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	if (inline_version == 1) /* initial version, no data */
		goto out_uninline;

	folio = read_mapping_folio(inode->i_mapping, 0, file);
	if (IS_ERR(folio)) {
		err = PTR_ERR(folio);
		goto out;
	}

	folio_lock(folio);

	len = i_size_read(inode);
	if (len > folio_size(folio))
		len = folio_size(folio);

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 0, 1,
				    CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_unlock;
	}

	req->r_mtime = inode_get_mtime(inode);
	ceph_osdc_start_request(&fsc->client->osdc, req);
	err = ceph_osdc_wait_request(&fsc->client->osdc, req);
	ceph_osdc_put_request(req);
	if (err < 0)
		goto out_unlock;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 1, 3,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    NULL, ci->i_truncate_seq,
				    ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_unlock;
	}

	pages[0] = folio_page(folio, 0);
	osd_req_op_extent_osd_data_pages(req, 1, pages, len, 0, false, false);

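	/*
	 * Guard the write with a cmpxattr on "inline_version": the OSD
	 * rejects the request with -ECANCELED unless the comparison passes,
	 * which is what keeps concurrent uninline attempts from stomping on
	 * each other.
	 */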
	{
		__le64 xattr_buf = cpu_to_le64(inline_version);
		err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
					    "inline_version", &xattr_buf,
					    sizeof(xattr_buf),
					    CEPH_OSD_CMPXATTR_OP_GT,
					    CEPH_OSD_CMPXATTR_MODE_U64);
		if (err)
			goto out_put_req;
	}

	{
		char xattr_buf[32];
		int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
					 "%llu", inline_version);
		err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
					    "inline_version",
					    xattr_buf, xattr_len, 0, 0);
		if (err)
			goto out_put_req;
	}

	req->r_mtime = inode_get_mtime(inode);
	ceph_osdc_start_request(&fsc->client->osdc, req);
	err = ceph_osdc_wait_request(&fsc->client->osdc, req);

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, err);

out_uninline:
	if (!err) {
		int dirty;

		/* Set to CEPH_INLINE_NONE and dirty the caps */
		down_read(&fsc->mdsc->snap_rwsem);
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		up_read(&fsc->mdsc->snap_rwsem);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}
out_put_req:
	ceph_osdc_put_request(req);
	if (err == -ECANCELED)
		err = 0;
out_unlock:
	if (folio) {
		folio_unlock(folio);
		folio_put(folio);
	}
out:
	ceph_free_cap_flush(prealloc_cf);
	doutc(cl, "%llx.%llx inline_version %llu = %d\n",
	      ceph_vinop(inode), inline_version, err);
	return err;
}

static const struct vm_operations_struct ceph_vmops = {
	.fault		= ceph_filemap_fault,
	.page_mkwrite	= ceph_page_mkwrite,
};

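/*
 * Set up a Ceph-backed mapping: mmap() only makes sense if the mapping
 * can page data in, so bail with -ENOEXEC when no ->read_folio is wired
 * up.
 */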
int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->read_folio)
		return -ENOEXEC;
	vma->vm_ops = &ceph_vmops;
	return 0;
}

enum {
	POOL_READ	= 1,
	POOL_WRITE	= 2,
};

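/*
 * Determine which of POOL_READ/POOL_WRITE this client holds on
 * @pool/@pool_ns, consulting the cache in mdsc->pool_perm_tree first.
 * On a miss, probe the pool directly: a STAT read proves read access and
 * an exclusive CREATE proves write access, both issued against the
 * inode's first object.  The outcome is inserted into the tree and
 * returned as a bitmask, or a negative errno on error.
 */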
static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
				s64 pool, struct ceph_string *pool_ns)
{
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(&ci->netfs.inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_client *cl = fsc->client;
	struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
	struct rb_node **p, *parent;
	struct ceph_pool_perm *perm;
	struct page **pages;
	size_t pool_ns_len;
	int err = 0, err2 = 0, have = 0;

	down_read(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	while (*p) {
		perm = rb_entry(*p, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	up_read(&mdsc->pool_perm_rwsem);
	if (*p)
		goto out;

	if (pool_ns)
		doutc(cl, "pool %lld ns %.*s no perm cached\n", pool,
		      (int)pool_ns->len, pool_ns->str);
	else
		doutc(cl, "pool %lld no perm cached\n", pool);

	down_write(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		perm = rb_entry(parent, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	if (*p) {
		up_write(&mdsc->pool_perm_rwsem);
		goto out;
	}

	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!rd_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	rd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
	rd_req->r_base_oloc.pool = pool;
	if (pool_ns)
		rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
	ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);

	err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!wr_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
	osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
	ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
	ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);

	err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	/* one page should be large enough for STAT data */
	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		goto out_unlock;
	}

	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
				     0, false, true);
	ceph_osdc_start_request(&fsc->client->osdc, rd_req);

	wr_req->r_mtime = inode_get_mtime(&ci->netfs.inode);
	ceph_osdc_start_request(&fsc->client->osdc, wr_req);

	err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
	err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);

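	/*
	 * -ENOENT still proves read access (the probe object need not
	 * exist), and -EEXIST still proves write access (the exclusive
	 * create lost a race).  -EPERM just leaves the bit clear.
	 */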
	if (err >= 0 || err == -ENOENT)
		have |= POOL_READ;
	else if (err != -EPERM) {
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		goto out_unlock;
	}

	if (err2 == 0 || err2 == -EEXIST)
		have |= POOL_WRITE;
	else if (err2 != -EPERM) {
		if (err2 == -EBLOCKLISTED)
			fsc->blocklisted = true;
		err = err2;
		goto out_unlock;
	}

	pool_ns_len = pool_ns ? pool_ns->len : 0;
	perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
	if (!perm) {
		err = -ENOMEM;
		goto out_unlock;
	}

	perm->pool = pool;
	perm->perm = have;
	perm->pool_ns_len = pool_ns_len;
	if (pool_ns_len > 0)
		memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
	perm->pool_ns[pool_ns_len] = 0;

	rb_link_node(&perm->node, parent, p);
	rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
	err = 0;
out_unlock:
	up_write(&mdsc->pool_perm_rwsem);

	ceph_osdc_put_request(rd_req);
	ceph_osdc_put_request(wr_req);
out:
	if (!err)
		err = have;
	if (pool_ns)
		doutc(cl, "pool %lld ns %.*s result = %d\n", pool,
		      (int)pool_ns->len, pool_ns->str, err);
	else
		doutc(cl, "pool %lld result = %d\n", pool, err);
	return err;
}

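/*
 * Check that this client may perform the operations in @need
 * (CEPH_CAP_FILE_RD and/or CEPH_CAP_FILE_WR) against the inode's data
 * pool, caching the outcome in i_ceph_flags.  Returns 0 if access is
 * allowed and a negative errno (typically -EPERM) if not.
 */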
int ceph_pool_perm_check(struct inode *inode, int need)
{
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_string *pool_ns;
	s64 pool;
	int ret, flags;

	/* Only need to do this for regular files */
	if (!S_ISREG(inode->i_mode))
		return 0;

	if (ci->i_vino.snap != CEPH_NOSNAP) {
		/*
		 * The pool permission check needs to write to the first
		 * object.  But for a snapshot, the head of the first object
		 * may have already been deleted.  Skip the check to avoid
		 * creating an orphan object.
		 */
		return 0;
	}

	if (ceph_test_mount_opt(ceph_inode_to_fs_client(inode),
				NOPOOLPERM))
		return 0;

	spin_lock(&ci->i_ceph_lock);
	flags = ci->i_ceph_flags;
	pool = ci->i_layout.pool_id;
	spin_unlock(&ci->i_ceph_lock);
check:
	if (flags & CEPH_I_POOL_PERM) {
		if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
			doutc(cl, "pool %lld no read perm\n", pool);
			return -EPERM;
		}
		if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
			doutc(cl, "pool %lld no write perm\n", pool);
			return -EPERM;
		}
		return 0;
	}

	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
	ret = __ceph_pool_perm_get(ci, pool, pool_ns);
	ceph_put_string(pool_ns);
	if (ret < 0)
		return ret;

	flags = CEPH_I_POOL_PERM;
	if (ret & POOL_READ)
		flags |= CEPH_I_POOL_RD;
	if (ret & POOL_WRITE)
		flags |= CEPH_I_POOL_WR;

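	/*
	 * Only cache the result if the layout is still the one we probed;
	 * if it changed underneath us, pick up the new pool and flags and
	 * run the check again.
	 */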
	spin_lock(&ci->i_ceph_lock);
	if (pool == ci->i_layout.pool_id &&
	    pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
		ci->i_ceph_flags |= flags;
	} else {
		pool = ci->i_layout.pool_id;
		flags = ci->i_ceph_flags;
	}
	spin_unlock(&ci->i_ceph_lock);
	goto check;
}

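/* Tear down the cached pool-permission tree, e.g. when the client goes away. */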
void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
{
	struct ceph_pool_perm *perm;
	struct rb_node *n;

	while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
		n = rb_first(&mdsc->pool_perm_tree);
		perm = rb_entry(n, struct ceph_pool_perm, node);
		rb_erase(n, &mdsc->pool_perm_tree);
		kfree(perm);
	}
}