1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/bitops.h>
4#include <linux/slab.h>
5#include <linux/bio.h>
6#include <linux/mm.h>
7#include <linux/pagemap.h>
8#include <linux/page-flags.h>
9#include <linux/sched/mm.h>
10#include <linux/spinlock.h>
11#include <linux/blkdev.h>
12#include <linux/swap.h>
13#include <linux/writeback.h>
14#include <linux/pagevec.h>
15#include <linux/prefetch.h>
16#include <linux/fsverity.h>
17#include "extent_io.h"
18#include "extent-io-tree.h"
19#include "extent_map.h"
20#include "ctree.h"
21#include "btrfs_inode.h"
22#include "bio.h"
23#include "locking.h"
24#include "backref.h"
25#include "disk-io.h"
26#include "subpage.h"
27#include "zoned.h"
28#include "block-group.h"
29#include "compression.h"
30#include "fs.h"
31#include "accessors.h"
32#include "file-item.h"
33#include "file.h"
34#include "dev-replace.h"
35#include "super.h"
36#include "transaction.h"
37
38static struct kmem_cache *extent_buffer_cache;
39
40#ifdef CONFIG_BTRFS_DEBUG
41static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
42{
43	struct btrfs_fs_info *fs_info = eb->fs_info;
44	unsigned long flags;
45
46	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
47	list_add(&eb->leak_list, &fs_info->allocated_ebs);
48	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
49}
50
51static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
52{
53	struct btrfs_fs_info *fs_info = eb->fs_info;
54	unsigned long flags;
55
56	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
57	list_del(&eb->leak_list);
58	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
59}
60
61void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
62{
63	struct extent_buffer *eb;
64	unsigned long flags;
65
66	/*
67	 * If we didn't get into open_ctree our allocated_ebs will not be
68	 * initialized, so just skip this.
69	 */
70	if (!fs_info->allocated_ebs.next)
71		return;
72
73	WARN_ON(!list_empty(&fs_info->allocated_ebs));
74	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
75	while (!list_empty(&fs_info->allocated_ebs)) {
76		eb = list_first_entry(&fs_info->allocated_ebs,
77				      struct extent_buffer, leak_list);
78		pr_err(
79	"BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
80		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
81		       btrfs_header_owner(eb));
82		list_del(&eb->leak_list);
83		WARN_ON_ONCE(1);
84		kmem_cache_free(extent_buffer_cache, eb);
85	}
86	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
87}
88#else
89#define btrfs_leak_debug_add_eb(eb)			do {} while (0)
90#define btrfs_leak_debug_del_eb(eb)			do {} while (0)
91#endif
92
93/*
94 * Structure to record info about the bio being assembled, and other info like
95 * how many bytes are there before stripe/ordered extent boundary.
96 */
97struct btrfs_bio_ctrl {
98	struct btrfs_bio *bbio;
99	enum btrfs_compression_type compress_type;
100	u32 len_to_oe_boundary;
101	blk_opf_t opf;
102	btrfs_bio_end_io_t end_io_func;
103	struct writeback_control *wbc;
104};
105
106static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
107{
108	struct btrfs_bio *bbio = bio_ctrl->bbio;
109
110	if (!bbio)
111		return;
112
113	/* Caller should ensure the bio has at least some range added */
114	ASSERT(bbio->bio.bi_iter.bi_size);
115
116	if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
117	    bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
118		btrfs_submit_compressed_read(bbio);
119	else
120		btrfs_submit_bio(bbio, 0);
121
122	/* The bbio is owned by the end_io handler now */
123	bio_ctrl->bbio = NULL;
124}
125
126/*
127 * Submit or fail the current bio in the bio_ctrl structure.
128 */
129static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
130{
131	struct btrfs_bio *bbio = bio_ctrl->bbio;
132
133	if (!bbio)
134		return;
135
136	if (ret) {
137		ASSERT(ret < 0);
138		btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
139		/* The bio is owned by the end_io handler now */
140		bio_ctrl->bbio = NULL;
141	} else {
142		submit_one_bio(bio_ctrl);
143	}
144}
145
146int __init extent_buffer_init_cachep(void)
147{
148	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
149						sizeof(struct extent_buffer), 0, 0,
150						NULL);
151	if (!extent_buffer_cache)
152		return -ENOMEM;
153
154	return 0;
155}
156
157void __cold extent_buffer_free_cachep(void)
158{
159	/*
160	 * Make sure all delayed rcu free are flushed before we
161	 * destroy caches.
162	 */
163	rcu_barrier();
164	kmem_cache_destroy(extent_buffer_cache);
165}
166
167void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
168{
169	unsigned long index = start >> PAGE_SHIFT;
170	unsigned long end_index = end >> PAGE_SHIFT;
171	struct page *page;
172
173	while (index <= end_index) {
174		page = find_get_page(inode->i_mapping, index);
175		BUG_ON(!page); /* Pages should be in the extent_io_tree */
176		clear_page_dirty_for_io(page);
177		put_page(page);
178		index++;
179	}
180}
181
182static void process_one_page(struct btrfs_fs_info *fs_info,
183			     struct page *page, struct page *locked_page,
184			     unsigned long page_ops, u64 start, u64 end)
185{
186	struct folio *folio = page_folio(page);
187	u32 len;
188
189	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
190	len = end + 1 - start;
191
192	if (page_ops & PAGE_SET_ORDERED)
193		btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
194	if (page_ops & PAGE_START_WRITEBACK) {
195		btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
196		btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
197	}
198	if (page_ops & PAGE_END_WRITEBACK)
199		btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
200
201	if (page != locked_page && (page_ops & PAGE_UNLOCK))
202		btrfs_folio_end_writer_lock(fs_info, folio, start, len);
203}
204
205static void __process_pages_contig(struct address_space *mapping,
206				   struct page *locked_page, u64 start, u64 end,
207				   unsigned long page_ops)
208{
209	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
210	pgoff_t start_index = start >> PAGE_SHIFT;
211	pgoff_t end_index = end >> PAGE_SHIFT;
212	pgoff_t index = start_index;
213	struct folio_batch fbatch;
214	int i;
215
216	folio_batch_init(&fbatch);
217	while (index <= end_index) {
218		int found_folios;
219
220		found_folios = filemap_get_folios_contig(mapping, &index,
221				end_index, &fbatch);
222		for (i = 0; i < found_folios; i++) {
223			struct folio *folio = fbatch.folios[i];
224
225			process_one_page(fs_info, &folio->page, locked_page,
226					 page_ops, start, end);
227		}
228		folio_batch_release(&fbatch);
229		cond_resched();
230	}
231}
232
233static noinline void __unlock_for_delalloc(struct inode *inode,
234					   struct page *locked_page,
235					   u64 start, u64 end)
236{
237	unsigned long index = start >> PAGE_SHIFT;
238	unsigned long end_index = end >> PAGE_SHIFT;
239
240	ASSERT(locked_page);
241	if (index == locked_page->index && end_index == index)
242		return;
243
244	__process_pages_contig(inode->i_mapping, locked_page, start, end,
245			       PAGE_UNLOCK);
246}
247
248static noinline int lock_delalloc_pages(struct inode *inode,
249					struct page *locked_page,
250					u64 start,
251					u64 end)
252{
253	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
254	struct address_space *mapping = inode->i_mapping;
255	pgoff_t start_index = start >> PAGE_SHIFT;
256	pgoff_t end_index = end >> PAGE_SHIFT;
257	pgoff_t index = start_index;
258	u64 processed_end = start;
259	struct folio_batch fbatch;
260
261	if (index == locked_page->index && index == end_index)
262		return 0;
263
264	folio_batch_init(&fbatch);
265	while (index <= end_index) {
266		unsigned int found_folios, i;
267
268		found_folios = filemap_get_folios_contig(mapping, &index,
269				end_index, &fbatch);
270		if (found_folios == 0)
271			goto out;
272
273		for (i = 0; i < found_folios; i++) {
274			struct folio *folio = fbatch.folios[i];
275			struct page *page = folio_page(folio, 0);
276			u32 len = end + 1 - start;
277
278			if (page == locked_page)
279				continue;
280
281			if (btrfs_folio_start_writer_lock(fs_info, folio, start,
282							  len))
283				goto out;
284
285			if (!PageDirty(page) || page->mapping != mapping) {
286				btrfs_folio_end_writer_lock(fs_info, folio, start,
287							    len);
288				goto out;
289			}
290
291			processed_end = page_offset(page) + PAGE_SIZE - 1;
292		}
293		folio_batch_release(&fbatch);
294		cond_resched();
295	}
296
297	return 0;
298out:
299	folio_batch_release(&fbatch);
300	if (processed_end > start)
301		__unlock_for_delalloc(inode, locked_page, start, processed_end);
302	return -EAGAIN;
303}
304
305/*
306 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
307 * more than @max_bytes.
308 *
309 * @start:	The original start bytenr to search.
310 *		Will store the extent range start bytenr.
311 * @end:	The original end bytenr of the search range
312 *		Will store the extent range end bytenr.
313 *
314 * Return true if we find a delalloc range which starts inside the original
315 * range, and @start/@end will store the delalloc range start/end.
316 *
317 * Return false if we can't find any delalloc range which starts inside the
318 * original range, and @start/@end will be the non-delalloc range start/end.
319 */
320EXPORT_FOR_TESTS
321noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
322				    struct page *locked_page, u64 *start,
323				    u64 *end)
324{
325	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
326	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
327	const u64 orig_start = *start;
328	const u64 orig_end = *end;
329	/* The sanity tests may not set a valid fs_info. */
330	u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
331	u64 delalloc_start;
332	u64 delalloc_end;
333	bool found;
334	struct extent_state *cached_state = NULL;
335	int ret;
336	int loops = 0;
337
338	/* Caller should pass a valid @end to indicate the search range end */
339	ASSERT(orig_end > orig_start);
340
341	/* The range should at least cover part of the page */
342	ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
343		 orig_end <= page_offset(locked_page)));
344again:
345	/* step one, find a bunch of delalloc bytes starting at start */
346	delalloc_start = *start;
347	delalloc_end = 0;
348	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
349					  max_bytes, &cached_state);
350	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
351		*start = delalloc_start;
352
353		/* @delalloc_end can be -1, never go beyond @orig_end */
354		*end = min(delalloc_end, orig_end);
355		free_extent_state(cached_state);
356		return false;
357	}
358
359	/*
360	 * start comes from the offset of locked_page.  We have to lock
361	 * pages in order, so we can't process delalloc bytes before
362	 * locked_page
363	 */
364	if (delalloc_start < *start)
365		delalloc_start = *start;
366
367	/*
368	 * make sure to limit the number of pages we try to lock down
369	 */
370	if (delalloc_end + 1 - delalloc_start > max_bytes)
371		delalloc_end = delalloc_start + max_bytes - 1;
372
373	/* step two, lock all the pages after the page that has start */
374	ret = lock_delalloc_pages(inode, locked_page,
375				  delalloc_start, delalloc_end);
376	ASSERT(!ret || ret == -EAGAIN);
377	if (ret == -EAGAIN) {
378		/* some of the pages are gone, lets avoid looping by
379		 * shortening the size of the delalloc range we're searching
380		 */
381		free_extent_state(cached_state);
382		cached_state = NULL;
383		if (!loops) {
384			max_bytes = PAGE_SIZE;
385			loops = 1;
386			goto again;
387		} else {
388			found = false;
389			goto out_failed;
390		}
391	}
392
393	/* step three, lock the state bits for the whole range */
394	lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
395
396	/* then test to make sure it is all still delalloc */
397	ret = test_range_bit(tree, delalloc_start, delalloc_end,
398			     EXTENT_DELALLOC, cached_state);
399
400	unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
401	if (!ret) {
402		__unlock_for_delalloc(inode, locked_page,
403			      delalloc_start, delalloc_end);
404		cond_resched();
405		goto again;
406	}
407	*start = delalloc_start;
408	*end = delalloc_end;
409out_failed:
410	return found;
411}
412
413void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
414				  struct page *locked_page,
415				  struct extent_state **cached,
416				  u32 clear_bits, unsigned long page_ops)
417{
418	clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);
419
420	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
421			       start, end, page_ops);
422}
423
424static bool btrfs_verify_page(struct page *page, u64 start)
425{
426	if (!fsverity_active(page->mapping->host) ||
427	    PageUptodate(page) ||
428	    start >= i_size_read(page->mapping->host))
429		return true;
430	return fsverity_verify_page(page);
431}
432
433static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
434{
435	struct btrfs_fs_info *fs_info = page_to_fs_info(page);
436	struct folio *folio = page_folio(page);
437
438	ASSERT(page_offset(page) <= start &&
439	       start + len <= page_offset(page) + PAGE_SIZE);
440
441	if (uptodate && btrfs_verify_page(page, start))
442		btrfs_folio_set_uptodate(fs_info, folio, start, len);
443	else
444		btrfs_folio_clear_uptodate(fs_info, folio, start, len);
445
446	if (!btrfs_is_subpage(fs_info, page->mapping))
447		unlock_page(page);
448	else
449		btrfs_subpage_end_reader(fs_info, folio, start, len);
450}
451
452/*
453 * After a write IO is done, we need to:
454 *
455 * - clear the uptodate bits on error
456 * - clear the writeback bits in the extent tree for the range
457 * - filio_end_writeback()  if there is no more pending io for the folio
458 *
459 * Scheduling is not allowed, so the extent state tree is expected
460 * to have one and only one object corresponding to this IO.
461 */
462static void end_bbio_data_write(struct btrfs_bio *bbio)
463{
464	struct btrfs_fs_info *fs_info = bbio->fs_info;
465	struct bio *bio = &bbio->bio;
466	int error = blk_status_to_errno(bio->bi_status);
467	struct folio_iter fi;
468	const u32 sectorsize = fs_info->sectorsize;
469
470	ASSERT(!bio_flagged(bio, BIO_CLONED));
471	bio_for_each_folio_all(fi, bio) {
472		struct folio *folio = fi.folio;
473		u64 start = folio_pos(folio) + fi.offset;
474		u32 len = fi.length;
475
476		/* Only order 0 (single page) folios are allowed for data. */
477		ASSERT(folio_order(folio) == 0);
478
479		/* Our read/write should always be sector aligned. */
480		if (!IS_ALIGNED(fi.offset, sectorsize))
481			btrfs_err(fs_info,
482		"partial page write in btrfs with offset %zu and length %zu",
483				  fi.offset, fi.length);
484		else if (!IS_ALIGNED(fi.length, sectorsize))
485			btrfs_info(fs_info,
486		"incomplete page write with offset %zu and length %zu",
487				   fi.offset, fi.length);
488
489		btrfs_finish_ordered_extent(bbio->ordered,
490				folio_page(folio, 0), start, len, !error);
491		if (error)
492			mapping_set_error(folio->mapping, error);
493		btrfs_folio_clear_writeback(fs_info, folio, start, len);
494	}
495
496	bio_put(bio);
497}
498
499/*
500 * Record previously processed extent range
501 *
502 * For endio_readpage_release_extent() to handle a full extent range, reducing
503 * the extent io operations.
504 */
505struct processed_extent {
506	struct btrfs_inode *inode;
507	/* Start of the range in @inode */
508	u64 start;
509	/* End of the range in @inode */
510	u64 end;
511	bool uptodate;
512};
513
514/*
515 * Try to release processed extent range
516 *
517 * May not release the extent range right now if the current range is
518 * contiguous to processed extent.
519 *
520 * Will release processed extent when any of @inode, @uptodate, the range is
521 * no longer contiguous to the processed range.
522 *
523 * Passing @inode == NULL will force processed extent to be released.
524 */
525static void endio_readpage_release_extent(struct processed_extent *processed,
526			      struct btrfs_inode *inode, u64 start, u64 end,
527			      bool uptodate)
528{
529	struct extent_state *cached = NULL;
530	struct extent_io_tree *tree;
531
532	/* The first extent, initialize @processed */
533	if (!processed->inode)
534		goto update;
535
536	/*
537	 * Contiguous to processed extent, just uptodate the end.
538	 *
539	 * Several things to notice:
540	 *
541	 * - bio can be merged as long as on-disk bytenr is contiguous
542	 *   This means we can have page belonging to other inodes, thus need to
543	 *   check if the inode still matches.
544	 * - bvec can contain range beyond current page for multi-page bvec
545	 *   Thus we need to do processed->end + 1 >= start check
546	 */
547	if (processed->inode == inode && processed->uptodate == uptodate &&
548	    processed->end + 1 >= start && end >= processed->end) {
549		processed->end = end;
550		return;
551	}
552
553	tree = &processed->inode->io_tree;
554	/*
555	 * Now we don't have range contiguous to the processed range, release
556	 * the processed range now.
557	 */
558	unlock_extent(tree, processed->start, processed->end, &cached);
559
560update:
561	/* Update processed to current range */
562	processed->inode = inode;
563	processed->start = start;
564	processed->end = end;
565	processed->uptodate = uptodate;
566}
567
568static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
569{
570	struct folio *folio = page_folio(page);
571
572	ASSERT(folio_test_locked(folio));
573	if (!btrfs_is_subpage(fs_info, folio->mapping))
574		return;
575
576	ASSERT(folio_test_private(folio));
577	btrfs_subpage_start_reader(fs_info, folio, page_offset(page), PAGE_SIZE);
578}
579
580/*
581 * After a data read IO is done, we need to:
582 *
583 * - clear the uptodate bits on error
584 * - set the uptodate bits if things worked
585 * - set the folio up to date if all extents in the tree are uptodate
586 * - clear the lock bit in the extent tree
587 * - unlock the folio if there are no other extents locked for it
588 *
589 * Scheduling is not allowed, so the extent state tree is expected
590 * to have one and only one object corresponding to this IO.
591 */
592static void end_bbio_data_read(struct btrfs_bio *bbio)
593{
594	struct btrfs_fs_info *fs_info = bbio->fs_info;
595	struct bio *bio = &bbio->bio;
596	struct processed_extent processed = { 0 };
597	struct folio_iter fi;
598	const u32 sectorsize = fs_info->sectorsize;
599
600	ASSERT(!bio_flagged(bio, BIO_CLONED));
601	bio_for_each_folio_all(fi, &bbio->bio) {
602		bool uptodate = !bio->bi_status;
603		struct folio *folio = fi.folio;
604		struct inode *inode = folio->mapping->host;
605		u64 start;
606		u64 end;
607		u32 len;
608
609		/* For now only order 0 folios are supported for data. */
610		ASSERT(folio_order(folio) == 0);
611		btrfs_debug(fs_info,
612			"%s: bi_sector=%llu, err=%d, mirror=%u",
613			__func__, bio->bi_iter.bi_sector, bio->bi_status,
614			bbio->mirror_num);
615
616		/*
617		 * We always issue full-sector reads, but if some block in a
618		 * folio fails to read, blk_update_request() will advance
619		 * bv_offset and adjust bv_len to compensate.  Print a warning
620		 * for unaligned offsets, and an error if they don't add up to
621		 * a full sector.
622		 */
623		if (!IS_ALIGNED(fi.offset, sectorsize))
624			btrfs_err(fs_info,
625		"partial page read in btrfs with offset %zu and length %zu",
626				  fi.offset, fi.length);
627		else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize))
628			btrfs_info(fs_info,
629		"incomplete page read with offset %zu and length %zu",
630				   fi.offset, fi.length);
631
632		start = folio_pos(folio) + fi.offset;
633		end = start + fi.length - 1;
634		len = fi.length;
635
636		if (likely(uptodate)) {
637			loff_t i_size = i_size_read(inode);
638			pgoff_t end_index = i_size >> folio_shift(folio);
639
640			/*
641			 * Zero out the remaining part if this range straddles
642			 * i_size.
643			 *
644			 * Here we should only zero the range inside the folio,
645			 * not touch anything else.
646			 *
647			 * NOTE: i_size is exclusive while end is inclusive.
648			 */
649			if (folio_index(folio) == end_index && i_size <= end) {
650				u32 zero_start = max(offset_in_folio(folio, i_size),
651						     offset_in_folio(folio, start));
652				u32 zero_len = offset_in_folio(folio, end) + 1 -
653					       zero_start;
654
655				folio_zero_range(folio, zero_start, zero_len);
656			}
657		}
658
659		/* Update page status and unlock. */
660		end_page_read(folio_page(folio, 0), uptodate, start, len);
661		endio_readpage_release_extent(&processed, BTRFS_I(inode),
662					      start, end, uptodate);
663	}
664	/* Release the last extent */
665	endio_readpage_release_extent(&processed, NULL, 0, 0, false);
666	bio_put(bio);
667}
668
669/*
670 * Populate every free slot in a provided array with folios.
671 *
672 * @nr_folios:   number of folios to allocate
673 * @folio_array: the array to fill with folios; any existing non-NULL entries in
674 *		 the array will be skipped
675 * @extra_gfp:	 the extra GFP flags for the allocation
676 *
677 * Return: 0        if all folios were able to be allocated;
678 *         -ENOMEM  otherwise, the partially allocated folios would be freed and
679 *                  the array slots zeroed
680 */
681int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array,
682			    gfp_t extra_gfp)
683{
684	for (int i = 0; i < nr_folios; i++) {
685		if (folio_array[i])
686			continue;
687		folio_array[i] = folio_alloc(GFP_NOFS | extra_gfp, 0);
688		if (!folio_array[i])
689			goto error;
690	}
691	return 0;
692error:
693	for (int i = 0; i < nr_folios; i++) {
694		if (folio_array[i])
695			folio_put(folio_array[i]);
696	}
697	return -ENOMEM;
698}
699
700/*
701 * Populate every free slot in a provided array with pages.
702 *
703 * @nr_pages:   number of pages to allocate
704 * @page_array: the array to fill with pages; any existing non-null entries in
705 * 		the array will be skipped
706 * @extra_gfp:	the extra GFP flags for the allocation.
707 *
708 * Return: 0        if all pages were able to be allocated;
709 *         -ENOMEM  otherwise, the partially allocated pages would be freed and
710 *                  the array slots zeroed
711 */
712int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
713			   gfp_t extra_gfp)
714{
715	const gfp_t gfp = GFP_NOFS | extra_gfp;
716	unsigned int allocated;
717
718	for (allocated = 0; allocated < nr_pages;) {
719		unsigned int last = allocated;
720
721		allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
722		if (unlikely(allocated == last)) {
723			/* No progress, fail and do cleanup. */
724			for (int i = 0; i < allocated; i++) {
725				__free_page(page_array[i]);
726				page_array[i] = NULL;
727			}
728			return -ENOMEM;
729		}
730	}
731	return 0;
732}
733
734/*
735 * Populate needed folios for the extent buffer.
736 *
737 * For now, the folios populated are always in order 0 (aka, single page).
738 */
739static int alloc_eb_folio_array(struct extent_buffer *eb, gfp_t extra_gfp)
740{
741	struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
742	int num_pages = num_extent_pages(eb);
743	int ret;
744
745	ret = btrfs_alloc_page_array(num_pages, page_array, extra_gfp);
746	if (ret < 0)
747		return ret;
748
749	for (int i = 0; i < num_pages; i++)
750		eb->folios[i] = page_folio(page_array[i]);
751	eb->folio_size = PAGE_SIZE;
752	eb->folio_shift = PAGE_SHIFT;
753	return 0;
754}
755
756static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
757				struct page *page, u64 disk_bytenr,
758				unsigned int pg_offset)
759{
760	struct bio *bio = &bio_ctrl->bbio->bio;
761	struct bio_vec *bvec = bio_last_bvec_all(bio);
762	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
763
764	if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
765		/*
766		 * For compression, all IO should have its logical bytenr set
767		 * to the starting bytenr of the compressed extent.
768		 */
769		return bio->bi_iter.bi_sector == sector;
770	}
771
772	/*
773	 * The contig check requires the following conditions to be met:
774	 *
775	 * 1) The pages are belonging to the same inode
776	 *    This is implied by the call chain.
777	 *
778	 * 2) The range has adjacent logical bytenr
779	 *
780	 * 3) The range has adjacent file offset
781	 *    This is required for the usage of btrfs_bio->file_offset.
782	 */
783	return bio_end_sector(bio) == sector &&
784		page_offset(bvec->bv_page) + bvec->bv_offset + bvec->bv_len ==
785		page_offset(page) + pg_offset;
786}
787
788static void alloc_new_bio(struct btrfs_inode *inode,
789			  struct btrfs_bio_ctrl *bio_ctrl,
790			  u64 disk_bytenr, u64 file_offset)
791{
792	struct btrfs_fs_info *fs_info = inode->root->fs_info;
793	struct btrfs_bio *bbio;
794
795	bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
796			       bio_ctrl->end_io_func, NULL);
797	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
798	bbio->inode = inode;
799	bbio->file_offset = file_offset;
800	bio_ctrl->bbio = bbio;
801	bio_ctrl->len_to_oe_boundary = U32_MAX;
802
803	/* Limit data write bios to the ordered boundary. */
804	if (bio_ctrl->wbc) {
805		struct btrfs_ordered_extent *ordered;
806
807		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
808		if (ordered) {
809			bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
810					ordered->file_offset +
811					ordered->disk_num_bytes - file_offset);
812			bbio->ordered = ordered;
813		}
814
815		/*
816		 * Pick the last added device to support cgroup writeback.  For
817		 * multi-device file systems this means blk-cgroup policies have
818		 * to always be set on the last added/replaced device.
819		 * This is a bit odd but has been like that for a long time.
820		 */
821		bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
822		wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
823	}
824}
825
826/*
827 * @disk_bytenr: logical bytenr where the write will be
828 * @page:	page to add to the bio
829 * @size:	portion of page that we want to write to
830 * @pg_offset:	offset of the new bio or to check whether we are adding
831 *              a contiguous page to the previous one
832 *
833 * The will either add the page into the existing @bio_ctrl->bbio, or allocate a
834 * new one in @bio_ctrl->bbio.
835 * The mirror number for this IO should already be initizlied in
836 * @bio_ctrl->mirror_num.
837 */
838static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
839			       u64 disk_bytenr, struct page *page,
840			       size_t size, unsigned long pg_offset)
841{
842	struct btrfs_inode *inode = page_to_inode(page);
843
844	ASSERT(pg_offset + size <= PAGE_SIZE);
845	ASSERT(bio_ctrl->end_io_func);
846
847	if (bio_ctrl->bbio &&
848	    !btrfs_bio_is_contig(bio_ctrl, page, disk_bytenr, pg_offset))
849		submit_one_bio(bio_ctrl);
850
851	do {
852		u32 len = size;
853
854		/* Allocate new bio if needed */
855		if (!bio_ctrl->bbio) {
856			alloc_new_bio(inode, bio_ctrl, disk_bytenr,
857				      page_offset(page) + pg_offset);
858		}
859
860		/* Cap to the current ordered extent boundary if there is one. */
861		if (len > bio_ctrl->len_to_oe_boundary) {
862			ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
863			ASSERT(is_data_inode(&inode->vfs_inode));
864			len = bio_ctrl->len_to_oe_boundary;
865		}
866
867		if (bio_add_page(&bio_ctrl->bbio->bio, page, len, pg_offset) != len) {
868			/* bio full: move on to a new one */
869			submit_one_bio(bio_ctrl);
870			continue;
871		}
872
873		if (bio_ctrl->wbc)
874			wbc_account_cgroup_owner(bio_ctrl->wbc, page, len);
875
876		size -= len;
877		pg_offset += len;
878		disk_bytenr += len;
879
880		/*
881		 * len_to_oe_boundary defaults to U32_MAX, which isn't page or
882		 * sector aligned.  alloc_new_bio() then sets it to the end of
883		 * our ordered extent for writes into zoned devices.
884		 *
885		 * When len_to_oe_boundary is tracking an ordered extent, we
886		 * trust the ordered extent code to align things properly, and
887		 * the check above to cap our write to the ordered extent
888		 * boundary is correct.
889		 *
890		 * When len_to_oe_boundary is U32_MAX, the cap above would
891		 * result in a 4095 byte IO for the last page right before
892		 * we hit the bio limit of UINT_MAX.  bio_add_page() has all
893		 * the checks required to make sure we don't overflow the bio,
894		 * and we should just ignore len_to_oe_boundary completely
895		 * unless we're using it to track an ordered extent.
896		 *
897		 * It's pretty hard to make a bio sized U32_MAX, but it can
898		 * happen when the page cache is able to feed us contiguous
899		 * pages for large extents.
900		 */
901		if (bio_ctrl->len_to_oe_boundary != U32_MAX)
902			bio_ctrl->len_to_oe_boundary -= len;
903
904		/* Ordered extent boundary: move on to a new bio. */
905		if (bio_ctrl->len_to_oe_boundary == 0)
906			submit_one_bio(bio_ctrl);
907	} while (size);
908}
909
910static int attach_extent_buffer_folio(struct extent_buffer *eb,
911				      struct folio *folio,
912				      struct btrfs_subpage *prealloc)
913{
914	struct btrfs_fs_info *fs_info = eb->fs_info;
915	int ret = 0;
916
917	/*
918	 * If the page is mapped to btree inode, we should hold the private
919	 * lock to prevent race.
920	 * For cloned or dummy extent buffers, their pages are not mapped and
921	 * will not race with any other ebs.
922	 */
923	if (folio->mapping)
924		lockdep_assert_held(&folio->mapping->i_private_lock);
925
926	if (fs_info->nodesize >= PAGE_SIZE) {
927		if (!folio_test_private(folio))
928			folio_attach_private(folio, eb);
929		else
930			WARN_ON(folio_get_private(folio) != eb);
931		return 0;
932	}
933
934	/* Already mapped, just free prealloc */
935	if (folio_test_private(folio)) {
936		btrfs_free_subpage(prealloc);
937		return 0;
938	}
939
940	if (prealloc)
941		/* Has preallocated memory for subpage */
942		folio_attach_private(folio, prealloc);
943	else
944		/* Do new allocation to attach subpage */
945		ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
946	return ret;
947}
948
949int set_page_extent_mapped(struct page *page)
950{
951	return set_folio_extent_mapped(page_folio(page));
952}
953
954int set_folio_extent_mapped(struct folio *folio)
955{
956	struct btrfs_fs_info *fs_info;
957
958	ASSERT(folio->mapping);
959
960	if (folio_test_private(folio))
961		return 0;
962
963	fs_info = folio_to_fs_info(folio);
964
965	if (btrfs_is_subpage(fs_info, folio->mapping))
966		return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
967
968	folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
969	return 0;
970}
971
972void clear_page_extent_mapped(struct page *page)
973{
974	struct folio *folio = page_folio(page);
975	struct btrfs_fs_info *fs_info;
976
977	ASSERT(page->mapping);
978
979	if (!folio_test_private(folio))
980		return;
981
982	fs_info = page_to_fs_info(page);
983	if (btrfs_is_subpage(fs_info, page->mapping))
984		return btrfs_detach_subpage(fs_info, folio);
985
986	folio_detach_private(folio);
987}
988
989static struct extent_map *__get_extent_map(struct inode *inode, struct page *page,
990		 u64 start, u64 len, struct extent_map **em_cached)
991{
992	struct extent_map *em;
993
994	ASSERT(em_cached);
995
996	if (*em_cached) {
997		em = *em_cached;
998		if (extent_map_in_tree(em) && start >= em->start &&
999		    start < extent_map_end(em)) {
1000			refcount_inc(&em->refs);
1001			return em;
1002		}
1003
1004		free_extent_map(em);
1005		*em_cached = NULL;
1006	}
1007
1008	em = btrfs_get_extent(BTRFS_I(inode), page, start, len);
1009	if (!IS_ERR(em)) {
1010		BUG_ON(*em_cached);
1011		refcount_inc(&em->refs);
1012		*em_cached = em;
1013	}
1014	return em;
1015}
1016/*
1017 * basic readpage implementation.  Locked extent state structs are inserted
1018 * into the tree that are removed when the IO is done (by the end_io
1019 * handlers)
1020 * XXX JDM: This needs looking at to ensure proper page locking
1021 * return 0 on success, otherwise return error
1022 */
1023static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
1024		      struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
1025{
1026	struct inode *inode = page->mapping->host;
1027	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
1028	u64 start = page_offset(page);
1029	const u64 end = start + PAGE_SIZE - 1;
1030	u64 cur = start;
1031	u64 extent_offset;
1032	u64 last_byte = i_size_read(inode);
1033	u64 block_start;
1034	struct extent_map *em;
1035	int ret = 0;
1036	size_t pg_offset = 0;
1037	size_t iosize;
1038	size_t blocksize = fs_info->sectorsize;
1039	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1040
1041	ret = set_page_extent_mapped(page);
1042	if (ret < 0) {
1043		unlock_extent(tree, start, end, NULL);
1044		unlock_page(page);
1045		return ret;
1046	}
1047
1048	if (page->index == last_byte >> PAGE_SHIFT) {
1049		size_t zero_offset = offset_in_page(last_byte);
1050
1051		if (zero_offset) {
1052			iosize = PAGE_SIZE - zero_offset;
1053			memzero_page(page, zero_offset, iosize);
1054		}
1055	}
1056	bio_ctrl->end_io_func = end_bbio_data_read;
1057	begin_page_read(fs_info, page);
1058	while (cur <= end) {
1059		enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
1060		bool force_bio_submit = false;
1061		u64 disk_bytenr;
1062
1063		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
1064		if (cur >= last_byte) {
1065			iosize = PAGE_SIZE - pg_offset;
1066			memzero_page(page, pg_offset, iosize);
1067			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1068			end_page_read(page, true, cur, iosize);
1069			break;
1070		}
1071		em = __get_extent_map(inode, page, cur, end - cur + 1, em_cached);
1072		if (IS_ERR(em)) {
1073			unlock_extent(tree, cur, end, NULL);
1074			end_page_read(page, false, cur, end + 1 - cur);
1075			return PTR_ERR(em);
1076		}
1077		extent_offset = cur - em->start;
1078		BUG_ON(extent_map_end(em) <= cur);
1079		BUG_ON(end < cur);
1080
1081		compress_type = extent_map_compression(em);
1082
1083		iosize = min(extent_map_end(em) - cur, end - cur + 1);
1084		iosize = ALIGN(iosize, blocksize);
1085		if (compress_type != BTRFS_COMPRESS_NONE)
1086			disk_bytenr = em->block_start;
1087		else
1088			disk_bytenr = em->block_start + extent_offset;
1089		block_start = em->block_start;
1090		if (em->flags & EXTENT_FLAG_PREALLOC)
1091			block_start = EXTENT_MAP_HOLE;
1092
1093		/*
1094		 * If we have a file range that points to a compressed extent
1095		 * and it's followed by a consecutive file range that points
1096		 * to the same compressed extent (possibly with a different
1097		 * offset and/or length, so it either points to the whole extent
1098		 * or only part of it), we must make sure we do not submit a
1099		 * single bio to populate the pages for the 2 ranges because
1100		 * this makes the compressed extent read zero out the pages
1101		 * belonging to the 2nd range. Imagine the following scenario:
1102		 *
1103		 *  File layout
1104		 *  [0 - 8K]                     [8K - 24K]
1105		 *    |                               |
1106		 *    |                               |
1107		 * points to extent X,         points to extent X,
1108		 * offset 4K, length of 8K     offset 0, length 16K
1109		 *
1110		 * [extent X, compressed length = 4K uncompressed length = 16K]
1111		 *
1112		 * If the bio to read the compressed extent covers both ranges,
1113		 * it will decompress extent X into the pages belonging to the
1114		 * first range and then it will stop, zeroing out the remaining
1115		 * pages that belong to the other range that points to extent X.
1116		 * So here we make sure we submit 2 bios, one for the first
1117		 * range and another one for the third range. Both will target
1118		 * the same physical extent from disk, but we can't currently
1119		 * make the compressed bio endio callback populate the pages
1120		 * for both ranges because each compressed bio is tightly
1121		 * coupled with a single extent map, and each range can have
1122		 * an extent map with a different offset value relative to the
1123		 * uncompressed data of our extent and different lengths. This
1124		 * is a corner case so we prioritize correctness over
1125		 * non-optimal behavior (submitting 2 bios for the same extent).
1126		 */
1127		if (compress_type != BTRFS_COMPRESS_NONE &&
1128		    prev_em_start && *prev_em_start != (u64)-1 &&
1129		    *prev_em_start != em->start)
1130			force_bio_submit = true;
1131
1132		if (prev_em_start)
1133			*prev_em_start = em->start;
1134
1135		free_extent_map(em);
1136		em = NULL;
1137
1138		/* we've found a hole, just zero and go on */
1139		if (block_start == EXTENT_MAP_HOLE) {
1140			memzero_page(page, pg_offset, iosize);
1141
1142			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1143			end_page_read(page, true, cur, iosize);
1144			cur = cur + iosize;
1145			pg_offset += iosize;
1146			continue;
1147		}
1148		/* the get_extent function already copied into the page */
1149		if (block_start == EXTENT_MAP_INLINE) {
1150			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1151			end_page_read(page, true, cur, iosize);
1152			cur = cur + iosize;
1153			pg_offset += iosize;
1154			continue;
1155		}
1156
1157		if (bio_ctrl->compress_type != compress_type) {
1158			submit_one_bio(bio_ctrl);
1159			bio_ctrl->compress_type = compress_type;
1160		}
1161
1162		if (force_bio_submit)
1163			submit_one_bio(bio_ctrl);
1164		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1165				   pg_offset);
1166		cur = cur + iosize;
1167		pg_offset += iosize;
1168	}
1169
1170	return 0;
1171}
1172
1173int btrfs_read_folio(struct file *file, struct folio *folio)
1174{
1175	struct page *page = &folio->page;
1176	struct btrfs_inode *inode = page_to_inode(page);
1177	u64 start = page_offset(page);
1178	u64 end = start + PAGE_SIZE - 1;
1179	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
1180	struct extent_map *em_cached = NULL;
1181	int ret;
1182
1183	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1184
1185	ret = btrfs_do_readpage(page, &em_cached, &bio_ctrl, NULL);
1186	free_extent_map(em_cached);
1187
1188	/*
1189	 * If btrfs_do_readpage() failed we will want to submit the assembled
1190	 * bio to do the cleanup.
1191	 */
1192	submit_one_bio(&bio_ctrl);
1193	return ret;
1194}
1195
1196static inline void contiguous_readpages(struct page *pages[], int nr_pages,
1197					u64 start, u64 end,
1198					struct extent_map **em_cached,
1199					struct btrfs_bio_ctrl *bio_ctrl,
1200					u64 *prev_em_start)
1201{
1202	struct btrfs_inode *inode = page_to_inode(pages[0]);
1203	int index;
1204
1205	ASSERT(em_cached);
1206
1207	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1208
1209	for (index = 0; index < nr_pages; index++) {
1210		btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
1211				  prev_em_start);
1212		put_page(pages[index]);
1213	}
1214}
1215
1216/*
1217 * helper for __extent_writepage, doing all of the delayed allocation setup.
1218 *
1219 * This returns 1 if btrfs_run_delalloc_range function did all the work required
1220 * to write the page (copy into inline extent).  In this case the IO has
1221 * been started and the page is already unlocked.
1222 *
1223 * This returns 0 if all went well (page still locked)
1224 * This returns < 0 if there were errors (page still locked)
1225 */
1226static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1227		struct page *page, struct writeback_control *wbc)
1228{
1229	const u64 page_start = page_offset(page);
1230	const u64 page_end = page_start + PAGE_SIZE - 1;
1231	u64 delalloc_start = page_start;
1232	u64 delalloc_end = page_end;
1233	u64 delalloc_to_write = 0;
1234	int ret = 0;
1235
1236	while (delalloc_start < page_end) {
1237		delalloc_end = page_end;
1238		if (!find_lock_delalloc_range(&inode->vfs_inode, page,
1239					      &delalloc_start, &delalloc_end)) {
1240			delalloc_start = delalloc_end + 1;
1241			continue;
1242		}
1243
1244		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
1245					       delalloc_end, wbc);
1246		if (ret < 0)
1247			return ret;
1248
1249		delalloc_start = delalloc_end + 1;
1250	}
1251
1252	/*
1253	 * delalloc_end is already one less than the total length, so
1254	 * we don't subtract one from PAGE_SIZE
1255	 */
1256	delalloc_to_write +=
1257		DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
1258
1259	/*
1260	 * If btrfs_run_dealloc_range() already started I/O and unlocked
1261	 * the pages, we just need to account for them here.
1262	 */
1263	if (ret == 1) {
1264		wbc->nr_to_write -= delalloc_to_write;
1265		return 1;
1266	}
1267
1268	if (wbc->nr_to_write < delalloc_to_write) {
1269		int thresh = 8192;
1270
1271		if (delalloc_to_write < thresh * 2)
1272			thresh = delalloc_to_write;
1273		wbc->nr_to_write = min_t(u64, delalloc_to_write,
1274					 thresh);
1275	}
1276
1277	return 0;
1278}
1279
1280/*
1281 * Find the first byte we need to write.
1282 *
1283 * For subpage, one page can contain several sectors, and
1284 * __extent_writepage_io() will just grab all extent maps in the page
1285 * range and try to submit all non-inline/non-compressed extents.
1286 *
1287 * This is a big problem for subpage, we shouldn't re-submit already written
1288 * data at all.
1289 * This function will lookup subpage dirty bit to find which range we really
1290 * need to submit.
1291 *
1292 * Return the next dirty range in [@start, @end).
1293 * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
1294 */
1295static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
1296				 struct page *page, u64 *start, u64 *end)
1297{
1298	struct folio *folio = page_folio(page);
1299	struct btrfs_subpage *subpage = folio_get_private(folio);
1300	struct btrfs_subpage_info *spi = fs_info->subpage_info;
1301	u64 orig_start = *start;
1302	/* Declare as unsigned long so we can use bitmap ops */
1303	unsigned long flags;
1304	int range_start_bit;
1305	int range_end_bit;
1306
1307	/*
1308	 * For regular sector size == page size case, since one page only
1309	 * contains one sector, we return the page offset directly.
1310	 */
1311	if (!btrfs_is_subpage(fs_info, page->mapping)) {
1312		*start = page_offset(page);
1313		*end = page_offset(page) + PAGE_SIZE;
1314		return;
1315	}
1316
1317	range_start_bit = spi->dirty_offset +
1318			  (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
1319
1320	/* We should have the page locked, but just in case */
1321	spin_lock_irqsave(&subpage->lock, flags);
1322	bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
1323			       spi->dirty_offset + spi->bitmap_nr_bits);
1324	spin_unlock_irqrestore(&subpage->lock, flags);
1325
1326	range_start_bit -= spi->dirty_offset;
1327	range_end_bit -= spi->dirty_offset;
1328
1329	*start = page_offset(page) + range_start_bit * fs_info->sectorsize;
1330	*end = page_offset(page) + range_end_bit * fs_info->sectorsize;
1331}
1332
1333/*
1334 * helper for __extent_writepage.  This calls the writepage start hooks,
1335 * and does the loop to map the page into extents and bios.
1336 *
1337 * We return 1 if the IO is started and the page is unlocked,
1338 * 0 if all went well (page still locked)
1339 * < 0 if there were errors (page still locked)
1340 */
1341static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
1342				 struct page *page,
1343				 struct btrfs_bio_ctrl *bio_ctrl,
1344				 loff_t i_size,
1345				 int *nr_ret)
1346{
1347	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1348	u64 cur = page_offset(page);
1349	u64 end = cur + PAGE_SIZE - 1;
1350	u64 extent_offset;
1351	u64 block_start;
1352	struct extent_map *em;
1353	int ret = 0;
1354	int nr = 0;
1355
1356	ret = btrfs_writepage_cow_fixup(page);
1357	if (ret) {
1358		/* Fixup worker will requeue */
1359		redirty_page_for_writepage(bio_ctrl->wbc, page);
1360		unlock_page(page);
1361		return 1;
1362	}
1363
1364	bio_ctrl->end_io_func = end_bbio_data_write;
1365	while (cur <= end) {
1366		u32 len = end - cur + 1;
1367		u64 disk_bytenr;
1368		u64 em_end;
1369		u64 dirty_range_start = cur;
1370		u64 dirty_range_end;
1371		u32 iosize;
1372
1373		if (cur >= i_size) {
1374			btrfs_mark_ordered_io_finished(inode, page, cur, len,
1375						       true);
1376			/*
1377			 * This range is beyond i_size, thus we don't need to
1378			 * bother writing back.
1379			 * But we still need to clear the dirty subpage bit, or
1380			 * the next time the page gets dirtied, we will try to
1381			 * writeback the sectors with subpage dirty bits,
1382			 * causing writeback without ordered extent.
1383			 */
1384			btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, len);
1385			break;
1386		}
1387
1388		find_next_dirty_byte(fs_info, page, &dirty_range_start,
1389				     &dirty_range_end);
1390		if (cur < dirty_range_start) {
1391			cur = dirty_range_start;
1392			continue;
1393		}
1394
1395		em = btrfs_get_extent(inode, NULL, cur, len);
1396		if (IS_ERR(em)) {
1397			ret = PTR_ERR_OR_ZERO(em);
1398			goto out_error;
1399		}
1400
1401		extent_offset = cur - em->start;
1402		em_end = extent_map_end(em);
1403		ASSERT(cur <= em_end);
1404		ASSERT(cur < end);
1405		ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
1406		ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
1407
1408		block_start = em->block_start;
1409		disk_bytenr = em->block_start + extent_offset;
1410
1411		ASSERT(!extent_map_is_compressed(em));
1412		ASSERT(block_start != EXTENT_MAP_HOLE);
1413		ASSERT(block_start != EXTENT_MAP_INLINE);
1414
1415		/*
1416		 * Note that em_end from extent_map_end() and dirty_range_end from
1417		 * find_next_dirty_byte() are all exclusive
1418		 */
1419		iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
1420		free_extent_map(em);
1421		em = NULL;
1422
1423		btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
1424		if (!PageWriteback(page)) {
1425			btrfs_err(inode->root->fs_info,
1426				   "page %lu not writeback, cur %llu end %llu",
1427			       page->index, cur, end);
1428		}
1429
1430		/*
1431		 * Although the PageDirty bit is cleared before entering this
1432		 * function, subpage dirty bit is not cleared.
1433		 * So clear subpage dirty bit here so next time we won't submit
1434		 * page for range already written to disk.
1435		 */
1436		btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, iosize);
1437
1438		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1439				   cur - page_offset(page));
1440		cur += iosize;
1441		nr++;
1442	}
1443
1444	btrfs_folio_assert_not_dirty(fs_info, page_folio(page));
1445	*nr_ret = nr;
1446	return 0;
1447
1448out_error:
1449	/*
1450	 * If we finish without problem, we should not only clear page dirty,
1451	 * but also empty subpage dirty bits
1452	 */
1453	*nr_ret = nr;
1454	return ret;
1455}
1456
1457/*
1458 * the writepage semantics are similar to regular writepage.  extent
1459 * records are inserted to lock ranges in the tree, and as dirty areas
1460 * are found, they are marked writeback.  Then the lock bits are removed
1461 * and the end_io handler clears the writeback ranges
1462 *
1463 * Return 0 if everything goes well.
1464 * Return <0 for error.
1465 */
1466static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl)
1467{
1468	struct folio *folio = page_folio(page);
1469	struct inode *inode = page->mapping->host;
1470	const u64 page_start = page_offset(page);
1471	int ret;
1472	int nr = 0;
1473	size_t pg_offset;
1474	loff_t i_size = i_size_read(inode);
1475	unsigned long end_index = i_size >> PAGE_SHIFT;
1476
1477	trace___extent_writepage(page, inode, bio_ctrl->wbc);
1478
1479	WARN_ON(!PageLocked(page));
1480
1481	pg_offset = offset_in_page(i_size);
1482	if (page->index > end_index ||
1483	   (page->index == end_index && !pg_offset)) {
1484		folio_invalidate(folio, 0, folio_size(folio));
1485		folio_unlock(folio);
1486		return 0;
1487	}
1488
1489	if (page->index == end_index)
1490		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
1491
1492	ret = set_page_extent_mapped(page);
1493	if (ret < 0)
1494		goto done;
1495
1496	ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc);
1497	if (ret == 1)
1498		return 0;
1499	if (ret)
1500		goto done;
1501
1502	ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr);
1503	if (ret == 1)
1504		return 0;
1505
1506	bio_ctrl->wbc->nr_to_write--;
1507
1508done:
1509	if (nr == 0) {
1510		/* make sure the mapping tag for page dirty gets cleared */
1511		set_page_writeback(page);
1512		end_page_writeback(page);
1513	}
1514	if (ret) {
1515		btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start,
1516					       PAGE_SIZE, !ret);
1517		mapping_set_error(page->mapping, ret);
1518	}
1519	unlock_page(page);
1520	ASSERT(ret <= 0);
1521	return ret;
1522}
1523
1524void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
1525{
1526	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
1527		       TASK_UNINTERRUPTIBLE);
1528}
1529
1530/*
1531 * Lock extent buffer status and pages for writeback.
1532 *
1533 * Return %false if the extent buffer doesn't need to be submitted (e.g. the
1534 * extent buffer is not dirty)
1535 * Return %true is the extent buffer is submitted to bio.
1536 */
1537static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
1538			  struct writeback_control *wbc)
1539{
1540	struct btrfs_fs_info *fs_info = eb->fs_info;
1541	bool ret = false;
1542
1543	btrfs_tree_lock(eb);
1544	while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
1545		btrfs_tree_unlock(eb);
1546		if (wbc->sync_mode != WB_SYNC_ALL)
1547			return false;
1548		wait_on_extent_buffer_writeback(eb);
1549		btrfs_tree_lock(eb);
1550	}
1551
1552	/*
1553	 * We need to do this to prevent races in people who check if the eb is
1554	 * under IO since we can end up having no IO bits set for a short period
1555	 * of time.
1556	 */
1557	spin_lock(&eb->refs_lock);
1558	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
1559		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1560		spin_unlock(&eb->refs_lock);
1561		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
1562		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1563					 -eb->len,
1564					 fs_info->dirty_metadata_batch);
1565		ret = true;
1566	} else {
1567		spin_unlock(&eb->refs_lock);
1568	}
1569	btrfs_tree_unlock(eb);
1570	return ret;
1571}
1572
1573static void set_btree_ioerr(struct extent_buffer *eb)
1574{
1575	struct btrfs_fs_info *fs_info = eb->fs_info;
1576
1577	set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1578
1579	/*
1580	 * A read may stumble upon this buffer later, make sure that it gets an
1581	 * error and knows there was an error.
1582	 */
1583	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1584
1585	/*
1586	 * We need to set the mapping with the io error as well because a write
1587	 * error will flip the file system readonly, and then syncfs() will
1588	 * return a 0 because we are readonly if we don't modify the err seq for
1589	 * the superblock.
1590	 */
1591	mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
1592
1593	/*
1594	 * If writeback for a btree extent that doesn't belong to a log tree
1595	 * failed, increment the counter transaction->eb_write_errors.
1596	 * We do this because while the transaction is running and before it's
1597	 * committing (when we call filemap_fdata[write|wait]_range against
1598	 * the btree inode), we might have
1599	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
1600	 * returns an error or an error happens during writeback, when we're
1601	 * committing the transaction we wouldn't know about it, since the pages
1602	 * can be no longer dirty nor marked anymore for writeback (if a
1603	 * subsequent modification to the extent buffer didn't happen before the
1604	 * transaction commit), which makes filemap_fdata[write|wait]_range not
1605	 * able to find the pages which contain errors at transaction
1606	 * commit time. So if this happens we must abort the transaction,
1607	 * otherwise we commit a super block with btree roots that point to
1608	 * btree nodes/leafs whose content on disk is invalid - either garbage
1609	 * or the content of some node/leaf from a past generation that got
1610	 * cowed or deleted and is no longer valid.
1611	 *
1612	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1613	 * not be enough - we need to distinguish between log tree extents vs
1614	 * non-log tree extents, and the next filemap_fdatawait_range() call
1615	 * will catch and clear such errors in the mapping - and that call might
1616	 * be from a log sync and not from a transaction commit. Also, checking
1617	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1618	 * not done and would not be reliable - the eb might have been released
1619	 * from memory and reading it back again means that flag would not be
1620	 * set (since it's a runtime flag, not persisted on disk).
1621	 *
1622	 * Using the flags below in the btree inode also makes us achieve the
1623	 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
1624	 * writeback for all dirty pages and before filemap_fdatawait_range()
1625	 * is called, the writeback for all dirty pages had already finished
1626	 * with errors - because we were not using AS_EIO/AS_ENOSPC,
1627	 * filemap_fdatawait_range() would return success, as it could not know
1628	 * that writeback errors happened (the pages were no longer tagged for
1629	 * writeback).
1630	 */
1631	switch (eb->log_index) {
1632	case -1:
1633		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
1634		break;
1635	case 0:
1636		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
1637		break;
1638	case 1:
1639		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
1640		break;
1641	default:
1642		BUG(); /* unexpected, logic error */
1643	}
1644}
1645
1646/*
1647 * The endio specific version which won't touch any unsafe spinlock in endio
1648 * context.
1649 */
1650static struct extent_buffer *find_extent_buffer_nolock(
1651		struct btrfs_fs_info *fs_info, u64 start)
1652{
1653	struct extent_buffer *eb;
1654
1655	rcu_read_lock();
1656	eb = radix_tree_lookup(&fs_info->buffer_radix,
1657			       start >> fs_info->sectorsize_bits);
1658	if (eb && atomic_inc_not_zero(&eb->refs)) {
1659		rcu_read_unlock();
1660		return eb;
1661	}
1662	rcu_read_unlock();
1663	return NULL;
1664}
1665
1666static void end_bbio_meta_write(struct btrfs_bio *bbio)
1667{
1668	struct extent_buffer *eb = bbio->private;
1669	struct btrfs_fs_info *fs_info = eb->fs_info;
1670	bool uptodate = !bbio->bio.bi_status;
1671	struct folio_iter fi;
1672	u32 bio_offset = 0;
1673
1674	if (!uptodate)
1675		set_btree_ioerr(eb);
1676
1677	bio_for_each_folio_all(fi, &bbio->bio) {
1678		u64 start = eb->start + bio_offset;
1679		struct folio *folio = fi.folio;
1680		u32 len = fi.length;
1681
1682		btrfs_folio_clear_writeback(fs_info, folio, start, len);
1683		bio_offset += len;
1684	}
1685
1686	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1687	smp_mb__after_atomic();
1688	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
1689
1690	bio_put(&bbio->bio);
1691}
1692
1693static void prepare_eb_write(struct extent_buffer *eb)
1694{
1695	u32 nritems;
1696	unsigned long start;
1697	unsigned long end;
1698
1699	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1700
1701	/* Set btree blocks beyond nritems with 0 to avoid stale content */
1702	nritems = btrfs_header_nritems(eb);
1703	if (btrfs_header_level(eb) > 0) {
1704		end = btrfs_node_key_ptr_offset(eb, nritems);
1705		memzero_extent_buffer(eb, end, eb->len - end);
1706	} else {
1707		/*
1708		 * Leaf:
1709		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1710		 */
1711		start = btrfs_item_nr_offset(eb, nritems);
1712		end = btrfs_item_nr_offset(eb, 0);
1713		if (nritems == 0)
1714			end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1715		else
1716			end += btrfs_item_offset(eb, nritems - 1);
1717		memzero_extent_buffer(eb, start, end - start);
1718	}
1719}
1720
1721static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
1722					    struct writeback_control *wbc)
1723{
1724	struct btrfs_fs_info *fs_info = eb->fs_info;
1725	struct btrfs_bio *bbio;
1726
1727	prepare_eb_write(eb);
1728
1729	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1730			       REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
1731			       eb->fs_info, end_bbio_meta_write, eb);
1732	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
1733	bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1734	wbc_init_bio(wbc, &bbio->bio);
1735	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1736	bbio->file_offset = eb->start;
1737	if (fs_info->nodesize < PAGE_SIZE) {
1738		struct folio *folio = eb->folios[0];
1739		bool ret;
1740
1741		folio_lock(folio);
1742		btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
1743		if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
1744						       eb->len)) {
1745			folio_clear_dirty_for_io(folio);
1746			wbc->nr_to_write--;
1747		}
1748		ret = bio_add_folio(&bbio->bio, folio, eb->len,
1749				    eb->start - folio_pos(folio));
1750		ASSERT(ret);
1751		wbc_account_cgroup_owner(wbc, folio_page(folio, 0), eb->len);
1752		folio_unlock(folio);
1753	} else {
1754		int num_folios = num_extent_folios(eb);
1755
1756		for (int i = 0; i < num_folios; i++) {
1757			struct folio *folio = eb->folios[i];
1758			bool ret;
1759
1760			folio_lock(folio);
1761			folio_clear_dirty_for_io(folio);
1762			folio_start_writeback(folio);
1763			ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
1764			ASSERT(ret);
1765			wbc_account_cgroup_owner(wbc, folio_page(folio, 0),
1766						 eb->folio_size);
1767			wbc->nr_to_write -= folio_nr_pages(folio);
1768			folio_unlock(folio);
1769		}
1770	}
1771	btrfs_submit_bio(bbio, 0);
1772}
1773
1774/*
1775 * Submit one subpage btree page.
1776 *
1777 * The main difference to submit_eb_page() is:
1778 * - Page locking
1779 *   For subpage, we don't rely on page locking at all.
1780 *
1781 * - Flush write bio
1782 *   We only flush bio if we may be unable to fit current extent buffers into
1783 *   current bio.
1784 *
1785 * Return >=0 for the number of submitted extent buffers.
1786 * Return <0 for fatal error.
1787 */
1788static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
1789{
1790	struct btrfs_fs_info *fs_info = page_to_fs_info(page);
1791	struct folio *folio = page_folio(page);
1792	int submitted = 0;
1793	u64 page_start = page_offset(page);
1794	int bit_start = 0;
1795	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
1796
1797	/* Lock and write each dirty extent buffers in the range */
1798	while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
1799		struct btrfs_subpage *subpage = folio_get_private(folio);
1800		struct extent_buffer *eb;
1801		unsigned long flags;
1802		u64 start;
1803
1804		/*
1805		 * Take private lock to ensure the subpage won't be detached
1806		 * in the meantime.
1807		 */
1808		spin_lock(&page->mapping->i_private_lock);
1809		if (!folio_test_private(folio)) {
1810			spin_unlock(&page->mapping->i_private_lock);
1811			break;
1812		}
1813		spin_lock_irqsave(&subpage->lock, flags);
1814		if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
1815			      subpage->bitmaps)) {
1816			spin_unlock_irqrestore(&subpage->lock, flags);
1817			spin_unlock(&page->mapping->i_private_lock);
1818			bit_start++;
1819			continue;
1820		}
1821
1822		start = page_start + bit_start * fs_info->sectorsize;
1823		bit_start += sectors_per_node;
1824
1825		/*
1826		 * Here we just want to grab the eb without touching extra
1827		 * spin locks, so call find_extent_buffer_nolock().
1828		 */
1829		eb = find_extent_buffer_nolock(fs_info, start);
1830		spin_unlock_irqrestore(&subpage->lock, flags);
1831		spin_unlock(&page->mapping->i_private_lock);
1832
1833		/*
1834		 * The eb has already reached 0 refs thus find_extent_buffer()
1835		 * doesn't return it. We don't need to write back such eb
1836		 * anyway.
1837		 */
1838		if (!eb)
1839			continue;
1840
1841		if (lock_extent_buffer_for_io(eb, wbc)) {
1842			write_one_eb(eb, wbc);
1843			submitted++;
1844		}
1845		free_extent_buffer(eb);
1846	}
1847	return submitted;
1848}
1849
1850/*
1851 * Submit all page(s) of one extent buffer.
1852 *
1853 * @page:	the page of one extent buffer
1854 * @eb_context:	to determine if we need to submit this page, if current page
1855 *		belongs to this eb, we don't need to submit
1856 *
1857 * The caller should pass each page in their bytenr order, and here we use
1858 * @eb_context to determine if we have submitted pages of one extent buffer.
1859 *
1860 * If we have, we just skip until we hit a new page that doesn't belong to
1861 * current @eb_context.
1862 *
1863 * If not, we submit all the page(s) of the extent buffer.
1864 *
1865 * Return >0 if we have submitted the extent buffer successfully.
1866 * Return 0 if we don't need to submit the page, as it's already submitted by
1867 * previous call.
1868 * Return <0 for fatal error.
1869 */
1870static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
1871{
1872	struct writeback_control *wbc = ctx->wbc;
1873	struct address_space *mapping = page->mapping;
1874	struct folio *folio = page_folio(page);
1875	struct extent_buffer *eb;
1876	int ret;
1877
1878	if (!folio_test_private(folio))
1879		return 0;
1880
1881	if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
1882		return submit_eb_subpage(page, wbc);
1883
1884	spin_lock(&mapping->i_private_lock);
1885	if (!folio_test_private(folio)) {
1886		spin_unlock(&mapping->i_private_lock);
1887		return 0;
1888	}
1889
1890	eb = folio_get_private(folio);
1891
1892	/*
1893	 * Shouldn't happen and normally this would be a BUG_ON but no point
1894	 * crashing the machine for something we can survive anyway.
1895	 */
1896	if (WARN_ON(!eb)) {
1897		spin_unlock(&mapping->i_private_lock);
1898		return 0;
1899	}
1900
1901	if (eb == ctx->eb) {
1902		spin_unlock(&mapping->i_private_lock);
1903		return 0;
1904	}
1905	ret = atomic_inc_not_zero(&eb->refs);
1906	spin_unlock(&mapping->i_private_lock);
1907	if (!ret)
1908		return 0;
1909
1910	ctx->eb = eb;
1911
1912	ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1913	if (ret) {
1914		if (ret == -EBUSY)
1915			ret = 0;
1916		free_extent_buffer(eb);
1917		return ret;
1918	}
1919
1920	if (!lock_extent_buffer_for_io(eb, wbc)) {
1921		free_extent_buffer(eb);
1922		return 0;
1923	}
1924	/* Implies write in zoned mode. */
1925	if (ctx->zoned_bg) {
1926		/* Mark the last eb in the block group. */
1927		btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
1928		ctx->zoned_bg->meta_write_pointer += eb->len;
1929	}
1930	write_one_eb(eb, wbc);
1931	free_extent_buffer(eb);
1932	return 1;
1933}
1934
1935int btree_write_cache_pages(struct address_space *mapping,
1936				   struct writeback_control *wbc)
1937{
1938	struct btrfs_eb_write_context ctx = { .wbc = wbc };
1939	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
1940	int ret = 0;
1941	int done = 0;
1942	int nr_to_write_done = 0;
1943	struct folio_batch fbatch;
1944	unsigned int nr_folios;
1945	pgoff_t index;
1946	pgoff_t end;		/* Inclusive */
1947	int scanned = 0;
1948	xa_mark_t tag;
1949
1950	folio_batch_init(&fbatch);
1951	if (wbc->range_cyclic) {
1952		index = mapping->writeback_index; /* Start from prev offset */
1953		end = -1;
1954		/*
1955		 * Start from the beginning does not need to cycle over the
1956		 * range, mark it as scanned.
1957		 */
1958		scanned = (index == 0);
1959	} else {
1960		index = wbc->range_start >> PAGE_SHIFT;
1961		end = wbc->range_end >> PAGE_SHIFT;
1962		scanned = 1;
1963	}
1964	if (wbc->sync_mode == WB_SYNC_ALL)
1965		tag = PAGECACHE_TAG_TOWRITE;
1966	else
1967		tag = PAGECACHE_TAG_DIRTY;
1968	btrfs_zoned_meta_io_lock(fs_info);
1969retry:
1970	if (wbc->sync_mode == WB_SYNC_ALL)
1971		tag_pages_for_writeback(mapping, index, end);
1972	while (!done && !nr_to_write_done && (index <= end) &&
1973	       (nr_folios = filemap_get_folios_tag(mapping, &index, end,
1974					    tag, &fbatch))) {
1975		unsigned i;
1976
1977		for (i = 0; i < nr_folios; i++) {
1978			struct folio *folio = fbatch.folios[i];
1979
1980			ret = submit_eb_page(&folio->page, &ctx);
1981			if (ret == 0)
1982				continue;
1983			if (ret < 0) {
1984				done = 1;
1985				break;
1986			}
1987
1988			/*
1989			 * the filesystem may choose to bump up nr_to_write.
1990			 * We have to make sure to honor the new nr_to_write
1991			 * at any time
1992			 */
1993			nr_to_write_done = wbc->nr_to_write <= 0;
1994		}
1995		folio_batch_release(&fbatch);
1996		cond_resched();
1997	}
1998	if (!scanned && !done) {
1999		/*
2000		 * We hit the last page and there is more work to be done: wrap
2001		 * back to the start of the file
2002		 */
2003		scanned = 1;
2004		index = 0;
2005		goto retry;
2006	}
2007	/*
2008	 * If something went wrong, don't allow any metadata write bio to be
2009	 * submitted.
2010	 *
2011	 * This would prevent use-after-free if we had dirty pages not
2012	 * cleaned up, which can still happen by fuzzed images.
2013	 *
2014	 * - Bad extent tree
2015	 *   Allowing existing tree block to be allocated for other trees.
2016	 *
2017	 * - Log tree operations
2018	 *   Exiting tree blocks get allocated to log tree, bumps its
2019	 *   generation, then get cleaned in tree re-balance.
2020	 *   Such tree block will not be written back, since it's clean,
2021	 *   thus no WRITTEN flag set.
2022	 *   And after log writes back, this tree block is not traced by
2023	 *   any dirty extent_io_tree.
2024	 *
2025	 * - Offending tree block gets re-dirtied from its original owner
2026	 *   Since it has bumped generation, no WRITTEN flag, it can be
2027	 *   reused without COWing. This tree block will not be traced
2028	 *   by btrfs_transaction::dirty_pages.
2029	 *
2030	 *   Now such dirty tree block will not be cleaned by any dirty
2031	 *   extent io tree. Thus we don't want to submit such wild eb
2032	 *   if the fs already has error.
2033	 *
2034	 * We can get ret > 0 from submit_extent_page() indicating how many ebs
2035	 * were submitted. Reset it to 0 to avoid false alerts for the caller.
2036	 */
2037	if (ret > 0)
2038		ret = 0;
2039	if (!ret && BTRFS_FS_ERROR(fs_info))
2040		ret = -EROFS;
2041
2042	if (ctx.zoned_bg)
2043		btrfs_put_block_group(ctx.zoned_bg);
2044	btrfs_zoned_meta_io_unlock(fs_info);
2045	return ret;
2046}
2047
2048/*
2049 * Walk the list of dirty pages of the given address space and write all of them.
2050 *
2051 * @mapping:   address space structure to write
2052 * @wbc:       subtract the number of written pages from *@wbc->nr_to_write
2053 * @bio_ctrl:  holds context for the write, namely the bio
2054 *
2055 * If a page is already under I/O, write_cache_pages() skips it, even
2056 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2057 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2058 * and msync() need to guarantee that all the data which was dirty at the time
2059 * the call was made get new I/O started against them.  If wbc->sync_mode is
2060 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2061 * existing IO to complete.
2062 */
2063static int extent_write_cache_pages(struct address_space *mapping,
2064			     struct btrfs_bio_ctrl *bio_ctrl)
2065{
2066	struct writeback_control *wbc = bio_ctrl->wbc;
2067	struct inode *inode = mapping->host;
2068	int ret = 0;
2069	int done = 0;
2070	int nr_to_write_done = 0;
2071	struct folio_batch fbatch;
2072	unsigned int nr_folios;
2073	pgoff_t index;
2074	pgoff_t end;		/* Inclusive */
2075	pgoff_t done_index;
2076	int range_whole = 0;
2077	int scanned = 0;
2078	xa_mark_t tag;
2079
2080	/*
2081	 * We have to hold onto the inode so that ordered extents can do their
2082	 * work when the IO finishes.  The alternative to this is failing to add
2083	 * an ordered extent if the igrab() fails there and that is a huge pain
2084	 * to deal with, so instead just hold onto the inode throughout the
2085	 * writepages operation.  If it fails here we are freeing up the inode
2086	 * anyway and we'd rather not waste our time writing out stuff that is
2087	 * going to be truncated anyway.
2088	 */
2089	if (!igrab(inode))
2090		return 0;
2091
2092	folio_batch_init(&fbatch);
2093	if (wbc->range_cyclic) {
2094		index = mapping->writeback_index; /* Start from prev offset */
2095		end = -1;
2096		/*
2097		 * Start from the beginning does not need to cycle over the
2098		 * range, mark it as scanned.
2099		 */
2100		scanned = (index == 0);
2101	} else {
2102		index = wbc->range_start >> PAGE_SHIFT;
2103		end = wbc->range_end >> PAGE_SHIFT;
2104		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2105			range_whole = 1;
2106		scanned = 1;
2107	}
2108
2109	/*
2110	 * We do the tagged writepage as long as the snapshot flush bit is set
2111	 * and we are the first one who do the filemap_flush() on this inode.
2112	 *
2113	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2114	 * not race in and drop the bit.
2115	 */
2116	if (range_whole && wbc->nr_to_write == LONG_MAX &&
2117	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2118			       &BTRFS_I(inode)->runtime_flags))
2119		wbc->tagged_writepages = 1;
2120
2121	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2122		tag = PAGECACHE_TAG_TOWRITE;
2123	else
2124		tag = PAGECACHE_TAG_DIRTY;
2125retry:
2126	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2127		tag_pages_for_writeback(mapping, index, end);
2128	done_index = index;
2129	while (!done && !nr_to_write_done && (index <= end) &&
2130			(nr_folios = filemap_get_folios_tag(mapping, &index,
2131							end, tag, &fbatch))) {
2132		unsigned i;
2133
2134		for (i = 0; i < nr_folios; i++) {
2135			struct folio *folio = fbatch.folios[i];
2136
2137			done_index = folio_next_index(folio);
2138			/*
2139			 * At this point we hold neither the i_pages lock nor
2140			 * the page lock: the page may be truncated or
2141			 * invalidated (changing page->mapping to NULL),
2142			 * or even swizzled back from swapper_space to
2143			 * tmpfs file mapping
2144			 */
2145			if (!folio_trylock(folio)) {
2146				submit_write_bio(bio_ctrl, 0);
2147				folio_lock(folio);
2148			}
2149
2150			if (unlikely(folio->mapping != mapping)) {
2151				folio_unlock(folio);
2152				continue;
2153			}
2154
2155			if (!folio_test_dirty(folio)) {
2156				/* Someone wrote it for us. */
2157				folio_unlock(folio);
2158				continue;
2159			}
2160
2161			if (wbc->sync_mode != WB_SYNC_NONE) {
2162				if (folio_test_writeback(folio))
2163					submit_write_bio(bio_ctrl, 0);
2164				folio_wait_writeback(folio);
2165			}
2166
2167			if (folio_test_writeback(folio) ||
2168			    !folio_clear_dirty_for_io(folio)) {
2169				folio_unlock(folio);
2170				continue;
2171			}
2172
2173			ret = __extent_writepage(&folio->page, bio_ctrl);
2174			if (ret < 0) {
2175				done = 1;
2176				break;
2177			}
2178
2179			/*
2180			 * The filesystem may choose to bump up nr_to_write.
2181			 * We have to make sure to honor the new nr_to_write
2182			 * at any time.
2183			 */
2184			nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2185					    wbc->nr_to_write <= 0);
2186		}
2187		folio_batch_release(&fbatch);
2188		cond_resched();
2189	}
2190	if (!scanned && !done) {
2191		/*
2192		 * We hit the last page and there is more work to be done: wrap
2193		 * back to the start of the file
2194		 */
2195		scanned = 1;
2196		index = 0;
2197
2198		/*
2199		 * If we're looping we could run into a page that is locked by a
2200		 * writer and that writer could be waiting on writeback for a
2201		 * page in our current bio, and thus deadlock, so flush the
2202		 * write bio here.
2203		 */
2204		submit_write_bio(bio_ctrl, 0);
2205		goto retry;
2206	}
2207
2208	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2209		mapping->writeback_index = done_index;
2210
2211	btrfs_add_delayed_iput(BTRFS_I(inode));
2212	return ret;
2213}
2214
2215/*
2216 * Submit the pages in the range to bio for call sites which delalloc range has
2217 * already been ran (aka, ordered extent inserted) and all pages are still
2218 * locked.
2219 */
2220void extent_write_locked_range(struct inode *inode, struct page *locked_page,
2221			       u64 start, u64 end, struct writeback_control *wbc,
2222			       bool pages_dirty)
2223{
2224	bool found_error = false;
2225	int ret = 0;
2226	struct address_space *mapping = inode->i_mapping;
2227	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2228	const u32 sectorsize = fs_info->sectorsize;
2229	loff_t i_size = i_size_read(inode);
2230	u64 cur = start;
2231	struct btrfs_bio_ctrl bio_ctrl = {
2232		.wbc = wbc,
2233		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2234	};
2235
2236	if (wbc->no_cgroup_owner)
2237		bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2238
2239	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2240
2241	while (cur <= end) {
2242		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2243		u32 cur_len = cur_end + 1 - cur;
2244		struct page *page;
2245		int nr = 0;
2246
2247		page = find_get_page(mapping, cur >> PAGE_SHIFT);
2248		ASSERT(PageLocked(page));
2249		if (pages_dirty && page != locked_page) {
2250			ASSERT(PageDirty(page));
2251			clear_page_dirty_for_io(page);
2252		}
2253
2254		ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
2255					    i_size, &nr);
2256		if (ret == 1)
2257			goto next_page;
2258
2259		/* Make sure the mapping tag for page dirty gets cleared. */
2260		if (nr == 0) {
2261			set_page_writeback(page);
2262			end_page_writeback(page);
2263		}
2264		if (ret) {
2265			btrfs_mark_ordered_io_finished(BTRFS_I(inode), page,
2266						       cur, cur_len, !ret);
2267			mapping_set_error(page->mapping, ret);
2268		}
2269		btrfs_folio_unlock_writer(fs_info, page_folio(page), cur, cur_len);
2270		if (ret < 0)
2271			found_error = true;
2272next_page:
2273		put_page(page);
2274		cur = cur_end + 1;
2275	}
2276
2277	submit_write_bio(&bio_ctrl, found_error ? ret : 0);
2278}
2279
2280int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
2281{
2282	struct inode *inode = mapping->host;
2283	int ret = 0;
2284	struct btrfs_bio_ctrl bio_ctrl = {
2285		.wbc = wbc,
2286		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2287	};
2288
2289	/*
2290	 * Allow only a single thread to do the reloc work in zoned mode to
2291	 * protect the write pointer updates.
2292	 */
2293	btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2294	ret = extent_write_cache_pages(mapping, &bio_ctrl);
2295	submit_write_bio(&bio_ctrl, ret);
2296	btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2297	return ret;
2298}
2299
2300void btrfs_readahead(struct readahead_control *rac)
2301{
2302	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
2303	struct page *pagepool[16];
2304	struct extent_map *em_cached = NULL;
2305	u64 prev_em_start = (u64)-1;
2306	int nr;
2307
2308	while ((nr = readahead_page_batch(rac, pagepool))) {
2309		u64 contig_start = readahead_pos(rac);
2310		u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
2311
2312		contiguous_readpages(pagepool, nr, contig_start, contig_end,
2313				&em_cached, &bio_ctrl, &prev_em_start);
2314	}
2315
2316	if (em_cached)
2317		free_extent_map(em_cached);
2318	submit_one_bio(&bio_ctrl);
2319}
2320
2321/*
2322 * basic invalidate_folio code, this waits on any locked or writeback
2323 * ranges corresponding to the folio, and then deletes any extent state
2324 * records from the tree
2325 */
2326int extent_invalidate_folio(struct extent_io_tree *tree,
2327			  struct folio *folio, size_t offset)
2328{
2329	struct extent_state *cached_state = NULL;
2330	u64 start = folio_pos(folio);
2331	u64 end = start + folio_size(folio) - 1;
2332	size_t blocksize = folio_to_fs_info(folio)->sectorsize;
2333
2334	/* This function is only called for the btree inode */
2335	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2336
2337	start += ALIGN(offset, blocksize);
2338	if (start > end)
2339		return 0;
2340
2341	lock_extent(tree, start, end, &cached_state);
2342	folio_wait_writeback(folio);
2343
2344	/*
2345	 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2346	 * so here we only need to unlock the extent range to free any
2347	 * existing extent state.
2348	 */
2349	unlock_extent(tree, start, end, &cached_state);
2350	return 0;
2351}
2352
2353/*
2354 * a helper for release_folio, this tests for areas of the page that
2355 * are locked or under IO and drops the related state bits if it is safe
2356 * to drop the page.
2357 */
2358static bool try_release_extent_state(struct extent_io_tree *tree,
2359				    struct page *page, gfp_t mask)
2360{
2361	u64 start = page_offset(page);
2362	u64 end = start + PAGE_SIZE - 1;
2363	bool ret;
2364
2365	if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
2366		ret = false;
2367	} else {
2368		u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
2369				   EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
2370				   EXTENT_QGROUP_RESERVED);
2371		int ret2;
2372
2373		/*
2374		 * At this point we can safely clear everything except the
2375		 * locked bit, the nodatasum bit and the delalloc new bit.
2376		 * The delalloc new bit will be cleared by ordered extent
2377		 * completion.
2378		 */
2379		ret2 = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
2380
2381		/* if clear_extent_bit failed for enomem reasons,
2382		 * we can't allow the release to continue.
2383		 */
2384		if (ret2 < 0)
2385			ret = false;
2386		else
2387			ret = true;
2388	}
2389	return ret;
2390}
2391
2392/*
2393 * a helper for release_folio.  As long as there are no locked extents
2394 * in the range corresponding to the page, both state records and extent
2395 * map records are removed
2396 */
2397bool try_release_extent_mapping(struct page *page, gfp_t mask)
2398{
2399	u64 start = page_offset(page);
2400	u64 end = start + PAGE_SIZE - 1;
2401	struct btrfs_inode *inode = page_to_inode(page);
2402	struct extent_io_tree *io_tree = &inode->io_tree;
2403
2404	while (start <= end) {
2405		const u64 cur_gen = btrfs_get_fs_generation(inode->root->fs_info);
2406		const u64 len = end - start + 1;
2407		struct extent_map_tree *extent_tree = &inode->extent_tree;
2408		struct extent_map *em;
2409
2410		write_lock(&extent_tree->lock);
2411		em = lookup_extent_mapping(extent_tree, start, len);
2412		if (!em) {
2413			write_unlock(&extent_tree->lock);
2414			break;
2415		}
2416		if ((em->flags & EXTENT_FLAG_PINNED) || em->start != start) {
2417			write_unlock(&extent_tree->lock);
2418			free_extent_map(em);
2419			break;
2420		}
2421		if (test_range_bit_exists(io_tree, em->start,
2422					  extent_map_end(em) - 1, EXTENT_LOCKED))
2423			goto next;
2424		/*
2425		 * If it's not in the list of modified extents, used by a fast
2426		 * fsync, we can remove it. If it's being logged we can safely
2427		 * remove it since fsync took an extra reference on the em.
2428		 */
2429		if (list_empty(&em->list) || (em->flags & EXTENT_FLAG_LOGGING))
2430			goto remove_em;
2431		/*
2432		 * If it's in the list of modified extents, remove it only if
2433		 * its generation is older then the current one, in which case
2434		 * we don't need it for a fast fsync. Otherwise don't remove it,
2435		 * we could be racing with an ongoing fast fsync that could miss
2436		 * the new extent.
2437		 */
2438		if (em->generation >= cur_gen)
2439			goto next;
2440remove_em:
2441		/*
2442		 * We only remove extent maps that are not in the list of
2443		 * modified extents or that are in the list but with a
2444		 * generation lower then the current generation, so there is no
2445		 * need to set the full fsync flag on the inode (it hurts the
2446		 * fsync performance for workloads with a data size that exceeds
2447		 * or is close to the system's memory).
2448		 */
2449		remove_extent_mapping(inode, em);
2450		/* Once for the inode's extent map tree. */
2451		free_extent_map(em);
2452next:
2453		start = extent_map_end(em);
2454		write_unlock(&extent_tree->lock);
2455
2456		/* Once for us, for the lookup_extent_mapping() reference. */
2457		free_extent_map(em);
2458
2459		if (need_resched()) {
2460			/*
2461			 * If we need to resched but we can't block just exit
2462			 * and leave any remaining extent maps.
2463			 */
2464			if (!gfpflags_allow_blocking(mask))
2465				break;
2466
2467			cond_resched();
2468		}
2469	}
2470	return try_release_extent_state(io_tree, page, mask);
2471}
2472
2473struct btrfs_fiemap_entry {
2474	u64 offset;
2475	u64 phys;
2476	u64 len;
2477	u32 flags;
2478};
2479
2480/*
2481 * Indicate the caller of emit_fiemap_extent() that it needs to unlock the file
2482 * range from the inode's io tree, unlock the subvolume tree search path, flush
2483 * the fiemap cache and relock the file range and research the subvolume tree.
2484 * The value here is something negative that can't be confused with a valid
2485 * errno value and different from 1 because that's also a return value from
2486 * fiemap_fill_next_extent() and also it's often used to mean some btree search
2487 * did not find a key, so make it some distinct negative value.
2488 */
2489#define BTRFS_FIEMAP_FLUSH_CACHE (-(MAX_ERRNO + 1))
2490
2491/*
2492 * Used to:
2493 *
2494 * - Cache the next entry to be emitted to the fiemap buffer, so that we can
2495 *   merge extents that are contiguous and can be grouped as a single one;
2496 *
2497 * - Store extents ready to be written to the fiemap buffer in an intermediary
2498 *   buffer. This intermediary buffer is to ensure that in case the fiemap
2499 *   buffer is memory mapped to the fiemap target file, we don't deadlock
2500 *   during btrfs_page_mkwrite(). This is because during fiemap we are locking
2501 *   an extent range in order to prevent races with delalloc flushing and
2502 *   ordered extent completion, which is needed in order to reliably detect
2503 *   delalloc in holes and prealloc extents. And this can lead to a deadlock
2504 *   if the fiemap buffer is memory mapped to the file we are running fiemap
2505 *   against (a silly, useless in practice scenario, but possible) because
2506 *   btrfs_page_mkwrite() will try to lock the same extent range.
2507 */
2508struct fiemap_cache {
2509	/* An array of ready fiemap entries. */
2510	struct btrfs_fiemap_entry *entries;
2511	/* Number of entries in the entries array. */
2512	int entries_size;
2513	/* Index of the next entry in the entries array to write to. */
2514	int entries_pos;
2515	/*
2516	 * Once the entries array is full, this indicates what's the offset for
2517	 * the next file extent item we must search for in the inode's subvolume
2518	 * tree after unlocking the extent range in the inode's io tree and
2519	 * releasing the search path.
2520	 */
2521	u64 next_search_offset;
2522	/*
2523	 * This matches struct fiemap_extent_info::fi_mapped_extents, we use it
2524	 * to count ourselves emitted extents and stop instead of relying on
2525	 * fiemap_fill_next_extent() because we buffer ready fiemap entries at
2526	 * the @entries array, and we want to stop as soon as we hit the max
2527	 * amount of extents to map, not just to save time but also to make the
2528	 * logic at extent_fiemap() simpler.
2529	 */
2530	unsigned int extents_mapped;
2531	/* Fields for the cached extent (unsubmitted, not ready, extent). */
2532	u64 offset;
2533	u64 phys;
2534	u64 len;
2535	u32 flags;
2536	bool cached;
2537};
2538
2539static int flush_fiemap_cache(struct fiemap_extent_info *fieinfo,
2540			      struct fiemap_cache *cache)
2541{
2542	for (int i = 0; i < cache->entries_pos; i++) {
2543		struct btrfs_fiemap_entry *entry = &cache->entries[i];
2544		int ret;
2545
2546		ret = fiemap_fill_next_extent(fieinfo, entry->offset,
2547					      entry->phys, entry->len,
2548					      entry->flags);
2549		/*
2550		 * Ignore 1 (reached max entries) because we keep track of that
2551		 * ourselves in emit_fiemap_extent().
2552		 */
2553		if (ret < 0)
2554			return ret;
2555	}
2556	cache->entries_pos = 0;
2557
2558	return 0;
2559}
2560
2561/*
2562 * Helper to submit fiemap extent.
2563 *
2564 * Will try to merge current fiemap extent specified by @offset, @phys,
2565 * @len and @flags with cached one.
2566 * And only when we fails to merge, cached one will be submitted as
2567 * fiemap extent.
2568 *
2569 * Return value is the same as fiemap_fill_next_extent().
2570 */
2571static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
2572				struct fiemap_cache *cache,
2573				u64 offset, u64 phys, u64 len, u32 flags)
2574{
2575	struct btrfs_fiemap_entry *entry;
2576	u64 cache_end;
2577
2578	/* Set at the end of extent_fiemap(). */
2579	ASSERT((flags & FIEMAP_EXTENT_LAST) == 0);
2580
2581	if (!cache->cached)
2582		goto assign;
2583
2584	/*
2585	 * When iterating the extents of the inode, at extent_fiemap(), we may
2586	 * find an extent that starts at an offset behind the end offset of the
2587	 * previous extent we processed. This happens if fiemap is called
2588	 * without FIEMAP_FLAG_SYNC and there are ordered extents completing
2589	 * after we had to unlock the file range, release the search path, emit
2590	 * the fiemap extents stored in the buffer (cache->entries array) and
2591	 * the lock the remainder of the range and re-search the btree.
2592	 *
2593	 * For example we are in leaf X processing its last item, which is the
2594	 * file extent item for file range [512K, 1M[, and after
2595	 * btrfs_next_leaf() releases the path, there's an ordered extent that
2596	 * completes for the file range [768K, 2M[, and that results in trimming
2597	 * the file extent item so that it now corresponds to the file range
2598	 * [512K, 768K[ and a new file extent item is inserted for the file
2599	 * range [768K, 2M[, which may end up as the last item of leaf X or as
2600	 * the first item of the next leaf - in either case btrfs_next_leaf()
2601	 * will leave us with a path pointing to the new extent item, for the
2602	 * file range [768K, 2M[, since that's the first key that follows the
2603	 * last one we processed. So in order not to report overlapping extents
2604	 * to user space, we trim the length of the previously cached extent and
2605	 * emit it.
2606	 *
2607	 * Upon calling btrfs_next_leaf() we may also find an extent with an
2608	 * offset smaller than or equals to cache->offset, and this happens
2609	 * when we had a hole or prealloc extent with several delalloc ranges in
2610	 * it, but after btrfs_next_leaf() released the path, delalloc was
2611	 * flushed and the resulting ordered extents were completed, so we can
2612	 * now have found a file extent item for an offset that is smaller than
2613	 * or equals to what we have in cache->offset. We deal with this as
2614	 * described below.
2615	 */
2616	cache_end = cache->offset + cache->len;
2617	if (cache_end > offset) {
2618		if (offset == cache->offset) {
2619			/*
2620			 * We cached a dealloc range (found in the io tree) for
2621			 * a hole or prealloc extent and we have now found a
2622			 * file extent item for the same offset. What we have
2623			 * now is more recent and up to date, so discard what
2624			 * we had in the cache and use what we have just found.
2625			 */
2626			goto assign;
2627		} else if (offset > cache->offset) {
2628			/*
2629			 * The extent range we previously found ends after the
2630			 * offset of the file extent item we found and that
2631			 * offset falls somewhere in the middle of that previous
2632			 * extent range. So adjust the range we previously found
2633			 * to end at the offset of the file extent item we have
2634			 * just found, since this extent is more up to date.
2635			 * Emit that adjusted range and cache the file extent
2636			 * item we have just found. This corresponds to the case
2637			 * where a previously found file extent item was split
2638			 * due to an ordered extent completing.
2639			 */
2640			cache->len = offset - cache->offset;
2641			goto emit;
2642		} else {
2643			const u64 range_end = offset + len;
2644
2645			/*
2646			 * The offset of the file extent item we have just found
2647			 * is behind the cached offset. This means we were
2648			 * processing a hole or prealloc extent for which we
2649			 * have found delalloc ranges (in the io tree), so what
2650			 * we have in the cache is the last delalloc range we
2651			 * found while the file extent item we found can be
2652			 * either for a whole delalloc range we previously
2653			 * emmitted or only a part of that range.
2654			 *
2655			 * We have two cases here:
2656			 *
2657			 * 1) The file extent item's range ends at or behind the
2658			 *    cached extent's end. In this case just ignore the
2659			 *    current file extent item because we don't want to
2660			 *    overlap with previous ranges that may have been
2661			 *    emmitted already;
2662			 *
2663			 * 2) The file extent item starts behind the currently
2664			 *    cached extent but its end offset goes beyond the
2665			 *    end offset of the cached extent. We don't want to
2666			 *    overlap with a previous range that may have been
2667			 *    emmitted already, so we emit the currently cached
2668			 *    extent and then partially store the current file
2669			 *    extent item's range in the cache, for the subrange
2670			 *    going the cached extent's end to the end of the
2671			 *    file extent item.
2672			 */
2673			if (range_end <= cache_end)
2674				return 0;
2675
2676			if (!(flags & (FIEMAP_EXTENT_ENCODED | FIEMAP_EXTENT_DELALLOC)))
2677				phys += cache_end - offset;
2678
2679			offset = cache_end;
2680			len = range_end - cache_end;
2681			goto emit;
2682		}
2683	}
2684
2685	/*
2686	 * Only merges fiemap extents if
2687	 * 1) Their logical addresses are continuous
2688	 *
2689	 * 2) Their physical addresses are continuous
2690	 *    So truly compressed (physical size smaller than logical size)
2691	 *    extents won't get merged with each other
2692	 *
2693	 * 3) Share same flags
2694	 */
2695	if (cache->offset + cache->len  == offset &&
2696	    cache->phys + cache->len == phys  &&
2697	    cache->flags == flags) {
2698		cache->len += len;
2699		return 0;
2700	}
2701
2702emit:
2703	/* Not mergeable, need to submit cached one */
2704
2705	if (cache->entries_pos == cache->entries_size) {
2706		/*
2707		 * We will need to research for the end offset of the last
2708		 * stored extent and not from the current offset, because after
2709		 * unlocking the range and releasing the path, if there's a hole
2710		 * between that end offset and this current offset, a new extent
2711		 * may have been inserted due to a new write, so we don't want
2712		 * to miss it.
2713		 */
2714		entry = &cache->entries[cache->entries_size - 1];
2715		cache->next_search_offset = entry->offset + entry->len;
2716		cache->cached = false;
2717
2718		return BTRFS_FIEMAP_FLUSH_CACHE;
2719	}
2720
2721	entry = &cache->entries[cache->entries_pos];
2722	entry->offset = cache->offset;
2723	entry->phys = cache->phys;
2724	entry->len = cache->len;
2725	entry->flags = cache->flags;
2726	cache->entries_pos++;
2727	cache->extents_mapped++;
2728
2729	if (cache->extents_mapped == fieinfo->fi_extents_max) {
2730		cache->cached = false;
2731		return 1;
2732	}
2733assign:
2734	cache->cached = true;
2735	cache->offset = offset;
2736	cache->phys = phys;
2737	cache->len = len;
2738	cache->flags = flags;
2739
2740	return 0;
2741}
2742
2743/*
2744 * Emit last fiemap cache
2745 *
2746 * The last fiemap cache may still be cached in the following case:
2747 * 0		      4k		    8k
2748 * |<- Fiemap range ->|
2749 * |<------------  First extent ----------->|
2750 *
2751 * In this case, the first extent range will be cached but not emitted.
2752 * So we must emit it before ending extent_fiemap().
2753 */
2754static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
2755				  struct fiemap_cache *cache)
2756{
2757	int ret;
2758
2759	if (!cache->cached)
2760		return 0;
2761
2762	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
2763				      cache->len, cache->flags);
2764	cache->cached = false;
2765	if (ret > 0)
2766		ret = 0;
2767	return ret;
2768}
2769
2770static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *path)
2771{
2772	struct extent_buffer *clone = path->nodes[0];
2773	struct btrfs_key key;
2774	int slot;
2775	int ret;
2776
2777	path->slots[0]++;
2778	if (path->slots[0] < btrfs_header_nritems(path->nodes[0]))
2779		return 0;
2780
2781	/*
2782	 * Add a temporary extra ref to an already cloned extent buffer to
2783	 * prevent btrfs_next_leaf() freeing it, we want to reuse it to avoid
2784	 * the cost of allocating a new one.
2785	 */
2786	ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED, &clone->bflags));
2787	atomic_inc(&clone->refs);
2788
2789	ret = btrfs_next_leaf(inode->root, path);
2790	if (ret != 0)
2791		goto out;
2792
2793	/*
2794	 * Don't bother with cloning if there are no more file extent items for
2795	 * our inode.
2796	 */
2797	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2798	if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY) {
2799		ret = 1;
2800		goto out;
2801	}
2802
2803	/*
2804	 * Important to preserve the start field, for the optimizations when
2805	 * checking if extents are shared (see extent_fiemap()).
2806	 *
2807	 * We must set ->start before calling copy_extent_buffer_full().  If we
2808	 * are on sub-pagesize blocksize, we use ->start to determine the offset
2809	 * into the folio where our eb exists, and if we update ->start after
2810	 * the fact then any subsequent reads of the eb may read from a
2811	 * different offset in the folio than where we originally copied into.
2812	 */
2813	clone->start = path->nodes[0]->start;
2814	/* See the comment at fiemap_search_slot() about why we clone. */
2815	copy_extent_buffer_full(clone, path->nodes[0]);
2816
2817	slot = path->slots[0];
2818	btrfs_release_path(path);
2819	path->nodes[0] = clone;
2820	path->slots[0] = slot;
2821out:
2822	if (ret)
2823		free_extent_buffer(clone);
2824
2825	return ret;
2826}
2827
2828/*
2829 * Search for the first file extent item that starts at a given file offset or
2830 * the one that starts immediately before that offset.
2831 * Returns: 0 on success, < 0 on error, 1 if not found.
2832 */
2833static int fiemap_search_slot(struct btrfs_inode *inode, struct btrfs_path *path,
2834			      u64 file_offset)
2835{
2836	const u64 ino = btrfs_ino(inode);
2837	struct btrfs_root *root = inode->root;
2838	struct extent_buffer *clone;
2839	struct btrfs_key key;
2840	int slot;
2841	int ret;
2842
2843	key.objectid = ino;
2844	key.type = BTRFS_EXTENT_DATA_KEY;
2845	key.offset = file_offset;
2846
2847	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2848	if (ret < 0)
2849		return ret;
2850
2851	if (ret > 0 && path->slots[0] > 0) {
2852		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
2853		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
2854			path->slots[0]--;
2855	}
2856
2857	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2858		ret = btrfs_next_leaf(root, path);
2859		if (ret != 0)
2860			return ret;
2861
2862		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2863		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
2864			return 1;
2865	}
2866
2867	/*
2868	 * We clone the leaf and use it during fiemap. This is because while
2869	 * using the leaf we do expensive things like checking if an extent is
2870	 * shared, which can take a long time. In order to prevent blocking
2871	 * other tasks for too long, we use a clone of the leaf. We have locked
2872	 * the file range in the inode's io tree, so we know none of our file
2873	 * extent items can change. This way we avoid blocking other tasks that
2874	 * want to insert items for other inodes in the same leaf or b+tree
2875	 * rebalance operations (triggered for example when someone is trying
2876	 * to push items into this leaf when trying to insert an item in a
2877	 * neighbour leaf).
2878	 * We also need the private clone because holding a read lock on an
2879	 * extent buffer of the subvolume's b+tree will make lockdep unhappy
2880	 * when we check if extents are shared, as backref walking may need to
2881	 * lock the same leaf we are processing.
2882	 */
2883	clone = btrfs_clone_extent_buffer(path->nodes[0]);
2884	if (!clone)
2885		return -ENOMEM;
2886
2887	slot = path->slots[0];
2888	btrfs_release_path(path);
2889	path->nodes[0] = clone;
2890	path->slots[0] = slot;
2891
2892	return 0;
2893}
2894
2895/*
2896 * Process a range which is a hole or a prealloc extent in the inode's subvolume
2897 * btree. If @disk_bytenr is 0, we are dealing with a hole, otherwise a prealloc
2898 * extent. The end offset (@end) is inclusive.
2899 */
2900static int fiemap_process_hole(struct btrfs_inode *inode,
2901			       struct fiemap_extent_info *fieinfo,
2902			       struct fiemap_cache *cache,
2903			       struct extent_state **delalloc_cached_state,
2904			       struct btrfs_backref_share_check_ctx *backref_ctx,
2905			       u64 disk_bytenr, u64 extent_offset,
2906			       u64 extent_gen,
2907			       u64 start, u64 end)
2908{
2909	const u64 i_size = i_size_read(&inode->vfs_inode);
2910	u64 cur_offset = start;
2911	u64 last_delalloc_end = 0;
2912	u32 prealloc_flags = FIEMAP_EXTENT_UNWRITTEN;
2913	bool checked_extent_shared = false;
2914	int ret;
2915
2916	/*
2917	 * There can be no delalloc past i_size, so don't waste time looking for
2918	 * it beyond i_size.
2919	 */
2920	while (cur_offset < end && cur_offset < i_size) {
2921		u64 delalloc_start;
2922		u64 delalloc_end;
2923		u64 prealloc_start;
2924		u64 prealloc_len = 0;
2925		bool delalloc;
2926
2927		delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
2928							delalloc_cached_state,
2929							&delalloc_start,
2930							&delalloc_end);
2931		if (!delalloc)
2932			break;
2933
2934		/*
2935		 * If this is a prealloc extent we have to report every section
2936		 * of it that has no delalloc.
2937		 */
2938		if (disk_bytenr != 0) {
2939			if (last_delalloc_end == 0) {
2940				prealloc_start = start;
2941				prealloc_len = delalloc_start - start;
2942			} else {
2943				prealloc_start = last_delalloc_end + 1;
2944				prealloc_len = delalloc_start - prealloc_start;
2945			}
2946		}
2947
2948		if (prealloc_len > 0) {
2949			if (!checked_extent_shared && fieinfo->fi_extents_max) {
2950				ret = btrfs_is_data_extent_shared(inode,
2951								  disk_bytenr,
2952								  extent_gen,
2953								  backref_ctx);
2954				if (ret < 0)
2955					return ret;
2956				else if (ret > 0)
2957					prealloc_flags |= FIEMAP_EXTENT_SHARED;
2958
2959				checked_extent_shared = true;
2960			}
2961			ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2962						 disk_bytenr + extent_offset,
2963						 prealloc_len, prealloc_flags);
2964			if (ret)
2965				return ret;
2966			extent_offset += prealloc_len;
2967		}
2968
2969		ret = emit_fiemap_extent(fieinfo, cache, delalloc_start, 0,
2970					 delalloc_end + 1 - delalloc_start,
2971					 FIEMAP_EXTENT_DELALLOC |
2972					 FIEMAP_EXTENT_UNKNOWN);
2973		if (ret)
2974			return ret;
2975
2976		last_delalloc_end = delalloc_end;
2977		cur_offset = delalloc_end + 1;
2978		extent_offset += cur_offset - delalloc_start;
2979		cond_resched();
2980	}
2981
2982	/*
2983	 * Either we found no delalloc for the whole prealloc extent or we have
2984	 * a prealloc extent that spans i_size or starts at or after i_size.
2985	 */
2986	if (disk_bytenr != 0 && last_delalloc_end < end) {
2987		u64 prealloc_start;
2988		u64 prealloc_len;
2989
2990		if (last_delalloc_end == 0) {
2991			prealloc_start = start;
2992			prealloc_len = end + 1 - start;
2993		} else {
2994			prealloc_start = last_delalloc_end + 1;
2995			prealloc_len = end + 1 - prealloc_start;
2996		}
2997
2998		if (!checked_extent_shared && fieinfo->fi_extents_max) {
2999			ret = btrfs_is_data_extent_shared(inode,
3000							  disk_bytenr,
3001							  extent_gen,
3002							  backref_ctx);
3003			if (ret < 0)
3004				return ret;
3005			else if (ret > 0)
3006				prealloc_flags |= FIEMAP_EXTENT_SHARED;
3007		}
3008		ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
3009					 disk_bytenr + extent_offset,
3010					 prealloc_len, prealloc_flags);
3011		if (ret)
3012			return ret;
3013	}
3014
3015	return 0;
3016}
3017
3018static int fiemap_find_last_extent_offset(struct btrfs_inode *inode,
3019					  struct btrfs_path *path,
3020					  u64 *last_extent_end_ret)
3021{
3022	const u64 ino = btrfs_ino(inode);
3023	struct btrfs_root *root = inode->root;
3024	struct extent_buffer *leaf;
3025	struct btrfs_file_extent_item *ei;
3026	struct btrfs_key key;
3027	u64 disk_bytenr;
3028	int ret;
3029
3030	/*
3031	 * Lookup the last file extent. We're not using i_size here because
3032	 * there might be preallocation past i_size.
3033	 */
3034	ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0);
3035	/* There can't be a file extent item at offset (u64)-1 */
3036	ASSERT(ret != 0);
3037	if (ret < 0)
3038		return ret;
3039
3040	/*
3041	 * For a non-existing key, btrfs_search_slot() always leaves us at a
3042	 * slot > 0, except if the btree is empty, which is impossible because
3043	 * at least it has the inode item for this inode and all the items for
3044	 * the root inode 256.
3045	 */
3046	ASSERT(path->slots[0] > 0);
3047	path->slots[0]--;
3048	leaf = path->nodes[0];
3049	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3050	if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
3051		/* No file extent items in the subvolume tree. */
3052		*last_extent_end_ret = 0;
3053		return 0;
3054	}
3055
3056	/*
3057	 * For an inline extent, the disk_bytenr is where inline data starts at,
3058	 * so first check if we have an inline extent item before checking if we
3059	 * have an implicit hole (disk_bytenr == 0).
3060	 */
3061	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
3062	if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
3063		*last_extent_end_ret = btrfs_file_extent_end(path);
3064		return 0;
3065	}
3066
3067	/*
3068	 * Find the last file extent item that is not a hole (when NO_HOLES is
3069	 * not enabled). This should take at most 2 iterations in the worst
3070	 * case: we have one hole file extent item at slot 0 of a leaf and
3071	 * another hole file extent item as the last item in the previous leaf.
3072	 * This is because we merge file extent items that represent holes.
3073	 */
3074	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3075	while (disk_bytenr == 0) {
3076		ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
3077		if (ret < 0) {
3078			return ret;
3079		} else if (ret > 0) {
3080			/* No file extent items that are not holes. */
3081			*last_extent_end_ret = 0;
3082			return 0;
3083		}
3084		leaf = path->nodes[0];
3085		ei = btrfs_item_ptr(leaf, path->slots[0],
3086				    struct btrfs_file_extent_item);
3087		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3088	}
3089
3090	*last_extent_end_ret = btrfs_file_extent_end(path);
3091	return 0;
3092}
3093
3094int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
3095		  u64 start, u64 len)
3096{
3097	const u64 ino = btrfs_ino(inode);
3098	struct extent_state *cached_state = NULL;
3099	struct extent_state *delalloc_cached_state = NULL;
3100	struct btrfs_path *path;
3101	struct fiemap_cache cache = { 0 };
3102	struct btrfs_backref_share_check_ctx *backref_ctx;
3103	u64 last_extent_end;
3104	u64 prev_extent_end;
3105	u64 range_start;
3106	u64 range_end;
3107	const u64 sectorsize = inode->root->fs_info->sectorsize;
3108	bool stopped = false;
3109	int ret;
3110
3111	cache.entries_size = PAGE_SIZE / sizeof(struct btrfs_fiemap_entry);
3112	cache.entries = kmalloc_array(cache.entries_size,
3113				      sizeof(struct btrfs_fiemap_entry),
3114				      GFP_KERNEL);
3115	backref_ctx = btrfs_alloc_backref_share_check_ctx();
3116	path = btrfs_alloc_path();
3117	if (!cache.entries || !backref_ctx || !path) {
3118		ret = -ENOMEM;
3119		goto out;
3120	}
3121
3122restart:
3123	range_start = round_down(start, sectorsize);
3124	range_end = round_up(start + len, sectorsize);
3125	prev_extent_end = range_start;
3126
3127	lock_extent(&inode->io_tree, range_start, range_end, &cached_state);
3128
3129	ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
3130	if (ret < 0)
3131		goto out_unlock;
3132	btrfs_release_path(path);
3133
3134	path->reada = READA_FORWARD;
3135	ret = fiemap_search_slot(inode, path, range_start);
3136	if (ret < 0) {
3137		goto out_unlock;
3138	} else if (ret > 0) {
3139		/*
3140		 * No file extent item found, but we may have delalloc between
3141		 * the current offset and i_size. So check for that.
3142		 */
3143		ret = 0;
3144		goto check_eof_delalloc;
3145	}
3146
3147	while (prev_extent_end < range_end) {
3148		struct extent_buffer *leaf = path->nodes[0];
3149		struct btrfs_file_extent_item *ei;
3150		struct btrfs_key key;
3151		u64 extent_end;
3152		u64 extent_len;
3153		u64 extent_offset = 0;
3154		u64 extent_gen;
3155		u64 disk_bytenr = 0;
3156		u64 flags = 0;
3157		int extent_type;
3158		u8 compression;
3159
3160		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3161		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3162			break;
3163
3164		extent_end = btrfs_file_extent_end(path);
3165
3166		/*
3167		 * The first iteration can leave us at an extent item that ends
3168		 * before our range's start. Move to the next item.
3169		 */
3170		if (extent_end <= range_start)
3171			goto next_item;
3172
3173		backref_ctx->curr_leaf_bytenr = leaf->start;
3174
3175		/* We have in implicit hole (NO_HOLES feature enabled). */
3176		if (prev_extent_end < key.offset) {
3177			const u64 hole_end = min(key.offset, range_end) - 1;
3178
3179			ret = fiemap_process_hole(inode, fieinfo, &cache,
3180						  &delalloc_cached_state,
3181						  backref_ctx, 0, 0, 0,
3182						  prev_extent_end, hole_end);
3183			if (ret < 0) {
3184				goto out_unlock;
3185			} else if (ret > 0) {
3186				/* fiemap_fill_next_extent() told us to stop. */
3187				stopped = true;
3188				break;
3189			}
3190
3191			/* We've reached the end of the fiemap range, stop. */
3192			if (key.offset >= range_end) {
3193				stopped = true;
3194				break;
3195			}
3196		}
3197
3198		extent_len = extent_end - key.offset;
3199		ei = btrfs_item_ptr(leaf, path->slots[0],
3200				    struct btrfs_file_extent_item);
3201		compression = btrfs_file_extent_compression(leaf, ei);
3202		extent_type = btrfs_file_extent_type(leaf, ei);
3203		extent_gen = btrfs_file_extent_generation(leaf, ei);
3204
3205		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3206			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3207			if (compression == BTRFS_COMPRESS_NONE)
3208				extent_offset = btrfs_file_extent_offset(leaf, ei);
3209		}
3210
3211		if (compression != BTRFS_COMPRESS_NONE)
3212			flags |= FIEMAP_EXTENT_ENCODED;
3213
3214		if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3215			flags |= FIEMAP_EXTENT_DATA_INLINE;
3216			flags |= FIEMAP_EXTENT_NOT_ALIGNED;
3217			ret = emit_fiemap_extent(fieinfo, &cache, key.offset, 0,
3218						 extent_len, flags);
3219		} else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
3220			ret = fiemap_process_hole(inode, fieinfo, &cache,
3221						  &delalloc_cached_state,
3222						  backref_ctx,
3223						  disk_bytenr, extent_offset,
3224						  extent_gen, key.offset,
3225						  extent_end - 1);
3226		} else if (disk_bytenr == 0) {
3227			/* We have an explicit hole. */
3228			ret = fiemap_process_hole(inode, fieinfo, &cache,
3229						  &delalloc_cached_state,
3230						  backref_ctx, 0, 0, 0,
3231						  key.offset, extent_end - 1);
3232		} else {
3233			/* We have a regular extent. */
3234			if (fieinfo->fi_extents_max) {
3235				ret = btrfs_is_data_extent_shared(inode,
3236								  disk_bytenr,
3237								  extent_gen,
3238								  backref_ctx);
3239				if (ret < 0)
3240					goto out_unlock;
3241				else if (ret > 0)
3242					flags |= FIEMAP_EXTENT_SHARED;
3243			}
3244
3245			ret = emit_fiemap_extent(fieinfo, &cache, key.offset,
3246						 disk_bytenr + extent_offset,
3247						 extent_len, flags);
3248		}
3249
3250		if (ret < 0) {
3251			goto out_unlock;
3252		} else if (ret > 0) {
3253			/* emit_fiemap_extent() told us to stop. */
3254			stopped = true;
3255			break;
3256		}
3257
3258		prev_extent_end = extent_end;
3259next_item:
3260		if (fatal_signal_pending(current)) {
3261			ret = -EINTR;
3262			goto out_unlock;
3263		}
3264
3265		ret = fiemap_next_leaf_item(inode, path);
3266		if (ret < 0) {
3267			goto out_unlock;
3268		} else if (ret > 0) {
3269			/* No more file extent items for this inode. */
3270			break;
3271		}
3272		cond_resched();
3273	}
3274
3275check_eof_delalloc:
3276	if (!stopped && prev_extent_end < range_end) {
3277		ret = fiemap_process_hole(inode, fieinfo, &cache,
3278					  &delalloc_cached_state, backref_ctx,
3279					  0, 0, 0, prev_extent_end, range_end - 1);
3280		if (ret < 0)
3281			goto out_unlock;
3282		prev_extent_end = range_end;
3283	}
3284
3285	if (cache.cached && cache.offset + cache.len >= last_extent_end) {
3286		const u64 i_size = i_size_read(&inode->vfs_inode);
3287
3288		if (prev_extent_end < i_size) {
3289			u64 delalloc_start;
3290			u64 delalloc_end;
3291			bool delalloc;
3292
3293			delalloc = btrfs_find_delalloc_in_range(inode,
3294								prev_extent_end,
3295								i_size - 1,
3296								&delalloc_cached_state,
3297								&delalloc_start,
3298								&delalloc_end);
3299			if (!delalloc)
3300				cache.flags |= FIEMAP_EXTENT_LAST;
3301		} else {
3302			cache.flags |= FIEMAP_EXTENT_LAST;
3303		}
3304	}
3305
3306out_unlock:
3307	unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);
3308
3309	if (ret == BTRFS_FIEMAP_FLUSH_CACHE) {
3310		btrfs_release_path(path);
3311		ret = flush_fiemap_cache(fieinfo, &cache);
3312		if (ret)
3313			goto out;
3314		len -= cache.next_search_offset - start;
3315		start = cache.next_search_offset;
3316		goto restart;
3317	} else if (ret < 0) {
3318		goto out;
3319	}
3320
3321	/*
3322	 * Must free the path before emitting to the fiemap buffer because we
3323	 * may have a non-cloned leaf and if the fiemap buffer is memory mapped
3324	 * to a file, a write into it (through btrfs_page_mkwrite()) may trigger
3325	 * waiting for an ordered extent that in order to complete needs to
3326	 * modify that leaf, therefore leading to a deadlock.
3327	 */
3328	btrfs_free_path(path);
3329	path = NULL;
3330
3331	ret = flush_fiemap_cache(fieinfo, &cache);
3332	if (ret)
3333		goto out;
3334
3335	ret = emit_last_fiemap_cache(fieinfo, &cache);
3336out:
3337	free_extent_state(delalloc_cached_state);
3338	kfree(cache.entries);
3339	btrfs_free_backref_share_ctx(backref_ctx);
3340	btrfs_free_path(path);
3341	return ret;
3342}
3343
3344static void __free_extent_buffer(struct extent_buffer *eb)
3345{
3346	kmem_cache_free(extent_buffer_cache, eb);
3347}
3348
3349static int extent_buffer_under_io(const struct extent_buffer *eb)
3350{
3351	return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
3352		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3353}
3354
3355static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *folio)
3356{
3357	struct btrfs_subpage *subpage;
3358
3359	lockdep_assert_held(&folio->mapping->i_private_lock);
3360
3361	if (folio_test_private(folio)) {
3362		subpage = folio_get_private(folio);
3363		if (atomic_read(&subpage->eb_refs))
3364			return true;
3365		/*
3366		 * Even there is no eb refs here, we may still have
3367		 * end_page_read() call relying on page::private.
3368		 */
3369		if (atomic_read(&subpage->readers))
3370			return true;
3371	}
3372	return false;
3373}
3374
3375static void detach_extent_buffer_folio(struct extent_buffer *eb, struct folio *folio)
3376{
3377	struct btrfs_fs_info *fs_info = eb->fs_info;
3378	const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3379
3380	/*
3381	 * For mapped eb, we're going to change the folio private, which should
3382	 * be done under the i_private_lock.
3383	 */
3384	if (mapped)
3385		spin_lock(&folio->mapping->i_private_lock);
3386
3387	if (!folio_test_private(folio)) {
3388		if (mapped)
3389			spin_unlock(&folio->mapping->i_private_lock);
3390		return;
3391	}
3392
3393	if (fs_info->nodesize >= PAGE_SIZE) {
3394		/*
3395		 * We do this since we'll remove the pages after we've
3396		 * removed the eb from the radix tree, so we could race
3397		 * and have this page now attached to the new eb.  So
3398		 * only clear folio if it's still connected to
3399		 * this eb.
3400		 */
3401		if (folio_test_private(folio) && folio_get_private(folio) == eb) {
3402			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3403			BUG_ON(folio_test_dirty(folio));
3404			BUG_ON(folio_test_writeback(folio));
3405			/* We need to make sure we haven't be attached to a new eb. */
3406			folio_detach_private(folio);
3407		}
3408		if (mapped)
3409			spin_unlock(&folio->mapping->i_private_lock);
3410		return;
3411	}
3412
3413	/*
3414	 * For subpage, we can have dummy eb with folio private attached.  In
3415	 * this case, we can directly detach the private as such folio is only
3416	 * attached to one dummy eb, no sharing.
3417	 */
3418	if (!mapped) {
3419		btrfs_detach_subpage(fs_info, folio);
3420		return;
3421	}
3422
3423	btrfs_folio_dec_eb_refs(fs_info, folio);
3424
3425	/*
3426	 * We can only detach the folio private if there are no other ebs in the
3427	 * page range and no unfinished IO.
3428	 */
3429	if (!folio_range_has_eb(fs_info, folio))
3430		btrfs_detach_subpage(fs_info, folio);
3431
3432	spin_unlock(&folio->mapping->i_private_lock);
3433}
3434
3435/* Release all pages attached to the extent buffer */
3436static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
3437{
3438	ASSERT(!extent_buffer_under_io(eb));
3439
3440	for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
3441		struct folio *folio = eb->folios[i];
3442
3443		if (!folio)
3444			continue;
3445
3446		detach_extent_buffer_folio(eb, folio);
3447
3448		/* One for when we allocated the folio. */
3449		folio_put(folio);
3450	}
3451}
3452
3453/*
3454 * Helper for releasing the extent buffer.
3455 */
3456static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3457{
3458	btrfs_release_extent_buffer_pages(eb);
3459	btrfs_leak_debug_del_eb(eb);
3460	__free_extent_buffer(eb);
3461}
3462
3463static struct extent_buffer *
3464__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
3465		      unsigned long len)
3466{
3467	struct extent_buffer *eb = NULL;
3468
3469	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
3470	eb->start = start;
3471	eb->len = len;
3472	eb->fs_info = fs_info;
3473	init_rwsem(&eb->lock);
3474
3475	btrfs_leak_debug_add_eb(eb);
3476
3477	spin_lock_init(&eb->refs_lock);
3478	atomic_set(&eb->refs, 1);
3479
3480	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
3481
3482	return eb;
3483}
3484
3485struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
3486{
3487	struct extent_buffer *new;
3488	int num_folios = num_extent_folios(src);
3489	int ret;
3490
3491	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
3492	if (new == NULL)
3493		return NULL;
3494
3495	/*
3496	 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
3497	 * btrfs_release_extent_buffer() have different behavior for
3498	 * UNMAPPED subpage extent buffer.
3499	 */
3500	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
3501
3502	ret = alloc_eb_folio_array(new, 0);
3503	if (ret) {
3504		btrfs_release_extent_buffer(new);
3505		return NULL;
3506	}
3507
3508	for (int i = 0; i < num_folios; i++) {
3509		struct folio *folio = new->folios[i];
3510		int ret;
3511
3512		ret = attach_extent_buffer_folio(new, folio, NULL);
3513		if (ret < 0) {
3514			btrfs_release_extent_buffer(new);
3515			return NULL;
3516		}
3517		WARN_ON(folio_test_dirty(folio));
3518	}
3519	copy_extent_buffer_full(new, src);
3520	set_extent_buffer_uptodate(new);
3521
3522	return new;
3523}
3524
3525struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3526						  u64 start, unsigned long len)
3527{
3528	struct extent_buffer *eb;
3529	int num_folios = 0;
3530	int ret;
3531
3532	eb = __alloc_extent_buffer(fs_info, start, len);
3533	if (!eb)
3534		return NULL;
3535
3536	ret = alloc_eb_folio_array(eb, 0);
3537	if (ret)
3538		goto err;
3539
3540	num_folios = num_extent_folios(eb);
3541	for (int i = 0; i < num_folios; i++) {
3542		ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
3543		if (ret < 0)
3544			goto err;
3545	}
3546
3547	set_extent_buffer_uptodate(eb);
3548	btrfs_set_header_nritems(eb, 0);
3549	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3550
3551	return eb;
3552err:
3553	for (int i = 0; i < num_folios; i++) {
3554		if (eb->folios[i]) {
3555			detach_extent_buffer_folio(eb, eb->folios[i]);
3556			__folio_put(eb->folios[i]);
3557		}
3558	}
3559	__free_extent_buffer(eb);
3560	return NULL;
3561}
3562
3563struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3564						u64 start)
3565{
3566	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
3567}
3568
3569static void check_buffer_tree_ref(struct extent_buffer *eb)
3570{
3571	int refs;
3572	/*
3573	 * The TREE_REF bit is first set when the extent_buffer is added
3574	 * to the radix tree. It is also reset, if unset, when a new reference
3575	 * is created by find_extent_buffer.
3576	 *
3577	 * It is only cleared in two cases: freeing the last non-tree
3578	 * reference to the extent_buffer when its STALE bit is set or
3579	 * calling release_folio when the tree reference is the only reference.
3580	 *
3581	 * In both cases, care is taken to ensure that the extent_buffer's
3582	 * pages are not under io. However, release_folio can be concurrently
3583	 * called with creating new references, which is prone to race
3584	 * conditions between the calls to check_buffer_tree_ref in those
3585	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
3586	 *
3587	 * The actual lifetime of the extent_buffer in the radix tree is
3588	 * adequately protected by the refcount, but the TREE_REF bit and
3589	 * its corresponding reference are not. To protect against this
3590	 * class of races, we call check_buffer_tree_ref from the codepaths
3591	 * which trigger io. Note that once io is initiated, TREE_REF can no
3592	 * longer be cleared, so that is the moment at which any such race is
3593	 * best fixed.
3594	 */
3595	refs = atomic_read(&eb->refs);
3596	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3597		return;
3598
3599	spin_lock(&eb->refs_lock);
3600	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3601		atomic_inc(&eb->refs);
3602	spin_unlock(&eb->refs_lock);
3603}
3604
3605static void mark_extent_buffer_accessed(struct extent_buffer *eb)
3606{
3607	int num_folios= num_extent_folios(eb);
3608
3609	check_buffer_tree_ref(eb);
3610
3611	for (int i = 0; i < num_folios; i++)
3612		folio_mark_accessed(eb->folios[i]);
3613}
3614
3615struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
3616					 u64 start)
3617{
3618	struct extent_buffer *eb;
3619
3620	eb = find_extent_buffer_nolock(fs_info, start);
3621	if (!eb)
3622		return NULL;
3623	/*
3624	 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
3625	 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
3626	 * another task running free_extent_buffer() might have seen that flag
3627	 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
3628	 * writeback flags not set) and it's still in the tree (flag
3629	 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
3630	 * decrementing the extent buffer's reference count twice.  So here we
3631	 * could race and increment the eb's reference count, clear its stale
3632	 * flag, mark it as dirty and drop our reference before the other task
3633	 * finishes executing free_extent_buffer, which would later result in
3634	 * an attempt to free an extent buffer that is dirty.
3635	 */
3636	if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
3637		spin_lock(&eb->refs_lock);
3638		spin_unlock(&eb->refs_lock);
3639	}
3640	mark_extent_buffer_accessed(eb);
3641	return eb;
3642}
3643
3644#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3645struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
3646					u64 start)
3647{
3648	struct extent_buffer *eb, *exists = NULL;
3649	int ret;
3650
3651	eb = find_extent_buffer(fs_info, start);
3652	if (eb)
3653		return eb;
3654	eb = alloc_dummy_extent_buffer(fs_info, start);
3655	if (!eb)
3656		return ERR_PTR(-ENOMEM);
3657	eb->fs_info = fs_info;
3658again:
3659	ret = radix_tree_preload(GFP_NOFS);
3660	if (ret) {
3661		exists = ERR_PTR(ret);
3662		goto free_eb;
3663	}
3664	spin_lock(&fs_info->buffer_lock);
3665	ret = radix_tree_insert(&fs_info->buffer_radix,
3666				start >> fs_info->sectorsize_bits, eb);
3667	spin_unlock(&fs_info->buffer_lock);
3668	radix_tree_preload_end();
3669	if (ret == -EEXIST) {
3670		exists = find_extent_buffer(fs_info, start);
3671		if (exists)
3672			goto free_eb;
3673		else
3674			goto again;
3675	}
3676	check_buffer_tree_ref(eb);
3677	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3678
3679	return eb;
3680free_eb:
3681	btrfs_release_extent_buffer(eb);
3682	return exists;
3683}
3684#endif
3685
3686static struct extent_buffer *grab_extent_buffer(
3687		struct btrfs_fs_info *fs_info, struct page *page)
3688{
3689	struct folio *folio = page_folio(page);
3690	struct extent_buffer *exists;
3691
3692	/*
3693	 * For subpage case, we completely rely on radix tree to ensure we
3694	 * don't try to insert two ebs for the same bytenr.  So here we always
3695	 * return NULL and just continue.
3696	 */
3697	if (fs_info->nodesize < PAGE_SIZE)
3698		return NULL;
3699
3700	/* Page not yet attached to an extent buffer */
3701	if (!folio_test_private(folio))
3702		return NULL;
3703
3704	/*
3705	 * We could have already allocated an eb for this page and attached one
3706	 * so lets see if we can get a ref on the existing eb, and if we can we
3707	 * know it's good and we can just return that one, else we know we can
3708	 * just overwrite folio private.
3709	 */
3710	exists = folio_get_private(folio);
3711	if (atomic_inc_not_zero(&exists->refs))
3712		return exists;
3713
3714	WARN_ON(PageDirty(page));
3715	folio_detach_private(folio);
3716	return NULL;
3717}
3718
3719static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
3720{
3721	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
3722		btrfs_err(fs_info, "bad tree block start %llu", start);
3723		return -EINVAL;
3724	}
3725
3726	if (fs_info->nodesize < PAGE_SIZE &&
3727	    offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
3728		btrfs_err(fs_info,
3729		"tree block crosses page boundary, start %llu nodesize %u",
3730			  start, fs_info->nodesize);
3731		return -EINVAL;
3732	}
3733	if (fs_info->nodesize >= PAGE_SIZE &&
3734	    !PAGE_ALIGNED(start)) {
3735		btrfs_err(fs_info,
3736		"tree block is not page aligned, start %llu nodesize %u",
3737			  start, fs_info->nodesize);
3738		return -EINVAL;
3739	}
3740	if (!IS_ALIGNED(start, fs_info->nodesize) &&
3741	    !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
3742		btrfs_warn(fs_info,
3743"tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
3744			      start, fs_info->nodesize);
3745	}
3746	return 0;
3747}
3748
3749
3750/*
3751 * Return 0 if eb->folios[i] is attached to btree inode successfully.
3752 * Return >0 if there is already another extent buffer for the range,
3753 * and @found_eb_ret would be updated.
3754 * Return -EAGAIN if the filemap has an existing folio but with different size
3755 * than @eb.
3756 * The caller needs to free the existing folios and retry using the same order.
3757 */
3758static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
3759				      struct extent_buffer **found_eb_ret)
3760{
3761
3762	struct btrfs_fs_info *fs_info = eb->fs_info;
3763	struct address_space *mapping = fs_info->btree_inode->i_mapping;
3764	const unsigned long index = eb->start >> PAGE_SHIFT;
3765	struct folio *existing_folio;
3766	int ret;
3767
3768	ASSERT(found_eb_ret);
3769
3770	/* Caller should ensure the folio exists. */
3771	ASSERT(eb->folios[i]);
3772
3773retry:
3774	ret = filemap_add_folio(mapping, eb->folios[i], index + i,
3775				GFP_NOFS | __GFP_NOFAIL);
3776	if (!ret)
3777		return 0;
3778
3779	existing_folio = filemap_lock_folio(mapping, index + i);
3780	/* The page cache only exists for a very short time, just retry. */
3781	if (IS_ERR(existing_folio))
3782		goto retry;
3783
3784	/* For now, we should only have single-page folios for btree inode. */
3785	ASSERT(folio_nr_pages(existing_folio) == 1);
3786
3787	if (folio_size(existing_folio) != eb->folio_size) {
3788		folio_unlock(existing_folio);
3789		folio_put(existing_folio);
3790		return -EAGAIN;
3791	}
3792
3793	if (fs_info->nodesize < PAGE_SIZE) {
3794		/*
3795		 * We're going to reuse the existing page, can drop our page
3796		 * and subpage structure now.
3797		 */
3798		__free_page(folio_page(eb->folios[i], 0));
3799		eb->folios[i] = existing_folio;
3800	} else {
3801		struct extent_buffer *existing_eb;
3802
3803		existing_eb = grab_extent_buffer(fs_info,
3804						 folio_page(existing_folio, 0));
3805		if (existing_eb) {
3806			/* The extent buffer still exists, we can use it directly. */
3807			*found_eb_ret = existing_eb;
3808			folio_unlock(existing_folio);
3809			folio_put(existing_folio);
3810			return 1;
3811		}
3812		/* The extent buffer no longer exists, we can reuse the folio. */
3813		__free_page(folio_page(eb->folios[i], 0));
3814		eb->folios[i] = existing_folio;
3815	}
3816	return 0;
3817}
3818
3819struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
3820					  u64 start, u64 owner_root, int level)
3821{
3822	unsigned long len = fs_info->nodesize;
3823	int num_folios;
3824	int attached = 0;
3825	struct extent_buffer *eb;
3826	struct extent_buffer *existing_eb = NULL;
3827	struct address_space *mapping = fs_info->btree_inode->i_mapping;
3828	struct btrfs_subpage *prealloc = NULL;
3829	u64 lockdep_owner = owner_root;
3830	bool page_contig = true;
3831	int uptodate = 1;
3832	int ret;
3833
3834	if (check_eb_alignment(fs_info, start))
3835		return ERR_PTR(-EINVAL);
3836
3837#if BITS_PER_LONG == 32
3838	if (start >= MAX_LFS_FILESIZE) {
3839		btrfs_err_rl(fs_info,
3840		"extent buffer %llu is beyond 32bit page cache limit", start);
3841		btrfs_err_32bit_limit(fs_info);
3842		return ERR_PTR(-EOVERFLOW);
3843	}
3844	if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
3845		btrfs_warn_32bit_limit(fs_info);
3846#endif
3847
3848	eb = find_extent_buffer(fs_info, start);
3849	if (eb)
3850		return eb;
3851
3852	eb = __alloc_extent_buffer(fs_info, start, len);
3853	if (!eb)
3854		return ERR_PTR(-ENOMEM);
3855
3856	/*
3857	 * The reloc trees are just snapshots, so we need them to appear to be
3858	 * just like any other fs tree WRT lockdep.
3859	 */
3860	if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
3861		lockdep_owner = BTRFS_FS_TREE_OBJECTID;
3862
3863	btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
3864
3865	/*
3866	 * Preallocate folio private for subpage case, so that we won't
3867	 * allocate memory with i_private_lock nor page lock hold.
3868	 *
3869	 * The memory will be freed by attach_extent_buffer_page() or freed
3870	 * manually if we exit earlier.
3871	 */
3872	if (fs_info->nodesize < PAGE_SIZE) {
3873		prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
3874		if (IS_ERR(prealloc)) {
3875			ret = PTR_ERR(prealloc);
3876			goto out;
3877		}
3878	}
3879
3880reallocate:
3881	/* Allocate all pages first. */
3882	ret = alloc_eb_folio_array(eb, __GFP_NOFAIL);
3883	if (ret < 0) {
3884		btrfs_free_subpage(prealloc);
3885		goto out;
3886	}
3887
3888	num_folios = num_extent_folios(eb);
3889	/* Attach all pages to the filemap. */
3890	for (int i = 0; i < num_folios; i++) {
3891		struct folio *folio;
3892
3893		ret = attach_eb_folio_to_filemap(eb, i, &existing_eb);
3894		if (ret > 0) {
3895			ASSERT(existing_eb);
3896			goto out;
3897		}
3898
3899		/*
3900		 * TODO: Special handling for a corner case where the order of
3901		 * folios mismatch between the new eb and filemap.
3902		 *
3903		 * This happens when:
3904		 *
3905		 * - the new eb is using higher order folio
3906		 *
3907		 * - the filemap is still using 0-order folios for the range
3908		 *   This can happen at the previous eb allocation, and we don't
3909		 *   have higher order folio for the call.
3910		 *
3911		 * - the existing eb has already been freed
3912		 *
3913		 * In this case, we have to free the existing folios first, and
3914		 * re-allocate using the same order.
3915		 * Thankfully this is not going to happen yet, as we're still
3916		 * using 0-order folios.
3917		 */
3918		if (unlikely(ret == -EAGAIN)) {
3919			ASSERT(0);
3920			goto reallocate;
3921		}
3922		attached++;
3923
3924		/*
3925		 * Only after attach_eb_folio_to_filemap(), eb->folios[] is
3926		 * reliable, as we may choose to reuse the existing page cache
3927		 * and free the allocated page.
3928		 */
3929		folio = eb->folios[i];
3930		eb->folio_size = folio_size(folio);
3931		eb->folio_shift = folio_shift(folio);
3932		spin_lock(&mapping->i_private_lock);
3933		/* Should not fail, as we have preallocated the memory */
3934		ret = attach_extent_buffer_folio(eb, folio, prealloc);
3935		ASSERT(!ret);
3936		/*
3937		 * To inform we have extra eb under allocation, so that
3938		 * detach_extent_buffer_page() won't release the folio private
3939		 * when the eb hasn't yet been inserted into radix tree.
3940		 *
3941		 * The ref will be decreased when the eb released the page, in
3942		 * detach_extent_buffer_page().
3943		 * Thus needs no special handling in error path.
3944		 */
3945		btrfs_folio_inc_eb_refs(fs_info, folio);
3946		spin_unlock(&mapping->i_private_lock);
3947
3948		WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
3949
3950		/*
3951		 * Check if the current page is physically contiguous with previous eb
3952		 * page.
3953		 * At this stage, either we allocated a large folio, thus @i
3954		 * would only be 0, or we fall back to per-page allocation.
3955		 */
3956		if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
3957			page_contig = false;
3958
3959		if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
3960			uptodate = 0;
3961
3962		/*
3963		 * We can't unlock the pages just yet since the extent buffer
3964		 * hasn't been properly inserted in the radix tree, this
3965		 * opens a race with btree_release_folio which can free a page
3966		 * while we are still filling in all pages for the buffer and
3967		 * we could crash.
3968		 */
3969	}
3970	if (uptodate)
3971		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3972	/* All pages are physically contiguous, can skip cross page handling. */
3973	if (page_contig)
3974		eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
3975again:
3976	ret = radix_tree_preload(GFP_NOFS);
3977	if (ret)
3978		goto out;
3979
3980	spin_lock(&fs_info->buffer_lock);
3981	ret = radix_tree_insert(&fs_info->buffer_radix,
3982				start >> fs_info->sectorsize_bits, eb);
3983	spin_unlock(&fs_info->buffer_lock);
3984	radix_tree_preload_end();
3985	if (ret == -EEXIST) {
3986		ret = 0;
3987		existing_eb = find_extent_buffer(fs_info, start);
3988		if (existing_eb)
3989			goto out;
3990		else
3991			goto again;
3992	}
3993	/* add one reference for the tree */
3994	check_buffer_tree_ref(eb);
3995	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3996
3997	/*
3998	 * Now it's safe to unlock the pages because any calls to
3999	 * btree_release_folio will correctly detect that a page belongs to a
4000	 * live buffer and won't free them prematurely.
4001	 */
4002	for (int i = 0; i < num_folios; i++)
4003		unlock_page(folio_page(eb->folios[i], 0));
4004	return eb;
4005
4006out:
4007	WARN_ON(!atomic_dec_and_test(&eb->refs));
4008
4009	/*
4010	 * Any attached folios need to be detached before we unlock them.  This
4011	 * is because when we're inserting our new folios into the mapping, and
4012	 * then attaching our eb to that folio.  If we fail to insert our folio
4013	 * we'll lookup the folio for that index, and grab that EB.  We do not
4014	 * want that to grab this eb, as we're getting ready to free it.  So we
4015	 * have to detach it first and then unlock it.
4016	 *
4017	 * We have to drop our reference and NULL it out here because in the
4018	 * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
4019	 * Below when we call btrfs_release_extent_buffer() we will call
4020	 * detach_extent_buffer_folio() on our remaining pages in the !subpage
4021	 * case.  If we left eb->folios[i] populated in the subpage case we'd
4022	 * double put our reference and be super sad.
4023	 */
4024	for (int i = 0; i < attached; i++) {
4025		ASSERT(eb->folios[i]);
4026		detach_extent_buffer_folio(eb, eb->folios[i]);
4027		unlock_page(folio_page(eb->folios[i], 0));
4028		folio_put(eb->folios[i]);
4029		eb->folios[i] = NULL;
4030	}
4031	/*
4032	 * Now all pages of that extent buffer is unmapped, set UNMAPPED flag,
4033	 * so it can be cleaned up without utlizing page->mapping.
4034	 */
4035	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4036
4037	btrfs_release_extent_buffer(eb);
4038	if (ret < 0)
4039		return ERR_PTR(ret);
4040	ASSERT(existing_eb);
4041	return existing_eb;
4042}
4043
4044static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4045{
4046	struct extent_buffer *eb =
4047			container_of(head, struct extent_buffer, rcu_head);
4048
4049	__free_extent_buffer(eb);
4050}
4051
4052static int release_extent_buffer(struct extent_buffer *eb)
4053	__releases(&eb->refs_lock)
4054{
4055	lockdep_assert_held(&eb->refs_lock);
4056
4057	WARN_ON(atomic_read(&eb->refs) == 0);
4058	if (atomic_dec_and_test(&eb->refs)) {
4059		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
4060			struct btrfs_fs_info *fs_info = eb->fs_info;
4061
4062			spin_unlock(&eb->refs_lock);
4063
4064			spin_lock(&fs_info->buffer_lock);
4065			radix_tree_delete(&fs_info->buffer_radix,
4066					  eb->start >> fs_info->sectorsize_bits);
4067			spin_unlock(&fs_info->buffer_lock);
4068		} else {
4069			spin_unlock(&eb->refs_lock);
4070		}
4071
4072		btrfs_leak_debug_del_eb(eb);
4073		/* Should be safe to release our pages at this point */
4074		btrfs_release_extent_buffer_pages(eb);
4075#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4076		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
4077			__free_extent_buffer(eb);
4078			return 1;
4079		}
4080#endif
4081		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4082		return 1;
4083	}
4084	spin_unlock(&eb->refs_lock);
4085
4086	return 0;
4087}
4088
4089void free_extent_buffer(struct extent_buffer *eb)
4090{
4091	int refs;
4092	if (!eb)
4093		return;
4094
4095	refs = atomic_read(&eb->refs);
4096	while (1) {
4097		if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
4098		    || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
4099			refs == 1))
4100			break;
4101		if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
4102			return;
4103	}
4104
4105	spin_lock(&eb->refs_lock);
4106	if (atomic_read(&eb->refs) == 2 &&
4107	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4108	    !extent_buffer_under_io(eb) &&
4109	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4110		atomic_dec(&eb->refs);
4111
4112	/*
4113	 * I know this is terrible, but it's temporary until we stop tracking
4114	 * the uptodate bits and such for the extent buffers.
4115	 */
4116	release_extent_buffer(eb);
4117}
4118
4119void free_extent_buffer_stale(struct extent_buffer *eb)
4120{
4121	if (!eb)
4122		return;
4123
4124	spin_lock(&eb->refs_lock);
4125	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4126
4127	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
4128	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4129		atomic_dec(&eb->refs);
4130	release_extent_buffer(eb);
4131}
4132
4133static void btree_clear_folio_dirty(struct folio *folio)
4134{
4135	ASSERT(folio_test_dirty(folio));
4136	ASSERT(folio_test_locked(folio));
4137	folio_clear_dirty_for_io(folio);
4138	xa_lock_irq(&folio->mapping->i_pages);
4139	if (!folio_test_dirty(folio))
4140		__xa_clear_mark(&folio->mapping->i_pages,
4141				folio_index(folio), PAGECACHE_TAG_DIRTY);
4142	xa_unlock_irq(&folio->mapping->i_pages);
4143}
4144
4145static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
4146{
4147	struct btrfs_fs_info *fs_info = eb->fs_info;
4148	struct folio *folio = eb->folios[0];
4149	bool last;
4150
4151	/* btree_clear_folio_dirty() needs page locked. */
4152	folio_lock(folio);
4153	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
4154	if (last)
4155		btree_clear_folio_dirty(folio);
4156	folio_unlock(folio);
4157	WARN_ON(atomic_read(&eb->refs) == 0);
4158}
4159
4160void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
4161			      struct extent_buffer *eb)
4162{
4163	struct btrfs_fs_info *fs_info = eb->fs_info;
4164	int num_folios;
4165
4166	btrfs_assert_tree_write_locked(eb);
4167
4168	if (trans && btrfs_header_generation(eb) != trans->transid)
4169		return;
4170
4171	/*
4172	 * Instead of clearing the dirty flag off of the buffer, mark it as
4173	 * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
4174	 * write-ordering in zoned mode, without the need to later re-dirty
4175	 * the extent_buffer.
4176	 *
4177	 * The actual zeroout of the buffer will happen later in
4178	 * btree_csum_one_bio.
4179	 */
4180	if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
4181		set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
4182		return;
4183	}
4184
4185	if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
4186		return;
4187
4188	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
4189				 fs_info->dirty_metadata_batch);
4190
4191	if (eb->fs_info->nodesize < PAGE_SIZE)
4192		return clear_subpage_extent_buffer_dirty(eb);
4193
4194	num_folios = num_extent_folios(eb);
4195	for (int i = 0; i < num_folios; i++) {
4196		struct folio *folio = eb->folios[i];
4197
4198		if (!folio_test_dirty(folio))
4199			continue;
4200		folio_lock(folio);
4201		btree_clear_folio_dirty(folio);
4202		folio_unlock(folio);
4203	}
4204	WARN_ON(atomic_read(&eb->refs) == 0);
4205}
4206
4207void set_extent_buffer_dirty(struct extent_buffer *eb)
4208{
4209	int num_folios;
4210	bool was_dirty;
4211
4212	check_buffer_tree_ref(eb);
4213
4214	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4215
4216	num_folios = num_extent_folios(eb);
4217	WARN_ON(atomic_read(&eb->refs) == 0);
4218	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4219	WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
4220
4221	if (!was_dirty) {
4222		bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
4223
4224		/*
4225		 * For subpage case, we can have other extent buffers in the
4226		 * same page, and in clear_subpage_extent_buffer_dirty() we
4227		 * have to clear page dirty without subpage lock held.
4228		 * This can cause race where our page gets dirty cleared after
4229		 * we just set it.
4230		 *
4231		 * Thankfully, clear_subpage_extent_buffer_dirty() has locked
4232		 * its page for other reasons, we can use page lock to prevent
4233		 * the above race.
4234		 */
4235		if (subpage)
4236			lock_page(folio_page(eb->folios[0], 0));
4237		for (int i = 0; i < num_folios; i++)
4238			btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
4239					      eb->start, eb->len);
4240		if (subpage)
4241			unlock_page(folio_page(eb->folios[0], 0));
4242		percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
4243					 eb->len,
4244					 eb->fs_info->dirty_metadata_batch);
4245	}
4246#ifdef CONFIG_BTRFS_DEBUG
4247	for (int i = 0; i < num_folios; i++)
4248		ASSERT(folio_test_dirty(eb->folios[i]));
4249#endif
4250}
4251
4252void clear_extent_buffer_uptodate(struct extent_buffer *eb)
4253{
4254	struct btrfs_fs_info *fs_info = eb->fs_info;
4255	int num_folios = num_extent_folios(eb);
4256
4257	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4258	for (int i = 0; i < num_folios; i++) {
4259		struct folio *folio = eb->folios[i];
4260
4261		if (!folio)
4262			continue;
4263
4264		/*
4265		 * This is special handling for metadata subpage, as regular
4266		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
4267		 */
4268		if (fs_info->nodesize >= PAGE_SIZE)
4269			folio_clear_uptodate(folio);
4270		else
4271			btrfs_subpage_clear_uptodate(fs_info, folio,
4272						     eb->start, eb->len);
4273	}
4274}
4275
4276void set_extent_buffer_uptodate(struct extent_buffer *eb)
4277{
4278	struct btrfs_fs_info *fs_info = eb->fs_info;
4279	int num_folios = num_extent_folios(eb);
4280
4281	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4282	for (int i = 0; i < num_folios; i++) {
4283		struct folio *folio = eb->folios[i];
4284
4285		/*
4286		 * This is special handling for metadata subpage, as regular
4287		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
4288		 */
4289		if (fs_info->nodesize >= PAGE_SIZE)
4290			folio_mark_uptodate(folio);
4291		else
4292			btrfs_subpage_set_uptodate(fs_info, folio,
4293						   eb->start, eb->len);
4294	}
4295}
4296
4297static void clear_extent_buffer_reading(struct extent_buffer *eb)
4298{
4299	clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
4300	smp_mb__after_atomic();
4301	wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
4302}
4303
4304static void end_bbio_meta_read(struct btrfs_bio *bbio)
4305{
4306	struct extent_buffer *eb = bbio->private;
4307	struct btrfs_fs_info *fs_info = eb->fs_info;
4308	bool uptodate = !bbio->bio.bi_status;
4309	struct folio_iter fi;
4310	u32 bio_offset = 0;
4311
4312	/*
4313	 * If the extent buffer is marked UPTODATE before the read operation
4314	 * completes, other calls to read_extent_buffer_pages() will return
4315	 * early without waiting for the read to finish, causing data races.
4316	 */
4317	WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags));
4318
4319	eb->read_mirror = bbio->mirror_num;
4320
4321	if (uptodate &&
4322	    btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
4323		uptodate = false;
4324
4325	if (uptodate) {
4326		set_extent_buffer_uptodate(eb);
4327	} else {
4328		clear_extent_buffer_uptodate(eb);
4329		set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4330	}
4331
4332	bio_for_each_folio_all(fi, &bbio->bio) {
4333		struct folio *folio = fi.folio;
4334		u64 start = eb->start + bio_offset;
4335		u32 len = fi.length;
4336
4337		if (uptodate)
4338			btrfs_folio_set_uptodate(fs_info, folio, start, len);
4339		else
4340			btrfs_folio_clear_uptodate(fs_info, folio, start, len);
4341
4342		bio_offset += len;
4343	}
4344
4345	clear_extent_buffer_reading(eb);
4346	free_extent_buffer(eb);
4347
4348	bio_put(&bbio->bio);
4349}
4350
4351int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
4352			     struct btrfs_tree_parent_check *check)
4353{
4354	struct btrfs_bio *bbio;
4355	bool ret;
4356
4357	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4358		return 0;
4359
4360	/*
4361	 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
4362	 * operation, which could potentially still be in flight.  In this case
4363	 * we simply want to return an error.
4364	 */
4365	if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
4366		return -EIO;
4367
4368	/* Someone else is already reading the buffer, just wait for it. */
4369	if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
4370		goto done;
4371
4372	/*
4373	 * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
4374	 * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
4375	 * started and finished reading the same eb.  In this case, UPTODATE
4376	 * will now be set, and we shouldn't read it in again.
4377	 */
4378	if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
4379		clear_extent_buffer_reading(eb);
4380		return 0;
4381	}
4382
4383	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4384	eb->read_mirror = 0;
4385	check_buffer_tree_ref(eb);
4386	atomic_inc(&eb->refs);
4387
4388	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
4389			       REQ_OP_READ | REQ_META, eb->fs_info,
4390			       end_bbio_meta_read, eb);
4391	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
4392	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
4393	bbio->file_offset = eb->start;
4394	memcpy(&bbio->parent_check, check, sizeof(*check));
4395	if (eb->fs_info->nodesize < PAGE_SIZE) {
4396		ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len,
4397				    eb->start - folio_pos(eb->folios[0]));
4398		ASSERT(ret);
4399	} else {
4400		int num_folios = num_extent_folios(eb);
4401
4402		for (int i = 0; i < num_folios; i++) {
4403			struct folio *folio = eb->folios[i];
4404
4405			ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
4406			ASSERT(ret);
4407		}
4408	}
4409	btrfs_submit_bio(bbio, mirror_num);
4410
4411done:
4412	if (wait == WAIT_COMPLETE) {
4413		wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
4414		if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4415			return -EIO;
4416	}
4417
4418	return 0;
4419}
4420
4421static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
4422			    unsigned long len)
4423{
4424	btrfs_warn(eb->fs_info,
4425		"access to eb bytenr %llu len %u out of range start %lu len %lu",
4426		eb->start, eb->len, start, len);
4427	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4428
4429	return true;
4430}
4431
4432/*
4433 * Check if the [start, start + len) range is valid before reading/writing
4434 * the eb.
4435 * NOTE: @start and @len are offset inside the eb, not logical address.
4436 *
4437 * Caller should not touch the dst/src memory if this function returns error.
4438 */
4439static inline int check_eb_range(const struct extent_buffer *eb,
4440				 unsigned long start, unsigned long len)
4441{
4442	unsigned long offset;
4443
4444	/* start, start + len should not go beyond eb->len nor overflow */
4445	if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
4446		return report_eb_range(eb, start, len);
4447
4448	return false;
4449}
4450
4451void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
4452			unsigned long start, unsigned long len)
4453{
4454	const int unit_size = eb->folio_size;
4455	size_t cur;
4456	size_t offset;
4457	char *dst = (char *)dstv;
4458	unsigned long i = get_eb_folio_index(eb, start);
4459
4460	if (check_eb_range(eb, start, len)) {
4461		/*
4462		 * Invalid range hit, reset the memory, so callers won't get
4463		 * some random garbage for their uninitialized memory.
4464		 */
4465		memset(dstv, 0, len);
4466		return;
4467	}
4468
4469	if (eb->addr) {
4470		memcpy(dstv, eb->addr + start, len);
4471		return;
4472	}
4473
4474	offset = get_eb_offset_in_folio(eb, start);
4475
4476	while (len > 0) {
4477		char *kaddr;
4478
4479		cur = min(len, unit_size - offset);
4480		kaddr = folio_address(eb->folios[i]);
4481		memcpy(dst, kaddr + offset, cur);
4482
4483		dst += cur;
4484		len -= cur;
4485		offset = 0;
4486		i++;
4487	}
4488}
4489
4490int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
4491				       void __user *dstv,
4492				       unsigned long start, unsigned long len)
4493{
4494	const int unit_size = eb->folio_size;
4495	size_t cur;
4496	size_t offset;
4497	char __user *dst = (char __user *)dstv;
4498	unsigned long i = get_eb_folio_index(eb, start);
4499	int ret = 0;
4500
4501	WARN_ON(start > eb->len);
4502	WARN_ON(start + len > eb->start + eb->len);
4503
4504	if (eb->addr) {
4505		if (copy_to_user_nofault(dstv, eb->addr + start, len))
4506			ret = -EFAULT;
4507		return ret;
4508	}
4509
4510	offset = get_eb_offset_in_folio(eb, start);
4511
4512	while (len > 0) {
4513		char *kaddr;
4514
4515		cur = min(len, unit_size - offset);
4516		kaddr = folio_address(eb->folios[i]);
4517		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
4518			ret = -EFAULT;
4519			break;
4520		}
4521
4522		dst += cur;
4523		len -= cur;
4524		offset = 0;
4525		i++;
4526	}
4527
4528	return ret;
4529}
4530
4531int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
4532			 unsigned long start, unsigned long len)
4533{
4534	const int unit_size = eb->folio_size;
4535	size_t cur;
4536	size_t offset;
4537	char *kaddr;
4538	char *ptr = (char *)ptrv;
4539	unsigned long i = get_eb_folio_index(eb, start);
4540	int ret = 0;
4541
4542	if (check_eb_range(eb, start, len))
4543		return -EINVAL;
4544
4545	if (eb->addr)
4546		return memcmp(ptrv, eb->addr + start, len);
4547
4548	offset = get_eb_offset_in_folio(eb, start);
4549
4550	while (len > 0) {
4551		cur = min(len, unit_size - offset);
4552		kaddr = folio_address(eb->folios[i]);
4553		ret = memcmp(ptr, kaddr + offset, cur);
4554		if (ret)
4555			break;
4556
4557		ptr += cur;
4558		len -= cur;
4559		offset = 0;
4560		i++;
4561	}
4562	return ret;
4563}
4564
4565/*
4566 * Check that the extent buffer is uptodate.
4567 *
4568 * For regular sector size == PAGE_SIZE case, check if @page is uptodate.
4569 * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
4570 */
4571static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
4572{
4573	struct btrfs_fs_info *fs_info = eb->fs_info;
4574	struct folio *folio = eb->folios[i];
4575
4576	ASSERT(folio);
4577
4578	/*
4579	 * If we are using the commit root we could potentially clear a page
4580	 * Uptodate while we're using the extent buffer that we've previously
4581	 * looked up.  We don't want to complain in this case, as the page was
4582	 * valid before, we just didn't write it out.  Instead we want to catch
4583	 * the case where we didn't actually read the block properly, which
4584	 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
4585	 */
4586	if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
4587		return;
4588
4589	if (fs_info->nodesize < PAGE_SIZE) {
4590		struct folio *folio = eb->folios[0];
4591
4592		ASSERT(i == 0);
4593		if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
4594							 eb->start, eb->len)))
4595			btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
4596	} else {
4597		WARN_ON(!folio_test_uptodate(folio));
4598	}
4599}
4600
4601static void __write_extent_buffer(const struct extent_buffer *eb,
4602				  const void *srcv, unsigned long start,
4603				  unsigned long len, bool use_memmove)
4604{
4605	const int unit_size = eb->folio_size;
4606	size_t cur;
4607	size_t offset;
4608	char *kaddr;
4609	char *src = (char *)srcv;
4610	unsigned long i = get_eb_folio_index(eb, start);
4611	/* For unmapped (dummy) ebs, no need to check their uptodate status. */
4612	const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4613
4614	if (check_eb_range(eb, start, len))
4615		return;
4616
4617	if (eb->addr) {
4618		if (use_memmove)
4619			memmove(eb->addr + start, srcv, len);
4620		else
4621			memcpy(eb->addr + start, srcv, len);
4622		return;
4623	}
4624
4625	offset = get_eb_offset_in_folio(eb, start);
4626
4627	while (len > 0) {
4628		if (check_uptodate)
4629			assert_eb_folio_uptodate(eb, i);
4630
4631		cur = min(len, unit_size - offset);
4632		kaddr = folio_address(eb->folios[i]);
4633		if (use_memmove)
4634			memmove(kaddr + offset, src, cur);
4635		else
4636			memcpy(kaddr + offset, src, cur);
4637
4638		src += cur;
4639		len -= cur;
4640		offset = 0;
4641		i++;
4642	}
4643}
4644
4645void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
4646			 unsigned long start, unsigned long len)
4647{
4648	return __write_extent_buffer(eb, srcv, start, len, false);
4649}
4650
4651static void memset_extent_buffer(const struct extent_buffer *eb, int c,
4652				 unsigned long start, unsigned long len)
4653{
4654	const int unit_size = eb->folio_size;
4655	unsigned long cur = start;
4656
4657	if (eb->addr) {
4658		memset(eb->addr + start, c, len);
4659		return;
4660	}
4661
4662	while (cur < start + len) {
4663		unsigned long index = get_eb_folio_index(eb, cur);
4664		unsigned int offset = get_eb_offset_in_folio(eb, cur);
4665		unsigned int cur_len = min(start + len - cur, unit_size - offset);
4666
4667		assert_eb_folio_uptodate(eb, index);
4668		memset(folio_address(eb->folios[index]) + offset, c, cur_len);
4669
4670		cur += cur_len;
4671	}
4672}
4673
4674void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
4675			   unsigned long len)
4676{
4677	if (check_eb_range(eb, start, len))
4678		return;
4679	return memset_extent_buffer(eb, 0, start, len);
4680}
4681
4682void copy_extent_buffer_full(const struct extent_buffer *dst,
4683			     const struct extent_buffer *src)
4684{
4685	const int unit_size = src->folio_size;
4686	unsigned long cur = 0;
4687
4688	ASSERT(dst->len == src->len);
4689
4690	while (cur < src->len) {
4691		unsigned long index = get_eb_folio_index(src, cur);
4692		unsigned long offset = get_eb_offset_in_folio(src, cur);
4693		unsigned long cur_len = min(src->len, unit_size - offset);
4694		void *addr = folio_address(src->folios[index]) + offset;
4695
4696		write_extent_buffer(dst, addr, cur, cur_len);
4697
4698		cur += cur_len;
4699	}
4700}
4701
4702void copy_extent_buffer(const struct extent_buffer *dst,
4703			const struct extent_buffer *src,
4704			unsigned long dst_offset, unsigned long src_offset,
4705			unsigned long len)
4706{
4707	const int unit_size = dst->folio_size;
4708	u64 dst_len = dst->len;
4709	size_t cur;
4710	size_t offset;
4711	char *kaddr;
4712	unsigned long i = get_eb_folio_index(dst, dst_offset);
4713
4714	if (check_eb_range(dst, dst_offset, len) ||
4715	    check_eb_range(src, src_offset, len))
4716		return;
4717
4718	WARN_ON(src->len != dst_len);
4719
4720	offset = get_eb_offset_in_folio(dst, dst_offset);
4721
4722	while (len > 0) {
4723		assert_eb_folio_uptodate(dst, i);
4724
4725		cur = min(len, (unsigned long)(unit_size - offset));
4726
4727		kaddr = folio_address(dst->folios[i]);
4728		read_extent_buffer(src, kaddr + offset, src_offset, cur);
4729
4730		src_offset += cur;
4731		len -= cur;
4732		offset = 0;
4733		i++;
4734	}
4735}
4736
4737/*
4738 * Calculate the folio and offset of the byte containing the given bit number.
4739 *
4740 * @eb:           the extent buffer
4741 * @start:        offset of the bitmap item in the extent buffer
4742 * @nr:           bit number
4743 * @folio_index:  return index of the folio in the extent buffer that contains
4744 *                the given bit number
4745 * @folio_offset: return offset into the folio given by folio_index
4746 *
4747 * This helper hides the ugliness of finding the byte in an extent buffer which
4748 * contains a given bit.
4749 */
4750static inline void eb_bitmap_offset(const struct extent_buffer *eb,
4751				    unsigned long start, unsigned long nr,
4752				    unsigned long *folio_index,
4753				    size_t *folio_offset)
4754{
4755	size_t byte_offset = BIT_BYTE(nr);
4756	size_t offset;
4757
4758	/*
4759	 * The byte we want is the offset of the extent buffer + the offset of
4760	 * the bitmap item in the extent buffer + the offset of the byte in the
4761	 * bitmap item.
4762	 */
4763	offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset;
4764
4765	*folio_index = offset >> eb->folio_shift;
4766	*folio_offset = offset_in_eb_folio(eb, offset);
4767}
4768
4769/*
4770 * Determine whether a bit in a bitmap item is set.
4771 *
4772 * @eb:     the extent buffer
4773 * @start:  offset of the bitmap item in the extent buffer
4774 * @nr:     bit number to test
4775 */
4776int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
4777			   unsigned long nr)
4778{
4779	unsigned long i;
4780	size_t offset;
4781	u8 *kaddr;
4782
4783	eb_bitmap_offset(eb, start, nr, &i, &offset);
4784	assert_eb_folio_uptodate(eb, i);
4785	kaddr = folio_address(eb->folios[i]);
4786	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
4787}
4788
4789static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
4790{
4791	unsigned long index = get_eb_folio_index(eb, bytenr);
4792
4793	if (check_eb_range(eb, bytenr, 1))
4794		return NULL;
4795	return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
4796}
4797
4798/*
4799 * Set an area of a bitmap to 1.
4800 *
4801 * @eb:     the extent buffer
4802 * @start:  offset of the bitmap item in the extent buffer
4803 * @pos:    bit number of the first bit
4804 * @len:    number of bits to set
4805 */
4806void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
4807			      unsigned long pos, unsigned long len)
4808{
4809	unsigned int first_byte = start + BIT_BYTE(pos);
4810	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4811	const bool same_byte = (first_byte == last_byte);
4812	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4813	u8 *kaddr;
4814
4815	if (same_byte)
4816		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4817
4818	/* Handle the first byte. */
4819	kaddr = extent_buffer_get_byte(eb, first_byte);
4820	*kaddr |= mask;
4821	if (same_byte)
4822		return;
4823
4824	/* Handle the byte aligned part. */
4825	ASSERT(first_byte + 1 <= last_byte);
4826	memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
4827
4828	/* Handle the last byte. */
4829	kaddr = extent_buffer_get_byte(eb, last_byte);
4830	*kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
4831}
4832
4833
4834/*
4835 * Clear an area of a bitmap.
4836 *
4837 * @eb:     the extent buffer
4838 * @start:  offset of the bitmap item in the extent buffer
4839 * @pos:    bit number of the first bit
4840 * @len:    number of bits to clear
4841 */
4842void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
4843				unsigned long start, unsigned long pos,
4844				unsigned long len)
4845{
4846	unsigned int first_byte = start + BIT_BYTE(pos);
4847	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4848	const bool same_byte = (first_byte == last_byte);
4849	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4850	u8 *kaddr;
4851
4852	if (same_byte)
4853		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4854
4855	/* Handle the first byte. */
4856	kaddr = extent_buffer_get_byte(eb, first_byte);
4857	*kaddr &= ~mask;
4858	if (same_byte)
4859		return;
4860
4861	/* Handle the byte aligned part. */
4862	ASSERT(first_byte + 1 <= last_byte);
4863	memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
4864
4865	/* Handle the last byte. */
4866	kaddr = extent_buffer_get_byte(eb, last_byte);
4867	*kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
4868}
4869
4870static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4871{
4872	unsigned long distance = (src > dst) ? src - dst : dst - src;
4873	return distance < len;
4874}
4875
4876void memcpy_extent_buffer(const struct extent_buffer *dst,
4877			  unsigned long dst_offset, unsigned long src_offset,
4878			  unsigned long len)
4879{
4880	const int unit_size = dst->folio_size;
4881	unsigned long cur_off = 0;
4882
4883	if (check_eb_range(dst, dst_offset, len) ||
4884	    check_eb_range(dst, src_offset, len))
4885		return;
4886
4887	if (dst->addr) {
4888		const bool use_memmove = areas_overlap(src_offset, dst_offset, len);
4889
4890		if (use_memmove)
4891			memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4892		else
4893			memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
4894		return;
4895	}
4896
4897	while (cur_off < len) {
4898		unsigned long cur_src = cur_off + src_offset;
4899		unsigned long folio_index = get_eb_folio_index(dst, cur_src);
4900		unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
4901		unsigned long cur_len = min(src_offset + len - cur_src,
4902					    unit_size - folio_off);
4903		void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
4904		const bool use_memmove = areas_overlap(src_offset + cur_off,
4905						       dst_offset + cur_off, cur_len);
4906
4907		__write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
4908				      use_memmove);
4909		cur_off += cur_len;
4910	}
4911}
4912
4913void memmove_extent_buffer(const struct extent_buffer *dst,
4914			   unsigned long dst_offset, unsigned long src_offset,
4915			   unsigned long len)
4916{
4917	unsigned long dst_end = dst_offset + len - 1;
4918	unsigned long src_end = src_offset + len - 1;
4919
4920	if (check_eb_range(dst, dst_offset, len) ||
4921	    check_eb_range(dst, src_offset, len))
4922		return;
4923
4924	if (dst_offset < src_offset) {
4925		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4926		return;
4927	}
4928
4929	if (dst->addr) {
4930		memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4931		return;
4932	}
4933
4934	while (len > 0) {
4935		unsigned long src_i;
4936		size_t cur;
4937		size_t dst_off_in_folio;
4938		size_t src_off_in_folio;
4939		void *src_addr;
4940		bool use_memmove;
4941
4942		src_i = get_eb_folio_index(dst, src_end);
4943
4944		dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
4945		src_off_in_folio = get_eb_offset_in_folio(dst, src_end);
4946
4947		cur = min_t(unsigned long, len, src_off_in_folio + 1);
4948		cur = min(cur, dst_off_in_folio + 1);
4949
4950		src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
4951					 cur + 1;
4952		use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
4953					    cur);
4954
4955		__write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
4956				      use_memmove);
4957
4958		dst_end -= cur;
4959		src_end -= cur;
4960		len -= cur;
4961	}
4962}
4963
4964#define GANG_LOOKUP_SIZE	16
4965static struct extent_buffer *get_next_extent_buffer(
4966		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
4967{
4968	struct extent_buffer *gang[GANG_LOOKUP_SIZE];
4969	struct extent_buffer *found = NULL;
4970	u64 page_start = page_offset(page);
4971	u64 cur = page_start;
4972
4973	ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
4974	lockdep_assert_held(&fs_info->buffer_lock);
4975
4976	while (cur < page_start + PAGE_SIZE) {
4977		int ret;
4978		int i;
4979
4980		ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
4981				(void **)gang, cur >> fs_info->sectorsize_bits,
4982				min_t(unsigned int, GANG_LOOKUP_SIZE,
4983				      PAGE_SIZE / fs_info->nodesize));
4984		if (ret == 0)
4985			goto out;
4986		for (i = 0; i < ret; i++) {
4987			/* Already beyond page end */
4988			if (gang[i]->start >= page_start + PAGE_SIZE)
4989				goto out;
4990			/* Found one */
4991			if (gang[i]->start >= bytenr) {
4992				found = gang[i];
4993				goto out;
4994			}
4995		}
4996		cur = gang[ret - 1]->start + gang[ret - 1]->len;
4997	}
4998out:
4999	return found;
5000}
5001
5002static int try_release_subpage_extent_buffer(struct page *page)
5003{
5004	struct btrfs_fs_info *fs_info = page_to_fs_info(page);
5005	u64 cur = page_offset(page);
5006	const u64 end = page_offset(page) + PAGE_SIZE;
5007	int ret;
5008
5009	while (cur < end) {
5010		struct extent_buffer *eb = NULL;
5011
5012		/*
5013		 * Unlike try_release_extent_buffer() which uses folio private
5014		 * to grab buffer, for subpage case we rely on radix tree, thus
5015		 * we need to ensure radix tree consistency.
5016		 *
5017		 * We also want an atomic snapshot of the radix tree, thus go
5018		 * with spinlock rather than RCU.
5019		 */
5020		spin_lock(&fs_info->buffer_lock);
5021		eb = get_next_extent_buffer(fs_info, page, cur);
5022		if (!eb) {
5023			/* No more eb in the page range after or at cur */
5024			spin_unlock(&fs_info->buffer_lock);
5025			break;
5026		}
5027		cur = eb->start + eb->len;
5028
5029		/*
5030		 * The same as try_release_extent_buffer(), to ensure the eb
5031		 * won't disappear out from under us.
5032		 */
5033		spin_lock(&eb->refs_lock);
5034		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
5035			spin_unlock(&eb->refs_lock);
5036			spin_unlock(&fs_info->buffer_lock);
5037			break;
5038		}
5039		spin_unlock(&fs_info->buffer_lock);
5040
5041		/*
5042		 * If tree ref isn't set then we know the ref on this eb is a
5043		 * real ref, so just return, this eb will likely be freed soon
5044		 * anyway.
5045		 */
5046		if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5047			spin_unlock(&eb->refs_lock);
5048			break;
5049		}
5050
5051		/*
5052		 * Here we don't care about the return value, we will always
5053		 * check the folio private at the end.  And
5054		 * release_extent_buffer() will release the refs_lock.
5055		 */
5056		release_extent_buffer(eb);
5057	}
5058	/*
5059	 * Finally to check if we have cleared folio private, as if we have
5060	 * released all ebs in the page, the folio private should be cleared now.
5061	 */
5062	spin_lock(&page->mapping->i_private_lock);
5063	if (!folio_test_private(page_folio(page)))
5064		ret = 1;
5065	else
5066		ret = 0;
5067	spin_unlock(&page->mapping->i_private_lock);
5068	return ret;
5069
5070}
5071
5072int try_release_extent_buffer(struct page *page)
5073{
5074	struct folio *folio = page_folio(page);
5075	struct extent_buffer *eb;
5076
5077	if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
5078		return try_release_subpage_extent_buffer(page);
5079
5080	/*
5081	 * We need to make sure nobody is changing folio private, as we rely on
5082	 * folio private as the pointer to extent buffer.
5083	 */
5084	spin_lock(&page->mapping->i_private_lock);
5085	if (!folio_test_private(folio)) {
5086		spin_unlock(&page->mapping->i_private_lock);
5087		return 1;
5088	}
5089
5090	eb = folio_get_private(folio);
5091	BUG_ON(!eb);
5092
5093	/*
5094	 * This is a little awful but should be ok, we need to make sure that
5095	 * the eb doesn't disappear out from under us while we're looking at
5096	 * this page.
5097	 */
5098	spin_lock(&eb->refs_lock);
5099	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
5100		spin_unlock(&eb->refs_lock);
5101		spin_unlock(&page->mapping->i_private_lock);
5102		return 0;
5103	}
5104	spin_unlock(&page->mapping->i_private_lock);
5105
5106	/*
5107	 * If tree ref isn't set then we know the ref on this eb is a real ref,
5108	 * so just return, this page will likely be freed soon anyway.
5109	 */
5110	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5111		spin_unlock(&eb->refs_lock);
5112		return 0;
5113	}
5114
5115	return release_extent_buffer(eb);
5116}
5117
5118/*
5119 * Attempt to readahead a child block.
5120 *
5121 * @fs_info:	the fs_info
5122 * @bytenr:	bytenr to read
5123 * @owner_root: objectid of the root that owns this eb
5124 * @gen:	generation for the uptodate check, can be 0
5125 * @level:	level for the eb
5126 *
5127 * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
5128 * normal uptodate check of the eb, without checking the generation.  If we have
5129 * to read the block we will not block on anything.
5130 */
5131void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
5132				u64 bytenr, u64 owner_root, u64 gen, int level)
5133{
5134	struct btrfs_tree_parent_check check = {
5135		.has_first_key = 0,
5136		.level = level,
5137		.transid = gen
5138	};
5139	struct extent_buffer *eb;
5140	int ret;
5141
5142	eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
5143	if (IS_ERR(eb))
5144		return;
5145
5146	if (btrfs_buffer_uptodate(eb, gen, 1)) {
5147		free_extent_buffer(eb);
5148		return;
5149	}
5150
5151	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
5152	if (ret < 0)
5153		free_extent_buffer_stale(eb);
5154	else
5155		free_extent_buffer(eb);
5156}
5157
5158/*
5159 * Readahead a node's child block.
5160 *
5161 * @node:	parent node we're reading from
5162 * @slot:	slot in the parent node for the child we want to read
5163 *
5164 * A helper for btrfs_readahead_tree_block, we simply read the bytenr pointed at
5165 * the slot in the node provided.
5166 */
5167void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
5168{
5169	btrfs_readahead_tree_block(node->fs_info,
5170				   btrfs_node_blockptr(node, slot),
5171				   btrfs_header_owner(node),
5172				   btrfs_node_ptr_generation(node, slot),
5173				   btrfs_header_level(node) - 1);
5174}
5175