// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/sched/mm.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

/*
 * Check whether the given nid is within node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
			  __func__, nid);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}
	return 0;
}

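/*
 * Rough illustration of the arithmetic below (the real constants live in
 * node.h and are tunable via sysfs, so treat these numbers as an example
 * only): with 4GiB of low memory and ram_thresh = 10, the overall budget
 * is 4GiB * 10 / 100 = ~410MiB; FREE_NIDS and NAT_ENTRIES may then use a
 * quarter of that (">> 2", ~102MiB) and DIRTY_DENTS and INO_ENTRIES half
 * of it (">> 1", ~205MiB).
 */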
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	if (!nm_i)
		return true;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 25%, 25% of low memory to each component,
	 * respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
				sizeof(struct nat_entry)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == READ_EXTENT_CACHE || type == AGE_EXTENT_CACHE) {
		enum extent_type etype = type == READ_EXTENT_CACHE ?
						EX_READ : EX_BLOCK_AGE;
		struct extent_tree_info *eti = &sbi->extent_tree[etype];

		mem_size = (atomic_read(&eti->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&eti->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DISCARD_CACHE) {
		mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
				sizeof(struct discard_cmd)) >> PAGE_SHIFT;
		res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
	} else if (type == COMPRESS_PAGE) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		unsigned long free_ram = val.freeram;

		/*
		 * If free memory is lower than the watermark or the cached
		 * page count exceeds the threshold, deny caching compressed
		 * pages.
		 */
		res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
			(COMPRESS_MAPPING(sbi)->nrpages <
			 free_ram * sbi->compress_percent / 100);
#else
		res = false;
#endif
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}

static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_page_cache_dirty_tag(page);
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get current nat block page with lock */
	src_page = get_current_nat_page(sbi, nid);
	if (IS_ERR(src_page))
		return src_page;
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
						nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab,
					GFP_F2FS_ZERO, no_fail, sbi);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt[TOTAL_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
	return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/* for a recently accessed nat entry, move it to the tail of lru list */
	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt[TOTAL_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	__free_nat_entry(e);
}

static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
							struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
						GFP_NOFS, true, NULL);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}

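/*
 * Mark a nat entry dirty: hook it onto the per-NAT-block dirty set and
 * keep the DIRTY_NAT/RECLAIMABLE_NAT counters balanced.  An entry still
 * at NEW_ADDR is only preallocated; it is taken off the clean LRU but
 * not attached to any set, since it has no on-disk NAT block yet.
 */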
static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt under the following conditions:
	 * 1. a NEW_ADDR is updated to a valid block address;
	 * 2. an old block address is updated to a new one;
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->nat_cnt[DIRTY_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	spin_lock(&nm_i->nat_list_lock);
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
	spin_unlock(&nm_i->nat_list_lock);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	spin_lock(&nm_i->nat_list_lock);
	list_move_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->nat_cnt[DIRTY_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
	return NODE_MAPPING(sbi) == page->mapping &&
			IS_DNODE(page) && is_cold_node(page);
}

void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}

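/*
 * Track a node page written out for fsync and hand back a sequence id.
 * f2fs_wait_on_node_pages_writeback() later walks fsync_node_list in
 * order and waits on every entry up to that id, so an fsync waits for
 * exactly the node writes it issued.
 */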
static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
							struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
					GFP_NOFS, true, NULL);

	get_page(page);
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}

void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->page == page) {
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			put_page(page);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need_update;
}

/* takes and releases nat_tree_lock internally */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	/* Let's mitigate lock contention of nat_tree_lock during checkpoint */
	if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
		return;

	new = __alloc_nat_entry(sbi, nid, false);
	if (!new)
		return;

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	f2fs_up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}

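/*
 * Update the cached nat entry of a node whose block address changed.
 * The expected lifecycle is NULL_ADDR (free) -> NEW_ADDR (preallocated)
 * -> valid blkaddr (written) -> NULL_ADDR again (truncated); the sanity
 * checks below catch transitions that skip a state, and the node version
 * is bumped when the node returns to NULL_ADDR.
 */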
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache. So, reinitialize it with new
		 * information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);

		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!__is_valid_data_blkaddr(new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	f2fs_up_write(&nm_i->nat_tree_lock);
}

int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
		struct nat_entry *ne;

		if (list_empty(&nm_i->nat_entries))
			break;

		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;

		spin_lock(&nm_i->nat_list_lock);
	}
	spin_unlock(&nm_i->nat_list_lock);

	f2fs_up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}

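/*
 * Resolve nid -> node_info in three stages, cheapest first: the
 * in-memory nat cache, then the NAT journal kept in the hot data
 * curseg summary, and finally the on-disk NAT block, caching the result
 * on the way out.
 */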
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
				struct node_info *ni, bool checkpoint_context)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	block_t blkaddr;
	int i;

	ni->nid = nid;
retry:
	/* Check nat cache */
	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		f2fs_up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	/*
	 * Check the current segment summary by trying to grab journal_rwsem
	 * first. This sem is on the critical path of checkpoint, which also
	 * takes nat_tree_lock. Therefore, retry if we fail to grab it here,
	 * without blocking checkpoint.
	 */
	if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
		down_read(&curseg->journal_rwsem);
	} else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
				!down_read_trylock(&curseg->journal_rwsem)) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto retry;
	}

	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from nat page */
	index = current_nat_addr(sbi, nid);
	f2fs_up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	blkaddr = le32_to_cpu(ne.block_addr);
	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EFAULT;

	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}

/*
 * readahead up to n node pages, clamped to the end of the block.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, (int)NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

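/*
 * Illustration (assuming the common 4KB layout where ADDRS_PER_BLOCK ==
 * NIDS_PER_BLOCK == 1018): if a lookup failed at cur_level 2 of a
 * max_level 3 path, the missing indirect subtree spans 1018 * 1018 data
 * blocks, and the returned offset is the first one past that subtree.
 */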
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		fallthrough;
	case 2:
		base += 2 * direct_blks;
		fallthrough;
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
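/*
 * Worked example, assuming the default 4KB layout with no inline xattr
 * or extra attrs (ADDRS_PER_INODE == 923, ADDRS_PER_BLOCK ==
 * NIDS_PER_BLOCK == 1018; illustration only): block 3000 lies past the
 * inode (923) and both direct nodes (2 * 1018), leaving 41 blocks into
 * the first indirect tree, so the result is level 2 with
 * offset[] = { NODE_IND1_BLOCK, 0, 41 }.
 */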
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK(inode);
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = f2fs_data_blkaddr(dn);

	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
					f2fs_sb_has_readonly(sbi)) {
		unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
		unsigned int ofs_in_node = dn->ofs_in_node;
		pgoff_t fofs = index;
		unsigned int c_len;
		block_t blkaddr;

		/* should align fofs and ofs_in_node to cluster_size */
		if (fofs % cluster_size) {
			fofs = round_down(fofs, cluster_size);
			ofs_in_node = round_down(ofs_in_node, cluster_size);
		}

		c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
		if (!c_len)
			goto out;

		blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node);
		if (blkaddr == COMPRESS_ADDR)
			blkaddr = data_blkaddr(dn->inode, dn->node_page,
						ofs_in_node + 1);

		f2fs_update_read_extent_tree_range_compressed(dn->inode,
					fofs, blkaddr, cluster_size, c_len);
	}
out:
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}

static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
	if (err)
		return err;

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *page;
	int err;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(sbi, dn->nid);
	if (PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	if (IS_INODE(page) || ino_of_node(page) != dn->inode->i_ino) {
		f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
				dn->inode->i_ino, dn->nid, ino_of_node(page));
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
		f2fs_put_page(page, 1);
		return -EFSCORRUPTED;
	}

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
	err = truncate_node(dn);
	if (err) {
		f2fs_put_page(page, 1);
		return err;
	}

	return 1;
}

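/*
 * Recursively free the subtree below an indirect or double indirect
 * node.  The return value counts freed node pages: a fully freed
 * indirect child reports NIDS_PER_BLOCK + 1 (its direct nodes plus
 * itself), which is how the caller tells "subtree gone, clear the nid
 * slot" apart from a partial truncation.
 */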
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

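/*
 * Handle the partially truncated edge of the node tree: walk down the
 * offset[] path produced by get_node_path(), free the direct nodes past
 * the truncation point, and drop the indirect node itself only once it
 * has no live child left.
 */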
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count will be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0) {
		trace_f2fs_truncate_inode_blocks_exit(inode, level);
		return level;
	}

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err == -ENOENT) {
			set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			f2fs_err_ratelimited(sbi,
				"truncate node fail, ino:%lu, nid:%u, "
				"offset[0]:%d, offset[1]:%d, nofs:%d",
				inode->i_ino, dn.nid, offset[0],
				offset[1], nofs);
			err = 0;
		}
		if (err < 0)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_page(npage, 1);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_warn(F2FS_I_SB(inode),
			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
			inode->i_ino, (unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}

struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}

struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
		err = -EFSCORRUPTED;
		dec_valid_node_count(sbi, dn->inode, !ofs);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
		goto fail;
	}
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;
fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller should act on the return value as follows:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, blk_opf_t op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (PageUptodate(page)) {
		if (!f2fs_inode_chksum_verify(sbi, page)) {
			ClearPageUptodate(page);
			return -EFSBADCRC;
		}
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, page->index, &ni, false);
	if (err)
		return err;

	/* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
	if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;

	err = f2fs_submit_page_bio(&fio);

	if (!err)
		f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE);

	return err;
}

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}

static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		goto out_put_err;
	} else if (err == LOCKED_PAGE) {
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	if (likely(nid == nid_of_node(page)))
		return page;

	f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			  nid, nid_of_node(page), ino_of_node(page),
			  ofs_of_node(page), cpver_of_node(page),
			  next_blkaddr_of_node(page));
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
	err = -EFSCORRUPTED;
out_err:
	ClearPageUptodate(page);
out_put_err:
	/* ENOENT comes from read_node_page which is not an error. */
	if (err != -ENOENT)
		f2fs_handle_page_eio(sbi, page->index, NODE);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct folio_batch fbatch;
	struct page *last_page = NULL;
	int nr_folios;

	folio_batch_init(&fbatch);
	index = 0;

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				folio_batch_release(&fbatch);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
	return last_page;
}

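/*
 * Write back one node page.  Ordering matters: the page is added to the
 * fsync node list before PG_writeback is set, so a concurrent
 * f2fs_wait_on_node_pages_writeback() can always find an entry for any
 * write it must wait on.
 */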
static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = 0,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	unsigned int seq;

	trace_f2fs_writepage(page_folio(page), NODE);

	if (unlikely(f2fs_cp_error(sbi))) {
		/* keep node pages in remount-ro mode */
		if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
			goto redirty_out;
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			IS_DNODE(page) && is_cold_node(page))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
		goto redirty_out;

	if (wbc->for_reclaim) {
		if (!f2fs_down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		f2fs_down_read(&sbi->node_write);
	}

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		f2fs_up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
					DATA_GENERIC_ENHANCE)) {
		f2fs_up_read(&sbi->node_write);
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER) && !f2fs_sb_has_blkzoned(sbi))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	/* should add to global list before clearing PAGECACHE status */
	if (f2fs_in_warm_node_list(sbi, page)) {
		seq = f2fs_add_fsync_node_entry(sbi, page);
		if (seq_id)
			*seq_id = seq;
	}

	set_page_writeback(page);

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	f2fs_up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
		submitted = NULL;
	}

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

int f2fs_move_node_page(struct page *node_page, int gc_type)
{
	int err = 0;

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		f2fs_wait_on_page_writeback(node_page, NODE, true, true);

		set_page_dirty(node_page);

		if (!clear_page_dirty_for_io(node_page)) {
			err = -EAGAIN;
			goto out_page;
		}

		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL)) {
			err = -EAGAIN;
			unlock_page(node_page);
		}
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!folio_test_writeback(page_folio(node_page)))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
	return err;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_node_page(page, false, NULL, wbc, false,
						FS_NODE_IO, NULL);
}

int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id)
{
	pgoff_t index;
	struct folio_batch fbatch;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;
	int nr_folios;
	int nwritten = 0;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	folio_batch_init(&fbatch);
	index = 0;

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				folio_batch_release(&fbatch);
				ret = -EIO;
				goto out;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true, true);

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				percpu_counter_inc(&sbi->rf_node_block_count);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						f2fs_update_inode(inode, page);
					set_dentry_mark(page,
						f2fs_need_dentry_mark(sbi, ino));
				}
				/* may be written by other thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_node_page(page, atomic &&
						page == last_page,
						&submitted, wbc, true,
						FS_NODE_IO, seq_id);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		folio_batch_release(&fbatch);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
			   ino, last_page->index);
		lock_page(last_page);
		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
	return ret ? -EIO : 0;
}

static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool clean;

	if (inode->i_ino != ino)
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	clean = list_empty(&F2FS_I(inode)->gdirty_list);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

	if (clean)
		return 0;

	inode = igrab(inode);
	if (!inode)
		return 0;
	return 1;
}

static bool flush_dirty_inode(struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct inode *inode;
	nid_t ino = ino_of_node(page);

	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
	if (!inode)
		return false;

	f2fs_update_inode(inode, page);
	unlock_page(page);

	iput(inode);
	return true;
}

void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
{
	pgoff_t index = 0;
	struct folio_batch fbatch;
	int nr_folios;

	folio_batch_init(&fbatch);

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;

			if (!IS_INODE(page))
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data, if it's async context. */
			if (page_private_inline(page)) {
				clear_page_private_inline(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				continue;
			}
			unlock_page(page);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

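/*
 * Flush dirty node pages in three passes: indirect nodes, then dentry
 * dnodes, then file dnodes (the cold, fsynced ones last).  WB_SYNC_NONE
 * writers back off while a WB_SYNC_ALL writer is pending, and stop
 * after step 1 unless checkpointing is disabled.
 */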
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
				struct writeback_control *wbc,
				bool do_balance, enum iostat_type io_type)
{
	pgoff_t index;
	struct folio_batch fbatch;
	int step = 0;
	int nwritten = 0;
	int ret = 0;
	int nr_folios, done = 0;

	folio_batch_init(&fbatch);

next_step:
	index = 0;

	while (!done && (nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi),
				&index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
				&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;
			bool submitted = false;

			/* give a priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (wbc->sync_mode == WB_SYNC_ALL)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data/inode, if it's async context. */
			if (!do_balance)
				goto write_node;

			/* flush inline_data */
			if (page_private_inline(page)) {
				clear_page_private_inline(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			/* flush dirty inode */
			if (IS_INODE(page) && flush_dirty_inode(page))
				goto lock_node;
write_node:
			f2fs_wait_on_page_writeback(page, NODE, true, true);

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			ret = __write_node_page(page, false, &submitted,
						wbc, do_balance, io_type, NULL);
			if (ret)
				unlock_page(page);
			else if (submitted)
				nwritten++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		folio_batch_release(&fbatch);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
				wbc->sync_mode == WB_SYNC_NONE && step == 1)
			goto out;
		step++;
		goto next_step;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write(sbi, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;
	return ret;
}

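/*
 * Wait for writeback of the node pages registered for fsync, in
 * sequence-id order up to and including @seq_id, then report any
 * writeback error recorded on the node mapping.
 */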
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
						unsigned int seq_id)
{
	struct fsync_node_entry *fn;
	struct page *page;
	struct list_head *head = &sbi->fsync_node_list;
	unsigned long flags;
	unsigned int cur_seq_id = 0;

	while (seq_id && cur_seq_id < seq_id) {
		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		fn = list_first_entry(head, struct fsync_node_entry, list);
		if (fn->seq_id > seq_id) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		cur_seq_id = fn->seq_id;
		page = fn->page;
		get_page(page);
		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

		f2fs_wait_on_page_writeback(page, NODE, true, false);

		put_page(page);
	}

	return filemap_check_errors(NODE_MAPPING(sbi));
}

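/*
 * ->writepages() for the node address space: skips background writeback
 * when too few node pages are dirty, and lets WB_SYNC_ALL writers take
 * priority over concurrent WB_SYNC_NONE ones via wb_sync_req[NODE].
 */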
static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi, true);

	/* collect a number of dirty node pages and write together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_NODES) <
					nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[NODE]);
	else if (atomic_read(&sbi->wb_sync_req[NODE])) {
		/* to avoid potential deadlock */
		if (current->plug)
			blk_finish_plug(current->plug);
		goto skip_write;
	}

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	blk_start_plug(&plug);
	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
	blk_finish_plug(&plug);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}

static bool f2fs_dirty_node_folio(struct address_space *mapping,
		struct folio *folio)
{
	trace_f2fs_set_page_dirty(folio, NODE);

	if (!folio_test_uptodate(folio))
		folio_mark_uptodate(folio);
#ifdef CONFIG_F2FS_CHECK_FS
	if (IS_INODE(&folio->page))
		f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
#endif
	if (filemap_dirty_folio(mapping, folio)) {
		inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
		set_page_private_reference(&folio->page);
		return true;
	}
	return false;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.dirty_folio	= f2fs_dirty_node_folio,
	.invalidate_folio = f2fs_invalidate_folio,
	.release_folio	= f2fs_release_folio,
	.migrate_folio	= filemap_migrate_folio,
};

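/*
 * Free nid bookkeeping: free nids are tracked both in a radix tree
 * (free_nid_root) for lookup and, while in FREE_NID state, on
 * free_nid_list for FIFO allocation. Callers of the helpers below
 * hold nid_list_lock.
 */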
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static int __insert_free_nid(struct f2fs_sb_info *sbi,
				struct free_nid *i)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);

	if (err)
		return err;

	nm_i->nid_cnt[FREE_NID]++;
	list_add_tail(&i->list, &nm_i->free_nid_list);
	return 0;
}

static void __remove_free_nid(struct f2fs_sb_info *sbi,
			struct free_nid *i, enum nid_state state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, state != i->state);
	nm_i->nid_cnt[state]--;
	if (state == FREE_NID)
		list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
			enum nid_state org_state, enum nid_state dst_state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, org_state != i->state);
	i->state = dst_state;
	nm_i->nid_cnt[org_state]--;
	nm_i->nid_cnt[dst_state]++;

	switch (dst_state) {
	case PREALLOC_NID:
		list_del(&i->list);
		break;
	case FREE_NID:
		list_add_tail(&i->list, &nm_i->free_nid_list);
		break;
	default:
		BUG_ON(1);
	}
}

bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i;
	bool ret = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
			ret = false;
			break;
		}
	}
	f2fs_up_read(&nm_i->nat_tree_lock);

	return ret;
}

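/*
 * Maintain the per-NAT-block free nid bitmap and counter for a single
 * nid; a no-op until the covering NAT block has been scanned (i.e. its
 * bit is set in nat_block_bitmap).
 */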
static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
							bool set, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
	unsigned int nid_ofs = nid - START_NID(nid);

	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
		return;

	if (set) {
		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		nm_i->free_nid_count[nat_ofs]++;
	} else {
		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		if (!build)
			nm_i->free_nid_count[nat_ofs]--;
	}
}

/* return whether the nid is recognized as free */
static bool add_free_nid(struct f2fs_sb_info *sbi,
				nid_t nid, bool build, bool update)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *e;
	struct nat_entry *ne;
	int err = -EINVAL;
	bool ret = false;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return false;

	if (unlikely(f2fs_check_nid_range(sbi, nid)))
		return false;

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
	i->nid = nid;
	i->state = FREE_NID;

	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&nm_i->nid_list_lock);

	if (build) {
		/*
		 *   Thread A             Thread B
		 *  - f2fs_create
		 *   - f2fs_new_inode
		 *    - f2fs_alloc_nid
		 *     - __insert_nid_to_list(PREALLOC_NID)
		 *                     - f2fs_balance_fs_bg
		 *                      - f2fs_build_free_nids
		 *                       - __f2fs_build_free_nids
		 *                        - scan_nat_page
		 *                         - add_free_nid
		 *                          - __lookup_nat_cache
		 *  - f2fs_add_link
		 *   - f2fs_init_inode_metadata
		 *    - f2fs_new_inode_page
		 *     - f2fs_new_node_page
		 *      - set_node_addr
		 *  - f2fs_alloc_nid_done
		 *   - __remove_nid_from_list(PREALLOC_NID)
		 *                         - __insert_nid_to_list(FREE_NID)
		 */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			goto err_out;

		e = __lookup_free_nid_list(nm_i, nid);
		if (e) {
			if (e->state == FREE_NID)
				ret = true;
			goto err_out;
		}
	}
	ret = true;
	err = __insert_free_nid(sbi, i);
err_out:
	if (update) {
		update_free_nid_bitmap(sbi, nid, ret, build);
		if (!build)
			nm_i->available_nids++;
	}
	spin_unlock(&nm_i->nid_list_lock);
	radix_tree_preload_end();

	if (err)
		kmem_cache_free(free_nid_slab, i);
	return ret;
}

static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == FREE_NID) {
		__remove_free_nid(sbi, i, FREE_NID);
		need_free = true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

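/*
 * Scan one NAT block and record every nid whose block address is
 * NULL_ADDR as free; a NEW_ADDR entry on disk is treated as filesystem
 * corruption.
 */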
static int scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
	int i;

	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);

		if (blk_addr == NEW_ADDR)
			return -EFSCORRUPTED;

		if (blk_addr == NULL_ADDR) {
			add_free_nid(sbi, start_nid, true, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, start_nid, false, true);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	return 0;
}

static void scan_curseg_cache(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_read(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;
		nid_t nid;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true, false);
		else
			remove_free_nid(sbi, nid);
	}
	up_read(&curseg->journal_rwsem);
}

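/*
 * Harvest free nids from the in-memory free nid bitmaps of already
 * scanned NAT blocks, then fold in the NAT journal of the hot data
 * curseg, stopping once MAX_FREE_NIDS entries are cached.
 */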
static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i, idx;
	nid_t nid;

	f2fs_down_read(&nm_i->nat_tree_lock);

	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap))
			continue;
		if (!nm_i->free_nid_count[i])
			continue;
		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
						NAT_ENTRY_PER_BLOCK, idx);
			if (idx >= NAT_ENTRY_PER_BLOCK)
				break;

			nid = i * NAT_ENTRY_PER_BLOCK + idx;
			add_free_nid(sbi, nid, true, false);

			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
				goto out;
		}
	}
out:
	scan_curseg_cache(sbi);

	f2fs_up_read(&nm_i->nat_tree_lock);
}

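/*
 * Refill the free nid cache by scanning up to FREE_NID_PAGES NAT pages
 * starting at next_scan_nid; runs under build_lock via
 * f2fs_build_free_nids().
 */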
static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
						bool sync, bool mount)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i = 0, ret;
	nid_t nid = nm_i->next_scan_nid;

	if (unlikely(nid >= nm_i->max_nid))
		nid = 0;

	if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
		nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;

	/* Enough entries */
	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
		return 0;

	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
		return 0;

	if (!mount) {
		/* try to find free nids in free_nid_bitmap */
		scan_free_nid_bits(sbi);

		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
			return 0;
	}

	/* readahead nat pages to be scanned */
	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	f2fs_down_read(&nm_i->nat_tree_lock);

	while (1) {
		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
						nm_i->nat_block_bitmap)) {
			struct page *page = get_current_nat_page(sbi, nid);

			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
			} else {
				ret = scan_nat_page(sbi, page, nid);
				f2fs_put_page(page, 1);
			}

			if (ret) {
				f2fs_up_read(&nm_i->nat_tree_lock);

				if (ret == -EFSCORRUPTED) {
					f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
					set_sbi_flag(sbi, SBI_NEED_FSCK);
					f2fs_handle_error(sbi,
						ERROR_INCONSISTENT_NAT);
				}

				return ret;
			}
		}

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (++i >= FREE_NID_PAGES)
			break;
	}

	/* resume from here next time to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	scan_curseg_cache(sbi);

	f2fs_up_read(&nm_i->nat_tree_lock);

	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);

	return 0;
}

int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
	int ret;

	mutex_lock(&NM_I(sbi)->build_lock);
	ret = __f2fs_build_free_nids(sbi, sync, mount);
	mutex_unlock(&NM_I(sbi)->build_lock);

	return ret;
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an
 * inode is created.
 */
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (time_to_inject(sbi, FAULT_ALLOC_NID))
		return false;

	spin_lock(&nm_i->nid_list_lock);

	if (unlikely(nm_i->available_nids == 0)) {
		spin_unlock(&nm_i->nid_list_lock);
		return false;
	}

	/* We should not use stale free nids created by f2fs_build_free_nids */
	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		i = list_first_entry(&nm_i->free_nid_list,
					struct free_nid, list);
		*nid = i->nid;

		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
		nm_i->available_nids--;

		update_free_nid_bitmap(sbi, *nid, false, false);

		spin_unlock(&nm_i->nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	/* Let's scan nat pages and their caches to get free nids */
	if (!f2fs_build_free_nids(sbi, true, false))
		goto retry;
	return false;
}

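/*
 * Typical allocation protocol (a sketch; f2fs_new_inode() is one real
 * caller, per the race diagram in add_free_nid() above):
 *
 *	nid_t nid;
 *
 *	if (!f2fs_alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	...use nid for the new node/inode...
 *	if (err)
 *		f2fs_alloc_nid_failed(sbi, nid);
 *	else
 *		f2fs_alloc_nid_done(sbi, nid);
 */
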
/*
 * f2fs_alloc_nid() should be called prior to this function.
 */
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);
	__remove_free_nid(sbi, i, PREALLOC_NID);
	spin_unlock(&nm_i->nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}

/*
 * f2fs_alloc_nid() should be called prior to this function.
 */
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
		__remove_free_nid(sbi, i, PREALLOC_NID);
		need_free = true;
	} else {
		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
	}

	nm_i->available_nids++;

	update_free_nid_bitmap(sbi, nid, true, false);

	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

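/*
 * Shrinker entry point: drop up to @nr_shrink cached free nids, in
 * batches of SHRINK_NID_BATCH_SIZE, but never below MAX_FREE_NIDS.
 * Returns the number of nids actually freed.
 */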
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
		return 0;

	if (!mutex_trylock(&nm_i->build_lock))
		return 0;

	while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
		struct free_nid *i, *next;
		unsigned int batch = SHRINK_NID_BATCH_SIZE;

		spin_lock(&nm_i->nid_list_lock);
		list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
			if (!nr_shrink || !batch ||
				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
				break;
			__remove_free_nid(sbi, i, FREE_NID);
			kmem_cache_free(free_nid_slab, i);
			nr_shrink--;
			batch--;
		}
		spin_unlock(&nm_i->nid_list_lock);
	}

	mutex_unlock(&nm_i->build_lock);

	return nr - nr_shrink;
}

int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	ri = F2FS_INODE(page);
	if (ri->i_inline & F2FS_INLINE_XATTR) {
		if (!f2fs_has_inline_xattr(inode)) {
			set_inode_flag(inode, FI_INLINE_XATTR);
			stat_inc_inline_xattr(inode);
		}
	} else {
		if (f2fs_has_inline_xattr(inode)) {
			stat_dec_inline_xattr(inode);
			clear_inode_flag(inode, FI_INLINE_XATTR);
		}
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(inode, ipage);
	src_addr = inline_xattr_addr(inode, page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	f2fs_update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid;
	struct dnode_of_data dn;
	struct node_info ni;
	struct page *xpage;
	int err;

	if (!prev_xnid)
		goto recover_xnid;

	/* 1: invalidate the previous xattr nid */
	err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
	if (err)
		return err;

	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode, false);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: update xattr nid in inode */
	if (!f2fs_alloc_nid(sbi, &new_xnid))
		return -ENOSPC;

	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
	if (IS_ERR(xpage)) {
		f2fs_alloc_nid_failed(sbi, new_xnid);
		return PTR_ERR(xpage);
	}

	f2fs_alloc_nid_done(sbi, new_xnid);
	f2fs_update_inode_page(inode);

	/* 3: update and set xattr node page dirty */
	if (page) {
		memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
				VALID_XATTR_BLOCK_SIZE);
		set_page_dirty(xpage);
	}
	f2fs_put_page(xpage, 1);

	return 0;
}

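/*
 * Recreate an in-memory inode page for the inode found in @page during
 * recovery: copy the stable head of the on-disk inode (up to i_ext)
 * and reset size/blocks/links so recovery can rebuild the rest.
 */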
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;
	int err;

	err = f2fs_get_node_info(sbi, ino, &old_ni, false);
	if (err)
		return err;

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;
retry:
	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
	if (!ipage) {
		memalloc_retry_wait(GFP_NOFS);
		goto retry;
	}

	/* Should not use this inode from free nid list */
	remove_free_nid(sbi, ino);

	if (!PageUptodate(ipage))
		SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);
	set_cold_node(ipage, false);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
	if (dst->i_inline & F2FS_EXTRA_ATTR) {
		dst->i_extra_isize = src->i_extra_isize;

		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_inline_xattr_size))
			dst->i_inline_xattr_size = src->i_inline_xattr_size;

		if (f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
								i_projid))
			dst->i_projid = src->i_projid;

		if (f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_crtime_nsec)) {
			dst->i_crtime = src->i_crtime;
			dst->i_crtime_nsec = src->i_crtime_nsec;
		}
	}

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = BLKS_PER_SEG(sbi);
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = bio_max_segs(last_offset - i);

		/* readahead node pages */
		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = f2fs_get_tmp_page(sbi, idx);

			if (IS_ERR(page))
				return PTR_ERR(page);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}

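/*
 * Merge every NAT entry cached in the curseg journal back into the nat
 * cache as dirty, emptying the journal so the entries go out through
 * the normal NAT flush path.
 */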
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));

		if (f2fs_check_nid_range(sbi, nid))
			continue;

		raw_ne = nat_in_journal(journal, i);

		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			ne = __alloc_nat_entry(sbi, nid, true);
			__init_nat_entry(nm_i, ne, &raw_ne, true);
		}

		/*
		 * if a free nat in the journal has not been used after the
		 * last checkpoint, we should remove it from the available
		 * nids, since we will add it again later.
		 */
		if (!get_nat_flag(ne, IS_DIRTY) &&
				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
			spin_lock(&nm_i->nid_list_lock);
			nm_i->available_nids--;
			spin_unlock(&nm_i->nid_list_lock);
		}

		__set_nat_cache_dirty(nm_i, ne);
	}
	update_nats_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}

static void __adjust_nat_entry_set(struct nat_entry_set *nes,
						struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}

static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs,
							unsigned int valid)
{
	if (valid == 0) {
		__set_bit_le(nat_ofs, nm_i->empty_nat_bits);
		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
		return;
	}

	__clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
	if (valid == NAT_ENTRY_PER_BLOCK)
		__set_bit_le(nat_ofs, nm_i->full_nat_bits);
	else
		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
}

static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
						struct page *page)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
	struct f2fs_nat_block *nat_blk = page_address(page);
	int valid = 0;
	int i = 0;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return;

	if (nat_index == 0) {
		valid = 1;
		i = 1;
	}
	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
		if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
			valid++;
	}

	__update_nat_bits(nm_i, nat_index, valid);
}

void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs;

	f2fs_down_read(&nm_i->nat_tree_lock);

	for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
		unsigned int valid = 0, nid_ofs = 0;

		/* handle nid zero specially, since it should never be used */
		if (unlikely(nat_ofs == 0)) {
			valid = 1;
			nid_ofs = 1;
		}

		for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) {
			if (!test_bit_le(nid_ofs,
					nm_i->free_nid_bitmap[nat_ofs]))
				valid++;
		}

		__update_nat_bits(nm_i, nat_ofs, valid);
	}

	f2fs_up_read(&nm_i->nat_tree_lock);
}

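/*
 * Flush one set of dirty nat entries, either into the curseg journal
 * (when it still has room and we are not unmounting) or into its NAT
 * page, registering nids that became NULL_ADDR as free along the way.
 */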
static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
		struct nat_entry_set *set, struct cp_control *cpc)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to the journal in the current hot data
	 *     summary block.
	 * #2, flush nat entries to the nat page.
	 */
	if ((cpc->reason & CP_UMOUNT) ||
		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		down_write(&curseg->journal_rwsem);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		if (IS_ERR(page))
			return PTR_ERR(page);

		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);

		if (to_journal) {
			offset = f2fs_lookup_journal_in_cursum(journal,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(journal, offset);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			add_free_nid(sbi, nid, false, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, nid, false, false);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	if (to_journal) {
		up_write(&curseg->journal_rwsem);
	} else {
		update_nat_bits(sbi, start_nid, page);
		f2fs_put_page(page, 1);
	}

	/* Allow dirty nats by node block allocation in write_begin */
	if (!set->entry_cnt) {
		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
		kmem_cache_free(nat_entry_set_slab, set);
	}
	return 0;
}

/*
 * This function is called during the checkpointing process.
 */
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct nat_entry_set *setvec[NAT_VEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);
	int err = 0;

	/*
	 * during unmount, let's flush nat_bits before checking
	 * nat_cnt[DIRTY_NAT].
	 */
	if (cpc->reason & CP_UMOUNT) {
		f2fs_down_write(&nm_i->nat_tree_lock);
		remove_nats_in_journal(sbi);
		f2fs_up_write(&nm_i->nat_tree_lock);
	}

	if (!nm_i->nat_cnt[DIRTY_NAT])
		return 0;

	f2fs_down_write(&nm_i->nat_tree_lock);

	/*
	 * if there is not enough space in the journal to store dirty nat
	 * entries, remove all entries from the journal and merge them
	 * into the nat entry set.
	 */
	if (cpc->reason & CP_UMOUNT ||
		!__has_cursum_space(journal,
			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, NAT_VEC_SIZE, setvec))) {
		unsigned idx;

		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(journal));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list) {
		err = __flush_nat_entry_set(sbi, set, cpc);
		if (err)
			break;
	}

	f2fs_up_write(&nm_i->nat_tree_lock);
	/* Allow dirty nats by node block allocation in write_begin */

	return err;
}

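/*
 * Allocate the full/empty NAT block bitmaps and, if the checkpoint
 * carries CP_NAT_BITS_FLAG, load them from the tail of the checkpoint
 * area and validate them against the checkpoint version.
 */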
static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
	unsigned int i;
	__u64 cp_ver = cur_cp_version(ckpt);
	block_t nat_bits_addr;

	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	nm_i->nat_bits = f2fs_kvzalloc(sbi,
			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
	if (!nm_i->nat_bits)
		return -ENOMEM;

	nm_i->full_nat_bits = nm_i->nat_bits + 8;
	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return 0;

	nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) -
						nm_i->nat_bits_blocks;
	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
		struct page *page;

		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
					page_address(page), F2FS_BLKSIZE);
		f2fs_put_page(page, 1);
	}

	cp_ver |= (cur_cp_crc(ckpt) << 32);
	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
		clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
		f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)",
			cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
		return 0;
	}

	f2fs_notice(sbi, "Found nat_bits in checkpoint");
	return 0;
}

static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i = 0;
	nid_t nid, last_nid;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);

		nid = i * NAT_ENTRY_PER_BLOCK;
		last_nid = nid + NAT_ENTRY_PER_BLOCK;

		spin_lock(&NM_I(sbi)->nid_list_lock);
		for (; nid < last_nid; nid++)
			update_free_nid_bitmap(sbi, nid, true, true);
		spin_unlock(&NM_I(sbi)->nid_list_lock);
	}

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);
	}
}

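/*
 * Initialize the in-memory node manager state from the raw superblock
 * and checkpoint: NAT geometry, nid counters, caches, locks and the
 * NAT version bitmap.
 */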
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs;
	int err;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment so divide by 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;

	/* unused nids: 0, node, meta (and root is counted as a valid node) */
	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
						F2FS_RESERVED_NODE_NUM;
	nm_i->nid_cnt[FREE_NID] = 0;
	nm_i->nid_cnt[PREALLOC_NID] = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
	nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	spin_lock_init(&nm_i->nat_list_lock);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->nid_list_lock);
	init_f2fs_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;

	err = __get_nat_bitmaps(sbi);
	if (err)
		return err;

#ifdef CONFIG_F2FS_CHECK_FS
	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap_mir)
		return -ENOMEM;
#endif

	return 0;
}

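/*
 * Allocate the per-NAT-block free nid bitmaps, the scanned-block
 * bitmap and the per-block free nid counters.
 */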
static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i;

	nm_i->free_nid_bitmap =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_bitmap)
		return -ENOMEM;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
		if (!nm_i->free_nid_bitmap[i])
			return -ENOMEM;
	}

	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
								GFP_KERNEL);
	if (!nm_i->nat_block_bitmap)
		return -ENOMEM;

	nm_i->free_nid_count =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_count)
		return -ENOMEM;
	return 0;
}

int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
							GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	err = init_free_nid_cache(sbi);
	if (err)
		return err;

	/* load free nid status from nat_bits table */
	load_free_nid_bitmap(sbi);

	return f2fs_build_free_nids(sbi, true, true);
}

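/*
 * Tear down the node manager: release every cached free nid, nat entry
 * and nat entry set, then free the bitmaps allocated at build time.
 */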
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	void *vec[NAT_VEC_SIZE];
	struct nat_entry **natvec = (struct nat_entry **)vec;
	struct nat_entry_set **setvec = (struct nat_entry_set **)vec;
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		__remove_free_nid(sbi, i, FREE_NID);
		spin_unlock(&nm_i->nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
	spin_unlock(&nm_i->nid_list_lock);

	/* destroy nat cache */
	f2fs_down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NAT_VEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++) {
			spin_lock(&nm_i->nat_list_lock);
			list_del(&natvec[idx]->list);
			spin_unlock(&nm_i->nat_list_lock);

			__del_from_nat_cache(nm_i, natvec[idx]);
		}
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);

	/* destroy nat set cache */
	nid = 0;
	memset(vec, 0, sizeof(void *) * NAT_VEC_SIZE);
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, NAT_VEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt may be nonzero when a cp error occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	f2fs_up_write(&nm_i->nat_tree_lock);

	kvfree(nm_i->nat_block_bitmap);
	if (nm_i->free_nid_bitmap) {
		int i;

		for (i = 0; i < nm_i->nat_blocks; i++)
			kvfree(nm_i->free_nid_bitmap[i]);
		kvfree(nm_i->free_nid_bitmap);
	}
	kvfree(nm_i->free_nid_count);

	kvfree(nm_i->nat_bitmap);
	kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(nm_i->nat_bitmap_mir);
#endif
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init f2fs_create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;

	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
			sizeof(struct fsync_node_entry));
	if (!fsync_node_entry_slab)
		goto destroy_nat_entry_set;
	return 0;

destroy_nat_entry_set:
	kmem_cache_destroy(nat_entry_set_slab);
destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void f2fs_destroy_node_manager_caches(void)
{
	kmem_cache_destroy(fsync_node_entry_slab);
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}