// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010, 2023 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_discard.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_health.h"

/*
 * Notes on an efficient, low latency fstrim algorithm
 *
 * We need to walk the filesystem free space and issue discards on the free
 * space that meets the search criteria (size and location). We cannot issue
 * discards on extents that might be in use, or are so recently in use they are
 * still marked as busy. To serialise against extent state changes whilst we are
 * gathering extents to trim, we must hold the AGF lock to lock out other
 * allocations and extent free operations that might change extent state.
 *
 * However, we cannot just hold the AGF for the entire AG free space walk whilst
 * we issue discards on each free space that is found. Storage devices can have
 * extremely slow discard implementations (e.g. ceph RBD) and so walking a
 * couple of million free extents and issuing synchronous discards on each
 * extent can take a *long* time. Whilst we are doing this walk, nothing else
 * can access the AGF, and we can stall transactions and hence the log whilst
 * modifications wait for the AGF lock to be released. This can lead to hung
 * tasks kicking the hung task timer and rebooting the system. This is bad.
 *
 * Hence we need to take a leaf from the bulkstat playbook. It takes the AGI
 * lock, gathers a range of inode cluster buffers that are allocated, drops the
 * AGI lock and then reads all the inode cluster buffers and processes them. It
 * loops doing this, using a cursor to keep track of where it is up to in the AG
 * so that each iteration can restart the INOBT lookup from that point.
 *
 * We can't do this exactly with free space - once we drop the AGF lock, the
 * state of the free extent is out of our control and we cannot run a discard
 * safely on it in this situation. Unless, of course, we've marked the free
 * extent as busy and undergoing a discard operation whilst we held the AGF
 * locked.
 *
 * This is exactly how online discard works - free extents are marked busy when
 * they are freed, and once the extent free has been committed to the journal,
 * the busy extent record is marked as "undergoing discard" and the discard is
 * then issued on the free extent. Once the discard completes, the busy extent
 * record is removed and the extent is able to be allocated again.
 *
 * In the context of fstrim, if we find a free extent we need to discard, we
 * don't have to discard it immediately. All we need to do is record that free
 * extent as being busy and under discard, and all the allocation routines will
 * now avoid trying to allocate it. Hence if we mark the extent as busy under
 * the AGF lock, we can safely discard it without holding the AGF lock because
 * nothing will attempt to allocate that free space until the discard completes.
 *
 * This also allows us to issue discards asynchronously like we do with online
 * discard, and so for fast devices fstrim will run much faster as we can have
 * multiple discard operations in flight at once, as well as pipeline the free
 * extent search so that it overlaps with in-flight discard IO.
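 *
 * In outline, each batch of the fstrim loop below (xfs_trim_extents()) does
 * roughly the following:
 *
 *	lock the AGF
 *	gather up to a batch of free extents matching the trim request
 *	mark each gathered extent busy and under discard
 *	unlock the AGF
 *	issue asynchronous discards on the gathered extents
 *	clear the busy extent records when the discard bios complete
 *	restart from the cursor until the requested range is exhausted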
 */

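/* Workqueue used to run busy extent clearing once discard bios complete. */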
struct workqueue_struct *xfs_discard_wq;

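/*
 * Clear the busy extent records for a completed set of discards and free the
 * memory that owns them.
 */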
static void
xfs_discard_endio_work(
	struct work_struct	*work)
{
	struct xfs_busy_extents	*extents =
		container_of(work, struct xfs_busy_extents, endio_work);

	xfs_extent_busy_clear(extents->mount, &extents->extent_list, false);
	kfree(extents->owner);
}

/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock.
 */
static void
xfs_discard_endio(
	struct bio		*bio)
{
	struct xfs_busy_extents	*extents = bio->bi_private;

	INIT_WORK(&extents->endio_work, xfs_discard_endio_work);
	queue_work(xfs_discard_wq, &extents->endio_work);
	bio_put(bio);
}

/*
 * Walk the discard list and issue discards on all the busy extents in the
 * list. We plug and chain the bios so that we only need a single completion
 * call to clear all the busy extents once the discards are complete.
 */
int
xfs_discard_extents(
	struct xfs_mount	*mp,
	struct xfs_busy_extents	*extents)
{
	struct xfs_extent_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	int			error = 0;

	blk_start_plug(&plug);
	list_for_each_entry(busyp, &extents->extent_list, list) {
		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
					 busyp->length);

		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_KERNEL, &bio);
		if (error && error != -EOPNOTSUPP) {
			xfs_info(mp,
	 "discard failed for extent [0x%llx,%u], error %d",
				 (unsigned long long)busyp->bno,
				 busyp->length,
				 error);
			break;
		}
	}

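	/*
	 * If any discard bios were built, chain the completion through the
	 * last bio so a single endio call clears all the busy extents. If no
	 * bios were issued (empty list or the device rejected the discards),
	 * run the completion work directly to clear the busy extents now.
	 */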
	if (bio) {
		bio->bi_private = extents;
		bio->bi_end_io = xfs_discard_endio;
		submit_bio(bio);
	} else {
		xfs_discard_endio_work(&extents->endio_work);
	}
	blk_finish_plug(&plug);

	return error;
}

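/*
 * Cursor for restarting the free space walk between gather/discard batches.
 * A by-bno walk restarts from tcur->start; a by-length walk restarts from the
 * (start, count) record we stopped at in the by-size btree.
 */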
struct xfs_trim_cur {
	xfs_agblock_t	start;
	xfs_extlen_t	count;
	xfs_agblock_t	end;
	xfs_extlen_t	minlen;
	bool		by_bno;
};

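/*
 * Gather up to a batch of free extents from one AG that match the trim
 * request, marking each as busy and under discard while the AGF is held. The
 * AGF is released before returning so the caller can issue the discards
 * without blocking allocation from this AG any longer than necessary.
 */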
static int
xfs_trim_gather_extents(
	struct xfs_perag	*pag,
	struct xfs_trim_cur	*tcur,
	struct xfs_busy_extents	*extents,
	uint64_t		*blocks_trimmed)
{
	struct xfs_mount	*mp = pag->pag_mount;
	struct xfs_trans	*tp;
	struct xfs_btree_cur	*cur;
	struct xfs_buf		*agbp;
	int			error;
	int			i;
	int			batch = 100;

	/*
	 * Force out the log.  This means any transactions that might have freed
	 * space before we take the AGF buffer lock are now on disk, and the
	 * volatile disk cache is flushed.
	 */
	xfs_log_force(mp, XFS_LOG_SYNC);

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
	if (error)
		goto out_trans_cancel;

	if (tcur->by_bno) {
		/* sub-AG discard request always starts at tcur->start */
		cur = xfs_bnobt_init_cursor(mp, tp, agbp, pag);
		error = xfs_alloc_lookup_le(cur, tcur->start, 0, &i);
		if (!error && !i)
			error = xfs_alloc_lookup_ge(cur, tcur->start, 0, &i);
	} else if (tcur->start == 0) {
		/* first time through a by-len starts with max length */
		cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
		error = xfs_alloc_lookup_ge(cur, 0, tcur->count, &i);
	} else {
		/* nth time through a by-len starts where we left off */
		cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
		error = xfs_alloc_lookup_le(cur, tcur->start, tcur->count, &i);
	}
	if (error)
		goto out_del_cursor;
	if (i == 0) {
		/* nothing of that length left in the AG, we are done */
		tcur->count = 0;
		goto out_del_cursor;
	}

	/*
	 * Loop until we are done with all extents that are large
	 * enough to be worth discarding or we hit batch limits.
	 */
	while (i) {
		xfs_agblock_t	fbno;
		xfs_extlen_t	flen;

		error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
		if (error)
			break;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			break;
		}

		if (--batch <= 0) {
			/*
			 * Update the cursor to point at this extent so we
			 * restart the next batch from this extent.
			 */
			tcur->start = fbno;
			tcur->count = flen;
			break;
		}

		/*
		 * If the extent is entirely outside of the range we are
		 * supposed to trim, skip it.  Do not bother to trim down
		 * partially overlapping ranges for now.
		 */
		if (fbno + flen < tcur->start) {
			trace_xfs_discard_exclude(mp, pag->pag_agno, fbno, flen);
			goto next_extent;
		}
		if (fbno > tcur->end) {
			trace_xfs_discard_exclude(mp, pag->pag_agno, fbno, flen);
			if (tcur->by_bno) {
				tcur->count = 0;
				break;
			}
			goto next_extent;
		}

		/* Trim the extent returned to the range we want. */
		if (fbno < tcur->start) {
			flen -= tcur->start - fbno;
			fbno = tcur->start;
		}
		if (fbno + flen > tcur->end + 1)
			flen = tcur->end - fbno + 1;

		/* Too small?  Give up. */
		if (flen < tcur->minlen) {
			trace_xfs_discard_toosmall(mp, pag->pag_agno, fbno, flen);
			if (tcur->by_bno)
				goto next_extent;
			tcur->count = 0;
			break;
		}

		/*
		 * If any blocks in the range are still busy, skip the
		 * discard and try again the next time.
		 */
		if (xfs_extent_busy_search(mp, pag, fbno, flen)) {
			trace_xfs_discard_busy(mp, pag->pag_agno, fbno, flen);
			goto next_extent;
		}

		xfs_extent_busy_insert_discard(pag, fbno, flen,
				&extents->extent_list);
		*blocks_trimmed += flen;
next_extent:
		if (tcur->by_bno)
			error = xfs_btree_increment(cur, 0, &i);
		else
			error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			break;

		/*
		 * If there are no more records in the tree, we are done. Set
		 * the cursor block count to 0 to indicate to the caller that
		 * there are no more extents to search.
		 */
		if (i == 0)
			tcur->count = 0;
	}

	/*
	 * If there was an error, release all the gathered busy extents because
	 * we aren't going to issue a discard on them any more.
	 */
	if (error)
		xfs_extent_busy_clear(mp, &extents->extent_list, false);
out_del_cursor:
	xfs_btree_del_cursor(cur, error);
out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}

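/*
 * Abort the fstrim walk if the calling process has received a fatal signal or
 * the filesystem is being frozen.
 */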
static bool
xfs_trim_should_stop(void)
{
	return fatal_signal_pending(current) || freezing(current);
}

/*
 * Iterate the free list gathering extents and discarding them. We need a
 * cursor for the repeated iteration of the gather/discard loop, so use the
 * longest extent we found in the last batch as the key to start the next.
 */
static int
xfs_trim_extents(
	struct xfs_perag	*pag,
	xfs_agblock_t		start,
	xfs_agblock_t		end,
	xfs_extlen_t		minlen,
	uint64_t		*blocks_trimmed)
{
	struct xfs_trim_cur	tcur = {
		.start		= start,
		.count		= pag->pagf_longest,
		.end		= end,
		.minlen		= minlen,
	};
	int			error = 0;

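	/*
	 * A sub-AG request has to be walked in block number order so the
	 * range bounds can be applied; a whole-AG trim walks the by-size
	 * btree from the longest free extent downwards so we can stop as
	 * soon as extents drop below minlen.
	 */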
	if (start != 0 || end != pag->block_count)
		tcur.by_bno = true;

	do {
		struct xfs_busy_extents	*extents;

		extents = kzalloc(sizeof(*extents), GFP_KERNEL);
		if (!extents) {
			error = -ENOMEM;
			break;
		}

		extents->mount = pag->pag_mount;
		extents->owner = extents;
		INIT_LIST_HEAD(&extents->extent_list);

		error = xfs_trim_gather_extents(pag, &tcur, extents,
				blocks_trimmed);
		if (error) {
			kfree(extents);
			break;
		}

		/*
		 * We hand the extent list to the discard function here so the
		 * discarded extents can be removed from the busy extent list.
		 * This allows the discards to run asynchronously with gathering
		 * the next round of extents to discard.
		 *
		 * However, we must ensure that we do not reference the extent
		 * list after this function call, as it may have been freed by
		 * the time control returns to us.
		 */
		error = xfs_discard_extents(pag->pag_mount, extents);
		if (error)
			break;

		if (xfs_trim_should_stop())
			break;

	} while (tcur.count != 0);

	return error;
}

/*
 * Trim a range of the filesystem.
 *
 * Note: the parameters passed from userspace are byte ranges into the
 * filesystem which do not match the format we use for filesystem block
 * addressing. FSB addressing is sparse (AGNO|AGBNO), while the incoming format
 * is a linear address range. Hence we need to use DADDR based conversions and
 * comparisons for determining the correct offset and regions to trim.
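 *
 * For reference, userspace normally reaches this via the generic FITRIM
 * ioctl, along the lines of (error handling omitted):
 *
 *	struct fstrim_range r = {
 *		.start	= 0,
 *		.len	= ULLONG_MAX,
 *		.minlen	= 0,
 *	};
 *	ioctl(fd, FITRIM, &r);
 *
 * On return, r.len holds the number of bytes we actually trimmed.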
 */
int
xfs_ioc_trim(
	struct xfs_mount		*mp,
	struct fstrim_range __user	*urange)
{
	struct xfs_perag	*pag;
	unsigned int		granularity =
		bdev_discard_granularity(mp->m_ddev_targp->bt_bdev);
	struct fstrim_range	range;
	xfs_daddr_t		start, end;
	xfs_extlen_t		minlen;
	xfs_agnumber_t		start_agno, end_agno;
	xfs_agblock_t		start_agbno, end_agbno;
	uint64_t		blocks_trimmed = 0;
	int			error, last_error = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!bdev_max_discard_sectors(mp->m_ddev_targp->bt_bdev))
		return -EOPNOTSUPP;

	/*
	 * We haven't recovered the log, so we cannot use our bnobt-guided
	 * storage zapping commands.
	 */
	if (xfs_has_norecovery(mp))
		return -EROFS;

	if (copy_from_user(&range, urange, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u64, granularity, range.minlen);
	minlen = XFS_B_TO_FSB(mp, range.minlen);

	/*
	 * Truncating down the len isn't actually quite correct, but using
	 * BBTOB would mean we trivially get overflows for values
	 * of ULLONG_MAX or slightly lower.  And ULLONG_MAX is the default
	 * used by the fstrim application.  In the end it really doesn't
	 * matter as trimming blocks is an advisory interface.
	 */
	if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
	    range.minlen > XFS_FSB_TO_B(mp, mp->m_ag_max_usable) ||
	    range.len < mp->m_sb.sb_blocksize)
		return -EINVAL;

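	/*
	 * Convert the byte range to an inclusive range of 512 byte basic
	 * blocks (daddrs), clamped to the end of the data device, and then
	 * split it into per-AG (agno, agbno) coordinates.
	 */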
	start = BTOBB(range.start);
	end = min_t(xfs_daddr_t, start + BTOBBT(range.len),
		    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) - 1;

	start_agno = xfs_daddr_to_agno(mp, start);
	start_agbno = xfs_daddr_to_agbno(mp, start);
	end_agno = xfs_daddr_to_agno(mp, end);
	end_agbno = xfs_daddr_to_agbno(mp, end);

	for_each_perag_range(mp, start_agno, end_agno, pag) {
		xfs_agblock_t	agend = pag->block_count;

		if (pag->pag_agno == end_agno)
			agend = end_agbno;
		error = xfs_trim_extents(pag, start_agbno, agend, minlen,
				&blocks_trimmed);
		if (error)
			last_error = error;

		if (xfs_trim_should_stop()) {
			xfs_perag_rele(pag);
			break;
		}
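		/* All AGs after the first are trimmed from their first block. */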
		start_agbno = 0;
	}

	if (last_error)
		return last_error;

	range.len = XFS_FSB_TO_B(mp, blocks_trimmed);
	if (copy_to_user(urange, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}