// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include <linux/backing-dev.h>
#include <linux/dax.h>

#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_log_recover.h"
#include "xfs_log_priv.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_ag.h"
#include "xfs_buf_mem.h"

struct kmem_cache *xfs_buf_cache;

/*
 * Locking orders
 *
 * xfs_buf_ioacct_inc:
 * xfs_buf_ioacct_dec:
 *	b_sema (caller holds)
 *	  b_lock
 *
 * xfs_buf_stale:
 *	b_sema (caller holds)
 *	  b_lock
 *	    lru_lock
 *
 * xfs_buf_rele:
 *	b_lock
 *	  pag_buf_lock
 *	    lru_lock
 *
 * xfs_buftarg_drain_rele
 *	lru_lock
 *	  b_lock (trylock due to inversion)
 *
 * xfs_buftarg_isolate
 *	lru_lock
 *	  b_lock (trylock due to inversion)
 */

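/*
 * Illustrative sketch (not built): the b_sema -> b_lock -> lru_lock nesting
 * documented above, reduced from xfs_buf_stale() below. This is not an
 * additional interface; b_sema is held by the caller, b_lock nests inside
 * it, and the lru_lock is taken internally by the list_lru call.
 */
#if 0
static void example_lock_nesting(struct xfs_buf *bp)
{
	ASSERT(xfs_buf_islocked(bp));	/* b_sema held by the caller */
	spin_lock(&bp->b_lock);		/* buffer state lock nests inside */
	/* list_lru_del_obj() takes the innermost lru_lock itself */
	list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru);
	spin_unlock(&bp->b_lock);
}
#endif
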
static int __xfs_buf_submit(struct xfs_buf *bp, bool wait);

static inline int
xfs_buf_submit(
	struct xfs_buf		*bp)
{
	return __xfs_buf_submit(bp, !(bp->b_flags & XBF_ASYNC));
}

static inline bool xfs_buf_is_uncached(struct xfs_buf *bp)
{
	return bp->b_rhash_key == XFS_BUF_DADDR_NULL;
}

static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * b_addr is null if the buffer is not mapped, but the code is clever
	 * enough to know it doesn't have to map a single page, so the check has
	 * to be both for b_addr and bp->b_page_count > 1.
	 */
	return bp->b_addr && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE);
}

/*
 * Bump the I/O in flight count on the buftarg if we haven't yet done so for
 * this buffer. The count is incremented once per buffer (per hold cycle)
 * because the corresponding decrement is deferred to buffer release. Buffers
 * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
 * tracking adds unnecessary overhead. This is used for synchronization purposes
 * with unmount (see xfs_buftarg_drain()), so all we really need is a count of
 * in-flight buffers.
 *
 * Buffers that are never released (e.g., superblock, iclog buffers) must set
 * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
 * never reaches zero and unmount hangs indefinitely.
 */
static inline void
xfs_buf_ioacct_inc(
	struct xfs_buf	*bp)
{
	if (bp->b_flags & XBF_NO_IOACCT)
		return;

	ASSERT(bp->b_flags & XBF_ASYNC);
	spin_lock(&bp->b_lock);
	if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
		bp->b_state |= XFS_BSTATE_IN_FLIGHT;
		percpu_counter_inc(&bp->b_target->bt_io_count);
	}
	spin_unlock(&bp->b_lock);
}
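
/*
 * Illustrative sketch (not built): a hypothetical caller that keeps a buffer
 * held for the lifetime of the mount opts out of I/O accounting as required
 * by the comment above. The uncached allocation and the XBF_NO_IOACCT flag
 * are real interfaces from this file; the one-block length is made up for
 * the example.
 */
#if 0
static int example_hold_forever(struct xfs_buftarg *btp, struct xfs_buf **bpp)
{
	struct xfs_buf	*bp;
	int		error;

	error = xfs_buf_get_uncached(btp, 1, XBF_NO_IOACCT, &bp);
	if (error)
		return error;

	/* bp is never released, so it must not count towards bt_io_count */
	*bpp = bp;
	return 0;
}
#endif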

/*
 * Clear the in-flight state on a buffer about to be released to the LRU or
 * freed and unaccount from the buftarg.
 */
static inline void
__xfs_buf_ioacct_dec(
	struct xfs_buf	*bp)
{
	lockdep_assert_held(&bp->b_lock);

	if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
		bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
		percpu_counter_dec(&bp->b_target->bt_io_count);
	}
}

static inline void
xfs_buf_ioacct_dec(
	struct xfs_buf	*bp)
{
	spin_lock(&bp->b_lock);
	__xfs_buf_ioacct_dec(bp);
	spin_unlock(&bp->b_lock);
}

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;

	/*
	 * Clear the delwri status so that a delwri queue walker will not
	 * flush this buffer to disk now that it is stale. The delwri queue has
	 * a reference to the buffer, so this is safe to do.
	 */
	bp->b_flags &= ~_XBF_DELWRI_Q;

	/*
	 * Once the buffer is marked stale and unlocked, a subsequent lookup
	 * could reset b_flags. There is no guarantee that the buffer is
	 * unaccounted (released to LRU) before that occurs. Drop in-flight
	 * status now to preserve accounting consistency.
	 */
	spin_lock(&bp->b_lock);
	__xfs_buf_ioacct_dec(bp);

	atomic_set(&bp->b_lru_ref, 0);
	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
	    (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru)))
		atomic_dec(&bp->b_hold);

	ASSERT(atomic_read(&bp->b_hold) >= 1);
	spin_unlock(&bp->b_lock);
}

static int
xfs_buf_get_maps(
	struct xfs_buf		*bp,
	int			map_count)
{
	ASSERT(bp->b_maps == NULL);
	bp->b_map_count = map_count;

	if (map_count == 1) {
		bp->b_maps = &bp->__b_map;
		return 0;
	}

	bp->b_maps = kzalloc(map_count * sizeof(struct xfs_buf_map),
			GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
	if (!bp->b_maps)
		return -ENOMEM;
	return 0;
}
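
/*
 * Illustrative sketch (not built): a discontiguous buffer is described by
 * more than one xfs_buf_map entry, one per extent, which is why the map
 * array above may need to be allocated out of line. The daddr/length values
 * below are made up; only the bm_bn/bm_len fields used by _xfs_buf_alloc()
 * are shown.
 */
#if 0
static void example_two_extent_map(void)
{
	struct xfs_buf_map map[2] = {
		{ .bm_bn = 128, .bm_len = 8 },	/* first extent */
		{ .bm_bn = 512, .bm_len = 8 },	/* second extent */
	};

	/* a caller would pass (map, 2) to xfs_buf_get_map()/xfs_buf_read_map() */
}
#endif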

/*
 *	Frees the out-of-line map array if one was allocated.
 */
static void
xfs_buf_free_maps(
	struct xfs_buf	*bp)
{
	if (bp->b_maps != &bp->__b_map) {
		kfree(bp->b_maps);
		bp->b_maps = NULL;
	}
}

static int
_xfs_buf_alloc(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	struct xfs_buf		*bp;
	int			error;
	int			i;

	*bpp = NULL;
	bp = kmem_cache_zalloc(xfs_buf_cache,
			GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);

	/*
	 * We don't want certain flags to appear in b_flags unless they are
	 * specifically set by later operations on the buffer.
	 */
	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	INIT_LIST_HEAD(&bp->b_li_list);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	spin_lock_init(&bp->b_lock);
	bp->b_target = target;
	bp->b_mount = target->bt_mount;
	bp->b_flags = flags;

	/*
	 * Set length and io_length to the same value initially.
	 * I/O routines should use io_length, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	error = xfs_buf_get_maps(bp, nmaps);
	if (error)  {
		kmem_cache_free(xfs_buf_cache, bp);
		return error;
	}

	bp->b_rhash_key = map[0].bm_bn;
	bp->b_length = 0;
	for (i = 0; i < nmaps; i++) {
		bp->b_maps[i].bm_bn = map[i].bm_bn;
		bp->b_maps[i].bm_len = map[i].bm_len;
		bp->b_length += map[i].bm_len;
	}

	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(bp->b_mount, xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);

	*bpp = bp;
	return 0;
}

static void
xfs_buf_free_pages(
	struct xfs_buf	*bp)
{
	uint		i;

	ASSERT(bp->b_flags & _XBF_PAGES);

	if (xfs_buf_is_vmapped(bp))
		vm_unmap_ram(bp->b_addr, bp->b_page_count);

	for (i = 0; i < bp->b_page_count; i++) {
		if (bp->b_pages[i])
			__free_page(bp->b_pages[i]);
	}
	mm_account_reclaimed_pages(bp->b_page_count);

	if (bp->b_pages != bp->b_page_array)
		kfree(bp->b_pages);
	bp->b_pages = NULL;
	bp->b_flags &= ~_XBF_PAGES;
}

static void
xfs_buf_free_callback(
	struct callback_head	*cb)
{
	struct xfs_buf		*bp = container_of(cb, struct xfs_buf, b_rcu);

	xfs_buf_free_maps(bp);
	kmem_cache_free(xfs_buf_cache, bp);
}

static void
xfs_buf_free(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (xfs_buftarg_is_mem(bp->b_target))
		xmbuf_unmap_page(bp);
	else if (bp->b_flags & _XBF_PAGES)
		xfs_buf_free_pages(bp);
	else if (bp->b_flags & _XBF_KMEM)
		kfree(bp->b_addr);

	call_rcu(&bp->b_rcu, xfs_buf_free_callback);
}

static int
xfs_buf_alloc_kmem(
	struct xfs_buf	*bp,
	xfs_buf_flags_t	flags)
{
	gfp_t		gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL;
	size_t		size = BBTOB(bp->b_length);

	/* Assure zeroed buffer for non-read cases. */
	if (!(flags & XBF_READ))
		gfp_mask |= __GFP_ZERO;

	bp->b_addr = kmalloc(size, gfp_mask);
	if (!bp->b_addr)
		return -ENOMEM;

	if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
	    ((unsigned long)bp->b_addr & PAGE_MASK)) {
		/* b_addr spans two pages - use alloc_page instead */
		kfree(bp->b_addr);
		bp->b_addr = NULL;
		return -ENOMEM;
	}
	bp->b_offset = offset_in_page(bp->b_addr);
	bp->b_pages = bp->b_page_array;
	bp->b_pages[0] = kmem_to_page(bp->b_addr);
	bp->b_page_count = 1;
	bp->b_flags |= _XBF_KMEM;
	return 0;
}

static int
xfs_buf_alloc_pages(
	struct xfs_buf	*bp,
	xfs_buf_flags_t	flags)
{
	gfp_t		gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOWARN;
	long		filled = 0;

	if (flags & XBF_READ_AHEAD)
		gfp_mask |= __GFP_NORETRY;

	/* Make sure that we have a page list */
	bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
	if (bp->b_page_count <= XB_PAGES) {
		bp->b_pages = bp->b_page_array;
	} else {
		bp->b_pages = kzalloc(sizeof(struct page *) * bp->b_page_count,
					gfp_mask);
		if (!bp->b_pages)
			return -ENOMEM;
	}
	bp->b_flags |= _XBF_PAGES;

	/* Assure zeroed buffer for non-read cases. */
	if (!(flags & XBF_READ))
		gfp_mask |= __GFP_ZERO;

	/*
	 * Bulk filling of pages can take multiple calls. Not filling the entire
	 * array is not an allocation failure, so don't back off if we get at
	 * least one extra page.
	 */
	for (;;) {
		long	last = filled;

		filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count,
						bp->b_pages);
		if (filled == bp->b_page_count) {
			XFS_STATS_INC(bp->b_mount, xb_page_found);
			break;
		}

		if (filled != last)
			continue;

		if (flags & XBF_READ_AHEAD) {
			xfs_buf_free_pages(bp);
			return -ENOMEM;
		}

		XFS_STATS_INC(bp->b_mount, xb_page_retries);
		memalloc_retry_wait(gfp_mask);
	}
	return 0;
}

/*
 *	Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	struct xfs_buf		*bp,
	xfs_buf_flags_t		flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]);
	} else if (flags & XBF_UNMAPPED) {
		bp->b_addr = NULL;
	} else {
		int retried = 0;
		unsigned nofs_flag;

		/*
		 * vm_map_ram() will allocate auxiliary structures (e.g.
		 * pagetables) with GFP_KERNEL, yet we are often under a scoped
		 * nofs context here. Mixing GFP_KERNEL with GFP_NOFS allocations
		 * from the same call site that can be run from both above and
		 * below memory reclaim causes lockdep false positives. Hence we
		 * always need to force this allocation to nofs context because
		 * we can't pass __GFP_NOLOCKDEP down to auxiliary structures to
		 * prevent false positive lockdep reports.
		 *
		 * XXX(dgc): I think dquot reclaim is the only place we can get
		 * to this function from memory reclaim context now. If we fix
		 * that like we've fixed inode reclaim to avoid writeback from
		 * reclaim, this nofs wrapping can go away.
		 */
		nofs_flag = memalloc_nofs_save();
		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);
		memalloc_nofs_restore(nofs_flag);

		if (!bp->b_addr)
			return -ENOMEM;
	}

	return 0;
}

/*
 *	Finding and Reading Buffers
 */
static int
_xfs_buf_obj_cmp(
	struct rhashtable_compare_arg	*arg,
	const void			*obj)
{
	const struct xfs_buf_map	*map = arg->key;
	const struct xfs_buf		*bp = obj;

	/*
	 * The key hashing in the lookup path depends on the key being the
	 * first element of the compare_arg, make sure to assert this.
	 */
	BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0);

	if (bp->b_rhash_key != map->bm_bn)
		return 1;

	if (unlikely(bp->b_length != map->bm_len)) {
		/*
		 * found a block number match. If the range doesn't
		 * match, the only way this is allowed is if the buffer
		 * in the cache is stale and the transaction that made
		 * it stale has not yet committed. i.e. we are
		 * reallocating a busy extent. Skip this buffer and
		 * continue searching for an exact match.
		 *
		 * Note: If we're scanning for incore buffers to stale, don't
		 * complain if we find non-stale buffers.
		 */
		if (!(map->bm_flags & XBM_LIVESCAN))
			ASSERT(bp->b_flags & XBF_STALE);
		return 1;
	}
	return 0;
}

static const struct rhashtable_params xfs_buf_hash_params = {
	.min_size		= 32,	/* empty AGs have minimal footprint */
	.nelem_hint		= 16,
	.key_len		= sizeof(xfs_daddr_t),
	.key_offset		= offsetof(struct xfs_buf, b_rhash_key),
	.head_offset		= offsetof(struct xfs_buf, b_rhash_head),
	.automatic_shrinking	= true,
	.obj_cmpfn		= _xfs_buf_obj_cmp,
};

int
xfs_buf_cache_init(
	struct xfs_buf_cache	*bch)
{
	spin_lock_init(&bch->bc_lock);
	return rhashtable_init(&bch->bc_hash, &xfs_buf_hash_params);
}

void
xfs_buf_cache_destroy(
	struct xfs_buf_cache	*bch)
{
	rhashtable_destroy(&bch->bc_hash);
}

static int
xfs_buf_map_verify(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map)
{
	xfs_daddr_t		eofs;

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(BBTOB(map->bm_len) < btp->bt_meta_sectorsize));
	ASSERT(!(BBTOB(map->bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));

	/*
	 * Corrupted block numbers can get through to here, unfortunately, so we
	 * have to check that the buffer falls within the filesystem bounds.
	 */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
	if (map->bm_bn < 0 || map->bm_bn >= eofs) {
		xfs_alert(btp->bt_mount,
			  "%s: daddr 0x%llx out of range, EOFS 0x%llx",
			  __func__, map->bm_bn, eofs);
		WARN_ON(1);
		return -EFSCORRUPTED;
	}
	return 0;
}

static int
xfs_buf_find_lock(
	struct xfs_buf          *bp,
	xfs_buf_flags_t		flags)
{
	if (flags & XBF_TRYLOCK) {
		if (!xfs_buf_trylock(bp)) {
			XFS_STATS_INC(bp->b_mount, xb_busy_locked);
			return -EAGAIN;
		}
	} else {
		xfs_buf_lock(bp);
		XFS_STATS_INC(bp->b_mount, xb_get_locked_waited);
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		if (flags & XBF_LIVESCAN) {
			xfs_buf_unlock(bp);
			return -ENOENT;
		}
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
		bp->b_ops = NULL;
	}
	return 0;
}

static inline int
xfs_buf_lookup(
	struct xfs_buf_cache	*bch,
	struct xfs_buf_map	*map,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	struct xfs_buf          *bp;
	int			error;

	rcu_read_lock();
	bp = rhashtable_lookup(&bch->bc_hash, map, xfs_buf_hash_params);
	if (!bp || !atomic_inc_not_zero(&bp->b_hold)) {
		rcu_read_unlock();
		return -ENOENT;
	}
	rcu_read_unlock();

	error = xfs_buf_find_lock(bp, flags);
	if (error) {
		xfs_buf_rele(bp);
		return error;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	*bpp = bp;
	return 0;
}

/*
 * Insert the new_bp into the hash table. This consumes the perag reference
 * taken for the lookup regardless of the result of the insert.
 */
static int
xfs_buf_find_insert(
	struct xfs_buftarg	*btp,
	struct xfs_buf_cache	*bch,
	struct xfs_perag	*pag,
	struct xfs_buf_map	*cmap,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	struct xfs_buf		*new_bp;
	struct xfs_buf		*bp;
	int			error;

	error = _xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
	if (error)
		goto out_drop_pag;

	if (xfs_buftarg_is_mem(new_bp->b_target)) {
		error = xmbuf_map_page(new_bp);
	} else if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
		   xfs_buf_alloc_kmem(new_bp, flags) < 0) {
		/*
		 * For buffers that fit entirely within a single page, first
		 * attempt to allocate the memory from the heap to minimise
		 * memory usage. If we can't get heap memory for these small
		 * buffers, we fall back to using the page allocator.
		 */
		error = xfs_buf_alloc_pages(new_bp, flags);
	}
	if (error)
		goto out_free_buf;

	spin_lock(&bch->bc_lock);
	bp = rhashtable_lookup_get_insert_fast(&bch->bc_hash,
			&new_bp->b_rhash_head, xfs_buf_hash_params);
	if (IS_ERR(bp)) {
		error = PTR_ERR(bp);
		spin_unlock(&bch->bc_lock);
		goto out_free_buf;
	}
	if (bp) {
		/* found an existing buffer */
		atomic_inc(&bp->b_hold);
		spin_unlock(&bch->bc_lock);
		error = xfs_buf_find_lock(bp, flags);
		if (error)
			xfs_buf_rele(bp);
		else
			*bpp = bp;
		goto out_free_buf;
	}

	/* The new buffer keeps the perag reference until it is freed. */
	new_bp->b_pag = pag;
	spin_unlock(&bch->bc_lock);
	*bpp = new_bp;
	return 0;

out_free_buf:
	xfs_buf_free(new_bp);
out_drop_pag:
	if (pag)
		xfs_perag_put(pag);
	return error;
}

static inline struct xfs_perag *
xfs_buftarg_get_pag(
	struct xfs_buftarg		*btp,
	const struct xfs_buf_map	*map)
{
	struct xfs_mount		*mp = btp->bt_mount;

	if (xfs_buftarg_is_mem(btp))
		return NULL;
	return xfs_perag_get(mp, xfs_daddr_to_agno(mp, map->bm_bn));
}

static inline struct xfs_buf_cache *
xfs_buftarg_buf_cache(
	struct xfs_buftarg		*btp,
	struct xfs_perag		*pag)
{
	if (pag)
		return &pag->pag_bcache;
	return btp->bt_cache;
}

/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
int
xfs_buf_get_map(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	struct xfs_buf_cache	*bch;
	struct xfs_perag	*pag;
	struct xfs_buf		*bp = NULL;
	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
	int			error;
	int			i;

	if (flags & XBF_LIVESCAN)
		cmap.bm_flags |= XBM_LIVESCAN;
	for (i = 0; i < nmaps; i++)
		cmap.bm_len += map[i].bm_len;

	error = xfs_buf_map_verify(btp, &cmap);
	if (error)
		return error;

	pag = xfs_buftarg_get_pag(btp, &cmap);
	bch = xfs_buftarg_buf_cache(btp, pag);

	error = xfs_buf_lookup(bch, &cmap, flags, &bp);
	if (error && error != -ENOENT)
		goto out_put_perag;

	/* cache hits always outnumber misses by at least 10:1 */
	if (unlikely(!bp)) {
		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);

		if (flags & XBF_INCORE)
			goto out_put_perag;

		/* xfs_buf_find_insert() consumes the perag reference. */
		error = xfs_buf_find_insert(btp, bch, pag, &cmap, map, nmaps,
				flags, &bp);
		if (error)
			return error;
	} else {
		XFS_STATS_INC(btp->bt_mount, xb_get_locked);
		if (pag)
			xfs_perag_put(pag);
	}

	/* We do not hold a perag reference anymore. */
	if (!bp->b_addr) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn_ratelimited(btp->bt_mount,
				"%s: failed to map %u pages", __func__,
				bp->b_page_count);
			xfs_buf_relse(bp);
			return error;
		}
	}

	/*
	 * Clear b_error if this is a lookup from a caller that doesn't expect
	 * valid data to be found in the buffer.
	 */
	if (!(flags & XBF_READ))
		xfs_buf_ioerror(bp, 0);

	XFS_STATS_INC(btp->bt_mount, xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
	*bpp = bp;
	return 0;

out_put_perag:
	if (pag)
		xfs_perag_put(pag);
	return error;
}

int
_xfs_buf_read(
	struct xfs_buf		*bp,
	xfs_buf_flags_t		flags)
{
	ASSERT(!(flags & XBF_WRITE));
	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	return xfs_buf_submit(bp);
}

/*
 * Reverify a buffer found in cache without an attached ->b_ops.
 *
 * If the caller passed an ops structure and the buffer doesn't have ops
 * assigned, set the ops and use it to verify the contents. If verification
 * fails, clear XBF_DONE. We assume the buffer has no recorded errors and is
 * already in XBF_DONE state on entry.
 *
 * Under normal operations, every in-core buffer is verified on read I/O
 * completion. There are two scenarios that can lead to in-core buffers without
 * an assigned ->b_ops. The first is during log recovery of buffers on a V4
 * filesystem, though these buffers are purged at the end of recovery. The
 * other is online repair, which intentionally reads with a NULL buffer ops to
 * run several verifiers across an in-core buffer in order to establish buffer
 * type.  If repair can't establish that, the buffer will be left in memory
 * with NULL buffer ops.
 */
int
xfs_buf_reverify(
	struct xfs_buf		*bp,
	const struct xfs_buf_ops *ops)
{
	ASSERT(bp->b_flags & XBF_DONE);
	ASSERT(bp->b_error == 0);

	if (!ops || bp->b_ops)
		return 0;

	bp->b_ops = ops;
	bp->b_ops->verify_read(bp);
	if (bp->b_error)
		bp->b_flags &= ~XBF_DONE;
	return bp->b_error;
}
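
/*
 * Illustrative sketch (not built): the probing pattern described above -
 * read with NULL ops so the raw contents land in the cache, then try a
 * candidate verifier against the in-core buffer. "candidate" is a
 * placeholder, not a specific XFS ops structure.
 */
#if 0
static int example_probe_buffer_type(struct xfs_buftarg *btp,
		struct xfs_buf_map *map, const struct xfs_buf_ops *candidate)
{
	struct xfs_buf	*bp;
	int		error;

	/* read with no verifier attached */
	error = xfs_buf_read_map(btp, map, 1, 0, &bp, NULL, __this_address);
	if (error)
		return error;

	/* now run the candidate verifier against the in-core buffer */
	error = xfs_buf_reverify(bp, candidate);
	xfs_buf_relse(bp);
	return error;
}
#endif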

int
xfs_buf_read_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops,
	xfs_failaddr_t		fa)
{
	struct xfs_buf		*bp;
	int			error;

	flags |= XBF_READ;
	*bpp = NULL;

	error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
	if (error)
		return error;

	trace_xfs_buf_read(bp, flags, _RET_IP_);

	if (!(bp->b_flags & XBF_DONE)) {
		/* Initiate the buffer read and wait. */
		XFS_STATS_INC(target->bt_mount, xb_get_read);
		bp->b_ops = ops;
		error = _xfs_buf_read(bp, flags);

		/* Readahead iodone already dropped the buffer, so exit. */
		if (flags & XBF_ASYNC)
			return 0;
	} else {
		/* Buffer already read; all we need to do is check it. */
		error = xfs_buf_reverify(bp, ops);

		/* Readahead already finished; drop the buffer and exit. */
		if (flags & XBF_ASYNC) {
			xfs_buf_relse(bp);
			return 0;
		}

		/* We do not want read in the flags */
		bp->b_flags &= ~XBF_READ;
		ASSERT(bp->b_ops != NULL || ops == NULL);
	}

	/*
	 * If we've had a read error, then the contents of the buffer are
	 * invalid and should not be used. To ensure that a followup read tries
	 * to pull the buffer from disk again, we clear the XBF_DONE flag and
	 * mark the buffer stale. This ensures that anyone who has a current
	 * reference to the buffer will interpret its contents correctly and
	 * future cache lookups will also treat it as an empty, uninitialised
	 * buffer.
	 */
	if (error) {
		/*
		 * Check against log shutdown for error reporting because
		 * metadata writeback may require a read first and we need to
		 * report errors in metadata writeback until the log is shut
		 * down. High level transaction read functions already check
		 * against mount shutdown, anyway, so we only need to be
		 * concerned about low level IO interactions here.
		 */
		if (!xlog_is_shutdown(target->bt_mount->m_log))
			xfs_buf_ioerror_alert(bp, fa);

		bp->b_flags &= ~XBF_DONE;
		xfs_buf_stale(bp);
		xfs_buf_relse(bp);

		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}

	*bpp = bp;
	return 0;
}
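
/*
 * Illustrative sketch (not built): a typical synchronous metadata read.
 * The ops pointer stands in for whichever xfs_buf_ops the caller's block
 * type uses; the error handling mirrors what callers of xfs_buf_read_map()
 * are expected to do.
 */
#if 0
static int example_read_block(struct xfs_buftarg *btp, xfs_daddr_t daddr,
		size_t numblks, const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, daddr, numblks);
	struct xfs_buf	*bp;
	int		error;

	error = xfs_buf_read_map(btp, &map, 1, 0, &bp, ops, __this_address);
	if (error)
		return error;	/* a bad CRC comes back as -EFSCORRUPTED */

	/* ... use bp->b_addr ... */
	xfs_buf_relse(bp);
	return 0;
}
#endif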

/*
 *	If we are not low on memory then do the readahead in a deadlock
 *	safe manner.
 */
void
xfs_buf_readahead_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	/*
	 * Currently we don't have a good means or justification for performing
	 * xmbuf_map_page asynchronously, so we don't do readahead.
	 */
	if (xfs_buftarg_is_mem(target))
		return;

	xfs_buf_read_map(target, map, nmaps,
		     XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops,
		     __this_address);
}

/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing. Uncached buffers always have
 * a cache index of XFS_BUF_DADDR_NULL so we can easily determine if the buffer
 * is cached or uncached during fault diagnosis.
 */
int
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	*bpp = NULL;

	error = xfs_buf_get_uncached(target, numblks, flags, &bp);
	if (error)
		return error;

	/* set up the buffer for a read IO */
	ASSERT(bp->b_map_count == 1);
	bp->b_rhash_key = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_bn = daddr;
	bp->b_flags |= XBF_READ;
	bp->b_ops = ops;

	xfs_buf_submit(bp);
	if (bp->b_error) {
		error = bp->b_error;
		xfs_buf_relse(bp);
		return error;
	}

	*bpp = bp;
	return 0;
}

int
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	int			error;
	struct xfs_buf		*bp;
	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

	*bpp = NULL;

	/* flags might contain irrelevant bits, pass only what we care about */
	error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
	if (error)
		return error;

	if (xfs_buftarg_is_mem(bp->b_target))
		error = xmbuf_map_page(bp);
	else
		error = xfs_buf_alloc_pages(bp, flags);
	if (error)
		goto fail_free_buf;

	error = _xfs_buf_map_pages(bp, 0);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages", __func__);
		goto fail_free_buf;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	*bpp = bp;
	return 0;

fail_free_buf:
	xfs_buf_free(bp);
	return error;
}

/*
 *	Increment reference count on buffer, to hold the buffer concurrently
 *	with another thread which may release (free) the buffer asynchronously.
 *	Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

static void
xfs_buf_rele_uncached(
	struct xfs_buf		*bp)
{
	ASSERT(list_empty(&bp->b_lru));
	if (atomic_dec_and_test(&bp->b_hold)) {
		xfs_buf_ioacct_dec(bp);
		xfs_buf_free(bp);
	}
}

static void
xfs_buf_rele_cached(
	struct xfs_buf		*bp)
{
	struct xfs_buftarg	*btp = bp->b_target;
	struct xfs_perag	*pag = bp->b_pag;
	struct xfs_buf_cache	*bch = xfs_buftarg_buf_cache(btp, pag);
	bool			release;
	bool			freebuf = false;

	trace_xfs_buf_rele(bp, _RET_IP_);

	ASSERT(atomic_read(&bp->b_hold) > 0);

	/*
	 * We grab the b_lock here first to serialise racing xfs_buf_rele()
	 * calls. The pag_buf_lock being taken on the last reference only
	 * serialises against racing lookups in xfs_buf_find(). IOWs, the second
	 * to last reference we drop here is not serialised against the last
	 * reference until we take bp->b_lock. Hence if we don't grab b_lock
	 * first, the last "release" reference can win the race to the lock and
	 * free the buffer before the second-to-last reference is processed,
	 * leading to a use-after-free scenario.
	 */
	spin_lock(&bp->b_lock);
	release = atomic_dec_and_lock(&bp->b_hold, &bch->bc_lock);
	if (!release) {
		/*
		 * Drop the in-flight state if the buffer is already on the LRU
		 * and it holds the only reference. This is racy because we
		 * haven't acquired the pag lock, but the use of
		 * XFS_BSTATE_IN_FLIGHT ensures the decrement occurs only once
		 * per-buf.
		 */
		if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
			__xfs_buf_ioacct_dec(bp);
		goto out_unlock;
	}

	/* the last reference has been dropped ... */
	__xfs_buf_ioacct_dec(bp);
	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
		/*
		 * If the buffer is added to the LRU take a new reference to the
		 * buffer for the LRU and clear the (now stale) dispose list
		 * state flag
		 */
		if (list_lru_add_obj(&btp->bt_lru, &bp->b_lru)) {
			bp->b_state &= ~XFS_BSTATE_DISPOSE;
			atomic_inc(&bp->b_hold);
		}
		spin_unlock(&bch->bc_lock);
	} else {
		/*
		 * most of the time buffers will already be removed from the
		 * LRU, so optimise that case by checking for the
		 * XFS_BSTATE_DISPOSE flag indicating the last list the buffer
		 * was on was the disposal list
		 */
		if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
			list_lru_del_obj(&btp->bt_lru, &bp->b_lru);
		} else {
			ASSERT(list_empty(&bp->b_lru));
		}

		ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
		rhashtable_remove_fast(&bch->bc_hash, &bp->b_rhash_head,
				xfs_buf_hash_params);
		spin_unlock(&bch->bc_lock);
		if (pag)
			xfs_perag_put(pag);
		freebuf = true;
	}

out_unlock:
	spin_unlock(&bp->b_lock);

	if (freebuf)
		xfs_buf_free(bp);
}

/*
 * Release a hold on the specified buffer.
 */
void
xfs_buf_rele(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_rele(bp, _RET_IP_);
	if (xfs_buf_is_uncached(bp))
		xfs_buf_rele_uncached(bp);
	else
		xfs_buf_rele_cached(bp);
}

/*
 *	Lock a buffer object, if it is not already locked.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we are
 *	being asked to lock a buffer that has been reallocated. Because it is
 *	pinned, we know that the log has not been pushed to disk and hence it
 *	will still be locked.  Rather than continuing to have trylock attempts
 *	fail until someone else pushes the log, push it ourselves before
 *	returning.  This means that the xfsaild will not get stuck trying
 *	to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		trace_xfs_buf_trylock(bp, _RET_IP_);
	else
		trace_xfs_buf_trylock_fail(bp, _RET_IP_);
	return locked;
}

/*
 *	Lock a buffer object.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we
 *	are being asked to lock a buffer that has been reallocated. Because
 *	it is pinned, we know that the log has not been pushed to disk and
 *	hence it will still be locked. Rather than sleeping until someone
 *	else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_mount, 0);
	down(&bp->b_sema);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	up(&bp->b_sema);
	trace_xfs_buf_unlock(bp, _RET_IP_);
}

STATIC void
xfs_buf_wait_unpin(
	struct xfs_buf		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

static void
xfs_buf_ioerror_alert_ratelimited(
	struct xfs_buf		*bp)
{
	static unsigned long	lasttime;
	static struct xfs_buftarg *lasttarg;

	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_buf_ioerror_alert(bp, __this_address);
	}
	lasttarg = bp->b_target;
}

/*
 * Account for this latest trip around the retry handler, and decide if
 * we've failed enough times to constitute a permanent failure.
 */
static bool
xfs_buf_ioerror_permanent(
	struct xfs_buf		*bp,
	struct xfs_error_cfg	*cfg)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
	    ++bp->b_retries > cfg->max_retries)
		return true;
	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
	    time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
		return true;

	/* At unmount we may treat errors differently */
	if (xfs_is_unmounting(mp) && mp->m_fail_unmount)
		return true;

	return false;
}

/*
 * On a sync write or shutdown we just want to stale the buffer and let the
 * caller handle the error in bp->b_error appropriately.
 *
 * If the write was asynchronous then no one will be looking for the error.  If
 * this is the first failure of this type, clear the error state and write the
 * buffer out again. This means we always retry an async write failure at least
 * once, but we also need to set the buffer up to behave correctly now for
 * repeated failures.
 *
 * If we get repeated async write failures, then we take action according to the
 * error configuration we have been set up to use.
 *
 * Returns true if this function took care of error handling and the caller must
 * not touch the buffer again.  Return false if the caller should proceed with
 * normal I/O completion handling.
 */
static bool
xfs_buf_ioend_handle_error(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_error_cfg	*cfg;

	/*
	 * If we've already shutdown the journal because of I/O errors, there's
	 * no point in giving this a retry.
	 */
	if (xlog_is_shutdown(mp->m_log))
		goto out_stale;

	xfs_buf_ioerror_alert_ratelimited(bp);

	/*
	 * We're not going to bother about retrying this during recovery.
	 * One strike!
	 */
	if (bp->b_flags & _XBF_LOGRECOVERY) {
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		return false;
	}

	/*
	 * Synchronous writes will have callers process the error.
	 */
	if (!(bp->b_flags & XBF_ASYNC))
		goto out_stale;

	trace_xfs_buf_iodone_async(bp, _RET_IP_);

	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
	if (bp->b_last_error != bp->b_error ||
	    !(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL))) {
		bp->b_last_error = bp->b_error;
		if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
		    !bp->b_first_retry_time)
			bp->b_first_retry_time = jiffies;
		goto resubmit;
	}

	/*
	 * Permanent error - we need to trigger a shutdown if we haven't already
	 * to indicate that inconsistency will result from this action.
	 */
	if (xfs_buf_ioerror_permanent(bp, cfg)) {
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		goto out_stale;
	}

	/* Still considered a transient error. Caller will schedule retries. */
	if (bp->b_flags & _XBF_INODES)
		xfs_buf_inode_io_fail(bp);
	else if (bp->b_flags & _XBF_DQUOTS)
		xfs_buf_dquot_io_fail(bp);
	else
		ASSERT(list_empty(&bp->b_li_list));
	xfs_buf_ioerror(bp, 0);
	xfs_buf_relse(bp);
	return true;

resubmit:
	xfs_buf_ioerror(bp, 0);
	bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL);
	xfs_buf_submit(bp);
	return true;
out_stale:
	xfs_buf_stale(bp);
	bp->b_flags |= XBF_DONE;
	bp->b_flags &= ~XBF_WRITE;
	trace_xfs_buf_error_relse(bp, _RET_IP_);
	return false;
}

static void
xfs_buf_ioend(
	struct xfs_buf	*bp)
{
	trace_xfs_buf_iodone(bp, _RET_IP_);

	/*
	 * Pull in IO completion errors now. We are guaranteed to be running
	 * single threaded, so we don't need the lock to read b_io_error.
	 */
	if (!bp->b_error && bp->b_io_error)
		xfs_buf_ioerror(bp, bp->b_io_error);

	if (bp->b_flags & XBF_READ) {
		if (!bp->b_error && bp->b_ops)
			bp->b_ops->verify_read(bp);
		if (!bp->b_error)
			bp->b_flags |= XBF_DONE;
	} else {
		if (!bp->b_error) {
			bp->b_flags &= ~XBF_WRITE_FAIL;
			bp->b_flags |= XBF_DONE;
		}

		if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp))
			return;

		/* clear the retry state */
		bp->b_last_error = 0;
		bp->b_retries = 0;
		bp->b_first_retry_time = 0;

		/*
		 * Note that for things like remote attribute buffers, there may
		 * not be a buffer log item here, so processing the buffer log
		 * item must remain optional.
		 */
		if (bp->b_log_item)
			xfs_buf_item_done(bp);

		if (bp->b_flags & _XBF_INODES)
			xfs_buf_inode_iodone(bp);
		else if (bp->b_flags & _XBF_DQUOTS)
			xfs_buf_dquot_iodone(bp);

	}

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD |
			 _XBF_LOGRECOVERY);

	if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
	else
		complete(&bp->b_iowait);
}

static void
xfs_buf_ioend_work(
	struct work_struct	*work)
{
	struct xfs_buf		*bp =
		container_of(work, struct xfs_buf, b_ioend_work);

	xfs_buf_ioend(bp);
}

static void
xfs_buf_ioend_async(
	struct xfs_buf	*bp)
{
	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
	queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
}

void
__xfs_buf_ioerror(
	struct xfs_buf		*bp,
	int			error,
	xfs_failaddr_t		failaddr)
{
	ASSERT(error <= 0 && error >= -1000);
	bp->b_error = error;
	trace_xfs_buf_ioerror(bp, error, failaddr);
}

void
xfs_buf_ioerror_alert(
	struct xfs_buf		*bp,
	xfs_failaddr_t		func)
{
	xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error",
		"metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d",
				  func, (uint64_t)xfs_buf_daddr(bp),
				  bp->b_length, -bp->b_error);
}

/*
 * To simulate an I/O failure, the buffer must be locked and held with at least
 * three references. The LRU reference is dropped by the stale call. The buf
 * item reference is dropped via ioend processing. The third reference is owned
 * by the caller and is dropped on I/O completion if the buffer is XBF_ASYNC.
 */
void
xfs_buf_ioend_fail(
	struct xfs_buf	*bp)
{
	bp->b_flags &= ~XBF_DONE;
	xfs_buf_stale(bp);
	xfs_buf_ioerror(bp, -EIO);
	xfs_buf_ioend(bp);
}

int
xfs_bwrite(
	struct xfs_buf		*bp)
{
	int			error;

	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
			 XBF_DONE);

	error = xfs_buf_submit(bp);
	if (error)
		xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
	return error;
}

static void
xfs_buf_bio_end_io(
	struct bio		*bio)
{
	struct xfs_buf		*bp = (struct xfs_buf *)bio->bi_private;

	if (!bio->bi_status &&
	    (bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) &&
	    XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
		bio->bi_status = BLK_STS_IOERR;

	/*
	 * don't overwrite existing errors - otherwise we can lose errors on
	 * buffers that require multiple bios to complete.
	 */
	if (bio->bi_status) {
		int error = blk_status_to_errno(bio->bi_status);

		cmpxchg(&bp->b_io_error, 0, error);
	}

	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend_async(bp);
	bio_put(bio);
}

static void
xfs_buf_ioapply_map(
	struct xfs_buf	*bp,
	int		map,
	int		*buf_offset,
	int		*count,
	blk_opf_t	op)
{
	int		page_index;
	unsigned int	total_nr_pages = bp->b_page_count;
	int		nr_pages;
	struct bio	*bio;
	sector_t	sector =  bp->b_maps[map].bm_bn;
	int		size;
	int		offset;

	/* skip the pages in the buffer before the start offset */
	page_index = 0;
	offset = *buf_offset;
	while (offset >= PAGE_SIZE) {
		page_index++;
		offset -= PAGE_SIZE;
	}

	/*
	 * Limit the IO size to the length of the current vector, and update the
	 * remaining IO count for the next time around.
	 */
	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
	*count -= size;
	*buf_offset += size;

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = bio_max_segs(total_nr_pages);

	bio = bio_alloc(bp->b_target->bt_bdev, nr_pages, op, GFP_NOIO);
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;

	for (; size && nr_pages; nr_pages--, page_index++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
				      offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += BTOBB(nbytes);
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_iter.bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(bio);
		if (size)
			goto next_chunk;
	} else {
		/*
		 * This is guaranteed not to be the last io reference count
		 * because the caller (xfs_buf_submit) holds a count itself.
		 */
		atomic_dec(&bp->b_io_remaining);
		xfs_buf_ioerror(bp, -EIO);
		bio_put(bio);
	}

}

STATIC void
_xfs_buf_ioapply(
	struct xfs_buf	*bp)
{
	struct blk_plug	plug;
	blk_opf_t	op;
	int		offset;
	int		size;
	int		i;

	/*
	 * Make sure we capture only current IO errors rather than stale errors
	 * left over from previous use of the buffer (e.g. failed readahead).
	 */
	bp->b_error = 0;

	if (bp->b_flags & XBF_WRITE) {
		op = REQ_OP_WRITE;

		/*
		 * Run the write verifier callback function if it exists. If
		 * this function fails it will mark the buffer with an error and
		 * the IO should not be dispatched.
		 */
		if (bp->b_ops) {
			bp->b_ops->verify_write(bp);
			if (bp->b_error) {
				xfs_force_shutdown(bp->b_mount,
						   SHUTDOWN_CORRUPT_INCORE);
				return;
			}
		} else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) {
			struct xfs_mount *mp = bp->b_mount;

			/*
			 * non-crc filesystems don't attach verifiers during
			 * log recovery, so don't warn for such filesystems.
			 */
			if (xfs_has_crc(mp)) {
				xfs_warn(mp,
					"%s: no buf ops on daddr 0x%llx len %d",
					__func__, xfs_buf_daddr(bp),
					bp->b_length);
				xfs_hex_dump(bp->b_addr,
						XFS_CORRUPTION_DUMP_LEN);
				dump_stack();
			}
		}
	} else {
		op = REQ_OP_READ;
		if (bp->b_flags & XBF_READ_AHEAD)
			op |= REQ_RAHEAD;
	}

	/* we only use the buffer cache for meta-data */
	op |= REQ_META;

	/* in-memory targets are directly mapped, no IO required. */
	if (xfs_buftarg_is_mem(bp->b_target)) {
		xfs_buf_ioend(bp);
		return;
	}

	/*
	 * Walk all the vectors issuing IO on them. Set up the initial offset
	 * into the buffer and the desired IO size before we start -
	 * xfs_buf_ioapply_map() will modify them appropriately for each
	 * subsequent call.
	 */
	offset = bp->b_offset;
	size = BBTOB(bp->b_length);
	blk_start_plug(&plug);
	for (i = 0; i < bp->b_map_count; i++) {
		xfs_buf_ioapply_map(bp, i, &offset, &size, op);
		if (bp->b_error)
			break;
		if (size <= 0)
			break;	/* all done */
	}
	blk_finish_plug(&plug);
}

/*
 * Wait for I/O completion of a sync buffer and return the I/O error code.
 */
static int
xfs_buf_iowait(
	struct xfs_buf	*bp)
{
	ASSERT(!(bp->b_flags & XBF_ASYNC));

	trace_xfs_buf_iowait(bp, _RET_IP_);
	wait_for_completion(&bp->b_iowait);
	trace_xfs_buf_iowait_done(bp, _RET_IP_);

	return bp->b_error;
}

/*
 * Buffer I/O submission path, read or write. Asynchronous submission transfers
 * the buffer lock ownership and the current reference to the IO. It is not
 * safe to reference the buffer after a call to this function unless the caller
 * holds an additional reference itself.
 */
static int
__xfs_buf_submit(
	struct xfs_buf	*bp,
	bool		wait)
{
	int		error = 0;

	trace_xfs_buf_submit(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));

	/*
	 * On log shutdown we stale and complete the buffer immediately. We can
	 * be called to read the superblock before the log has been set up, so
	 * be careful checking the log state.
	 *
	 * Checking the mount shutdown state here can result in the log tail
	 * moving inappropriately on disk as the log may not yet be shut down.
	 * i.e. failing this buffer on mount shutdown can remove it from the AIL
	 * and move the tail of the log forwards without having written this
	 * buffer to disk. This corrupts the log tail state in memory, and
	 * because the log may not be shut down yet, it can then be propagated
	 * to disk before the log is shutdown. Hence we check log shutdown
	 * state here rather than mount state to avoid corrupting the log tail
	 * on shutdown.
	 */
	if (bp->b_mount->m_log &&
	    xlog_is_shutdown(bp->b_mount->m_log)) {
		xfs_buf_ioend_fail(bp);
		return -EIO;
	}

	/*
	 * Grab a reference so the buffer does not go away underneath us. For
	 * async buffers, I/O completion drops the callers reference, which
	 * could occur before submission returns.
	 */
	xfs_buf_hold(bp);

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);

	/* clear the internal error state to avoid spurious errors */
	bp->b_io_error = 0;

	/*
	 * Set the count to 1 initially; this will stop an I/O completion
	 * callout which happens before we have started all the I/O from calling
	 * xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	if (bp->b_flags & XBF_ASYNC)
		xfs_buf_ioacct_inc(bp);
	_xfs_buf_ioapply(bp);

	/*
	 * If _xfs_buf_ioapply failed, we can get back here with only the IO
	 * reference we took above. If we drop it to zero, run completion so
	 * that we don't return to the caller with completion still pending.
	 */
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
		if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
			xfs_buf_ioend(bp);
		else
			xfs_buf_ioend_async(bp);
	}

	if (wait)
		error = xfs_buf_iowait(bp);

	/*
	 * Release the hold that keeps the buffer referenced for the entire
	 * I/O. Note that if the buffer is async, it is not safe to reference
	 * after this release.
	 */
	xfs_buf_rele(bp);
	return error;
}
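
/*
 * Illustrative sketch (not built): the ownership rules documented above for
 * __xfs_buf_submit(). For a synchronous submission the caller keeps its
 * reference and the buffer lock and must release both itself; for an async
 * submission both pass to the I/O, so the caller must not touch the buffer
 * afterwards unless it holds an extra reference of its own.
 */
#if 0
static void example_submit_ownership(struct xfs_buf *bp)
{
	/* sync: xfs_buf_submit() waits, we still own bp on return */
	bp->b_flags &= ~XBF_ASYNC;
	xfs_buf_submit(bp);
	xfs_buf_relse(bp);		/* drop our lock and reference */
}
#endif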

void *
xfs_buf_offset(
	struct xfs_buf		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_addr)
		return bp->b_addr + offset;

	page = bp->b_pages[offset >> PAGE_SHIFT];
	return page_address(page) + (offset & (PAGE_SIZE-1));
}

void
xfs_buf_zero(
	struct xfs_buf		*bp,
	size_t			boff,
	size_t			bsize)
{
	size_t			bend;

	bend = boff + bsize;
	while (boff < bend) {
		struct page	*page;
		int		page_index, page_offset, csize;

		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
		page = bp->b_pages[page_index];
		csize = min_t(size_t, PAGE_SIZE - page_offset,
				      BBTOB(bp->b_length) - boff);

		ASSERT((csize + page_offset) <= PAGE_SIZE);

		memset(page_address(page) + page_offset, 0, csize);

		boff += csize;
	}
}

/*
 * Log a message about and stale a buffer that a caller has decided is corrupt.
 *
 * This function should be called for the kinds of metadata corruption that
 * cannot be detected by a verifier, such as incorrect inter-block relationship
1830 * data.  Do /not/ call this function from a verifier function.
1831 *
1832 * The buffer must be XBF_DONE prior to the call.  Afterwards, the buffer will
1833 * be marked stale, but b_error will not be set.  The caller is responsible for
1834 * releasing the buffer or fixing it.
1835 */
1836void
1837__xfs_buf_mark_corrupt(
1838	struct xfs_buf		*bp,
1839	xfs_failaddr_t		fa)
1840{
1841	ASSERT(bp->b_flags & XBF_DONE);
1842
1843	xfs_buf_corruption_error(bp, fa);
1844	xfs_buf_stale(bp);
1845}
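
/*
 * Sketch of the intended calling pattern (the cross-reference check here is
 * hypothetical): a caller that read a buffer cleanly but then finds an
 * inter-block inconsistency marks the buffer corrupt itself, because no
 * verifier can see that kind of problem.
 */
static int __maybe_unused
example_check_sibling(
	struct xfs_buf		*bp,
	bool			sibling_pointers_ok)
{
	if (!sibling_pointers_ok) {
		__xfs_buf_mark_corrupt(bp, __this_address);
		return -EFSCORRUPTED;
	}
	return 0;
}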
1846
1847/*
1848 *	Handling of buffer targets (buftargs).
1849 */
1850
1851/*
1852 * Wait for any bufs with callbacks that have been submitted but have not yet
1853 * returned. These buffers will have an elevated hold count, so wait on those
1854 * while freeing all the buffers only held by the LRU.
1855 */
1856static enum lru_status
1857xfs_buftarg_drain_rele(
1858	struct list_head	*item,
1859	struct list_lru_one	*lru,
1860	spinlock_t		*lru_lock,
1861	void			*arg)
1862
1863{
1864	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
1865	struct list_head	*dispose = arg;
1866
1867	if (atomic_read(&bp->b_hold) > 1) {
1868		/* need to wait, so skip it this pass */
1869		trace_xfs_buf_drain_buftarg(bp, _RET_IP_);
1870		return LRU_SKIP;
1871	}
1872	if (!spin_trylock(&bp->b_lock))
1873		return LRU_SKIP;
1874
1875	/*
1876	 * clear the LRU reference count so the buffer doesn't get
1877	 * ignored in xfs_buf_rele().
1878	 */
1879	atomic_set(&bp->b_lru_ref, 0);
1880	bp->b_state |= XFS_BSTATE_DISPOSE;
1881	list_lru_isolate_move(lru, item, dispose);
1882	spin_unlock(&bp->b_lock);
1883	return LRU_REMOVED;
1884}
1885
1886/*
1887 * Wait for outstanding I/O on the buftarg to complete.
1888 */
1889void
1890xfs_buftarg_wait(
1891	struct xfs_buftarg	*btp)
1892{
1893	/*
1894	 * First wait on the buftarg I/O count for all in-flight buffers to be
1895	 * released. This is critical as new buffers do not make the LRU until
1896	 * they are released.
1897	 *
1898	 * Next, flush the buffer workqueue to ensure all completion processing
1899	 * has finished. Just waiting on buffer locks is not sufficient for
1900	 * async IO as the reference count held over IO is not released until
1901	 * after the buffer lock is dropped. Hence we need to ensure here that
1902	 * all reference counts have been dropped before we start walking the
1903	 * LRU list.
1904	 */
1905	while (percpu_counter_sum(&btp->bt_io_count))
1906		delay(100);
1907	flush_workqueue(btp->bt_mount->m_buf_workqueue);
1908}
1909
1910void
1911xfs_buftarg_drain(
1912	struct xfs_buftarg	*btp)
1913{
1914	LIST_HEAD(dispose);
1915	int			loop = 0;
1916	bool			write_fail = false;
1917
1918	xfs_buftarg_wait(btp);
1919
1920	/* loop until there is nothing left on the lru list. */
1921	while (list_lru_count(&btp->bt_lru)) {
1922		list_lru_walk(&btp->bt_lru, xfs_buftarg_drain_rele,
1923			      &dispose, LONG_MAX);
1924
1925		while (!list_empty(&dispose)) {
1926			struct xfs_buf *bp;
1927			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1928			list_del_init(&bp->b_lru);
1929			if (bp->b_flags & XBF_WRITE_FAIL) {
1930				write_fail = true;
1931				xfs_buf_alert_ratelimited(bp,
1932					"XFS: Corruption Alert",
1933"Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
1934					(long long)xfs_buf_daddr(bp));
1935			}
1936			xfs_buf_rele(bp);
1937		}
1938		if (loop++ != 0)
1939			delay(100);
1940	}
1941
	/*
	 * If one or more failed buffers were freed, that means dirty metadata
	 * was thrown away. This should only ever happen after I/O completion
	 * handling has elevated I/O error(s) to permanent failures and shut
	 * down the journal.
	 */
1948	if (write_fail) {
1949		ASSERT(xlog_is_shutdown(btp->bt_mount->m_log));
1950		xfs_alert(btp->bt_mount,
1951	      "Please run xfs_repair to determine the extent of the problem.");
1952	}
1953}
1954
1955static enum lru_status
1956xfs_buftarg_isolate(
1957	struct list_head	*item,
1958	struct list_lru_one	*lru,
1959	spinlock_t		*lru_lock,
1960	void			*arg)
1961{
1962	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
1963	struct list_head	*dispose = arg;
1964
	/*
	 * We are inverting the lru_lock/b_lock order here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
1969	if (!spin_trylock(&bp->b_lock))
1970		return LRU_SKIP;
1971	/*
1972	 * Decrement the b_lru_ref count unless the value is already
1973	 * zero. If the value is already zero, we need to reclaim the
1974	 * buffer, otherwise it gets another trip through the LRU.
1975	 */
1976	if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1977		spin_unlock(&bp->b_lock);
1978		return LRU_ROTATE;
1979	}
1980
1981	bp->b_state |= XFS_BSTATE_DISPOSE;
1982	list_lru_isolate_move(lru, item, dispose);
1983	spin_unlock(&bp->b_lock);
1984	return LRU_REMOVED;
1985}
1986
1987static unsigned long
1988xfs_buftarg_shrink_scan(
1989	struct shrinker		*shrink,
1990	struct shrink_control	*sc)
1991{
1992	struct xfs_buftarg	*btp = shrink->private_data;
1993	LIST_HEAD(dispose);
1994	unsigned long		freed;
1995
1996	freed = list_lru_shrink_walk(&btp->bt_lru, sc,
1997				     xfs_buftarg_isolate, &dispose);
1998
1999	while (!list_empty(&dispose)) {
2000		struct xfs_buf *bp;
2001		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
2002		list_del_init(&bp->b_lru);
2003		xfs_buf_rele(bp);
2004	}
2005
2006	return freed;
2007}
2008
2009static unsigned long
2010xfs_buftarg_shrink_count(
2011	struct shrinker		*shrink,
2012	struct shrink_control	*sc)
2013{
2014	struct xfs_buftarg	*btp = shrink->private_data;
2015	return list_lru_shrink_count(&btp->bt_lru, sc);
2016}
2017
2018void
2019xfs_destroy_buftarg(
2020	struct xfs_buftarg	*btp)
2021{
2022	shrinker_free(btp->bt_shrinker);
2023	ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
2024	percpu_counter_destroy(&btp->bt_io_count);
2025	list_lru_destroy(&btp->bt_lru);
2026}
2027
2028void
2029xfs_free_buftarg(
2030	struct xfs_buftarg	*btp)
2031{
2032	xfs_destroy_buftarg(btp);
2033	fs_put_dax(btp->bt_daxdev, btp->bt_mount);
2034	/* the main block device is closed by kill_block_super */
2035	if (btp->bt_bdev != btp->bt_mount->m_super->s_bdev)
2036		bdev_fput(btp->bt_bdev_file);
2037	kfree(btp);
2038}
2039
2040int
2041xfs_setsize_buftarg(
2042	struct xfs_buftarg	*btp,
2043	unsigned int		sectorsize)
2044{
2045	/* Set up metadata sector size info */
2046	btp->bt_meta_sectorsize = sectorsize;
2047	btp->bt_meta_sectormask = sectorsize - 1;
2048
2049	if (set_blocksize(btp->bt_bdev_file, sectorsize)) {
2050		xfs_warn(btp->bt_mount,
2051			"Cannot set_blocksize to %u on device %pg",
2052			sectorsize, btp->bt_bdev);
2053		return -EINVAL;
2054	}
2055
2056	return 0;
2057}
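
/*
 * The sector mask set up above relies on the sector size being a power of
 * two, which lets alignment checks avoid a division. A minimal sketch of such
 * a check (the helper name is hypothetical):
 */
static inline bool
example_daddr_is_aligned(
	struct xfs_buftarg	*btp,
	xfs_daddr_t		daddr)
{
	/* true if the byte offset is aligned to the metadata sector size */
	return !(BBTOB(daddr) & (xfs_daddr_t)btp->bt_meta_sectormask);
}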
2058
2059int
2060xfs_init_buftarg(
2061	struct xfs_buftarg		*btp,
2062	size_t				logical_sectorsize,
2063	const char			*descr)
2064{
2065	/* Set up device logical sector size mask */
2066	btp->bt_logical_sectorsize = logical_sectorsize;
2067	btp->bt_logical_sectormask = logical_sectorsize - 1;
2068
	/*
	 * Buffer IO error rate limiting. Limit it to no more than 10 messages
	 * per 30 seconds so as not to spam logs too much on repeated errors.
	 */
2073	ratelimit_state_init(&btp->bt_ioerror_rl, 30 * HZ,
2074			     DEFAULT_RATELIMIT_BURST);
2075
2076	if (list_lru_init(&btp->bt_lru))
2077		return -ENOMEM;
2078	if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
2079		goto out_destroy_lru;
2080
2081	btp->bt_shrinker =
2082		shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-buf:%s", descr);
2083	if (!btp->bt_shrinker)
2084		goto out_destroy_io_count;
2085	btp->bt_shrinker->count_objects = xfs_buftarg_shrink_count;
2086	btp->bt_shrinker->scan_objects = xfs_buftarg_shrink_scan;
2087	btp->bt_shrinker->private_data = btp;
2088	shrinker_register(btp->bt_shrinker);
2089	return 0;
2090
2091out_destroy_io_count:
2092	percpu_counter_destroy(&btp->bt_io_count);
2093out_destroy_lru:
2094	list_lru_destroy(&btp->bt_lru);
2095	return -ENOMEM;
2096}
2097
2098struct xfs_buftarg *
2099xfs_alloc_buftarg(
2100	struct xfs_mount	*mp,
2101	struct file		*bdev_file)
2102{
2103	struct xfs_buftarg	*btp;
2104	const struct dax_holder_operations *ops = NULL;
2105
2106#if defined(CONFIG_FS_DAX) && defined(CONFIG_MEMORY_FAILURE)
2107	ops = &xfs_dax_holder_operations;
2108#endif
2109	btp = kzalloc(sizeof(*btp), GFP_KERNEL | __GFP_NOFAIL);
2110
2111	btp->bt_mount = mp;
2112	btp->bt_bdev_file = bdev_file;
2113	btp->bt_bdev = file_bdev(bdev_file);
2114	btp->bt_dev = btp->bt_bdev->bd_dev;
2115	btp->bt_daxdev = fs_dax_get_by_bdev(btp->bt_bdev, &btp->bt_dax_part_off,
2116					    mp, ops);
2117
	/*
	 * When allocating the buftarg we have not yet read the superblock and
	 * thus don't know the filesystem sector size yet.
	 */
2122	if (xfs_setsize_buftarg(btp, bdev_logical_block_size(btp->bt_bdev)))
2123		goto error_free;
2124	if (xfs_init_buftarg(btp, bdev_logical_block_size(btp->bt_bdev),
2125			mp->m_super->s_id))
2126		goto error_free;
2127
2128	return btp;
2129
2130error_free:
2131	kfree(btp);
2132	return NULL;
2133}
2134
2135static inline void
2136xfs_buf_list_del(
2137	struct xfs_buf		*bp)
2138{
2139	list_del_init(&bp->b_list);
2140	wake_up_var(&bp->b_list);
2141}
2142
2143/*
2144 * Cancel a delayed write list.
2145 *
2146 * Remove each buffer from the list, clear the delwri queue flag and drop the
2147 * associated buffer reference.
2148 */
2149void
2150xfs_buf_delwri_cancel(
2151	struct list_head	*list)
2152{
2153	struct xfs_buf		*bp;
2154
2155	while (!list_empty(list)) {
2156		bp = list_first_entry(list, struct xfs_buf, b_list);
2157
2158		xfs_buf_lock(bp);
2159		bp->b_flags &= ~_XBF_DELWRI_Q;
2160		xfs_buf_list_del(bp);
2161		xfs_buf_relse(bp);
2162	}
2163}
2164
/*
 * Add a buffer to the delayed write list.
 *
 * This queues a buffer for writeout if it hasn't already been queued.  Note
 * that neither this routine nor the buffer list submission functions perform
 * any internal synchronization.  It is expected that the lists are thread-local
 * to the callers.
 *
 * Returns true if we queued up the buffer, or false if it was already on the
 * buffer list.
 */
2176bool
2177xfs_buf_delwri_queue(
2178	struct xfs_buf		*bp,
2179	struct list_head	*list)
2180{
2181	ASSERT(xfs_buf_islocked(bp));
2182	ASSERT(!(bp->b_flags & XBF_READ));
2183
	/*
	 * If the buffer is already marked delwri it is already queued up
	 * by someone else for immediate writeout.  Just ignore it in that
	 * case.
	 */
2189	if (bp->b_flags & _XBF_DELWRI_Q) {
2190		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
2191		return false;
2192	}
2193
2194	trace_xfs_buf_delwri_queue(bp, _RET_IP_);
2195
	/*
	 * If a buffer gets written out synchronously or marked stale while it
	 * is on a delwri list we lazily remove it. To do this, the other party
	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
	 * It remains referenced and on the list.  In a rare corner case it
	 * might get re-added to a delwri list after the synchronous writeout,
	 * in which case we just need to re-add the flag here.
	 */
2204	bp->b_flags |= _XBF_DELWRI_Q;
2205	if (list_empty(&bp->b_list)) {
2206		atomic_inc(&bp->b_hold);
2207		list_add_tail(&bp->b_list, list);
2208	}
2209
2210	return true;
2211}
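
/*
 * Illustrative only: the usual pattern is to dirty the buffer under its lock,
 * queue it to a caller-private list and drop the lock; the list is then
 * submitted later in one go. The helper name is hypothetical.
 */
static void __maybe_unused
example_dirty_and_queue(
	struct xfs_buf		*bp,
	struct list_head	*buffer_list)
{
	ASSERT(xfs_buf_islocked(bp));

	/* ... caller modifies the buffer contents here ... */

	/*
	 * A false return only means the buffer was already queued by an
	 * earlier modification; it still only needs to be written once.
	 */
	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_unlock(bp);
}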
2212
2213/*
2214 * Queue a buffer to this delwri list as part of a data integrity operation.
2215 * If the buffer is on any other delwri list, we'll wait for that to clear
2216 * so that the caller can submit the buffer for IO and wait for the result.
2217 * Callers must ensure the buffer is not already on the list.
2218 */
2219void
2220xfs_buf_delwri_queue_here(
2221	struct xfs_buf		*bp,
2222	struct list_head	*buffer_list)
2223{
2224	/*
2225	 * We need this buffer to end up on the /caller's/ delwri list, not any
2226	 * old list.  This can happen if the buffer is marked stale (which
2227	 * clears DELWRI_Q) after the AIL queues the buffer to its list but
2228	 * before the AIL has a chance to submit the list.
2229	 */
2230	while (!list_empty(&bp->b_list)) {
2231		xfs_buf_unlock(bp);
2232		wait_var_event(&bp->b_list, list_empty(&bp->b_list));
2233		xfs_buf_lock(bp);
2234	}
2235
2236	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
2237
2238	xfs_buf_delwri_queue(bp, buffer_list);
2239}
2240
/*
 * Compare function is more complex than it needs to be because the return
 * value is only 32 bits and we are doing comparisons on 64 bit values.
 * Returning the raw difference could truncate (e.g. a difference of exactly
 * 2^32 becomes 0) and missort the list, so clamp the result to -1/0/1.
 */
2246static int
2247xfs_buf_cmp(
2248	void			*priv,
2249	const struct list_head	*a,
2250	const struct list_head	*b)
2251{
2252	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
2253	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
2254	xfs_daddr_t		diff;
2255
2256	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
2257	if (diff < 0)
2258		return -1;
2259	if (diff > 0)
2260		return 1;
2261	return 0;
2262}
2263
/*
 * Submit buffers for write. If wait_list is specified, the buffers are
 * submitted using sync I/O and placed on the wait list such that the caller
 * can iowait each buffer. Otherwise async I/O is used and the buffers are
 * released at I/O completion time. In either case, buffers remain locked until
 * I/O completes and they are released from the queue.
 */
2271static int
2272xfs_buf_delwri_submit_buffers(
2273	struct list_head	*buffer_list,
2274	struct list_head	*wait_list)
2275{
2276	struct xfs_buf		*bp, *n;
2277	int			pinned = 0;
2278	struct blk_plug		plug;
2279
2280	list_sort(NULL, buffer_list, xfs_buf_cmp);
2281
2282	blk_start_plug(&plug);
2283	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
2284		if (!wait_list) {
2285			if (!xfs_buf_trylock(bp))
2286				continue;
2287			if (xfs_buf_ispinned(bp)) {
2288				xfs_buf_unlock(bp);
2289				pinned++;
2290				continue;
2291			}
2292		} else {
2293			xfs_buf_lock(bp);
2294		}
2295
2296		/*
2297		 * Someone else might have written the buffer synchronously or
2298		 * marked it stale in the meantime.  In that case only the
2299		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
2300		 * reference and remove it from the list here.
2301		 */
2302		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
2303			xfs_buf_list_del(bp);
2304			xfs_buf_relse(bp);
2305			continue;
2306		}
2307
2308		trace_xfs_buf_delwri_split(bp, _RET_IP_);
2309
2310		/*
2311		 * If we have a wait list, each buffer (and associated delwri
2312		 * queue reference) transfers to it and is submitted
2313		 * synchronously. Otherwise, drop the buffer from the delwri
2314		 * queue and submit async.
2315		 */
2316		bp->b_flags &= ~_XBF_DELWRI_Q;
2317		bp->b_flags |= XBF_WRITE;
2318		if (wait_list) {
2319			bp->b_flags &= ~XBF_ASYNC;
2320			list_move_tail(&bp->b_list, wait_list);
2321		} else {
2322			bp->b_flags |= XBF_ASYNC;
2323			xfs_buf_list_del(bp);
2324		}
2325		__xfs_buf_submit(bp, false);
2326	}
2327	blk_finish_plug(&plug);
2328
2329	return pinned;
2330}
2331
2332/*
2333 * Write out a buffer list asynchronously.
2334 *
2335 * This will take the @buffer_list, write all non-locked and non-pinned buffers
2336 * out and not wait for I/O completion on any of the buffers.  This interface
 * is only safely usable for callers that can track I/O completion by higher
2338 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
2339 * function.
2340 *
2341 * Note: this function will skip buffers it would block on, and in doing so
2342 * leaves them on @buffer_list so they can be retried on a later pass. As such,
2343 * it is up to the caller to ensure that the buffer list is fully submitted or
2344 * cancelled appropriately when they are finished with the list. Failure to
2345 * cancel or resubmit the list until it is empty will result in leaked buffers
2346 * at unmount time.
2347 */
2348int
2349xfs_buf_delwri_submit_nowait(
2350	struct list_head	*buffer_list)
2351{
2352	return xfs_buf_delwri_submit_buffers(buffer_list, NULL);
2353}
2354
2355/*
2356 * Write out a buffer list synchronously.
2357 *
2358 * This will take the @buffer_list, write all buffers out and wait for I/O
2359 * completion on all of the buffers. @buffer_list is consumed by the function,
2360 * so callers must have some other way of tracking buffers if they require such
2361 * functionality.
2362 */
2363int
2364xfs_buf_delwri_submit(
2365	struct list_head	*buffer_list)
2366{
2367	LIST_HEAD		(wait_list);
2368	int			error = 0, error2;
2369	struct xfs_buf		*bp;
2370
2371	xfs_buf_delwri_submit_buffers(buffer_list, &wait_list);
2372
2373	/* Wait for IO to complete. */
2374	while (!list_empty(&wait_list)) {
2375		bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
2376
2377		xfs_buf_list_del(bp);
2378
2379		/*
2380		 * Wait on the locked buffer, check for errors and unlock and
2381		 * release the delwri queue reference.
2382		 */
2383		error2 = xfs_buf_iowait(bp);
2384		xfs_buf_relse(bp);
2385		if (!error)
2386			error = error2;
2387	}
2388
2389	return error;
2390}
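
/*
 * End-to-end sketch (hypothetical caller): accumulate dirty buffers on a
 * local delwri list, then write them all out synchronously. The first error
 * seen is returned and the list is left empty either way.
 */
static int __maybe_unused
example_flush_buffers(
	struct xfs_buf		**bps,
	int			nr_bufs)
{
	LIST_HEAD		(buffer_list);
	int			i;

	for (i = 0; i < nr_bufs; i++) {
		xfs_buf_lock(bps[i]);
		/* ... modify bps[i] ... */
		xfs_buf_delwri_queue(bps[i], &buffer_list);
		xfs_buf_unlock(bps[i]);
	}

	return xfs_buf_delwri_submit(&buffer_list);
}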
2391
2392/*
2393 * Push a single buffer on a delwri queue.
2394 *
2395 * The purpose of this function is to submit a single buffer of a delwri queue
2396 * and return with the buffer still on the original queue. The waiting delwri
2397 * buffer submission infrastructure guarantees transfer of the delwri queue
2398 * buffer reference to a temporary wait list. We reuse this infrastructure to
2399 * transfer the buffer back to the original queue.
2400 *
 * Note the buffer transitions from the queued state to the submitted and
 * wait-listed state and back to the queued state during this call. The buffer
2403 * locking and queue management logic between _delwri_pushbuf() and
2404 * _delwri_queue() guarantee that the buffer cannot be queued to another list
2405 * before returning.
2406 */
2407int
2408xfs_buf_delwri_pushbuf(
2409	struct xfs_buf		*bp,
2410	struct list_head	*buffer_list)
2411{
2412	LIST_HEAD		(submit_list);
2413	int			error;
2414
2415	ASSERT(bp->b_flags & _XBF_DELWRI_Q);
2416
2417	trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);
2418
2419	/*
2420	 * Isolate the buffer to a new local list so we can submit it for I/O
2421	 * independently from the rest of the original list.
2422	 */
2423	xfs_buf_lock(bp);
2424	list_move(&bp->b_list, &submit_list);
2425	xfs_buf_unlock(bp);
2426
2427	/*
2428	 * Delwri submission clears the DELWRI_Q buffer flag and returns with
2429	 * the buffer on the wait list with the original reference. Rather than
2430	 * bounce the buffer from a local wait list back to the original list
2431	 * after I/O completion, reuse the original list as the wait list.
2432	 */
2433	xfs_buf_delwri_submit_buffers(&submit_list, buffer_list);
2434
2435	/*
2436	 * The buffer is now locked, under I/O and wait listed on the original
2437	 * delwri queue. Wait for I/O completion, restore the DELWRI_Q flag and
2438	 * return with the buffer unlocked and on the original queue.
2439	 */
2440	error = xfs_buf_iowait(bp);
2441	bp->b_flags |= _XBF_DELWRI_Q;
2442	xfs_buf_unlock(bp);
2443
2444	return error;
2445}
2446
2447void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
2448{
	/*
	 * If the error injection tag is set, force the LRU reference count to
	 * zero. This allows userspace to disrupt buffer caching for
	 * debug/testing purposes.
	 */
2454	if (XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_LRU_REF))
2455		lru_ref = 0;
2456
2457	atomic_set(&bp->b_lru_ref, lru_ref);
2458}
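
/*
 * Illustrative only: frequently reused metadata (AG headers, for instance)
 * can buy itself extra trips around the LRU before the shrinker reclaims it
 * by raising the reference count. The value 3 below is an arbitrary example.
 */
static inline void
example_keep_buffer_cached(
	struct xfs_buf		*bp)
{
	xfs_buf_set_ref(bp, 3);
}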
2459
2460/*
2461 * Verify an on-disk magic value against the magic value specified in the
2462 * verifier structure. The verifier magic is in disk byte order so the caller is
2463 * expected to pass the value directly from disk.
2464 */
2465bool
2466xfs_verify_magic(
2467	struct xfs_buf		*bp,
2468	__be32			dmagic)
2469{
2470	struct xfs_mount	*mp = bp->b_mount;
2471	int			idx;
2472
2473	idx = xfs_has_crc(mp);
2474	if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
2475		return false;
2476	return dmagic == bp->b_ops->magic[idx];
2477}
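
/*
 * Sketch of how a read verifier typically uses the helper above (the header
 * layout and function name are hypothetical, and a mapped buffer is assumed):
 * compare the on-disk magic first, then continue with CRC and structural
 * checks only if it matches.
 */
struct example_hdr {
	__be32			magic;
};

static void __maybe_unused
example_read_verify(
	struct xfs_buf		*bp)
{
	struct example_hdr	*hdr = bp->b_addr;

	if (!xfs_verify_magic(bp, hdr->magic)) {
		xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
		return;
	}
	/* ... CRC and structural checks go here ... */
}
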
2478/*
2479 * Verify an on-disk magic value against the magic value specified in the
2480 * verifier structure. The verifier magic is in disk byte order so the caller is
2481 * expected to pass the value directly from disk.
2482 */
2483bool
2484xfs_verify_magic16(
2485	struct xfs_buf		*bp,
2486	__be16			dmagic)
2487{
2488	struct xfs_mount	*mp = bp->b_mount;
2489	int			idx;
2490
2491	idx = xfs_has_crc(mp);
2492	if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
2493		return false;
2494	return dmagic == bp->b_ops->magic16[idx];
2495}
2496