metaslab.c revision 268656
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 */
26
27#include <sys/zfs_context.h>
28#include <sys/dmu.h>
29#include <sys/dmu_tx.h>
30#include <sys/space_map.h>
31#include <sys/metaslab_impl.h>
32#include <sys/vdev_impl.h>
33#include <sys/zio.h>
34#include <sys/spa_impl.h>
35
36SYSCTL_DECL(_vfs_zfs);
37SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");
38
39/*
40 * Allow allocations to switch to gang blocks quickly. We do this to
41 * avoid having to load lots of space_maps in a given txg. There are,
42 * however, some cases where we want to avoid "fast" ganging and instead
43 * we want to do an exhaustive search of all metaslabs on this device.
44 * Currently we don't allow any gang, slog, or dump device related allocations
45 * to "fast" gang.
46 */
47#define	CAN_FASTGANG(flags) \
48	(!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
49	METASLAB_GANG_AVOID)))
50
51#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
52#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
53#define	METASLAB_ACTIVE_MASK		\
54	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
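/*
 * The top two bits of a metaslab's weight record whether it is currently
 * active as a primary or secondary allocation target; METASLAB_ACTIVE_MASK
 * is used to test for (and strip off) that activation state, leaving the
 * free-space-based portion of the weight.
 */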
55
56uint64_t metaslab_aliquot = 512ULL << 10;
57uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
58TUNABLE_QUAD("vfs.zfs.metaslab.gang_bang", &metaslab_gang_bang);
59SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, gang_bang, CTLFLAG_RWTUN,
60    &metaslab_gang_bang, 0,
61    "Force gang block allocation for blocks larger than or equal to this value");
62
63/*
64 * The in-core space map representation is more compact than its on-disk form.
65 * The zfs_condense_pct determines how much more compact the in-core
66 * space_map representation must be before we compact it on-disk.
67 * Values should be greater than or equal to 100.
68 */
69int zfs_condense_pct = 200;
70TUNABLE_INT("vfs.zfs.condense_pct", &zfs_condense_pct);
71SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN,
72    &zfs_condense_pct, 0,
73    "Condense on-disk spacemap when it is more than this many percent"
74    " of its in-memory counterpart");
75
76/*
77 * The zfs_mg_noalloc_threshold defines which metaslab groups should
78 * be eligible for allocation. The value is defined as a percentage of
79 * free space. Metaslab groups that have more free space than
80 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
81 * a metaslab group's free space is less than or equal to the
82 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
83 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
84 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
85 * groups are allowed to accept allocations. Gang blocks are always
86 * eligible to allocate on any metaslab group. The default value of 0 means
87 * no metaslab group will be excluded based on this criterion.
88 */
89int zfs_mg_noalloc_threshold = 0;
90TUNABLE_INT("vfs.zfs.mg_noalloc_threshold", &zfs_mg_noalloc_threshold);
91SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_noalloc_threshold, CTLFLAG_RWTUN,
92    &zfs_mg_noalloc_threshold, 0,
93    "Percentage of metaslab group size that should be free"
94    " to make it eligible for allocation");
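/*
 * Illustrative example: with zfs_mg_noalloc_threshold = 10, a metaslab
 * group is skipped for normal allocations once 10% or less of its space
 * is free, unless every group in the pool is at or below that mark
 * (gang allocations remain eligible everywhere).
 */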
95
96/*
97 * When set will load all metaslabs when pool is first opened.
98 */
99int metaslab_debug_load = 0;
100TUNABLE_INT("vfs.zfs.metaslab.debug_load", &metaslab_debug_load);
101SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_load, CTLFLAG_RWTUN,
102    &metaslab_debug_load, 0,
103    "Load all metaslabs when pool is first opened");
104
105/*
106 * When set will prevent metaslabs from being unloaded.
107 */
108int metaslab_debug_unload = 0;
109TUNABLE_INT("vfs.zfs.metaslab.debug_unload", &metaslab_debug_unload);
110SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_unload, CTLFLAG_RWTUN,
111    &metaslab_debug_unload, 0,
112    "Prevent metaslabs from being unloaded");
113
114/*
115 * Minimum size which forces the dynamic allocator to change
116 * its allocation strategy.  Once the space map cannot satisfy
117 * an allocation of this size then it switches to using a more
118 * aggressive strategy (i.e. search by size rather than offset).
119 */
120uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
121TUNABLE_QUAD("vfs.zfs.metaslab.df_alloc_threshold",
122    &metaslab_df_alloc_threshold);
123SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
124    &metaslab_df_alloc_threshold, 0,
125    "Minimum size which forces the dynamic allocator to change its allocation strategy");
126
127/*
128 * The minimum free space, in percent, which must be available
129 * in a space map to continue allocations in a first-fit fashion.
130 * Once the space_map's free space drops below this level we dynamically
131 * switch to using best-fit allocations.
132 */
133int metaslab_df_free_pct = 4;
134TUNABLE_INT("vfs.zfs.metaslab.df_free_pct", &metaslab_df_free_pct);
135SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN,
136    &metaslab_df_free_pct, 0,
137    "The minimum free space, in percent, which must be available in a space map to continue allocations in a first-fit fashion");
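/*
 * Taken together (illustrative): metaslab_df_alloc() keeps allocating
 * first-fit until either the largest free segment drops below
 * metaslab_df_alloc_threshold or less than metaslab_df_free_pct percent
 * of the metaslab is free, at which point it falls back to best-fit on
 * the size-ordered tree.
 */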
138
139/*
140 * A metaslab is considered "free" if it contains a contiguous
141 * segment which is greater than metaslab_min_alloc_size.
142 */
143uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
144TUNABLE_QUAD("vfs.zfs.metaslab.min_alloc_size",
145    &metaslab_min_alloc_size);
146SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, min_alloc_size, CTLFLAG_RWTUN,
147    &metaslab_min_alloc_size, 0,
148    "A metaslab is considered \"free\" if it contains a contiguous segment which is greater than vfs.zfs.metaslab.min_alloc_size");
149
150/*
151 * Percentage of all cpus that can be used by the metaslab taskq.
152 */
153int metaslab_load_pct = 50;
154TUNABLE_INT("vfs.zfs.metaslab.load_pct", &metaslab_load_pct);
155SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN,
156    &metaslab_load_pct, 0,
157    "Percentage of cpus that can be used by the metaslab taskq");
158
159/*
160 * Determines how many txgs a metaslab may remain loaded without having any
161 * allocations from it. As long as a metaslab continues to be used we will
162 * keep it loaded.
163 */
164int metaslab_unload_delay = TXG_SIZE * 2;
165TUNABLE_INT("vfs.zfs.metaslab.unload_delay", &metaslab_unload_delay);
166SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, unload_delay, CTLFLAG_RWTUN,
167    &metaslab_unload_delay, 0,
168    "Number of TXGs that an unused metaslab can be kept in memory");
169
170/*
171 * Should we be willing to write data to degraded vdevs?
172 */
173boolean_t zfs_write_to_degraded = B_FALSE;
174SYSCTL_INT(_vfs_zfs, OID_AUTO, write_to_degraded, CTLFLAG_RWTUN,
175    &zfs_write_to_degraded, 0, "Allow writing data to degraded vdevs");
176TUNABLE_INT("vfs.zfs.write_to_degraded", &zfs_write_to_degraded);
177
178/*
179 * Max number of metaslabs per group to preload.
180 */
181int metaslab_preload_limit = SPA_DVAS_PER_BP;
182TUNABLE_INT("vfs.zfs.metaslab.preload_limit", &metaslab_preload_limit);
183SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN,
184    &metaslab_preload_limit, 0,
185    "Max number of metaslabs per group to preload");
186
187/*
188 * Enable/disable preloading of metaslabs.
189 */
190boolean_t metaslab_preload_enabled = B_TRUE;
191TUNABLE_INT("vfs.zfs.metaslab.preload_enabled", &metaslab_preload_enabled);
192SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_enabled, CTLFLAG_RWTUN,
193    &metaslab_preload_enabled, 0,
194    "Enable/disable preloading of metaslabs");
195
196/*
197 * Enable/disable additional weight factor for each metaslab.
198 */
199boolean_t metaslab_weight_factor_enable = B_FALSE;
200TUNABLE_INT("vfs.zfs.metaslab.weight_factor_enable",
201    &metaslab_weight_factor_enable);
202SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, weight_factor_enable, CTLFLAG_RWTUN,
203    &metaslab_weight_factor_enable, 0,
204    "Enable additional weight factor for each metaslab");
205
206
207/*
208 * ==========================================================================
209 * Metaslab classes
210 * ==========================================================================
211 */
212metaslab_class_t *
213metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
214{
215	metaslab_class_t *mc;
216
217	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
218
219	mc->mc_spa = spa;
220	mc->mc_rotor = NULL;
221	mc->mc_ops = ops;
222
223	return (mc);
224}
225
226void
227metaslab_class_destroy(metaslab_class_t *mc)
228{
229	ASSERT(mc->mc_rotor == NULL);
230	ASSERT(mc->mc_alloc == 0);
231	ASSERT(mc->mc_deferred == 0);
232	ASSERT(mc->mc_space == 0);
233	ASSERT(mc->mc_dspace == 0);
234
235	kmem_free(mc, sizeof (metaslab_class_t));
236}
237
238int
239metaslab_class_validate(metaslab_class_t *mc)
240{
241	metaslab_group_t *mg;
242	vdev_t *vd;
243
244	/*
245	 * Must hold one of the spa_config locks.
246	 */
247	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
248	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
249
250	if ((mg = mc->mc_rotor) == NULL)
251		return (0);
252
253	do {
254		vd = mg->mg_vd;
255		ASSERT(vd->vdev_mg != NULL);
256		ASSERT3P(vd->vdev_top, ==, vd);
257		ASSERT3P(mg->mg_class, ==, mc);
258		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
259	} while ((mg = mg->mg_next) != mc->mc_rotor);
260
261	return (0);
262}
263
264void
265metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
266    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
267{
268	atomic_add_64(&mc->mc_alloc, alloc_delta);
269	atomic_add_64(&mc->mc_deferred, defer_delta);
270	atomic_add_64(&mc->mc_space, space_delta);
271	atomic_add_64(&mc->mc_dspace, dspace_delta);
272}
273
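/*
 * Recompute the class's minimum block size as 1 << (smallest vdev ashift
 * in the class), or SPA_MINBLOCKSIZE if the class has no vdevs yet.
 */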
274void
275metaslab_class_minblocksize_update(metaslab_class_t *mc)
276{
277	metaslab_group_t *mg;
278	vdev_t *vd;
279	uint64_t minashift = UINT64_MAX;
280
281	if ((mg = mc->mc_rotor) == NULL) {
282		mc->mc_minblocksize = SPA_MINBLOCKSIZE;
283		return;
284	}
285
286	do {
287		vd = mg->mg_vd;
288		if (vd->vdev_ashift < minashift)
289			minashift = vd->vdev_ashift;
290	} while ((mg = mg->mg_next) != mc->mc_rotor);
291
292	mc->mc_minblocksize = 1ULL << minashift;
293}
294
295uint64_t
296metaslab_class_get_alloc(metaslab_class_t *mc)
297{
298	return (mc->mc_alloc);
299}
300
301uint64_t
302metaslab_class_get_deferred(metaslab_class_t *mc)
303{
304	return (mc->mc_deferred);
305}
306
307uint64_t
308metaslab_class_get_space(metaslab_class_t *mc)
309{
310	return (mc->mc_space);
311}
312
313uint64_t
314metaslab_class_get_dspace(metaslab_class_t *mc)
315{
316	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
317}
318
319uint64_t
320metaslab_class_get_minblocksize(metaslab_class_t *mc)
321{
322	return (mc->mc_minblocksize);
323}
324
325/*
326 * ==========================================================================
327 * Metaslab groups
328 * ==========================================================================
329 */
330static int
331metaslab_compare(const void *x1, const void *x2)
332{
333	const metaslab_t *m1 = x1;
334	const metaslab_t *m2 = x2;
335
336	if (m1->ms_weight < m2->ms_weight)
337		return (1);
338	if (m1->ms_weight > m2->ms_weight)
339		return (-1);
340
341	/*
342	 * If the weights are identical, use the offset to force uniqueness.
343	 */
344	if (m1->ms_start < m2->ms_start)
345		return (-1);
346	if (m1->ms_start > m2->ms_start)
347		return (1);
348
349	ASSERT3P(m1, ==, m2);
350
351	return (0);
352}
353
354/*
355 * Update the allocatable flag and the metaslab group's capacity.
356 * The allocatable flag is set to true if the group's free capacity
357 * is greater than zfs_mg_noalloc_threshold. If a metaslab group transitions
358 * from allocatable to non-allocatable or vice versa then the metaslab
359 * group's class is updated to reflect the transition.
360 */
361static void
362metaslab_group_alloc_update(metaslab_group_t *mg)
363{
364	vdev_t *vd = mg->mg_vd;
365	metaslab_class_t *mc = mg->mg_class;
366	vdev_stat_t *vs = &vd->vdev_stat;
367	boolean_t was_allocatable;
368
369	ASSERT(vd == vd->vdev_top);
370
371	mutex_enter(&mg->mg_lock);
372	was_allocatable = mg->mg_allocatable;
373
374	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
375	    (vs->vs_space + 1);
376
377	mg->mg_allocatable = (mg->mg_free_capacity > zfs_mg_noalloc_threshold);
378
379	/*
380	 * The mc_alloc_groups maintains a count of the number of
381	 * groups in this metaslab class that are still above the
382	 * zfs_mg_noalloc_threshold. This is used by the allocating
383	 * threads to determine if they should avoid allocations to
384	 * a given group. The allocator will avoid allocations to a group
385	 * if that group has reached or is below the zfs_mg_noalloc_threshold
386	 * and there are still other groups that are above the threshold.
387	 * When a group transitions from allocatable to non-allocatable or
388	 * vice versa we update the metaslab class to reflect that change.
389	 * When the mc_alloc_groups value drops to 0 that means that all
390	 * groups have reached the zfs_mg_noalloc_threshold making all groups
391	 * eligible for allocations. This effectively means that all devices
392	 * are balanced again.
393	 */
394	if (was_allocatable && !mg->mg_allocatable)
395		mc->mc_alloc_groups--;
396	else if (!was_allocatable && mg->mg_allocatable)
397		mc->mc_alloc_groups++;
398	mutex_exit(&mg->mg_lock);
399}
400
401metaslab_group_t *
402metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
403{
404	metaslab_group_t *mg;
405
406	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
407	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
408	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
409	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
410	mg->mg_vd = vd;
411	mg->mg_class = mc;
412	mg->mg_activation_count = 0;
413
414	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
415	    minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);
416
417	return (mg);
418}
419
420void
421metaslab_group_destroy(metaslab_group_t *mg)
422{
423	ASSERT(mg->mg_prev == NULL);
424	ASSERT(mg->mg_next == NULL);
425	/*
426	 * We may have gone below zero with the activation count
427	 * either because we never activated in the first place or
428	 * because we're done, and possibly removing the vdev.
429	 */
430	ASSERT(mg->mg_activation_count <= 0);
431
432	taskq_destroy(mg->mg_taskq);
433	avl_destroy(&mg->mg_metaslab_tree);
434	mutex_destroy(&mg->mg_lock);
435	kmem_free(mg, sizeof (metaslab_group_t));
436}
437
438void
439metaslab_group_activate(metaslab_group_t *mg)
440{
441	metaslab_class_t *mc = mg->mg_class;
442	metaslab_group_t *mgprev, *mgnext;
443
444	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
445
446	ASSERT(mc->mc_rotor != mg);
447	ASSERT(mg->mg_prev == NULL);
448	ASSERT(mg->mg_next == NULL);
449	ASSERT(mg->mg_activation_count <= 0);
450
451	if (++mg->mg_activation_count <= 0)
452		return;
453
454	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
455	metaslab_group_alloc_update(mg);
456
457	if ((mgprev = mc->mc_rotor) == NULL) {
458		mg->mg_prev = mg;
459		mg->mg_next = mg;
460	} else {
461		mgnext = mgprev->mg_next;
462		mg->mg_prev = mgprev;
463		mg->mg_next = mgnext;
464		mgprev->mg_next = mg;
465		mgnext->mg_prev = mg;
466	}
467	mc->mc_rotor = mg;
468	metaslab_class_minblocksize_update(mc);
469}
470
471void
472metaslab_group_passivate(metaslab_group_t *mg)
473{
474	metaslab_class_t *mc = mg->mg_class;
475	metaslab_group_t *mgprev, *mgnext;
476
477	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
478
479	if (--mg->mg_activation_count != 0) {
480		ASSERT(mc->mc_rotor != mg);
481		ASSERT(mg->mg_prev == NULL);
482		ASSERT(mg->mg_next == NULL);
483		ASSERT(mg->mg_activation_count < 0);
484		return;
485	}
486
487	taskq_wait(mg->mg_taskq);
488
489	mgprev = mg->mg_prev;
490	mgnext = mg->mg_next;
491
492	if (mg == mgnext) {
493		mc->mc_rotor = NULL;
494	} else {
495		mc->mc_rotor = mgnext;
496		mgprev->mg_next = mgnext;
497		mgnext->mg_prev = mgprev;
498	}
499
500	mg->mg_prev = NULL;
501	mg->mg_next = NULL;
502	metaslab_class_minblocksize_update(mc);
503}
504
505static void
506metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
507{
508	mutex_enter(&mg->mg_lock);
509	ASSERT(msp->ms_group == NULL);
510	msp->ms_group = mg;
511	msp->ms_weight = 0;
512	avl_add(&mg->mg_metaslab_tree, msp);
513	mutex_exit(&mg->mg_lock);
514}
515
516static void
517metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
518{
519	mutex_enter(&mg->mg_lock);
520	ASSERT(msp->ms_group == mg);
521	avl_remove(&mg->mg_metaslab_tree, msp);
522	msp->ms_group = NULL;
523	mutex_exit(&mg->mg_lock);
524}
525
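/*
 * Re-sort a metaslab within its group's AVL tree after its weight changes.
 * The weight is part of the AVL key (see metaslab_compare()), so the node
 * must be removed and re-inserted rather than updated in place.
 */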
526static void
527metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
528{
529	/*
530	 * Although in principle the weight can be any value, in
531	 * practice we do not use values in the range [1, 510].
532	 */
533	ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
534	ASSERT(MUTEX_HELD(&msp->ms_lock));
535
536	mutex_enter(&mg->mg_lock);
537	ASSERT(msp->ms_group == mg);
538	avl_remove(&mg->mg_metaslab_tree, msp);
539	msp->ms_weight = weight;
540	avl_add(&mg->mg_metaslab_tree, msp);
541	mutex_exit(&mg->mg_lock);
542}
543
544/*
545 * Determine if a given metaslab group should skip allocations. A metaslab
546 * group should avoid allocations if its used capacity has crossed the
547 * zfs_mg_noalloc_threshold and there is at least one metaslab group
548 * that can still handle allocations.
549 */
550static boolean_t
551metaslab_group_allocatable(metaslab_group_t *mg)
552{
553	vdev_t *vd = mg->mg_vd;
554	spa_t *spa = vd->vdev_spa;
555	metaslab_class_t *mc = mg->mg_class;
556
557	/*
558	 * A metaslab group is considered allocatable if its free capacity
559	 * is greater than the set value of zfs_mg_noalloc_threshold, it's
560	 * associated with a slog, or there are no other metaslab groups
561	 * with free capacity greater than zfs_mg_noalloc_threshold.
562	 */
563	return (mg->mg_free_capacity > zfs_mg_noalloc_threshold ||
564	    mc != spa_normal_class(spa) || mc->mc_alloc_groups == 0);
565}
566
567/*
568 * ==========================================================================
569 * Range tree callbacks
570 * ==========================================================================
571 */
572
573/*
574 * Comparison function for the private size-ordered tree. Tree is sorted
575 * by size, larger sizes at the end of the tree.
576 */
577static int
578metaslab_rangesize_compare(const void *x1, const void *x2)
579{
580	const range_seg_t *r1 = x1;
581	const range_seg_t *r2 = x2;
582	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
583	uint64_t rs_size2 = r2->rs_end - r2->rs_start;
584
585	if (rs_size1 < rs_size2)
586		return (-1);
587	if (rs_size1 > rs_size2)
588		return (1);
589
590	if (r1->rs_start < r2->rs_start)
591		return (-1);
592
593	if (r1->rs_start > r2->rs_start)
594		return (1);
595
596	return (0);
597}
598
599/*
600 * Create any block allocator specific components. The current allocators
601 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
602 */
603static void
604metaslab_rt_create(range_tree_t *rt, void *arg)
605{
606	metaslab_t *msp = arg;
607
608	ASSERT3P(rt->rt_arg, ==, msp);
609	ASSERT(msp->ms_tree == NULL);
610
611	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
612	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
613}
614
615/*
616 * Destroy the block allocator specific components.
617 */
618static void
619metaslab_rt_destroy(range_tree_t *rt, void *arg)
620{
621	metaslab_t *msp = arg;
622
623	ASSERT3P(rt->rt_arg, ==, msp);
624	ASSERT3P(msp->ms_tree, ==, rt);
625	ASSERT0(avl_numnodes(&msp->ms_size_tree));
626
627	avl_destroy(&msp->ms_size_tree);
628}
629
630static void
631metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
632{
633	metaslab_t *msp = arg;
634
635	ASSERT3P(rt->rt_arg, ==, msp);
636	ASSERT3P(msp->ms_tree, ==, rt);
637	VERIFY(!msp->ms_condensing);
638	avl_add(&msp->ms_size_tree, rs);
639}
640
641static void
642metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
643{
644	metaslab_t *msp = arg;
645
646	ASSERT3P(rt->rt_arg, ==, msp);
647	ASSERT3P(msp->ms_tree, ==, rt);
648	VERIFY(!msp->ms_condensing);
649	avl_remove(&msp->ms_size_tree, rs);
650}
651
652static void
653metaslab_rt_vacate(range_tree_t *rt, void *arg)
654{
655	metaslab_t *msp = arg;
656
657	ASSERT3P(rt->rt_arg, ==, msp);
658	ASSERT3P(msp->ms_tree, ==, rt);
659
660	/*
661	 * Normally one would walk the tree freeing nodes along the way.
662	 * Since the nodes are shared with the range trees we can avoid
663	 * walking all nodes and just reinitialize the avl tree. The nodes
664	 * will be freed by the range tree, so we don't want to free them here.
665	 */
666	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
667	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
668}
669
670static range_tree_ops_t metaslab_rt_ops = {
671	metaslab_rt_create,
672	metaslab_rt_destroy,
673	metaslab_rt_add,
674	metaslab_rt_remove,
675	metaslab_rt_vacate
676};
677
678/*
679 * ==========================================================================
680 * Metaslab block operations
681 * ==========================================================================
682 */
683
684/*
685 * Return the maximum contiguous segment within the metaslab.
686 */
687uint64_t
688metaslab_block_maxsize(metaslab_t *msp)
689{
690	avl_tree_t *t = &msp->ms_size_tree;
691	range_seg_t *rs;
692
693	if (t == NULL || (rs = avl_last(t)) == NULL)
694		return (0ULL);
695
696	return (rs->rs_end - rs->rs_start);
697}
698
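/*
 * Carve 'size' bytes out of the metaslab using its class's allocator and
 * remove the chosen range from the in-core ms_tree. Returns the starting
 * offset, or -1ULL if the allocator could not find a fit.
 */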
699uint64_t
700metaslab_block_alloc(metaslab_t *msp, uint64_t size)
701{
702	uint64_t start;
703	range_tree_t *rt = msp->ms_tree;
704
705	VERIFY(!msp->ms_condensing);
706
707	start = msp->ms_ops->msop_alloc(msp, size);
708	if (start != -1ULL) {
709		vdev_t *vd = msp->ms_group->mg_vd;
710
711		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
712		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
713		VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
714		range_tree_remove(rt, start, size);
715	}
716	return (start);
717}
718
719/*
720 * ==========================================================================
721 * Common allocator routines
722 * ==========================================================================
723 */
724
725/*
726 * This is a helper function that can be used by the allocator to find
727 * a suitable block to allocate. This will search the specified AVL
728 * tree looking for a block that matches the specified criteria.
729 */
730static uint64_t
731metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
732    uint64_t align)
733{
734	range_seg_t *rs, rsearch;
735	avl_index_t where;
736
737	rsearch.rs_start = *cursor;
738	rsearch.rs_end = *cursor + size;
739
740	rs = avl_find(t, &rsearch, &where);
741	if (rs == NULL)
742		rs = avl_nearest(t, where, AVL_AFTER);
743
744	while (rs != NULL) {
745		uint64_t offset = P2ROUNDUP(rs->rs_start, align);
746
747		if (offset + size <= rs->rs_end) {
748			*cursor = offset + size;
749			return (offset);
750		}
751		rs = AVL_NEXT(t, rs);
752	}
753
754	/*
755	 * If we know we've searched the whole map (*cursor == 0), give up.
756	 * Otherwise, reset the cursor to the beginning and try again.
757	 */
758	if (*cursor == 0)
759		return (-1ULL);
760
761	*cursor = 0;
762	return (metaslab_block_picker(t, cursor, size, align));
763}
764
765/*
766 * ==========================================================================
767 * The first-fit block allocator
768 * ==========================================================================
769 */
770static uint64_t
771metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
772{
773	/*
774	 * Find the largest power of 2 block size that evenly divides the
775	 * requested size. This is used to try to allocate blocks with similar
776	 * alignment from the same area of the metaslab (i.e. same cursor
777	 * bucket), but it does not guarantee that allocations of other sizes
778	 * will not be present in the same region.
779	 */
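	/*
	 * Illustrative example: size = 0x6000 (24KB) yields align = 0x2000
	 * (8KB), so this request shares the cursor at ms_lbas[13] with other
	 * requests whose size is a multiple of 8KB but not of 16KB.
	 */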
780	uint64_t align = size & -size;
781	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
782	avl_tree_t *t = &msp->ms_tree->rt_root;
783
784	return (metaslab_block_picker(t, cursor, size, align));
785}
786
787/* ARGSUSED */
788static boolean_t
789metaslab_ff_fragmented(metaslab_t *msp)
790{
791	return (B_TRUE);
792}
793
794static metaslab_ops_t metaslab_ff_ops = {
795	metaslab_ff_alloc,
796	metaslab_ff_fragmented
797};
798
799/*
800 * ==========================================================================
801 * Dynamic block allocator -
802 * Uses the first fit allocation scheme until space gets low and then
803 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
804 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
805 * ==========================================================================
806 */
807static uint64_t
808metaslab_df_alloc(metaslab_t *msp, uint64_t size)
809{
810	/*
811	 * Find the largest power of 2 block size that evenly divides the
812	 * requested size. This is used to try to allocate blocks with similar
813	 * alignment from the same area of the metaslab (i.e. same cursor
814	 * bucket), but it does not guarantee that allocations of other sizes
815	 * will not be present in the same region.
816	 */
817	uint64_t align = size & -size;
818	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
819	range_tree_t *rt = msp->ms_tree;
820	avl_tree_t *t = &rt->rt_root;
821	uint64_t max_size = metaslab_block_maxsize(msp);
822	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
823
824	ASSERT(MUTEX_HELD(&msp->ms_lock));
825	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
826
827	if (max_size < size)
828		return (-1ULL);
829
830	/*
831	 * If we're running low on space switch to using the size
832	 * sorted AVL tree (best-fit).
833	 */
834	if (max_size < metaslab_df_alloc_threshold ||
835	    free_pct < metaslab_df_free_pct) {
836		t = &msp->ms_size_tree;
837		*cursor = 0;
838	}
839
840	return (metaslab_block_picker(t, cursor, size, 1ULL));
841}
842
843static boolean_t
844metaslab_df_fragmented(metaslab_t *msp)
845{
846	range_tree_t *rt = msp->ms_tree;
847	uint64_t max_size = metaslab_block_maxsize(msp);
848	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
849
850	if (max_size >= metaslab_df_alloc_threshold &&
851	    free_pct >= metaslab_df_free_pct)
852		return (B_FALSE);
853
854	return (B_TRUE);
855}
856
857static metaslab_ops_t metaslab_df_ops = {
858	metaslab_df_alloc,
859	metaslab_df_fragmented
860};
861
862/*
863 * ==========================================================================
864 * Cursor fit block allocator -
865 * Select the largest region in the metaslab, set the cursor to the beginning
866 * of the range and the cursor_end to the end of the range. As allocations
867 * are made advance the cursor. Continue allocating from the cursor until
868 * the range is exhausted and then find a new range.
869 * ==========================================================================
870 */
871static uint64_t
872metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
873{
874	range_tree_t *rt = msp->ms_tree;
875	avl_tree_t *t = &msp->ms_size_tree;
876	uint64_t *cursor = &msp->ms_lbas[0];
877	uint64_t *cursor_end = &msp->ms_lbas[1];
878	uint64_t offset = 0;
879
880	ASSERT(MUTEX_HELD(&msp->ms_lock));
881	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
882
883	ASSERT3U(*cursor_end, >=, *cursor);
884
885	if ((*cursor + size) > *cursor_end) {
886		range_seg_t *rs;
887
888		rs = avl_last(&msp->ms_size_tree);
889		if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
890			return (-1ULL);
891
892		*cursor = rs->rs_start;
893		*cursor_end = rs->rs_end;
894	}
895
896	offset = *cursor;
897	*cursor += size;
898
899	return (offset);
900}
901
902static boolean_t
903metaslab_cf_fragmented(metaslab_t *msp)
904{
905	return (metaslab_block_maxsize(msp) < metaslab_min_alloc_size);
906}
907
908static metaslab_ops_t metaslab_cf_ops = {
909	metaslab_cf_alloc,
910	metaslab_cf_fragmented
911};
912
913/*
914 * ==========================================================================
915 * New dynamic fit allocator -
916 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
917 * contiguous blocks. If no region is found then just use the largest segment
918 * that remains.
919 * ==========================================================================
920 */
921
922/*
923 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
924 * to request from the allocator.
925 */
926uint64_t metaslab_ndf_clump_shift = 4;
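/*
 * Illustrative example: for an 8KB request, highbit64(size) is 14, so with
 * the default clump shift of 4 the allocator first looks for a free region
 * of about 1ULL << (14 + 4) = 256KB, capped at the largest free segment.
 */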
927
928static uint64_t
929metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
930{
931	avl_tree_t *t = &msp->ms_tree->rt_root;
932	avl_index_t where;
933	range_seg_t *rs, rsearch;
934	uint64_t hbit = highbit64(size);
935	uint64_t *cursor = &msp->ms_lbas[hbit - 1];
936	uint64_t max_size = metaslab_block_maxsize(msp);
937
938	ASSERT(MUTEX_HELD(&msp->ms_lock));
939	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
940
941	if (max_size < size)
942		return (-1ULL);
943
944	rsearch.rs_start = *cursor;
945	rsearch.rs_end = *cursor + size;
946
947	rs = avl_find(t, &rsearch, &where);
948	if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
949		t = &msp->ms_size_tree;
950
951		rsearch.rs_start = 0;
952		rsearch.rs_end = MIN(max_size,
953		    1ULL << (hbit + metaslab_ndf_clump_shift));
954		rs = avl_find(t, &rsearch, &where);
955		if (rs == NULL)
956			rs = avl_nearest(t, where, AVL_AFTER);
957		ASSERT(rs != NULL);
958	}
959
960	if ((rs->rs_end - rs->rs_start) >= size) {
961		*cursor = rs->rs_start + size;
962		return (rs->rs_start);
963	}
964	return (-1ULL);
965}
966
967static boolean_t
968metaslab_ndf_fragmented(metaslab_t *msp)
969{
970	return (metaslab_block_maxsize(msp) <=
971	    (metaslab_min_alloc_size << metaslab_ndf_clump_shift));
972}
973
974static metaslab_ops_t metaslab_ndf_ops = {
975	metaslab_ndf_alloc,
976	metaslab_ndf_fragmented
977};
978
979metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
980
981/*
982 * ==========================================================================
983 * Metaslabs
984 * ==========================================================================
985 */
986
987/*
988 * Wait for any in-progress metaslab loads to complete.
989 */
990void
991metaslab_load_wait(metaslab_t *msp)
992{
993	ASSERT(MUTEX_HELD(&msp->ms_lock));
994
995	while (msp->ms_loading) {
996		ASSERT(!msp->ms_loaded);
997		cv_wait(&msp->ms_load_cv, &msp->ms_lock);
998	}
999}
1000
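/*
 * Load the metaslab's space map (if it has one) into the in-core ms_tree,
 * then strip out any deferred frees so the tree reflects only space that
 * is currently safe to allocate.
 */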
1001int
1002metaslab_load(metaslab_t *msp)
1003{
1004	int error = 0;
1005
1006	ASSERT(MUTEX_HELD(&msp->ms_lock));
1007	ASSERT(!msp->ms_loaded);
1008	ASSERT(!msp->ms_loading);
1009
1010	msp->ms_loading = B_TRUE;
1011
1012	/*
1013	 * If the space map has not been allocated yet, then treat
1014	 * all the space in the metaslab as free and add it to the
1015	 * ms_tree.
1016	 */
1017	if (msp->ms_sm != NULL)
1018		error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
1019	else
1020		range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);
1021
1022	msp->ms_loaded = (error == 0);
1023	msp->ms_loading = B_FALSE;
1024
1025	if (msp->ms_loaded) {
1026		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1027			range_tree_walk(msp->ms_defertree[t],
1028			    range_tree_remove, msp->ms_tree);
1029		}
1030	}
1031	cv_broadcast(&msp->ms_load_cv);
1032	return (error);
1033}
1034
1035void
1036metaslab_unload(metaslab_t *msp)
1037{
1038	ASSERT(MUTEX_HELD(&msp->ms_lock));
1039	range_tree_vacate(msp->ms_tree, NULL, NULL);
1040	msp->ms_loaded = B_FALSE;
1041	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
1042}
1043
1044metaslab_t *
1045metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg)
1046{
1047	vdev_t *vd = mg->mg_vd;
1048	objset_t *mos = vd->vdev_spa->spa_meta_objset;
1049	metaslab_t *msp;
1050
1051	msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
1052	mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);
1053	cv_init(&msp->ms_load_cv, NULL, CV_DEFAULT, NULL);
1054	msp->ms_id = id;
1055	msp->ms_start = id << vd->vdev_ms_shift;
1056	msp->ms_size = 1ULL << vd->vdev_ms_shift;
1057
1058	/*
1059	 * We only open space map objects that already exist. All others
1060	 * will be opened when we finally allocate an object for it.
1061	 */
1062	if (object != 0) {
1063		VERIFY0(space_map_open(&msp->ms_sm, mos, object, msp->ms_start,
1064		    msp->ms_size, vd->vdev_ashift, &msp->ms_lock));
1065		ASSERT(msp->ms_sm != NULL);
1066	}
1067
1068	/*
1069	 * We create the main range tree here, but we don't create the
1070	 * alloctree and freetree until metaslab_sync_done().  This serves
1071	 * two purposes: it allows metaslab_sync_done() to detect the
1072	 * addition of new space; and for debugging, it ensures that we'd
1073	 * data fault on any attempt to use this metaslab before it's ready.
1074	 */
1075	msp->ms_tree = range_tree_create(&metaslab_rt_ops, msp, &msp->ms_lock);
1076	metaslab_group_add(mg, msp);
1077
1078	msp->ms_ops = mg->mg_class->mc_ops;
1079
1080	/*
1081	 * If we're opening an existing pool (txg == 0) or creating
1082	 * a new one (txg == TXG_INITIAL), all space is available now.
1083	 * If we're adding space to an existing pool, the new space
1084	 * does not become available until after this txg has synced.
1085	 */
1086	if (txg <= TXG_INITIAL)
1087		metaslab_sync_done(msp, 0);
1088
1089	/*
1090	 * If metaslab_debug_load is set and we're initializing a metaslab
1091	 * that has an allocated space_map object then load its space
1092	 * map so that we can verify frees.
1093	 */
1094	if (metaslab_debug_load && msp->ms_sm != NULL) {
1095		mutex_enter(&msp->ms_lock);
1096		VERIFY0(metaslab_load(msp));
1097		mutex_exit(&msp->ms_lock);
1098	}
1099
1100	if (txg != 0) {
1101		vdev_dirty(vd, 0, NULL, txg);
1102		vdev_dirty(vd, VDD_METASLAB, msp, txg);
1103	}
1104
1105	return (msp);
1106}
1107
1108void
1109metaslab_fini(metaslab_t *msp)
1110{
1111	metaslab_group_t *mg = msp->ms_group;
1112
1113	metaslab_group_remove(mg, msp);
1114
1115	mutex_enter(&msp->ms_lock);
1116
1117	VERIFY(msp->ms_group == NULL);
1118	vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
1119	    0, -msp->ms_size);
1120	space_map_close(msp->ms_sm);
1121
1122	metaslab_unload(msp);
1123	range_tree_destroy(msp->ms_tree);
1124
1125	for (int t = 0; t < TXG_SIZE; t++) {
1126		range_tree_destroy(msp->ms_alloctree[t]);
1127		range_tree_destroy(msp->ms_freetree[t]);
1128	}
1129
1130	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1131		range_tree_destroy(msp->ms_defertree[t]);
1132	}
1133
1134	ASSERT0(msp->ms_deferspace);
1135
1136	mutex_exit(&msp->ms_lock);
1137	cv_destroy(&msp->ms_load_cv);
1138	mutex_destroy(&msp->ms_lock);
1139
1140	kmem_free(msp, sizeof (metaslab_t));
1141}
1142
1143/*
1144 * Apply a weighting factor based on the histogram information for this
1145 * metaslab. The current weighting factor is somewhat arbitrary and requires
1146 * additional investigation. The implementation provides a measure of
1147 * "weighted" free space and gives a higher weighting for larger contiguous
1148 * regions. The weighting factor is determined by counting the number of
1149 * sm_shift sectors that exist in each region represented by the histogram.
1150 * That value is then multiplied by the power of 2 exponent and the sm_shift
1151 * value.
1152 *
1153 * For example, assume the 2^21 histogram bucket has 4 2MB regions and the
1154 * metaslab has an sm_shift value of 9 (512B):
1155 *
1156 * 1) calculate the number of sm_shift sectors in the region:
1157 *	2^21 / 2^9 = 2^12 = 4096 * 4 (number of regions) = 16384
1158 * 2) multiply by the power of 2 exponent and the sm_shift value:
1159 *	16384 * 21 * 9 = 3096576
1160 * This value will be added to the weighting of the metaslab.
1161 */
1162static uint64_t
1163metaslab_weight_factor(metaslab_t *msp)
1164{
1165	uint64_t factor = 0;
1166	uint64_t sectors;
1167	int i;
1168
1169	/*
1170	 * A null space map means that the entire metaslab is free,
1171	 * calculate a weight factor that spans the entire size of the
1172	 * metaslab.
1173	 */
1174	if (msp->ms_sm == NULL) {
1175		vdev_t *vd = msp->ms_group->mg_vd;
1176
1177		i = highbit64(msp->ms_size) - 1;
1178		sectors = msp->ms_size >> vd->vdev_ashift;
1179		return (sectors * i * vd->vdev_ashift);
1180	}
1181
1182	if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
1183		return (0);
1184
1185	for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE(msp->ms_sm); i++) {
1186		if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
1187			continue;
1188
1189		/*
1190		 * Determine the number of sm_shift sectors in the region
1191		 * indicated by the histogram. For example, given an
1192		 * sm_shift value of 9 (512 bytes) and i = 4 then we know
1193		 * that we're looking at an 8K region in the histogram
1194		 * (i.e. 9 + 4 = 13, 2^13 = 8192). To figure out the
1195		 * number of sm_shift sectors (512 bytes in this example),
1196		 * we would take 8192 / 512 = 16. Since the histogram
1197		 * is offset by sm_shift we can simply use the value of
1198		 * i to calculate this (i.e. 2^i = 16 where i = 4).
1199		 */
1200		sectors = msp->ms_sm->sm_phys->smp_histogram[i] << i;
1201		factor += (i + msp->ms_sm->sm_shift) * sectors;
1202	}
1203	return (factor * msp->ms_sm->sm_shift);
1204}
1205
1206static uint64_t
1207metaslab_weight(metaslab_t *msp)
1208{
1209	metaslab_group_t *mg = msp->ms_group;
1210	vdev_t *vd = mg->mg_vd;
1211	uint64_t weight, space;
1212
1213	ASSERT(MUTEX_HELD(&msp->ms_lock));
1214
1215	/*
1216	 * This vdev is in the process of being removed so there is nothing
1217	 * for us to do here.
1218	 */
1219	if (vd->vdev_removing) {
1220		ASSERT0(space_map_allocated(msp->ms_sm));
1221		ASSERT0(vd->vdev_ms_shift);
1222		return (0);
1223	}
1224
1225	/*
1226	 * The baseline weight is the metaslab's free space.
1227	 */
1228	space = msp->ms_size - space_map_allocated(msp->ms_sm);
1229	weight = space;
1230
1231	/*
1232	 * Modern disks have uniform bit density and constant angular velocity.
1233	 * Therefore, the outer recording zones are faster (higher bandwidth)
1234	 * than the inner zones by the ratio of outer to inner track diameter,
1235	 * which is typically around 2:1.  We account for this by assigning
1236	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1237	 * In effect, this means that we'll select the metaslab with the most
1238	 * free bandwidth rather than simply the one with the most free space.
1239	 */
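	/*
	 * Illustrative example: on a vdev with 200 metaslabs, metaslab 0
	 * gets weight 2 * space, metaslab 100 gets roughly 1.5 * space,
	 * and the last metaslab gets just over 1 * space.
	 */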
1240	weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
1241	ASSERT(weight >= space && weight <= 2 * space);
1242
1243	msp->ms_factor = metaslab_weight_factor(msp);
1244	if (metaslab_weight_factor_enable)
1245		weight += msp->ms_factor;
1246
1247	if (msp->ms_loaded && !msp->ms_ops->msop_fragmented(msp)) {
1248		/*
1249		 * If this metaslab is one we're actively using, adjust its
1250		 * weight to make it preferable to any inactive metaslab so
1251		 * we'll polish it off.
1252		 */
1253		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
1254	}
1255
1256	return (weight);
1257}
1258
1259static int
1260metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
1261{
1262	ASSERT(MUTEX_HELD(&msp->ms_lock));
1263
1264	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
1265		metaslab_load_wait(msp);
1266		if (!msp->ms_loaded) {
1267			int error = metaslab_load(msp);
1268			if (error) {
1269				metaslab_group_sort(msp->ms_group, msp, 0);
1270				return (error);
1271			}
1272		}
1273
1274		metaslab_group_sort(msp->ms_group, msp,
1275		    msp->ms_weight | activation_weight);
1276	}
1277	ASSERT(msp->ms_loaded);
1278	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
1279
1280	return (0);
1281}
1282
1283static void
1284metaslab_passivate(metaslab_t *msp, uint64_t size)
1285{
1286	/*
1287	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1288	 * this metaslab again.  In that case, it had better be empty,
1289	 * or we would be leaving space on the table.
1290	 */
1291	ASSERT(size >= SPA_MINBLOCKSIZE || range_tree_space(msp->ms_tree) == 0);
1292	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
1293	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
1294}
1295
1296static void
1297metaslab_preload(void *arg)
1298{
1299	metaslab_t *msp = arg;
1300	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1301
1302	ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
1303
1304	mutex_enter(&msp->ms_lock);
1305	metaslab_load_wait(msp);
1306	if (!msp->ms_loaded)
1307		(void) metaslab_load(msp);
1308
1309	/*
1310	 * Set the ms_access_txg value so that we don't unload it right away.
1311	 */
1312	msp->ms_access_txg = spa_syncing_txg(spa) + metaslab_unload_delay + 1;
1313	mutex_exit(&msp->ms_lock);
1314}
1315
1316static void
1317metaslab_group_preload(metaslab_group_t *mg)
1318{
1319	spa_t *spa = mg->mg_vd->vdev_spa;
1320	metaslab_t *msp;
1321	avl_tree_t *t = &mg->mg_metaslab_tree;
1322	int m = 0;
1323
1324	if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
1325		taskq_wait(mg->mg_taskq);
1326		return;
1327	}
1328
1329	mutex_enter(&mg->mg_lock);
1330	/*
1331	 * Load the next potential metaslabs
1332	 */
1333	msp = avl_first(t);
1334	while (msp != NULL) {
1335		metaslab_t *msp_next = AVL_NEXT(t, msp);
1336
1337		/* If we have reached our preload limit then we're done */
1338		if (++m > metaslab_preload_limit)
1339			break;
1340
1341		/*
1342		 * We must drop the metaslab group lock here to preserve
1343		 * lock ordering with the ms_lock (when grabbing both
1344		 * the mg_lock and the ms_lock, the ms_lock must be taken
1345		 * first).  As a result, it is possible that the ordering
1346		 * of the metaslabs within the avl tree may change before
1347		 * we reacquire the lock. The metaslab cannot be removed from
1348		 * the tree while we're in syncing context so it is safe to
1349		 * drop the mg_lock here. If the metaslabs are reordered
1350		 * nothing will break -- we just may end up loading a
1351		 * less than optimal one.
1352		 */
1353		mutex_exit(&mg->mg_lock);
1354		VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
1355		    msp, TQ_SLEEP) != 0);
1356		mutex_enter(&mg->mg_lock);
1357		msp = msp_next;
1358	}
1359	mutex_exit(&mg->mg_lock);
1360}
1361
1362/*
1363 * Determine if the space map's on-disk footprint is past our tolerance
1364 * for inefficiency. We would like to use the following criteria to make
1365 * our decision:
1366 *
1367 * 1. The size of the space map object should not dramatically increase as a
1368 * result of writing out the free space range tree.
1369 *
1370 * 2. The on-disk space map is at least zfs_condense_pct/100 times the size
1371 * of the minimal free range tree representation (i.e. with zfs_condense_pct
1372 * = 110 and a minimal form of 1MB, condense once the map reaches 1.1MB).
1373 *
1374 * Checking the first condition is tricky since we don't want to walk
1375 * the entire AVL tree calculating the estimated on-disk size. Instead we
1376 * use the size-ordered range tree in the metaslab and calculate the
1377 * size required to write out the largest segment in our free tree. If the
1378 * size required to represent that segment on disk is larger than the space
1379 * map object then we avoid condensing this map.
1380 *
1381 * To determine the second criterion we use a best-case estimate and assume
1382 * each segment can be represented on-disk as a single 64-bit entry. We refer
1383 * to this best-case estimate as the space map's minimal form.
1384 */
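/*
 * With the default zfs_condense_pct of 200, the second criterion means the
 * on-disk space map must have grown to at least twice the size of its
 * minimal form before we consider condensing it.
 */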
1385static boolean_t
1386metaslab_should_condense(metaslab_t *msp)
1387{
1388	space_map_t *sm = msp->ms_sm;
1389	range_seg_t *rs;
1390	uint64_t size, entries, segsz;
1391
1392	ASSERT(MUTEX_HELD(&msp->ms_lock));
1393	ASSERT(msp->ms_loaded);
1394
1395	/*
1396	 * Use the ms_size_tree range tree, which is ordered by size, to
1397	 * obtain the largest segment in the free tree. If the tree is empty
1398	 * then we should condense the map.
1399	 */
1400	rs = avl_last(&msp->ms_size_tree);
1401	if (rs == NULL)
1402		return (B_TRUE);
1403
1404	/*
1405	 * Calculate the number of 64-bit entries this segment would
1406	 * require when written to disk. If this single segment would be
1407	 * larger on-disk than the entire current on-disk structure, then
1408	 * clearly condensing will increase the on-disk structure size.
1409	 */
1410	size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
1411	entries = size / (MIN(size, SM_RUN_MAX));
1412	segsz = entries * sizeof (uint64_t);
1413
1414	return (segsz <= space_map_length(msp->ms_sm) &&
1415	    space_map_length(msp->ms_sm) >= (zfs_condense_pct *
1416	    sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root)) / 100);
1417}
1418
1419/*
1420 * Condense the on-disk space map representation to its minimized form.
1421 * The minimized form consists of a small number of allocations followed by
1422 * the entries of the free range tree.
1423 */
1424static void
1425metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
1426{
1427	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1428	range_tree_t *freetree = msp->ms_freetree[txg & TXG_MASK];
1429	range_tree_t *condense_tree;
1430	space_map_t *sm = msp->ms_sm;
1431
1432	ASSERT(MUTEX_HELD(&msp->ms_lock));
1433	ASSERT3U(spa_sync_pass(spa), ==, 1);
1434	ASSERT(msp->ms_loaded);
1435
1436	spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, "
1437	    "smp size %llu, segments %lu", txg, msp->ms_id, msp,
1438	    space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root));
1439
1440	/*
1441	 * Create a range tree that is 100% allocated. We remove segments
1442	 * that have been freed in this txg, any deferred frees that exist,
1443	 * and any allocation in the future. Removing segments should be
1444	 * a relatively inexpensive operation since we expect these trees to
1445	 * have a small number of nodes.
1446	 */
1447	condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock);
1448	range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
1449
1450	/*
1451	 * Remove what's been freed in this txg from the condense_tree.
1452	 * Since we're in sync_pass 1, we know that all the frees from
1453	 * this txg are in the freetree.
1454	 */
1455	range_tree_walk(freetree, range_tree_remove, condense_tree);
1456
1457	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1458		range_tree_walk(msp->ms_defertree[t],
1459		    range_tree_remove, condense_tree);
1460	}
1461
1462	for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
1463		range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK],
1464		    range_tree_remove, condense_tree);
1465	}
1466
1467	/*
1468	 * We're about to drop the metaslab's lock thus allowing
1469	 * other consumers to change its content. Set the
1470	 * metaslab's ms_condensing flag to ensure that
1471	 * allocations on this metaslab do not occur while we're
1472	 * in the middle of committing it to disk. This is only critical
1473	 * for the ms_tree as all other range trees use per txg
1474	 * views of their content.
1475	 */
1476	msp->ms_condensing = B_TRUE;
1477
1478	mutex_exit(&msp->ms_lock);
1479	space_map_truncate(sm, tx);
1480	mutex_enter(&msp->ms_lock);
1481
1482	/*
1483	 * While we would ideally like to create a space_map representation
1484	 * that consists only of allocation records, doing so can be
1485	 * prohibitively expensive because the in-core free tree can be
1486	 * large, and therefore computationally expensive to subtract
1487	 * from the condense_tree. Instead we sync out two trees, a cheap
1488	 * allocation only tree followed by the in-core free tree. While not
1489	 * optimal, this is typically close to optimal, and much cheaper to
1490	 * compute.
1491	 */
1492	space_map_write(sm, condense_tree, SM_ALLOC, tx);
1493	range_tree_vacate(condense_tree, NULL, NULL);
1494	range_tree_destroy(condense_tree);
1495
1496	space_map_write(sm, msp->ms_tree, SM_FREE, tx);
1497	msp->ms_condensing = B_FALSE;
1498}
1499
1500/*
1501 * Write a metaslab to disk in the context of the specified transaction group.
1502 */
1503void
1504metaslab_sync(metaslab_t *msp, uint64_t txg)
1505{
1506	metaslab_group_t *mg = msp->ms_group;
1507	vdev_t *vd = mg->mg_vd;
1508	spa_t *spa = vd->vdev_spa;
1509	objset_t *mos = spa_meta_objset(spa);
1510	range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
1511	range_tree_t **freetree = &msp->ms_freetree[txg & TXG_MASK];
1512	range_tree_t **freed_tree =
1513	    &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
1514	dmu_tx_t *tx;
1515	uint64_t object = space_map_object(msp->ms_sm);
1516
1517	ASSERT(!vd->vdev_ishole);
1518
1519	/*
1520	 * This metaslab has just been added so there's no work to do now.
1521	 */
1522	if (*freetree == NULL) {
1523		ASSERT3P(alloctree, ==, NULL);
1524		return;
1525	}
1526
1527	ASSERT3P(alloctree, !=, NULL);
1528	ASSERT3P(*freetree, !=, NULL);
1529	ASSERT3P(*freed_tree, !=, NULL);
1530
1531	if (range_tree_space(alloctree) == 0 &&
1532	    range_tree_space(*freetree) == 0)
1533		return;
1534
1535	/*
1536	 * The only state that can actually be changing concurrently with
1537	 * metaslab_sync() is the metaslab's ms_tree.  No other thread can
1538	 * be modifying this txg's alloctree, freetree, freed_tree, or
1539	 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
1540	 * space_map ASSERTs. We drop it whenever we call into the DMU,
1541	 * because the DMU can call down to us (e.g. via zio_free()) at
1542	 * any time.
1543	 */
1544
1545	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
1546
1547	if (msp->ms_sm == NULL) {
1548		uint64_t new_object;
1549
1550		new_object = space_map_alloc(mos, tx);
1551		VERIFY3U(new_object, !=, 0);
1552
1553		VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
1554		    msp->ms_start, msp->ms_size, vd->vdev_ashift,
1555		    &msp->ms_lock));
1556		ASSERT(msp->ms_sm != NULL);
1557	}
1558
1559	mutex_enter(&msp->ms_lock);
1560
1561	if (msp->ms_loaded && spa_sync_pass(spa) == 1 &&
1562	    metaslab_should_condense(msp)) {
1563		metaslab_condense(msp, txg, tx);
1564	} else {
1565		space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
1566		space_map_write(msp->ms_sm, *freetree, SM_FREE, tx);
1567	}
1568
1569	range_tree_vacate(alloctree, NULL, NULL);
1570
1571	if (msp->ms_loaded) {
1572		/*
1573		 * When the space map is loaded, we have an accurate
1574		 * histogram in the range tree. This gives us an opportunity
1575		 * to bring the space map's histogram up-to-date so we clear
1576		 * it first before updating it.
1577		 */
1578		space_map_histogram_clear(msp->ms_sm);
1579		space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);
1580	} else {
1581		/*
1582		 * Since the space map is not loaded we simply update the
1583		 * existing histogram with what was freed in this txg. This
1584		 * means that the on-disk histogram may not have an accurate
1585		 * view of the free space but it's close enough to allow
1586		 * us to make allocation decisions.
1587		 */
1588		space_map_histogram_add(msp->ms_sm, *freetree, tx);
1589	}
1590
1591	/*
1592	 * For sync pass 1, we avoid traversing this txg's free range tree
1593	 * and instead will just swap the pointers for freetree and
1594	 * freed_tree. We can safely do this since the freed_tree is
1595	 * guaranteed to be empty on the initial pass.
1596	 */
1597	if (spa_sync_pass(spa) == 1) {
1598		range_tree_swap(freetree, freed_tree);
1599	} else {
1600		range_tree_vacate(*freetree, range_tree_add, *freed_tree);
1601	}
1602
1603	ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
1604	ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
1605
1606	mutex_exit(&msp->ms_lock);
1607
1608	if (object != space_map_object(msp->ms_sm)) {
1609		object = space_map_object(msp->ms_sm);
1610		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
1611		    msp->ms_id, sizeof (uint64_t), &object, tx);
1612	}
1613	dmu_tx_commit(tx);
1614}
1615
1616/*
1617 * Called after a transaction group has completely synced to mark
1618 * all of the metaslab's free space as usable.
1619 */
1620void
1621metaslab_sync_done(metaslab_t *msp, uint64_t txg)
1622{
1623	metaslab_group_t *mg = msp->ms_group;
1624	vdev_t *vd = mg->mg_vd;
1625	range_tree_t **freed_tree;
1626	range_tree_t **defer_tree;
1627	int64_t alloc_delta, defer_delta;
1628
1629	ASSERT(!vd->vdev_ishole);
1630
1631	mutex_enter(&msp->ms_lock);
1632
1633	/*
1634	 * If this metaslab is just becoming available, initialize its
1635	 * alloctrees, freetrees, and defertree and add its capacity to
1636	 * the vdev.
1637	 */
1638	if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) {
1639		for (int t = 0; t < TXG_SIZE; t++) {
1640			ASSERT(msp->ms_alloctree[t] == NULL);
1641			ASSERT(msp->ms_freetree[t] == NULL);
1642
1643			msp->ms_alloctree[t] = range_tree_create(NULL, msp,
1644			    &msp->ms_lock);
1645			msp->ms_freetree[t] = range_tree_create(NULL, msp,
1646			    &msp->ms_lock);
1647		}
1648
1649		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1650			ASSERT(msp->ms_defertree[t] == NULL);
1651
1652			msp->ms_defertree[t] = range_tree_create(NULL, msp,
1653			    &msp->ms_lock);
1654		}
1655
1656		vdev_space_update(vd, 0, 0, msp->ms_size);
1657	}
1658
1659	freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
1660	defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
1661
1662	alloc_delta = space_map_alloc_delta(msp->ms_sm);
1663	defer_delta = range_tree_space(*freed_tree) -
1664	    range_tree_space(*defer_tree);
1665
1666	vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
1667
1668	ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
1669	ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
1670
1671	/*
1672	 * If there's a metaslab_load() in progress, wait for it to complete
1673	 * so that we have a consistent view of the in-core space map.
1674	 */
1675	metaslab_load_wait(msp);
1676
1677	/*
1678	 * Move the frees from the defer_tree back to the free
1679	 * range tree (if it's loaded). Swap the freed_tree and the
1680	 * defer_tree -- this is safe to do because we've just emptied out
1681	 * the defer_tree.
1682	 */
1683	range_tree_vacate(*defer_tree,
1684	    msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
1685	range_tree_swap(freed_tree, defer_tree);
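	/*
	 * Net effect: space freed in txg N does not return to the allocatable
	 * ms_tree until the sync of txg N + TXG_DEFER_SIZE (i.e. N + 2)
	 * completes.
	 */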
1686
1687	space_map_update(msp->ms_sm);
1688
1689	msp->ms_deferspace += defer_delta;
1690	ASSERT3S(msp->ms_deferspace, >=, 0);
1691	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
1692	if (msp->ms_deferspace != 0) {
1693		/*
1694		 * Keep syncing this metaslab until all deferred frees
1695		 * are back in circulation.
1696		 */
1697		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1698	}
1699
1700	if (msp->ms_loaded && msp->ms_access_txg < txg) {
1701		for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
1702			VERIFY0(range_tree_space(
1703			    msp->ms_alloctree[(txg + t) & TXG_MASK]));
1704		}
1705
1706		if (!metaslab_debug_unload)
1707			metaslab_unload(msp);
1708	}
1709
1710	metaslab_group_sort(mg, msp, metaslab_weight(msp));
1711	mutex_exit(&msp->ms_lock);
1712
1713}
1714
1715void
1716metaslab_sync_reassess(metaslab_group_t *mg)
1717{
1718	metaslab_group_alloc_update(mg);
1719
1720	/*
1721	 * Preload the next potential metaslabs
1722	 */
1723	metaslab_group_preload(mg);
1724}
1725
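/*
 * Return a rough distance, in bytes, between this metaslab and the one
 * referenced by an existing DVA; DVAs on other vdevs are treated as
 * maximally distant. Used to spread a block's copies apart on a vdev.
 */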
1726static uint64_t
1727metaslab_distance(metaslab_t *msp, dva_t *dva)
1728{
1729	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
1730	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
1731	uint64_t start = msp->ms_id;
1732
1733	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
1734		return (1ULL << 63);
1735
1736	if (offset < start)
1737		return ((start - offset) << ms_shift);
1738	if (offset > start)
1739		return ((offset - start) << ms_shift);
1740	return (0);
1741}
1742
1743static uint64_t
1744metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
1745    uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
1746{
1747	spa_t *spa = mg->mg_vd->vdev_spa;
1748	metaslab_t *msp = NULL;
1749	uint64_t offset = -1ULL;
1750	avl_tree_t *t = &mg->mg_metaslab_tree;
1751	uint64_t activation_weight;
1752	uint64_t target_distance;
1753	int i;
1754
1755	activation_weight = METASLAB_WEIGHT_PRIMARY;
1756	for (i = 0; i < d; i++) {
1757		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
1758			activation_weight = METASLAB_WEIGHT_SECONDARY;
1759			break;
1760		}
1761	}
1762
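	/*
	 * Walk the group's metaslabs in order of decreasing weight and
	 * take the first one that is heavy enough for this allocation,
	 * is not condensing, and (for secondary activations) is far
	 * enough from the DVAs already placed.  The weight is rechecked
	 * after we take the metaslab lock, since it may have changed
	 * while we were blocked on it.
	 */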
1763	for (;;) {
1764		boolean_t was_active;
1765
1766		mutex_enter(&mg->mg_lock);
1767		for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
1768			if (msp->ms_weight < asize) {
1769				spa_dbgmsg(spa, "%s: failed to meet weight "
1770				    "requirement: vdev %llu, txg %llu, mg %p, "
1771				    "msp %p, psize %llu, asize %llu, "
1772				    "weight %llu", spa_name(spa),
1773				    mg->mg_vd->vdev_id, txg,
1774				    mg, msp, psize, asize, msp->ms_weight);
1775				mutex_exit(&mg->mg_lock);
1776				return (-1ULL);
1777			}
1778
1779			/*
1780			 * If the selected metaslab is condensing, skip it.
1781			 */
1782			if (msp->ms_condensing)
1783				continue;
1784
1785			was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
1786			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
1787				break;
1788
1789			target_distance = min_distance +
1790			    (space_map_allocated(msp->ms_sm) != 0 ? 0 :
1791			    min_distance >> 1);
1792
1793			for (i = 0; i < d; i++)
1794				if (metaslab_distance(msp, &dva[i]) <
1795				    target_distance)
1796					break;
1797			if (i == d)
1798				break;
1799		}
1800		mutex_exit(&mg->mg_lock);
1801		if (msp == NULL)
1802			return (-1ULL);
1803
1804		mutex_enter(&msp->ms_lock);
1805
1806		/*
1807		 * Ensure that the metaslab we have selected is still
1808		 * capable of handling our request. It's possible that
1809		 * another thread may have changed the weight while we
1810		 * were blocked on the metaslab lock.
1811		 */
1812		if (msp->ms_weight < asize || (was_active &&
1813		    !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
1814		    activation_weight == METASLAB_WEIGHT_PRIMARY)) {
1815			mutex_exit(&msp->ms_lock);
1816			continue;
1817		}
1818
1819		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
1820		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
1821			metaslab_passivate(msp,
1822			    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
1823			mutex_exit(&msp->ms_lock);
1824			continue;
1825		}
1826
1827		if (metaslab_activate(msp, activation_weight) != 0) {
1828			mutex_exit(&msp->ms_lock);
1829			continue;
1830		}
1831
1832		/*
1833		 * If this metaslab is currently condensing then pick again as
1834		 * we can't manipulate this metaslab until it's committed
1835		 * to disk.
1836		 */
1837		if (msp->ms_condensing) {
1838			mutex_exit(&msp->ms_lock);
1839			continue;
1840		}
1841
1842		if ((offset = metaslab_block_alloc(msp, asize)) != -1ULL)
1843			break;
1844
1845		metaslab_passivate(msp, metaslab_block_maxsize(msp));
1846		mutex_exit(&msp->ms_lock);
1847	}
1848
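	/*
	 * We have an offset.  Record the allocation in this txg's
	 * alloctree so it gets synced to the space map, dirtying the
	 * metaslab if this is its first allocation of the txg, and push
	 * out the unload deadline.
	 */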
1849	if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
1850		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
1851
1852	range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, asize);
1853	msp->ms_access_txg = txg + metaslab_unload_delay;
1854
1855	mutex_exit(&msp->ms_lock);
1856
1857	return (offset);
1858}
1859
1860/*
1861 * Allocate a block for the specified i/o.
1862 */
1863static int
1864metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
1865    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
1866{
1867	metaslab_group_t *mg, *rotor;
1868	vdev_t *vd;
1869	int dshift = 3;
1870	int all_zero;
1871	int zio_lock = B_FALSE;
1872	boolean_t allocatable;
1873	uint64_t offset = -1ULL;
1874	uint64_t asize;
1875	uint64_t distance;
1876
1877	ASSERT(!DVA_IS_VALID(&dva[d]));
1878
1879	/*
1880	 * For testing, make some blocks above a certain size be gang blocks.
1881	 */
1882	if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
1883		return (SET_ERROR(ENOSPC));
1884
1885	/*
1886	 * Start at the rotor and loop through all mgs until we find something.
1887	 * Note that there's no locking on mc_rotor or mc_aliquot because
1888	 * nothing actually breaks if we miss a few updates -- we just won't
1889	 * allocate quite as evenly.  It all balances out over time.
1890	 *
1891	 * If we are doing ditto or log blocks, try to spread them across
1892	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
1893	 * allocated all of our ditto blocks, then try to spread them out on
1894	 * that vdev as much as possible.  If it turns out to not be possible,
1895	 * gradually lower our standards until anything becomes acceptable.
1896	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
1897	 * gives us hope of containing our fault domains to something we're
1898	 * able to reason about.  Otherwise, any two top-level vdev failures
1899	 * will guarantee the loss of data.  With consecutive allocation,
1900	 * only two adjacent top-level vdev failures will result in data loss.
1901	 *
1902	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
1903	 * ourselves on the same vdev as our gang block header.  That
1904	 * way, we can hope for locality in vdev_cache, plus it makes our
1905	 * fault domains something tractable.
1906	 */
1907	if (hintdva) {
1908		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
1909
1910		/*
1911		 * It's possible the vdev we're using as the hint no
1912		 * longer exists (i.e. removed). Consult the rotor when
1913		 * all else fails.
1914		 */
1915		if (vd != NULL) {
1916			mg = vd->vdev_mg;
1917
1918			if (flags & METASLAB_HINTBP_AVOID &&
1919			    mg->mg_next != NULL)
1920				mg = mg->mg_next;
1921		} else {
1922			mg = mc->mc_rotor;
1923		}
1924	} else if (d != 0) {
1925		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
1926		mg = vd->vdev_mg->mg_next;
1927	} else {
1928		mg = mc->mc_rotor;
1929	}
1930
1931	/*
1932	 * If the hint put us into the wrong metaslab class, or into a
1933	 * metaslab group that has been passivated, just follow the rotor.
1934	 */
1935	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
1936		mg = mc->mc_rotor;
1937
1938	rotor = mg;
1939top:
1940	all_zero = B_TRUE;
1941	do {
1942		ASSERT(mg->mg_activation_count == 1);
1943
1944		vd = mg->mg_vd;
1945
1946		/*
1947		 * Don't allocate from faulted devices.
1948		 */
1949		if (zio_lock) {
1950			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
1951			allocatable = vdev_allocatable(vd);
1952			spa_config_exit(spa, SCL_ZIO, FTAG);
1953		} else {
1954			allocatable = vdev_allocatable(vd);
1955		}
1956
1957		/*
1958		 * Determine if the selected metaslab group is eligible
1959		 * for allocations. If we're ganging or have requested
1960		 * an allocation for the smallest gang block size
1961		 * then we don't want to avoid allocating to this
1962		 * metaslab group. If we're in this condition, we should
1963		 * try to allocate from any device possible so that we
1964		 * don't inadvertently return ENOSPC and suspend the pool
1965		 * even though space is still available.
1966		 */
1967		if (allocatable && CAN_FASTGANG(flags) &&
1968		    psize > SPA_GANGBLOCKSIZE)
1969			allocatable = metaslab_group_allocatable(mg);
1970
1971		if (!allocatable)
1972			goto next;
1973
1974		/*
1975		 * Avoid writing single-copy data to a failing vdev
1976		 * unless the user instructs us that it is okay.
1977		 */
1978		if ((vd->vdev_stat.vs_write_errors > 0 ||
1979		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
1980		    d == 0 && dshift == 3 &&
1981		    !(zfs_write_to_degraded && vd->vdev_state ==
1982		    VDEV_STATE_DEGRADED)) {
1983			all_zero = B_FALSE;
1984			goto next;
1985		}
1986
1987		ASSERT(mg->mg_class == mc);
1988
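		/*
		 * The required distance to this block's other DVAs starts
		 * at 1/8th of the vdev (dshift == 3) and is halved on each
		 * subsequent pass through the rotor (dshift++ below); once
		 * it shrinks to a single metaslab it is ignored entirely.
		 */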
1989		distance = vd->vdev_asize >> dshift;
1990		if (distance <= (1ULL << vd->vdev_ms_shift))
1991			distance = 0;
1992		else
1993			all_zero = B_FALSE;
1994
1995		asize = vdev_psize_to_asize(vd, psize);
1996		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
1997
1998		offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
1999		    dva, d);
2000		if (offset != -1ULL) {
2001			/*
2002			 * If we've just selected this metaslab group,
2003			 * figure out whether the corresponding vdev is
2004			 * over- or under-used relative to the pool,
2005			 * and set an allocation bias to even it out.
2006			 */
2007			if (mc->mc_aliquot == 0) {
2008				vdev_stat_t *vs = &vd->vdev_stat;
2009				int64_t vu, cu;
2010
2011				vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
2012				cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
2013
2014				/*
2015				 * Calculate how much more or less we should
2016				 * try to allocate from this device during
2017				 * this iteration around the rotor.
2018				 * For example, if a device is 80% full
2019				 * and the pool is 20% full then we should
2020				 * reduce allocations by 60% on this device.
2021				 *
2022				 * mg_bias = (20 - 80) * 512K / 100 = -307K
2023				 *
2024				 * This reduces allocations by 307K for this
2025				 * iteration.
2026				 */
2027				mg->mg_bias = ((cu - vu) *
2028				    (int64_t)mg->mg_aliquot) / 100;
2029			}
2030
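			/*
			 * Once we've consumed this group's bias-adjusted
			 * aliquot, advance the rotor so the next allocation
			 * starts on the next metaslab group.
			 */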
2031			if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
2032			    mg->mg_aliquot + mg->mg_bias) {
2033				mc->mc_rotor = mg->mg_next;
2034				mc->mc_aliquot = 0;
2035			}
2036
2037			DVA_SET_VDEV(&dva[d], vd->vdev_id);
2038			DVA_SET_OFFSET(&dva[d], offset);
2039			DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
2040			DVA_SET_ASIZE(&dva[d], asize);
2041
2042			return (0);
2043		}
2044next:
2045		mc->mc_rotor = mg->mg_next;
2046		mc->mc_aliquot = 0;
2047	} while ((mg = mg->mg_next) != rotor);
2048
2049	if (!all_zero) {
2050		dshift++;
2051		ASSERT(dshift < 64);
2052		goto top;
2053	}
2054
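	/*
	 * If the last device we examined appeared unallocatable, retry
	 * from the top with zio_lock set, which makes each
	 * vdev_allocatable() check run under the SCL_ZIO config lock.
	 */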
2055	if (!allocatable && !zio_lock) {
2056		dshift = 3;
2057		zio_lock = B_TRUE;
2058		goto top;
2059	}
2060
2061	bzero(&dva[d], sizeof (dva_t));
2062
2063	return (SET_ERROR(ENOSPC));
2064}
2065
2066/*
2067 * Free the block represented by DVA in the context of the specified
2068 * transaction group.
2069 */
2070static void
2071metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
2072{
2073	uint64_t vdev = DVA_GET_VDEV(dva);
2074	uint64_t offset = DVA_GET_OFFSET(dva);
2075	uint64_t size = DVA_GET_ASIZE(dva);
2076	vdev_t *vd;
2077	metaslab_t *msp;
2078
2079	ASSERT(DVA_IS_VALID(dva));
2080
2081	if (txg > spa_freeze_txg(spa))
2082		return;
2083
2084	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
2085	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
2086		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
2087		    (u_longlong_t)vdev, (u_longlong_t)offset);
2088		ASSERT(0);
2089		return;
2090	}
2091
2092	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2093
2094	if (DVA_GET_GANG(dva))
2095		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2096
2097	mutex_enter(&msp->ms_lock);
2098
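	/*
	 * A "now" free is applied immediately: the range is removed from
	 * this txg's alloctree and returned straight to the in-core space
	 * map.  Otherwise the free is recorded in this txg's freetree and
	 * is synced out (and possibly deferred) when the metaslab syncs.
	 */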
2099	if (now) {
2100		range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
2101		    offset, size);
2102
2103		VERIFY(!msp->ms_condensing);
2104		VERIFY3U(offset, >=, msp->ms_start);
2105		VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
2106		VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
2107		    msp->ms_size);
2108		VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2109		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2110		range_tree_add(msp->ms_tree, offset, size);
2111	} else {
2112		if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0)
2113			vdev_dirty(vd, VDD_METASLAB, msp, txg);
2114		range_tree_add(msp->ms_freetree[txg & TXG_MASK],
2115		    offset, size);
2116	}
2117
2118	mutex_exit(&msp->ms_lock);
2119}
2120
2121/*
2122 * Intent log support: upon opening the pool after a crash, notify the SPA
2123 * of blocks that the intent log has allocated for immediate write, but
2124 * which are still considered free by the SPA because the last transaction
2125 * group didn't commit yet.
2126 */
2127static int
2128metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
2129{
2130	uint64_t vdev = DVA_GET_VDEV(dva);
2131	uint64_t offset = DVA_GET_OFFSET(dva);
2132	uint64_t size = DVA_GET_ASIZE(dva);
2133	vdev_t *vd;
2134	metaslab_t *msp;
2135	int error = 0;
2136
2137	ASSERT(DVA_IS_VALID(dva));
2138
2139	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
2140	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
2141		return (SET_ERROR(ENXIO));
2142
2143	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2144
2145	if (DVA_GET_GANG(dva))
2146		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2147
2148	mutex_enter(&msp->ms_lock);
2149
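	/*
	 * Activating the metaslab loads its space map (if it isn't
	 * loaded already) so that the range_tree_contains() check below
	 * can verify the claimed range is in fact still free.
	 */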
2150	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
2151		error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
2152
2153	if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
2154		error = SET_ERROR(ENOENT);
2155
2156	if (error || txg == 0) {	/* txg == 0 indicates dry run */
2157		mutex_exit(&msp->ms_lock);
2158		return (error);
2159	}
2160
2161	VERIFY(!msp->ms_condensing);
2162	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2163	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2164	VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
2165	range_tree_remove(msp->ms_tree, offset, size);
2166
2167	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
2168		if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2169			vdev_dirty(vd, VDD_METASLAB, msp, txg);
2170		range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
2171	}
2172
2173	mutex_exit(&msp->ms_lock);
2174
2175	return (0);
2176}
2177
2178int
2179metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
2180    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
2181{
2182	dva_t *dva = bp->blk_dva;
2183	dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
2184	int error = 0;
2185
2186	ASSERT(bp->blk_birth == 0);
2187	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
2188
2189	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2190
2191	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
2192		spa_config_exit(spa, SCL_ALLOC, FTAG);
2193		return (SET_ERROR(ENOSPC));
2194	}
2195
2196	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
2197	ASSERT(BP_GET_NDVAS(bp) == 0);
2198	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
2199
2200	for (int d = 0; d < ndvas; d++) {
2201		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
2202		    txg, flags);
2203		if (error != 0) {
2204			for (d--; d >= 0; d--) {
2205				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
2206				bzero(&dva[d], sizeof (dva_t));
2207			}
2208			spa_config_exit(spa, SCL_ALLOC, FTAG);
2209			return (error);
2210		}
2211	}
2212	ASSERT(error == 0);
2213	ASSERT(BP_GET_NDVAS(bp) == ndvas);
2214
2215	spa_config_exit(spa, SCL_ALLOC, FTAG);
2216
2217	BP_SET_BIRTH(bp, txg, txg);
2218
2219	return (0);
2220}
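/*
 * For illustration, a caller such as the ZIL block allocator uses the
 * interface above roughly as follows (simplified; see zio_alloc_zil()
 * for the real thing):
 *
 *	error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
 *	    txg, old_bp, METASLAB_HINTBP_AVOID | METASLAB_GANG_AVOID);
 *
 * On success the DVA(s) in new_bp describe the allocated segment and the
 * block's birth txg is set; on ENOSPC the caller falls back, e.g. to the
 * normal class or to gang blocks.
 */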
2221
2222void
2223metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
2224{
2225	const dva_t *dva = bp->blk_dva;
2226	int ndvas = BP_GET_NDVAS(bp);
2227
2228	ASSERT(!BP_IS_HOLE(bp));
2229	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
2230
2231	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
2232
2233	for (int d = 0; d < ndvas; d++)
2234		metaslab_free_dva(spa, &dva[d], txg, now);
2235
2236	spa_config_exit(spa, SCL_FREE, FTAG);
2237}
2238
2239int
2240metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
2241{
2242	const dva_t *dva = bp->blk_dva;
2243	int ndvas = BP_GET_NDVAS(bp);
2244	int error = 0;
2245
2246	ASSERT(!BP_IS_HOLE(bp));
2247
2248	if (txg != 0) {
2249		/*
2250		 * First do a dry run to make sure all DVAs are claimable,
2251		 * so we don't have to unwind from partial failures below.
2252		 */
2253		if ((error = metaslab_claim(spa, bp, 0)) != 0)
2254			return (error);
2255	}
2256
2257	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2258
2259	for (int d = 0; d < ndvas; d++)
2260		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
2261			break;
2262
2263	spa_config_exit(spa, SCL_ALLOC, FTAG);
2264
2265	ASSERT(error == 0 || txg == 0);
2266
2267	return (error);
2268}
2269
2270void
2271metaslab_check_free(spa_t *spa, const blkptr_t *bp)
2272{
2273	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
2274		return;
2275
2276	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2277	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
2278		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
2279		vdev_t *vd = vdev_lookup_top(spa, vdev);
2280		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
2281		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
2282		metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2283
2284		if (msp->ms_loaded)
2285			range_tree_verify(msp->ms_tree, offset, size);
2286
2287		for (int j = 0; j < TXG_SIZE; j++)
2288			range_tree_verify(msp->ms_freetree[j], offset, size);
2289		for (int j = 0; j < TXG_DEFER_SIZE; j++)
2290			range_tree_verify(msp->ms_defertree[j], offset, size);
2291	}
2292	spa_config_exit(spa, SCL_VDEV, FTAG);
2293}
2294