metaslab.c revision 265741
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 */
26
27#include <sys/zfs_context.h>
28#include <sys/dmu.h>
29#include <sys/dmu_tx.h>
30#include <sys/space_map.h>
31#include <sys/metaslab_impl.h>
32#include <sys/vdev_impl.h>
33#include <sys/zio.h>
34#include <sys/spa_impl.h>
35
36SYSCTL_DECL(_vfs_zfs);
37SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");
38
39/*
40 * Allow allocations to switch to gang blocks quickly. We do this to
41 * avoid having to load lots of space_maps in a given txg. There are,
42 * however, some cases where we want to avoid "fast" ganging and instead
43 * we want to do an exhaustive search of all metaslabs on this device.
44 * Currently we don't allow any gang, slog, or dump device related allocations
45 * to "fast" gang.
46 */
47#define	CAN_FASTGANG(flags) \
48	(!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
49	METASLAB_GANG_AVOID)))
50
51#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
52#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
53#define	METASLAB_ACTIVE_MASK		\
54	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
55
56uint64_t metaslab_aliquot = 512ULL << 10;
57uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
58TUNABLE_QUAD("vfs.zfs.metaslab.gang_bang", &metaslab_gang_bang);
59SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, gang_bang, CTLFLAG_RWTUN,
60    &metaslab_gang_bang, 0,
61    "Force gang block allocation for blocks larger than or equal to this value");
62
63/*
64 * The in-core space map representation is more compact than its on-disk form.
65 * The zfs_condense_pct determines how much more compact the in-core
66 * space_map representation must be before we compact it on-disk.
67 * Values should be greater than or equal to 100.
68 */
69int zfs_condense_pct = 200;
70TUNABLE_INT("vfs.zfs.condense_pct", &zfs_condense_pct);
71SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN,
72    &zfs_condense_pct, 0,
73    "Condense on-disk spacemap when it is more than this many percent"
74    " of its in-memory counterpart");
75
76/*
77 * The zfs_mg_noalloc_threshold defines which metaslab groups should
78 * be eligible for allocation. The value is defined as a percentage of
79 * free space. Metaslab groups that have more free space than
80 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
81 * a metaslab group's free space is less than or equal to the
82 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
83 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
84 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
85 * groups are allowed to accept allocations. Gang blocks are always
86 * eligible to allocate on any metaslab group. The default value of 0 means
87 * no metaslab group will be excluded based on this criterion.
88 */
89int zfs_mg_noalloc_threshold = 0;
90TUNABLE_INT("vfs.zfs.mg_noalloc_threshold", &zfs_mg_noalloc_threshold);
91SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_noalloc_threshold, CTLFLAG_RWTUN,
92    &zfs_mg_noalloc_threshold, 0,
93    "Percentage of metaslab group size that should be free"
94    " to make it eligible for allocation");
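
/*
 * Illustration of the threshold behavior above, assuming a hypothetical
 * setting of zfs_mg_noalloc_threshold = 30: a metaslab group with 40% of
 * its space free remains eligible for allocations, a group with 25% free
 * is skipped while at least one other group in the normal class is still
 * above 30%, and once every group has dropped to 30% or below, all groups
 * accept allocations again. With the default value of 0, no group is ever
 * excluded.
 */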
95
96/*
97 * When set, we will load all metaslabs when the pool is first opened.
98 */
99int metaslab_debug_load = 0;
100TUNABLE_INT("vfs.zfs.metaslab.debug_load", &metaslab_debug_load);
101SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_load, CTLFLAG_RWTUN,
102    &metaslab_debug_load, 0,
103    "Load all metaslabs when pool is first opened");
104
105/*
106 * When set, we will prevent metaslabs from being unloaded.
107 */
108int metaslab_debug_unload = 0;
109TUNABLE_INT("vfs.zfs.metaslab.debug_unload", &metaslab_debug_unload);
110SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_unload, CTLFLAG_RWTUN,
111    &metaslab_debug_unload, 0,
112    "Prevent metaslabs from being unloaded");
113
114/*
115 * Minimum size which forces the dynamic allocator to change
116 * its allocation strategy.  Once the space map cannot satisfy
117 * an allocation of this size then it switches to using a more
118 * aggressive strategy (i.e. search by size rather than by offset).
119 */
120uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
121TUNABLE_QUAD("vfs.zfs.metaslab.df_alloc_threshold",
122    &metaslab_df_alloc_threshold);
123SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
124    &metaslab_df_alloc_threshold, 0,
125    "Minimum size which forces the dynamic allocator to change its allocation strategy");
126
127/*
128 * The minimum free space, in percent, which must be available
129 * in a space map to continue allocations in a first-fit fashion.
130 * Once the space_map's free space drops below this level we dynamically
131 * switch to using best-fit allocations.
132 */
133int metaslab_df_free_pct = 4;
134TUNABLE_INT("vfs.zfs.metaslab.df_free_pct", &metaslab_df_free_pct);
135SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN,
136    &metaslab_df_free_pct, 0,
137    "The minimum free space, in percent, which must be available in a space map to continue allocations in a first-fit fashion");
138
139/*
140 * A metaslab is considered "free" if it contains a contiguous
141 * segment which is greater than metaslab_min_alloc_size.
142 */
143uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
144TUNABLE_QUAD("vfs.zfs.metaslab.min_alloc_size",
145    &metaslab_min_alloc_size);
146SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, min_alloc_size, CTLFLAG_RWTUN,
147    &metaslab_min_alloc_size, 0,
148    "A metaslab is considered \"free\" if it contains a contiguous segment which is greater than vfs.zfs.metaslab.min_alloc_size");
149
150/*
151 * Percentage of all cpus that can be used by the metaslab taskq.
152 */
153int metaslab_load_pct = 50;
154TUNABLE_INT("vfs.zfs.metaslab.load_pct", &metaslab_load_pct);
155SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN,
156    &metaslab_load_pct, 0,
157    "Percentage of cpus that can be used by the metaslab taskq");
158
159/*
160 * Determines how many txgs a metaslab may remain loaded without having any
161 * allocations from it. As long as a metaslab continues to be used we will
162 * keep it loaded.
163 */
164int metaslab_unload_delay = TXG_SIZE * 2;
165TUNABLE_INT("vfs.zfs.metaslab.unload_delay", &metaslab_unload_delay);
166SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, unload_delay, CTLFLAG_RWTUN,
167    &metaslab_unload_delay, 0,
168    "Number of TXGs that an unused metaslab can be kept in memory");
169
170/*
171 * Should we be willing to write data to degraded vdevs?
172 */
173boolean_t zfs_write_to_degraded = B_FALSE;
174SYSCTL_INT(_vfs_zfs, OID_AUTO, write_to_degraded, CTLFLAG_RWTUN,
175    &zfs_write_to_degraded, 0, "Allow writing data to degraded vdevs");
176TUNABLE_INT("vfs.zfs.write_to_degraded", &zfs_write_to_degraded);
177
178/*
179 * Max number of metaslabs per group to preload.
180 */
181int metaslab_preload_limit = SPA_DVAS_PER_BP;
182TUNABLE_INT("vfs.zfs.metaslab.preload_limit", &metaslab_preload_limit);
183SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN,
184    &metaslab_preload_limit, 0,
185    "Max number of metaslabs per group to preload");
186
187/*
188 * Enable/disable preloading of metaslab.
189 */
190boolean_t metaslab_preload_enabled = B_TRUE;
191TUNABLE_INT("vfs.zfs.metaslab.preload_enabled", &metaslab_preload_enabled);
192SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_enabled, CTLFLAG_RWTUN,
193    &metaslab_preload_enabled, 0,
194    "Enable metaslab group preloading");
195
196/*
197 * Enable/disable additional weight factor for each metaslab.
198 */
199boolean_t metaslab_weight_factor_enable = B_FALSE;
200TUNABLE_INT("vfs.zfs.metaslab.weight_factor_enable",
201    &metaslab_weight_factor_enable);
202SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, weight_factor_enable, CTLFLAG_RWTUN,
203    &metaslab_weight_factor_enable, 0,
204    "Enable additional weight factor for each metaslab");
205
206
207/*
208 * ==========================================================================
209 * Metaslab classes
210 * ==========================================================================
211 */
212metaslab_class_t *
213metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
214{
215	metaslab_class_t *mc;
216
217	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
218
219	mc->mc_spa = spa;
220	mc->mc_rotor = NULL;
221	mc->mc_ops = ops;
222
223	return (mc);
224}
225
226void
227metaslab_class_destroy(metaslab_class_t *mc)
228{
229	ASSERT(mc->mc_rotor == NULL);
230	ASSERT(mc->mc_alloc == 0);
231	ASSERT(mc->mc_deferred == 0);
232	ASSERT(mc->mc_space == 0);
233	ASSERT(mc->mc_dspace == 0);
234
235	kmem_free(mc, sizeof (metaslab_class_t));
236}
237
238int
239metaslab_class_validate(metaslab_class_t *mc)
240{
241	metaslab_group_t *mg;
242	vdev_t *vd;
243
244	/*
245	 * Must hold one of the spa_config locks.
246	 */
247	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
248	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
249
250	if ((mg = mc->mc_rotor) == NULL)
251		return (0);
252
253	do {
254		vd = mg->mg_vd;
255		ASSERT(vd->vdev_mg != NULL);
256		ASSERT3P(vd->vdev_top, ==, vd);
257		ASSERT3P(mg->mg_class, ==, mc);
258		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
259	} while ((mg = mg->mg_next) != mc->mc_rotor);
260
261	return (0);
262}
263
264void
265metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
266    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
267{
268	atomic_add_64(&mc->mc_alloc, alloc_delta);
269	atomic_add_64(&mc->mc_deferred, defer_delta);
270	atomic_add_64(&mc->mc_space, space_delta);
271	atomic_add_64(&mc->mc_dspace, dspace_delta);
272}
273
274void
275metaslab_class_minblocksize_update(metaslab_class_t *mc)
276{
277	metaslab_group_t *mg;
278	vdev_t *vd;
279	uint64_t minashift = UINT64_MAX;
280
281	if ((mg = mc->mc_rotor) == NULL) {
282		mc->mc_minblocksize = SPA_MINBLOCKSIZE;
283		return;
284	}
285
286	do {
287		vd = mg->mg_vd;
288		if (vd->vdev_ashift < minashift)
289			minashift = vd->vdev_ashift;
290	} while ((mg = mg->mg_next) != mc->mc_rotor);
291
292	mc->mc_minblocksize = 1ULL << minashift;
293}
294
295uint64_t
296metaslab_class_get_alloc(metaslab_class_t *mc)
297{
298	return (mc->mc_alloc);
299}
300
301uint64_t
302metaslab_class_get_deferred(metaslab_class_t *mc)
303{
304	return (mc->mc_deferred);
305}
306
307uint64_t
308metaslab_class_get_space(metaslab_class_t *mc)
309{
310	return (mc->mc_space);
311}
312
313uint64_t
314metaslab_class_get_dspace(metaslab_class_t *mc)
315{
316	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
317}
318
319uint64_t
320metaslab_class_get_minblocksize(metaslab_class_t *mc)
321{
322	return (mc->mc_minblocksize);
323}
324
325/*
326 * ==========================================================================
327 * Metaslab groups
328 * ==========================================================================
329 */
330static int
331metaslab_compare(const void *x1, const void *x2)
332{
333	const metaslab_t *m1 = x1;
334	const metaslab_t *m2 = x2;
335
336	if (m1->ms_weight < m2->ms_weight)
337		return (1);
338	if (m1->ms_weight > m2->ms_weight)
339		return (-1);
340
341	/*
342	 * If the weights are identical, use the offset to force uniqueness.
343	 */
344	if (m1->ms_start < m2->ms_start)
345		return (-1);
346	if (m1->ms_start > m2->ms_start)
347		return (1);
348
349	ASSERT3P(m1, ==, m2);
350
351	return (0);
352}
353
354/*
355 * Update the allocatable flag and the metaslab group's capacity.
356 * The allocatable flag is set to true if the group's free capacity is
357 * greater than zfs_mg_noalloc_threshold. If a metaslab group transitions
358 * from allocatable to non-allocatable or vice versa then the metaslab
359 * group's class is updated to reflect the transition.
360 */
361static void
362metaslab_group_alloc_update(metaslab_group_t *mg)
363{
364	vdev_t *vd = mg->mg_vd;
365	metaslab_class_t *mc = mg->mg_class;
366	vdev_stat_t *vs = &vd->vdev_stat;
367	boolean_t was_allocatable;
368
369	ASSERT(vd == vd->vdev_top);
370
371	mutex_enter(&mg->mg_lock);
372	was_allocatable = mg->mg_allocatable;
373
374	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
375	    (vs->vs_space + 1);
376
377	mg->mg_allocatable = (mg->mg_free_capacity > zfs_mg_noalloc_threshold);
378
379	/*
380	 * The mc_alloc_groups maintains a count of the number of
381	 * groups in this metaslab class that are still above the
382	 * zfs_mg_noalloc_threshold. This is used by the allocating
383	 * threads to determine if they should avoid allocations to
384	 * a given group. The allocator will avoid allocations to a group
385	 * if that group has reached or is below the zfs_mg_noalloc_threshold
386	 * and there are still other groups that are above the threshold.
387	 * When a group transitions from allocatable to non-allocatable or
388	 * vice versa we update the metaslab class to reflect that change.
389	 * When the mc_alloc_groups value drops to 0 that means that all
390	 * groups have reached the zfs_mg_noalloc_threshold making all groups
391	 * eligible for allocations. This effectively means that all devices
392	 * are balanced again.
393	 */
394	if (was_allocatable && !mg->mg_allocatable)
395		mc->mc_alloc_groups--;
396	else if (!was_allocatable && mg->mg_allocatable)
397		mc->mc_alloc_groups++;
398	mutex_exit(&mg->mg_lock);
399}
400
401metaslab_group_t *
402metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
403{
404	metaslab_group_t *mg;
405
406	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
407	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
408	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
409	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
410	mg->mg_vd = vd;
411	mg->mg_class = mc;
412	mg->mg_activation_count = 0;
413
414	mg->mg_taskq = taskq_create("metaslab_group_tasksq", metaslab_load_pct,
415	    minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);
416
417	return (mg);
418}
419
420void
421metaslab_group_destroy(metaslab_group_t *mg)
422{
423	ASSERT(mg->mg_prev == NULL);
424	ASSERT(mg->mg_next == NULL);
425	/*
426	 * We may have gone below zero with the activation count
427	 * either because we never activated in the first place or
428	 * because we're done, and possibly removing the vdev.
429	 */
430	ASSERT(mg->mg_activation_count <= 0);
431
432	avl_destroy(&mg->mg_metaslab_tree);
433	mutex_destroy(&mg->mg_lock);
434	kmem_free(mg, sizeof (metaslab_group_t));
435}
436
437void
438metaslab_group_activate(metaslab_group_t *mg)
439{
440	metaslab_class_t *mc = mg->mg_class;
441	metaslab_group_t *mgprev, *mgnext;
442
443	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
444
445	ASSERT(mc->mc_rotor != mg);
446	ASSERT(mg->mg_prev == NULL);
447	ASSERT(mg->mg_next == NULL);
448	ASSERT(mg->mg_activation_count <= 0);
449
450	if (++mg->mg_activation_count <= 0)
451		return;
452
453	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
454	metaslab_group_alloc_update(mg);
455
456	if ((mgprev = mc->mc_rotor) == NULL) {
457		mg->mg_prev = mg;
458		mg->mg_next = mg;
459	} else {
460		mgnext = mgprev->mg_next;
461		mg->mg_prev = mgprev;
462		mg->mg_next = mgnext;
463		mgprev->mg_next = mg;
464		mgnext->mg_prev = mg;
465	}
466	mc->mc_rotor = mg;
467	metaslab_class_minblocksize_update(mc);
468}
469
470void
471metaslab_group_passivate(metaslab_group_t *mg)
472{
473	metaslab_class_t *mc = mg->mg_class;
474	metaslab_group_t *mgprev, *mgnext;
475
476	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
477
478	if (--mg->mg_activation_count != 0) {
479		ASSERT(mc->mc_rotor != mg);
480		ASSERT(mg->mg_prev == NULL);
481		ASSERT(mg->mg_next == NULL);
482		ASSERT(mg->mg_activation_count < 0);
483		return;
484	}
485
486	taskq_wait(mg->mg_taskq);
487
488	mgprev = mg->mg_prev;
489	mgnext = mg->mg_next;
490
491	if (mg == mgnext) {
492		mc->mc_rotor = NULL;
493	} else {
494		mc->mc_rotor = mgnext;
495		mgprev->mg_next = mgnext;
496		mgnext->mg_prev = mgprev;
497	}
498
499	mg->mg_prev = NULL;
500	mg->mg_next = NULL;
501	metaslab_class_minblocksize_update(mc);
502}
503
504static void
505metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
506{
507	mutex_enter(&mg->mg_lock);
508	ASSERT(msp->ms_group == NULL);
509	msp->ms_group = mg;
510	msp->ms_weight = 0;
511	avl_add(&mg->mg_metaslab_tree, msp);
512	mutex_exit(&mg->mg_lock);
513}
514
515static void
516metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
517{
518	mutex_enter(&mg->mg_lock);
519	ASSERT(msp->ms_group == mg);
520	avl_remove(&mg->mg_metaslab_tree, msp);
521	msp->ms_group = NULL;
522	mutex_exit(&mg->mg_lock);
523}
524
525static void
526metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
527{
528	/*
529	 * Although in principle the weight can be any value, in
530	 * practice we do not use values in the range [1, 510].
531	 */
532	ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
533	ASSERT(MUTEX_HELD(&msp->ms_lock));
534
535	mutex_enter(&mg->mg_lock);
536	ASSERT(msp->ms_group == mg);
537	avl_remove(&mg->mg_metaslab_tree, msp);
538	msp->ms_weight = weight;
539	avl_add(&mg->mg_metaslab_tree, msp);
540	mutex_exit(&mg->mg_lock);
541}
542
543/*
544 * Determine if a given metaslab group should skip allocations. A metaslab
545 * group should avoid allocations if its used capacity has crossed the
546 * zfs_mg_noalloc_threshold and there is at least one metaslab group
547 * that can still handle allocations.
548 */
549static boolean_t
550metaslab_group_allocatable(metaslab_group_t *mg)
551{
552	vdev_t *vd = mg->mg_vd;
553	spa_t *spa = vd->vdev_spa;
554	metaslab_class_t *mc = mg->mg_class;
555
556	/*
557	 * A metaslab group is considered allocatable if its free capacity
558	 * is greater than the set value of zfs_mg_noalloc_threshold, it's
559	 * associated with a slog, or there are no other metaslab groups
560	 * with free capacity greater than zfs_mg_noalloc_threshold.
561	 */
562	return (mg->mg_free_capacity > zfs_mg_noalloc_threshold ||
563	    mc != spa_normal_class(spa) || mc->mc_alloc_groups == 0);
564}
565
566/*
567 * ==========================================================================
568 * Range tree callbacks
569 * ==========================================================================
570 */
571
572/*
573 * Comparison function for the private size-ordered tree. Tree is sorted
574 * by size, larger sizes at the end of the tree.
575 */
576static int
577metaslab_rangesize_compare(const void *x1, const void *x2)
578{
579	const range_seg_t *r1 = x1;
580	const range_seg_t *r2 = x2;
581	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
582	uint64_t rs_size2 = r2->rs_end - r2->rs_start;
583
584	if (rs_size1 < rs_size2)
585		return (-1);
586	if (rs_size1 > rs_size2)
587		return (1);
588
589	if (r1->rs_start < r2->rs_start)
590		return (-1);
591
592	if (r1->rs_start > r2->rs_start)
593		return (1);
594
595	return (0);
596}
597
598/*
599 * Create any block allocator specific components. The current allocators
600 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
601 */
602static void
603metaslab_rt_create(range_tree_t *rt, void *arg)
604{
605	metaslab_t *msp = arg;
606
607	ASSERT3P(rt->rt_arg, ==, msp);
608	ASSERT(msp->ms_tree == NULL);
609
610	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
611	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
612}
613
614/*
615 * Destroy the block allocator specific components.
616 */
617static void
618metaslab_rt_destroy(range_tree_t *rt, void *arg)
619{
620	metaslab_t *msp = arg;
621
622	ASSERT3P(rt->rt_arg, ==, msp);
623	ASSERT3P(msp->ms_tree, ==, rt);
624	ASSERT0(avl_numnodes(&msp->ms_size_tree));
625
626	avl_destroy(&msp->ms_size_tree);
627}
628
629static void
630metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
631{
632	metaslab_t *msp = arg;
633
634	ASSERT3P(rt->rt_arg, ==, msp);
635	ASSERT3P(msp->ms_tree, ==, rt);
636	VERIFY(!msp->ms_condensing);
637	avl_add(&msp->ms_size_tree, rs);
638}
639
640static void
641metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
642{
643	metaslab_t *msp = arg;
644
645	ASSERT3P(rt->rt_arg, ==, msp);
646	ASSERT3P(msp->ms_tree, ==, rt);
647	VERIFY(!msp->ms_condensing);
648	avl_remove(&msp->ms_size_tree, rs);
649}
650
651static void
652metaslab_rt_vacate(range_tree_t *rt, void *arg)
653{
654	metaslab_t *msp = arg;
655
656	ASSERT3P(rt->rt_arg, ==, msp);
657	ASSERT3P(msp->ms_tree, ==, rt);
658
659	/*
660	 * Normally one would walk the tree freeing nodes along the way.
661	 * Since the nodes are shared with the range trees we can avoid
662	 * walking all nodes and just reinitialize the avl tree. The nodes
663	 * will be freed by the range tree, so we don't want to free them here.
664	 */
665	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
666	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
667}
668
669static range_tree_ops_t metaslab_rt_ops = {
670	metaslab_rt_create,
671	metaslab_rt_destroy,
672	metaslab_rt_add,
673	metaslab_rt_remove,
674	metaslab_rt_vacate
675};
676
677/*
678 * ==========================================================================
679 * Metaslab block operations
680 * ==========================================================================
681 */
682
683/*
684 * Return the maximum contiguous segment within the metaslab.
685 */
686uint64_t
687metaslab_block_maxsize(metaslab_t *msp)
688{
689	avl_tree_t *t = &msp->ms_size_tree;
690	range_seg_t *rs;
691
692	if (t == NULL || (rs = avl_last(t)) == NULL)
693		return (0ULL);
694
695	return (rs->rs_end - rs->rs_start);
696}
697
698uint64_t
699metaslab_block_alloc(metaslab_t *msp, uint64_t size)
700{
701	uint64_t start;
702	range_tree_t *rt = msp->ms_tree;
703
704	VERIFY(!msp->ms_condensing);
705
706	start = msp->ms_ops->msop_alloc(msp, size);
707	if (start != -1ULL) {
708		vdev_t *vd = msp->ms_group->mg_vd;
709
710		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
711		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
712		VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
713		range_tree_remove(rt, start, size);
714	}
715	return (start);
716}
717
718/*
719 * ==========================================================================
720 * Common allocator routines
721 * ==========================================================================
722 */
723
724/*
725 * This is a helper function that can be used by the allocator to find
726 * a suitable block to allocate. This will search the specified AVL
727 * tree looking for a block that matches the specified criteria.
728 */
729static uint64_t
730metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
731    uint64_t align)
732{
733	range_seg_t *rs, rsearch;
734	avl_index_t where;
735
736	rsearch.rs_start = *cursor;
737	rsearch.rs_end = *cursor + size;
738
739	rs = avl_find(t, &rsearch, &where);
740	if (rs == NULL)
741		rs = avl_nearest(t, where, AVL_AFTER);
742
743	while (rs != NULL) {
744		uint64_t offset = P2ROUNDUP(rs->rs_start, align);
745
746		if (offset + size <= rs->rs_end) {
747			*cursor = offset + size;
748			return (offset);
749		}
750		rs = AVL_NEXT(t, rs);
751	}
752
753	/*
754	 * If we know we've searched the whole map (*cursor == 0), give up.
755	 * Otherwise, reset the cursor to the beginning and try again.
756	 */
757	if (*cursor == 0)
758		return (-1ULL);
759
760	*cursor = 0;
761	return (metaslab_block_picker(t, cursor, size, align));
762}
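
/*
 * For illustration (an assumed example, not taken from the code above):
 * for a 24K request from metaslab_ff_alloc() below, align = 24K & -24K =
 * 8K and the cursor is the ms_lbas[] slot for that power of two.  The
 * search starts at *cursor, rounds each candidate rs_start up to 8K with
 * P2ROUNDUP(), and returns the first segment that still has 24K available
 * at the aligned offset, advancing the cursor past the allocation.  If the
 * end of the tree is reached with a non-zero cursor, the cursor is reset
 * to 0 and the search is retried once before giving up with -1ULL.
 */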
763
764/*
765 * ==========================================================================
766 * The first-fit block allocator
767 * ==========================================================================
768 */
769static uint64_t
770metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
771{
772	/*
773	 * Find the largest power of 2 block size that evenly divides the
774	 * requested size. This is used to try to allocate blocks with similar
775	 * alignment from the same area of the metaslab (i.e. same cursor
776	 * bucket), but it does not guarantee that allocations of other sizes
777	 * will not exist in the same region.
778	 */
779	uint64_t align = size & -size;
780	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
781	avl_tree_t *t = &msp->ms_tree->rt_root;
782
783	return (metaslab_block_picker(t, cursor, size, align));
784}
785
786/* ARGSUSED */
787static boolean_t
788metaslab_ff_fragmented(metaslab_t *msp)
789{
790	return (B_TRUE);
791}
792
793static metaslab_ops_t metaslab_ff_ops = {
794	metaslab_ff_alloc,
795	metaslab_ff_fragmented
796};
797
798/*
799 * ==========================================================================
800 * Dynamic block allocator -
801 * Uses the first-fit allocation scheme until space gets low and then
802 * adjusts to a best-fit allocation method. Uses metaslab_df_alloc_threshold
803 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
804 * ==========================================================================
805 */
806static uint64_t
807metaslab_df_alloc(metaslab_t *msp, uint64_t size)
808{
809	/*
810	 * Find the largest power of 2 block size that evenly divides the
811	 * requested size. This is used to try to allocate blocks with similar
812	 * alignment from the same area of the metaslab (i.e. same cursor
813	 * bucket), but it does not guarantee that allocations of other sizes
814	 * will not exist in the same region.
815	 */
816	uint64_t align = size & -size;
817	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
818	range_tree_t *rt = msp->ms_tree;
819	avl_tree_t *t = &rt->rt_root;
820	uint64_t max_size = metaslab_block_maxsize(msp);
821	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
822
823	ASSERT(MUTEX_HELD(&msp->ms_lock));
824	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
825
826	if (max_size < size)
827		return (-1ULL);
828
829	/*
830	 * If we're running low on space switch to using the size
831	 * sorted AVL tree (best-fit).
832	 */
833	if (max_size < metaslab_df_alloc_threshold ||
834	    free_pct < metaslab_df_free_pct) {
835		t = &msp->ms_size_tree;
836		*cursor = 0;
837	}
838
839	return (metaslab_block_picker(t, cursor, size, 1ULL));
840}
841
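/*
 * To make the switch above concrete, assuming the stock defaults
 * (metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE, i.e. 128K, and
 * metaslab_df_free_pct = 4): the allocator stays with the offset-ordered
 * first-fit search as long as the largest free segment is at least 128K
 * and at least 4% of the metaslab is free; as soon as either condition
 * fails, the cursor is reset and allocations fall back to the size-ordered
 * tree (best-fit), which is also what metaslab_df_fragmented() below
 * reports.
 */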
842static boolean_t
843metaslab_df_fragmented(metaslab_t *msp)
844{
845	range_tree_t *rt = msp->ms_tree;
846	uint64_t max_size = metaslab_block_maxsize(msp);
847	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
848
849	if (max_size >= metaslab_df_alloc_threshold &&
850	    free_pct >= metaslab_df_free_pct)
851		return (B_FALSE);
852
853	return (B_TRUE);
854}
855
856static metaslab_ops_t metaslab_df_ops = {
857	metaslab_df_alloc,
858	metaslab_df_fragmented
859};
860
861/*
862 * ==========================================================================
863 * Cursor fit block allocator -
864 * Select the largest region in the metaslab, set the cursor to the beginning
865 * of the range and the cursor_end to the end of the range. As allocations
866 * are made advance the cursor. Continue allocating from the cursor until
867 * the range is exhausted and then find a new range.
868 * ==========================================================================
869 */
870static uint64_t
871metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
872{
873	range_tree_t *rt = msp->ms_tree;
874	avl_tree_t *t = &msp->ms_size_tree;
875	uint64_t *cursor = &msp->ms_lbas[0];
876	uint64_t *cursor_end = &msp->ms_lbas[1];
877	uint64_t offset = 0;
878
879	ASSERT(MUTEX_HELD(&msp->ms_lock));
880	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
881
882	ASSERT3U(*cursor_end, >=, *cursor);
883
884	if ((*cursor + size) > *cursor_end) {
885		range_seg_t *rs;
886
887		rs = avl_last(&msp->ms_size_tree);
888		if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
889			return (-1ULL);
890
891		*cursor = rs->rs_start;
892		*cursor_end = rs->rs_end;
893	}
894
895	offset = *cursor;
896	*cursor += size;
897
898	return (offset);
899}
900
901static boolean_t
902metaslab_cf_fragmented(metaslab_t *msp)
903{
904	return (metaslab_block_maxsize(msp) < metaslab_min_alloc_size);
905}
906
907static metaslab_ops_t metaslab_cf_ops = {
908	metaslab_cf_alloc,
909	metaslab_cf_fragmented
910};
911
912/*
913 * ==========================================================================
914 * New dynamic fit allocator -
915 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
916 * contiguous blocks. If no region is found then just use the largest segment
917 * that remains.
918 * ==========================================================================
919 */
920
921/*
922 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
923 * to request from the allocator.
924 */
925uint64_t metaslab_ndf_clump_shift = 4;
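
/*
 * Rough illustration, assuming the default clump shift of 4: for an 8K
 * request (highbit64(8K) == 14) the allocator below first tries the cursor
 * for that size bucket; if the segment there cannot hold the request, it
 * searches the size-ordered tree for a segment of roughly
 * MIN(largest free segment, 1ULL << (14 + 4)) = 256K and allocates the
 * request from the start of whatever segment it finds.
 */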
926
927static uint64_t
928metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
929{
930	avl_tree_t *t = &msp->ms_tree->rt_root;
931	avl_index_t where;
932	range_seg_t *rs, rsearch;
933	uint64_t hbit = highbit64(size);
934	uint64_t *cursor = &msp->ms_lbas[hbit - 1];
935	uint64_t max_size = metaslab_block_maxsize(msp);
936
937	ASSERT(MUTEX_HELD(&msp->ms_lock));
938	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
939
940	if (max_size < size)
941		return (-1ULL);
942
943	rsearch.rs_start = *cursor;
944	rsearch.rs_end = *cursor + size;
945
946	rs = avl_find(t, &rsearch, &where);
947	if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
948		t = &msp->ms_size_tree;
949
950		rsearch.rs_start = 0;
951		rsearch.rs_end = MIN(max_size,
952		    1ULL << (hbit + metaslab_ndf_clump_shift));
953		rs = avl_find(t, &rsearch, &where);
954		if (rs == NULL)
955			rs = avl_nearest(t, where, AVL_AFTER);
956		ASSERT(rs != NULL);
957	}
958
959	if ((rs->rs_end - rs->rs_start) >= size) {
960		*cursor = rs->rs_start + size;
961		return (rs->rs_start);
962	}
963	return (-1ULL);
964}
965
966static boolean_t
967metaslab_ndf_fragmented(metaslab_t *msp)
968{
969	return (metaslab_block_maxsize(msp) <=
970	    (metaslab_min_alloc_size << metaslab_ndf_clump_shift));
971}
972
973static metaslab_ops_t metaslab_ndf_ops = {
974	metaslab_ndf_alloc,
975	metaslab_ndf_fragmented
976};
977
978metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
979
980/*
981 * ==========================================================================
982 * Metaslabs
983 * ==========================================================================
984 */
985
986/*
987 * Wait for any in-progress metaslab loads to complete.
988 */
989void
990metaslab_load_wait(metaslab_t *msp)
991{
992	ASSERT(MUTEX_HELD(&msp->ms_lock));
993
994	while (msp->ms_loading) {
995		ASSERT(!msp->ms_loaded);
996		cv_wait(&msp->ms_load_cv, &msp->ms_lock);
997	}
998}
999
1000int
1001metaslab_load(metaslab_t *msp)
1002{
1003	int error = 0;
1004
1005	ASSERT(MUTEX_HELD(&msp->ms_lock));
1006	ASSERT(!msp->ms_loaded);
1007	ASSERT(!msp->ms_loading);
1008
1009	msp->ms_loading = B_TRUE;
1010
1011	/*
1012	 * If the space map has not been allocated yet, then treat
1013	 * all the space in the metaslab as free and add it to the
1014	 * ms_tree.
1015	 */
1016	if (msp->ms_sm != NULL)
1017		error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
1018	else
1019		range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);
1020
1021	msp->ms_loaded = (error == 0);
1022	msp->ms_loading = B_FALSE;
1023
1024	if (msp->ms_loaded) {
1025		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1026			range_tree_walk(msp->ms_defertree[t],
1027			    range_tree_remove, msp->ms_tree);
1028		}
1029	}
1030	cv_broadcast(&msp->ms_load_cv);
1031	return (error);
1032}
1033
1034void
1035metaslab_unload(metaslab_t *msp)
1036{
1037	ASSERT(MUTEX_HELD(&msp->ms_lock));
1038	range_tree_vacate(msp->ms_tree, NULL, NULL);
1039	msp->ms_loaded = B_FALSE;
1040	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
1041}
1042
1043metaslab_t *
1044metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg)
1045{
1046	vdev_t *vd = mg->mg_vd;
1047	objset_t *mos = vd->vdev_spa->spa_meta_objset;
1048	metaslab_t *msp;
1049
1050	msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
1051	mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);
1052	cv_init(&msp->ms_load_cv, NULL, CV_DEFAULT, NULL);
1053	msp->ms_id = id;
1054	msp->ms_start = id << vd->vdev_ms_shift;
1055	msp->ms_size = 1ULL << vd->vdev_ms_shift;
1056
1057	/*
1058	 * We only open space map objects that already exist. All others
1059	 * will be opened when we finally allocate an object for them.
1060	 */
1061	if (object != 0) {
1062		VERIFY0(space_map_open(&msp->ms_sm, mos, object, msp->ms_start,
1063		    msp->ms_size, vd->vdev_ashift, &msp->ms_lock));
1064		ASSERT(msp->ms_sm != NULL);
1065	}
1066
1067	/*
1068	 * We create the main range tree here, but we don't create the
1069	 * alloctree and freetree until metaslab_sync_done().  This serves
1070	 * two purposes: it allows metaslab_sync_done() to detect the
1071	 * addition of new space; and for debugging, it ensures that we'd
1072	 * data fault on any attempt to use this metaslab before it's ready.
1073	 */
1074	msp->ms_tree = range_tree_create(&metaslab_rt_ops, msp, &msp->ms_lock);
1075	metaslab_group_add(mg, msp);
1076
1077	msp->ms_ops = mg->mg_class->mc_ops;
1078
1079	/*
1080	 * If we're opening an existing pool (txg == 0) or creating
1081	 * a new one (txg == TXG_INITIAL), all space is available now.
1082	 * If we're adding space to an existing pool, the new space
1083	 * does not become available until after this txg has synced.
1084	 */
1085	if (txg <= TXG_INITIAL)
1086		metaslab_sync_done(msp, 0);
1087
1088	/*
1089	 * If metaslab_debug_load is set and we're initializing a metaslab
1090	 * that has an allocated space_map object then load its space
1091	 * map so that we can verify frees.
1092	 */
1093	if (metaslab_debug_load && msp->ms_sm != NULL) {
1094		mutex_enter(&msp->ms_lock);
1095		VERIFY0(metaslab_load(msp));
1096		mutex_exit(&msp->ms_lock);
1097	}
1098
1099	if (txg != 0) {
1100		vdev_dirty(vd, 0, NULL, txg);
1101		vdev_dirty(vd, VDD_METASLAB, msp, txg);
1102	}
1103
1104	return (msp);
1105}
1106
1107void
1108metaslab_fini(metaslab_t *msp)
1109{
1110	metaslab_group_t *mg = msp->ms_group;
1111
1112	metaslab_group_remove(mg, msp);
1113
1114	mutex_enter(&msp->ms_lock);
1115
1116	VERIFY(msp->ms_group == NULL);
1117	vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
1118	    0, -msp->ms_size);
1119	space_map_close(msp->ms_sm);
1120
1121	metaslab_unload(msp);
1122	range_tree_destroy(msp->ms_tree);
1123
1124	for (int t = 0; t < TXG_SIZE; t++) {
1125		range_tree_destroy(msp->ms_alloctree[t]);
1126		range_tree_destroy(msp->ms_freetree[t]);
1127	}
1128
1129	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1130		range_tree_destroy(msp->ms_defertree[t]);
1131	}
1132
1133	ASSERT0(msp->ms_deferspace);
1134
1135	mutex_exit(&msp->ms_lock);
1136	cv_destroy(&msp->ms_load_cv);
1137	mutex_destroy(&msp->ms_lock);
1138
1139	kmem_free(msp, sizeof (metaslab_t));
1140}
1141
1142/*
1143 * Apply a weighting factor based on the histogram information for this
1144 * metaslab. The current weighting factor is somewhat arbitrary and requires
1145 * additional investigation. The implementation provides a measure of
1146 * "weighted" free space and gives a higher weighting for larger contiguous
1147 * regions. The weighting factor is determined by counting the number of
1148 * sm_shift sectors that exist in each region represented by the histogram.
1149 * That value is then multiplied by the power of 2 exponent and the sm_shift
1150 * value.
1151 *
1152 * For example, assume the 2^21 histogram bucket has 4 2MB regions and the
1153 * metaslab has an sm_shift value of 9 (512B):
1154 *
1155 * 1) calculate the number of sm_shift sectors in the region:
1156 *	2^21 / 2^9 = 2^12 = 4096 * 4 (number of regions) = 16384
1157 * 2) multiply by the power of 2 exponent and the sm_shift value:
1158 *	16384 * 21 * 9 = 3096576
1159 * This value will be added to the weighting of the metaslab.
1160 */
1161static uint64_t
1162metaslab_weight_factor(metaslab_t *msp)
1163{
1164	uint64_t factor = 0;
1165	uint64_t sectors;
1166	int i;
1167
1168	/*
1169	 * A null space map means that the entire metaslab is free, so we
1170	 * calculate a weight factor that spans the entire size of the
1171	 * metaslab.
1172	 */
1173	if (msp->ms_sm == NULL) {
1174		vdev_t *vd = msp->ms_group->mg_vd;
1175
1176		i = highbit64(msp->ms_size) - 1;
1177		sectors = msp->ms_size >> vd->vdev_ashift;
1178		return (sectors * i * vd->vdev_ashift);
1179	}
1180
1181	if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
1182		return (0);
1183
1184	for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE(msp->ms_sm); i++) {
1185		if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
1186			continue;
1187
1188		/*
1189		 * Determine the number of sm_shift sectors in the region
1190		 * indicated by the histogram. For example, given an
1191		 * sm_shift value of 9 (512 bytes) and i = 4 then we know
1192		 * that we're looking at an 8K region in the histogram
1193		 * (i.e. 9 + 4 = 13, 2^13 = 8192). To figure out the
1194		 * number of sm_shift sectors (512 bytes in this example),
1195		 * we would take 8192 / 512 = 16. Since the histogram
1196		 * is offset by sm_shift we can simply use the value
1197		 * of i to calculate this (i.e. 2^i = 16 where i = 4).
1198		 */
1199		sectors = msp->ms_sm->sm_phys->smp_histogram[i] << i;
1200		factor += (i + msp->ms_sm->sm_shift) * sectors;
1201	}
1202	return (factor * msp->ms_sm->sm_shift);
1203}
1204
1205static uint64_t
1206metaslab_weight(metaslab_t *msp)
1207{
1208	metaslab_group_t *mg = msp->ms_group;
1209	vdev_t *vd = mg->mg_vd;
1210	uint64_t weight, space;
1211
1212	ASSERT(MUTEX_HELD(&msp->ms_lock));
1213
1214	/*
1215	 * This vdev is in the process of being removed so there is nothing
1216	 * for us to do here.
1217	 */
1218	if (vd->vdev_removing) {
1219		ASSERT0(space_map_allocated(msp->ms_sm));
1220		ASSERT0(vd->vdev_ms_shift);
1221		return (0);
1222	}
1223
1224	/*
1225	 * The baseline weight is the metaslab's free space.
1226	 */
1227	space = msp->ms_size - space_map_allocated(msp->ms_sm);
1228	weight = space;
1229
1230	/*
1231	 * Modern disks have uniform bit density and constant angular velocity.
1232	 * Therefore, the outer recording zones are faster (higher bandwidth)
1233	 * than the inner zones by the ratio of outer to inner track diameter,
1234	 * which is typically around 2:1.  We account for this by assigning
1235	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1236	 * In effect, this means that we'll select the metaslab with the most
1237	 * free bandwidth rather than simply the one with the most free space.
1238	 */
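	/*
	 * For illustration, assuming a vdev with 200 metaslabs: metaslab 0
	 * is weighted at 2.0x its free space, metaslab 100 at 1.5x, and
	 * metaslab 199 at roughly 1.005x, per the formula below.
	 */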
1239	weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
1240	ASSERT(weight >= space && weight <= 2 * space);
1241
1242	msp->ms_factor = metaslab_weight_factor(msp);
1243	if (metaslab_weight_factor_enable)
1244		weight += msp->ms_factor;
1245
1246	if (msp->ms_loaded && !msp->ms_ops->msop_fragmented(msp)) {
1247		/*
1248		 * If this metaslab is one we're actively using, adjust its
1249		 * weight to make it preferable to any inactive metaslab so
1250		 * we'll polish it off.
1251		 */
1252		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
1253	}
1254
1255	return (weight);
1256}
1257
1258static int
1259metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
1260{
1261	ASSERT(MUTEX_HELD(&msp->ms_lock));
1262
1263	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
1264		metaslab_load_wait(msp);
1265		if (!msp->ms_loaded) {
1266			int error = metaslab_load(msp);
1267			if (error) {
1268				metaslab_group_sort(msp->ms_group, msp, 0);
1269				return (error);
1270			}
1271		}
1272
1273		metaslab_group_sort(msp->ms_group, msp,
1274		    msp->ms_weight | activation_weight);
1275	}
1276	ASSERT(msp->ms_loaded);
1277	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
1278
1279	return (0);
1280}
1281
1282static void
1283metaslab_passivate(metaslab_t *msp, uint64_t size)
1284{
1285	/*
1286	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1287	 * this metaslab again.  In that case, it had better be empty,
1288	 * or we would be leaving space on the table.
1289	 */
1290	ASSERT(size >= SPA_MINBLOCKSIZE || range_tree_space(msp->ms_tree) == 0);
1291	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
1292	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
1293}
1294
1295static void
1296metaslab_preload(void *arg)
1297{
1298	metaslab_t *msp = arg;
1299	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1300
1301	mutex_enter(&msp->ms_lock);
1302	metaslab_load_wait(msp);
1303	if (!msp->ms_loaded)
1304		(void) metaslab_load(msp);
1305
1306	/*
1307	 * Set the ms_access_txg value so that we don't unload it right away.
1308	 */
1309	msp->ms_access_txg = spa_syncing_txg(spa) + metaslab_unload_delay + 1;
1310	mutex_exit(&msp->ms_lock);
1311}
1312
1313static void
1314metaslab_group_preload(metaslab_group_t *mg)
1315{
1316	spa_t *spa = mg->mg_vd->vdev_spa;
1317	metaslab_t *msp;
1318	avl_tree_t *t = &mg->mg_metaslab_tree;
1319	int m = 0;
1320
1321	if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
1322		taskq_wait(mg->mg_taskq);
1323		return;
1324	}
1325	mutex_enter(&mg->mg_lock);
1326
1327	/*
1328	 * Prefetch the next potential metaslabs
1329	 */
1330	for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
1331
1332		/* If we have reached our preload limit then we're done */
1333		if (++m > metaslab_preload_limit)
1334			break;
1335
1336		VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
1337		    msp, TQ_SLEEP) != 0);
1338	}
1339	mutex_exit(&mg->mg_lock);
1340}
1341
1342/*
1343 * Determine if the space map's on-disk footprint is past our tolerance
1344 * for inefficiency. We would like to use the following criteria to make
1345 * our decision:
1346 *
1347 * 1. The size of the space map object should not dramatically increase as a
1348 * result of writing out the free space range tree.
1349 *
1350 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
1351 * times the size of the free space range tree representation
1352 * (e.g. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
1353 *
1354 * Checking the first condition is tricky since we don't want to walk
1355 * the entire AVL tree calculating the estimated on-disk size. Instead we
1356 * use the size-ordered range tree in the metaslab and calculate the
1357 * size required to write out the largest segment in our free tree. If the
1358 * size required to represent that segment on disk is larger than the space
1359 * map object then we avoid condensing this map.
1360 *
1361 * To determine the second criterion we use a best-case estimate and assume
1362 * each segment can be represented on-disk as a single 64-bit entry. We refer
1363 * to this best-case estimate as the space map's minimal form.
1364 */
1365static boolean_t
1366metaslab_should_condense(metaslab_t *msp)
1367{
1368	space_map_t *sm = msp->ms_sm;
1369	range_seg_t *rs;
1370	uint64_t size, entries, segsz;
1371
1372	ASSERT(MUTEX_HELD(&msp->ms_lock));
1373	ASSERT(msp->ms_loaded);
1374
1375	/*
1376	 * Use the ms_size_tree range tree, which is ordered by size, to
1377	 * obtain the largest segment in the free tree. If the tree is empty
1378	 * then we should condense the map.
1379	 */
1380	rs = avl_last(&msp->ms_size_tree);
1381	if (rs == NULL)
1382		return (B_TRUE);
1383
1384	/*
1385	 * Calculate the number of 64-bit entries this segment would
1386	 * require when written to disk. If this single segment would be
1387	 * larger on-disk than the entire current on-disk structure, then
1388	 * clearly condensing will increase the on-disk structure size.
1389	 */
1390	size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
1391	entries = size / (MIN(size, SM_RUN_MAX));
1392	segsz = entries * sizeof (uint64_t);
1393
1394	return (segsz <= space_map_length(msp->ms_sm) &&
1395	    space_map_length(msp->ms_sm) >= (zfs_condense_pct *
1396	    sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root)) / 100);
1397}
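
/*
 * Worked example for the test above, assuming the default zfs_condense_pct
 * of 200 and an in-core free tree holding 10,000 segments: the minimal
 * form is 10,000 * sizeof (uint64_t) = 80,000 bytes, so condensing is
 * triggered once the on-disk space map has grown to at least 160,000
 * bytes (provided the largest-segment check also passes).
 */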
1398
1399/*
1400 * Condense the on-disk space map representation to its minimized form.
1401 * The minimized form consists of a small number of allocations followed by
1402 * the entries of the free range tree.
1403 */
1404static void
1405metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
1406{
1407	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1408	range_tree_t *freetree = msp->ms_freetree[txg & TXG_MASK];
1409	range_tree_t *condense_tree;
1410	space_map_t *sm = msp->ms_sm;
1411
1412	ASSERT(MUTEX_HELD(&msp->ms_lock));
1413	ASSERT3U(spa_sync_pass(spa), ==, 1);
1414	ASSERT(msp->ms_loaded);
1415
1416	spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, "
1417	    "smp size %llu, segments %lu", txg, msp->ms_id, msp,
1418	    space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root));
1419
1420	/*
1421	 * Create a range tree that is 100% allocated. We remove segments
1422	 * that have been freed in this txg, any deferred frees that exist,
1423	 * and any allocations in the future. Removing segments should be
1424	 * a relatively inexpensive operation since we expect these trees to
1425	 * have a small number of nodes.
1426	 */
1427	condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock);
1428	range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
1429
1430	/*
1431	 * Remove what's been freed in this txg from the condense_tree.
1432	 * Since we're in sync_pass 1, we know that all the frees from
1433	 * this txg are in the freetree.
1434	 */
1435	range_tree_walk(freetree, range_tree_remove, condense_tree);
1436
1437	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1438		range_tree_walk(msp->ms_defertree[t],
1439		    range_tree_remove, condense_tree);
1440	}
1441
1442	for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
1443		range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK],
1444		    range_tree_remove, condense_tree);
1445	}
1446
1447	/*
1448	 * We're about to drop the metaslab's lock thus allowing
1449	 * other consumers to change its content. Set the
1450	 * metaslab's ms_condensing flag to ensure that
1451	 * allocations on this metaslab do not occur while we're
1452	 * in the middle of committing it to disk. This is only critical
1453	 * for the ms_tree as all other range trees use per txg
1454	 * views of their content.
1455	 */
1456	msp->ms_condensing = B_TRUE;
1457
1458	mutex_exit(&msp->ms_lock);
1459	space_map_truncate(sm, tx);
1460	mutex_enter(&msp->ms_lock);
1461
1462	/*
1463	 * While we would ideally like to create a space_map representation
1464	 * that consists only of allocation records, doing so can be
1465	 * prohibitively expensive because the in-core free tree can be
1466	 * large, and therefore computationally expensive to subtract
1467	 * from the condense_tree. Instead we sync out two trees, a cheap
1468	 * allocation only tree followed by the in-core free tree. While not
1469	 * optimal, this is typically close to optimal, and much cheaper to
1470	 * compute.
1471	 */
1472	space_map_write(sm, condense_tree, SM_ALLOC, tx);
1473	range_tree_vacate(condense_tree, NULL, NULL);
1474	range_tree_destroy(condense_tree);
1475
1476	space_map_write(sm, msp->ms_tree, SM_FREE, tx);
1477	msp->ms_condensing = B_FALSE;
1478}
1479
1480/*
1481 * Write a metaslab to disk in the context of the specified transaction group.
1482 */
1483void
1484metaslab_sync(metaslab_t *msp, uint64_t txg)
1485{
1486	metaslab_group_t *mg = msp->ms_group;
1487	vdev_t *vd = mg->mg_vd;
1488	spa_t *spa = vd->vdev_spa;
1489	objset_t *mos = spa_meta_objset(spa);
1490	range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
1491	range_tree_t **freetree = &msp->ms_freetree[txg & TXG_MASK];
1492	range_tree_t **freed_tree =
1493	    &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
1494	dmu_tx_t *tx;
1495	uint64_t object = space_map_object(msp->ms_sm);
1496
1497	ASSERT(!vd->vdev_ishole);
1498
1499	/*
1500	 * This metaslab has just been added so there's no work to do now.
1501	 */
1502	if (*freetree == NULL) {
1503		ASSERT3P(alloctree, ==, NULL);
1504		return;
1505	}
1506
1507	ASSERT3P(alloctree, !=, NULL);
1508	ASSERT3P(*freetree, !=, NULL);
1509	ASSERT3P(*freed_tree, !=, NULL);
1510
1511	if (range_tree_space(alloctree) == 0 &&
1512	    range_tree_space(*freetree) == 0)
1513		return;
1514
1515	/*
1516	 * The only state that can actually be changing concurrently with
1517	 * metaslab_sync() is the metaslab's ms_tree.  No other thread can
1518	 * be modifying this txg's alloctree, freetree, freed_tree, or
1519	 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
1520	 * space_map ASSERTs. We drop it whenever we call into the DMU,
1521	 * because the DMU can call down to us (e.g. via zio_free()) at
1522	 * any time.
1523	 */
1524
1525	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
1526
1527	if (msp->ms_sm == NULL) {
1528		uint64_t new_object;
1529
1530		new_object = space_map_alloc(mos, tx);
1531		VERIFY3U(new_object, !=, 0);
1532
1533		VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
1534		    msp->ms_start, msp->ms_size, vd->vdev_ashift,
1535		    &msp->ms_lock));
1536		ASSERT(msp->ms_sm != NULL);
1537	}
1538
1539	mutex_enter(&msp->ms_lock);
1540
1541	if (msp->ms_loaded && spa_sync_pass(spa) == 1 &&
1542	    metaslab_should_condense(msp)) {
1543		metaslab_condense(msp, txg, tx);
1544	} else {
1545		space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
1546		space_map_write(msp->ms_sm, *freetree, SM_FREE, tx);
1547	}
1548
1549	range_tree_vacate(alloctree, NULL, NULL);
1550
1551	if (msp->ms_loaded) {
1552		/*
1553		 * When the space map is loaded, we have an accurate
1554		 * histogram in the range tree. This gives us an opportunity
1555		 * to bring the space map's histogram up-to-date so we clear
1556		 * it first before updating it.
1557		 */
1558		space_map_histogram_clear(msp->ms_sm);
1559		space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);
1560	} else {
1561		/*
1562		 * Since the space map is not loaded we simply update the
1563		 * existing histogram with what was freed in this txg. This
1564		 * means that the on-disk histogram may not have an accurate
1565		 * view of the free space but it's close enough to allow
1566		 * us to make allocation decisions.
1567		 */
1568		space_map_histogram_add(msp->ms_sm, *freetree, tx);
1569	}
1570
1571	/*
1572	 * For sync pass 1, we avoid traversing this txg's free range tree
1573	 * and instead will just swap the pointers for freetree and
1574	 * freed_tree. We can safely do this since the freed_tree is
1575	 * guaranteed to be empty on the initial pass.
1576	 */
1577	if (spa_sync_pass(spa) == 1) {
1578		range_tree_swap(freetree, freed_tree);
1579	} else {
1580		range_tree_vacate(*freetree, range_tree_add, *freed_tree);
1581	}
1582
1583	ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
1584	ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
1585
1586	mutex_exit(&msp->ms_lock);
1587
1588	if (object != space_map_object(msp->ms_sm)) {
1589		object = space_map_object(msp->ms_sm);
1590		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
1591		    msp->ms_id, sizeof (uint64_t), &object, tx);
1592	}
1593	dmu_tx_commit(tx);
1594}
1595
1596/*
1597 * Called after a transaction group has completely synced to mark
1598 * all of the metaslab's free space as usable.
1599 */
1600void
1601metaslab_sync_done(metaslab_t *msp, uint64_t txg)
1602{
1603	metaslab_group_t *mg = msp->ms_group;
1604	vdev_t *vd = mg->mg_vd;
1605	range_tree_t **freed_tree;
1606	range_tree_t **defer_tree;
1607	int64_t alloc_delta, defer_delta;
1608
1609	ASSERT(!vd->vdev_ishole);
1610
1611	mutex_enter(&msp->ms_lock);
1612
1613	/*
1614	 * If this metaslab is just becoming available, initialize its
1615	 * alloctrees, freetrees, and defertrees and add its capacity to
1616	 * the vdev.
1617	 */
1618	if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) {
1619		for (int t = 0; t < TXG_SIZE; t++) {
1620			ASSERT(msp->ms_alloctree[t] == NULL);
1621			ASSERT(msp->ms_freetree[t] == NULL);
1622
1623			msp->ms_alloctree[t] = range_tree_create(NULL, msp,
1624			    &msp->ms_lock);
1625			msp->ms_freetree[t] = range_tree_create(NULL, msp,
1626			    &msp->ms_lock);
1627		}
1628
1629		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1630			ASSERT(msp->ms_defertree[t] == NULL);
1631
1632			msp->ms_defertree[t] = range_tree_create(NULL, msp,
1633			    &msp->ms_lock);
1634		}
1635
1636		vdev_space_update(vd, 0, 0, msp->ms_size);
1637	}
1638
1639	freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
1640	defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
1641
1642	alloc_delta = space_map_alloc_delta(msp->ms_sm);
1643	defer_delta = range_tree_space(*freed_tree) -
1644	    range_tree_space(*defer_tree);
1645
1646	vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
1647
1648	ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
1649	ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
1650
1651	/*
1652	 * If there's a metaslab_load() in progress, wait for it to complete
1653	 * so that we have a consistent view of the in-core space map.
1654	 */
1655	metaslab_load_wait(msp);
1656
1657	/*
1658	 * Move the frees from the defer_tree back to the free
1659	 * range tree (if it's loaded). Swap the freed_tree and the
1660	 * defer_tree -- this is safe to do because we've just emptied out
1661	 * the defer_tree.
1662	 */
1663	range_tree_vacate(*defer_tree,
1664	    msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
1665	range_tree_swap(freed_tree, defer_tree);
1666
1667	space_map_update(msp->ms_sm);
1668
1669	msp->ms_deferspace += defer_delta;
1670	ASSERT3S(msp->ms_deferspace, >=, 0);
1671	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
1672	if (msp->ms_deferspace != 0) {
1673		/*
1674		 * Keep syncing this metaslab until all deferred frees
1675		 * are back in circulation.
1676		 */
1677		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1678	}
1679
1680	if (msp->ms_loaded && msp->ms_access_txg < txg) {
1681		for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
1682			VERIFY0(range_tree_space(
1683			    msp->ms_alloctree[(txg + t) & TXG_MASK]));
1684		}
1685
1686		if (!metaslab_debug_unload)
1687			metaslab_unload(msp);
1688	}
1689
1690	metaslab_group_sort(mg, msp, metaslab_weight(msp));
1691	mutex_exit(&msp->ms_lock);
1692
1693}
1694
1695void
1696metaslab_sync_reassess(metaslab_group_t *mg)
1697{
1698	metaslab_group_alloc_update(mg);
1699
1700	/*
1701	 * Preload the next potential metaslabs
1702	 */
1703	metaslab_group_preload(mg);
1704}
1705
1706static uint64_t
1707metaslab_distance(metaslab_t *msp, dva_t *dva)
1708{
1709	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
1710	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
1711	uint64_t start = msp->ms_id;
1712
1713	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
1714		return (1ULL << 63);
1715
1716	if (offset < start)
1717		return ((start - offset) << ms_shift);
1718	if (offset > start)
1719		return ((offset - start) << ms_shift);
1720	return (0);
1721}
1722
1723static uint64_t
1724metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
1725    uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
1726{
1727	spa_t *spa = mg->mg_vd->vdev_spa;
1728	metaslab_t *msp = NULL;
1729	uint64_t offset = -1ULL;
1730	avl_tree_t *t = &mg->mg_metaslab_tree;
1731	uint64_t activation_weight;
1732	uint64_t target_distance;
1733	int i;
1734
1735	activation_weight = METASLAB_WEIGHT_PRIMARY;
1736	for (i = 0; i < d; i++) {
1737		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
1738			activation_weight = METASLAB_WEIGHT_SECONDARY;
1739			break;
1740		}
1741	}
1742
1743	for (;;) {
1744		boolean_t was_active;
1745
1746		mutex_enter(&mg->mg_lock);
1747		for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
1748			if (msp->ms_weight < asize) {
1749				spa_dbgmsg(spa, "%s: failed to meet weight "
1750				    "requirement: vdev %llu, txg %llu, mg %p, "
1751				    "msp %p, psize %llu, asize %llu, "
1752				    "weight %llu", spa_name(spa),
1753				    mg->mg_vd->vdev_id, txg,
1754				    mg, msp, psize, asize, msp->ms_weight);
1755				mutex_exit(&mg->mg_lock);
1756				return (-1ULL);
1757			}
1758
1759			/*
1760			 * If the selected metaslab is condensing, skip it.
1761			 */
1762			if (msp->ms_condensing)
1763				continue;
1764
1765			was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
1766			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
1767				break;
1768
1769			target_distance = min_distance +
1770			    (space_map_allocated(msp->ms_sm) != 0 ? 0 :
1771			    min_distance >> 1);
1772
1773			for (i = 0; i < d; i++)
1774				if (metaslab_distance(msp, &dva[i]) <
1775				    target_distance)
1776					break;
1777			if (i == d)
1778				break;
1779		}
1780		mutex_exit(&mg->mg_lock);
1781		if (msp == NULL)
1782			return (-1ULL);
1783
1784		mutex_enter(&msp->ms_lock);
1785
1786		/*
1787		 * Ensure that the metaslab we have selected is still
1788		 * capable of handling our request. It's possible that
1789		 * another thread may have changed the weight while we
1790		 * were blocked on the metaslab lock.
1791		 */
1792		if (msp->ms_weight < asize || (was_active &&
1793		    !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
1794		    activation_weight == METASLAB_WEIGHT_PRIMARY)) {
1795			mutex_exit(&msp->ms_lock);
1796			continue;
1797		}
1798
1799		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
1800		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
1801			metaslab_passivate(msp,
1802			    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
1803			mutex_exit(&msp->ms_lock);
1804			continue;
1805		}
1806
1807		if (metaslab_activate(msp, activation_weight) != 0) {
1808			mutex_exit(&msp->ms_lock);
1809			continue;
1810		}
1811
1812		/*
1813		 * If this metaslab is currently condensing then pick again as
1814		 * we can't manipulate this metaslab until it's committed
1815		 * to disk.
1816		 */
1817		if (msp->ms_condensing) {
1818			mutex_exit(&msp->ms_lock);
1819			continue;
1820		}
1821
1822		if ((offset = metaslab_block_alloc(msp, asize)) != -1ULL)
1823			break;
1824
1825		metaslab_passivate(msp, metaslab_block_maxsize(msp));
1826		mutex_exit(&msp->ms_lock);
1827	}
1828
1829	if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
1830		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
1831
1832	range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, asize);
1833	msp->ms_access_txg = txg + metaslab_unload_delay;
1834
1835	mutex_exit(&msp->ms_lock);
1836
1837	return (offset);
1838}
1839
1840/*
1841 * Allocate a block for the specified i/o.
1842 */
1843static int
1844metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
1845    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
1846{
1847	metaslab_group_t *mg, *rotor;
1848	vdev_t *vd;
1849	int dshift = 3;
1850	int all_zero;
1851	int zio_lock = B_FALSE;
1852	boolean_t allocatable;
1853	uint64_t offset = -1ULL;
1854	uint64_t asize;
1855	uint64_t distance;
1856
1857	ASSERT(!DVA_IS_VALID(&dva[d]));
1858
1859	/*
1860	 * For testing, make some blocks above a certain size be gang blocks.
1861	 */
1862	if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
1863		return (SET_ERROR(ENOSPC));
1864
1865	/*
1866	 * Start at the rotor and loop through all mgs until we find something.
1867	 * Note that there's no locking on mc_rotor or mc_aliquot because
1868	 * nothing actually breaks if we miss a few updates -- we just won't
1869	 * allocate quite as evenly.  It all balances out over time.
1870	 *
1871	 * If we are doing ditto or log blocks, try to spread them across
1872	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
1873	 * allocated all of our ditto blocks, then try to spread them out on
1874	 * that vdev as much as possible.  If it turns out to not be possible,
1875	 * gradually lower our standards until anything becomes acceptable.
1876	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
1877	 * gives us hope of containing our fault domains to something we're
1878	 * able to reason about.  Otherwise, any two top-level vdev failures
1879	 * will guarantee the loss of data.  With consecutive allocation,
1880	 * only two adjacent top-level vdev failures will result in data loss.
1881	 *
1882	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
1883	 * ourselves on the same vdev as our gang block header.  That
1884	 * way, we can hope for locality in vdev_cache, plus it makes our
1885	 * fault domains something tractable.
1886	 */
1887	if (hintdva) {
1888		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
1889
1890		/*
1891		 * It's possible the vdev we're using as the hint no
1892		 * longer exists (i.e. removed). Consult the rotor when
1893		 * all else fails.
1894		 */
1895		if (vd != NULL) {
1896			mg = vd->vdev_mg;
1897
1898			if (flags & METASLAB_HINTBP_AVOID &&
1899			    mg->mg_next != NULL)
1900				mg = mg->mg_next;
1901		} else {
1902			mg = mc->mc_rotor;
1903		}
1904	} else if (d != 0) {
1905		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
1906		mg = vd->vdev_mg->mg_next;
1907	} else {
1908		mg = mc->mc_rotor;
1909	}
1910
1911	/*
1912	 * If the hint put us into the wrong metaslab class, or into a
1913	 * metaslab group that has been passivated, just follow the rotor.
1914	 */
1915	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
1916		mg = mc->mc_rotor;
1917
1918	rotor = mg;
1919top:
1920	all_zero = B_TRUE;
1921	do {
1922		ASSERT(mg->mg_activation_count == 1);
1923
1924		vd = mg->mg_vd;
1925
1926		/*
1927		 * Don't allocate from faulted devices.
1928		 */
1929		if (zio_lock) {
1930			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
1931			allocatable = vdev_allocatable(vd);
1932			spa_config_exit(spa, SCL_ZIO, FTAG);
1933		} else {
1934			allocatable = vdev_allocatable(vd);
1935		}
1936
1937		/*
1938		 * Determine if the selected metaslab group is eligible
1939		 * for allocations. If we're ganging or have requested
1940		 * an allocation for the smallest gang block size
1941		 * then we don't want to avoid allocating to this
1942		 * metaslab group. If we're in this condition we should
1943		 * try to allocate from any device possible so that we
1944		 * don't inadvertently return ENOSPC and suspend the pool
1945		 * even though space is still available.
1946		 */
1947		if (allocatable && CAN_FASTGANG(flags) &&
1948		    psize > SPA_GANGBLOCKSIZE)
1949			allocatable = metaslab_group_allocatable(mg);
1950
1951		if (!allocatable)
1952			goto next;
1953
1954		/*
1955		 * Avoid writing single-copy data to a failing vdev
1956		 * unless the user instructs us that it is okay.
1957		 */
1958		if ((vd->vdev_stat.vs_write_errors > 0 ||
1959		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
1960		    d == 0 && dshift == 3 &&
1961		    !(zfs_write_to_degraded && vd->vdev_state ==
1962		    VDEV_STATE_DEGRADED)) {
1963			all_zero = B_FALSE;
1964			goto next;
1965		}
1966
1967		ASSERT(mg->mg_class == mc);
1968
1969		distance = vd->vdev_asize >> dshift;
1970		if (distance <= (1ULL << vd->vdev_ms_shift))
1971			distance = 0;
1972		else
1973			all_zero = B_FALSE;
1974
1975		asize = vdev_psize_to_asize(vd, psize);
1976		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
1977
1978		offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
1979		    dva, d);
1980		if (offset != -1ULL) {
1981			/*
1982			 * If we've just selected this metaslab group,
1983			 * figure out whether the corresponding vdev is
1984			 * over- or under-used relative to the pool,
1985			 * and set an allocation bias to even it out.
1986			 */
1987			if (mc->mc_aliquot == 0) {
1988				vdev_stat_t *vs = &vd->vdev_stat;
1989				int64_t vu, cu;
1990
1991				vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
1992				cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
1993
1994				/*
1995				 * Calculate how much more or less we should
1996				 * try to allocate from this device during
1997				 * this iteration around the rotor.
1998				 * For example, if a device is 80% full
1999				 * and the pool is 20% full then we should
2000				 * reduce allocations by 60% on this device.
2001				 *
2002				 * mg_bias = (20 - 80) * 512K / 100 = -307K
2003				 *
2004				 * This reduces allocations by 307K for this
2005				 * iteration.
2006				 */
2007				mg->mg_bias = ((cu - vu) *
2008				    (int64_t)mg->mg_aliquot) / 100;
2009			}
2010
2011			if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
2012			    mg->mg_aliquot + mg->mg_bias) {
2013				mc->mc_rotor = mg->mg_next;
2014				mc->mc_aliquot = 0;
2015			}
2016
2017			DVA_SET_VDEV(&dva[d], vd->vdev_id);
2018			DVA_SET_OFFSET(&dva[d], offset);
2019			DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
2020			DVA_SET_ASIZE(&dva[d], asize);
2021
2022			return (0);
2023		}
2024next:
2025		mc->mc_rotor = mg->mg_next;
2026		mc->mc_aliquot = 0;
2027	} while ((mg = mg->mg_next) != rotor);
2028
2029	if (!all_zero) {
2030		dshift++;
2031		ASSERT(dshift < 64);
2032		goto top;
2033	}
2034
2035	if (!allocatable && !zio_lock) {
2036		dshift = 3;
2037		zio_lock = B_TRUE;
2038		goto top;
2039	}
2040
2041	bzero(&dva[d], sizeof (dva_t));
2042
2043	return (SET_ERROR(ENOSPC));
2044}
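
/*
 * Illustrative sketch: a standalone re-statement of the mg_bias
 * arithmetic used above, with toy names (toy_group, toy_bias) invented
 * for the example.  It reproduces the "80% full vdev in a 20% full
 * pool" case from the comment in metaslab_alloc_dva(), using the 512K
 * aliquot from that example.  Guarded with #if 0 so it is not compiled
 * as part of this file.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct toy_group {
	int64_t	aliquot;	/* bytes to allocate here per rotation */
	int64_t	alloc;		/* bytes allocated on this vdev */
	int64_t	space;		/* total bytes on this vdev */
};

/*
 * Mirror of the mg_bias computation: a vdev that is fuller than the
 * pool average gets proportionally fewer bytes this time around the
 * rotor.
 */
static int64_t
toy_bias(const struct toy_group *g, int64_t pool_alloc, int64_t pool_space)
{
	int64_t vu = (g->alloc * 100) / (g->space + 1);		/* vdev % used */
	int64_t cu = (pool_alloc * 100) / (pool_space + 1);	/* pool % used */

	return (((cu - vu) * g->aliquot) / 100);
}

int
main(void)
{
	struct toy_group g = {
		.aliquot = 512 * 1024,	/* 512K aliquot */
		.alloc = 80,		/* vdev is 80% full ... */
		.space = 100,		/* ... in scaled units */
	};

	/* Pool is 20% full; expect roughly -307K, as in the comment. */
	(void) printf("bias = %lld bytes\n",
	    (long long)toy_bias(&g, 20, 100));
	return (0);
}
#endif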
2045
2046/*
2047 * Free the block represented by DVA in the context of the specified
2048 * transaction group.
2049 */
2050static void
2051metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
2052{
2053	uint64_t vdev = DVA_GET_VDEV(dva);
2054	uint64_t offset = DVA_GET_OFFSET(dva);
2055	uint64_t size = DVA_GET_ASIZE(dva);
2056	vdev_t *vd;
2057	metaslab_t *msp;
2058
2059	ASSERT(DVA_IS_VALID(dva));
2060
2061	if (txg > spa_freeze_txg(spa))
2062		return;
2063
2064	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
2065	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
2066		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
2067		    (u_longlong_t)vdev, (u_longlong_t)offset);
2068		ASSERT(0);
2069		return;
2070	}
2071
2072	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2073
2074	if (DVA_GET_GANG(dva))
2075		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2076
2077	mutex_enter(&msp->ms_lock);
2078
2079	if (now) {
2080		range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
2081		    offset, size);
2082
2083		VERIFY(!msp->ms_condensing);
2084		VERIFY3U(offset, >=, msp->ms_start);
2085		VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
2086		VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
2087		    msp->ms_size);
2088		VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2089		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2090		range_tree_add(msp->ms_tree, offset, size);
2091	} else {
2092		if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0)
2093			vdev_dirty(vd, VDD_METASLAB, msp, txg);
2094		range_tree_add(msp->ms_freetree[txg & TXG_MASK],
2095		    offset, size);
2096	}
2097
2098	mutex_exit(&msp->ms_lock);
2099}
2100
2101/*
2102 * Intent log support: upon opening the pool after a crash, notify the SPA
2103 * of blocks that the intent log has allocated for immediate write, but
2104 * which are still considered free by the SPA because the last transaction
2105 * group didn't commit yet.
2106 */
2107static int
2108metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
2109{
2110	uint64_t vdev = DVA_GET_VDEV(dva);
2111	uint64_t offset = DVA_GET_OFFSET(dva);
2112	uint64_t size = DVA_GET_ASIZE(dva);
2113	vdev_t *vd;
2114	metaslab_t *msp;
2115	int error = 0;
2116
2117	ASSERT(DVA_IS_VALID(dva));
2118
2119	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
2120	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
2121		return (SET_ERROR(ENXIO));
2122
2123	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2124
2125	if (DVA_GET_GANG(dva))
2126		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2127
2128	mutex_enter(&msp->ms_lock);
2129
2130	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
2131		error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
2132
2133	if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
2134		error = SET_ERROR(ENOENT);
2135
2136	if (error || txg == 0) {	/* txg == 0 indicates dry run */
2137		mutex_exit(&msp->ms_lock);
2138		return (error);
2139	}
2140
2141	VERIFY(!msp->ms_condensing);
2142	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2143	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2144	VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
2145	range_tree_remove(msp->ms_tree, offset, size);
2146
2147	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
2148		if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2149			vdev_dirty(vd, VDD_METASLAB, msp, txg);
2150		range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
2151	}
2152
2153	mutex_exit(&msp->ms_lock);
2154
2155	return (0);
2156}
2157
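/*
 * Allocate ndvas DVAs for the given block pointer by calling
 * metaslab_alloc_dva() once per copy.  If any copy fails, the DVAs
 * already allocated for this block are freed and zeroed and the error
 * is returned; on success the block's birth txg is set.
 */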
2158int
2159metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
2160    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
2161{
2162	dva_t *dva = bp->blk_dva;
2163	dva_t *hintdva = hintbp->blk_dva;
2164	int error = 0;
2165
2166	ASSERT(bp->blk_birth == 0);
2167	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
2168
2169	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2170
2171	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
2172		spa_config_exit(spa, SCL_ALLOC, FTAG);
2173		return (SET_ERROR(ENOSPC));
2174	}
2175
2176	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
2177	ASSERT(BP_GET_NDVAS(bp) == 0);
2178	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
2179
2180	for (int d = 0; d < ndvas; d++) {
2181		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
2182		    txg, flags);
2183		if (error != 0) {
2184			for (d--; d >= 0; d--) {
2185				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
2186				bzero(&dva[d], sizeof (dva_t));
2187			}
2188			spa_config_exit(spa, SCL_ALLOC, FTAG);
2189			return (error);
2190		}
2191	}
2192	ASSERT(error == 0);
2193	ASSERT(BP_GET_NDVAS(bp) == ndvas);
2194
2195	spa_config_exit(spa, SCL_ALLOC, FTAG);
2196
2197	BP_SET_BIRTH(bp, txg, txg);
2198
2199	return (0);
2200}
2201
2202void
2203metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
2204{
2205	const dva_t *dva = bp->blk_dva;
2206	int ndvas = BP_GET_NDVAS(bp);
2207
2208	ASSERT(!BP_IS_HOLE(bp));
2209	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
2210
2211	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
2212
2213	for (int d = 0; d < ndvas; d++)
2214		metaslab_free_dva(spa, &dva[d], txg, now);
2215
2216	spa_config_exit(spa, SCL_FREE, FTAG);
2217}
2218
2219int
2220metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
2221{
2222	const dva_t *dva = bp->blk_dva;
2223	int ndvas = BP_GET_NDVAS(bp);
2224	int error = 0;
2225
2226	ASSERT(!BP_IS_HOLE(bp));
2227
2228	if (txg != 0) {
2229		/*
2230		 * First do a dry run to make sure all DVAs are claimable,
2231		 * so we don't have to unwind from partial failures below.
2232		 */
2233		if ((error = metaslab_claim(spa, bp, 0)) != 0)
2234			return (error);
2235	}
2236
2237	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2238
2239	for (int d = 0; d < ndvas; d++)
2240		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
2241			break;
2242
2243	spa_config_exit(spa, SCL_ALLOC, FTAG);
2244
2245	ASSERT(error == 0 || txg == 0);
2246
2247	return (error);
2248}
2249
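/*
 * Debug-only check (enabled by ZFS_DEBUG_ZIO_FREE) on a block that is
 * about to be freed: verify that none of its DVAs are already present
 * in the owning metaslab's in-core free, per-txg freetree, or defertree
 * ranges, which would indicate a double free.
 */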
2250void
2251metaslab_check_free(spa_t *spa, const blkptr_t *bp)
2252{
2253	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
2254		return;
2255
2256	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2257	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
2258		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
2259		vdev_t *vd = vdev_lookup_top(spa, vdev);
2260		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
2261		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
2262		metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2263
2264		if (msp->ms_loaded)
2265			range_tree_verify(msp->ms_tree, offset, size);
2266
2267		for (int j = 0; j < TXG_SIZE; j++)
2268			range_tree_verify(msp->ms_freetree[j], offset, size);
2269		for (int j = 0; j < TXG_DEFER_SIZE; j++)
2270			range_tree_verify(msp->ms_defertree[j], offset, size);
2271	}
2272	spa_config_exit(spa, SCL_VDEV, FTAG);
2273}
2274