metaslab_impl.h revision 307279
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 */

#ifndef _SYS_METASLAB_IMPL_H
#define	_SYS_METASLAB_IMPL_H

#include <sys/metaslab.h>
#include <sys/space_map.h>
#include <sys/range_tree.h>
#include <sys/vdev.h>
#include <sys/txg.h>
#include <sys/avl.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * A metaslab class encompasses a category of allocatable top-level vdevs.
 * Each top-level vdev is associated with a metaslab group which defines
 * the allocatable region for that vdev. Examples of these categories include
 * "normal" for data block allocations (i.e. main pool allocations) or "log"
 * for allocations designated for intent log devices (i.e. slog devices).
 * When a block allocation is requested from the SPA it is associated with a
 * metaslab_class_t, and only top-level vdevs (i.e. metaslab groups) belonging
 * to the class can be used to satisfy that request. Allocations are done
 * by traversing the metaslab groups that are linked off of the mc_rotor field.
 * This rotor points to the next metaslab group where allocations will be
 * attempted. Allocating a block is a 3-step process -- select the metaslab
 * group, select the metaslab, and then allocate the block. The metaslab
 * class defines the low-level block allocator that will be used as the
 * final step in allocation. These allocators are pluggable, allowing each
 * class to use a block allocator that best suits that class.
 */
struct metaslab_class {
	kmutex_t		mc_lock;
	spa_t			*mc_spa;
	metaslab_group_t	*mc_rotor;
	metaslab_ops_t		*mc_ops;
	uint64_t		mc_aliquot;

	/*
	 * Track the number of metaslab groups that have been initialized
	 * and can accept allocations. An initialized metaslab group is
	 * one that has been completely added to the config (i.e. we have
	 * updated the MOS config and the space has been added to the pool).
	 */
	uint64_t		mc_groups;

	/*
	 * Toggle to enable/disable the allocation throttle.
	 */
	boolean_t		mc_alloc_throttle_enabled;

	/*
	 * The allocation throttle works on a reservation system. Whenever
	 * an asynchronous zio wants to perform an allocation it must
	 * first reserve the number of blocks that it wants to allocate.
	 * If there aren't sufficient slots available for the pending zio
	 * then that I/O is throttled until more slots free up. The current
	 * number of reserved allocations is maintained by the mc_alloc_slots
	 * refcount. The mc_alloc_max_slots value determines the maximum
	 * number of allocations that the system allows. Gang blocks are
	 * allowed to reserve slots even if we've reached the maximum
	 * number of allocations allowed.
	 */
	uint64_t		mc_alloc_max_slots;
	refcount_t		mc_alloc_slots;

	uint64_t		mc_alloc_groups; /* # of allocatable groups */

	uint64_t		mc_alloc;	/* total allocated space */
	uint64_t		mc_deferred;	/* total deferred frees */
	uint64_t		mc_space;	/* total space (alloc + free) */
	uint64_t		mc_dspace;	/* total deflated space */
	uint64_t		mc_minblocksize;
	uint64_t		mc_histogram[RANGE_TREE_HISTOGRAM_SIZE];
};
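/*
 * Illustrative sketch only (not part of this header): one way the
 * reservation throttle and the rotor traversal described above could
 * fit together. msex_class_alloc() and msex_group_alloc() are
 * hypothetical stand-ins; the real entry points live in metaslab.c.
 */
#if 0	/* example, never compiled */
static int
msex_class_alloc(metaslab_class_t *mc, uint64_t asize, uint64_t *offp,
    boolean_t is_gang)
{
	metaslab_group_t *mg, *rotor;

	/* Step 0: reserve an allocation slot if the throttle is on. */
	if (mc->mc_alloc_throttle_enabled && !is_gang &&
	    (uint64_t)refcount_count(&mc->mc_alloc_slots) >=
	    mc->mc_alloc_max_slots)
		return (SET_ERROR(EAGAIN));	/* throttled; retry later */

	/* Steps 1-3: pick a group, then a metaslab, then a block. */
	mg = rotor = mc->mc_rotor;
	do {
		if (mg->mg_initialized && mg->mg_allocatable &&
		    msex_group_alloc(mg, asize, offp) == 0) {
			mc->mc_rotor = mg->mg_next;	/* advance rotor */
			return (0);
		}
		mg = mg->mg_next;	/* circular list of groups */
	} while (mg != rotor);

	return (SET_ERROR(ENOSPC));	/* no group could satisfy it */
}
#endif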
/*
 * Metaslab groups encapsulate all the allocatable regions (i.e. metaslabs)
 * of a top-level vdev. They are linked together to form a circular linked
 * list and can belong to only one metaslab class. Metaslab groups may become
 * ineligible for allocations for a number of reasons such as limited free
 * space, fragmentation, or going offline. When this happens the allocator will
 * simply find the next metaslab group in the linked list and attempt
 * to allocate from that group instead.
 */
struct metaslab_group {
	kmutex_t		mg_lock;
	avl_tree_t		mg_metaslab_tree;
	uint64_t		mg_aliquot;
	boolean_t		mg_allocatable;		/* can we allocate? */

	/*
	 * A metaslab group is considered to be initialized only after
	 * we have updated the MOS config and added the space to the pool.
	 * We only allow allocation attempts to a metaslab group if it
	 * has been initialized.
	 */
	boolean_t		mg_initialized;

	uint64_t		mg_free_capacity;	/* percentage free */
	int64_t			mg_bias;
	int64_t			mg_activation_count;
	metaslab_class_t	*mg_class;
	vdev_t			*mg_vd;
	taskq_t			*mg_taskq;
	metaslab_group_t	*mg_prev;
	metaslab_group_t	*mg_next;

	/*
	 * Each metaslab group can handle mg_max_alloc_queue_depth allocations
	 * which are tracked by mg_alloc_queue_depth. It's possible for a
	 * metaslab group to handle more allocations than its max. This
	 * can occur when gang blocks are required or when other groups
	 * are unable to handle their share of allocations.
	 */
	uint64_t		mg_max_alloc_queue_depth;
	refcount_t		mg_alloc_queue_depth;

	/*
	 * A metaslab group that can no longer allocate the minimum block
	 * size will set mg_no_free_space. Once a metaslab group is out
	 * of space then its share of work must be distributed to other
	 * groups.
	 */
	boolean_t		mg_no_free_space;

	uint64_t		mg_allocations;
	uint64_t		mg_failed_allocations;
	uint64_t		mg_fragmentation;
	uint64_t		mg_histogram[RANGE_TREE_HISTOGRAM_SIZE];
};
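/*
 * Illustrative sketch only: the per-group queue-depth check described
 * above. msex_group_reserve() is a hypothetical helper; the real logic
 * lives in metaslab.c. Gang blocks (and groups covering for others)
 * may push the depth past mg_max_alloc_queue_depth.
 */
#if 0	/* example, never compiled */
static boolean_t
msex_group_reserve(metaslab_group_t *mg, boolean_t is_gang)
{
	if (!is_gang &&
	    (uint64_t)refcount_count(&mg->mg_alloc_queue_depth) >=
	    mg->mg_max_alloc_queue_depth)
		return (B_FALSE);	/* group is at its share */

	(void) refcount_add(&mg->mg_alloc_queue_depth, FTAG);
	return (B_TRUE);
}
#endif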
/*
 * This value defines the number of elements in the ms_lbas array. The value
 * of 64 was chosen as it covers all power-of-2 buckets up to UINT64_MAX.
 * This is the equivalent of highbit(UINT64_MAX).
 */
#define	MAX_LBAS	64

/*
 * Each metaslab maintains a set of in-core trees to track metaslab operations.
 * The in-core free tree (ms_tree) contains the current list of free segments.
 * As blocks are allocated, the allocated segments are removed from the ms_tree
 * and added to a per-txg allocation tree (ms_alloctree). As blocks are freed,
 * they are added to the per-txg free tree (ms_freetree). These per-txg
 * trees allow us to process all allocations and frees in syncing context
 * where it is safe to update the on-disk space maps. One additional in-core
 * tree is maintained to track deferred frees (ms_defertree). Once a block
 * is freed it will move from the ms_freetree to the ms_defertree. A deferred
 * free means that a block has been freed but cannot be used by the pool
 * until TXG_DEFER_SIZE transaction groups later. For example, a block
 * that is freed in txg 50 will not be available for reallocation until
 * txg 52 (50 + TXG_DEFER_SIZE). This provides a safety net for uberblock
 * rollback: a pool could be safely rolled back TXG_DEFER_SIZE
 * transaction groups with the assurance that no block has been reallocated.
 *
 * The simplified transition diagram looks like this:
 *
 *
 *      ALLOCATE
 *         |
 *         V
 *    free segment (ms_tree) --------> ms_alloctree ----> (write to space map)
 *         ^
 *         |
 *         |                           ms_freetree <--- FREE
 *         |                                 |
 *         |                                 |
 *         |                                 |
 *         +----------- ms_defertree <-------+---------> (write to space map)
 *
 *
 * Each metaslab's space is tracked in a single space map in the MOS,
 * which is only updated in syncing context. Each time we sync a txg,
 * we append the allocs and frees from that txg to the space map.
 * The pool space is only updated once all metaslabs have finished syncing.
 *
 * To load the in-core free tree we read the space map from disk.
 * This object contains a series of alloc and free records that are
 * combined to make up the list of all free segments in this metaslab. These
 * segments are represented in-core by the ms_tree and are stored in an
 * AVL tree.
 *
 * As the space map grows (as a result of the appends) it will
 * eventually become space-inefficient. When the metaslab's in-core free tree
 * is zfs_condense_pct/100 times the size of the minimal on-disk
 * representation, we rewrite it in its minimized form. If a metaslab
 * needs to condense then we must set the ms_condensing flag to ensure
 * that allocations are not performed on the metaslab that is being written.
 */
struct metaslab {
	kmutex_t	ms_lock;
	kcondvar_t	ms_load_cv;
	space_map_t	*ms_sm;
	metaslab_ops_t	*ms_ops;
	uint64_t	ms_id;
	uint64_t	ms_start;
	uint64_t	ms_size;
	uint64_t	ms_fragmentation;

	range_tree_t	*ms_alloctree[TXG_SIZE];
	range_tree_t	*ms_freetree[TXG_SIZE];
	range_tree_t	*ms_defertree[TXG_DEFER_SIZE];
	range_tree_t	*ms_tree;

	boolean_t	ms_condensing;	/* condensing? */
	boolean_t	ms_condense_wanted;
	boolean_t	ms_loaded;
	boolean_t	ms_loading;

	int64_t		ms_deferspace;	/* sum of ms_defertree[] space */
	uint64_t	ms_weight;	/* weight vs. others in group */
	uint64_t	ms_access_txg;

	/*
	 * The metaslab block allocators can optionally use a size-ordered
	 * range tree and/or an array of LBAs. Not all allocators use
	 * this functionality. The ms_size_tree should always contain the
	 * same number of segments as the ms_tree. The only difference
	 * is that the ms_size_tree is ordered by segment sizes.
	 */
	avl_tree_t	ms_size_tree;
	uint64_t	ms_lbas[MAX_LBAS];

	metaslab_group_t *ms_group;	/* metaslab group */
	avl_node_t	ms_group_node;	/* node in metaslab group tree */
	txg_node_t	ms_txg_node;	/* per-txg dirty metaslab links */
};
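/*
 * Illustrative sketch only: the tree transitions from the diagram
 * above, reduced to their essentials. The msex_* names are
 * hypothetical; the real transitions happen in metaslab_sync() and
 * metaslab_sync_done() in metaslab.c, with ms_lock held and with
 * additional handling for unloaded metaslabs.
 */
#if 0	/* example, never compiled */
static void
msex_alloc_segment(metaslab_t *msp, uint64_t off, uint64_t size,
    uint64_t txg)
{
	/* ALLOCATE: the segment leaves ms_tree for the per-txg tree. */
	range_tree_remove(msp->ms_tree, off, size);
	range_tree_add(msp->ms_alloctree[txg & TXG_MASK], off, size);
}

static void
msex_free_segment(metaslab_t *msp, uint64_t off, uint64_t size,
    uint64_t txg)
{
	/* FREE: queue the segment in the per-txg free tree. */
	range_tree_add(msp->ms_freetree[txg & TXG_MASK], off, size);
}

static void
msex_sync_done(metaslab_t *msp, uint64_t txg)
{
	/*
	 * A defer tree that is TXG_DEFER_SIZE txgs old is safe to
	 * reuse: return its segments to the in-core free tree.
	 */
	range_tree_t **defer = &msp->ms_defertree[txg % TXG_DEFER_SIZE];

	range_tree_vacate(*defer, range_tree_add, msp->ms_tree);
}
#endif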
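/*
 * Illustrative sketch only: the condensing trigger described above.
 * zfs_condense_pct is the real tunable; msex_optimal_size() is a
 * hypothetical stand-in for computing the minimal on-disk size of
 * ms_tree, a check that metaslab.c performs during sync.
 */
#if 0	/* example, never compiled */
static boolean_t
msex_should_condense(metaslab_t *msp)
{
	uint64_t optimal = msex_optimal_size(msp->ms_sm, msp->ms_tree);

	/*
	 * Condense once the appended space map has grown to
	 * zfs_condense_pct/100 times its minimal representation.
	 */
	return (space_map_length(msp->ms_sm) >
	    optimal * zfs_condense_pct / 100);
}
#endif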
#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_METASLAB_IMPL_H */