vdev_cache.c revision 263397
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/kstat.h>

/*
 * Virtual device read-ahead caching.
 *
 * This file implements a simple LRU read-ahead cache.  When the DMU reads
 * a given block, it will often want other, nearby blocks soon thereafter.
 * We take advantage of this by reading a larger disk region and caching
 * the result.  In the best case, this can turn 128 back-to-back 512-byte
 * reads into a single 64k read followed by 127 cache hits; this reduces
 * latency dramatically.  In the worst case, it can turn an isolated 512-byte
 * read into a 64k read, which doesn't affect latency all that much but is
 * terribly wasteful of bandwidth.  A more intelligent version of the cache
 * could keep track of access patterns and not do read-ahead unless it sees
 * at least two temporally close I/Os to the same region.  Currently, only
 * metadata I/O is inflated.  A further enhancement could take advantage of
 * more semantic information about the I/O.  And it could use something
 * faster than an AVL tree; that was chosen solely for convenience.
 *
 * There are five cache operations: allocate, fill, read, write, evict.
 *
 * (1) Allocate.  This reserves a cache entry for the specified region.
 *     We separate the allocate and fill operations so that multiple threads
 *     don't generate I/O for the same cache miss.
 *
 * (2) Fill.  When the I/O for a cache miss completes, the fill routine
 *     places the data in the previously allocated cache entry.
 *
 * (3) Read.  Read data from the cache.
 *
 * (4) Write.  Update cache contents after write completion.
 *
 * (5) Evict.  When allocating a new entry, we evict the oldest (LRU) entry
 *     if the total cache size exceeds zfs_vdev_cache_size.
 */

/*
 * These tunables are for performance analysis.
 */
/*
 * All i/os smaller than zfs_vdev_cache_max will be turned into
 * 1<<zfs_vdev_cache_bshift byte reads by the vdev_cache (aka software
 * track buffer).  At most zfs_vdev_cache_size bytes will be kept in each
 * vdev's vdev_cache.
 *
 * TODO: Note that with the current ZFS code, it turns out that the
 * vdev cache is not helpful, and in some cases actually harmful.  It
 * is better if we disable this.  Once some time has passed, we should
 * actually remove this to simplify the code.  For now we just disable
 * it by setting the zfs_vdev_cache_size to zero.  Note that Solaris 11
 * has made these same changes.
 */
int zfs_vdev_cache_max = 1<<14;			/* 16KB */
int zfs_vdev_cache_size = 0;
int zfs_vdev_cache_bshift = 16;

#define	VCBS (1 << zfs_vdev_cache_bshift)	/* 64KB */

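/*
 * Worked example (editor's illustration, not part of the original source;
 * it assumes zfs_vdev_cache_size has been set non-zero so the cache is
 * actually in use): with the defaults above, VCBS is 1 << 16 = 64KB and
 * only reads no larger than zfs_vdev_cache_max (16KB) are inflated.  A 2KB
 * metadata read at offset 0x12345000 maps to the cache line starting at
 * P2ALIGN(0x12345000, VCBS) = 0x12340000 with phase
 * P2PHASE(0x12345000, VCBS) = 0x5000.  On a miss, vdev_cache_read() below
 * issues a single 64KB read at 0x12340000; when that read completes, the
 * 2KB the caller asked for is copied out of the cached buffer starting at
 * offset 0x5000.
 */
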
SYSCTL_DECL(_vfs_zfs_vdev);
SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, cache, CTLFLAG_RW, 0, "ZFS VDEV Cache");
TUNABLE_INT("vfs.zfs.vdev.cache.max", &zfs_vdev_cache_max);
SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, max, CTLFLAG_RDTUN,
    &zfs_vdev_cache_max, 0, "Maximum I/O request size that increases read size");
TUNABLE_INT("vfs.zfs.vdev.cache.size", &zfs_vdev_cache_size);
SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, size, CTLFLAG_RDTUN,
    &zfs_vdev_cache_size, 0, "Size of VDEV cache");
TUNABLE_INT("vfs.zfs.vdev.cache.bshift", &zfs_vdev_cache_bshift);
SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, bshift, CTLFLAG_RDTUN,
    &zfs_vdev_cache_bshift, 0, "Turn too-small requests into 1 << this value");

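/*
 * Usage sketch (editor's addition, not part of the original source): these
 * sysctls are boot-time tunables (CTLFLAG_RDTUN), so on FreeBSD they would
 * normally be set from /boot/loader.conf rather than with sysctl(8) at
 * runtime, for example:
 *
 *	vfs.zfs.vdev.cache.size="10485760"	# 10MB per-vdev cache
 *	vfs.zfs.vdev.cache.max="16384"
 *	vfs.zfs.vdev.cache.bshift="16"
 *
 * The values shown are illustrative only.
 */
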
kstat_t	*vdc_ksp = NULL;

typedef struct vdc_stats {
	kstat_named_t vdc_stat_delegations;
	kstat_named_t vdc_stat_hits;
	kstat_named_t vdc_stat_misses;
} vdc_stats_t;

static vdc_stats_t vdc_stats = {
	{ "delegations",	KSTAT_DATA_UINT64 },
	{ "hits",		KSTAT_DATA_UINT64 },
	{ "misses",		KSTAT_DATA_UINT64 }
};

#define	VDCSTAT_BUMP(stat)	atomic_add_64(&vdc_stats.stat.value.ui64, 1);

static int
vdev_cache_offset_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_offset < ve2->ve_offset)
		return (-1);
	if (ve1->ve_offset > ve2->ve_offset)
		return (1);
	return (0);
}

static int
vdev_cache_lastused_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_lastused < ve2->ve_lastused)
		return (-1);
	if (ve1->ve_lastused > ve2->ve_lastused)
		return (1);

	/*
	 * Among equally old entries, sort by offset to ensure uniqueness.
	 */
	return (vdev_cache_offset_compare(a1, a2));
}

/*
 * Evict the specified entry from the cache.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);
	ASSERT(ve->ve_data != NULL);

	avl_remove(&vc->vc_lastused_tree, ve);
	avl_remove(&vc->vc_offset_tree, ve);
	zio_buf_free(ve->ve_data, VCBS);
	kmem_free(ve, sizeof (vdev_cache_entry_t));
}

/*
 * Allocate an entry in the cache.  At this point we don't have the data;
 * we're just creating a placeholder so that multiple threads don't all
 * go off and read the same blocks.
 */
static vdev_cache_entry_t *
vdev_cache_allocate(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	uint64_t offset = P2ALIGN(zio->io_offset, VCBS);
	vdev_cache_entry_t *ve;

	ASSERT(MUTEX_HELD(&vc->vc_lock));

	if (zfs_vdev_cache_size == 0)
		return (NULL);

	/*
	 * If adding a new entry would exceed the cache size,
	 * evict the oldest entry (LRU).
	 */
	if ((avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift) >
	    zfs_vdev_cache_size) {
		ve = avl_first(&vc->vc_lastused_tree);
		if (ve->ve_fill_io != NULL)
			return (NULL);
		ASSERT(ve->ve_hits != 0);
		vdev_cache_evict(vc, ve);
	}

	ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
	ve->ve_offset = offset;
	ve->ve_lastused = ddi_get_lbolt();
	ve->ve_data = zio_buf_alloc(VCBS);

	avl_add(&vc->vc_offset_tree, ve);
	avl_add(&vc->vc_lastused_tree, ve);

	return (ve);
}

static void
vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
{
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);

	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);

	if (ve->ve_lastused != ddi_get_lbolt()) {
		avl_remove(&vc->vc_lastused_tree, ve);
		ve->ve_lastused = ddi_get_lbolt();
		avl_add(&vc->vc_lastused_tree, ve);
	}

	ve->ve_hits++;
	bcopy(ve->ve_data + cache_phase, zio->io_data, zio->io_size);
}

/*
 * Fill a previously allocated cache entry with data.
 */
static void
vdev_cache_fill(zio_t *fio)
{
	vdev_t *vd = fio->io_vd;
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve = fio->io_private;
	zio_t *pio;

	ASSERT(fio->io_size == VCBS);

	/*
	 * Add data to the cache.
	 */
	mutex_enter(&vc->vc_lock);

	ASSERT(ve->ve_fill_io == fio);
	ASSERT(ve->ve_offset == fio->io_offset);
	ASSERT(ve->ve_data == fio->io_data);

	ve->ve_fill_io = NULL;

	/*
	 * Even if this cache line was invalidated by a missed write update,
	 * any reads that were queued up before the missed update are still
	 * valid, so we can satisfy them from this line before we evict it.
	 */
	while ((pio = zio_walk_parents(fio)) != NULL)
		vdev_cache_hit(vc, ve, pio);

	if (fio->io_error || ve->ve_missed_update)
		vdev_cache_evict(vc, ve);

	mutex_exit(&vc->vc_lock);
}

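/*
 * Editor's note (not part of the original source): a read accepted by the
 * cache ends in one of three ways, each counted by a vdev_cache_stats kstat.
 * If another read already has a fill I/O in flight for the same cache line,
 * the zio is attached to that fill I/O as a waiting parent and is satisfied
 * when the fill completes ("delegations").  If the line is present and
 * valid, the data is copied out immediately ("hits").  Otherwise a new line
 * is allocated and a single VCBS-sized read is issued to fill it ("misses").
 * Reads that bypass the cache (ZIO_FLAG_DONT_CACHE, too large, straddling a
 * line boundary, an invalidated line, or no line available) simply return
 * B_FALSE.
 */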
/*
 * Read data from the cache.  Returns B_TRUE on a cache hit, B_FALSE on miss.
 */
boolean_t
vdev_cache_read(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);
	zio_t *fio;

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
		return (B_FALSE);

	if (zio->io_size > zfs_vdev_cache_max)
		return (B_FALSE);

	/*
	 * If the I/O straddles two or more cache blocks, don't cache it.
	 */
	if (P2BOUNDARY(zio->io_offset, zio->io_size, VCBS))
		return (B_FALSE);

	ASSERT(cache_phase + zio->io_size <= VCBS);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = cache_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, NULL);

	if (ve != NULL) {
		if (ve->ve_missed_update) {
			mutex_exit(&vc->vc_lock);
			return (B_FALSE);
		}

		if ((fio = ve->ve_fill_io) != NULL) {
			zio_vdev_io_bypass(zio);
			zio_add_child(zio, fio);
			mutex_exit(&vc->vc_lock);
			VDCSTAT_BUMP(vdc_stat_delegations);
			return (B_TRUE);
		}

		vdev_cache_hit(vc, ve, zio);
		zio_vdev_io_bypass(zio);

		mutex_exit(&vc->vc_lock);
		VDCSTAT_BUMP(vdc_stat_hits);
		return (B_TRUE);
	}

	ve = vdev_cache_allocate(zio);

	if (ve == NULL) {
		mutex_exit(&vc->vc_lock);
		return (B_FALSE);
	}

	fio = zio_vdev_delegated_io(zio->io_vd, cache_offset,
	    ve->ve_data, VCBS, ZIO_TYPE_READ, ZIO_PRIORITY_NOW,
	    ZIO_FLAG_DONT_CACHE, vdev_cache_fill, ve);

	ve->ve_fill_io = fio;
	zio_vdev_io_bypass(zio);
	zio_add_child(zio, fio);

	mutex_exit(&vc->vc_lock);
	zio_nowait(fio);
	VDCSTAT_BUMP(vdc_stat_misses);

	return (B_TRUE);
}

/*
 * Update cache contents upon write completion.
 */
void
vdev_cache_write(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t io_start = zio->io_offset;
	uint64_t io_end = io_start + zio->io_size;
	uint64_t min_offset = P2ALIGN(io_start, VCBS);
	uint64_t max_offset = P2ROUNDUP(io_end, VCBS);
	avl_index_t where;

	ASSERT(zio->io_type == ZIO_TYPE_WRITE);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = min_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, &where);

	if (ve == NULL)
		ve = avl_nearest(&vc->vc_offset_tree, where, AVL_AFTER);

	while (ve != NULL && ve->ve_offset < max_offset) {
		uint64_t start = MAX(ve->ve_offset, io_start);
		uint64_t end = MIN(ve->ve_offset + VCBS, io_end);

		if (ve->ve_fill_io != NULL) {
			ve->ve_missed_update = 1;
		} else {
			bcopy((char *)zio->io_data + start - io_start,
			    ve->ve_data + start - ve->ve_offset, end - start);
		}
		ve = AVL_NEXT(&vc->vc_offset_tree, ve);
	}
	mutex_exit(&vc->vc_lock);
}

void
vdev_cache_purge(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve;

	mutex_enter(&vc->vc_lock);
	while ((ve = avl_first(&vc->vc_offset_tree)) != NULL)
		vdev_cache_evict(vc, ve);
	mutex_exit(&vc->vc_lock);
}

void
vdev_cache_init(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	mutex_init(&vc->vc_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&vc->vc_offset_tree, vdev_cache_offset_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_offset_node));

	avl_create(&vc->vc_lastused_tree, vdev_cache_lastused_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_lastused_node));
}

void
vdev_cache_fini(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	vdev_cache_purge(vd);

	avl_destroy(&vc->vc_offset_tree);
	avl_destroy(&vc->vc_lastused_tree);

	mutex_destroy(&vc->vc_lock);
}

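/*
 * Editor's note (not part of the original source): the statistics registered
 * below are published through the kstat framework; on FreeBSD they would
 * typically appear in the sysctl tree as
 * kstat.zfs.misc.vdev_cache_stats.{delegations,hits,misses}, though the
 * exact path may vary by platform and ZFS version.
 */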
void
vdev_cache_stat_init(void)
{
	vdc_ksp = kstat_create("zfs", 0, "vdev_cache_stats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (vdc_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (vdc_ksp != NULL) {
		vdc_ksp->ks_data = &vdc_stats;
		kstat_install(vdc_ksp);
	}
}

void
vdev_cache_stat_fini(void)
{
	if (vdc_ksp != NULL) {
		kstat_delete(vdc_ksp);
		vdc_ksp = NULL;
	}
}