arc.c revision 263397
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic:  we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */
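/*
 * To make point 3 concrete: on a miss for a 128K block in a full cache,
 * a minimal sketch of the selection (illustrative only; the real logic
 * lives in arc_evict() below and also honors hash locks, reference
 * counts and recycling) looks roughly like:
 *
 *	uint64_t freed = 0;
 *	for (ab = list_tail(list); ab != NULL && freed < bytes;
 *	    ab = list_prev(list, ab)) {
 *		if (refcount_count(&ab->b_refcnt) == 0)
 *			freed += ab->b_size;	// take this candidate
 *	}
 *
 * i.e. we keep taking the "lowest" evictable buffers until their
 * cumulative size covers the space needed by the new block.
 */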
/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_compress.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#ifdef _KERNEL
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/trim_map.h>
#include <zfs_fletcher.h>
#include <sys/sdt.h>

#include <vm/vm_pageout.h>

#ifdef illumos
#ifndef _KERNEL
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;
int arc_procfd;
#endif
#endif /* illumos */

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/*
 * The number of iterations through arc_evict_*() before we
 * drop & reacquire the lock.
 */
int arc_evict_iterations = 100;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/* shift of arc_c for calculating both min and max arc_p */
static int		arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int		arc_shrink_shift = 5;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

/*
 * If this percent of memory is free, don't throttle.
 */
int arc_lotsfree_percent = 10;

static int arc_dead;
extern int zfs_prefetch_disable;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_disable_dup_eviction = 0;

TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
SYSCTL_DECL(_vfs_zfs);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
    "Maximum ARC size");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0,
    "Minimum ARC size");

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they
 * are linked onto a list in one of these arc states.  These are
 * the only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */
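/*
 * A rough sketch of the lifecycle for a buffer that is read twice and
 * then ages out (illustrative; the actual transitions are driven by
 * arc_access() and arc_change_state() below):
 *
 *	read miss  -> ARC_mru        (first access caches the block)
 *	read hit   -> ARC_mfu        (a later access marks it frequent)
 *	eviction   -> ARC_mfu_ghost  (data dropped, header remembered)
 *	ghost hit  -> ARC_mfu        (re-read; the ghost hit also nudges
 *	                              the target size in favor of MFU)
 */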
#define	ARCS_LOCK_PAD		CACHE_LINE_SIZE
struct arcs_lock {
	kmutex_t	arcs_lock;
#ifdef _KERNEL
	unsigned char	pad[(ARCS_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

/*
 * must be power of two for mask use to work
 */
#define	ARC_BUFC_NUMDATALISTS		16
#define	ARC_BUFC_NUMMETADATALISTS	16
#define	ARC_BUFC_NUMLISTS	(ARC_BUFC_NUMMETADATALISTS + ARC_BUFC_NUMDATALISTS)

typedef struct arc_state {
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	list_t	arcs_lists[ARC_BUFC_NUMLISTS]; /* list of evictable buffers */
	struct arcs_lock arcs_locks[ARC_BUFC_NUMLISTS] __aligned(CACHE_LINE_SIZE);
} arc_state_t;

#define	ARCS_LOCK(s, i)	(&((s)->arcs_locks[(i)].arcs_lock))

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_allocated;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_stolen;
	kstat_named_t arcstat_recycle_miss;
	/*
	 * Number of buffers that could not be evicted because the hash lock
	 * was held by another thread.  The lock may not necessarily be held
	 * by something using the same buffer, since hash locks are shared
	 * by multiple buffers.
	 */
	kstat_named_t arcstat_mutex_miss;
	/*
	 * Number of buffers skipped because they have I/O in progress, are
	 * indirect prefetch buffers that have not lived long enough, or are
	 * not from the spa we're trying to evict from.
	 */
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_evict_l2_cached;
	kstat_named_t arcstat_evict_l2_eligible;
	kstat_named_t arcstat_evict_l2_ineligible;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	kstat_named_t arcstat_hdr_size;
	kstat_named_t arcstat_data_size;
	kstat_named_t arcstat_other_size;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_read_bytes;
	kstat_named_t arcstat_l2_write_bytes;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_asize;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_l2_compress_successes;
	kstat_named_t arcstat_l2_compress_zeros;
	kstat_named_t arcstat_l2_compress_failures;
	kstat_named_t arcstat_l2_write_trylock_fail;
	kstat_named_t arcstat_l2_write_passed_headroom;
	kstat_named_t arcstat_l2_write_spa_mismatch;
	kstat_named_t arcstat_l2_write_in_l2;
	kstat_named_t arcstat_l2_write_hdr_io_in_progress;
	kstat_named_t arcstat_l2_write_not_cacheable;
	kstat_named_t arcstat_l2_write_full;
	kstat_named_t arcstat_l2_write_buffer_iter;
	kstat_named_t arcstat_l2_write_pios;
	kstat_named_t arcstat_l2_write_buffer_bytes_scanned;
	kstat_named_t arcstat_l2_write_buffer_list_iter;
	kstat_named_t arcstat_l2_write_buffer_list_null_iter;
	kstat_named_t arcstat_memory_throttle_count;
	kstat_named_t arcstat_duplicate_buffers;
	kstat_named_t arcstat_duplicate_buffers_size;
	kstat_named_t arcstat_duplicate_reads;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "allocated",			KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "stolen",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "evict_l2_cached",		KSTAT_DATA_UINT64 },
	{ "evict_l2_eligible",		KSTAT_DATA_UINT64 },
	{ "evict_l2_ineligible",	KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
"hash_collisions", KSTAT_DATA_UINT64 }, 396 { "hash_chains", KSTAT_DATA_UINT64 }, 397 { "hash_chain_max", KSTAT_DATA_UINT64 }, 398 { "p", KSTAT_DATA_UINT64 }, 399 { "c", KSTAT_DATA_UINT64 }, 400 { "c_min", KSTAT_DATA_UINT64 }, 401 { "c_max", KSTAT_DATA_UINT64 }, 402 { "size", KSTAT_DATA_UINT64 }, 403 { "hdr_size", KSTAT_DATA_UINT64 }, 404 { "data_size", KSTAT_DATA_UINT64 }, 405 { "other_size", KSTAT_DATA_UINT64 }, 406 { "l2_hits", KSTAT_DATA_UINT64 }, 407 { "l2_misses", KSTAT_DATA_UINT64 }, 408 { "l2_feeds", KSTAT_DATA_UINT64 }, 409 { "l2_rw_clash", KSTAT_DATA_UINT64 }, 410 { "l2_read_bytes", KSTAT_DATA_UINT64 }, 411 { "l2_write_bytes", KSTAT_DATA_UINT64 }, 412 { "l2_writes_sent", KSTAT_DATA_UINT64 }, 413 { "l2_writes_done", KSTAT_DATA_UINT64 }, 414 { "l2_writes_error", KSTAT_DATA_UINT64 }, 415 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 }, 416 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, 417 { "l2_evict_reading", KSTAT_DATA_UINT64 }, 418 { "l2_free_on_write", KSTAT_DATA_UINT64 }, 419 { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, 420 { "l2_cksum_bad", KSTAT_DATA_UINT64 }, 421 { "l2_io_error", KSTAT_DATA_UINT64 }, 422 { "l2_size", KSTAT_DATA_UINT64 }, 423 { "l2_asize", KSTAT_DATA_UINT64 }, 424 { "l2_hdr_size", KSTAT_DATA_UINT64 }, 425 { "l2_compress_successes", KSTAT_DATA_UINT64 }, 426 { "l2_compress_zeros", KSTAT_DATA_UINT64 }, 427 { "l2_compress_failures", KSTAT_DATA_UINT64 }, 428 { "l2_write_trylock_fail", KSTAT_DATA_UINT64 }, 429 { "l2_write_passed_headroom", KSTAT_DATA_UINT64 }, 430 { "l2_write_spa_mismatch", KSTAT_DATA_UINT64 }, 431 { "l2_write_in_l2", KSTAT_DATA_UINT64 }, 432 { "l2_write_io_in_progress", KSTAT_DATA_UINT64 }, 433 { "l2_write_not_cacheable", KSTAT_DATA_UINT64 }, 434 { "l2_write_full", KSTAT_DATA_UINT64 }, 435 { "l2_write_buffer_iter", KSTAT_DATA_UINT64 }, 436 { "l2_write_pios", KSTAT_DATA_UINT64 }, 437 { "l2_write_buffer_bytes_scanned", KSTAT_DATA_UINT64 }, 438 { "l2_write_buffer_list_iter", KSTAT_DATA_UINT64 }, 439 { "l2_write_buffer_list_null_iter", KSTAT_DATA_UINT64 }, 440 { "memory_throttle_count", KSTAT_DATA_UINT64 }, 441 { "duplicate_buffers", KSTAT_DATA_UINT64 }, 442 { "duplicate_buffers_size", KSTAT_DATA_UINT64 }, 443 { "duplicate_reads", KSTAT_DATA_UINT64 } 444}; 445 446#define ARCSTAT(stat) (arc_stats.stat.value.ui64) 447 448#define ARCSTAT_INCR(stat, val) \ 449 atomic_add_64(&arc_stats.stat.value.ui64, (val)) 450 451#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) 452#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) 453 454#define ARCSTAT_MAX(stat, val) { \ 455 uint64_t m; \ 456 while ((val) > (m = arc_stats.stat.value.ui64) && \ 457 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ 458 continue; \ 459} 460 461#define ARCSTAT_MAXSTAT(stat) \ 462 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) 463 464/* 465 * We define a macro to allow ARC hits/misses to be easily broken down by 466 * two separate conditions, giving a total of four different subtypes for 467 * each of hits and misses (so eight statistics total). 
kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

#define	L2ARC_IS_VALID_COMPRESS(_c_) \
	((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_loaned_bytes;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RD, &arc_meta_used, 0,
    "ARC metadata used");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_limit, CTLFLAG_RW, &arc_meta_limit, 0,
    "ARC metadata limit");

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_physdone;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;
	void			*b_thawed;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	uint64_t		b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
#ifdef illumos
static void arc_buf_watch(arc_buf_t *buf);
#endif /* illumos */

static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, the private
 * flags should never be passed in and should only be set by ARC code.  When
 * adding new public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_PREFETCH)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS &&	\
				    (hdr)->b_l2hdr != NULL)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

/*
 * Other sizes
 */

#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	CACHE_LINE_SIZE

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS] __aligned(CACHE_LINE_SIZE);
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(hdr) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))

uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		2			/* num of writes */
/*
 * If we discover during ARC scan any buffers to be compressed, we boost
 * our headroom for the next scanning cycle by this percentage multiple.
 */
#define	L2ARC_HEADROOM_BOOST	200
#define	L2ARC_FEED_SECS		1		/* caching interval secs */
#define	L2ARC_FEED_MIN_MS	200		/* min caching interval ms */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/* L2ARC Performance Tunables */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval milliseconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
boolean_t l2arc_feed_again = B_TRUE;		/* turbo warmup */
boolean_t l2arc_norw = B_TRUE;			/* no reads during writes */
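/*
 * A quick sanity check on the defaults above (a sketch of the arithmetic,
 * not a statement of guaranteed throughput): the feed thread issues at
 * most l2arc_write_max (8 MB) of writes per l2arc_feed_secs (1 s) pass,
 * so a cold cache device fills at roughly 8 MB/s; while the ARC is still
 * warming up, l2arc_write_boost is added on top, allowing up to ~16 MB
 * per pass.
 */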
state"); 740SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_lsize, CTLFLAG_RD, 741 &ARC_mfu.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mfu state"); 742 743SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD, 744 &ARC_mfu_ghost.arcs_size, 0, "size of mfu ghost state"); 745SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_lsize, CTLFLAG_RD, 746 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_METADATA], 0, 747 "size of metadata in mfu ghost state"); 748SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_lsize, CTLFLAG_RD, 749 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_DATA], 0, 750 "size of data in mfu ghost state"); 751 752SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD, 753 &ARC_l2c_only.arcs_size, 0, "size of mru state"); 754 755/* 756 * L2ARC Internals 757 */ 758typedef struct l2arc_dev { 759 vdev_t *l2ad_vdev; /* vdev */ 760 spa_t *l2ad_spa; /* spa */ 761 uint64_t l2ad_hand; /* next write location */ 762 uint64_t l2ad_start; /* first addr on device */ 763 uint64_t l2ad_end; /* last addr on device */ 764 uint64_t l2ad_evict; /* last addr eviction reached */ 765 boolean_t l2ad_first; /* first sweep through */ 766 boolean_t l2ad_writing; /* currently writing */ 767 list_t *l2ad_buflist; /* buffer list */ 768 list_node_t l2ad_node; /* device list node */ 769} l2arc_dev_t; 770 771static list_t L2ARC_dev_list; /* device list */ 772static list_t *l2arc_dev_list; /* device list pointer */ 773static kmutex_t l2arc_dev_mtx; /* device list mutex */ 774static l2arc_dev_t *l2arc_dev_last; /* last device used */ 775static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */ 776static list_t L2ARC_free_on_write; /* free after write buf list */ 777static list_t *l2arc_free_on_write; /* free after write list ptr */ 778static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ 779static uint64_t l2arc_ndev; /* number of devices */ 780 781typedef struct l2arc_read_callback { 782 arc_buf_t *l2rcb_buf; /* read buffer */ 783 spa_t *l2rcb_spa; /* spa */ 784 blkptr_t l2rcb_bp; /* original blkptr */ 785 zbookmark_t l2rcb_zb; /* original bookmark */ 786 int l2rcb_flags; /* original flags */ 787 enum zio_compress l2rcb_compress; /* applied compress */ 788} l2arc_read_callback_t; 789 790typedef struct l2arc_write_callback { 791 l2arc_dev_t *l2wcb_dev; /* device info */ 792 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ 793} l2arc_write_callback_t; 794 795struct l2arc_buf_hdr { 796 /* protected by arc_buf_hdr mutex */ 797 l2arc_dev_t *b_dev; /* L2ARC device */ 798 uint64_t b_daddr; /* disk address, offset byte */ 799 /* compression applied to buffer data */ 800 enum zio_compress b_compress; 801 /* real alloc'd buffer size depending on b_compress applied */ 802 int b_asize; 803 /* temporary buffer holder for in-flight compressed data */ 804 void *b_tmp_cdata; 805}; 806 807typedef struct l2arc_data_free { 808 /* protected by l2arc_free_on_write_mtx */ 809 void *l2df_data; 810 size_t l2df_size; 811 void (*l2df_func)(void *, size_t); 812 list_node_t l2df_list_node; 813} l2arc_data_free_t; 814 815static kmutex_t l2arc_feed_thr_lock; 816static kcondvar_t l2arc_feed_thr_cv; 817static uint8_t l2arc_thread_exit; 818 819static void l2arc_read_done(zio_t *zio); 820static void l2arc_hdr_stat_add(void); 821static void l2arc_hdr_stat_remove(void); 822 823static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr); 824static void l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, 825 enum zio_compress c); 826static void l2arc_release_cdata_buf(arc_buf_hdr_t *ab); 827 828static uint64_t 829buf_hash(uint64_t spa, const 
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spa>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_cksum0 == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
	hdr->b_dva.dva_word[0] = 0;
	hdr->b_dva.dva_word[1] = 0;
	hdr->b_birth = 0;
	hdr->b_cksum0 = 0;
}

static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}
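/*
 * Typical lookup pattern (a minimal sketch of a hypothetical caller;
 * arc_read() and arc_buf_add_ref() below do the real thing):
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr;
 *
 *	hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
 *	    &hash_lock);
 *	if (hdr != NULL) {
 *		// hit: hash_lock is returned held; drop it when done
 *		mutex_exit(hash_lock);
 *	}
 */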
/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);

	return (0);
}

/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_t));
	mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);

	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	ASSERT(BUF_EMPTY(buf));
	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);
	arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
}

/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
	arc_buf_t *buf = vbuf;

	mutex_destroy(&buf->b_evict_lock);
	arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
	 */
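	/*
	 * Worked example (illustrative): on a machine with 16 GB of
	 * physical memory, physmem * PAGESIZE == 2^34, so the loop below
	 * stops at hsize == 2^18 (256K buckets), since 2^18 * 64K == 2^34.
	 * With 8-byte pointers the table is then 2 MB, matching the
	 * 128KB-per-GB estimate above.
	 */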
	while (hsize * 65536 < (uint64_t)physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static int
arc_cksum_equal(arc_buf_t *buf)
{
	zio_cksum_t zc;
	int equal;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
	mutex_exit(&buf->b_hdr->b_freeze_lock);

	return (equal);
}

static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
#ifdef illumos
	arc_buf_watch(buf);
#endif /* illumos */
}

#ifdef illumos
#ifndef _KERNEL
typedef struct procctl {
	long cmd;
	prwatch_t prwatch;
} procctl_t;
#endif

/* ARGSUSED */
static void
arc_buf_unwatch(arc_buf_t *buf)
{
#ifndef _KERNEL
	if (arc_watch) {
		int result;
		procctl_t ctl;
		ctl.cmd = PCWATCH;
		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
		ctl.prwatch.pr_size = 0;
		ctl.prwatch.pr_wflags = 0;
		result = write(arc_procfd, &ctl, sizeof (ctl));
		ASSERT3U(result, ==, sizeof (ctl));
	}
#endif
}

/* ARGSUSED */
static void
arc_buf_watch(arc_buf_t *buf)
{
#ifndef _KERNEL
	if (arc_watch) {
		int result;
		procctl_t ctl;
		ctl.cmd = PCWATCH;
		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
		ctl.prwatch.pr_size = buf->b_hdr->b_size;
		ctl.prwatch.pr_wflags = WA_WRITE;
		result = write(arc_procfd, &ctl, sizeof (ctl));
		ASSERT3U(result, ==, sizeof (ctl));
	}
#endif
}
#endif /* illumos */
void
arc_buf_thaw(arc_buf_t *buf)
{
	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_state != arc_anon)
			panic("modifying non-anon buffer!");
		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
			panic("modifying buffer while i/o in progress!");
		arc_cksum_verify(buf);
	}

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}

	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_thawed)
			kmem_free(buf->b_hdr->b_thawed, 1);
		buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
	}

	mutex_exit(&buf->b_hdr->b_freeze_lock);

#ifdef illumos
	arc_buf_unwatch(buf);
#endif /* illumos */
}

void
arc_buf_freeze(arc_buf_t *buf)
{
	kmutex_t *hash_lock;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf, B_FALSE);
	mutex_exit(hash_lock);

}

static void
get_buf_info(arc_buf_hdr_t *ab, arc_state_t *state, list_t **list, kmutex_t **lock)
{
	uint64_t buf_hashid = buf_hash(ab->b_spa, &ab->b_dva, ab->b_birth);

	if (ab->b_type == ARC_BUFC_METADATA)
		buf_hashid &= (ARC_BUFC_NUMMETADATALISTS - 1);
	else {
		buf_hashid &= (ARC_BUFC_NUMDATALISTS - 1);
		buf_hashid += ARC_BUFC_NUMMETADATALISTS;
	}

	*list = &state->arcs_lists[buf_hashid];
	*lock = ARCS_LOCK(state, buf_hashid);
}


static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc_anon)) {
		uint64_t delta = ab->b_size * ab->b_datacnt;
		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
		list_t *list;
		kmutex_t *lock;

		get_buf_info(ab, ab->b_state, &list, &lock);
		ASSERT(!MUTEX_HELD(lock));
		mutex_enter(lock);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT0(ab->b_datacnt);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(*size, >=, delta);
		atomic_add_64(size, -delta);
		mutex_exit(lock);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		uint64_t *size = &state->arcs_lsize[ab->b_type];
		list_t *list;
		kmutex_t *lock;

		get_buf_info(ab, state, &list, &lock);
		ASSERT(!MUTEX_HELD(lock));
		mutex_enter(lock);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(list, ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(size, ab->b_size * ab->b_datacnt);
		mutex_exit(lock);
	}
	return (cnt);
}
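/*
 * Note the symmetry above (a summary, not new behavior): taking the
 * first reference on a buffer in a non-anonymous state pulls it off its
 * state's evictable list and subtracts from arcs_lsize, while dropping
 * the last reference re-inserts it at the head of the list and adds the
 * size back, making the buffer evictable again.
 */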
/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;
	list_t *list;
	kmutex_t *lock;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT3P(new_state, !=, old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
	ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex;
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			get_buf_info(ab, old_state, &list, &lock);
			use_mutex = !MUTEX_HELD(lock);
			if (use_mutex)
				mutex_enter(lock);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(list, ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-zero datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(lock);
		}
		if (new_state != arc_anon) {
			int use_mutex;
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			get_buf_info(ab, new_state, &list, &lock);
			use_mutex = !MUTEX_HELD(lock);
			if (use_mutex)
				mutex_enter(lock);

			list_insert_head(list, ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);

			if (use_mutex)
				mutex_exit(lock);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
		buf_hash_remove(ab);

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;

	/* adjust l2arc hdr stats */
	if (new_state == arc_l2c_only)
		l2arc_hdr_stat_add();
	else if (old_state == arc_l2c_only)
		l2arc_hdr_stat_remove();
}

void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, space);
		break;
	}

	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, -space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, -space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, -space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
		break;
	}

	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}

void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa_load_guid(spa);
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}

static char *arc_onloan_tag = "onloan";

/*
 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
 * flight data by arc_tempreserve_space() until they are "returned". Loaned
 * buffers must be returned to the arc before they can be used by the DMU or
 * freed.
 */
arc_buf_t *
arc_loan_buf(spa_t *spa, int size)
{
	arc_buf_t *buf;

	buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);

	atomic_add_64(&arc_loaned_bytes, size);
	return (buf);
}
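/*
 * Hypothetical usage sketch of the loaning interface (the real callers
 * live in the DMU, e.g. around dmu_request_arcbuf()):
 *
 *	arc_buf_t *abuf = arc_loan_buf(spa, size);
 *	... fill abuf->b_data with dirty data ...
 *	arc_return_buf(abuf, tag);	// give it back before use or free
 *
 * While loaned, the buffer is accounted in arc_loaned_bytes rather than
 * treated as in-flight data by arc_tempreserve_space().
 */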
/*
 * Return a loaned arc buffer to the arc.
 */
void
arc_return_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT(buf->b_data != NULL);
	(void) refcount_add(&hdr->b_refcnt, tag);
	(void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);

	atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
}

/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr;

	ASSERT(buf->b_data != NULL);
	hdr = buf->b_hdr;
	(void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
	(void) refcount_remove(&hdr->b_refcnt, tag);
	buf->b_efunc = NULL;
	buf->b_private = NULL;

	atomic_add_64(&arc_loaned_bytes, hdr->b_size);
}

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	ASSERT(hdr->b_state != arc_anon);

	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);

	/*
	 * This buffer already exists in the arc so create a duplicate
	 * copy for the caller.  If the buffer is associated with user data
	 * then track the size and number of duplicates.  These stats will be
	 * updated as duplicate buffers are created and destroyed.
	 */
	if (hdr->b_type == ARC_BUFC_DATA) {
		ARCSTAT_BUMP(arcstat_duplicate_buffers);
		ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
	}
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is evicted.  Callers
	 * must verify b_data != NULL to know if the add_ref
	 * was successful.
	 */
	mutex_enter(&buf->b_evict_lock);
	if (buf->b_data == NULL) {
		mutex_exit(&buf->b_evict_lock);
		return;
	}
	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	mutex_exit(&buf->b_evict_lock);

	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}

/*
 * Free the arc data buffer.  If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	if (HDR_L2_WRITING(hdr)) {
		l2arc_data_free_t *df;
		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
		df->l2df_data = buf->b_data;
		df->l2df_size = hdr->b_size;
		df->l2df_func = free_func;
		mutex_enter(&l2arc_free_on_write_mtx);
		list_insert_head(l2arc_free_on_write, df);
		mutex_exit(&l2arc_free_on_write_mtx);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		free_func(buf->b_data, hdr->b_size);
	}
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);
#ifdef illumos
		arc_buf_unwatch(buf);
#endif /* illumos */

		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				arc_buf_data_free(buf, zio_buf_free);
				arc_space_return(size, ARC_SPACE_DATA);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				arc_buf_data_free(buf, zio_data_buf_free);
				ARCSTAT_INCR(arcstat_data_size, -size);
				atomic_add_64(&arc_size, -size);
			}
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;

		/*
		 * If we're destroying a duplicate buffer make sure
		 * that the appropriate statistics are updated.
		 */
		if (buf->b_hdr->b_datacnt > 1 &&
		    buf->b_hdr->b_type == ARC_BUFC_DATA) {
			ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
			ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
		}
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;
	buf->b_next = NULL;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
	l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;

	if (l2hdr != NULL) {
		boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
		/*
		 * To prevent arc_free() and l2arc_evict() from
		 * attempting to free the same buffer at the same time,
		 * a FREE_IN_PROGRESS flag is given to arc_free() to
		 * give it priority.  l2arc_evict() can't destroy this
		 * header while we are waiting on l2arc_buflist_mtx.
		 *
		 * The hdr may be removed from l2ad_buflist before we
		 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
		 */
1735 */ 1736 if (!buflist_held) { 1737 mutex_enter(&l2arc_buflist_mtx); 1738 l2hdr = hdr->b_l2hdr; 1739 } 1740 1741 if (l2hdr != NULL) { 1742 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr, 1743 hdr->b_size, 0); 1744 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 1745 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 1746 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize); 1747 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 1748 if (hdr->b_state == arc_l2c_only) 1749 l2arc_hdr_stat_remove(); 1750 hdr->b_l2hdr = NULL; 1751 } 1752 1753 if (!buflist_held) 1754 mutex_exit(&l2arc_buflist_mtx); 1755 } 1756 1757 if (!BUF_EMPTY(hdr)) { 1758 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1759 buf_discard_identity(hdr); 1760 } 1761 while (hdr->b_buf) { 1762 arc_buf_t *buf = hdr->b_buf; 1763 1764 if (buf->b_efunc) { 1765 mutex_enter(&arc_eviction_mtx); 1766 mutex_enter(&buf->b_evict_lock); 1767 ASSERT(buf->b_hdr != NULL); 1768 arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 1769 hdr->b_buf = buf->b_next; 1770 buf->b_hdr = &arc_eviction_hdr; 1771 buf->b_next = arc_eviction_list; 1772 arc_eviction_list = buf; 1773 mutex_exit(&buf->b_evict_lock); 1774 mutex_exit(&arc_eviction_mtx); 1775 } else { 1776 arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 1777 } 1778 } 1779 if (hdr->b_freeze_cksum != NULL) { 1780 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1781 hdr->b_freeze_cksum = NULL; 1782 } 1783 if (hdr->b_thawed) { 1784 kmem_free(hdr->b_thawed, 1); 1785 hdr->b_thawed = NULL; 1786 } 1787 1788 ASSERT(!list_link_active(&hdr->b_arc_node)); 1789 ASSERT3P(hdr->b_hash_next, ==, NULL); 1790 ASSERT3P(hdr->b_acb, ==, NULL); 1791 kmem_cache_free(hdr_cache, hdr); 1792} 1793 1794void 1795arc_buf_free(arc_buf_t *buf, void *tag) 1796{ 1797 arc_buf_hdr_t *hdr = buf->b_hdr; 1798 int hashed = hdr->b_state != arc_anon; 1799 1800 ASSERT(buf->b_efunc == NULL); 1801 ASSERT(buf->b_data != NULL); 1802 1803 if (hashed) { 1804 kmutex_t *hash_lock = HDR_LOCK(hdr); 1805 1806 mutex_enter(hash_lock); 1807 hdr = buf->b_hdr; 1808 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1809 1810 (void) remove_reference(hdr, hash_lock, tag); 1811 if (hdr->b_datacnt > 1) { 1812 arc_buf_destroy(buf, FALSE, TRUE); 1813 } else { 1814 ASSERT(buf == hdr->b_buf); 1815 ASSERT(buf->b_efunc == NULL); 1816 hdr->b_flags |= ARC_BUF_AVAILABLE; 1817 } 1818 mutex_exit(hash_lock); 1819 } else if (HDR_IO_IN_PROGRESS(hdr)) { 1820 int destroy_hdr; 1821 /* 1822 * We are in the middle of an async write. Don't destroy 1823 * this buffer unless the write completes before we finish 1824 * decrementing the reference count. 
1825 */ 1826 mutex_enter(&arc_eviction_mtx); 1827 (void) remove_reference(hdr, NULL, tag); 1828 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1829 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 1830 mutex_exit(&arc_eviction_mtx); 1831 if (destroy_hdr) 1832 arc_hdr_destroy(hdr); 1833 } else { 1834 if (remove_reference(hdr, NULL, tag) > 0) 1835 arc_buf_destroy(buf, FALSE, TRUE); 1836 else 1837 arc_hdr_destroy(hdr); 1838 } 1839} 1840 1841boolean_t 1842arc_buf_remove_ref(arc_buf_t *buf, void* tag) 1843{ 1844 arc_buf_hdr_t *hdr = buf->b_hdr; 1845 kmutex_t *hash_lock = HDR_LOCK(hdr); 1846 boolean_t no_callback = (buf->b_efunc == NULL); 1847 1848 if (hdr->b_state == arc_anon) { 1849 ASSERT(hdr->b_datacnt == 1); 1850 arc_buf_free(buf, tag); 1851 return (no_callback); 1852 } 1853 1854 mutex_enter(hash_lock); 1855 hdr = buf->b_hdr; 1856 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1857 ASSERT(hdr->b_state != arc_anon); 1858 ASSERT(buf->b_data != NULL); 1859 1860 (void) remove_reference(hdr, hash_lock, tag); 1861 if (hdr->b_datacnt > 1) { 1862 if (no_callback) 1863 arc_buf_destroy(buf, FALSE, TRUE); 1864 } else if (no_callback) { 1865 ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 1866 ASSERT(buf->b_efunc == NULL); 1867 hdr->b_flags |= ARC_BUF_AVAILABLE; 1868 } 1869 ASSERT(no_callback || hdr->b_datacnt > 1 || 1870 refcount_is_zero(&hdr->b_refcnt)); 1871 mutex_exit(hash_lock); 1872 return (no_callback); 1873} 1874 1875int 1876arc_buf_size(arc_buf_t *buf) 1877{ 1878 return (buf->b_hdr->b_size); 1879} 1880 1881/* 1882 * Called from the DMU to determine if the current buffer should be 1883 * evicted. In order to ensure proper locking, the eviction must be initiated 1884 * from the DMU. Return true if the buffer is associated with user data and 1885 * duplicate buffers still exist. 1886 */ 1887boolean_t 1888arc_buf_eviction_needed(arc_buf_t *buf) 1889{ 1890 arc_buf_hdr_t *hdr; 1891 boolean_t evict_needed = B_FALSE; 1892 1893 if (zfs_disable_dup_eviction) 1894 return (B_FALSE); 1895 1896 mutex_enter(&buf->b_evict_lock); 1897 hdr = buf->b_hdr; 1898 if (hdr == NULL) { 1899 /* 1900 * We are in arc_do_user_evicts(); let that function 1901 * perform the eviction. 1902 */ 1903 ASSERT(buf->b_data == NULL); 1904 mutex_exit(&buf->b_evict_lock); 1905 return (B_FALSE); 1906 } else if (buf->b_data == NULL) { 1907 /* 1908 * We have already been added to the arc eviction list; 1909 * recommend eviction. 1910 */ 1911 ASSERT3P(hdr, ==, &arc_eviction_hdr); 1912 mutex_exit(&buf->b_evict_lock); 1913 return (B_TRUE); 1914 } 1915 1916 if (hdr->b_datacnt > 1 && hdr->b_type == ARC_BUFC_DATA) 1917 evict_needed = B_TRUE; 1918 1919 mutex_exit(&buf->b_evict_lock); 1920 return (evict_needed); 1921} 1922 1923/* 1924 * Evict buffers from list until we've removed the specified number of 1925 * bytes. Move the removed buffers to the appropriate evict state. 1926 * If the recycle flag is set, then attempt to "recycle" a buffer: 1927 * - look for a buffer to evict that is `bytes' long. 1928 * - return the data block from this buffer rather than freeing it. 1929 * This flag is used by callers that are trying to make space for a 1930 * new buffer in a full arc cache. 1931 * 1932 * This function makes a "best effort". It skips over any buffers 1933 * it can't get a hash_lock on, and so may not catch all candidates. 1934 * It may also return without evicting as much space as requested. 
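 *
 * A typical recycle call, as made from arc_get_data_buf():
 *
 *	buf->b_data = arc_evict(state, 0, size, TRUE, type);
 *
 * A non-NULL return is a stolen data block of exactly `size' bytes;
 * NULL means no same-sized candidate was found and the caller must
 * allocate fresh memory instead.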
1935 */ 1936static void * 1937arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle, 1938 arc_buf_contents_t type) 1939{ 1940 arc_state_t *evicted_state; 1941 uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 1942 int64_t bytes_remaining; 1943 arc_buf_hdr_t *ab, *ab_prev = NULL; 1944 list_t *evicted_list, *list, *evicted_list_start, *list_start; 1945 kmutex_t *lock, *evicted_lock; 1946 kmutex_t *hash_lock; 1947 boolean_t have_lock; 1948 void *stolen = NULL; 1949 arc_buf_hdr_t marker = { 0 }; 1950 int count = 0; 1951 static int evict_metadata_offset, evict_data_offset; 1952 int i, idx, offset, list_count, lists; 1953 1954 ASSERT(state == arc_mru || state == arc_mfu); 1955 1956 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 1957 1958 if (type == ARC_BUFC_METADATA) { 1959 offset = 0; 1960 list_count = ARC_BUFC_NUMMETADATALISTS; 1961 list_start = &state->arcs_lists[0]; 1962 evicted_list_start = &evicted_state->arcs_lists[0]; 1963 idx = evict_metadata_offset; 1964 } else { 1965 offset = ARC_BUFC_NUMMETADATALISTS; 1966 list_start = &state->arcs_lists[offset]; 1967 evicted_list_start = &evicted_state->arcs_lists[offset]; 1968 list_count = ARC_BUFC_NUMDATALISTS; 1969 idx = evict_data_offset; 1970 } 1971 bytes_remaining = evicted_state->arcs_lsize[type]; 1972 lists = 0; 1973 1974evict_start: 1975 list = &list_start[idx]; 1976 evicted_list = &evicted_list_start[idx]; 1977 lock = ARCS_LOCK(state, (offset + idx)); 1978 evicted_lock = ARCS_LOCK(evicted_state, (offset + idx)); 1979 1980 mutex_enter(lock); 1981 mutex_enter(evicted_lock); 1982 1983 for (ab = list_tail(list); ab; ab = ab_prev) { 1984 ab_prev = list_prev(list, ab); 1985 bytes_remaining -= (ab->b_size * ab->b_datacnt); 1986 /* prefetch buffers have a minimum lifespan */ 1987 if (HDR_IO_IN_PROGRESS(ab) || 1988 (spa && ab->b_spa != spa) || 1989 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 1990 ddi_get_lbolt() - ab->b_arc_access < 1991 arc_min_prefetch_lifespan)) { 1992 skipped++; 1993 continue; 1994 } 1995 /* "lookahead" for better eviction candidate */ 1996 if (recycle && ab->b_size != bytes && 1997 ab_prev && ab_prev->b_size == bytes) 1998 continue; 1999 2000 /* ignore markers */ 2001 if (ab->b_spa == 0) 2002 continue; 2003 2004 /* 2005 * It may take a long time to evict all the bufs requested. 2006 * To avoid blocking all arc activity, periodically drop 2007 * the arcs_mtx and give other threads a chance to run 2008 * before reacquiring the lock. 2009 * 2010 * If we are looking for a buffer to recycle, we are in 2011 * the hot code path, so don't sleep. 
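 *
 * The drop/reacquire is done around a marker header so the scan can
 * resume where it left off; in outline:
 *
 *	list_insert_after(list, ab, &marker);
 *	mutex_exit(evicted_lock);
 *	mutex_exit(lock);
 *	kpreempt(KPREEMPT_SYNC);	(let other threads run)
 *	mutex_enter(lock);
 *	mutex_enter(evicted_lock);
 *	ab_prev = list_prev(list, &marker);
 *	list_remove(list, &marker);
 *
 * Markers are recognized (and skipped) by their b_spa of 0.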
2012 */ 2013 if (!recycle && count++ > arc_evict_iterations) { 2014 list_insert_after(list, ab, &marker); 2015 mutex_exit(evicted_lock); 2016 mutex_exit(lock); 2017 kpreempt(KPREEMPT_SYNC); 2018 mutex_enter(lock); 2019 mutex_enter(evicted_lock); 2020 ab_prev = list_prev(list, &marker); 2021 list_remove(list, &marker); 2022 count = 0; 2023 continue; 2024 } 2025 2026 hash_lock = HDR_LOCK(ab); 2027 have_lock = MUTEX_HELD(hash_lock); 2028 if (have_lock || mutex_tryenter(hash_lock)) { 2029 ASSERT0(refcount_count(&ab->b_refcnt)); 2030 ASSERT(ab->b_datacnt > 0); 2031 while (ab->b_buf) { 2032 arc_buf_t *buf = ab->b_buf; 2033 if (!mutex_tryenter(&buf->b_evict_lock)) { 2034 missed += 1; 2035 break; 2036 } 2037 if (buf->b_data) { 2038 bytes_evicted += ab->b_size; 2039 if (recycle && ab->b_type == type && 2040 ab->b_size == bytes && 2041 !HDR_L2_WRITING(ab)) { 2042 stolen = buf->b_data; 2043 recycle = FALSE; 2044 } 2045 } 2046 if (buf->b_efunc) { 2047 mutex_enter(&arc_eviction_mtx); 2048 arc_buf_destroy(buf, 2049 buf->b_data == stolen, FALSE); 2050 ab->b_buf = buf->b_next; 2051 buf->b_hdr = &arc_eviction_hdr; 2052 buf->b_next = arc_eviction_list; 2053 arc_eviction_list = buf; 2054 mutex_exit(&arc_eviction_mtx); 2055 mutex_exit(&buf->b_evict_lock); 2056 } else { 2057 mutex_exit(&buf->b_evict_lock); 2058 arc_buf_destroy(buf, 2059 buf->b_data == stolen, TRUE); 2060 } 2061 } 2062 2063 if (ab->b_l2hdr) { 2064 ARCSTAT_INCR(arcstat_evict_l2_cached, 2065 ab->b_size); 2066 } else { 2067 if (l2arc_write_eligible(ab->b_spa, ab)) { 2068 ARCSTAT_INCR(arcstat_evict_l2_eligible, 2069 ab->b_size); 2070 } else { 2071 ARCSTAT_INCR( 2072 arcstat_evict_l2_ineligible, 2073 ab->b_size); 2074 } 2075 } 2076 2077 if (ab->b_datacnt == 0) { 2078 arc_change_state(evicted_state, ab, hash_lock); 2079 ASSERT(HDR_IN_HASH_TABLE(ab)); 2080 ab->b_flags |= ARC_IN_HASH_TABLE; 2081 ab->b_flags &= ~ARC_BUF_AVAILABLE; 2082 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 2083 } 2084 if (!have_lock) 2085 mutex_exit(hash_lock); 2086 if (bytes >= 0 && bytes_evicted >= bytes) 2087 break; 2088 if (bytes_remaining > 0) { 2089 mutex_exit(evicted_lock); 2090 mutex_exit(lock); 2091 idx = ((idx + 1) & (list_count - 1)); 2092 lists++; 2093 goto evict_start; 2094 } 2095 } else { 2096 missed += 1; 2097 } 2098 } 2099 2100 mutex_exit(evicted_lock); 2101 mutex_exit(lock); 2102 2103 idx = ((idx + 1) & (list_count - 1)); 2104 lists++; 2105 2106 if (bytes_evicted < bytes) { 2107 if (lists < list_count) 2108 goto evict_start; 2109 else 2110 dprintf("only evicted %lld bytes from %x", 2111 (longlong_t)bytes_evicted, state); 2112 } 2113 if (type == ARC_BUFC_METADATA) 2114 evict_metadata_offset = idx; 2115 else 2116 evict_data_offset = idx; 2117 2118 if (skipped) 2119 ARCSTAT_INCR(arcstat_evict_skip, skipped); 2120 2121 if (missed) 2122 ARCSTAT_INCR(arcstat_mutex_miss, missed); 2123 2124 /* 2125 * Note: we have just evicted some data into the ghost state, 2126 * potentially putting the ghost size over the desired size. Rather 2127 * than evicting from the ghost list in this hot code path, leave 2128 * this chore to the arc_reclaim_thread(). 2129 */ 2130 2131 if (stolen) 2132 ARCSTAT_BUMP(arcstat_stolen); 2133 return (stolen); 2134} 2135 2136/* 2137 * Remove buffers from list until we've removed the specified number of 2138 * bytes. Destroy the buffers that are removed.
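 *
 * A negative byte count means "evict everything evictable"; for
 * example, arc_flush() empties a ghost state with:
 *
 *	arc_evict_ghost(arc_mru_ghost, guid, -1);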
2139 */ 2140static void 2141arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes) 2142{ 2143 arc_buf_hdr_t *ab, *ab_prev; 2144 arc_buf_hdr_t marker = { 0 }; 2145 list_t *list, *list_start; 2146 kmutex_t *hash_lock, *lock; 2147 uint64_t bytes_deleted = 0; 2148 uint64_t bufs_skipped = 0; 2149 int count = 0; 2150 static int evict_offset; 2151 int list_count, idx = evict_offset; 2152 int offset, lists = 0; 2153 2154 ASSERT(GHOST_STATE(state)); 2155 2156 /* 2157 * data lists come after metadata lists 2158 */ 2159 list_start = &state->arcs_lists[ARC_BUFC_NUMMETADATALISTS]; 2160 list_count = ARC_BUFC_NUMDATALISTS; 2161 offset = ARC_BUFC_NUMMETADATALISTS; 2162 2163evict_start: 2164 list = &list_start[idx]; 2165 lock = ARCS_LOCK(state, idx + offset); 2166 2167 mutex_enter(lock); 2168 for (ab = list_tail(list); ab; ab = ab_prev) { 2169 ab_prev = list_prev(list, ab); 2170 if (ab->b_type > ARC_BUFC_NUMTYPES) 2171 panic("invalid ab=%p", (void *)ab); 2172 if (spa && ab->b_spa != spa) 2173 continue; 2174 2175 /* ignore markers */ 2176 if (ab->b_spa == 0) 2177 continue; 2178 2179 hash_lock = HDR_LOCK(ab); 2180 /* caller may be trying to modify this buffer, skip it */ 2181 if (MUTEX_HELD(hash_lock)) 2182 continue; 2183 2184 /* 2185 * It may take a long time to evict all the bufs requested. 2186 * To avoid blocking all arc activity, periodically drop 2187 * the arcs_mtx and give other threads a chance to run 2188 * before reacquiring the lock. 2189 */ 2190 if (count++ > arc_evict_iterations) { 2191 list_insert_after(list, ab, &marker); 2192 mutex_exit(lock); 2193 kpreempt(KPREEMPT_SYNC); 2194 mutex_enter(lock); 2195 ab_prev = list_prev(list, &marker); 2196 list_remove(list, &marker); 2197 count = 0; 2198 continue; 2199 } 2200 if (mutex_tryenter(hash_lock)) { 2201 ASSERT(!HDR_IO_IN_PROGRESS(ab)); 2202 ASSERT(ab->b_buf == NULL); 2203 ARCSTAT_BUMP(arcstat_deleted); 2204 bytes_deleted += ab->b_size; 2205 2206 if (ab->b_l2hdr != NULL) { 2207 /* 2208 * This buffer is cached on the 2nd Level ARC; 2209 * don't destroy the header. 2210 */ 2211 arc_change_state(arc_l2c_only, ab, hash_lock); 2212 mutex_exit(hash_lock); 2213 } else { 2214 arc_change_state(arc_anon, ab, hash_lock); 2215 mutex_exit(hash_lock); 2216 arc_hdr_destroy(ab); 2217 } 2218 2219 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 2220 if (bytes >= 0 && bytes_deleted >= bytes) 2221 break; 2222 } else if (bytes < 0) { 2223 /* 2224 * Insert a list marker and then wait for the 2225 * hash lock to become available. Once it's 2226 * available, restart from where we left off.
2227 */ 2228 list_insert_after(list, ab, &marker); 2229 mutex_exit(lock); 2230 mutex_enter(hash_lock); 2231 mutex_exit(hash_lock); 2232 mutex_enter(lock); 2233 ab_prev = list_prev(list, &marker); 2234 list_remove(list, &marker); 2235 } else { 2236 bufs_skipped += 1; 2237 } 2238 2239 } 2240 mutex_exit(lock); 2241 idx = ((idx + 1) & (ARC_BUFC_NUMDATALISTS - 1)); 2242 lists++; 2243 2244 if (lists < list_count) 2245 goto evict_start; 2246 2247 evict_offset = idx; 2248 if ((uintptr_t)list > (uintptr_t)&state->arcs_lists[ARC_BUFC_NUMMETADATALISTS] && 2249 (bytes < 0 || bytes_deleted < bytes)) { 2250 list_start = &state->arcs_lists[0]; 2251 list_count = ARC_BUFC_NUMMETADATALISTS; 2252 offset = lists = 0; 2253 goto evict_start; 2254 } 2255 2256 if (bufs_skipped) { 2257 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 2258 ASSERT(bytes >= 0); 2259 } 2260 2261 if (bytes_deleted < bytes) 2262 dprintf("only deleted %lld bytes from %p", 2263 (longlong_t)bytes_deleted, state); 2264} 2265 2266static void 2267arc_adjust(void) 2268{ 2269 int64_t adjustment, delta; 2270 2271 /* 2272 * Adjust MRU size 2273 */ 2274 2275 adjustment = MIN((int64_t)(arc_size - arc_c), 2276 (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used - 2277 arc_p)); 2278 2279 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { 2280 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment); 2281 (void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA); 2282 adjustment -= delta; 2283 } 2284 2285 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { 2286 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment); 2287 (void) arc_evict(arc_mru, 0, delta, FALSE, 2288 ARC_BUFC_METADATA); 2289 } 2290 2291 /* 2292 * Adjust MFU size 2293 */ 2294 2295 adjustment = arc_size - arc_c; 2296 2297 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 2298 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]); 2299 (void) arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_DATA); 2300 adjustment -= delta; 2301 } 2302 2303 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 2304 int64_t delta = MIN(adjustment, 2305 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]); 2306 (void) arc_evict(arc_mfu, 0, delta, FALSE, 2307 ARC_BUFC_METADATA); 2308 } 2309 2310 /* 2311 * Adjust ghost lists 2312 */ 2313 2314 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c; 2315 2316 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) { 2317 delta = MIN(arc_mru_ghost->arcs_size, adjustment); 2318 arc_evict_ghost(arc_mru_ghost, 0, delta); 2319 } 2320 2321 adjustment = 2322 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c; 2323 2324 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) { 2325 delta = MIN(arc_mfu_ghost->arcs_size, adjustment); 2326 arc_evict_ghost(arc_mfu_ghost, 0, delta); 2327 } 2328} 2329 2330static void 2331arc_do_user_evicts(void) 2332{ 2333 static arc_buf_t *tmp_arc_eviction_list; 2334 2335 /* 2336 * Move list over to avoid LOR 2337 */ 2338restart: 2339 mutex_enter(&arc_eviction_mtx); 2340 tmp_arc_eviction_list = arc_eviction_list; 2341 arc_eviction_list = NULL; 2342 mutex_exit(&arc_eviction_mtx); 2343 2344 while (tmp_arc_eviction_list != NULL) { 2345 arc_buf_t *buf = tmp_arc_eviction_list; 2346 tmp_arc_eviction_list = buf->b_next; 2347 mutex_enter(&buf->b_evict_lock); 2348 buf->b_hdr = NULL; 2349 mutex_exit(&buf->b_evict_lock); 2350 2351 if (buf->b_efunc != NULL) 2352 VERIFY(buf->b_efunc(buf) == 0); 2353 2354 buf->b_efunc = NULL; 2355 buf->b_private = NULL; 2356 
kmem_cache_free(buf_cache, buf); 2357 } 2358 2359 if (arc_eviction_list != NULL) 2360 goto restart; 2361} 2362 2363/* 2364 * Flush all *evictable* data from the cache for the given spa. 2365 * NOTE: this will not touch "active" (i.e. referenced) data. 2366 */ 2367void 2368arc_flush(spa_t *spa) 2369{ 2370 uint64_t guid = 0; 2371 2372 if (spa) 2373 guid = spa_load_guid(spa); 2374 2375 while (arc_mru->arcs_lsize[ARC_BUFC_DATA]) { 2376 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA); 2377 if (spa) 2378 break; 2379 } 2380 while (arc_mru->arcs_lsize[ARC_BUFC_METADATA]) { 2381 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA); 2382 if (spa) 2383 break; 2384 } 2385 while (arc_mfu->arcs_lsize[ARC_BUFC_DATA]) { 2386 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA); 2387 if (spa) 2388 break; 2389 } 2390 while (arc_mfu->arcs_lsize[ARC_BUFC_METADATA]) { 2391 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA); 2392 if (spa) 2393 break; 2394 } 2395 2396 arc_evict_ghost(arc_mru_ghost, guid, -1); 2397 arc_evict_ghost(arc_mfu_ghost, guid, -1); 2398 2399 mutex_enter(&arc_reclaim_thr_lock); 2400 arc_do_user_evicts(); 2401 mutex_exit(&arc_reclaim_thr_lock); 2402 ASSERT(spa || arc_eviction_list == NULL); 2403} 2404 2405void 2406arc_shrink(void) 2407{ 2408 if (arc_c > arc_c_min) { 2409 uint64_t to_free; 2410 2411#ifdef _KERNEL 2412 to_free = arc_c >> arc_shrink_shift; 2413#else 2414 to_free = arc_c >> arc_shrink_shift; 2415#endif 2416 if (arc_c > arc_c_min + to_free) 2417 atomic_add_64(&arc_c, -to_free); 2418 else 2419 arc_c = arc_c_min; 2420 2421 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 2422 if (arc_c > arc_size) 2423 arc_c = MAX(arc_size, arc_c_min); 2424 if (arc_p > arc_c) 2425 arc_p = (arc_c >> 1); 2426 ASSERT(arc_c >= arc_c_min); 2427 ASSERT((int64_t)arc_p >= 0); 2428 } 2429 2430 if (arc_size > arc_c) 2431 arc_adjust(); 2432} 2433 2434static int needfree = 0; 2435 2436static int 2437arc_reclaim_needed(void) 2438{ 2439 2440#ifdef _KERNEL 2441 2442 if (needfree) 2443 return (1); 2444 2445 /* 2446 * Cooperate with pagedaemon when it's time for it to scan 2447 * and reclaim some pages. 2448 */ 2449 if (vm_paging_needed()) 2450 return (1); 2451 2452#ifdef sun 2453 /* 2454 * take 'desfree' extra pages, so we reclaim sooner, rather than later 2455 */ 2456 extra = desfree; 2457 2458 /* 2459 * check that we're out of range of the pageout scanner. It starts to 2460 * schedule paging if freemem is less than lotsfree and needfree. 2461 * lotsfree is the high-water mark for pageout, and needfree is the 2462 * number of needed free pages. We add extra pages here to make sure 2463 * the scanner doesn't start up while we're freeing memory. 2464 */ 2465 if (freemem < lotsfree + needfree + extra) 2466 return (1); 2467 2468 /* 2469 * check to make sure that swapfs has enough space so that anon 2470 * reservations can still succeed. anon_resvmem() checks that the 2471 * availrmem is greater than swapfs_minfree, and the number of reserved 2472 * swap pages. We also add a bit of extra here just to prevent 2473 * circumstances from getting really dire. 2474 */ 2475 if (availrmem < swapfs_minfree + swapfs_reserve + extra) 2476 return (1); 2477 2478#if defined(__i386) 2479 /* 2480 * If we're on an i386 platform, it's possible that we'll exhaust the 2481 * kernel heap space before we ever run out of available physical 2482 * memory. 
Most checks of the size of the heap_area compare against 2483 * tune.t_minarmem, which is the minimum available real memory that we 2484 * can have in the system. However, this is generally fixed at 25 pages 2485 * which is so low that it's useless. In this comparison, we seek to 2486 * calculate the total heap-size, and reclaim if more than 3/4ths of the 2487 * heap is allocated. (Or, in the calculation, if less than 1/4th is 2488 * free) 2489 */ 2490 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 2491 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 2492 return (1); 2493#endif 2494#else /* !sun */ 2495 if (kmem_used() > (kmem_size() * 3) / 4) 2496 return (1); 2497#endif /* sun */ 2498 2499#else 2500 if (spa_get_random(100) == 0) 2501 return (1); 2502#endif 2503 return (0); 2504} 2505 2506extern kmem_cache_t *zio_buf_cache[]; 2507extern kmem_cache_t *zio_data_buf_cache[]; 2508 2509static void 2510arc_kmem_reap_now(arc_reclaim_strategy_t strat) 2511{ 2512 size_t i; 2513 kmem_cache_t *prev_cache = NULL; 2514 kmem_cache_t *prev_data_cache = NULL; 2515 2516#ifdef _KERNEL 2517 if (arc_meta_used >= arc_meta_limit) { 2518 /* 2519 * We are exceeding our meta-data cache limit. 2520 * Purge some DNLC entries to release holds on meta-data. 2521 */ 2522 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 2523 } 2524#if defined(__i386) 2525 /* 2526 * Reclaim unused memory from all kmem caches. 2527 */ 2528 kmem_reap(); 2529#endif 2530#endif 2531 2532 /* 2533 * An aggressive reclamation will shrink the cache size as well as 2534 * reap free buffers from the arc kmem caches. 2535 */ 2536 if (strat == ARC_RECLAIM_AGGR) 2537 arc_shrink(); 2538 2539 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 2540 if (zio_buf_cache[i] != prev_cache) { 2541 prev_cache = zio_buf_cache[i]; 2542 kmem_cache_reap_now(zio_buf_cache[i]); 2543 } 2544 if (zio_data_buf_cache[i] != prev_data_cache) { 2545 prev_data_cache = zio_data_buf_cache[i]; 2546 kmem_cache_reap_now(zio_data_buf_cache[i]); 2547 } 2548 } 2549 kmem_cache_reap_now(buf_cache); 2550 kmem_cache_reap_now(hdr_cache); 2551} 2552 2553static void 2554arc_reclaim_thread(void *dummy __unused) 2555{ 2556 clock_t growtime = 0; 2557 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 2558 callb_cpr_t cpr; 2559 2560 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 2561 2562 mutex_enter(&arc_reclaim_thr_lock); 2563 while (arc_thread_exit == 0) { 2564 if (arc_reclaim_needed()) { 2565 2566 if (arc_no_grow) { 2567 if (last_reclaim == ARC_RECLAIM_CONS) { 2568 last_reclaim = ARC_RECLAIM_AGGR; 2569 } else { 2570 last_reclaim = ARC_RECLAIM_CONS; 2571 } 2572 } else { 2573 arc_no_grow = TRUE; 2574 last_reclaim = ARC_RECLAIM_AGGR; 2575 membar_producer(); 2576 } 2577 2578 /* reset the growth delay for every reclaim */ 2579 growtime = ddi_get_lbolt() + (arc_grow_retry * hz); 2580 2581 if (needfree && last_reclaim == ARC_RECLAIM_CONS) { 2582 /* 2583 * If needfree is TRUE our vm_lowmem hook 2584 * was called and in that case we must free some 2585 * memory, so switch to aggressive mode. 
2586 */ 2587 arc_no_grow = TRUE; 2588 last_reclaim = ARC_RECLAIM_AGGR; 2589 } 2590 arc_kmem_reap_now(last_reclaim); 2591 arc_warm = B_TRUE; 2592 2593 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) { 2594 arc_no_grow = FALSE; 2595 } 2596 2597 arc_adjust(); 2598 2599 if (arc_eviction_list != NULL) 2600 arc_do_user_evicts(); 2601 2602#ifdef _KERNEL 2603 if (needfree) { 2604 needfree = 0; 2605 wakeup(&needfree); 2606 } 2607#endif 2608 2609 /* block until needed, or one second, whichever is shorter */ 2610 CALLB_CPR_SAFE_BEGIN(&cpr); 2611 (void) cv_timedwait(&arc_reclaim_thr_cv, 2612 &arc_reclaim_thr_lock, hz); 2613 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 2614 } 2615 2616 arc_thread_exit = 0; 2617 cv_broadcast(&arc_reclaim_thr_cv); 2618 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 2619 thread_exit(); 2620} 2621 2622/* 2623 * Adapt arc info given the number of bytes we are trying to add and 2624 * the state that we are coming from. This function is only called 2625 * when we are adding new content to the cache. 2626 */ 2627static void 2628arc_adapt(int bytes, arc_state_t *state) 2629{ 2630 int mult; 2631 uint64_t arc_p_min = (arc_c >> arc_p_min_shift); 2632 2633 if (state == arc_l2c_only) 2634 return; 2635 2636 ASSERT(bytes > 0); 2637 /* 2638 * Adapt the target size of the MRU list: 2639 * - if we just hit in the MRU ghost list, then increase 2640 * the target size of the MRU list. 2641 * - if we just hit in the MFU ghost list, then increase 2642 * the target size of the MFU list by decreasing the 2643 * target size of the MRU list. 2644 */ 2645 if (state == arc_mru_ghost) { 2646 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 2647 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 2648 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */ 2649 2650 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); 2651 } else if (state == arc_mfu_ghost) { 2652 uint64_t delta; 2653 2654 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 2655 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 2656 mult = MIN(mult, 10); 2657 2658 delta = MIN(bytes * mult, arc_p); 2659 arc_p = MAX(arc_p_min, arc_p - delta); 2660 } 2661 ASSERT((int64_t)arc_p >= 0); 2662 2663 if (arc_reclaim_needed()) { 2664 cv_signal(&arc_reclaim_thr_cv); 2665 return; 2666 } 2667 2668 if (arc_no_grow) 2669 return; 2670 2671 if (arc_c >= arc_c_max) 2672 return; 2673 2674 /* 2675 * If we're within (2 * maxblocksize) bytes of the target 2676 * cache size, increment the target cache size 2677 */ 2678 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 2679 atomic_add_64(&arc_c, (int64_t)bytes); 2680 if (arc_c > arc_c_max) 2681 arc_c = arc_c_max; 2682 else if (state == arc_anon) 2683 atomic_add_64(&arc_p, (int64_t)bytes); 2684 if (arc_p > arc_c) 2685 arc_p = arc_c; 2686 } 2687 ASSERT((int64_t)arc_p >= 0); 2688} 2689 2690/* 2691 * Check if the cache has reached its limits and eviction is required 2692 * prior to insert. 2693 */ 2694static int 2695arc_evict_needed(arc_buf_contents_t type) 2696{ 2697 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 2698 return (1); 2699 2700#ifdef sun 2701#ifdef _KERNEL 2702 /* 2703 * If zio data pages are being allocated out of a separate heap segment, 2704 * then enforce that the size of available vmem for this area remains 2705 * above about 1/32nd free.
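 *
 * That is, reclaim once free < alloc/32 (about 3% of the segment):
 * with 4GB allocated from zio_arena, for example, eviction is forced
 * once less than 128MB of the arena remains free.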
2706 */ 2707 if (type == ARC_BUFC_DATA && zio_arena != NULL && 2708 vmem_size(zio_arena, VMEM_FREE) < 2709 (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) 2710 return (1); 2711#endif 2712#endif /* sun */ 2713 2714 if (arc_reclaim_needed()) 2715 return (1); 2716 2717 return (arc_size > arc_c); 2718} 2719 2720/* 2721 * The buffer, supplied as the first argument, needs a data block. 2722 * So, if we are at cache max, determine which cache should be victimized. 2723 * We have the following cases: 2724 * 2725 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 2726 * In this situation if we're out of space, but the resident size of the MFU is 2727 * under the limit, victimize the MFU cache to satisfy this insertion request. 2728 * 2729 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 2730 * Here, we've used up all of the available space for the MRU, so we need to 2731 * evict from our own cache instead. Evict from the set of resident MRU 2732 * entries. 2733 * 2734 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2735 * c minus p represents the MFU space in the cache, since p is the size of the 2736 * cache that is dedicated to the MRU. In this situation there's still space on 2737 * the MFU side, so the MRU side needs to be victimized. 2738 * 2739 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 2740 * MFU's resident set is consuming more space than it has been allotted. In 2741 * this situation, we must victimize our own cache, the MFU, for this insertion. 2742 */ 2743static void 2744arc_get_data_buf(arc_buf_t *buf) 2745{ 2746 arc_state_t *state = buf->b_hdr->b_state; 2747 uint64_t size = buf->b_hdr->b_size; 2748 arc_buf_contents_t type = buf->b_hdr->b_type; 2749 2750 arc_adapt(size, state); 2751 2752 /* 2753 * We have not yet reached cache maximum size, 2754 * just allocate a new buffer. 2755 */ 2756 if (!arc_evict_needed(type)) { 2757 if (type == ARC_BUFC_METADATA) { 2758 buf->b_data = zio_buf_alloc(size); 2759 arc_space_consume(size, ARC_SPACE_DATA); 2760 } else { 2761 ASSERT(type == ARC_BUFC_DATA); 2762 buf->b_data = zio_data_buf_alloc(size); 2763 ARCSTAT_INCR(arcstat_data_size, size); 2764 atomic_add_64(&arc_size, size); 2765 } 2766 goto out; 2767 } 2768 2769 /* 2770 * If we are prefetching from the mfu ghost list, this buffer 2771 * will end up on the mru list; so steal space from there. 2772 */ 2773 if (state == arc_mfu_ghost) 2774 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 2775 else if (state == arc_mru_ghost) 2776 state = arc_mru; 2777 2778 if (state == arc_mru || state == arc_anon) { 2779 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 2780 state = (arc_mfu->arcs_lsize[type] >= size && 2781 arc_p > mru_used) ? arc_mfu : arc_mru; 2782 } else { 2783 /* MFU cases */ 2784 uint64_t mfu_space = arc_c - arc_p; 2785 state = (arc_mru->arcs_lsize[type] >= size && 2786 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 2787 } 2788 if ((buf->b_data = arc_evict(state, 0, size, TRUE, type)) == NULL) { 2789 if (type == ARC_BUFC_METADATA) { 2790 buf->b_data = zio_buf_alloc(size); 2791 arc_space_consume(size, ARC_SPACE_DATA); 2792 } else { 2793 ASSERT(type == ARC_BUFC_DATA); 2794 buf->b_data = zio_data_buf_alloc(size); 2795 ARCSTAT_INCR(arcstat_data_size, size); 2796 atomic_add_64(&arc_size, size); 2797 } 2798 ARCSTAT_BUMP(arcstat_recycle_miss); 2799 } 2800 ASSERT(buf->b_data != NULL); 2801out: 2802 /* 2803 * Update the state size. Note that ghost states have a 2804 * "ghost size" and so don't need to be updated. 
2805 */ 2806 if (!GHOST_STATE(buf->b_hdr->b_state)) { 2807 arc_buf_hdr_t *hdr = buf->b_hdr; 2808 2809 atomic_add_64(&hdr->b_state->arcs_size, size); 2810 if (list_link_active(&hdr->b_arc_node)) { 2811 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2812 atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2813 } 2814 /* 2815 * If we are growing the cache, and we are adding anonymous 2816 * data, and we have outgrown arc_p, update arc_p 2817 */ 2818 if (arc_size < arc_c && hdr->b_state == arc_anon && 2819 arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 2820 arc_p = MIN(arc_c, arc_p + size); 2821 } 2822 ARCSTAT_BUMP(arcstat_allocated); 2823} 2824 2825/* 2826 * This routine is called whenever a buffer is accessed. 2827 * NOTE: the hash lock is dropped in this function. 2828 */ 2829static void 2830arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2831{ 2832 clock_t now; 2833 2834 ASSERT(MUTEX_HELD(hash_lock)); 2835 2836 if (buf->b_state == arc_anon) { 2837 /* 2838 * This buffer is not in the cache, and does not 2839 * appear in our "ghost" list. Add the new buffer 2840 * to the MRU state. 2841 */ 2842 2843 ASSERT(buf->b_arc_access == 0); 2844 buf->b_arc_access = ddi_get_lbolt(); 2845 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2846 arc_change_state(arc_mru, buf, hash_lock); 2847 2848 } else if (buf->b_state == arc_mru) { 2849 now = ddi_get_lbolt(); 2850 2851 /* 2852 * If this buffer is here because of a prefetch, then either: 2853 * - clear the flag if this is a "referencing" read 2854 * (any subsequent access will bump this into the MFU state). 2855 * or 2856 * - move the buffer to the head of the list if this is 2857 * another prefetch (to make it less likely to be evicted). 2858 */ 2859 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2860 if (refcount_count(&buf->b_refcnt) == 0) { 2861 ASSERT(list_link_active(&buf->b_arc_node)); 2862 } else { 2863 buf->b_flags &= ~ARC_PREFETCH; 2864 ARCSTAT_BUMP(arcstat_mru_hits); 2865 } 2866 buf->b_arc_access = now; 2867 return; 2868 } 2869 2870 /* 2871 * This buffer has been "accessed" only once so far, 2872 * but it is still in the cache. Move it to the MFU 2873 * state. 2874 */ 2875 if (now > buf->b_arc_access + ARC_MINTIME) { 2876 /* 2877 * More than 125ms have passed since we 2878 * instantiated this buffer. Move it to the 2879 * most frequently used state. 2880 */ 2881 buf->b_arc_access = now; 2882 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2883 arc_change_state(arc_mfu, buf, hash_lock); 2884 } 2885 ARCSTAT_BUMP(arcstat_mru_hits); 2886 } else if (buf->b_state == arc_mru_ghost) { 2887 arc_state_t *new_state; 2888 /* 2889 * This buffer has been "accessed" recently, but 2890 * was evicted from the cache. Move it to the 2891 * MFU state. 2892 */ 2893 2894 if (buf->b_flags & ARC_PREFETCH) { 2895 new_state = arc_mru; 2896 if (refcount_count(&buf->b_refcnt) > 0) 2897 buf->b_flags &= ~ARC_PREFETCH; 2898 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2899 } else { 2900 new_state = arc_mfu; 2901 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2902 } 2903 2904 buf->b_arc_access = ddi_get_lbolt(); 2905 arc_change_state(new_state, buf, hash_lock); 2906 2907 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 2908 } else if (buf->b_state == arc_mfu) { 2909 /* 2910 * This buffer has been accessed more than once and is 2911 * still in the cache. Keep it in the MFU state. 2912 * 2913 * NOTE: an add_reference() that occurred when we did 2914 * the arc_read() will have kicked this off the list. 
2915 * If it was a prefetch, we will explicitly move it to 2916 * the head of the list now. 2917 */ 2918 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2919 ASSERT(refcount_count(&buf->b_refcnt) == 0); 2920 ASSERT(list_link_active(&buf->b_arc_node)); 2921 } 2922 ARCSTAT_BUMP(arcstat_mfu_hits); 2923 buf->b_arc_access = ddi_get_lbolt(); 2924 } else if (buf->b_state == arc_mfu_ghost) { 2925 arc_state_t *new_state = arc_mfu; 2926 /* 2927 * This buffer has been accessed more than once but has 2928 * been evicted from the cache. Move it back to the 2929 * MFU state. 2930 */ 2931 2932 if (buf->b_flags & ARC_PREFETCH) { 2933 /* 2934 * This is a prefetch access... 2935 * move this block back to the MRU state. 2936 */ 2937 ASSERT0(refcount_count(&buf->b_refcnt)); 2938 new_state = arc_mru; 2939 } 2940 2941 buf->b_arc_access = ddi_get_lbolt(); 2942 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2943 arc_change_state(new_state, buf, hash_lock); 2944 2945 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 2946 } else if (buf->b_state == arc_l2c_only) { 2947 /* 2948 * This buffer is on the 2nd Level ARC. 2949 */ 2950 2951 buf->b_arc_access = ddi_get_lbolt(); 2952 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2953 arc_change_state(arc_mfu, buf, hash_lock); 2954 } else { 2955 ASSERT(!"invalid arc state"); 2956 } 2957} 2958 2959/* a generic arc_done_func_t which you can use */ 2960/* ARGSUSED */ 2961void 2962arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2963{ 2964 if (zio == NULL || zio->io_error == 0) 2965 bcopy(buf->b_data, arg, buf->b_hdr->b_size); 2966 VERIFY(arc_buf_remove_ref(buf, arg)); 2967} 2968 2969/* a generic arc_done_func_t */ 2970void 2971arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2972{ 2973 arc_buf_t **bufp = arg; 2974 if (zio && zio->io_error) { 2975 VERIFY(arc_buf_remove_ref(buf, arg)); 2976 *bufp = NULL; 2977 } else { 2978 *bufp = buf; 2979 ASSERT(buf->b_data); 2980 } 2981} 2982 2983static void 2984arc_read_done(zio_t *zio) 2985{ 2986 arc_buf_hdr_t *hdr, *found; 2987 arc_buf_t *buf; 2988 arc_buf_t *abuf; /* buffer we're assigning to callback */ 2989 kmutex_t *hash_lock; 2990 arc_callback_t *callback_list, *acb; 2991 int freeable = FALSE; 2992 2993 buf = zio->io_private; 2994 hdr = buf->b_hdr; 2995 2996 /* 2997 * The hdr was inserted into hash-table and removed from lists 2998 * prior to starting I/O. We should find this header, since 2999 * it's in the hash table, and it should be legit since it's 3000 * not possible to evict it during the I/O. The only possible 3001 * reason for it not to be found is if we were freed during the 3002 * read. 3003 */ 3004 found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth, 3005 &hash_lock); 3006 3007 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 3008 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 3009 (found == hdr && HDR_L2_READING(hdr))); 3010 3011 hdr->b_flags &= ~ARC_L2_EVICTED; 3012 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) 3013 hdr->b_flags &= ~ARC_L2CACHE; 3014 3015 /* byteswap if necessary */ 3016 callback_list = hdr->b_acb; 3017 ASSERT(callback_list != NULL); 3018 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) { 3019 dmu_object_byteswap_t bswap = 3020 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp)); 3021 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ? 
3022 byteswap_uint64_array : 3023 dmu_ot_byteswap[bswap].ob_func; 3024 func(buf->b_data, hdr->b_size); 3025 } 3026 3027 arc_cksum_compute(buf, B_FALSE); 3028#ifdef illumos 3029 arc_buf_watch(buf); 3030#endif /* illumos */ 3031 3032 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) { 3033 /* 3034 * Only call arc_access on anonymous buffers. This is because 3035 * if we've issued an I/O for an evicted buffer, we've already 3036 * called arc_access (to prevent any simultaneous readers from 3037 * getting confused). 3038 */ 3039 arc_access(hdr, hash_lock); 3040 } 3041 3042 /* create copies of the data buffer for the callers */ 3043 abuf = buf; 3044 for (acb = callback_list; acb; acb = acb->acb_next) { 3045 if (acb->acb_done) { 3046 if (abuf == NULL) { 3047 ARCSTAT_BUMP(arcstat_duplicate_reads); 3048 abuf = arc_buf_clone(buf); 3049 } 3050 acb->acb_buf = abuf; 3051 abuf = NULL; 3052 } 3053 } 3054 hdr->b_acb = NULL; 3055 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3056 ASSERT(!HDR_BUF_AVAILABLE(hdr)); 3057 if (abuf == buf) { 3058 ASSERT(buf->b_efunc == NULL); 3059 ASSERT(hdr->b_datacnt == 1); 3060 hdr->b_flags |= ARC_BUF_AVAILABLE; 3061 } 3062 3063 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 3064 3065 if (zio->io_error != 0) { 3066 hdr->b_flags |= ARC_IO_ERROR; 3067 if (hdr->b_state != arc_anon) 3068 arc_change_state(arc_anon, hdr, hash_lock); 3069 if (HDR_IN_HASH_TABLE(hdr)) 3070 buf_hash_remove(hdr); 3071 freeable = refcount_is_zero(&hdr->b_refcnt); 3072 } 3073 3074 /* 3075 * Broadcast before we drop the hash_lock to avoid the possibility 3076 * that the hdr (and hence the cv) might be freed before we get to 3077 * the cv_broadcast(). 3078 */ 3079 cv_broadcast(&hdr->b_cv); 3080 3081 if (hash_lock) { 3082 mutex_exit(hash_lock); 3083 } else { 3084 /* 3085 * This block was freed while we waited for the read to 3086 * complete. It has been removed from the hash table and 3087 * moved to the anonymous state (so that it won't show up 3088 * in the cache). 3089 */ 3090 ASSERT3P(hdr->b_state, ==, arc_anon); 3091 freeable = refcount_is_zero(&hdr->b_refcnt); 3092 } 3093 3094 /* execute each callback and free its structure */ 3095 while ((acb = callback_list) != NULL) { 3096 if (acb->acb_done) 3097 acb->acb_done(zio, acb->acb_buf, acb->acb_private); 3098 3099 if (acb->acb_zio_dummy != NULL) { 3100 acb->acb_zio_dummy->io_error = zio->io_error; 3101 zio_nowait(acb->acb_zio_dummy); 3102 } 3103 3104 callback_list = acb->acb_next; 3105 kmem_free(acb, sizeof (arc_callback_t)); 3106 } 3107 3108 if (freeable) 3109 arc_hdr_destroy(hdr); 3110} 3111 3112/* 3113 * "Read" the block at the specified DVA (in bp) via the 3114 * cache. If the block is found in the cache, invoke the provided 3115 * callback immediately and return. Note that the `zio' parameter 3116 * in the callback will be NULL in this case, since no IO was 3117 * required. If the block is not in the cache, pass the read request 3118 * on to the spa with a substitute callback function, so that the 3119 * requested block will be added to the cache. 3120 * 3121 * If a read request arrives for a block that has a read in-progress, 3122 * either wait for the in-progress read to complete (and return the 3123 * results); or, if this is a read with a "done" func, add a record 3124 * to the read to invoke the "done" func when the read completes, 3125 * and return; or just return. 3126 * 3127 * arc_read_done() will invoke all the requested "done" functions 3128 * for readers of this block.
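 *
 * A typical asynchronous caller looks roughly like this (illustrative
 * only; my_done_func/my_arg are placeholders, and flag and priority
 * choices vary by caller):
 *
 *	uint32_t aflags = ARC_NOWAIT;
 *	(void) arc_read(pio, spa, bp, my_done_func, my_arg,
 *	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *
 * Passing ARC_WAIT instead makes arc_read() block until the I/O
 * completes and return its error code.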
3129 */ 3130int 3131arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done, 3132 void *private, zio_priority_t priority, int zio_flags, uint32_t *arc_flags, 3133 const zbookmark_t *zb) 3134{ 3135 arc_buf_hdr_t *hdr; 3136 arc_buf_t *buf = NULL; 3137 kmutex_t *hash_lock; 3138 zio_t *rzio; 3139 uint64_t guid = spa_load_guid(spa); 3140 3141top: 3142 hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp), 3143 &hash_lock); 3144 if (hdr && hdr->b_datacnt > 0) { 3145 3146 *arc_flags |= ARC_CACHED; 3147 3148 if (HDR_IO_IN_PROGRESS(hdr)) { 3149 3150 if (*arc_flags & ARC_WAIT) { 3151 cv_wait(&hdr->b_cv, hash_lock); 3152 mutex_exit(hash_lock); 3153 goto top; 3154 } 3155 ASSERT(*arc_flags & ARC_NOWAIT); 3156 3157 if (done) { 3158 arc_callback_t *acb = NULL; 3159 3160 acb = kmem_zalloc(sizeof (arc_callback_t), 3161 KM_SLEEP); 3162 acb->acb_done = done; 3163 acb->acb_private = private; 3164 if (pio != NULL) 3165 acb->acb_zio_dummy = zio_null(pio, 3166 spa, NULL, NULL, NULL, zio_flags); 3167 3168 ASSERT(acb->acb_done != NULL); 3169 acb->acb_next = hdr->b_acb; 3170 hdr->b_acb = acb; 3171 add_reference(hdr, hash_lock, private); 3172 mutex_exit(hash_lock); 3173 return (0); 3174 } 3175 mutex_exit(hash_lock); 3176 return (0); 3177 } 3178 3179 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 3180 3181 if (done) { 3182 add_reference(hdr, hash_lock, private); 3183 /* 3184 * If this block is already in use, create a new 3185 * copy of the data so that we will be guaranteed 3186 * that arc_release() will always succeed. 3187 */ 3188 buf = hdr->b_buf; 3189 ASSERT(buf); 3190 ASSERT(buf->b_data); 3191 if (HDR_BUF_AVAILABLE(hdr)) { 3192 ASSERT(buf->b_efunc == NULL); 3193 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 3194 } else { 3195 buf = arc_buf_clone(buf); 3196 } 3197 3198 } else if (*arc_flags & ARC_PREFETCH && 3199 refcount_count(&hdr->b_refcnt) == 0) { 3200 hdr->b_flags |= ARC_PREFETCH; 3201 } 3202 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 3203 arc_access(hdr, hash_lock); 3204 if (*arc_flags & ARC_L2CACHE) 3205 hdr->b_flags |= ARC_L2CACHE; 3206 if (*arc_flags & ARC_L2COMPRESS) 3207 hdr->b_flags |= ARC_L2COMPRESS; 3208 mutex_exit(hash_lock); 3209 ARCSTAT_BUMP(arcstat_hits); 3210 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 3211 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 3212 data, metadata, hits); 3213 3214 if (done) 3215 done(NULL, buf, private); 3216 } else { 3217 uint64_t size = BP_GET_LSIZE(bp); 3218 arc_callback_t *acb; 3219 vdev_t *vd = NULL; 3220 uint64_t addr = 0; 3221 boolean_t devw = B_FALSE; 3222 enum zio_compress b_compress = ZIO_COMPRESS_OFF; 3223 uint64_t b_asize = 0; 3224 3225 if (hdr == NULL) { 3226 /* this block is not in the cache */ 3227 arc_buf_hdr_t *exists; 3228 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 3229 buf = arc_buf_alloc(spa, size, private, type); 3230 hdr = buf->b_hdr; 3231 hdr->b_dva = *BP_IDENTITY(bp); 3232 hdr->b_birth = BP_PHYSICAL_BIRTH(bp); 3233 hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 3234 exists = buf_hash_insert(hdr, &hash_lock); 3235 if (exists) { 3236 /* somebody beat us to the hash insert */ 3237 mutex_exit(hash_lock); 3238 buf_discard_identity(hdr); 3239 (void) arc_buf_remove_ref(buf, private); 3240 goto top; /* restart the IO request */ 3241 } 3242 /* if this is a prefetch, we don't have a reference */ 3243 if (*arc_flags & ARC_PREFETCH) { 3244 (void) remove_reference(hdr, hash_lock, 3245 private); 3246 hdr->b_flags |= ARC_PREFETCH; 3247 } 3248 if (*arc_flags & ARC_L2CACHE) 3249 hdr->b_flags |= ARC_L2CACHE; 3250 
if (*arc_flags & ARC_L2COMPRESS) 3251 hdr->b_flags |= ARC_L2COMPRESS; 3252 if (BP_GET_LEVEL(bp) > 0) 3253 hdr->b_flags |= ARC_INDIRECT; 3254 } else { 3255 /* this block is in the ghost cache */ 3256 ASSERT(GHOST_STATE(hdr->b_state)); 3257 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3258 ASSERT0(refcount_count(&hdr->b_refcnt)); 3259 ASSERT(hdr->b_buf == NULL); 3260 3261 /* if this is a prefetch, we don't have a reference */ 3262 if (*arc_flags & ARC_PREFETCH) 3263 hdr->b_flags |= ARC_PREFETCH; 3264 else 3265 add_reference(hdr, hash_lock, private); 3266 if (*arc_flags & ARC_L2CACHE) 3267 hdr->b_flags |= ARC_L2CACHE; 3268 if (*arc_flags & ARC_L2COMPRESS) 3269 hdr->b_flags |= ARC_L2COMPRESS; 3270 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 3271 buf->b_hdr = hdr; 3272 buf->b_data = NULL; 3273 buf->b_efunc = NULL; 3274 buf->b_private = NULL; 3275 buf->b_next = NULL; 3276 hdr->b_buf = buf; 3277 ASSERT(hdr->b_datacnt == 0); 3278 hdr->b_datacnt = 1; 3279 arc_get_data_buf(buf); 3280 arc_access(hdr, hash_lock); 3281 } 3282 3283 ASSERT(!GHOST_STATE(hdr->b_state)); 3284 3285 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 3286 acb->acb_done = done; 3287 acb->acb_private = private; 3288 3289 ASSERT(hdr->b_acb == NULL); 3290 hdr->b_acb = acb; 3291 hdr->b_flags |= ARC_IO_IN_PROGRESS; 3292 3293 if (hdr->b_l2hdr != NULL && 3294 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) { 3295 devw = hdr->b_l2hdr->b_dev->l2ad_writing; 3296 addr = hdr->b_l2hdr->b_daddr; 3297 b_compress = hdr->b_l2hdr->b_compress; 3298 b_asize = hdr->b_l2hdr->b_asize; 3299 /* 3300 * Lock out device removal. 3301 */ 3302 if (vdev_is_dead(vd) || 3303 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 3304 vd = NULL; 3305 } 3306 3307 mutex_exit(hash_lock); 3308 3309 /* 3310 * At this point, we have a level 1 cache miss. Try again in 3311 * L2ARC if possible. 3312 */ 3313 ASSERT3U(hdr->b_size, ==, size); 3314 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp, 3315 uint64_t, size, zbookmark_t *, zb); 3316 ARCSTAT_BUMP(arcstat_misses); 3317 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 3318 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 3319 data, metadata, misses); 3320#ifdef _KERNEL 3321 curthread->td_ru.ru_inblock++; 3322#endif 3323 3324 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { 3325 /* 3326 * Read from the L2ARC if the following are true: 3327 * 1. The L2ARC vdev was previously cached. 3328 * 2. This buffer still has L2ARC metadata. 3329 * 3. This buffer isn't currently writing to the L2ARC. 3330 * 4. The L2ARC entry wasn't evicted, which may 3331 * also have invalidated the vdev. 3332 * 5. This isn't a prefetch while l2arc_noprefetch is set. 3333 */ 3334 if (hdr->b_l2hdr != NULL && 3335 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && 3336 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { 3337 l2arc_read_callback_t *cb; 3338 3339 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 3340 ARCSTAT_BUMP(arcstat_l2_hits); 3341 3342 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 3343 KM_SLEEP); 3344 cb->l2rcb_buf = buf; 3345 cb->l2rcb_spa = spa; 3346 cb->l2rcb_bp = *bp; 3347 cb->l2rcb_zb = *zb; 3348 cb->l2rcb_flags = zio_flags; 3349 cb->l2rcb_compress = b_compress; 3350 3351 ASSERT(addr >= VDEV_LABEL_START_SIZE && 3352 addr + size < vd->vdev_psize - 3353 VDEV_LABEL_END_SIZE); 3354 3355 /* 3356 * l2arc read. The SCL_L2ARC lock will be 3357 * released by l2arc_read_done(). 3358 * Issue a null zio if the underlying buffer 3359 * was squashed to zero size by compression.
3360 */ 3361 if (b_compress == ZIO_COMPRESS_EMPTY) { 3362 rzio = zio_null(pio, spa, vd, 3363 l2arc_read_done, cb, 3364 zio_flags | ZIO_FLAG_DONT_CACHE | 3365 ZIO_FLAG_CANFAIL | 3366 ZIO_FLAG_DONT_PROPAGATE | 3367 ZIO_FLAG_DONT_RETRY); 3368 } else { 3369 rzio = zio_read_phys(pio, vd, addr, 3370 b_asize, buf->b_data, 3371 ZIO_CHECKSUM_OFF, 3372 l2arc_read_done, cb, priority, 3373 zio_flags | ZIO_FLAG_DONT_CACHE | 3374 ZIO_FLAG_CANFAIL | 3375 ZIO_FLAG_DONT_PROPAGATE | 3376 ZIO_FLAG_DONT_RETRY, B_FALSE); 3377 } 3378 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 3379 zio_t *, rzio); 3380 ARCSTAT_INCR(arcstat_l2_read_bytes, b_asize); 3381 3382 if (*arc_flags & ARC_NOWAIT) { 3383 zio_nowait(rzio); 3384 return (0); 3385 } 3386 3387 ASSERT(*arc_flags & ARC_WAIT); 3388 if (zio_wait(rzio) == 0) 3389 return (0); 3390 3391 /* l2arc read error; goto zio_read() */ 3392 } else { 3393 DTRACE_PROBE1(l2arc__miss, 3394 arc_buf_hdr_t *, hdr); 3395 ARCSTAT_BUMP(arcstat_l2_misses); 3396 if (HDR_L2_WRITING(hdr)) 3397 ARCSTAT_BUMP(arcstat_l2_rw_clash); 3398 spa_config_exit(spa, SCL_L2ARC, vd); 3399 } 3400 } else { 3401 if (vd != NULL) 3402 spa_config_exit(spa, SCL_L2ARC, vd); 3403 if (l2arc_ndev != 0) { 3404 DTRACE_PROBE1(l2arc__miss, 3405 arc_buf_hdr_t *, hdr); 3406 ARCSTAT_BUMP(arcstat_l2_misses); 3407 } 3408 } 3409 3410 rzio = zio_read(pio, spa, bp, buf->b_data, size, 3411 arc_read_done, buf, priority, zio_flags, zb); 3412 3413 if (*arc_flags & ARC_WAIT) 3414 return (zio_wait(rzio)); 3415 3416 ASSERT(*arc_flags & ARC_NOWAIT); 3417 zio_nowait(rzio); 3418 } 3419 return (0); 3420} 3421 3422void 3423arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 3424{ 3425 ASSERT(buf->b_hdr != NULL); 3426 ASSERT(buf->b_hdr->b_state != arc_anon); 3427 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 3428 ASSERT(buf->b_efunc == NULL); 3429 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr)); 3430 3431 buf->b_efunc = func; 3432 buf->b_private = private; 3433} 3434 3435/* 3436 * Notify the arc that a block was freed, and thus will never be used again. 3437 */ 3438void 3439arc_freed(spa_t *spa, const blkptr_t *bp) 3440{ 3441 arc_buf_hdr_t *hdr; 3442 kmutex_t *hash_lock; 3443 uint64_t guid = spa_load_guid(spa); 3444 3445 hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp), 3446 &hash_lock); 3447 if (hdr == NULL) 3448 return; 3449 if (HDR_BUF_AVAILABLE(hdr)) { 3450 arc_buf_t *buf = hdr->b_buf; 3451 add_reference(hdr, hash_lock, FTAG); 3452 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 3453 mutex_exit(hash_lock); 3454 3455 arc_release(buf, FTAG); 3456 (void) arc_buf_remove_ref(buf, FTAG); 3457 } else { 3458 mutex_exit(hash_lock); 3459 } 3460 3461} 3462 3463/* 3464 * This is used by the DMU to let the ARC know that a buffer is 3465 * being evicted, so the ARC should clean up. If this arc buf 3466 * is not yet in the evicted state, it will be put there. 3467 */ 3468int 3469arc_buf_evict(arc_buf_t *buf) 3470{ 3471 arc_buf_hdr_t *hdr; 3472 kmutex_t *hash_lock; 3473 arc_buf_t **bufp; 3474 list_t *list, *evicted_list; 3475 kmutex_t *lock, *evicted_lock; 3476 3477 mutex_enter(&buf->b_evict_lock); 3478 hdr = buf->b_hdr; 3479 if (hdr == NULL) { 3480 /* 3481 * We are in arc_do_user_evicts(). 3482 */ 3483 ASSERT(buf->b_data == NULL); 3484 mutex_exit(&buf->b_evict_lock); 3485 return (0); 3486 } else if (buf->b_data == NULL) { 3487 arc_buf_t copy = *buf; /* structure assignment */ 3488 /* 3489 * We are on the eviction list; process this buffer now 3490 * but let arc_do_user_evicts() do the reaping. 
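 *
 * The structure copy is what lets b_efunc run after b_evict_lock is
 * dropped: the original buf may be reaped the instant the lock is
 * released, but the stack copy still carries the callback and its
 * b_private argument.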
3491 */ 3492 buf->b_efunc = NULL; 3493 mutex_exit(&buf->b_evict_lock); 3494 VERIFY(copy.b_efunc(&copy) == 0); 3495 return (1); 3496 } 3497 hash_lock = HDR_LOCK(hdr); 3498 mutex_enter(hash_lock); 3499 hdr = buf->b_hdr; 3500 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 3501 3502 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); 3503 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 3504 3505 /* 3506 * Pull this buffer off of the hdr 3507 */ 3508 bufp = &hdr->b_buf; 3509 while (*bufp != buf) 3510 bufp = &(*bufp)->b_next; 3511 *bufp = buf->b_next; 3512 3513 ASSERT(buf->b_data != NULL); 3514 arc_buf_destroy(buf, FALSE, FALSE); 3515 3516 if (hdr->b_datacnt == 0) { 3517 arc_state_t *old_state = hdr->b_state; 3518 arc_state_t *evicted_state; 3519 3520 ASSERT(hdr->b_buf == NULL); 3521 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 3522 3523 evicted_state = 3524 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 3525 3526 get_buf_info(hdr, old_state, &list, &lock); 3527 get_buf_info(hdr, evicted_state, &evicted_list, &evicted_lock); 3528 mutex_enter(lock); 3529 mutex_enter(evicted_lock); 3530 3531 arc_change_state(evicted_state, hdr, hash_lock); 3532 ASSERT(HDR_IN_HASH_TABLE(hdr)); 3533 hdr->b_flags |= ARC_IN_HASH_TABLE; 3534 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 3535 3536 mutex_exit(evicted_lock); 3537 mutex_exit(lock); 3538 } 3539 mutex_exit(hash_lock); 3540 mutex_exit(&buf->b_evict_lock); 3541 3542 VERIFY(buf->b_efunc(buf) == 0); 3543 buf->b_efunc = NULL; 3544 buf->b_private = NULL; 3545 buf->b_hdr = NULL; 3546 buf->b_next = NULL; 3547 kmem_cache_free(buf_cache, buf); 3548 return (1); 3549} 3550 3551/* 3552 * Release this buffer from the cache, making it an anonymous buffer. This 3553 * must be done after a read and prior to modifying the buffer contents. 3554 * If the buffer has more than one reference, we must make 3555 * a new hdr for the buffer. 3556 */ 3557void 3558arc_release(arc_buf_t *buf, void *tag) 3559{ 3560 arc_buf_hdr_t *hdr; 3561 kmutex_t *hash_lock = NULL; 3562 l2arc_buf_hdr_t *l2hdr; 3563 uint64_t buf_size; 3564 3565 /* 3566 * It would be nice to assert that if it's DMU metadata (level > 3567 * 0 || it's the dnode file), then it must be syncing context. 3568 * But we don't know that information at this level. 3569 */ 3570 3571 mutex_enter(&buf->b_evict_lock); 3572 hdr = buf->b_hdr; 3573 3574 /* this buffer is not on any list */ 3575 ASSERT(refcount_count(&hdr->b_refcnt) > 0); 3576 3577 if (hdr->b_state == arc_anon) { 3578 /* this buffer is already released */ 3579 ASSERT(buf->b_efunc == NULL); 3580 } else { 3581 hash_lock = HDR_LOCK(hdr); 3582 mutex_enter(hash_lock); 3583 hdr = buf->b_hdr; 3584 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 3585 } 3586 3587 l2hdr = hdr->b_l2hdr; 3588 if (l2hdr) { 3589 mutex_enter(&l2arc_buflist_mtx); 3590 hdr->b_l2hdr = NULL; 3591 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 3592 } 3593 buf_size = hdr->b_size; 3594 3595 /* 3596 * Do we have more than one buf? 3597 */ 3598 if (hdr->b_datacnt > 1) { 3599 arc_buf_hdr_t *nhdr; 3600 arc_buf_t **bufp; 3601 uint64_t blksz = hdr->b_size; 3602 uint64_t spa = hdr->b_spa; 3603 arc_buf_contents_t type = hdr->b_type; 3604 uint32_t flags = hdr->b_flags; 3605 3606 ASSERT(hdr->b_buf != buf || buf->b_next != NULL); 3607 /* 3608 * Pull the data off of this hdr and attach it to 3609 * a new anonymous hdr.
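 *
 * The remaining bufs stay with the old (hashed) hdr; only this buf
 * migrates, so other readers of the block are unaffected.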
3610 */ 3611 (void) remove_reference(hdr, hash_lock, tag); 3612 bufp = &hdr->b_buf; 3613 while (*bufp != buf) 3614 bufp = &(*bufp)->b_next; 3615 *bufp = buf->b_next; 3616 buf->b_next = NULL; 3617 3618 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 3619 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 3620 if (refcount_is_zero(&hdr->b_refcnt)) { 3621 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 3622 ASSERT3U(*size, >=, hdr->b_size); 3623 atomic_add_64(size, -hdr->b_size); 3624 } 3625 3626 /* 3627 * We're releasing a duplicate user data buffer, update 3628 * our statistics accordingly. 3629 */ 3630 if (hdr->b_type == ARC_BUFC_DATA) { 3631 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers); 3632 ARCSTAT_INCR(arcstat_duplicate_buffers_size, 3633 -hdr->b_size); 3634 } 3635 hdr->b_datacnt -= 1; 3636 arc_cksum_verify(buf); 3637#ifdef illumos 3638 arc_buf_unwatch(buf); 3639#endif /* illumos */ 3640 3641 mutex_exit(hash_lock); 3642 3643 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 3644 nhdr->b_size = blksz; 3645 nhdr->b_spa = spa; 3646 nhdr->b_type = type; 3647 nhdr->b_buf = buf; 3648 nhdr->b_state = arc_anon; 3649 nhdr->b_arc_access = 0; 3650 nhdr->b_flags = flags & ARC_L2_WRITING; 3651 nhdr->b_l2hdr = NULL; 3652 nhdr->b_datacnt = 1; 3653 nhdr->b_freeze_cksum = NULL; 3654 (void) refcount_add(&nhdr->b_refcnt, tag); 3655 buf->b_hdr = nhdr; 3656 mutex_exit(&buf->b_evict_lock); 3657 atomic_add_64(&arc_anon->arcs_size, blksz); 3658 } else { 3659 mutex_exit(&buf->b_evict_lock); 3660 ASSERT(refcount_count(&hdr->b_refcnt) == 1); 3661 ASSERT(!list_link_active(&hdr->b_arc_node)); 3662 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3663 if (hdr->b_state != arc_anon) 3664 arc_change_state(arc_anon, hdr, hash_lock); 3665 hdr->b_arc_access = 0; 3666 if (hash_lock) 3667 mutex_exit(hash_lock); 3668 3669 buf_discard_identity(hdr); 3670 arc_buf_thaw(buf); 3671 } 3672 buf->b_efunc = NULL; 3673 buf->b_private = NULL; 3674 3675 if (l2hdr) { 3676 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize); 3677 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr, 3678 hdr->b_size, 0); 3679 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 3680 ARCSTAT_INCR(arcstat_l2_size, -buf_size); 3681 mutex_exit(&l2arc_buflist_mtx); 3682 } 3683} 3684 3685int 3686arc_released(arc_buf_t *buf) 3687{ 3688 int released; 3689 3690 mutex_enter(&buf->b_evict_lock); 3691 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 3692 mutex_exit(&buf->b_evict_lock); 3693 return (released); 3694} 3695 3696int 3697arc_has_callback(arc_buf_t *buf) 3698{ 3699 int callback; 3700 3701 mutex_enter(&buf->b_evict_lock); 3702 callback = (buf->b_efunc != NULL); 3703 mutex_exit(&buf->b_evict_lock); 3704 return (callback); 3705} 3706 3707#ifdef ZFS_DEBUG 3708int 3709arc_referenced(arc_buf_t *buf) 3710{ 3711 int referenced; 3712 3713 mutex_enter(&buf->b_evict_lock); 3714 referenced = (refcount_count(&buf->b_hdr->b_refcnt)); 3715 mutex_exit(&buf->b_evict_lock); 3716 return (referenced); 3717} 3718#endif 3719 3720static void 3721arc_write_ready(zio_t *zio) 3722{ 3723 arc_write_callback_t *callback = zio->io_private; 3724 arc_buf_t *buf = callback->awcb_buf; 3725 arc_buf_hdr_t *hdr = buf->b_hdr; 3726 3727 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 3728 callback->awcb_ready(zio, buf, callback->awcb_private); 3729 3730 /* 3731 * If the IO is already in progress, then this is a re-write 3732 * attempt, so we need to thaw and re-compute the cksum. 3733 * It is the responsibility of the callback to handle the 3734 * accounting for any re-write attempt. 
3735 */ 3736 if (HDR_IO_IN_PROGRESS(hdr)) { 3737 mutex_enter(&hdr->b_freeze_lock); 3738 if (hdr->b_freeze_cksum != NULL) { 3739 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 3740 hdr->b_freeze_cksum = NULL; 3741 } 3742 mutex_exit(&hdr->b_freeze_lock); 3743 } 3744 arc_cksum_compute(buf, B_FALSE); 3745 hdr->b_flags |= ARC_IO_IN_PROGRESS; 3746} 3747 3748/* 3749 * The SPA calls this callback for each physical write that happens on behalf 3750 * of a logical write. See the comment in dbuf_write_physdone() for details. 3751 */ 3752static void 3753arc_write_physdone(zio_t *zio) 3754{ 3755 arc_write_callback_t *cb = zio->io_private; 3756 if (cb->awcb_physdone != NULL) 3757 cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private); 3758} 3759 3760static void 3761arc_write_done(zio_t *zio) 3762{ 3763 arc_write_callback_t *callback = zio->io_private; 3764 arc_buf_t *buf = callback->awcb_buf; 3765 arc_buf_hdr_t *hdr = buf->b_hdr; 3766 3767 ASSERT(hdr->b_acb == NULL); 3768 3769 if (zio->io_error == 0) { 3770 if (BP_IS_HOLE(zio->io_bp)) { 3771 buf_discard_identity(hdr); 3772 } else { 3773 hdr->b_dva = *BP_IDENTITY(zio->io_bp); 3774 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp); 3775 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 3776 } 3777 } else { 3778 ASSERT(BUF_EMPTY(hdr)); 3779 } 3780 3781 /* 3782 * If the block to be written was all-zero, we may have 3783 * compressed it away. In this case no write was performed 3784 * so there will be no dva/birth/checksum. The buffer must 3785 * therefore remain anonymous (and uncached). 3786 */ 3787 if (!BUF_EMPTY(hdr)) { 3788 arc_buf_hdr_t *exists; 3789 kmutex_t *hash_lock; 3790 3791 ASSERT(zio->io_error == 0); 3792 3793 arc_cksum_verify(buf); 3794 3795 exists = buf_hash_insert(hdr, &hash_lock); 3796 if (exists) { 3797 /* 3798 * This can only happen if we overwrite for 3799 * sync-to-convergence, because we remove 3800 * buffers from the hash table when we arc_free(). 
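 *
 * Three tolerated collision cases are distinguished below:
 *  - ZIO_FLAG_IO_REWRITE: sync-to-convergence; the pre-existing
 *    hdr is destroyed and this one is inserted in its place,
 *  - ZIO_FLAG_NOPWRITE: the on-disk block was left in place, so
 *    finding an identical hdr is expected,
 *  - otherwise: a dedup write, where two anonymous writes of
 *    identical data may legitimately race.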
3801 */ 3802 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { 3803 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 3804 panic("bad overwrite, hdr=%p exists=%p", 3805 (void *)hdr, (void *)exists); 3806 ASSERT(refcount_is_zero(&exists->b_refcnt)); 3807 arc_change_state(arc_anon, exists, hash_lock); 3808 mutex_exit(hash_lock); 3809 arc_hdr_destroy(exists); 3810 exists = buf_hash_insert(hdr, &hash_lock); 3811 ASSERT3P(exists, ==, NULL); 3812 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) { 3813 /* nopwrite */ 3814 ASSERT(zio->io_prop.zp_nopwrite); 3815 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 3816 panic("bad nopwrite, hdr=%p exists=%p", 3817 (void *)hdr, (void *)exists); 3818 } else { 3819 /* Dedup */ 3820 ASSERT(hdr->b_datacnt == 1); 3821 ASSERT(hdr->b_state == arc_anon); 3822 ASSERT(BP_GET_DEDUP(zio->io_bp)); 3823 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 3824 } 3825 } 3826 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3827 /* if it's not anon, we are doing a scrub */ 3828 if (!exists && hdr->b_state == arc_anon) 3829 arc_access(hdr, hash_lock); 3830 mutex_exit(hash_lock); 3831 } else { 3832 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3833 } 3834 3835 ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 3836 callback->awcb_done(zio, buf, callback->awcb_private); 3837 3838 kmem_free(callback, sizeof (arc_write_callback_t)); 3839} 3840 3841zio_t * 3842arc_write(zio_t *pio, spa_t *spa, uint64_t txg, 3843 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress, 3844 const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *physdone, 3845 arc_done_func_t *done, void *private, zio_priority_t priority, 3846 int zio_flags, const zbookmark_t *zb) 3847{ 3848 arc_buf_hdr_t *hdr = buf->b_hdr; 3849 arc_write_callback_t *callback; 3850 zio_t *zio; 3851 3852 ASSERT(ready != NULL); 3853 ASSERT(done != NULL); 3854 ASSERT(!HDR_IO_ERROR(hdr)); 3855 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 3856 ASSERT(hdr->b_acb == NULL); 3857 if (l2arc) 3858 hdr->b_flags |= ARC_L2CACHE; 3859 if (l2arc_compress) 3860 hdr->b_flags |= ARC_L2COMPRESS; 3861 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 3862 callback->awcb_ready = ready; 3863 callback->awcb_physdone = physdone; 3864 callback->awcb_done = done; 3865 callback->awcb_private = private; 3866 callback->awcb_buf = buf; 3867 3868 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp, 3869 arc_write_ready, arc_write_physdone, arc_write_done, callback, 3870 priority, zio_flags, zb); 3871 3872 return (zio); 3873} 3874 3875static int 3876arc_memory_throttle(uint64_t reserve, uint64_t txg) 3877{ 3878#ifdef _KERNEL 3879 uint64_t available_memory = 3880 ptoa((uintmax_t)cnt.v_free_count + cnt.v_cache_count); 3881 static uint64_t page_load = 0; 3882 static uint64_t last_txg = 0; 3883 3884#ifdef sun 3885#if defined(__i386) 3886 available_memory = 3887 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 3888#endif 3889#endif /* sun */ 3890 3891 if (cnt.v_free_count + cnt.v_cache_count > 3892 (uint64_t)physmem * arc_lotsfree_percent / 100) 3893 return (0); 3894 3895 if (txg > last_txg) { 3896 last_txg = txg; 3897 page_load = 0; 3898 } 3899 /* 3900 * If we are in pageout, we know that memory is already tight, 3901 * the arc is already going to be evicting, so we just want to 3902 * continue to let page writes occur as quickly as possible. 
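 *
 * As a worked example (hypothetical numbers): with 1GB of
 * available_memory, the pageout path below accumulates
 * page_load += reserve / 8 per call and starts returning
 * ERESTART once page_load exceeds available_memory / 4,
 * i.e. 256MB in this example.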
3903 */ 3904 if (curproc == pageproc) { 3905 if (page_load > available_memory / 4) 3906 return (SET_ERROR(ERESTART)); 3907 /* Note: reserve is inflated, so we deflate */ 3908 page_load += reserve / 8; 3909 return (0); 3910 } else if (page_load > 0 && arc_reclaim_needed()) { 3911 /* memory is low, delay before restarting */ 3912 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3913 return (SET_ERROR(EAGAIN)); 3914 } 3915 page_load = 0; 3916#endif 3917 return (0); 3918} 3919 3920void 3921arc_tempreserve_clear(uint64_t reserve) 3922{ 3923 atomic_add_64(&arc_tempreserve, -reserve); 3924 ASSERT((int64_t)arc_tempreserve >= 0); 3925} 3926 3927int 3928arc_tempreserve_space(uint64_t reserve, uint64_t txg) 3929{ 3930 int error; 3931 uint64_t anon_size; 3932 3933 if (reserve > arc_c/4 && !arc_no_grow) 3934 arc_c = MIN(arc_c_max, reserve * 4); 3935 if (reserve > arc_c) 3936 return (SET_ERROR(ENOMEM)); 3937 3938 /* 3939 * Don't count loaned bufs as in flight dirty data to prevent long 3940 * network delays from blocking transactions that are ready to be 3941 * assigned to a txg. 3942 */ 3943 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0); 3944 3945 /* 3946 * Writes will, almost always, require additional memory allocations 3947 * in order to compress/encrypt/etc the data. We therefore need to 3948 * make sure that there is sufficient available memory for this. 3949 */ 3950 error = arc_memory_throttle(reserve, txg); 3951 if (error != 0) 3952 return (error); 3953 3954 /* 3955 * Throttle writes when the amount of dirty data in the cache 3956 * gets too large. We try to keep the cache less than half full 3957 * of dirty blocks so that our sync times don't grow too large. 3958 * Note: if two requests come in concurrently, we might let them 3959 * both succeed, when one of them should fail. Not a huge deal. 3960 */ 3961 3962 if (reserve + arc_tempreserve + anon_size > arc_c / 2 && 3963 anon_size > arc_c / 4) { 3964 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 3965 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 3966 arc_tempreserve>>10, 3967 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 3968 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 3969 reserve>>10, arc_c>>10); 3970 return (SET_ERROR(ERESTART)); 3971 } 3972 atomic_add_64(&arc_tempreserve, reserve); 3973 return (0); 3974} 3975 3976static kmutex_t arc_lowmem_lock; 3977#ifdef _KERNEL 3978static eventhandler_tag arc_event_lowmem = NULL; 3979 3980static void 3981arc_lowmem(void *arg __unused, int howto __unused) 3982{ 3983 3984 /* Serialize access via arc_lowmem_lock. */ 3985 mutex_enter(&arc_lowmem_lock); 3986 mutex_enter(&arc_reclaim_thr_lock); 3987 needfree = 1; 3988 cv_signal(&arc_reclaim_thr_cv); 3989 3990 /* 3991 * It is unsafe to block here in arbitrary threads, because we can come 3992 * here from ARC itself and may hold ARC locks and thus risk a deadlock 3993 * with ARC reclaim thread. 
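 *
 * Consequently only the pagedaemon itself blocks below, waiting for
 * the reclaim thread to clear needfree; every other caller merely
 * signals the reclaim thread and returns immediately.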
3994 */ 3995 if (curproc == pageproc) { 3996 while (needfree) 3997 msleep(&needfree, &arc_reclaim_thr_lock, 0, "zfs:lowmem", 0); 3998 } 3999 mutex_exit(&arc_reclaim_thr_lock); 4000 mutex_exit(&arc_lowmem_lock); 4001} 4002#endif 4003 4004void 4005arc_init(void) 4006{ 4007 int i, prefetch_tunable_set = 0; 4008 4009 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 4010 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 4011 mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL); 4012 4013 /* Convert seconds to clock ticks */ 4014 arc_min_prefetch_lifespan = 1 * hz; 4015 4016 /* Start out with 1/8 of all memory */ 4017 arc_c = kmem_size() / 8; 4018 4019#ifdef sun 4020#ifdef _KERNEL 4021 /* 4022 * On architectures where the physical memory can be larger 4023 * than the addressable space (intel in 32-bit mode), we may 4024 * need to limit the cache to 1/8 of VM size. 4025 */ 4026 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 4027#endif 4028#endif /* sun */ 4029 /* set min cache to 1/32 of all memory, or 16MB, whichever is more */ 4030 arc_c_min = MAX(arc_c / 4, 64<<18); 4031 /* set max to 5/8 of all memory, or all but 1GB, whichever is more */ 4032 if (arc_c * 8 >= 1<<30) 4033 arc_c_max = (arc_c * 8) - (1<<30); 4034 else 4035 arc_c_max = arc_c_min; 4036 arc_c_max = MAX(arc_c * 5, arc_c_max); 4037 4038#ifdef _KERNEL 4039 /* 4040 * Allow the tunables to override our calculations if they are 4041 * reasonable (i.e. over 16MB) 4042 */ 4043 if (zfs_arc_max > 64<<18 && zfs_arc_max < kmem_size()) 4044 arc_c_max = zfs_arc_max; 4045 if (zfs_arc_min > 64<<18 && zfs_arc_min <= arc_c_max) 4046 arc_c_min = zfs_arc_min; 4047#endif 4048 4049 arc_c = arc_c_max; 4050 arc_p = (arc_c >> 1); 4051 4052 /* limit meta-data to 1/4 of the arc capacity */ 4053 arc_meta_limit = arc_c_max / 4; 4054 4055 /* Allow the tunable to override if it is reasonable */ 4056 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 4057 arc_meta_limit = zfs_arc_meta_limit; 4058 4059 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 4060 arc_c_min = arc_meta_limit / 2; 4061 4062 if (zfs_arc_grow_retry > 0) 4063 arc_grow_retry = zfs_arc_grow_retry; 4064 4065 if (zfs_arc_shrink_shift > 0) 4066 arc_shrink_shift = zfs_arc_shrink_shift; 4067 4068 if (zfs_arc_p_min_shift > 0) 4069 arc_p_min_shift = zfs_arc_p_min_shift; 4070 4071 /* if kmem_flags are set, let's try to use less memory */ 4072 if (kmem_debugging()) 4073 arc_c = arc_c / 2; 4074 if (arc_c < arc_c_min) 4075 arc_c = arc_c_min; 4076 4077 zfs_arc_min = arc_c_min; 4078 zfs_arc_max = arc_c_max; 4079 4080 arc_anon = &ARC_anon; 4081 arc_mru = &ARC_mru; 4082 arc_mru_ghost = &ARC_mru_ghost; 4083 arc_mfu = &ARC_mfu; 4084 arc_mfu_ghost = &ARC_mfu_ghost; 4085 arc_l2c_only = &ARC_l2c_only; 4086 arc_size = 0; 4087 4088 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) { 4089 mutex_init(&arc_anon->arcs_locks[i].arcs_lock, 4090 NULL, MUTEX_DEFAULT, NULL); 4091 mutex_init(&arc_mru->arcs_locks[i].arcs_lock, 4092 NULL, MUTEX_DEFAULT, NULL); 4093 mutex_init(&arc_mru_ghost->arcs_locks[i].arcs_lock, 4094 NULL, MUTEX_DEFAULT, NULL); 4095 mutex_init(&arc_mfu->arcs_locks[i].arcs_lock, 4096 NULL, MUTEX_DEFAULT, NULL); 4097 mutex_init(&arc_mfu_ghost->arcs_locks[i].arcs_lock, 4098 NULL, MUTEX_DEFAULT, NULL); 4099 mutex_init(&arc_l2c_only->arcs_locks[i].arcs_lock, 4100 NULL, MUTEX_DEFAULT, NULL); 4101 4102 list_create(&arc_mru->arcs_lists[i], 4103 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 4104 list_create(&arc_mru_ghost->arcs_lists[i], 4105 sizeof
(arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 4106 list_create(&arc_mfu->arcs_lists[i], 4107 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 4108 list_create(&arc_mfu_ghost->arcs_lists[i], 4109 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 4110 4111 4112 list_create(&arc_l2c_only->arcs_lists[i], 4113 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 4114 } 4115 4116 buf_init(); 4117 4118 arc_thread_exit = 0; 4119 arc_eviction_list = NULL; 4120 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 4121 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 4122 4123 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 4124 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 4125 4126 if (arc_ksp != NULL) { 4127 arc_ksp->ks_data = &arc_stats; 4128 kstat_install(arc_ksp); 4129 } 4130 4131 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 4132 TS_RUN, minclsyspri); 4133 4134#ifdef _KERNEL 4135 arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL, 4136 EVENTHANDLER_PRI_FIRST); 4137#endif 4138 4139 arc_dead = FALSE; 4140 arc_warm = B_FALSE; 4141 4142 /* 4143 * Calculate maximum amount of dirty data per pool. 4144 * 4145 * If it has been set by /etc/system, take that. 4146 * Otherwise, use a percentage of physical memory defined by 4147 * zfs_dirty_data_max_percent (default 10%) with a cap at 4148 * zfs_dirty_data_max_max (default 4GB). 4149 */ 4150 if (zfs_dirty_data_max == 0) { 4151 zfs_dirty_data_max = ptob(physmem) * 4152 zfs_dirty_data_max_percent / 100; 4153 zfs_dirty_data_max = MIN(zfs_dirty_data_max, 4154 zfs_dirty_data_max_max); 4155 } 4156 4157#ifdef _KERNEL 4158 if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable)) 4159 prefetch_tunable_set = 1; 4160 4161#ifdef __i386__ 4162 if (prefetch_tunable_set == 0) { 4163 printf("ZFS NOTICE: Prefetch is disabled by default on i386 " 4164 "-- to enable,\n"); 4165 printf(" add \"vfs.zfs.prefetch_disable=0\" " 4166 "to /boot/loader.conf.\n"); 4167 zfs_prefetch_disable = 1; 4168 } 4169#else 4170 if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) && 4171 prefetch_tunable_set == 0) { 4172 printf("ZFS NOTICE: Prefetch is disabled by default if less " 4173 "than 4GB of RAM is present;\n" 4174 " to enable, add \"vfs.zfs.prefetch_disable=0\" " 4175 "to /boot/loader.conf.\n"); 4176 zfs_prefetch_disable = 1; 4177 } 4178#endif 4179 /* Warn about ZFS memory and address space requirements.
*/ 4180 if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) { 4181 printf("ZFS WARNING: Recommended minimum RAM size is 512MB; " 4182 "expect unstable behavior.\n"); 4183 } 4184 if (kmem_size() < 512 * (1 << 20)) { 4185 printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; " 4186 "expect unstable behavior.\n"); 4187 printf(" Consider tuning vm.kmem_size and " 4188 "vm.kmem_size_max\n"); 4189 printf(" in /boot/loader.conf.\n"); 4190 } 4191#endif 4192} 4193 4194void 4195arc_fini(void) 4196{ 4197 int i; 4198 4199 mutex_enter(&arc_reclaim_thr_lock); 4200 arc_thread_exit = 1; 4201 cv_signal(&arc_reclaim_thr_cv); 4202 while (arc_thread_exit != 0) 4203 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 4204 mutex_exit(&arc_reclaim_thr_lock); 4205 4206 arc_flush(NULL); 4207 4208 arc_dead = TRUE; 4209 4210 if (arc_ksp != NULL) { 4211 kstat_delete(arc_ksp); 4212 arc_ksp = NULL; 4213 } 4214 4215 mutex_destroy(&arc_eviction_mtx); 4216 mutex_destroy(&arc_reclaim_thr_lock); 4217 cv_destroy(&arc_reclaim_thr_cv); 4218 4219 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) { 4220 list_destroy(&arc_mru->arcs_lists[i]); 4221 list_destroy(&arc_mru_ghost->arcs_lists[i]); 4222 list_destroy(&arc_mfu->arcs_lists[i]); 4223 list_destroy(&arc_mfu_ghost->arcs_lists[i]); 4224 list_destroy(&arc_l2c_only->arcs_lists[i]); 4225 4226 mutex_destroy(&arc_anon->arcs_locks[i].arcs_lock); 4227 mutex_destroy(&arc_mru->arcs_locks[i].arcs_lock); 4228 mutex_destroy(&arc_mru_ghost->arcs_locks[i].arcs_lock); 4229 mutex_destroy(&arc_mfu->arcs_locks[i].arcs_lock); 4230 mutex_destroy(&arc_mfu_ghost->arcs_locks[i].arcs_lock); 4231 mutex_destroy(&arc_l2c_only->arcs_locks[i].arcs_lock); 4232 } 4233 4234 buf_fini(); 4235 4236 ASSERT(arc_loaned_bytes == 0); 4237 4238 mutex_destroy(&arc_lowmem_lock); 4239#ifdef _KERNEL 4240 if (arc_event_lowmem != NULL) 4241 EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem); 4242#endif 4243} 4244 4245/* 4246 * Level 2 ARC 4247 * 4248 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 4249 * It uses dedicated storage devices to hold cached data, which are populated 4250 * using large infrequent writes. The main role of this cache is to boost 4251 * the performance of random read workloads. The intended L2ARC devices 4252 * include short-stroked disks, solid state disks, and other media with 4253 * substantially faster read latency than disk. 4254 * 4255 * +-----------------------+ 4256 * | ARC | 4257 * +-----------------------+ 4258 * | ^ ^ 4259 * | | | 4260 * l2arc_feed_thread() arc_read() 4261 * | | | 4262 * | l2arc read | 4263 * V | | 4264 * +---------------+ | 4265 * | L2ARC | | 4266 * +---------------+ | 4267 * | ^ | 4268 * l2arc_write() | | 4269 * | | | 4270 * V | | 4271 * +-------+ +-------+ 4272 * | vdev | | vdev | 4273 * | cache | | cache | 4274 * +-------+ +-------+ 4275 * +=========+ .-----. 4276 * : L2ARC : |-_____-| 4277 * : devices : | Disks | 4278 * +=========+ `-_____-' 4279 * 4280 * Read requests are satisfied from the following sources, in order: 4281 * 4282 * 1) ARC 4283 * 2) vdev cache of L2ARC devices 4284 * 3) L2ARC devices 4285 * 4) vdev cache of disks 4286 * 5) disks 4287 * 4288 * Some L2ARC device types exhibit extremely slow write performance. 4289 * To accommodate for this there are some significant differences between 4290 * the L2ARC and traditional cache design: 4291 * 4292 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 4293 * the ARC behave as usual, freeing buffers and placing headers on ghost 4294 * lists. 
The ARC does not send buffers to the L2ARC during eviction as 4295 * this would add inflated write latencies for all ARC memory pressure. 4296 * 4297 * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 4298 * It does this by periodically scanning buffers from the eviction-end of 4299 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 4300 * not already there. It scans until a headroom of buffers is satisfied, 4301 * which itself is a buffer for ARC eviction. If a compressible buffer is 4302 * found during scanning and selected for writing to an L2ARC device, we 4303 * temporarily boost scanning headroom during the next scan cycle to make 4304 * sure we adapt to compression effects (which might significantly reduce 4305 * the data volume we write to L2ARC). The thread that does this is 4306 * l2arc_feed_thread(), illustrated below; example sizes are included to 4307 * provide a better sense of ratio than this diagram: 4308 * 4309 * head --> tail 4310 * +---------------------+----------+ 4311 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 4312 * +---------------------+----------+ | o L2ARC eligible 4313 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 4314 * +---------------------+----------+ | 4315 * 15.9 Gbytes ^ 32 Mbytes | 4316 * headroom | 4317 * l2arc_feed_thread() 4318 * | 4319 * l2arc write hand <--[oooo]--' 4320 * | 8 Mbyte 4321 * | write max 4322 * V 4323 * +==============================+ 4324 * L2ARC dev |####|#|###|###| |####| ... | 4325 * +==============================+ 4326 * 32 Gbytes 4327 * 4328 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 4329 * evicted, then the L2ARC has cached a buffer much sooner than it probably 4330 * needed to, potentially wasting L2ARC device bandwidth and storage. It is 4331 * safe to say that this is an uncommon case, since buffers at the end of 4332 * the ARC lists have moved there due to inactivity. 4333 * 4334 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 4335 * then the L2ARC simply misses copying some buffers. This serves as a 4336 * pressure valve to prevent heavy read workloads from both stalling the ARC 4337 * with waits and clogging the L2ARC with writes. This also helps prevent 4338 * the potential for the L2ARC to churn if it attempts to cache content too 4339 * quickly, such as during backups of the entire pool. 4340 * 4341 * 5. After system boot and before the ARC has filled main memory, there are 4342 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru 4343 * lists can remain mostly static. Instead of searching from tail of these 4344 * lists as pictured, the l2arc_feed_thread() will search from the list heads 4345 * for eligible buffers, greatly increasing its chance of finding them. 4346 * 4347 * The L2ARC device write speed is also boosted during this time so that 4348 * the L2ARC warms up faster. Since there have been no ARC evictions yet, 4349 * there are no L2ARC reads, and no fear of degrading read performance 4350 * through increased writes. 4351 * 4352 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that 4353 * the vdev queue can aggregate them into larger and fewer writes. Each 4354 * device is written to in a rotor fashion, sweeping writes through 4355 * available space then repeating. 4356 * 4357 * 7. The L2ARC does not store dirty content. It never needs to flush 4358 * write buffers back to disk based storage. 4359 * 4360 * 8. 
If an ARC buffer is written (and dirtied) which also exists in the 4361 * L2ARC, the now stale L2ARC buffer is immediately dropped. 4362 * 4363 * The performance of the L2ARC can be tweaked by a number of tunables, which 4364 * may be necessary for different workloads: 4365 * 4366 * l2arc_write_max max write bytes per interval 4367 * l2arc_write_boost extra write bytes during device warmup 4368 * l2arc_noprefetch skip caching prefetched buffers 4369 * l2arc_headroom number of max device writes to precache 4370 * l2arc_headroom_boost when we find compressed buffers during ARC 4371 * scanning, we multiply headroom by this 4372 * percentage factor for the next scan cycle, 4373 * since more compressed buffers are likely to 4374 * be present 4375 * l2arc_feed_secs seconds between L2ARC writing 4376 * 4377 * Tunables may be removed or added as future performance improvements are 4378 * integrated, and also may become zpool properties. 4379 * 4380 * There are three key functions that control how the L2ARC warms up: 4381 * 4382 * l2arc_write_eligible() check if a buffer is eligible to cache 4383 * l2arc_write_size() calculate how much to write 4384 * l2arc_write_interval() calculate sleep delay between writes 4385 * 4386 * These three functions determine what to write, how much, and how quickly 4387 * to send writes. 4388 */ 4389 4390static boolean_t 4391l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab) 4392{ 4393 /* 4394 * A buffer is *not* eligible for the L2ARC if it: 4395 * 1. belongs to a different spa. 4396 * 2. is already cached on the L2ARC. 4397 * 3. has an I/O in progress (it may be an incomplete read). 4398 * 4. is flagged not eligible (zfs property). 4399 */ 4400 if (ab->b_spa != spa_guid) { 4401 ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch); 4402 return (B_FALSE); 4403 } 4404 if (ab->b_l2hdr != NULL) { 4405 ARCSTAT_BUMP(arcstat_l2_write_in_l2); 4406 return (B_FALSE); 4407 } 4408 if (HDR_IO_IN_PROGRESS(ab)) { 4409 ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress); 4410 return (B_FALSE); 4411 } 4412 if (!HDR_L2CACHE(ab)) { 4413 ARCSTAT_BUMP(arcstat_l2_write_not_cacheable); 4414 return (B_FALSE); 4415 } 4416 4417 return (B_TRUE); 4418} 4419 4420static uint64_t 4421l2arc_write_size(void) 4422{ 4423 uint64_t size; 4424 4425 /* 4426 * Make sure our globals have meaningful values in case the user 4427 * altered them. 4428 */ 4429 size = l2arc_write_max; 4430 if (size == 0) { 4431 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must " 4432 "be greater than zero, resetting it to the default (%d)", 4433 L2ARC_WRITE_SIZE); 4434 size = l2arc_write_max = L2ARC_WRITE_SIZE; 4435 } 4436 4437 if (arc_warm == B_FALSE) 4438 size += l2arc_write_boost; 4439 4440 return (size); 4441 4442} 4443 4444static clock_t 4445l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote) 4446{ 4447 clock_t interval, next, now; 4448 4449 /* 4450 * If the ARC lists are busy, increase our write rate; if the 4451 * lists are stale, idle back. This is achieved by checking 4452 * how much we previously wrote - if it was more than half of 4453 * what we wanted, schedule the next write much sooner. 
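 *
 * For example, assuming hz = 1000 and the default tunables
 * (l2arc_feed_secs = 1, l2arc_feed_min_ms = 200): a busy pass that
 * wrote more than half of 'wanted' schedules the next feed roughly
 * 200 ticks after 'began', while a stale pass backs off to a full
 * 1000-tick (one second) interval; the result is clamped below so
 * that it is never in the past.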
4454 */ 4455 if (l2arc_feed_again && wrote > (wanted / 2)) 4456 interval = (hz * l2arc_feed_min_ms) / 1000; 4457 else 4458 interval = hz * l2arc_feed_secs; 4459 4460 now = ddi_get_lbolt(); 4461 next = MAX(now, MIN(now + interval, began + interval)); 4462 4463 return (next); 4464} 4465 4466static void 4467l2arc_hdr_stat_add(void) 4468{ 4469 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); 4470 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 4471} 4472 4473static void 4474l2arc_hdr_stat_remove(void) 4475{ 4476 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); 4477 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 4478} 4479 4480/* 4481 * Cycle through L2ARC devices. This is how L2ARC load balances. 4482 * If a device is returned, this also returns holding the spa config lock. 4483 */ 4484static l2arc_dev_t * 4485l2arc_dev_get_next(void) 4486{ 4487 l2arc_dev_t *first, *next = NULL; 4488 4489 /* 4490 * Lock out the removal of spas (spa_namespace_lock), then removal 4491 * of cache devices (l2arc_dev_mtx). Once a device has been selected, 4492 * both locks will be dropped and a spa config lock held instead. 4493 */ 4494 mutex_enter(&spa_namespace_lock); 4495 mutex_enter(&l2arc_dev_mtx); 4496 4497 /* if there are no vdevs, there is nothing to do */ 4498 if (l2arc_ndev == 0) 4499 goto out; 4500 4501 first = NULL; 4502 next = l2arc_dev_last; 4503 do { 4504 /* loop around the list looking for a non-faulted vdev */ 4505 if (next == NULL) { 4506 next = list_head(l2arc_dev_list); 4507 } else { 4508 next = list_next(l2arc_dev_list, next); 4509 if (next == NULL) 4510 next = list_head(l2arc_dev_list); 4511 } 4512 4513 /* if we have come back to the start, bail out */ 4514 if (first == NULL) 4515 first = next; 4516 else if (next == first) 4517 break; 4518 4519 } while (vdev_is_dead(next->l2ad_vdev)); 4520 4521 /* if we were unable to find any usable vdevs, return NULL */ 4522 if (vdev_is_dead(next->l2ad_vdev)) 4523 next = NULL; 4524 4525 l2arc_dev_last = next; 4526 4527out: 4528 mutex_exit(&l2arc_dev_mtx); 4529 4530 /* 4531 * Grab the config lock to prevent the 'next' device from being 4532 * removed while we are writing to it. 4533 */ 4534 if (next != NULL) 4535 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 4536 mutex_exit(&spa_namespace_lock); 4537 4538 return (next); 4539} 4540 4541/* 4542 * Free buffers that were tagged for destruction. 4543 */ 4544static void 4545l2arc_do_free_on_write() 4546{ 4547 list_t *buflist; 4548 l2arc_data_free_t *df, *df_prev; 4549 4550 mutex_enter(&l2arc_free_on_write_mtx); 4551 buflist = l2arc_free_on_write; 4552 4553 for (df = list_tail(buflist); df; df = df_prev) { 4554 df_prev = list_prev(buflist, df); 4555 ASSERT(df->l2df_data != NULL); 4556 ASSERT(df->l2df_func != NULL); 4557 df->l2df_func(df->l2df_data, df->l2df_size); 4558 list_remove(buflist, df); 4559 kmem_free(df, sizeof (l2arc_data_free_t)); 4560 } 4561 4562 mutex_exit(&l2arc_free_on_write_mtx); 4563} 4564 4565/* 4566 * A write to a cache device has completed. Update all headers to allow 4567 * reads from these buffers to begin. 
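 *
 * In outline: walk the buflist from the dummy write head toward the
 * tail, release any temporary compression buffer, drop the L2ARC
 * entry entirely if the write zio failed, and finally clear
 * ARC_L2_WRITING so arc_read() may begin hitting the entry.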
4568 */ 4569static void 4570l2arc_write_done(zio_t *zio) 4571{ 4572 l2arc_write_callback_t *cb; 4573 l2arc_dev_t *dev; 4574 list_t *buflist; 4575 arc_buf_hdr_t *head, *ab, *ab_prev; 4576 l2arc_buf_hdr_t *abl2; 4577 kmutex_t *hash_lock; 4578 4579 cb = zio->io_private; 4580 ASSERT(cb != NULL); 4581 dev = cb->l2wcb_dev; 4582 ASSERT(dev != NULL); 4583 head = cb->l2wcb_head; 4584 ASSERT(head != NULL); 4585 buflist = dev->l2ad_buflist; 4586 ASSERT(buflist != NULL); 4587 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 4588 l2arc_write_callback_t *, cb); 4589 4590 if (zio->io_error != 0) 4591 ARCSTAT_BUMP(arcstat_l2_writes_error); 4592 4593 mutex_enter(&l2arc_buflist_mtx); 4594 4595 /* 4596 * All writes completed, or an error was hit. 4597 */ 4598 for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 4599 ab_prev = list_prev(buflist, ab); 4600 abl2 = ab->b_l2hdr; 4601 4602 /* 4603 * Release the temporary compressed buffer as soon as possible. 4604 */ 4605 if (abl2->b_compress != ZIO_COMPRESS_OFF) 4606 l2arc_release_cdata_buf(ab); 4607 4608 hash_lock = HDR_LOCK(ab); 4609 if (!mutex_tryenter(hash_lock)) { 4610 /* 4611 * This buffer misses out. It may be in a stage 4612 * of eviction. Its ARC_L2_WRITING flag will be 4613 * left set, denying reads to this buffer. 4614 */ 4615 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 4616 continue; 4617 } 4618 4619 if (zio->io_error != 0) { 4620 /* 4621 * Error - drop L2ARC entry. 4622 */ 4623 list_remove(buflist, ab); 4624 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize); 4625 ab->b_l2hdr = NULL; 4626 trim_map_free(abl2->b_dev->l2ad_vdev, abl2->b_daddr, 4627 ab->b_size, 0); 4628 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4629 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4630 } 4631 4632 /* 4633 * Allow ARC to begin reads to this L2ARC entry. 4634 */ 4635 ab->b_flags &= ~ARC_L2_WRITING; 4636 4637 mutex_exit(hash_lock); 4638 } 4639 4640 atomic_inc_64(&l2arc_writes_done); 4641 list_remove(buflist, head); 4642 kmem_cache_free(hdr_cache, head); 4643 mutex_exit(&l2arc_buflist_mtx); 4644 4645 l2arc_do_free_on_write(); 4646 4647 kmem_free(cb, sizeof (l2arc_write_callback_t)); 4648} 4649 4650/* 4651 * A read to a cache device completed. Validate buffer contents before 4652 * handing over to the regular ARC routines. 4653 */ 4654static void 4655l2arc_read_done(zio_t *zio) 4656{ 4657 l2arc_read_callback_t *cb; 4658 arc_buf_hdr_t *hdr; 4659 arc_buf_t *buf; 4660 kmutex_t *hash_lock; 4661 int equal; 4662 4663 ASSERT(zio->io_vd != NULL); 4664 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 4665 4666 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 4667 4668 cb = zio->io_private; 4669 ASSERT(cb != NULL); 4670 buf = cb->l2rcb_buf; 4671 ASSERT(buf != NULL); 4672 4673 hash_lock = HDR_LOCK(buf->b_hdr); 4674 mutex_enter(hash_lock); 4675 hdr = buf->b_hdr; 4676 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 4677 4678 /* 4679 * If the buffer was compressed, decompress it first. 4680 */ 4681 if (cb->l2rcb_compress != ZIO_COMPRESS_OFF) 4682 l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress); 4683 ASSERT(zio->io_data != NULL); 4684 4685 /* 4686 * Check this survived the L2ARC journey. 4687 */ 4688 equal = arc_cksum_equal(buf); 4689 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 4690 mutex_exit(hash_lock); 4691 zio->io_private = buf; 4692 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 4693 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 4694 arc_read_done(zio); 4695 } else { 4696 mutex_exit(hash_lock); 4697 /* 4698 * Buffer didn't survive caching. 
Increment stats and 4699 * reissue to the original storage device. 4700 */ 4701 if (zio->io_error != 0) { 4702 ARCSTAT_BUMP(arcstat_l2_io_error); 4703 } else { 4704 zio->io_error = SET_ERROR(EIO); 4705 } 4706 if (!equal) 4707 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 4708 4709 /* 4710 * If there's no waiter, issue an async i/o to the primary 4711 * storage now. If there *is* a waiter, the caller must 4712 * issue the i/o in a context where it's OK to block. 4713 */ 4714 if (zio->io_waiter == NULL) { 4715 zio_t *pio = zio_unique_parent(zio); 4716 4717 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); 4718 4719 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp, 4720 buf->b_data, zio->io_size, arc_read_done, buf, 4721 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb)); 4722 } 4723 } 4724 4725 kmem_free(cb, sizeof (l2arc_read_callback_t)); 4726} 4727 4728/* 4729 * This is the list priority from which the L2ARC will search for pages to 4730 * cache. This is used within loops (0 .. 2 * ARC_BUFC_NUMLISTS - 1) to cycle through lists in the 4731 * desired order. This order can have a significant effect on cache 4732 * performance. 4733 * 4734 * Currently the metadata lists are hit first, MFU then MRU, followed by 4735 * the data lists. This function returns a locked list, and also returns 4736 * the lock pointer. 4737 */ 4738static list_t * 4739l2arc_list_locked(int list_num, kmutex_t **lock) 4740{ 4741 list_t *list = NULL; 4742 int idx; 4743 4744 ASSERT(list_num >= 0 && list_num < 2 * ARC_BUFC_NUMLISTS); 4745 4746 if (list_num < ARC_BUFC_NUMMETADATALISTS) { 4747 idx = list_num; 4748 list = &arc_mfu->arcs_lists[idx]; 4749 *lock = ARCS_LOCK(arc_mfu, idx); 4750 } else if (list_num < ARC_BUFC_NUMMETADATALISTS * 2) { 4751 idx = list_num - ARC_BUFC_NUMMETADATALISTS; 4752 list = &arc_mru->arcs_lists[idx]; 4753 *lock = ARCS_LOCK(arc_mru, idx); 4754 } else if (list_num < (ARC_BUFC_NUMMETADATALISTS * 2 + 4755 ARC_BUFC_NUMDATALISTS)) { 4756 idx = list_num - ARC_BUFC_NUMMETADATALISTS; 4757 list = &arc_mfu->arcs_lists[idx]; 4758 *lock = ARCS_LOCK(arc_mfu, idx); 4759 } else { 4760 idx = list_num - ARC_BUFC_NUMLISTS; 4761 list = &arc_mru->arcs_lists[idx]; 4762 *lock = ARCS_LOCK(arc_mru, idx); 4763 } 4764 4765 ASSERT(!(MUTEX_HELD(*lock))); 4766 mutex_enter(*lock); 4767 return (list); 4768} 4769 4770/* 4771 * Evict buffers from the device write hand to the distance specified in 4772 * bytes. This distance may span populated buffers, or it may span nothing. 4773 * This is clearing a region on the L2ARC device ready for writing. 4774 * If the 'all' boolean is set, every buffer is evicted. 4775 */ 4776static void 4777l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 4778{ 4779 list_t *buflist; 4780 l2arc_buf_hdr_t *abl2; 4781 arc_buf_hdr_t *ab, *ab_prev; 4782 kmutex_t *hash_lock; 4783 uint64_t taddr; 4784 4785 buflist = dev->l2ad_buflist; 4786 4787 if (buflist == NULL) 4788 return; 4789 4790 if (!all && dev->l2ad_first) { 4791 /* 4792 * This is the first sweep through the device. There is 4793 * nothing to evict. 4794 */ 4795 return; 4796 } 4797 4798 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 4799 /* 4800 * When nearing the end of the device, evict to the end 4801 * before the device write hand jumps to the start.
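 *
 * For example (hypothetical geometry): with l2ad_end = 32GB,
 * l2ad_hand = 31GB and distance = 1GB, the hand is within
 * 2 * distance of the end, so we evict all the way to
 * taddr = l2ad_end rather than merely to hand + distance.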
4802 */ 4803 taddr = dev->l2ad_end; 4804 } else { 4805 taddr = dev->l2ad_hand + distance; 4806 } 4807 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 4808 uint64_t, taddr, boolean_t, all); 4809 4810top: 4811 mutex_enter(&l2arc_buflist_mtx); 4812 for (ab = list_tail(buflist); ab; ab = ab_prev) { 4813 ab_prev = list_prev(buflist, ab); 4814 4815 hash_lock = HDR_LOCK(ab); 4816 if (!mutex_tryenter(hash_lock)) { 4817 /* 4818 * Missed the hash lock. Retry. 4819 */ 4820 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 4821 mutex_exit(&l2arc_buflist_mtx); 4822 mutex_enter(hash_lock); 4823 mutex_exit(hash_lock); 4824 goto top; 4825 } 4826 4827 if (HDR_L2_WRITE_HEAD(ab)) { 4828 /* 4829 * We hit a write head node. Leave it for 4830 * l2arc_write_done(). 4831 */ 4832 list_remove(buflist, ab); 4833 mutex_exit(hash_lock); 4834 continue; 4835 } 4836 4837 if (!all && ab->b_l2hdr != NULL && 4838 (ab->b_l2hdr->b_daddr > taddr || 4839 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 4840 /* 4841 * We've evicted to the target address, 4842 * or the end of the device. 4843 */ 4844 mutex_exit(hash_lock); 4845 break; 4846 } 4847 4848 if (HDR_FREE_IN_PROGRESS(ab)) { 4849 /* 4850 * Already on the path to destruction. 4851 */ 4852 mutex_exit(hash_lock); 4853 continue; 4854 } 4855 4856 if (ab->b_state == arc_l2c_only) { 4857 ASSERT(!HDR_L2_READING(ab)); 4858 /* 4859 * This doesn't exist in the ARC. Destroy. 4860 * arc_hdr_destroy() will call list_remove() 4861 * and decrement arcstat_l2_size. 4862 */ 4863 arc_change_state(arc_anon, ab, hash_lock); 4864 arc_hdr_destroy(ab); 4865 } else { 4866 /* 4867 * Invalidate issued or about to be issued 4868 * reads, since we may be about to write 4869 * over this location. 4870 */ 4871 if (HDR_L2_READING(ab)) { 4872 ARCSTAT_BUMP(arcstat_l2_evict_reading); 4873 ab->b_flags |= ARC_L2_EVICTED; 4874 } 4875 4876 /* 4877 * Tell ARC this no longer exists in L2ARC. 4878 */ 4879 if (ab->b_l2hdr != NULL) { 4880 abl2 = ab->b_l2hdr; 4881 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize); 4882 ab->b_l2hdr = NULL; 4883 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4884 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4885 } 4886 list_remove(buflist, ab); 4887 4888 /* 4889 * This may have been leftover after a 4890 * failed write. 4891 */ 4892 ab->b_flags &= ~ARC_L2_WRITING; 4893 } 4894 mutex_exit(hash_lock); 4895 } 4896 mutex_exit(&l2arc_buflist_mtx); 4897 4898 vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0); 4899 dev->l2ad_evict = taddr; 4900} 4901 4902/* 4903 * Find and write ARC buffers to the L2ARC device. 4904 * 4905 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 4906 * for reading until they have completed writing. 4907 * The headroom_boost is an in-out parameter used to maintain headroom boost 4908 * state between calls to this function. 4909 * 4910 * Returns the number of bytes actually written (which may be smaller than 4911 * the delta by which the device hand has changed due to alignment). 
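 *
 * A sketch of the calling sequence, mirroring l2arc_feed_thread()
 * further below:
 *
 *	size = l2arc_write_size();
 *	l2arc_evict(dev, size, B_FALSE);	(clear the target region)
 *	wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
 *	next = l2arc_write_interval(begin, size, wrote);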
4912 */ 4913static uint64_t 4914l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz, 4915 boolean_t *headroom_boost) 4916{ 4917 arc_buf_hdr_t *ab, *ab_prev, *head; 4918 list_t *list; 4919 uint64_t write_asize, write_psize, write_sz, headroom, 4920 buf_compress_minsz; 4921 void *buf_data; 4922 kmutex_t *list_lock; 4923 boolean_t full; 4924 l2arc_write_callback_t *cb; 4925 zio_t *pio, *wzio; 4926 uint64_t guid = spa_load_guid(spa); 4927 const boolean_t do_headroom_boost = *headroom_boost; 4928 int try; 4929 4930 ASSERT(dev->l2ad_vdev != NULL); 4931 4932 /* Lower the flag now, we might want to raise it again later. */ 4933 *headroom_boost = B_FALSE; 4934 4935 pio = NULL; 4936 write_sz = write_asize = write_psize = 0; 4937 full = B_FALSE; 4938 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 4939 head->b_flags |= ARC_L2_WRITE_HEAD; 4940 4941 ARCSTAT_BUMP(arcstat_l2_write_buffer_iter); 4942 /* 4943 * We will want to try to compress buffers that are at least 2x the 4944 * device sector size. 4945 */ 4946 buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift; 4947 4948 /* 4949 * Copy buffers for L2ARC writing. 4950 */ 4951 mutex_enter(&l2arc_buflist_mtx); 4952 for (try = 0; try < 2 * ARC_BUFC_NUMLISTS; try++) { 4953 uint64_t passed_sz = 0; 4954 4955 list = l2arc_list_locked(try, &list_lock); 4956 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter); 4957 4958 /* 4959 * L2ARC fast warmup. 4960 * 4961 * Until the ARC is warm and starts to evict, read from the 4962 * head of the ARC lists rather than the tail. 4963 */ 4964 if (arc_warm == B_FALSE) 4965 ab = list_head(list); 4966 else 4967 ab = list_tail(list); 4968 if (ab == NULL) 4969 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter); 4970 4971 headroom = target_sz * l2arc_headroom; 4972 if (do_headroom_boost) 4973 headroom = (headroom * l2arc_headroom_boost) / 100; 4974 4975 for (; ab; ab = ab_prev) { 4976 l2arc_buf_hdr_t *l2hdr; 4977 kmutex_t *hash_lock; 4978 uint64_t buf_sz; 4979 4980 if (arc_warm == B_FALSE) 4981 ab_prev = list_next(list, ab); 4982 else 4983 ab_prev = list_prev(list, ab); 4984 ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, ab->b_size); 4985 4986 hash_lock = HDR_LOCK(ab); 4987 if (!mutex_tryenter(hash_lock)) { 4988 ARCSTAT_BUMP(arcstat_l2_write_trylock_fail); 4989 /* 4990 * Skip this buffer rather than waiting. 4991 */ 4992 continue; 4993 } 4994 4995 passed_sz += ab->b_size; 4996 if (passed_sz > headroom) { 4997 /* 4998 * Searched too far. 4999 */ 5000 mutex_exit(hash_lock); 5001 ARCSTAT_BUMP(arcstat_l2_write_passed_headroom); 5002 break; 5003 } 5004 5005 if (!l2arc_write_eligible(guid, ab)) { 5006 mutex_exit(hash_lock); 5007 continue; 5008 } 5009 5010 if ((write_sz + ab->b_size) > target_sz) { 5011 full = B_TRUE; 5012 mutex_exit(hash_lock); 5013 ARCSTAT_BUMP(arcstat_l2_write_full); 5014 break; 5015 } 5016 5017 if (pio == NULL) { 5018 /* 5019 * Insert a dummy header on the buflist so 5020 * l2arc_write_done() can find where the 5021 * write buffers begin without searching. 5022 */ 5023 list_insert_head(dev->l2ad_buflist, head); 5024 5025 cb = kmem_alloc( 5026 sizeof (l2arc_write_callback_t), KM_SLEEP); 5027 cb->l2wcb_dev = dev; 5028 cb->l2wcb_head = head; 5029 pio = zio_root(spa, l2arc_write_done, cb, 5030 ZIO_FLAG_CANFAIL); 5031 ARCSTAT_BUMP(arcstat_l2_write_pios); 5032 } 5033 5034 /* 5035 * Create and add a new L2ARC header. 
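 *
 * The fields set below start from the uncompressed identity mapping
 * (b_compress = ZIO_COMPRESS_OFF, b_asize = b_size); the write loop
 * later in this function may replace b_tmp_cdata and shrink b_asize
 * if l2arc_compress_buf() succeeds.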
5036 */ 5037 l2hdr = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); 5038 l2hdr->b_dev = dev; 5039 ab->b_flags |= ARC_L2_WRITING; 5040 5041 /* 5042 * Temporarily stash the data buffer in b_tmp_cdata. 5043 * The subsequent write step will pick it up from 5044 * there. This is because we can't access ab->b_buf 5045 * without holding the hash_lock, which we in turn 5046 * can't access without holding the ARC list locks 5047 * (which we want to avoid during compression/writing). 5048 */ 5049 l2hdr->b_compress = ZIO_COMPRESS_OFF; 5050 l2hdr->b_asize = ab->b_size; 5051 l2hdr->b_tmp_cdata = ab->b_buf->b_data; 5052 5053 buf_sz = ab->b_size; 5054 ab->b_l2hdr = l2hdr; 5055 5056 list_insert_head(dev->l2ad_buflist, ab); 5057 5058 /* 5059 * Compute and store the buffer cksum before 5060 * writing. On debug the cksum is verified first. 5061 */ 5062 arc_cksum_verify(ab->b_buf); 5063 arc_cksum_compute(ab->b_buf, B_TRUE); 5064 5065 mutex_exit(hash_lock); 5066 5067 write_sz += buf_sz; 5068 } 5069 5070 mutex_exit(list_lock); 5071 5072 if (full == B_TRUE) 5073 break; 5074 } 5075 5076 /* No buffers selected for writing? */ 5077 if (pio == NULL) { 5078 ASSERT0(write_sz); 5079 mutex_exit(&l2arc_buflist_mtx); 5080 kmem_cache_free(hdr_cache, head); 5081 return (0); 5082 } 5083 5084 /* 5085 * Now start writing the buffers. We're starting at the write head 5086 * and work backwards, retracing the course of the buffer selector 5087 * loop above. 5088 */ 5089 for (ab = list_prev(dev->l2ad_buflist, head); ab; 5090 ab = list_prev(dev->l2ad_buflist, ab)) { 5091 l2arc_buf_hdr_t *l2hdr; 5092 uint64_t buf_sz; 5093 5094 /* 5095 * We shouldn't need to lock the buffer here, since we flagged 5096 * it as ARC_L2_WRITING in the previous step, but we must take 5097 * care to only access its L2 cache parameters. In particular, 5098 * ab->b_buf may be invalid by now due to ARC eviction. 5099 */ 5100 l2hdr = ab->b_l2hdr; 5101 l2hdr->b_daddr = dev->l2ad_hand; 5102 5103 if ((ab->b_flags & ARC_L2COMPRESS) && 5104 l2hdr->b_asize >= buf_compress_minsz) { 5105 if (l2arc_compress_buf(l2hdr)) { 5106 /* 5107 * If compression succeeded, enable headroom 5108 * boost on the next scan cycle. 5109 */ 5110 *headroom_boost = B_TRUE; 5111 } 5112 } 5113 5114 /* 5115 * Pick up the buffer data we had previously stashed away 5116 * (and now potentially also compressed). 5117 */ 5118 buf_data = l2hdr->b_tmp_cdata; 5119 buf_sz = l2hdr->b_asize; 5120 5121 /* Compression may have squashed the buffer to zero length. */ 5122 if (buf_sz != 0) { 5123 uint64_t buf_p_sz; 5124 5125 wzio = zio_write_phys(pio, dev->l2ad_vdev, 5126 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 5127 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 5128 ZIO_FLAG_CANFAIL, B_FALSE); 5129 5130 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 5131 zio_t *, wzio); 5132 (void) zio_nowait(wzio); 5133 5134 write_asize += buf_sz; 5135 /* 5136 * Keep the clock hand suitably device-aligned. 5137 */ 5138 buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz); 5139 write_psize += buf_p_sz; 5140 dev->l2ad_hand += buf_p_sz; 5141 } 5142 } 5143 5144 mutex_exit(&l2arc_buflist_mtx); 5145 5146 ASSERT3U(write_asize, <=, target_sz); 5147 ARCSTAT_BUMP(arcstat_l2_writes_sent); 5148 ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize); 5149 ARCSTAT_INCR(arcstat_l2_size, write_sz); 5150 ARCSTAT_INCR(arcstat_l2_asize, write_asize); 5151 vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0); 5152 5153 /* 5154 * Bump device hand to the device start if it is approaching the end.
5155 * l2arc_evict() will already have evicted ahead for this case. 5156 */ 5157 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) { 5158 vdev_space_update(dev->l2ad_vdev, 5159 dev->l2ad_end - dev->l2ad_hand, 0, 0); 5160 dev->l2ad_hand = dev->l2ad_start; 5161 dev->l2ad_evict = dev->l2ad_start; 5162 dev->l2ad_first = B_FALSE; 5163 } 5164 5165 dev->l2ad_writing = B_TRUE; 5166 (void) zio_wait(pio); 5167 dev->l2ad_writing = B_FALSE; 5168 5169 return (write_asize); 5170} 5171 5172/* 5173 * Compresses an L2ARC buffer. 5174 * The data to be compressed must be prefilled in l2hdr->b_tmp_cdata and its 5175 * size in l2hdr->b_asize. This routine tries to compress the data and 5176 * depending on the compression result there are three possible outcomes: 5177 * *) The buffer was incompressible. The original l2hdr contents were left 5178 * untouched and are ready for writing to an L2 device. 5179 * *) The buffer was all-zeros, so there is no need to write it to an L2 5180 * device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is 5181 * set to zero and b_compress is set to ZIO_COMPRESS_EMPTY. 5182 * *) Compression succeeded and b_tmp_cdata was replaced with a temporary 5183 * data buffer which holds the compressed data to be written, and b_asize 5184 * tells us how much data there is. b_compress is set to the appropriate 5185 * compression algorithm. Once writing is done, invoke 5186 * l2arc_release_cdata_buf on this l2hdr to free this temporary buffer. 5187 * 5188 * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the 5189 * buffer was incompressible). 5190 */ 5191static boolean_t 5192l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr) 5193{ 5194 void *cdata; 5195 size_t csize, len; 5196 5197 ASSERT(l2hdr->b_compress == ZIO_COMPRESS_OFF); 5198 ASSERT(l2hdr->b_tmp_cdata != NULL); 5199 5200 len = l2hdr->b_asize; 5201 cdata = zio_data_buf_alloc(len); 5202 csize = zio_compress_data(ZIO_COMPRESS_LZ4, l2hdr->b_tmp_cdata, 5203 cdata, l2hdr->b_asize, (size_t)(1ULL << l2hdr->b_dev->l2ad_vdev->vdev_ashift)); 5204 5205 if (csize == 0) { 5206 /* zero block, indicate that there's nothing to write */ 5207 zio_data_buf_free(cdata, len); 5208 l2hdr->b_compress = ZIO_COMPRESS_EMPTY; 5209 l2hdr->b_asize = 0; 5210 l2hdr->b_tmp_cdata = NULL; 5211 ARCSTAT_BUMP(arcstat_l2_compress_zeros); 5212 return (B_TRUE); 5213 } else if (csize > 0 && csize < len) { 5214 /* 5215 * Compression succeeded, we'll keep the cdata around for 5216 * writing and release it afterwards. 5217 */ 5218 l2hdr->b_compress = ZIO_COMPRESS_LZ4; 5219 l2hdr->b_asize = csize; 5220 l2hdr->b_tmp_cdata = cdata; 5221 ARCSTAT_BUMP(arcstat_l2_compress_successes); 5222 return (B_TRUE); 5223 } else { 5224 /* 5225 * Compression failed, release the compressed buffer. 5226 * l2hdr will be left unmodified. 5227 */ 5228 zio_data_buf_free(cdata, len); 5229 ARCSTAT_BUMP(arcstat_l2_compress_failures); 5230 return (B_FALSE); 5231 } 5232} 5233 5234/* 5235 * Decompresses a zio read back from an l2arc device. On success, the 5236 * underlying zio's io_data buffer is overwritten by the uncompressed 5237 * version. On decompression error (corrupt compressed stream), the 5238 * zio->io_error value is set to signal an I/O error. 5239 * 5240 * Please note that the compressed data stream is not checksummed, so 5241 * if the underlying device is experiencing data corruption, we may feed 5242 * corrupt data to the decompressor, so the decompressor needs to be 5243 * able to handle this situation (LZ4 does). 
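 *
 * The non-empty path below boils down to (a paraphrase of the code,
 * with csize = zio->io_size):
 *
 *	cdata = zio_data_buf_alloc(csize);
 *	bcopy(zio->io_data, cdata, csize);	(stash compressed copy)
 *	zio_decompress_data(c, cdata, zio->io_data, csize, hdr->b_size);
 *	zio_data_buf_free(cdata, csize);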
5244 */ 5245static void 5246l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c) 5247{ 5248 ASSERT(L2ARC_IS_VALID_COMPRESS(c)); 5249 5250 if (zio->io_error != 0) { 5251 /* 5252 * An io error has occurred, just restore the original io 5253 * size in preparation for a main pool read. 5254 */ 5255 zio->io_orig_size = zio->io_size = hdr->b_size; 5256 return; 5257 } 5258 5259 if (c == ZIO_COMPRESS_EMPTY) { 5260 /* 5261 * An empty buffer results in a null zio, which means we 5262 * need to fill its io_data after we're done restoring the 5263 * buffer's contents. 5264 */ 5265 ASSERT(hdr->b_buf != NULL); 5266 bzero(hdr->b_buf->b_data, hdr->b_size); 5267 zio->io_data = zio->io_orig_data = hdr->b_buf->b_data; 5268 } else { 5269 ASSERT(zio->io_data != NULL); 5270 /* 5271 * We copy the compressed data from the start of the arc buffer 5272 * (the zio_read will have pulled in only what we need, the 5273 * rest is garbage which we will overwrite at decompression) 5274 * and then decompress back to the ARC data buffer. This way we 5275 * can minimize copying by simply decompressing back over the 5276 * original compressed data (rather than decompressing to an 5277 * aux buffer and then copying back the uncompressed buffer, 5278 * which is likely to be much larger). 5279 */ 5280 uint64_t csize; 5281 void *cdata; 5282 5283 csize = zio->io_size; 5284 cdata = zio_data_buf_alloc(csize); 5285 bcopy(zio->io_data, cdata, csize); 5286 if (zio_decompress_data(c, cdata, zio->io_data, csize, 5287 hdr->b_size) != 0) 5288 zio->io_error = EIO; 5289 zio_data_buf_free(cdata, csize); 5290 } 5291 5292 /* Restore the expected uncompressed IO size. */ 5293 zio->io_orig_size = zio->io_size = hdr->b_size; 5294} 5295 5296/* 5297 * Releases the temporary b_tmp_cdata buffer in an l2arc header structure. 5298 * This buffer serves as a temporary holder of compressed data while 5299 * the buffer entry is being written to an l2arc device. Once that is 5300 * done, we can dispose of it. 5301 */ 5302static void 5303l2arc_release_cdata_buf(arc_buf_hdr_t *ab) 5304{ 5305 l2arc_buf_hdr_t *l2hdr = ab->b_l2hdr; 5306 5307 if (l2hdr->b_compress == ZIO_COMPRESS_LZ4) { 5308 /* 5309 * If the data was compressed, then we've allocated a 5310 * temporary buffer for it, so now we need to release it. 5311 */ 5312 ASSERT(l2hdr->b_tmp_cdata != NULL); 5313 zio_data_buf_free(l2hdr->b_tmp_cdata, ab->b_size); 5314 } 5315 l2hdr->b_tmp_cdata = NULL; 5316} 5317 5318/* 5319 * This thread feeds the L2ARC at regular intervals. This is the beating 5320 * heart of the L2ARC. 5321 */ 5322static void 5323l2arc_feed_thread(void *dummy __unused) 5324{ 5325 callb_cpr_t cpr; 5326 l2arc_dev_t *dev; 5327 spa_t *spa; 5328 uint64_t size, wrote; 5329 clock_t begin, next = ddi_get_lbolt(); 5330 boolean_t headroom_boost = B_FALSE; 5331 5332 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 5333 5334 mutex_enter(&l2arc_feed_thr_lock); 5335 5336 while (l2arc_thread_exit == 0) { 5337 CALLB_CPR_SAFE_BEGIN(&cpr); 5338 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 5339 next - ddi_get_lbolt()); 5340 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 5341 next = ddi_get_lbolt() + hz; 5342 5343 /* 5344 * Quick check for L2ARC devices.
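 *
 * If no cache devices are present we simply sleep for another
 * interval; actual device selection (and taking the spa config
 * lock) is handled further down by l2arc_dev_get_next().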
5345 */ 5346 mutex_enter(&l2arc_dev_mtx); 5347 if (l2arc_ndev == 0) { 5348 mutex_exit(&l2arc_dev_mtx); 5349 continue; 5350 } 5351 mutex_exit(&l2arc_dev_mtx); 5352 begin = ddi_get_lbolt(); 5353 5354 /* 5355 * This selects the next l2arc device to write to, and in 5356 * doing so the next spa to feed from: dev->l2ad_spa. This 5357 * will return NULL if there are now no l2arc devices or if 5358 * they are all faulted. 5359 * 5360 * If a device is returned, its spa's config lock is also 5361 * held to prevent device removal. l2arc_dev_get_next() 5362 * will grab and release l2arc_dev_mtx. 5363 */ 5364 if ((dev = l2arc_dev_get_next()) == NULL) 5365 continue; 5366 5367 spa = dev->l2ad_spa; 5368 ASSERT(spa != NULL); 5369 5370 /* 5371 * If the pool is read-only then force the feed thread to 5372 * sleep a little longer. 5373 */ 5374 if (!spa_writeable(spa)) { 5375 next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz; 5376 spa_config_exit(spa, SCL_L2ARC, dev); 5377 continue; 5378 } 5379 5380 /* 5381 * Avoid contributing to memory pressure. 5382 */ 5383 if (arc_reclaim_needed()) { 5384 ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 5385 spa_config_exit(spa, SCL_L2ARC, dev); 5386 continue; 5387 } 5388 5389 ARCSTAT_BUMP(arcstat_l2_feeds); 5390 5391 size = l2arc_write_size(); 5392 5393 /* 5394 * Evict L2ARC buffers that will be overwritten. 5395 */ 5396 l2arc_evict(dev, size, B_FALSE); 5397 5398 /* 5399 * Write ARC buffers. 5400 */ 5401 wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost); 5402 5403 /* 5404 * Calculate interval between writes. 5405 */ 5406 next = l2arc_write_interval(begin, size, wrote); 5407 spa_config_exit(spa, SCL_L2ARC, dev); 5408 } 5409 5410 l2arc_thread_exit = 0; 5411 cv_broadcast(&l2arc_feed_thr_cv); 5412 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 5413 thread_exit(); 5414} 5415 5416boolean_t 5417l2arc_vdev_present(vdev_t *vd) 5418{ 5419 l2arc_dev_t *dev; 5420 5421 mutex_enter(&l2arc_dev_mtx); 5422 for (dev = list_head(l2arc_dev_list); dev != NULL; 5423 dev = list_next(l2arc_dev_list, dev)) { 5424 if (dev->l2ad_vdev == vd) 5425 break; 5426 } 5427 mutex_exit(&l2arc_dev_mtx); 5428 5429 return (dev != NULL); 5430} 5431 5432/* 5433 * Add a vdev for use by the L2ARC. By this point the spa has already 5434 * validated the vdev and opened it. 5435 */ 5436void 5437l2arc_add_vdev(spa_t *spa, vdev_t *vd) 5438{ 5439 l2arc_dev_t *adddev; 5440 5441 ASSERT(!l2arc_vdev_present(vd)); 5442 5443 vdev_ashift_optimize(vd); 5444 5445 /* 5446 * Create a new l2arc device entry. 5447 */ 5448 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); 5449 adddev->l2ad_spa = spa; 5450 adddev->l2ad_vdev = vd; 5451 adddev->l2ad_start = VDEV_LABEL_START_SIZE; 5452 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd); 5453 adddev->l2ad_hand = adddev->l2ad_start; 5454 adddev->l2ad_evict = adddev->l2ad_start; 5455 adddev->l2ad_first = B_TRUE; 5456 adddev->l2ad_writing = B_FALSE; 5457 5458 /* 5459 * This is a list of all ARC buffers that are still valid on the 5460 * device. 5461 */ 5462 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); 5463 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), 5464 offsetof(arc_buf_hdr_t, b_l2node)); 5465 5466 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand); 5467 5468 /* 5469 * Add device to global list 5470 */ 5471 mutex_enter(&l2arc_dev_mtx); 5472 list_insert_head(l2arc_dev_list, adddev); 5473 atomic_inc_64(&l2arc_ndev); 5474 mutex_exit(&l2arc_dev_mtx); 5475} 5476 5477/* 5478 * Remove a vdev from the L2ARC. 
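 *
 * The steps below are: locate the l2arc_dev_t by vdev, unlink it
 * from the global list (resetting the rotor), flush every buffer
 * it still tracks via l2arc_evict(remdev, 0, B_TRUE), and free it.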
5479 */ 5480void 5481l2arc_remove_vdev(vdev_t *vd) 5482{ 5483 l2arc_dev_t *dev, *nextdev, *remdev = NULL; 5484 5485 /* 5486 * Find the device by vdev 5487 */ 5488 mutex_enter(&l2arc_dev_mtx); 5489 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { 5490 nextdev = list_next(l2arc_dev_list, dev); 5491 if (vd == dev->l2ad_vdev) { 5492 remdev = dev; 5493 break; 5494 } 5495 } 5496 ASSERT(remdev != NULL); 5497 5498 /* 5499 * Remove device from global list 5500 */ 5501 list_remove(l2arc_dev_list, remdev); 5502 l2arc_dev_last = NULL; /* may have been invalidated */ 5503 atomic_dec_64(&l2arc_ndev); 5504 mutex_exit(&l2arc_dev_mtx); 5505 5506 /* 5507 * Clear all buflists and ARC references. L2ARC device flush. 5508 */ 5509 l2arc_evict(remdev, 0, B_TRUE); 5510 list_destroy(remdev->l2ad_buflist); 5511 kmem_free(remdev->l2ad_buflist, sizeof (list_t)); 5512 kmem_free(remdev, sizeof (l2arc_dev_t)); 5513} 5514 5515void 5516l2arc_init(void) 5517{ 5518 l2arc_thread_exit = 0; 5519 l2arc_ndev = 0; 5520 l2arc_writes_sent = 0; 5521 l2arc_writes_done = 0; 5522 5523 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); 5524 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); 5525 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); 5526 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); 5527 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); 5528 5529 l2arc_dev_list = &L2ARC_dev_list; 5530 l2arc_free_on_write = &L2ARC_free_on_write; 5531 list_create(l2arc_dev_list, sizeof (l2arc_dev_t), 5532 offsetof(l2arc_dev_t, l2ad_node)); 5533 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), 5534 offsetof(l2arc_data_free_t, l2df_list_node)); 5535} 5536 5537void 5538l2arc_fini(void) 5539{ 5540 /* 5541 * This is called from dmu_fini(), which is called from spa_fini(); 5542 * Because of this, we can assume that all l2arc devices have 5543 * already been removed when the pools themselves were removed. 5544 */ 5545 5546 l2arc_do_free_on_write(); 5547 5548 mutex_destroy(&l2arc_feed_thr_lock); 5549 cv_destroy(&l2arc_feed_thr_cv); 5550 mutex_destroy(&l2arc_dev_mtx); 5551 mutex_destroy(&l2arc_buflist_mtx); 5552 mutex_destroy(&l2arc_free_on_write_mtx); 5553 5554 list_destroy(l2arc_dev_list); 5555 list_destroy(l2arc_free_on_write); 5556} 5557 5558void 5559l2arc_start(void) 5560{ 5561 if (!(spa_mode_global & FWRITE)) 5562 return; 5563 5564 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, 5565 TS_RUN, minclsyspri); 5566} 5567 5568void 5569l2arc_stop(void) 5570{ 5571 if (!(spa_mode_global & FWRITE)) 5572 return; 5573 5574 mutex_enter(&l2arc_feed_thr_lock); 5575 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ 5576 l2arc_thread_exit = 1; 5577 while (l2arc_thread_exit != 0) 5578 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); 5579 mutex_exit(&l2arc_feed_thr_lock); 5580} 5581
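/*
 * For reference, an illustrative (not prescriptive) configuration of
 * the L2ARC tunables discussed above, using the vfs.zfs names assumed
 * from the TUNABLE/sysctl conventions in this file, set from
 * /boot/loader.conf on FreeBSD:
 *
 *	vfs.zfs.l2arc_write_max=8388608		(8MB per interval)
 *	vfs.zfs.l2arc_feed_secs=1
 *	vfs.zfs.prefetch_disable=0
 */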