swap_pager.c revision 302027
/*-
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 *	from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *	@(#)vm_swap.c	8.5 (Berkeley) 2/17/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/vm/swap_pager.c 302027 2016-06-20 09:06:50Z kib $");

#include "opt_swap.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <geom/geom.h>

/*
 * SWB_NPAGES must be a power of 2.  It may be set to 1, 2, 4, 8, 16
 * or 32 pages per allocation.
 * The 32-page limit is due to the radix code (kern/subr_blist.c).
 */
#ifndef MAX_PAGEOUT_CLUSTER
#define	MAX_PAGEOUT_CLUSTER 16
#endif

#if !defined(SWB_NPAGES)
#define	SWB_NPAGES MAX_PAGEOUT_CLUSTER
#endif

/*
 * The swblock structure maps an object and a small, fixed-size range
 * of page indices to disk addresses within a swap area.
 * The collection of these mappings is implemented as a hash table.
 * Unused disk addresses within a swap area are allocated and managed
 * using a blist.
 */
#define	SWCORRECT(n)	(sizeof(void *) * (n) / sizeof(daddr_t))
#define	SWAP_META_PAGES		(SWB_NPAGES * 2)
#define	SWAP_META_MASK		(SWAP_META_PAGES - 1)

struct swblock {
	struct swblock	*swb_hnext;
	vm_object_t	swb_object;
	vm_pindex_t	swb_index;
	int		swb_count;
	daddr_t		swb_pages[SWAP_META_PAGES];
};
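
/*
 * Illustrative sketch (editor's note, not part of the original code):
 * with the default SWB_NPAGES of 16, SWAP_META_PAGES is 32, so each
 * swblock covers a 32-page-aligned window of an object's page index
 * space.  A lookup splits a page index into the swblock base index
 * and a slot within swb_pages[], roughly:
 *
 *	vm_pindex_t base = pindex & ~(vm_pindex_t)SWAP_META_MASK;
 *	int slot = pindex & SWAP_META_MASK;
 *	daddr_t blk = swap->swb_pages[slot];	(SWAPBLK_NONE if unassigned)
 *
 * This mirrors the arithmetic used by swp_pager_hash() and
 * swp_pager_meta_ctl() below.
 */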

static MALLOC_DEFINE(M_VMPGDATA, "vm_pgdata", "swap pager private data");
static struct mtx sw_dev_mtx;
static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq);
static struct swdevt *swdevhd;	/* Allocate from here next */
static int nswapdev;		/* Number of swap devices */
int swap_pager_avail;
static int swdev_syscall_active = 0; /* serialize swap(on|off) */

static vm_ooffset_t swap_total;
SYSCTL_QUAD(_vm, OID_AUTO, swap_total, CTLFLAG_RD, &swap_total, 0,
    "Total amount of available swap storage.");
static vm_ooffset_t swap_reserved;
SYSCTL_QUAD(_vm, OID_AUTO, swap_reserved, CTLFLAG_RD, &swap_reserved, 0,
    "Amount of swap storage needed to back all allocated anonymous memory.");
static int overcommit = 0;
SYSCTL_INT(_vm, OID_AUTO, overcommit, CTLFLAG_RW, &overcommit, 0,
    "Configure virtual memory overcommit behavior. See tuning(7) "
    "for details.");
static unsigned long swzone;
SYSCTL_ULONG(_vm, OID_AUTO, swzone, CTLFLAG_RD, &swzone, 0,
    "Actual size of swap metadata zone");
static unsigned long swap_maxpages;
SYSCTL_ULONG(_vm, OID_AUTO, swap_maxpages, CTLFLAG_RD, &swap_maxpages, 0,
    "Maximum amount of swap supported");

/* bits from overcommit */
#define	SWAP_RESERVE_FORCE_ON		(1 << 0)
#define	SWAP_RESERVE_RLIMIT_ON		(1 << 1)
#define	SWAP_RESERVE_ALLOW_NONWIRED	(1 << 2)

int
swap_reserve(vm_ooffset_t incr)
{

	return (swap_reserve_by_cred(incr, curthread->td_ucred));
}

int
swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
{
	vm_ooffset_t r, s;
	int res, error;
	static int curfail;
	static struct timeval lastfail;
	struct uidinfo *uip;

	uip = cred->cr_ruidinfo;

	if (incr & PAGE_MASK)
		panic("swap_reserve: & PAGE_MASK");

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(curproc);
		error = racct_add(curproc, RACCT_SWAP, incr);
		PROC_UNLOCK(curproc);
		if (error != 0)
			return (0);
	}
#endif

	res = 0;
	mtx_lock(&sw_dev_mtx);
	r = swap_reserved + incr;
	if (overcommit & SWAP_RESERVE_ALLOW_NONWIRED) {
		s = cnt.v_page_count - cnt.v_free_reserved - cnt.v_wire_count;
		s *= PAGE_SIZE;
	} else
		s = 0;
	s += swap_total;
	if ((overcommit & SWAP_RESERVE_FORCE_ON) == 0 || r <= s ||
	    (error = priv_check(curthread, PRIV_VM_SWAP_NOQUOTA)) == 0) {
		res = 1;
		swap_reserved = r;
	}
	mtx_unlock(&sw_dev_mtx);

	if (res) {
		PROC_LOCK(curproc);
		UIDINFO_VMSIZE_LOCK(uip);
		if ((overcommit & SWAP_RESERVE_RLIMIT_ON) != 0 &&
		    uip->ui_vmsize + incr > lim_cur(curproc, RLIMIT_SWAP) &&
		    priv_check(curthread, PRIV_VM_SWAP_NORLIMIT))
			res = 0;
		else
			uip->ui_vmsize += incr;
		UIDINFO_VMSIZE_UNLOCK(uip);
		PROC_UNLOCK(curproc);
		if (!res) {
			mtx_lock(&sw_dev_mtx);
			swap_reserved -= incr;
			mtx_unlock(&sw_dev_mtx);
		}
	}
	if (!res && ppsratecheck(&lastfail, &curfail, 1)) {
		printf("uid %d, pid %d: swap reservation for %jd bytes failed\n",
		    uip->ui_uid, curproc->p_pid, incr);
	}

#ifdef RACCT
	if (!res) {
		PROC_LOCK(curproc);
		racct_sub(curproc, RACCT_SWAP, incr);
		PROC_UNLOCK(curproc);
	}
#endif

	return (res);
}

void
swap_reserve_force(vm_ooffset_t incr)
{
	struct uidinfo *uip;

	mtx_lock(&sw_dev_mtx);
	swap_reserved += incr;
	mtx_unlock(&sw_dev_mtx);

#ifdef RACCT
	PROC_LOCK(curproc);
	racct_add_force(curproc, RACCT_SWAP, incr);
	PROC_UNLOCK(curproc);
#endif

	uip = curthread->td_ucred->cr_ruidinfo;
	PROC_LOCK(curproc);
	UIDINFO_VMSIZE_LOCK(uip);
	uip->ui_vmsize += incr;
	UIDINFO_VMSIZE_UNLOCK(uip);
	PROC_UNLOCK(curproc);
}

void
swap_release(vm_ooffset_t decr)
{
	struct ucred *cred;

	PROC_LOCK(curproc);
	cred = curthread->td_ucred;
	swap_release_by_cred(decr, cred);
	PROC_UNLOCK(curproc);
}
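
/*
 * Illustrative sketch (editor's note, not part of the original code):
 * callers charge swap before creating pageable anonymous memory and
 * release the same page-aligned amount when it is destroyed, e.g.:
 *
 *	if (!swap_reserve(ptoa(npages)))
 *		return (ENOMEM);
 *	...
 *	swap_release(ptoa(npages));
 *
 * Both amounts must be multiples of PAGE_SIZE; the routines panic
 * otherwise.
 */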

void
swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred)
{
	struct uidinfo *uip;

	uip = cred->cr_ruidinfo;

	if (decr & PAGE_MASK)
		panic("swap_release: & PAGE_MASK");

	mtx_lock(&sw_dev_mtx);
	if (swap_reserved < decr)
		panic("swap_reserved < decr");
	swap_reserved -= decr;
	mtx_unlock(&sw_dev_mtx);

	UIDINFO_VMSIZE_LOCK(uip);
	if (uip->ui_vmsize < decr)
		printf("negative vmsize for uid = %d\n", uip->ui_uid);
	uip->ui_vmsize -= decr;
	UIDINFO_VMSIZE_UNLOCK(uip);

	racct_sub_cred(cred, RACCT_SWAP, decr);
}

static void swapdev_strategy(struct buf *, struct swdevt *sw);

#define	SWM_FREE	0x02	/* free, period			*/
#define	SWM_POP		0x04	/* pop out			*/

int swap_pager_full = 2;	/* swap space exhaustion (task killing) */
static int swap_pager_almost_full = 1; /* swap space exhaustion (w/hysteresis)*/
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/

static struct swblock **swhash;
static int swhash_mask;
static struct mtx swhash_mtx;

static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
static struct sx sw_alloc_sx;

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
    CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define	NOBJLISTS		8

#define	NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])

static struct mtx sw_alloc_mtx;	/* protect list manipulation */
static struct pagerlst	swap_pager_object_list[NOBJLISTS];
static uma_zone_t	swap_zone;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */
static vm_object_t
	swap_pager_alloc(void *handle, vm_ooffset_t size,
	    vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
static void	swap_pager_dealloc(vm_object_t object);
static int	swap_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void	swap_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t
	swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after);
static void	swap_pager_init(void);
static void	swap_pager_unswapped(vm_page_t);
static void	swap_pager_swapoff(struct swdevt *sp);

struct pagerops swappagerops = {
	.pgo_init =	swap_pager_init,	/* early system initialization of pager	*/
	.pgo_alloc =	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	.pgo_dealloc =	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	.pgo_getpages =	swap_pager_getpages,	/* pagein				*/
	.pgo_putpages =	swap_pager_putpages,	/* pageout				*/
	.pgo_haspage =	swap_pager_haspage,	/* get backing store status for page	*/
	.pgo_pageunswapped = swap_pager_unswapped,	/* remove swap related to page	*/
};

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
static int dmmax;
static int nswap_lowat = 128;	/* in pages, swap_pager_almost_full warn */
static int nswap_hiwat = 512;	/* in pages, swap_pager_almost_full warn */

SYSCTL_INT(_vm, OID_AUTO, dmmax,
    CTLFLAG_RD, &dmmax, 0, "Maximum size of a swap block");

static void	swp_sizecheck(void);
static void	swp_pager_async_iodone(struct buf *bp);
static int	swapongeom(struct thread *, struct vnode *);
static int	swaponvp(struct thread *, struct vnode *, u_long);
static int	swapoff_one(struct swdevt *sp, struct ucred *cred);

/*
 * Swap bitmap functions
 */
static void	swp_pager_freeswapspace(daddr_t blk, int npages);
static daddr_t	swp_pager_getswapspace(int npages);

/*
 * Metadata functions
 */
static struct swblock **swp_pager_hash(vm_object_t object, vm_pindex_t index);
static void swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free(vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free_all(vm_object_t);
static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);

static void
swp_pager_free_nrpage(vm_page_t m)
{

	vm_page_lock(m);
	if (m->wire_count == 0)
		vm_page_free(m);
	vm_page_unlock(m);
}

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 */
static void
swp_sizecheck(void)
{

	if (swap_pager_avail < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			printf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (swap_pager_avail > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}

/*
 * SWP_PAGER_HASH() -	hash swap meta data
 *
 *	This is a helper function which hashes the swapblk given
 *	the object and page index.  It returns a pointer to a pointer
 *	to the swblock entry, or a pointer to a NULL pointer if it
 *	could not find a swapblk.
 */
static struct swblock **
swp_pager_hash(vm_object_t object, vm_pindex_t index)
{
	struct swblock **pswap;
	struct swblock *swap;

	index &= ~(vm_pindex_t)SWAP_META_MASK;
	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
	while ((swap = *pswap) != NULL) {
		if (swap->swb_object == object &&
		    swap->swb_index == index) {
			break;
		}
		pswap = &swap->swb_hnext;
	}
	return (pswap);
}
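
/*
 * Illustrative sketch (editor's note, not part of the original code):
 * the double-pointer return lets a caller unlink an entry from the
 * hash chain without re-walking it.  The typical pattern under
 * swhash_mtx is roughly:
 *
 *	pswap = swp_pager_hash(object, pindex);
 *	if ((swap = *pswap) != NULL) {
 *		...use or modify swap->swb_pages[]...
 *		if (swap->swb_count == 0) {
 *			*pswap = swap->swb_hnext;	(unlink)
 *			uma_zfree(swap_zone, swap);
 *		}
 *	}
 *
 * The metadata routines below all follow this pattern.
 */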

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */
static void
swap_pager_init(void)
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	mtx_init(&sw_alloc_mtx, "swap_pager list", NULL, MTX_DEF);
	mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF);
	sx_init(&sw_alloc_sx, "swspsx");

	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */
	dmmax = SWB_NPAGES * 2;
}

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */
void
swap_pager_swap_init(void)
{
	unsigned long n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the bp->b_pages[]
	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */
	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	mtx_lock(&pbuf_mtx);
	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;
	mtx_unlock(&pbuf_mtx);

	/*
	 * Initialize our zone.  Right now I'm just guessing on the number
	 * we need based on the number of pages in the system.  Each swblock
	 * can hold 32 pages, so this is probably overkill.  This reservation
	 * is typically limited to around 32MB by default.
	 */
	n = cnt.v_page_count / 2;
	if (maxswzone && n > maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;
	swap_zone = uma_zcreate("SWAPMETA", sizeof(struct swblock), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
	if (swap_zone == NULL)
		panic("failed to create swap_zone.");
	do {
		if (uma_zone_reserve_kva(swap_zone, n))
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);
	if (n2 != n)
		printf("Swap zone entries reduced from %lu to %lu.\n", n2, n);
	swap_maxpages = n * SWAP_META_PAGES;
	swzone = n * sizeof(struct swblock);
	n2 = n;

	/*
	 * Initialize our meta-data hash table.  The swapper does not need to
	 * be quite as efficient as the VM system, so we do not use an
	 * oversized hash table.
 *
 *	n: 		size of hash table, must be power of 2
 *	swhash_mask:	hash table index mask
 */
	for (n = 1; n < n2 / 8; n *= 2)
		;
	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);
	swhash_mask = n - 1;
	mtx_init(&swhash_mtx, "swap_pager swhash", NULL, MTX_DEF);
}

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_build().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.
 *
 * MPSAFE
 */
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;
	vm_pindex_t pindex;

	pindex = OFF_TO_IDX(offset + PAGE_MASK + size);
	if (handle) {
		mtx_lock(&Giant);
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() with regard to the lookup
		 * of the handle.
		 */
		sx_xlock(&sw_alloc_sx);
		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
		if (object == NULL) {
			if (cred != NULL) {
				if (!swap_reserve_by_cred(size, cred)) {
					sx_xunlock(&sw_alloc_sx);
					mtx_unlock(&Giant);
					return (NULL);
				}
				crhold(cred);
			}
			object = vm_object_allocate(OBJT_DEFAULT, pindex);
			VM_OBJECT_WLOCK(object);
			object->handle = handle;
			if (cred != NULL) {
				object->cred = cred;
				object->charge = size;
			}
			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
			VM_OBJECT_WUNLOCK(object);
		}
		sx_xunlock(&sw_alloc_sx);
		mtx_unlock(&Giant);
	} else {
		if (cred != NULL) {
			if (!swap_reserve_by_cred(size, cred))
				return (NULL);
			crhold(cred);
		}
		object = vm_object_allocate(OBJT_DEFAULT, pindex);
		VM_OBJECT_WLOCK(object);
		if (cred != NULL) {
			object->cred = cred;
			object->charge = size;
		}
		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
		VM_OBJECT_WUNLOCK(object);
	}
	return (object);
}
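
/*
 * Illustrative sketch (editor's note, not part of the original code):
 * the pindex computation above rounds the object size up to whole
 * pages.  With 4KB pages (PAGE_MASK == 0xfff), for example:
 *
 *	offset = 0, size = 5000  ->  OFF_TO_IDX(0 + 0xfff + 5000) = 2
 *
 * i.e. a two-page object backs 5000 bytes starting at offset 0.
 */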

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	The object must be locked.
 */
static void
swap_pager_dealloc(vm_object_t object)
{

	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */
	if (object->handle != NULL) {
		mtx_lock(&sw_alloc_mtx);
		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
		mtx_unlock(&sw_alloc_mtx);
	}

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	swp_pager_meta_free_all(object);
	object->handle = NULL;
	object->type = OBJT_DEAD;
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	This routine may not sleep.
 *
 *	We allocate in round-robin fashion from the configured devices.
 */
static daddr_t
swp_pager_getswapspace(int npages)
{
	daddr_t blk;
	struct swdevt *sp;
	int i;

	blk = SWAPBLK_NONE;
	mtx_lock(&sw_dev_mtx);
	sp = swdevhd;
	for (i = 0; i < nswapdev; i++) {
		if (sp == NULL)
			sp = TAILQ_FIRST(&swtailq);
		if (!(sp->sw_flags & SW_CLOSING)) {
			blk = blist_alloc(sp->sw_blist, npages);
			if (blk != SWAPBLK_NONE) {
				blk += sp->sw_first;
				sp->sw_used += npages;
				swap_pager_avail -= npages;
				swp_sizecheck();
				swdevhd = TAILQ_NEXT(sp, sw_list);
				goto done;
			}
		}
		sp = TAILQ_NEXT(sp, sw_list);
	}
	if (swap_pager_full != 2) {
		printf("swap_pager_getswapspace(%d): failed\n", npages);
		swap_pager_full = 2;
		swap_pager_almost_full = 1;
	}
	swdevhd = NULL;
done:
	mtx_unlock(&sw_dev_mtx);
	return (blk);
}

static int
swp_pager_isondev(daddr_t blk, struct swdevt *sp)
{

	return (blk >= sp->sw_first && blk < sp->sw_end);
}

static void
swp_pager_strategy(struct buf *bp)
{
	struct swdevt *sp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (bp->b_blkno >= sp->sw_first && bp->b_blkno < sp->sw_end) {
			mtx_unlock(&sw_dev_mtx);
			if ((sp->sw_flags & SW_UNMAPPED) != 0 &&
			    unmapped_buf_allowed) {
				bp->b_kvaalloc = bp->b_data;
				bp->b_data = unmapped_buf;
				bp->b_kvabase = unmapped_buf;
				bp->b_offset = 0;
				bp->b_flags |= B_UNMAPPED;
			} else {
				pmap_qenter((vm_offset_t)bp->b_data,
				    &bp->b_pages[0], bp->b_bcount / PAGE_SIZE);
			}
			sp->sw_strategy(bp, sp);
			return;
		}
	}
	panic("Swapdev not found");
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	This routine may not sleep.
 */
static void
swp_pager_freeswapspace(daddr_t blk, int npages)
{
	struct swdevt *sp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (blk >= sp->sw_first && blk < sp->sw_end) {
			sp->sw_used -= npages;
			/*
			 * If we are attempting to stop swapping on
			 * this device, we don't want to mark any
			 * blocks free lest they be reused.
			 */
			if ((sp->sw_flags & SW_CLOSING) == 0) {
				blist_free(sp->sw_blist, blk - sp->sw_first,
				    npages);
				swap_pager_avail += npages;
				swp_sizecheck();
			}
			mtx_unlock(&sw_dev_mtx);
			return;
		}
	}
	panic("Swapdev not found");
}
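
/*
 * Illustrative sketch (editor's note, not part of the original code):
 * each device's blist hands out device-relative block numbers, which
 * the allocator converts to a single global swap block space by adding
 * sp->sw_first; freeing subtracts it again:
 *
 *	blk = blist_alloc(sp->sw_blist, npages) + sp->sw_first;
 *	...
 *	blist_free(sp->sw_blist, blk - sp->sw_first, npages);
 *
 * Devices occupy disjoint [sw_first, sw_end) ranges of that space, so
 * swp_pager_isondev() is a simple range check.
 */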

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	The object must be locked.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{

	swp_pager_meta_free(object, start, size);
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	VM_OBJECT_WLOCK(object);
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg, start - beg);
					VM_OBJECT_WUNLOCK(object);
					return (-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	VM_OBJECT_WUNLOCK(object);
	return (0);
}

/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to sleep.  It may sleep allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	The source object contains no vm_page_t's (which is just as well).
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked.
 *	Both object locks may temporarily be released.
 */
void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
    vm_pindex_t offset, int destroysource)
{
	vm_pindex_t i;

	VM_OBJECT_ASSERT_WLOCKED(srcobject);
	VM_OBJECT_ASSERT_WLOCKED(dstobject);

	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */
	if (destroysource) {
		if (srcobject->handle != NULL) {
			mtx_lock(&sw_alloc_mtx);
			TAILQ_REMOVE(
			    NOBJLIST(srcobject->handle),
			    srcobject,
			    pager_object_list
			);
			mtx_unlock(&sw_alloc_mtx);
		}
	}

	/*
	 * transfer source to destination.
	 */
	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */
		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(
			    srcobject,
			    i + offset,
			    SWM_POP
			);

			if (srcaddr != SWAPBLK_NONE) {
				/*
				 * swp_pager_meta_build() can sleep.
				 */
				vm_object_pip_add(srcobject, 1);
				VM_OBJECT_WUNLOCK(srcobject);
				vm_object_pip_add(dstobject, 1);
				swp_pager_meta_build(dstobject, i, srcaddr);
				vm_object_pip_wakeup(dstobject);
				VM_OBJECT_WLOCK(srcobject);
				vm_object_pip_wakeup(srcobject);
			}
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the sourceblock.
			 */

			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */
	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
}

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */
static boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after)
{
	daddr_t blk0;

	VM_OBJECT_ASSERT_LOCKED(object);
	/*
	 * do we have good backing store at the requested index ?
	 */
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */
	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */
	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}
	return (TRUE);
}
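
/*
 * Illustrative sketch (editor's note, not part of the original code):
 * a caller probing around a faulting index might do roughly:
 *
 *	int before, after;
 *
 *	if (swap_pager_haspage(object, pindex, &before, &after)) {
 *		...up to 'before' pages preceding pindex and 'after'
 *		pages following it are backed by swap blocks contiguous
 *		with pindex's block and can be clustered into one I/O...
 *	}
 *
 * Both scans are capped at SWB_NPAGES/2 pages in each direction.
 */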

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation.
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not sleep.
 *
 *	The object containing the page must be locked.
 */
static void
swap_pager_unswapped(vm_page_t m)
{

	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}

/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *	Attempt to retrieve (m, count) pages from backing store, but make
 *	sure we retrieve at least m[reqpage].  We try to load in as large
 *	a chunk surrounding m[reqpage] as is contiguous in swap and which
 *	belongs to the same object.
 *
 *	The code is designed for asynchronous operation and
 *	immediate-notification of 'reqpage' but tends not to be
 *	used that way.  Please do not optimize-out this algorithmic
 *	feature, I intend to improve on it in the future.
 *
 *	The parent has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The parent has BUSY'd the pages.  We should return with 'm'
 *	left busy, but the others adjusted.
 */
static int
swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	struct buf *bp;
	vm_page_t mreq;
	int i;
	int j;
	daddr_t blk;

	mreq = m[reqpage];

	KASSERT(mreq->object == object,
	    ("swap_pager_getpages: object mismatch %p/%p",
	    object, mreq->object));

	/*
	 * Calculate range to retrieve.  The pages have already been assigned
	 * their swapblks.  We require a *contiguous* range but we know it to
	 * not span devices.  If we do not supply it, bad things
	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
	 * loops are set up such that the case(s) are handled implicitly.
	 *
	 * The swp_*() calls must be made with the object locked.
	 */
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

	for (i = reqpage - 1; i >= 0; --i) {
		daddr_t iblk;

		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
		if (blk != iblk + (reqpage - i))
			break;
	}
	++i;

	for (j = reqpage + 1; j < count; ++j) {
		daddr_t jblk;

		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
		if (blk != jblk - (j - reqpage))
			break;
	}

	/*
	 * free pages outside our collection range.  Note: we never free
	 * mreq, it must remain busy throughout.
	 */
	if (0 < i || j < count) {
		int k;

		for (k = 0; k < i; ++k)
			swp_pager_free_nrpage(m[k]);
		for (k = j; k < count; ++k)
			swp_pager_free_nrpage(m[k]);
	}

	/*
	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
	 * still busy, but the others unbusied.
	 */
	if (blk == SWAPBLK_NONE)
		return (VM_PAGER_FAIL);

	/*
	 * Getpbuf() can sleep.
	 */
	VM_OBJECT_WUNLOCK(object);
	/*
	 * Get a swap buffer header to perform the IO
	 */
	bp = getpbuf(&nsw_rcount);
	bp->b_flags |= B_PAGING;

	bp->b_iocmd = BIO_READ;
	bp->b_iodone = swp_pager_async_iodone;
	bp->b_rcred = crhold(thread0.td_ucred);
	bp->b_wcred = crhold(thread0.td_ucred);
	bp->b_blkno = blk - (reqpage - i);
	bp->b_bcount = PAGE_SIZE * (j - i);
	bp->b_bufsize = PAGE_SIZE * (j - i);
	bp->b_pager.pg_reqpage = reqpage - i;

	VM_OBJECT_WLOCK(object);
	{
		int k;

		for (k = i; k < j; ++k) {
			bp->b_pages[k - i] = m[k];
			m[k]->oflags |= VPO_SWAPINPROG;
		}
	}
	bp->b_npages = j - i;

	PCPU_INC(cnt.v_swapin);
	PCPU_ADD(cnt.v_swappgsin, bp->b_npages);

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */
	vm_object_pip_add(object, bp->b_npages);
	VM_OBJECT_WUNLOCK(object);

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 *
	 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
	 */
	BUF_KERNPROC(bp);
	swp_pager_strategy(bp);

	/*
	 * wait for the page we want to complete.  VPO_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 */
	VM_OBJECT_WLOCK(object);
	while ((mreq->oflags & VPO_SWAPINPROG) != 0) {
		mreq->oflags |= VPO_SWAPSLEEP;
		PCPU_INC(cnt.v_intrans);
		if (VM_OBJECT_SLEEP(object, &object->paging_in_progress, PSWP,
		    "swread", hz * 20)) {
			printf(
"swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
			    bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
		}
	}

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */
	if (mreq->valid != VM_PAGE_BITS_ALL) {
		return (VM_PAGER_ERROR);
	} else {
		return (VM_PAGER_OK);
	}

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}
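
/*
 * Illustrative sketch (editor's note, not part of the original code):
 * a single-page pagein through this routine, as a fault handler might
 * issue it, looks roughly like:
 *
 *	vm_page_t m;	(busied page belonging to 'object')
 *
 *	rv = swap_pager_getpages(object, &m, 1, 0);
 *	if (rv != VM_PAGER_OK)
 *		...handle VM_PAGER_FAIL (no swap assigned) or
 *		   VM_PAGER_ERROR (unrecoverable I/O error)...
 *
 * On success m is still busied and fully valid; with count > 1 the
 * non-requested pages have already been released by the completion
 * routine.
 */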

/*
 *	swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in VOP_STRATEGY(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */
void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    int flags, int *rtvals)
{
	int i, n;
	boolean_t sync;

	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		    object,
		    m[0]->object
		);
	}

	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */
	if (object->type != OBJT_SWAP)
		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
	VM_OBJECT_WUNLOCK(object);

	n = 0;
	if (curproc != pageproc)
		sync = TRUE;
	else
		sync = (flags & VM_PAGER_PUT_SYNC) != 0;

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */
	mtx_lock(&pbuf_mtx);
	if (swap_async_max != nsw_wcount_async_max) {
		int n;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 */
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		}
	}
	mtx_unlock(&pbuf_mtx);

	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */
	for (i = 0; i < count; i += n) {
		int j;
		struct buf *bp;
		daddr_t blk;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */
		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			continue;
		}

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 */
		if (sync == TRUE) {
			bp = getpbuf(&nsw_wcount_sync);
		} else {
			bp = getpbuf(&nsw_wcount_async);
			bp->b_flags = B_ASYNC;
		}
		bp->b_flags |= B_PAGING;
		bp->b_iocmd = BIO_WRITE;

		bp->b_rcred = crhold(thread0.td_ucred);
		bp->b_wcred = crhold(thread0.td_ucred);
		bp->b_bcount = PAGE_SIZE * n;
		bp->b_bufsize = PAGE_SIZE * n;
		bp->b_blkno = blk;

		VM_OBJECT_WLOCK(object);
		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(
			    mreq->object,
			    mreq->pindex,
			    blk + j
			);
			vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			mreq->oflags |= VPO_SWAPINPROG;
			bp->b_pages[j] = mreq;
		}
		VM_OBJECT_WUNLOCK(object);
		bp->b_npages = n;
		/*
		 * Must set dirty range for NFS to work.
		 */
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bcount;

		PCPU_INC(cnt.v_swapout);
		PCPU_ADD(cnt.v_swappgsout, bp->b_npages);

		/*
		 * asynchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
		 */
		if (sync == FALSE) {
			bp->b_iodone = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			swp_pager_strategy(bp);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			/* restart outer loop */
			continue;
		}

		/*
		 * synchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
		 */
		bp->b_iodone = bdone;
		swp_pager_strategy(bp);

		/*
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		bwait(bp, PVM, "swwrt");
		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;
		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */
		swp_pager_async_iodone(bp);
	}
	VM_OBJECT_WLOCK(object);
}

/*
 *	swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	This routine may not sleep.
 */
static void
swp_pager_async_iodone(struct buf *bp)
{
	int i;
	vm_object_t object = NULL;

	/*
	 * report error
	 */
	if (bp->b_ioflags & BIO_ERROR) {
		printf(
		    "swap_pager: I/O error - %s failed; blkno %ld,"
		    "size %ld, error %d\n",
		    ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
		    (long)bp->b_blkno,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * remove the mapping for kernel virtual
	 */
	if ((bp->b_flags & B_UNMAPPED) != 0) {
		bp->b_data = bp->b_kvaalloc;
		bp->b_kvabase = bp->b_kvaalloc;
		bp->b_flags &= ~B_UNMAPPED;
	} else
		pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);

	if (bp->b_npages) {
		object = bp->b_pages[0]->object;
		VM_OBJECT_WLOCK(object);
	}

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */
	for (i = 0; i < bp->b_npages; ++i) {
		vm_page_t m = bp->b_pages[i];

		m->oflags &= ~VPO_SWAPINPROG;
		if (m->oflags & VPO_SWAPSLEEP) {
			m->oflags &= ~VPO_SWAPSLEEP;
			wakeup(&object->paging_in_progress);
		}

		if (bp->b_ioflags & BIO_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */
			if (bp->b_iocmd == BIO_READ) {
				/*
				 * When reading, reqpage needs to stay
				 * locked for the parent, but all other
				 * pages can be freed.  We still want to
				 * wakeup the parent waiting on the page,
				 * though.  ( also: pg_reqpage can be -1 and
				 * not match anything ).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared VPO_SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 */
				m->valid = 0;
				if (i != bp->b_pager.pg_reqpage)
					swp_pager_free_nrpage(m);
				else {
					vm_page_lock(m);
					vm_page_flash(m);
					vm_page_unlock(m);
				}
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs, reactivate page
				 * so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				vm_page_dirty(m);
				vm_page_lock(m);
				vm_page_activate(m);
				vm_page_unlock(m);
				vm_page_sunbusy(m);
			}
		} else if (bp->b_iocmd == BIO_READ) {
			/*
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by
			 * vm_page_xunbusy().
			 */
			KASSERT(!pmap_page_is_mapped(m),
			    ("swp_pager_async_iodone: page %p is mapped", m));
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("swp_pager_async_iodone: page %p is dirty", m));

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared VPO_SWAPINPROG and
			 * could be waiting for it in getpages.  However,
			 * be sure to not unbusy getpages specifically
			 * requested page - getpages expects it to be
			 * left busy.
			 */
			if (i != bp->b_pager.pg_reqpage) {
				vm_page_lock(m);
				vm_page_deactivate(m);
				vm_page_unlock(m);
				vm_page_xunbusy(m);
			} else {
				vm_page_lock(m);
				vm_page_flash(m);
				vm_page_unlock(m);
			}
		} else {
			/*
			 * For write success, clear the dirty
			 * status, then finish the I/O ( which decrements the
			 * busy count and possibly wakes waiters up ).
			 */
			KASSERT(!pmap_page_is_write_mapped(m),
			    ("swp_pager_async_iodone: page %p is not write"
			    " protected", m));
			vm_page_undirty(m);
			vm_page_sunbusy(m);
			if (vm_page_count_severe()) {
				vm_page_lock(m);
				vm_page_try_to_cache(m);
				vm_page_unlock(m);
			}
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */
	if (object != NULL) {
		vm_object_pip_wakeupn(object, bp->b_npages);
		VM_OBJECT_WUNLOCK(object);
	}

	/*
	 * swapdev_strategy() manually sets b_vp and b_bufobj before calling
	 * bstrategy().  Set them back to NULL now we're done with it, or we'll
	 * trigger a KASSERT in relpbuf().
	 */
	if (bp->b_vp) {
		bp->b_vp = NULL;
		bp->b_bufobj = NULL;
	}
	/*
	 * release the physical I/O buffer
	 */
	relpbuf(
	    bp,
	    ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
		((bp->b_flags & B_ASYNC) ?
		    &nsw_wcount_async :
		    &nsw_wcount_sync
		)
	    )
	);
}

/*
 *	swap_pager_isswapped:
 *
 *	Return 1 if at least one page in the given object is paged
 *	out to the given swap device.
 *
 *	This routine may not sleep.
 */
int
swap_pager_isswapped(vm_object_t object, struct swdevt *sp)
{
	daddr_t index = 0;
	int bcount;
	int i;

	VM_OBJECT_ASSERT_WLOCKED(object);
	if (object->type != OBJT_SWAP)
		return (0);

	mtx_lock(&swhash_mtx);
	for (bcount = 0; bcount < object->un_pager.swp.swp_bcount; bcount++) {
		struct swblock *swap;

		if ((swap = *swp_pager_hash(object, index)) != NULL) {
			for (i = 0; i < SWAP_META_PAGES; ++i) {
				if (swp_pager_isondev(swap->swb_pages[i], sp)) {
					mtx_unlock(&swhash_mtx);
					return (1);
				}
			}
		}
		index += SWAP_META_PAGES;
	}
	mtx_unlock(&swhash_mtx);
	return (0);
}

/*
 * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
 *
 *	This routine dissociates the page at the given index within a
 *	swap block from its backing store, paging it in if necessary.
 *	If the page is paged in, it is placed in the inactive queue,
 *	since it had its backing store ripped out from under it.
 *	We also attempt to swap in all other pages in the swap block,
 *	but we only guarantee that the one at the specified index is
 *	paged in.
 *
 *	XXX - The code to page the whole block in doesn't work, so we
 *	      revert to the one-by-one behavior for now.  Sigh.
 */
static inline void
swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	vm_object_pip_add(object, 1);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
	if (m->valid == VM_PAGE_BITS_ALL) {
		vm_object_pip_subtract(object, 1);
		vm_page_dirty(m);
		vm_page_lock(m);
		vm_page_activate(m);
		vm_page_unlock(m);
		vm_page_xunbusy(m);
		vm_pager_page_unswapped(m);
		return;
	}

	if (swap_pager_getpages(object, &m, 1, 0) != VM_PAGER_OK)
		panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
	vm_object_pip_subtract(object, 1);
	vm_page_dirty(m);
	vm_page_lock(m);
	vm_page_deactivate(m);
	vm_page_unlock(m);
	vm_page_xunbusy(m);
	vm_pager_page_unswapped(m);
}

/*
 *	swap_pager_swapoff:
 *
 *	Page in all of the pages that have been paged out to the
 *	given device.  The corresponding blocks in the bitmap must be
 *	marked as allocated and the device must be flagged SW_CLOSING.
 *	There may be no processes swapped out to the device.
 *
 *	This routine may block.
 */
static void
swap_pager_swapoff(struct swdevt *sp)
{
	struct swblock *swap;
	int i, j, retries;

	GIANT_REQUIRED;

	retries = 0;
full_rescan:
	mtx_lock(&swhash_mtx);
	for (i = 0; i <= swhash_mask; i++) { /* '<=' is correct here */
restart:
		for (swap = swhash[i]; swap != NULL; swap = swap->swb_hnext) {
			vm_object_t object = swap->swb_object;
			vm_pindex_t pindex = swap->swb_index;
			for (j = 0; j < SWAP_META_PAGES; ++j) {
				if (swp_pager_isondev(swap->swb_pages[j], sp)) {
					/* avoid deadlock */
					if (!VM_OBJECT_TRYWLOCK(object)) {
						break;
					} else {
						mtx_unlock(&swhash_mtx);
						swp_pager_force_pagein(object,
						    pindex + j);
						VM_OBJECT_WUNLOCK(object);
						mtx_lock(&swhash_mtx);
						goto restart;
					}
				}
			}
		}
	}
	mtx_unlock(&swhash_mtx);
	if (sp->sw_used) {
		/*
		 * Objects may be locked or paging to the device being
		 * removed, so we will miss their pages and need to
		 * make another pass.  We have marked this device as
		 * SW_CLOSING, so the activity should finish soon.
		 */
		retries++;
		if (retries > 100) {
			panic("swapoff: failed to locate %d swap blocks",
			    sp->sw_used);
		}
		pause("swpoff", hz / 20);
		goto full_rescan;
	}
}

/************************************************************************
 *				SWAP META DATA 				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.
 *
 *	Swap metadata is implemented with a global hash and not directly
 *	linked into the object.  Instead the object simply contains
 *	appropriate tracking counters.
 */

/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 */
static void
swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
{
	static volatile int exhausted;
	struct swblock *swap;
	struct swblock **pswap;
	int idx;

	VM_OBJECT_ASSERT_WLOCKED(object);
	/*
	 * Convert default object to swap object if necessary
	 */
	if (object->type != OBJT_SWAP) {
		object->type = OBJT_SWAP;
		object->un_pager.swp.swp_bcount = 0;

		if (object->handle != NULL) {
			mtx_lock(&sw_alloc_mtx);
			TAILQ_INSERT_TAIL(
			    NOBJLIST(object->handle),
			    object,
			    pager_object_list
			);
			mtx_unlock(&sw_alloc_mtx);
		}
	}

	/*
	 * Locate hash entry.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the map we wait
	 * and, since the hash table may have changed, retry.
	 */
retry:
	mtx_lock(&swhash_mtx);
	pswap = swp_pager_hash(object, pindex);

	if ((swap = *pswap) == NULL) {
		int i;

		if (swapblk == SWAPBLK_NONE)
			goto done;

		swap = *pswap = uma_zalloc(swap_zone, M_NOWAIT |
		    (curproc == pageproc ?
		    M_USE_RESERVE : 0));
		if (swap == NULL) {
			mtx_unlock(&swhash_mtx);
			VM_OBJECT_WUNLOCK(object);
			if (uma_zone_exhausted(swap_zone)) {
				if (atomic_cmpset_int(&exhausted, 0, 1))
					printf("swap zone exhausted, "
					    "increase kern.maxswzone\n");
				vm_pageout_oom(VM_OOM_SWAPZ);
				pause("swzonex", 10);
			} else
				VM_WAIT;
			VM_OBJECT_WLOCK(object);
			goto retry;
		}

		if (atomic_cmpset_int(&exhausted, 1, 0))
			printf("swap zone ok\n");

		swap->swb_hnext = NULL;
		swap->swb_object = object;
		swap->swb_index = pindex & ~(vm_pindex_t)SWAP_META_MASK;
		swap->swb_count = 0;

		++object->un_pager.swp.swp_bcount;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
	}

	/*
	 * Delete prior contents of metadata
	 */
	idx = pindex & SWAP_META_MASK;

	if (swap->swb_pages[idx] != SWAPBLK_NONE) {
		swp_pager_freeswapspace(swap->swb_pages[idx], 1);
		--swap->swb_count;
	}

	/*
	 * Enter block into metadata
	 */
	swap->swb_pages[idx] = swapblk;
	if (swapblk != SWAPBLK_NONE)
		++swap->swb_count;
done:
	mtx_unlock(&swhash_mtx);
}

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 */
static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{

	VM_OBJECT_ASSERT_LOCKED(object);
	if (object->type != OBJT_SWAP)
		return;

	while (count > 0) {
		struct swblock **pswap;
		struct swblock *swap;

		mtx_lock(&swhash_mtx);
		pswap = swp_pager_hash(object, index);

		if ((swap = *pswap) != NULL) {
			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(v, 1);
				swap->swb_pages[index & SWAP_META_MASK] =
				    SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					uma_zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
			--count;
			++index;
		} else {
			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
			count -= n;
			index += n;
		}
		mtx_unlock(&swhash_mtx);
	}
}
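
/*
 * Illustrative sketch (editor's note, not part of the original code):
 * the else-branch above skips to the next swblock boundary in a single
 * step.  Freeing index 5, count 100 with SWAP_META_PAGES == 32 visits
 * slots 5..31 one at a time when an swblock hashes to that window, or
 * jumps the remaining 27 slots at once when none does, then continues
 * at index 32, 64, ... until count is exhausted.  This keeps the cost
 * proportional to the populated metadata, not to the raw page range.
 */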
/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *      This routine locates and destroys all swap metadata associated with
 *      an object.
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
        daddr_t index = 0;

        VM_OBJECT_ASSERT_WLOCKED(object);
        if (object->type != OBJT_SWAP)
                return;

        while (object->un_pager.swp.swp_bcount) {
                struct swblock **pswap;
                struct swblock *swap;

                mtx_lock(&swhash_mtx);
                pswap = swp_pager_hash(object, index);
                if ((swap = *pswap) != NULL) {
                        int i;

                        for (i = 0; i < SWAP_META_PAGES; ++i) {
                                daddr_t v = swap->swb_pages[i];
                                if (v != SWAPBLK_NONE) {
                                        --swap->swb_count;
                                        swp_pager_freeswapspace(v, 1);
                                }
                        }
                        if (swap->swb_count != 0)
                                panic(
                                    "swap_pager_meta_free_all: swb_count != 0");
                        *pswap = swap->swb_hnext;
                        uma_zfree(swap_zone, swap);
                        --object->un_pager.swp.swp_bcount;
                }
                mtx_unlock(&swhash_mtx);
                index += SWAP_META_PAGES;
        }
}

/*
 * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data.
 *
 *      This routine is capable of looking up, popping, or freeing
 *      swapblk assignments in the swap meta data or in the vm_page_t.
 *      The routine typically returns the swapblk being looked up or
 *      popped, or SWAPBLK_NONE if the block was freed or invalid.  This
 *      routine will automatically free any invalid meta-data swapblks.
 *
 *      It is not possible to store invalid swapblks in the swap meta data
 *      (other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *      When acting on a busy resident page and paging is in progress, we
 *      have to wait until paging is complete but otherwise can act on the
 *      busy page.
 *
 *      SWM_FREE        remove and free swap block from metadata
 *      SWM_POP         remove from meta data but do not free it; pop it out
 */
static daddr_t
swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
{
        struct swblock **pswap;
        struct swblock *swap;
        daddr_t r1;
        int idx;

        VM_OBJECT_ASSERT_LOCKED(object);
        /*
         * The meta data only exists if the object is OBJT_SWAP
         * and even then might not be allocated yet.
         */
        if (object->type != OBJT_SWAP)
                return (SWAPBLK_NONE);

        r1 = SWAPBLK_NONE;
        mtx_lock(&swhash_mtx);
        pswap = swp_pager_hash(object, pindex);

        if ((swap = *pswap) != NULL) {
                idx = pindex & SWAP_META_MASK;
                r1 = swap->swb_pages[idx];

                if (r1 != SWAPBLK_NONE) {
                        if (flags & SWM_FREE) {
                                swp_pager_freeswapspace(r1, 1);
                                r1 = SWAPBLK_NONE;
                        }
                        if (flags & (SWM_FREE|SWM_POP)) {
                                swap->swb_pages[idx] = SWAPBLK_NONE;
                                if (--swap->swb_count == 0) {
                                        *pswap = swap->swb_hnext;
                                        uma_zfree(swap_zone, swap);
                                        --object->un_pager.swp.swp_bcount;
                                }
                        }
                }
        }
        mtx_unlock(&swhash_mtx);
        return (r1);
}

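/*
 * Usage sketch (comment only, not compiled): a caller that wants to take
 * over the on-disk block backing a page, rather than release it, pops it:
 *
 *      daddr_t blk = swp_pager_meta_ctl(object, pindex, SWM_POP);
 *      if (blk != SWAPBLK_NONE)
 *              ... caller is now responsible for freeing 'blk' ...
 *
 * With SWM_FREE the block is instead returned to the swap bitmap and
 * SWAPBLK_NONE is returned.
 */
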
/*
 * System call swapon(name) enables swapping on device name,
 * which must be in the swdevsw.  Return EBUSY
 * if already swapping on this device.
 */
#ifndef _SYS_SYSPROTO_H_
struct swapon_args {
        char *name;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sys_swapon(struct thread *td, struct swapon_args *uap)
{
        struct vattr attr;
        struct vnode *vp;
        struct nameidata nd;
        int error;

        error = priv_check(td, PRIV_SWAPON);
        if (error)
                return (error);

        mtx_lock(&Giant);
        while (swdev_syscall_active)
                tsleep(&swdev_syscall_active, PUSER - 1, "swpon", 0);
        swdev_syscall_active = 1;

        /*
         * Swap metadata may not fit in the KVM if we have physical
         * memory of >1GB.
         */
        if (swap_zone == NULL) {
                error = ENOMEM;
                goto done;
        }

        NDINIT(&nd, LOOKUP, ISOPEN | FOLLOW | AUDITVNODE1, UIO_USERSPACE,
            uap->name, td);
        error = namei(&nd);
        if (error)
                goto done;

        NDFREE(&nd, NDF_ONLY_PNBUF);
        vp = nd.ni_vp;

        if (vn_isdisk(vp, &error)) {
                error = swapongeom(td, vp);
        } else if (vp->v_type == VREG &&
            (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
            (error = VOP_GETATTR(vp, &attr, td->td_ucred)) == 0) {
                /*
                 * Allow direct swapping to NFS regular files in the same
                 * way that nfs_mountroot() sets up diskless swapping.
                 */
                error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
        }

        if (error)
                vrele(vp);
done:
        swdev_syscall_active = 0;
        wakeup_one(&swdev_syscall_active);
        mtx_unlock(&Giant);
        return (error);
}

/*
 * Check that the total amount of swap currently configured does not
 * exceed half the theoretical maximum.  If it does, print a warning
 * message and return -1; otherwise, return 0.
 */
static int
swapon_check_swzone(unsigned long npages)
{
        unsigned long maxpages;

        /* absolute maximum we can handle assuming 100% efficiency */
        maxpages = uma_zone_get_max(swap_zone) * SWAP_META_PAGES;

        /* recommend using no more than half that amount */
        if (npages > maxpages / 2) {
                printf("warning: total configured swap (%lu pages) "
                    "exceeds maximum recommended amount (%lu pages).\n",
                    npages, maxpages / 2);
                printf("warning: increase kern.maxswzone "
                    "or reduce amount of swap.\n");
                return (-1);
        }
        return (0);
}

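/*
 * Worked example (comment only, assuming 4 KB pages and a 512-byte
 * DEV_BSIZE, so ctodb(1) == 8): a device reporting nblks == 8191
 * 512-byte sectors is first rounded down to a page boundary,
 * 8191 & ~7 == 8184, and then converted to pages, dbtoc(8184) == 1023.
 * swaponsomething() below performs exactly this conversion before
 * sizing the blist.
 */
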
static void
swaponsomething(struct vnode *vp, void *id, u_long nblks,
    sw_strategy_t *strategy, sw_close_t *close, dev_t dev, int flags)
{
        struct swdevt *sp, *tsp;
        swblk_t dvbase;
        u_long mblocks;

        /*
         * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
         * First chop nblks off to page-align it, then convert.
         *
         * sw->sw_nblks is in page-sized chunks now too.
         */
        nblks &= ~(ctodb(1) - 1);
        nblks = dbtoc(nblks);

        /*
         * If we go beyond this, we get overflows in the radix
         * tree bitmap code.
         */
        mblocks = 0x40000000 / BLIST_META_RADIX;
        if (nblks > mblocks) {
                printf(
            "WARNING: reducing swap size to maximum of %luMB per unit\n",
                    mblocks / 1024 / 1024 * PAGE_SIZE);
                nblks = mblocks;
        }

        sp = malloc(sizeof *sp, M_VMPGDATA, M_WAITOK | M_ZERO);
        sp->sw_vp = vp;
        sp->sw_id = id;
        sp->sw_dev = dev;
        sp->sw_flags = 0;
        sp->sw_nblks = nblks;
        sp->sw_used = 0;
        sp->sw_strategy = strategy;
        sp->sw_close = close;
        sp->sw_flags = flags;

        sp->sw_blist = blist_create(nblks, M_WAITOK);
        /*
         * Do not free the first two blocks in order to avoid overwriting
         * any bsd label at the front of the partition
         */
        blist_free(sp->sw_blist, 2, nblks - 2);

        dvbase = 0;
        mtx_lock(&sw_dev_mtx);
        TAILQ_FOREACH(tsp, &swtailq, sw_list) {
                if (tsp->sw_end >= dvbase) {
                        /*
                         * We put one uncovered page between the devices
                         * in order to definitively prevent any cross-device
                         * I/O requests
                         */
                        dvbase = tsp->sw_end + 1;
                }
        }
        sp->sw_first = dvbase;
        sp->sw_end = dvbase + nblks;
        TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
        nswapdev++;
        swap_pager_avail += nblks;
        swap_total += (vm_ooffset_t)nblks * PAGE_SIZE;
        swapon_check_swzone(swap_total / PAGE_SIZE);
        swp_sizecheck();
        mtx_unlock(&sw_dev_mtx);
}

/*
 * SYSCALL: swapoff(devname)
 *
 * Disable swapping on the given device.
 *
 * XXX: Badly designed system call: it should use a device index
 * rather than a filename as specification.  We keep sw_vp around
 * only to make this work.
 */
#ifndef _SYS_SYSPROTO_H_
struct swapoff_args {
        char *name;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sys_swapoff(struct thread *td, struct swapoff_args *uap)
{
        struct vnode *vp;
        struct nameidata nd;
        struct swdevt *sp;
        int error;

        error = priv_check(td, PRIV_SWAPOFF);
        if (error)
                return (error);

        mtx_lock(&Giant);
        while (swdev_syscall_active)
                tsleep(&swdev_syscall_active, PUSER - 1, "swpoff", 0);
        swdev_syscall_active = 1;

        NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNODE1, UIO_USERSPACE, uap->name,
            td);
        error = namei(&nd);
        if (error)
                goto done;
        NDFREE(&nd, NDF_ONLY_PNBUF);
        vp = nd.ni_vp;

        mtx_lock(&sw_dev_mtx);
        TAILQ_FOREACH(sp, &swtailq, sw_list) {
                if (sp->sw_vp == vp)
                        break;
        }
        mtx_unlock(&sw_dev_mtx);
        if (sp == NULL) {
                error = EINVAL;
                goto done;
        }
        error = swapoff_one(sp, td->td_ucred);
done:
        swdev_syscall_active = 0;
        wakeup_one(&swdev_syscall_active);
        mtx_unlock(&Giant);
        return (error);
}

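/*
 * Sketch of the admission check performed by swapoff_one() below
 * (comment only): the device may be removed only when its pages can be
 * absorbed by the remaining memory and swap, with some headroom:
 *
 *      free pages + cached pages + remaining swap
 *          >= pages on this device + nswap_lowat
 *
 * Otherwise ENOMEM is returned and the device stays configured.
 */
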
static int
swapoff_one(struct swdevt *sp, struct ucred *cred)
{
        u_long nblks, dvbase;
#ifdef MAC
        int error;
#endif

        mtx_assert(&Giant, MA_OWNED);
#ifdef MAC
        (void) vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY);
        error = mac_system_check_swapoff(cred, sp->sw_vp);
        (void) VOP_UNLOCK(sp->sw_vp, 0);
        if (error != 0)
                return (error);
#endif
        nblks = sp->sw_nblks;

        /*
         * We can turn off this swap device safely only if the
         * available virtual memory in the system will fit the amount
         * of data we will have to page back in, plus an epsilon so
         * the system doesn't become critically low on swap space.
         */
        if (cnt.v_free_count + cnt.v_cache_count + swap_pager_avail <
            nblks + nswap_lowat) {
                return (ENOMEM);
        }

        /*
         * Prevent further allocations on this device.
         */
        mtx_lock(&sw_dev_mtx);
        sp->sw_flags |= SW_CLOSING;
        for (dvbase = 0; dvbase < sp->sw_end; dvbase += dmmax) {
                swap_pager_avail -= blist_fill(sp->sw_blist,
                    dvbase, dmmax);
        }
        swap_total -= (vm_ooffset_t)nblks * PAGE_SIZE;
        mtx_unlock(&sw_dev_mtx);

        /*
         * Page in the contents of the device and close it.
         */
        swap_pager_swapoff(sp);

        sp->sw_close(curthread, sp);
        mtx_lock(&sw_dev_mtx);
        sp->sw_id = NULL;
        TAILQ_REMOVE(&swtailq, sp, sw_list);
        nswapdev--;
        if (nswapdev == 0) {
                swap_pager_full = 2;
                swap_pager_almost_full = 1;
        }
        if (swdevhd == sp)
                swdevhd = NULL;
        mtx_unlock(&sw_dev_mtx);
        blist_destroy(sp->sw_blist);
        free(sp, M_VMPGDATA);
        return (0);
}

void
swapoff_all(void)
{
        struct swdevt *sp, *spt;
        const char *devname;
        int error;

        mtx_lock(&Giant);
        while (swdev_syscall_active)
                tsleep(&swdev_syscall_active, PUSER - 1, "swpoff", 0);
        swdev_syscall_active = 1;

        mtx_lock(&sw_dev_mtx);
        TAILQ_FOREACH_SAFE(sp, &swtailq, sw_list, spt) {
                mtx_unlock(&sw_dev_mtx);
                if (vn_isdisk(sp->sw_vp, NULL))
                        devname = devtoname(sp->sw_vp->v_rdev);
                else
                        devname = "[file]";
                error = swapoff_one(sp, thread0.td_ucred);
                if (error != 0) {
                        printf("Cannot remove swap device %s (error=%d), "
                            "skipping.\n", devname, error);
                } else if (bootverbose) {
                        printf("Swap device %s removed.\n", devname);
                }
                mtx_lock(&sw_dev_mtx);
        }
        mtx_unlock(&sw_dev_mtx);

        swdev_syscall_active = 0;
        wakeup_one(&swdev_syscall_active);
        mtx_unlock(&Giant);
}

void
swap_pager_status(int *total, int *used)
{
        struct swdevt *sp;

        *total = 0;
        *used = 0;
        mtx_lock(&sw_dev_mtx);
        TAILQ_FOREACH(sp, &swtailq, sw_list) {
                *total += sp->sw_nblks;
                *used += sp->sw_used;
        }
        mtx_unlock(&sw_dev_mtx);
}

int
swap_dev_info(int name, struct xswdev *xs, char *devname, size_t len)
{
        struct swdevt *sp;
        const char *tmp_devname;
        int error, n;

        n = 0;
        error = ENOENT;
        mtx_lock(&sw_dev_mtx);
        TAILQ_FOREACH(sp, &swtailq, sw_list) {
                if (n != name) {
                        n++;
                        continue;
                }
                xs->xsw_version = XSWDEV_VERSION;
                xs->xsw_dev = sp->sw_dev;
                xs->xsw_flags = sp->sw_flags;
                xs->xsw_nblks = sp->sw_nblks;
                xs->xsw_used = sp->sw_used;
                if (devname != NULL) {
                        if (vn_isdisk(sp->sw_vp, NULL))
                                tmp_devname = devtoname(sp->sw_vp->v_rdev);
                        else
                                tmp_devname = "[file]";
                        strncpy(devname, tmp_devname, len);
                }
                error = 0;
                break;
        }
        mtx_unlock(&sw_dev_mtx);
        return (error);
}

static int
sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
{
        struct xswdev xs;
        int error;

        if (arg2 != 1)                  /* name length */
                return (EINVAL);
        error = swap_dev_info(*(int *)arg1, &xs, NULL, 0);
        if (error != 0)
                return (error);
        error = SYSCTL_OUT(req, &xs, sizeof(xs));
        return (error);
}

SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0,
    "Number of swap devices");
SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD, sysctl_vm_swap_info,
    "Swap statistics by device");

/*
 * vmspace_swap_count() - count the approximate swap usage in pages for a
 *                        vmspace.
 *
 *      The map must be locked.
 *
 *      Swap usage is determined by taking the proportional swap used by
 *      VM objects backing the VM map.  To make up for fractional losses,
 *      if the VM object has any swap use at all the associated map entries
 *      count for at least 1 swap page.
 */
long
vmspace_swap_count(struct vmspace *vmspace)
{
        vm_map_t map;
        vm_map_entry_t cur;
        vm_object_t object;
        long count, n;

        map = &vmspace->vm_map;
        count = 0;

        for (cur = map->header.next; cur != &map->header; cur = cur->next) {
                if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
                    (object = cur->object.vm_object) != NULL) {
                        VM_OBJECT_WLOCK(object);
                        if (object->type == OBJT_SWAP &&
                            object->un_pager.swp.swp_bcount != 0) {
                                n = (cur->end - cur->start) / PAGE_SIZE;
                                count += object->un_pager.swp.swp_bcount *
                                    SWAP_META_PAGES * n / object->size + 1;
                        }
                        VM_OBJECT_WUNLOCK(object);
                }
        }
        return (count);
}

/*
 * GEOM backend
 *
 * Swapping onto disk devices.
 */

static g_orphan_t swapgeom_orphan;

static struct g_class g_swap_class = {
        .name = "SWAP",
        .version = G_VERSION,
        .orphan = swapgeom_orphan,
};

DECLARE_GEOM_CLASS(g_swap_class, g_class);

static void
swapgeom_close_ev(void *arg, int flags)
{
        struct g_consumer *cp;

        cp = arg;
        g_access(cp, -1, -1, 0);
        g_detach(cp);
        g_destroy_consumer(cp);
}

/*
 * Add a reference to the g_consumer for an inflight transaction.
 */
static void
swapgeom_acquire(struct g_consumer *cp)
{

        mtx_assert(&sw_dev_mtx, MA_OWNED);
        cp->index++;
}

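/*
 * Reference protocol sketch (comment only): cp->index counts in-flight
 * bios plus one for the consumer being open, so a consumer created by
 * swapongeom_ev() starts at 1 and each I/O brackets itself with
 * acquire/release under sw_dev_mtx:
 *
 *      mtx_lock(&sw_dev_mtx);
 *      swapgeom_acquire(cp);           // index: 1 -> 2
 *      mtx_unlock(&sw_dev_mtx);
 *      ... g_io_request() ... swapgeom_done() ...
 *      mtx_lock(&sw_dev_mtx);
 *      swapgeom_release(cp, sp);       // index: 2 -> 1
 *      mtx_unlock(&sw_dev_mtx);
 *
 * The consumer is only torn down when the count reaches zero.
 */
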
/*
 * Remove a reference from the g_consumer.  Post a close event if
 * all references go away.
 */
static void
swapgeom_release(struct g_consumer *cp, struct swdevt *sp)
{

        mtx_assert(&sw_dev_mtx, MA_OWNED);
        cp->index--;
        if (cp->index == 0) {
                if (g_post_event(swapgeom_close_ev, cp, M_NOWAIT, NULL) == 0)
                        sp->sw_id = NULL;
        }
}

static void
swapgeom_done(struct bio *bp2)
{
        struct swdevt *sp;
        struct buf *bp;
        struct g_consumer *cp;

        bp = bp2->bio_caller2;
        cp = bp2->bio_from;
        bp->b_ioflags = bp2->bio_flags;
        if (bp2->bio_error)
                bp->b_ioflags |= BIO_ERROR;
        bp->b_resid = bp->b_bcount - bp2->bio_completed;
        bp->b_error = bp2->bio_error;
        bufdone(bp);
        sp = bp2->bio_caller1;
        mtx_lock(&sw_dev_mtx);
        swapgeom_release(cp, sp);
        mtx_unlock(&sw_dev_mtx);
        g_destroy_bio(bp2);
}

static void
swapgeom_strategy(struct buf *bp, struct swdevt *sp)
{
        struct bio *bio;
        struct g_consumer *cp;

        mtx_lock(&sw_dev_mtx);
        cp = sp->sw_id;
        if (cp == NULL) {
                mtx_unlock(&sw_dev_mtx);
                bp->b_error = ENXIO;
                bp->b_ioflags |= BIO_ERROR;
                bufdone(bp);
                return;
        }
        swapgeom_acquire(cp);
        mtx_unlock(&sw_dev_mtx);
        if (bp->b_iocmd == BIO_WRITE)
                bio = g_new_bio();
        else
                bio = g_alloc_bio();
        if (bio == NULL) {
                mtx_lock(&sw_dev_mtx);
                swapgeom_release(cp, sp);
                mtx_unlock(&sw_dev_mtx);
                bp->b_error = ENOMEM;
                bp->b_ioflags |= BIO_ERROR;
                bufdone(bp);
                return;
        }

        bio->bio_caller1 = sp;
        bio->bio_caller2 = bp;
        bio->bio_cmd = bp->b_iocmd;
        bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
        bio->bio_length = bp->b_bcount;
        bio->bio_done = swapgeom_done;
        if ((bp->b_flags & B_UNMAPPED) != 0) {
                bio->bio_ma = bp->b_pages;
                bio->bio_data = unmapped_buf;
                bio->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
                bio->bio_ma_n = bp->b_npages;
                bio->bio_flags |= BIO_UNMAPPED;
        } else {
                bio->bio_data = bp->b_data;
                bio->bio_ma = NULL;
        }
        g_io_request(bio, cp);
}

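/*
 * Note on the bio allocation above (editorial, hedged): g_alloc_bio()
 * sleeps until a bio is available, while g_new_bio() may fail and return
 * NULL.  Writes originate in pageout paths that must not block here, so
 * they take the non-sleeping variant and fall back to failing the buffer
 * with ENOMEM; reads are allowed to wait.
 */
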
static void
swapgeom_orphan(struct g_consumer *cp)
{
        struct swdevt *sp;
        int destroy;

        mtx_lock(&sw_dev_mtx);
        TAILQ_FOREACH(sp, &swtailq, sw_list) {
                if (sp->sw_id == cp) {
                        sp->sw_flags |= SW_CLOSING;
                        break;
                }
        }
        /*
         * Drop the reference we were created with.  Do it directly since
         * we're in a special context where we don't have to queue the
         * call to swapgeom_close_ev().
         */
        cp->index--;
        destroy = ((sp != NULL) && (cp->index == 0));
        if (destroy)
                sp->sw_id = NULL;
        mtx_unlock(&sw_dev_mtx);
        if (destroy)
                swapgeom_close_ev(cp, 0);
}

static void
swapgeom_close(struct thread *td, struct swdevt *sw)
{
        struct g_consumer *cp;

        mtx_lock(&sw_dev_mtx);
        cp = sw->sw_id;
        sw->sw_id = NULL;
        mtx_unlock(&sw_dev_mtx);
        /* XXX: direct call when Giant untangled */
        if (cp != NULL)
                g_waitfor_event(swapgeom_close_ev, cp, M_WAITOK, NULL);
}

struct swh0h0 {
        struct cdev *dev;
        struct vnode *vp;
        int error;
};

static void
swapongeom_ev(void *arg, int flags)
{
        struct swh0h0 *swh;
        struct g_provider *pp;
        struct g_consumer *cp;
        static struct g_geom *gp;
        struct swdevt *sp;
        u_long nblks;
        int error;

        swh = arg;
        swh->error = 0;
        pp = g_dev_getprovider(swh->dev);
        if (pp == NULL) {
                swh->error = ENODEV;
                return;
        }
        mtx_lock(&sw_dev_mtx);
        TAILQ_FOREACH(sp, &swtailq, sw_list) {
                cp = sp->sw_id;
                if (cp != NULL && cp->provider == pp) {
                        mtx_unlock(&sw_dev_mtx);
                        swh->error = EBUSY;
                        return;
                }
        }
        mtx_unlock(&sw_dev_mtx);
        if (gp == NULL)
                gp = g_new_geomf(&g_swap_class, "swap");
        cp = g_new_consumer(gp);
        cp->index = 1;  /* Number of active I/Os, plus one for being active. */
        cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
        g_attach(cp, pp);
        /*
         * XXX: Every time you think you can improve the margin for
         * footshooting, somebody depends on the ability to do so:
         * savecore(8) wants to write to our swapdev so we cannot
         * set an exclusive count :-(
         */
        error = g_access(cp, 1, 1, 0);
        if (error) {
                g_detach(cp);
                g_destroy_consumer(cp);
                swh->error = error;
                return;
        }
        nblks = pp->mediasize / DEV_BSIZE;
        swaponsomething(swh->vp, cp, nblks, swapgeom_strategy,
            swapgeom_close, dev2udev(swh->dev),
            (pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ? SW_UNMAPPED : 0);
        swh->error = 0;
}

static int
swapongeom(struct thread *td, struct vnode *vp)
{
        int error;
        struct swh0h0 swh;

        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

        swh.dev = vp->v_rdev;
        swh.vp = vp;
        swh.error = 0;
        /* XXX: direct call when Giant untangled */
        error = g_waitfor_event(swapongeom_ev, &swh, M_WAITOK, NULL);
        if (!error)
                error = swh.error;
        VOP_UNLOCK(vp, 0);
        return (error);
}

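/*
 * Editorial note (hedged): the attach and close paths above are routed
 * through the GEOM event thread with g_waitfor_event() only because
 * these syscalls still run under Giant, as the XXX comments indicate;
 * with Giant untangled the handlers could be called directly.
 */
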
/*
 * VNODE backend
 *
 * This is used mainly for network filesystem (read: probably only tested
 * with NFS) swapfiles.
 */

static void
swapdev_strategy(struct buf *bp, struct swdevt *sp)
{
        struct vnode *vp2;

        bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first);

        vp2 = sp->sw_id;
        vhold(vp2);
        if (bp->b_iocmd == BIO_WRITE) {
                if (bp->b_bufobj)
                        bufobj_wdrop(bp->b_bufobj);
                bufobj_wref(&vp2->v_bufobj);
        }
        if (bp->b_bufobj != &vp2->v_bufobj)
                bp->b_bufobj = &vp2->v_bufobj;
        bp->b_vp = vp2;
        bp->b_iooffset = dbtob(bp->b_blkno);
        bstrategy(bp);
}

static void
swapdev_close(struct thread *td, struct swdevt *sp)
{

        VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, td->td_ucred, td);
        vrele(sp->sw_vp);
}

static int
swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
{
        struct swdevt *sp;
        int error;

        if (nblks == 0)
                return (ENXIO);
        mtx_lock(&sw_dev_mtx);
        TAILQ_FOREACH(sp, &swtailq, sw_list) {
                if (sp->sw_id == vp) {
                        mtx_unlock(&sw_dev_mtx);
                        return (EBUSY);
                }
        }
        mtx_unlock(&sw_dev_mtx);

        (void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
        error = mac_system_check_swapon(td->td_ucred, vp);
        if (error == 0)
#endif
                error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, NULL);
        (void) VOP_UNLOCK(vp, 0);
        if (error)
                return (error);

        swaponsomething(vp, vp, nblks, swapdev_strategy, swapdev_close,
            NODEV, 0);
        return (0);
}

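/*
 * Worked example for swapdev_strategy() above (comment only, assuming
 * 4 KB pages and a 512-byte DEV_BSIZE): a request for page-sized block
 * 1000 on a device whose sw_first is 100 is rewritten to sector
 * ctodb(1000 - 100) == 7200 within the backing vnode, and b_iooffset
 * becomes dbtob(7200) == 3686400 bytes.
 */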