vm_pageout.c revision 291935
1/*- 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * Copyright (c) 2005 Yahoo! Technologies Norway AS 9 * All rights reserved. 10 * 11 * This code is derived from software contributed to Berkeley by 12 * The Mach Operating System project at Carnegie-Mellon University. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions 16 * are met: 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in the 21 * documentation and/or other materials provided with the distribution. 22 * 3. All advertising materials mentioning features or use of this software 23 * must display the following acknowledgement: 24 * This product includes software developed by the University of 25 * California, Berkeley and its contributors. 26 * 4. Neither the name of the University nor the names of its contributors 27 * may be used to endorse or promote products derived from this software 28 * without specific prior written permission. 29 * 30 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 31 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 33 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 34 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 38 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 39 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 40 * SUCH DAMAGE. 41 * 42 * from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91 43 * 44 * 45 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 46 * All rights reserved. 47 * 48 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 49 * 50 * Permission to use, copy, modify and distribute this software and 51 * its documentation is hereby granted, provided that both the copyright 52 * notice and this permission notice appear in all copies of the 53 * software, derivative works or modified versions, and any portions 54 * thereof, and that both notices appear in supporting documentation. 55 * 56 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 57 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 58 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 59 * 60 * Carnegie Mellon requests users of this software to return to 61 * 62 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 63 * School of Computer Science 64 * Carnegie Mellon University 65 * Pittsburgh PA 15213-3890 66 * 67 * any improvements or extensions that they make and grant Carnegie the 68 * rights to redistribute these changes. 69 */ 70 71/* 72 * The proverbial page-out daemon. 
73 */ 74 75#include <sys/cdefs.h> 76__FBSDID("$FreeBSD: stable/10/sys/vm/vm_pageout.c 291935 2015-12-07 11:21:49Z kib $"); 77 78#include "opt_vm.h" 79#include "opt_kdtrace.h" 80#include <sys/param.h> 81#include <sys/systm.h> 82#include <sys/kernel.h> 83#include <sys/eventhandler.h> 84#include <sys/lock.h> 85#include <sys/mutex.h> 86#include <sys/proc.h> 87#include <sys/kthread.h> 88#include <sys/ktr.h> 89#include <sys/mount.h> 90#include <sys/racct.h> 91#include <sys/resourcevar.h> 92#include <sys/sched.h> 93#include <sys/sdt.h> 94#include <sys/signalvar.h> 95#include <sys/smp.h> 96#include <sys/time.h> 97#include <sys/vnode.h> 98#include <sys/vmmeter.h> 99#include <sys/rwlock.h> 100#include <sys/sx.h> 101#include <sys/sysctl.h> 102 103#include <vm/vm.h> 104#include <vm/vm_param.h> 105#include <vm/vm_object.h> 106#include <vm/vm_page.h> 107#include <vm/vm_map.h> 108#include <vm/vm_pageout.h> 109#include <vm/vm_pager.h> 110#include <vm/vm_phys.h> 111#include <vm/swap_pager.h> 112#include <vm/vm_extern.h> 113#include <vm/uma.h> 114 115/* 116 * System initialization 117 */ 118 119/* the kernel process "vm_pageout"*/ 120static void vm_pageout(void); 121static void vm_pageout_init(void); 122static int vm_pageout_clean(vm_page_t); 123static void vm_pageout_scan(struct vm_domain *vmd, int pass); 124static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage, 125 int starting_page_shortage); 126 127SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init, 128 NULL); 129 130struct proc *pageproc; 131 132static struct kproc_desc page_kp = { 133 "pagedaemon", 134 vm_pageout, 135 &pageproc 136}; 137SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, 138 &page_kp); 139 140SDT_PROVIDER_DEFINE(vm); 141SDT_PROBE_DEFINE(vm, , , vm__lowmem_cache); 142SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan); 143 144#if !defined(NO_SWAPPING) 145/* the kernel process "vm_daemon"*/ 146static void vm_daemon(void); 147static struct proc *vmproc; 148 149static struct kproc_desc vm_kp = { 150 "vmdaemon", 151 vm_daemon, 152 &vmproc 153}; 154SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp); 155#endif 156 157 158int vm_pages_needed; /* Event on which pageout daemon sleeps */ 159int vm_pageout_deficit; /* Estimated number of pages deficit */ 160int vm_pageout_pages_needed; /* flag saying that the pageout daemon needs pages */ 161int vm_pageout_wakeup_thresh; 162static int vm_pageout_oom_seq = 12; 163 164#if !defined(NO_SWAPPING) 165static int vm_pageout_req_swapout; /* XXX */ 166static int vm_daemon_needed; 167static struct mtx vm_daemon_mtx; 168/* Allow for use by vm_pageout before vm_daemon is initialized. 
 */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
#endif
static int vm_max_launder = 32;
static int vm_pageout_update_period;
static int defer_swap_pageouts;
static int disable_swap_pageouts;
static int lowmem_period = 10;
static time_t lowmem_uptime;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, OID_AUTO, pageout_wakeup_thresh,
	CTLFLAG_RW, &vm_pageout_wakeup_thresh, 0,
	"free page threshold for waking up the pageout daemon");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
	CTLFLAG_RW, &vm_pageout_update_period, 0,
	"Maximum active LRU update period");

SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RW, &lowmem_period, 0,
	"Low memory callback period");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
	CTLFLAG_RW, &vm_pageout_oom_seq, 0,
	"back-to-back calls to oom detector to start OOM");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
SYSCTL_INT(_vm, OID_AUTO, max_wired,
	CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");

static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
static boolean_t vm_pageout_launder(struct vm_pagequeue *pq, int, vm_paddr_t,
    vm_paddr_t);
#if !defined(NO_SWAPPING)
static void vm_pageout_map_deactivate_pages(vm_map_t, long);
static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void vm_req_vmdaemon(int req);
#endif
static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);

/*
 * Initialize a dummy page for marking the caller's place in the specified
 * paging queue.  In principle, this function only needs to set the flag
 * PG_MARKER.  Nonetheless, it write busies and initializes the hold count
 * to one as safety precautions.
248 */ 249static void 250vm_pageout_init_marker(vm_page_t marker, u_short queue) 251{ 252 253 bzero(marker, sizeof(*marker)); 254 marker->flags = PG_MARKER; 255 marker->busy_lock = VPB_SINGLE_EXCLUSIVER; 256 marker->queue = queue; 257 marker->hold_count = 1; 258} 259 260/* 261 * vm_pageout_fallback_object_lock: 262 * 263 * Lock vm object currently associated with `m'. VM_OBJECT_TRYWLOCK is 264 * known to have failed and page queue must be either PQ_ACTIVE or 265 * PQ_INACTIVE. To avoid lock order violation, unlock the page queues 266 * while locking the vm object. Use marker page to detect page queue 267 * changes and maintain notion of next page on page queue. Return 268 * TRUE if no changes were detected, FALSE otherwise. vm object is 269 * locked on return. 270 * 271 * This function depends on both the lock portion of struct vm_object 272 * and normal struct vm_page being type stable. 273 */ 274static boolean_t 275vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next) 276{ 277 struct vm_page marker; 278 struct vm_pagequeue *pq; 279 boolean_t unchanged; 280 u_short queue; 281 vm_object_t object; 282 283 queue = m->queue; 284 vm_pageout_init_marker(&marker, queue); 285 pq = vm_page_pagequeue(m); 286 object = m->object; 287 288 TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q); 289 vm_pagequeue_unlock(pq); 290 vm_page_unlock(m); 291 VM_OBJECT_WLOCK(object); 292 vm_page_lock(m); 293 vm_pagequeue_lock(pq); 294 295 /* 296 * The page's object might have changed, and/or the page might 297 * have moved from its original position in the queue. If the 298 * page's object has changed, then the caller should abandon 299 * processing the page because the wrong object lock was 300 * acquired. Use the marker's plinks.q, not the page's, to 301 * determine if the page has been moved. The state of the 302 * page's plinks.q can be indeterminate; whereas, the marker's 303 * plinks.q must be valid. 304 */ 305 *next = TAILQ_NEXT(&marker, plinks.q); 306 unchanged = m->object == object && 307 m == TAILQ_PREV(&marker, pglist, plinks.q); 308 KASSERT(!unchanged || m->queue == queue, 309 ("page %p queue %d %d", m, queue, m->queue)); 310 TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q); 311 return (unchanged); 312} 313 314/* 315 * Lock the page while holding the page queue lock. Use marker page 316 * to detect page queue changes and maintain notion of next page on 317 * page queue. Return TRUE if no changes were detected, FALSE 318 * otherwise. The page is locked on return. The page queue lock might 319 * be dropped and reacquired. 320 * 321 * This function depends on normal struct vm_page being type stable. 322 */ 323static boolean_t 324vm_pageout_page_lock(vm_page_t m, vm_page_t *next) 325{ 326 struct vm_page marker; 327 struct vm_pagequeue *pq; 328 boolean_t unchanged; 329 u_short queue; 330 331 vm_page_lock_assert(m, MA_NOTOWNED); 332 if (vm_page_trylock(m)) 333 return (TRUE); 334 335 queue = m->queue; 336 vm_pageout_init_marker(&marker, queue); 337 pq = vm_page_pagequeue(m); 338 339 TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q); 340 vm_pagequeue_unlock(pq); 341 vm_page_lock(m); 342 vm_pagequeue_lock(pq); 343 344 /* Page queue might have changed. 
 */
	*next = TAILQ_NEXT(&marker, plinks.q);
	unchanged = m == TAILQ_PREV(&marker, pglist, plinks.q);
	KASSERT(!unchanged || m->queue == queue,
	    ("page %p queue %d %d", m, queue, m->queue));
	TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
	return (unchanged);
}

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however, the busy bit isn't set till
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count], pb, ps;
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	vm_page_lock_assert(m, MA_OWNED);
	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Can't clean the page if it's busy or held.
	 */
	vm_page_assert_unbusied(m);
	KASSERT(m->hold_count == 0, ("vm_pageout_clean: page %p is held", m));
	vm_page_unlock(m);

	mc[vm_pageout_page_count] = pb = ps = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if (p->dirty == 0) {
			ib = 0;
			break;
		}
		vm_page_lock(p);
		if (p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			ib = 0;
			break;
		}
		vm_page_unlock(p);
		mc[--page_base] = pb = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
			break;
		vm_page_test_dirty(p);
		if (p->dirty == 0)
			break;
		vm_page_lock(p);
		if (p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			break;
		}
		vm_page_unlock(p);
		mc[page_base + pageout_count] = ps = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return (vm_pageout_flush(&mc[page_base], pageout_count, 0, 0, NULL,
	    NULL));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 * The given pages are laundered.  Note that we set up for the start of
 * I/O ( i.e. busy the page ), mark it read-only, and bump the object
 * reference count all in here rather than in the parent.  If we want
 * the parent to do more sophisticated things we may have to change
 * the ordering.
 *
 * Returned runlen is the count of pages between mreq and first
 * page after mreq with status VM_PAGER_AGAIN.
 * *eio is set to TRUE if pager returned VM_PAGER_ERROR or VM_PAGER_FAIL
 * for any page in runlen set.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
    boolean_t *eio)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i, runlen;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
			mc[i], i, count));
		vm_page_sbusy(mc[i]);
		pmap_remove_write(mc[i]);
	}
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count, flags, pageout_status);

	runlen = count - mreq;
	if (eio != NULL)
		*eio = FALSE;
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
		    !pmap_page_is_write_mapped(mt),
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If page couldn't be paged out, then reactivate the
			 * page so it doesn't clog the inactive list.  (We
			 * will try paging it out again later).
563 */ 564 vm_page_lock(mt); 565 vm_page_activate(mt); 566 vm_page_unlock(mt); 567 if (eio != NULL && i >= mreq && i - mreq < runlen) 568 *eio = TRUE; 569 break; 570 case VM_PAGER_AGAIN: 571 if (i >= mreq && i - mreq < runlen) 572 runlen = i - mreq; 573 break; 574 } 575 576 /* 577 * If the operation is still going, leave the page busy to 578 * block all other accesses. Also, leave the paging in 579 * progress indicator set so that we don't attempt an object 580 * collapse. 581 */ 582 if (pageout_status[i] != VM_PAGER_PEND) { 583 vm_object_pip_wakeup(object); 584 vm_page_sunbusy(mt); 585 if (vm_page_count_severe()) { 586 vm_page_lock(mt); 587 vm_page_try_to_cache(mt); 588 vm_page_unlock(mt); 589 } 590 } 591 } 592 if (prunlen != NULL) 593 *prunlen = runlen; 594 return (numpagedout); 595} 596 597static boolean_t 598vm_pageout_launder(struct vm_pagequeue *pq, int tries, vm_paddr_t low, 599 vm_paddr_t high) 600{ 601 struct mount *mp; 602 struct vnode *vp; 603 vm_object_t object; 604 vm_paddr_t pa; 605 vm_page_t m, m_tmp, next; 606 int lockmode; 607 608 vm_pagequeue_lock(pq); 609 TAILQ_FOREACH_SAFE(m, &pq->pq_pl, plinks.q, next) { 610 if ((m->flags & PG_MARKER) != 0) 611 continue; 612 pa = VM_PAGE_TO_PHYS(m); 613 if (pa < low || pa + PAGE_SIZE > high) 614 continue; 615 if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) { 616 vm_page_unlock(m); 617 continue; 618 } 619 object = m->object; 620 if ((!VM_OBJECT_TRYWLOCK(object) && 621 (!vm_pageout_fallback_object_lock(m, &next) || 622 m->hold_count != 0)) || vm_page_busied(m)) { 623 vm_page_unlock(m); 624 VM_OBJECT_WUNLOCK(object); 625 continue; 626 } 627 vm_page_test_dirty(m); 628 if (m->dirty == 0 && object->ref_count != 0) 629 pmap_remove_all(m); 630 if (m->dirty != 0) { 631 vm_page_unlock(m); 632 if (tries == 0 || (object->flags & OBJ_DEAD) != 0) { 633 VM_OBJECT_WUNLOCK(object); 634 continue; 635 } 636 if (object->type == OBJT_VNODE) { 637 vm_pagequeue_unlock(pq); 638 vp = object->handle; 639 vm_object_reference_locked(object); 640 VM_OBJECT_WUNLOCK(object); 641 (void)vn_start_write(vp, &mp, V_WAIT); 642 lockmode = MNT_SHARED_WRITES(vp->v_mount) ? 643 LK_SHARED : LK_EXCLUSIVE; 644 vn_lock(vp, lockmode | LK_RETRY); 645 VM_OBJECT_WLOCK(object); 646 vm_object_page_clean(object, 0, 0, OBJPC_SYNC); 647 VM_OBJECT_WUNLOCK(object); 648 VOP_UNLOCK(vp, 0); 649 vm_object_deallocate(object); 650 vn_finished_write(mp); 651 return (TRUE); 652 } else if (object->type == OBJT_SWAP || 653 object->type == OBJT_DEFAULT) { 654 vm_pagequeue_unlock(pq); 655 m_tmp = m; 656 vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC, 657 0, NULL, NULL); 658 VM_OBJECT_WUNLOCK(object); 659 return (TRUE); 660 } 661 } else { 662 /* 663 * Dequeue here to prevent lock recursion in 664 * vm_page_cache(). 665 */ 666 vm_page_dequeue_locked(m); 667 vm_page_cache(m); 668 vm_page_unlock(m); 669 } 670 VM_OBJECT_WUNLOCK(object); 671 } 672 vm_pagequeue_unlock(pq); 673 return (FALSE); 674} 675 676/* 677 * Increase the number of cached pages. The specified value, "tries", 678 * determines which categories of pages are cached: 679 * 680 * 0: All clean, inactive pages within the specified physical address range 681 * are cached. Will not sleep. 682 * 1: The vm_lowmem handlers are called. All inactive pages within 683 * the specified physical address range are cached. May sleep. 684 * 2: The vm_lowmem handlers are called. All inactive and active pages 685 * within the specified physical address range are cached. May sleep. 
686 */ 687void 688vm_pageout_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high) 689{ 690 int actl, actmax, inactl, inactmax, dom, initial_dom; 691 static int start_dom = 0; 692 693 if (tries > 0) { 694 /* 695 * Decrease registered cache sizes. The vm_lowmem handlers 696 * may acquire locks and/or sleep, so they can only be invoked 697 * when "tries" is greater than zero. 698 */ 699 SDT_PROBE0(vm, , , vm__lowmem_cache); 700 EVENTHANDLER_INVOKE(vm_lowmem, 0); 701 702 /* 703 * We do this explicitly after the caches have been drained 704 * above. 705 */ 706 uma_reclaim(); 707 } 708 709 /* 710 * Make the next scan start on the next domain. 711 */ 712 initial_dom = atomic_fetchadd_int(&start_dom, 1) % vm_ndomains; 713 714 inactl = 0; 715 inactmax = cnt.v_inactive_count; 716 actl = 0; 717 actmax = tries < 2 ? 0 : cnt.v_active_count; 718 dom = initial_dom; 719 720 /* 721 * Scan domains in round-robin order, first inactive queues, 722 * then active. Since domain usually owns large physically 723 * contiguous chunk of memory, it makes sense to completely 724 * exhaust one domain before switching to next, while growing 725 * the pool of contiguous physical pages. 726 * 727 * Do not even start launder a domain which cannot contain 728 * the specified address range, as indicated by segments 729 * constituting the domain. 730 */ 731again: 732 if (inactl < inactmax) { 733 if (vm_phys_domain_intersects(vm_dom[dom].vmd_segs, 734 low, high) && 735 vm_pageout_launder(&vm_dom[dom].vmd_pagequeues[PQ_INACTIVE], 736 tries, low, high)) { 737 inactl++; 738 goto again; 739 } 740 if (++dom == vm_ndomains) 741 dom = 0; 742 if (dom != initial_dom) 743 goto again; 744 } 745 if (actl < actmax) { 746 if (vm_phys_domain_intersects(vm_dom[dom].vmd_segs, 747 low, high) && 748 vm_pageout_launder(&vm_dom[dom].vmd_pagequeues[PQ_ACTIVE], 749 tries, low, high)) { 750 actl++; 751 goto again; 752 } 753 if (++dom == vm_ndomains) 754 dom = 0; 755 if (dom != initial_dom) 756 goto again; 757 } 758} 759 760#if !defined(NO_SWAPPING) 761/* 762 * vm_pageout_object_deactivate_pages 763 * 764 * Deactivate enough pages to satisfy the inactive target 765 * requirements. 766 * 767 * The object and map must be locked. 768 */ 769static void 770vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object, 771 long desired) 772{ 773 vm_object_t backing_object, object; 774 vm_page_t p; 775 int act_delta, remove_mode; 776 777 VM_OBJECT_ASSERT_LOCKED(first_object); 778 if ((first_object->flags & OBJ_FICTITIOUS) != 0) 779 return; 780 for (object = first_object;; object = backing_object) { 781 if (pmap_resident_count(pmap) <= desired) 782 goto unlock_return; 783 VM_OBJECT_ASSERT_LOCKED(object); 784 if ((object->flags & OBJ_UNMANAGED) != 0 || 785 object->paging_in_progress != 0) 786 goto unlock_return; 787 788 remove_mode = 0; 789 if (object->shadow_count > 1) 790 remove_mode = 1; 791 /* 792 * Scan the object's entire memory queue. 
793 */ 794 TAILQ_FOREACH(p, &object->memq, listq) { 795 if (pmap_resident_count(pmap) <= desired) 796 goto unlock_return; 797 if (vm_page_busied(p)) 798 continue; 799 PCPU_INC(cnt.v_pdpages); 800 vm_page_lock(p); 801 if (p->wire_count != 0 || p->hold_count != 0 || 802 !pmap_page_exists_quick(pmap, p)) { 803 vm_page_unlock(p); 804 continue; 805 } 806 act_delta = pmap_ts_referenced(p); 807 if ((p->aflags & PGA_REFERENCED) != 0) { 808 if (act_delta == 0) 809 act_delta = 1; 810 vm_page_aflag_clear(p, PGA_REFERENCED); 811 } 812 if (p->queue != PQ_ACTIVE && act_delta != 0) { 813 vm_page_activate(p); 814 p->act_count += act_delta; 815 } else if (p->queue == PQ_ACTIVE) { 816 if (act_delta == 0) { 817 p->act_count -= min(p->act_count, 818 ACT_DECLINE); 819 if (!remove_mode && p->act_count == 0) { 820 pmap_remove_all(p); 821 vm_page_deactivate(p); 822 } else 823 vm_page_requeue(p); 824 } else { 825 vm_page_activate(p); 826 if (p->act_count < ACT_MAX - 827 ACT_ADVANCE) 828 p->act_count += ACT_ADVANCE; 829 vm_page_requeue(p); 830 } 831 } else if (p->queue == PQ_INACTIVE) 832 pmap_remove_all(p); 833 vm_page_unlock(p); 834 } 835 if ((backing_object = object->backing_object) == NULL) 836 goto unlock_return; 837 VM_OBJECT_RLOCK(backing_object); 838 if (object != first_object) 839 VM_OBJECT_RUNLOCK(object); 840 } 841unlock_return: 842 if (object != first_object) 843 VM_OBJECT_RUNLOCK(object); 844} 845 846/* 847 * deactivate some number of pages in a map, try to do it fairly, but 848 * that is really hard to do. 849 */ 850static void 851vm_pageout_map_deactivate_pages(map, desired) 852 vm_map_t map; 853 long desired; 854{ 855 vm_map_entry_t tmpe; 856 vm_object_t obj, bigobj; 857 int nothingwired; 858 859 if (!vm_map_trylock(map)) 860 return; 861 862 bigobj = NULL; 863 nothingwired = TRUE; 864 865 /* 866 * first, search out the biggest object, and try to free pages from 867 * that. 868 */ 869 tmpe = map->header.next; 870 while (tmpe != &map->header) { 871 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 872 obj = tmpe->object.vm_object; 873 if (obj != NULL && VM_OBJECT_TRYRLOCK(obj)) { 874 if (obj->shadow_count <= 1 && 875 (bigobj == NULL || 876 bigobj->resident_page_count < obj->resident_page_count)) { 877 if (bigobj != NULL) 878 VM_OBJECT_RUNLOCK(bigobj); 879 bigobj = obj; 880 } else 881 VM_OBJECT_RUNLOCK(obj); 882 } 883 } 884 if (tmpe->wired_count > 0) 885 nothingwired = FALSE; 886 tmpe = tmpe->next; 887 } 888 889 if (bigobj != NULL) { 890 vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired); 891 VM_OBJECT_RUNLOCK(bigobj); 892 } 893 /* 894 * Next, hunt around for other pages to deactivate. We actually 895 * do this search sort of wrong -- .text first is not the best idea. 896 */ 897 tmpe = map->header.next; 898 while (tmpe != &map->header) { 899 if (pmap_resident_count(vm_map_pmap(map)) <= desired) 900 break; 901 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 902 obj = tmpe->object.vm_object; 903 if (obj != NULL) { 904 VM_OBJECT_RLOCK(obj); 905 vm_pageout_object_deactivate_pages(map->pmap, obj, desired); 906 VM_OBJECT_RUNLOCK(obj); 907 } 908 } 909 tmpe = tmpe->next; 910 } 911 912#ifdef __ia64__ 913 /* 914 * Remove all non-wired, managed mappings if a process is swapped out. 915 * This will free page table pages. 916 */ 917 if (desired == 0) 918 pmap_remove_pages(map->pmap); 919#else 920 /* 921 * Remove all mappings if a process is swapped out, this will free page 922 * table pages. 
923 */ 924 if (desired == 0 && nothingwired) { 925 pmap_remove(vm_map_pmap(map), vm_map_min(map), 926 vm_map_max(map)); 927 } 928#endif 929 930 vm_map_unlock(map); 931} 932#endif /* !defined(NO_SWAPPING) */ 933 934/* 935 * vm_pageout_scan does the dirty work for the pageout daemon. 936 * 937 * pass 0 - Update active LRU/deactivate pages 938 * pass 1 - Move inactive to cache or free 939 * pass 2 - Launder dirty pages 940 */ 941static void 942vm_pageout_scan(struct vm_domain *vmd, int pass) 943{ 944 vm_page_t m, next; 945 struct vm_pagequeue *pq; 946 vm_object_t object; 947 long min_scan; 948 int act_delta, addl_page_shortage, deficit, maxscan, page_shortage; 949 int vnodes_skipped = 0; 950 int maxlaunder, scan_tick, scanned, starting_page_shortage; 951 int lockmode; 952 boolean_t queues_locked; 953 954 /* 955 * If we need to reclaim memory ask kernel caches to return 956 * some. We rate limit to avoid thrashing. 957 */ 958 if (vmd == &vm_dom[0] && pass > 0 && 959 (time_uptime - lowmem_uptime) >= lowmem_period) { 960 /* 961 * Decrease registered cache sizes. 962 */ 963 SDT_PROBE0(vm, , , vm__lowmem_scan); 964 EVENTHANDLER_INVOKE(vm_lowmem, 0); 965 /* 966 * We do this explicitly after the caches have been 967 * drained above. 968 */ 969 uma_reclaim(); 970 lowmem_uptime = time_uptime; 971 } 972 973 /* 974 * The addl_page_shortage is the number of temporarily 975 * stuck pages in the inactive queue. In other words, the 976 * number of pages from the inactive count that should be 977 * discounted in setting the target for the active queue scan. 978 */ 979 addl_page_shortage = 0; 980 981 /* 982 * Calculate the number of pages we want to either free or move 983 * to the cache. 984 */ 985 if (pass > 0) { 986 deficit = atomic_readandclear_int(&vm_pageout_deficit); 987 page_shortage = vm_paging_target() + deficit; 988 } else 989 page_shortage = deficit = 0; 990 starting_page_shortage = page_shortage; 991 992 /* 993 * maxlaunder limits the number of dirty pages we flush per scan. 994 * For most systems a smaller value (16 or 32) is more robust under 995 * extreme memory and disk pressure because any unnecessary writes 996 * to disk can result in extreme performance degredation. However, 997 * systems with excessive dirty pages (especially when MAP_NOSYNC is 998 * used) will die horribly with limited laundering. If the pageout 999 * daemon cannot clean enough pages in the first pass, we let it go 1000 * all out in succeeding passes. 1001 */ 1002 if ((maxlaunder = vm_max_launder) <= 1) 1003 maxlaunder = 1; 1004 if (pass > 1) 1005 maxlaunder = 10000; 1006 1007 /* 1008 * Start scanning the inactive queue for pages we can move to the 1009 * cache or free. The scan will stop when the target is reached or 1010 * we have scanned the entire inactive queue. Note that m->act_count 1011 * is not used to form decisions for the inactive queue, only for the 1012 * active queue. 
 */
	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
	maxscan = pq->pq_cnt;
	vm_pagequeue_lock(pq);
	queues_locked = TRUE;
	for (m = TAILQ_FIRST(&pq->pq_pl);
	    m != NULL && maxscan-- > 0 && page_shortage > 0;
	    m = next) {
		vm_pagequeue_assert_locked(pq);
		KASSERT(queues_locked, ("unlocked queues"));
		KASSERT(m->queue == PQ_INACTIVE, ("Inactive queue %p", m));

		PCPU_INC(cnt.v_pdpages);
		next = TAILQ_NEXT(m, plinks.q);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		KASSERT((m->flags & PG_FICTITIOUS) == 0,
		    ("Fictitious page %p cannot be in inactive queue", m));
		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
		    ("Unmanaged page %p cannot be in inactive queue", m));

		/*
		 * The page or object lock acquisitions fail if the
		 * page was removed from the queue or moved to a
		 * different position within the queue.  In either
		 * case, addl_page_shortage should not be incremented.
		 */
		if (!vm_pageout_page_lock(m, &next)) {
			vm_page_unlock(m);
			continue;
		}
		object = m->object;
		if (!VM_OBJECT_TRYWLOCK(object) &&
		    !vm_pageout_fallback_object_lock(m, &next)) {
			vm_page_unlock(m);
			VM_OBJECT_WUNLOCK(object);
			continue;
		}

		/*
		 * Don't mess with busy pages, keep them at the
		 * front of the queue, most likely they are being
		 * paged out.  Increment addl_page_shortage for busy
		 * pages, because they may leave the inactive queue
		 * shortly after the page scan is finished.
		 */
		if (vm_page_busied(m)) {
			vm_page_unlock(m);
			VM_OBJECT_WUNLOCK(object);
			addl_page_shortage++;
			continue;
		}

		/*
		 * We unlock the inactive page queue, invalidating the
		 * 'next' pointer.  Use our marker to remember our
		 * place.
		 */
		TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, plinks.q);
		vm_pagequeue_unlock(pq);
		queues_locked = FALSE;

		/*
		 * We bump the activation count if the page has been
		 * referenced while in the inactive queue.  This makes
		 * it less likely that the page will be added back to the
		 * inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), since the upper
		 * level VM system knows nothing about existing
		 * references.
		 */
		act_delta = 0;
		if ((m->aflags & PGA_REFERENCED) != 0) {
			vm_page_aflag_clear(m, PGA_REFERENCED);
			act_delta = 1;
		}
		if (object->ref_count != 0) {
			act_delta += pmap_ts_referenced(m);
		} else {
			KASSERT(!pmap_page_is_mapped(m),
			    ("vm_pageout_scan: page %p is mapped", m));
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we reactivate the page or requeue it.
		 */
		if (act_delta != 0) {
			if (object->ref_count) {
				vm_page_activate(m);
				m->act_count += act_delta + ACT_ADVANCE;
			} else {
				vm_pagequeue_lock(pq);
				queues_locked = TRUE;
				vm_page_requeue_locked(m);
			}
			VM_OBJECT_WUNLOCK(object);
			vm_page_unlock(m);
			goto relock_queues;
		}

		if (m->hold_count != 0) {
			vm_page_unlock(m);
			VM_OBJECT_WUNLOCK(object);

			/*
			 * Held pages are essentially stuck in the
			 * queue.  So, they ought to be discounted
			 * from the inactive count.  See the
			 * calculation of the page_shortage for the
			 * loop over the active queue below.
			 */
			addl_page_shortage++;
			goto relock_queues;
		}

		/*
		 * If the page appears to be clean at the machine-independent
		 * layer, then remove all of its mappings from the pmap in
		 * anticipation of placing it onto the cache queue.  If,
		 * however, any of the page's mappings allow write access,
		 * then the page may still be modified until the last of those
		 * mappings are removed.
		 */
		if (object->ref_count != 0) {
			vm_page_test_dirty(m);
			if (m->dirty == 0)
				pmap_remove_all(m);
		}

		if (m->valid == 0) {
			/*
			 * Invalid pages can be easily freed
			 */
			vm_page_free(m);
			PCPU_INC(cnt.v_dfree);
			--page_shortage;
		} else if (m->dirty == 0) {
			/*
			 * Clean pages can be placed onto the cache queue.
			 * This effectively frees them.
			 */
			vm_page_cache(m);
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass < 2) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			m->flags |= PG_WINATCFLS;
			vm_pagequeue_lock(pq);
			queues_locked = TRUE;
			vm_page_requeue_locked(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp = NULL;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());

			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				vm_pagequeue_lock(pq);
				vm_page_unlock(m);
				VM_OBJECT_WUNLOCK(object);
				queues_locked = TRUE;
				vm_page_requeue_locked(m);
				goto relock_queues;
			}

			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
1229 * 1230 * We can't wait forever for the vnode lock, we might 1231 * deadlock due to a vn_read() getting stuck in 1232 * vm_wait while holding this vnode. We skip the 1233 * vnode if we can't get it in a reasonable amount 1234 * of time. 1235 */ 1236 if (object->type == OBJT_VNODE) { 1237 vm_page_unlock(m); 1238 vp = object->handle; 1239 if (vp->v_type == VREG && 1240 vn_start_write(vp, &mp, V_NOWAIT) != 0) { 1241 mp = NULL; 1242 ++pageout_lock_miss; 1243 if (object->flags & OBJ_MIGHTBEDIRTY) 1244 vnodes_skipped++; 1245 goto unlock_and_continue; 1246 } 1247 KASSERT(mp != NULL, 1248 ("vp %p with NULL v_mount", vp)); 1249 vm_object_reference_locked(object); 1250 VM_OBJECT_WUNLOCK(object); 1251 lockmode = MNT_SHARED_WRITES(vp->v_mount) ? 1252 LK_SHARED : LK_EXCLUSIVE; 1253 if (vget(vp, lockmode | LK_TIMELOCK, 1254 curthread)) { 1255 VM_OBJECT_WLOCK(object); 1256 ++pageout_lock_miss; 1257 if (object->flags & OBJ_MIGHTBEDIRTY) 1258 vnodes_skipped++; 1259 vp = NULL; 1260 goto unlock_and_continue; 1261 } 1262 VM_OBJECT_WLOCK(object); 1263 vm_page_lock(m); 1264 vm_pagequeue_lock(pq); 1265 queues_locked = TRUE; 1266 /* 1267 * The page might have been moved to another 1268 * queue during potential blocking in vget() 1269 * above. The page might have been freed and 1270 * reused for another vnode. 1271 */ 1272 if (m->queue != PQ_INACTIVE || 1273 m->object != object || 1274 TAILQ_NEXT(m, plinks.q) != &vmd->vmd_marker) { 1275 vm_page_unlock(m); 1276 if (object->flags & OBJ_MIGHTBEDIRTY) 1277 vnodes_skipped++; 1278 goto unlock_and_continue; 1279 } 1280 1281 /* 1282 * The page may have been busied during the 1283 * blocking in vget(). We don't move the 1284 * page back onto the end of the queue so that 1285 * statistics are more correct if we don't. 1286 */ 1287 if (vm_page_busied(m)) { 1288 vm_page_unlock(m); 1289 addl_page_shortage++; 1290 goto unlock_and_continue; 1291 } 1292 1293 /* 1294 * If the page has become held it might 1295 * be undergoing I/O, so skip it 1296 */ 1297 if (m->hold_count != 0) { 1298 vm_page_unlock(m); 1299 addl_page_shortage++; 1300 if (object->flags & OBJ_MIGHTBEDIRTY) 1301 vnodes_skipped++; 1302 goto unlock_and_continue; 1303 } 1304 vm_pagequeue_unlock(pq); 1305 queues_locked = FALSE; 1306 } 1307 1308 /* 1309 * If a page is dirty, then it is either being washed 1310 * (but not yet cleaned) or it is still in the 1311 * laundry. If it is still in the laundry, then we 1312 * start the cleaning operation. 1313 * 1314 * decrement page_shortage on success to account for 1315 * the (future) cleaned page. Otherwise we could wind 1316 * up laundering or cleaning too many pages. 
1317 */ 1318 if (vm_pageout_clean(m) != 0) { 1319 --page_shortage; 1320 --maxlaunder; 1321 } 1322unlock_and_continue: 1323 vm_page_lock_assert(m, MA_NOTOWNED); 1324 VM_OBJECT_WUNLOCK(object); 1325 if (mp != NULL) { 1326 if (queues_locked) { 1327 vm_pagequeue_unlock(pq); 1328 queues_locked = FALSE; 1329 } 1330 if (vp != NULL) 1331 vput(vp); 1332 vm_object_deallocate(object); 1333 vn_finished_write(mp); 1334 } 1335 vm_page_lock_assert(m, MA_NOTOWNED); 1336 goto relock_queues; 1337 } 1338 vm_page_unlock(m); 1339 VM_OBJECT_WUNLOCK(object); 1340relock_queues: 1341 if (!queues_locked) { 1342 vm_pagequeue_lock(pq); 1343 queues_locked = TRUE; 1344 } 1345 next = TAILQ_NEXT(&vmd->vmd_marker, plinks.q); 1346 TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q); 1347 } 1348 vm_pagequeue_unlock(pq); 1349 1350#if !defined(NO_SWAPPING) 1351 /* 1352 * Wakeup the swapout daemon if we didn't cache or free the targeted 1353 * number of pages. 1354 */ 1355 if (vm_swap_enabled && page_shortage > 0) 1356 vm_req_vmdaemon(VM_SWAP_NORMAL); 1357#endif 1358 1359 /* 1360 * Wakeup the sync daemon if we skipped a vnode in a writeable object 1361 * and we didn't cache or free enough pages. 1362 */ 1363 if (vnodes_skipped > 0 && page_shortage > cnt.v_free_target - 1364 cnt.v_free_min) 1365 (void)speedup_syncer(); 1366 1367 /* 1368 * If the inactive queue scan fails repeatedly to meet its 1369 * target, kill the largest process. 1370 */ 1371 vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage); 1372 1373 /* 1374 * Compute the number of pages we want to try to move from the 1375 * active queue to the inactive queue. 1376 */ 1377 page_shortage = cnt.v_inactive_target - cnt.v_inactive_count + 1378 vm_paging_target() + deficit + addl_page_shortage; 1379 1380 pq = &vmd->vmd_pagequeues[PQ_ACTIVE]; 1381 vm_pagequeue_lock(pq); 1382 maxscan = pq->pq_cnt; 1383 1384 /* 1385 * If we're just idle polling attempt to visit every 1386 * active page within 'update_period' seconds. 1387 */ 1388 scan_tick = ticks; 1389 if (vm_pageout_update_period != 0) { 1390 min_scan = pq->pq_cnt; 1391 min_scan *= scan_tick - vmd->vmd_last_active_scan; 1392 min_scan /= hz * vm_pageout_update_period; 1393 } else 1394 min_scan = 0; 1395 if (min_scan > 0 || (page_shortage > 0 && maxscan > 0)) 1396 vmd->vmd_last_active_scan = scan_tick; 1397 1398 /* 1399 * Scan the active queue for pages that can be deactivated. Update 1400 * the per-page activity counter and use it to identify deactivation 1401 * candidates. 1402 */ 1403 for (m = TAILQ_FIRST(&pq->pq_pl), scanned = 0; m != NULL && (scanned < 1404 min_scan || (page_shortage > 0 && scanned < maxscan)); m = next, 1405 scanned++) { 1406 1407 KASSERT(m->queue == PQ_ACTIVE, 1408 ("vm_pageout_scan: page %p isn't active", m)); 1409 1410 next = TAILQ_NEXT(m, plinks.q); 1411 if ((m->flags & PG_MARKER) != 0) 1412 continue; 1413 KASSERT((m->flags & PG_FICTITIOUS) == 0, 1414 ("Fictitious page %p cannot be in active queue", m)); 1415 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1416 ("Unmanaged page %p cannot be in active queue", m)); 1417 if (!vm_pageout_page_lock(m, &next)) { 1418 vm_page_unlock(m); 1419 continue; 1420 } 1421 1422 /* 1423 * The count for pagedaemon pages is done after checking the 1424 * page for eligibility... 1425 */ 1426 PCPU_INC(cnt.v_pdpages); 1427 1428 /* 1429 * Check to see "how much" the page has been used. 
 */
		act_delta = 0;
		if (m->aflags & PGA_REFERENCED) {
			vm_page_aflag_clear(m, PGA_REFERENCED);
			act_delta += 1;
		}
		/*
		 * Unlocked object ref count check.  Two races are possible.
		 * 1) The ref was transitioning to zero and we saw non-zero,
		 *    the pmap bits will be checked unnecessarily.
		 * 2) The ref was transitioning to one and we saw zero.
		 *    The page lock prevents a new reference to this page so
		 *    we need not check the reference bits.
		 */
		if (m->object->ref_count != 0)
			act_delta += pmap_ts_referenced(m);

		/*
		 * Advance or decay the act_count based on recent usage.
		 */
		if (act_delta) {
			m->act_count += ACT_ADVANCE + act_delta;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			act_delta = m->act_count;
		}

		/*
		 * Move this page to the tail of the active or inactive
		 * queue depending on usage.
		 */
		if (act_delta == 0) {
			/* Dequeue to avoid later lock recursion. */
			vm_page_dequeue_locked(m);
			vm_page_deactivate(m);
			page_shortage--;
		} else
			vm_page_requeue_locked(m);
		vm_page_unlock(m);
	}
	vm_pagequeue_unlock(pq);
#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_req_vmdaemon(VM_SWAP_IDLE);
			lsec = time_second;
		}
	}
#endif
}

static int vm_pageout_oom_vote;

/*
 * The pagedaemon threads randomly select one to perform the
 * OOM.  Trying to kill processes before all pagedaemons have
 * failed to reach the free target is premature.
 */
static void
vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
    int starting_page_shortage)
{
	int old_vote;

	if (starting_page_shortage <= 0 || starting_page_shortage !=
	    page_shortage)
		vmd->vmd_oom_seq = 0;
	else
		vmd->vmd_oom_seq++;
	if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
		if (vmd->vmd_oom) {
			vmd->vmd_oom = FALSE;
			atomic_subtract_int(&vm_pageout_oom_vote, 1);
		}
		return;
	}

	/*
	 * Do not follow the call sequence until the OOM condition is
	 * cleared.
	 */
	vmd->vmd_oom_seq = 0;

	if (vmd->vmd_oom)
		return;

	vmd->vmd_oom = TRUE;
	old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
	if (old_vote != vm_ndomains - 1)
		return;

	/*
	 * The current pagedaemon thread is the last in the quorum to
	 * start OOM.  Initiate the selection and signaling of the
	 * victim.
	 */
	vm_pageout_oom(VM_OOM_MEM);

	/*
	 * After one round of OOM terror, recall our vote.  On the
	 * next pass, the current pagedaemon would vote again if the low
	 * memory condition is still there, due to vmd_oom being
	 * false.
	 */
	vmd->vmd_oom = FALSE;
	atomic_subtract_int(&vm_pageout_oom_vote, 1);
}

/*
 * The OOM killer is the page daemon's action of last resort when
 * memory allocation requests have been stalled for a prolonged period
 * of time because it cannot reclaim memory.  This function computes
 * the approximate number of physical pages that could be reclaimed if
 * the specified address space is destroyed.
1550 * 1551 * Private, anonymous memory owned by the address space is the 1552 * principal resource that we expect to recover after an OOM kill. 1553 * Since the physical pages mapped by the address space's COW entries 1554 * are typically shared pages, they are unlikely to be released and so 1555 * they are not counted. 1556 * 1557 * To get to the point where the page daemon runs the OOM killer, its 1558 * efforts to write-back vnode-backed pages may have stalled. This 1559 * could be caused by a memory allocation deadlock in the write path 1560 * that might be resolved by an OOM kill. Therefore, physical pages 1561 * belonging to vnode-backed objects are counted, because they might 1562 * be freed without being written out first if the address space holds 1563 * the last reference to an unlinked vnode. 1564 * 1565 * Similarly, physical pages belonging to OBJT_PHYS objects are 1566 * counted because the address space might hold the last reference to 1567 * the object. 1568 */ 1569static long 1570vm_pageout_oom_pagecount(struct vmspace *vmspace) 1571{ 1572 vm_map_t map; 1573 vm_map_entry_t entry; 1574 vm_object_t obj; 1575 long res; 1576 1577 map = &vmspace->vm_map; 1578 KASSERT(!map->system_map, ("system map")); 1579 sx_assert(&map->lock, SA_LOCKED); 1580 res = 0; 1581 for (entry = map->header.next; entry != &map->header; 1582 entry = entry->next) { 1583 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) 1584 continue; 1585 obj = entry->object.vm_object; 1586 if (obj == NULL) 1587 continue; 1588 if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 && 1589 obj->ref_count != 1) 1590 continue; 1591 switch (obj->type) { 1592 case OBJT_DEFAULT: 1593 case OBJT_SWAP: 1594 case OBJT_PHYS: 1595 case OBJT_VNODE: 1596 res += obj->resident_page_count; 1597 break; 1598 } 1599 } 1600 return (res); 1601} 1602 1603void 1604vm_pageout_oom(int shortage) 1605{ 1606 struct proc *p, *bigproc; 1607 vm_offset_t size, bigsize; 1608 struct thread *td; 1609 struct vmspace *vm; 1610 1611 /* 1612 * We keep the process bigproc locked once we find it to keep anyone 1613 * from messing with it; however, there is a possibility of 1614 * deadlock if process B is bigproc and one of it's child processes 1615 * attempts to propagate a signal to B while we are waiting for A's 1616 * lock while walking this list. To avoid this, we don't block on 1617 * the process lock but just skip a process if it is already locked. 1618 */ 1619 bigproc = NULL; 1620 bigsize = 0; 1621 sx_slock(&allproc_lock); 1622 FOREACH_PROC_IN_SYSTEM(p) { 1623 int breakout; 1624 1625 PROC_LOCK(p); 1626 1627 /* 1628 * If this is a system, protected or killed process, skip it. 1629 */ 1630 if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC | 1631 P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 || 1632 p->p_pid == 1 || P_KILLED(p) || 1633 (p->p_pid < 48 && swap_pager_avail != 0)) { 1634 PROC_UNLOCK(p); 1635 continue; 1636 } 1637 /* 1638 * If the process is in a non-running type state, 1639 * don't touch it. Check all the threads individually. 
1640 */ 1641 breakout = 0; 1642 FOREACH_THREAD_IN_PROC(p, td) { 1643 thread_lock(td); 1644 if (!TD_ON_RUNQ(td) && 1645 !TD_IS_RUNNING(td) && 1646 !TD_IS_SLEEPING(td) && 1647 !TD_IS_SUSPENDED(td) && 1648 !TD_IS_SWAPPED(td)) { 1649 thread_unlock(td); 1650 breakout = 1; 1651 break; 1652 } 1653 thread_unlock(td); 1654 } 1655 if (breakout) { 1656 PROC_UNLOCK(p); 1657 continue; 1658 } 1659 /* 1660 * get the process size 1661 */ 1662 vm = vmspace_acquire_ref(p); 1663 if (vm == NULL) { 1664 PROC_UNLOCK(p); 1665 continue; 1666 } 1667 _PHOLD(p); 1668 if (!vm_map_trylock_read(&vm->vm_map)) { 1669 _PRELE(p); 1670 PROC_UNLOCK(p); 1671 vmspace_free(vm); 1672 continue; 1673 } 1674 PROC_UNLOCK(p); 1675 size = vmspace_swap_count(vm); 1676 if (shortage == VM_OOM_MEM) 1677 size += vm_pageout_oom_pagecount(vm); 1678 vm_map_unlock_read(&vm->vm_map); 1679 vmspace_free(vm); 1680 1681 /* 1682 * If this process is bigger than the biggest one, 1683 * remember it. 1684 */ 1685 if (size > bigsize) { 1686 if (bigproc != NULL) 1687 PRELE(bigproc); 1688 bigproc = p; 1689 bigsize = size; 1690 } else { 1691 PRELE(p); 1692 } 1693 } 1694 sx_sunlock(&allproc_lock); 1695 if (bigproc != NULL) { 1696 PROC_LOCK(bigproc); 1697 killproc(bigproc, "out of swap space"); 1698 sched_nice(bigproc, PRIO_MIN); 1699 _PRELE(bigproc); 1700 PROC_UNLOCK(bigproc); 1701 wakeup(&cnt.v_free_count); 1702 } 1703} 1704 1705static void 1706vm_pageout_worker(void *arg) 1707{ 1708 struct vm_domain *domain; 1709 int domidx; 1710 1711 domidx = (uintptr_t)arg; 1712 domain = &vm_dom[domidx]; 1713 1714 /* 1715 * XXXKIB It could be useful to bind pageout daemon threads to 1716 * the cores belonging to the domain, from which vm_page_array 1717 * is allocated. 1718 */ 1719 1720 KASSERT(domain->vmd_segs != 0, ("domain without segments")); 1721 domain->vmd_last_active_scan = ticks; 1722 vm_pageout_init_marker(&domain->vmd_marker, PQ_INACTIVE); 1723 1724 /* 1725 * The pageout daemon worker is never done, so loop forever. 1726 */ 1727 while (TRUE) { 1728 /* 1729 * If we have enough free memory, wakeup waiters. Do 1730 * not clear vm_pages_needed until we reach our target, 1731 * otherwise we may be woken up over and over again and 1732 * waste a lot of cpu. 1733 */ 1734 mtx_lock(&vm_page_queue_free_mtx); 1735 if (vm_pages_needed && !vm_page_count_min()) { 1736 if (!vm_paging_needed()) 1737 vm_pages_needed = 0; 1738 wakeup(&cnt.v_free_count); 1739 } 1740 if (vm_pages_needed) { 1741 /* 1742 * We're still not done. Either vm_pages_needed was 1743 * set by another thread during the previous scan 1744 * (typically, this happens during a level 0 scan) or 1745 * vm_pages_needed was already set and the scan failed 1746 * to free enough pages. If we haven't yet performed 1747 * a level >= 2 scan (unlimited dirty cleaning), then 1748 * upgrade the level and scan again now. Otherwise, 1749 * sleep a bit and try again later. While sleeping, 1750 * vm_pages_needed can be cleared. 1751 */ 1752 if (domain->vmd_pass > 1) 1753 msleep(&vm_pages_needed, 1754 &vm_page_queue_free_mtx, PVM, "psleep", 1755 hz / 2); 1756 } else { 1757 /* 1758 * Good enough, sleep until required to refresh 1759 * stats. 1760 */ 1761 msleep(&vm_pages_needed, &vm_page_queue_free_mtx, 1762 PVM, "psleep", hz); 1763 } 1764 if (vm_pages_needed) { 1765 cnt.v_pdwakeups++; 1766 domain->vmd_pass++; 1767 } else 1768 domain->vmd_pass = 0; 1769 mtx_unlock(&vm_page_queue_free_mtx); 1770 vm_pageout_scan(domain, domain->vmd_pass); 1771 } 1772} 1773 1774/* 1775 * vm_pageout_init initialises basic pageout daemon settings. 
1776 */ 1777static void 1778vm_pageout_init(void) 1779{ 1780 /* 1781 * Initialize some paging parameters. 1782 */ 1783 cnt.v_interrupt_free_min = 2; 1784 if (cnt.v_page_count < 2000) 1785 vm_pageout_page_count = 8; 1786 1787 /* 1788 * v_free_reserved needs to include enough for the largest 1789 * swap pager structures plus enough for any pv_entry structs 1790 * when paging. 1791 */ 1792 if (cnt.v_page_count > 1024) 1793 cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200; 1794 else 1795 cnt.v_free_min = 4; 1796 cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE + 1797 cnt.v_interrupt_free_min; 1798 cnt.v_free_reserved = vm_pageout_page_count + 1799 cnt.v_pageout_free_min + (cnt.v_page_count / 768); 1800 cnt.v_free_severe = cnt.v_free_min / 2; 1801 cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved; 1802 cnt.v_free_min += cnt.v_free_reserved; 1803 cnt.v_free_severe += cnt.v_free_reserved; 1804 cnt.v_inactive_target = (3 * cnt.v_free_target) / 2; 1805 if (cnt.v_inactive_target > cnt.v_free_count / 3) 1806 cnt.v_inactive_target = cnt.v_free_count / 3; 1807 1808 /* 1809 * Set the default wakeup threshold to be 10% above the minimum 1810 * page limit. This keeps the steady state out of shortfall. 1811 */ 1812 vm_pageout_wakeup_thresh = (cnt.v_free_min / 10) * 11; 1813 1814 /* 1815 * Set interval in seconds for active scan. We want to visit each 1816 * page at least once every ten minutes. This is to prevent worst 1817 * case paging behaviors with stale active LRU. 1818 */ 1819 if (vm_pageout_update_period == 0) 1820 vm_pageout_update_period = 600; 1821 1822 /* XXX does not really belong here */ 1823 if (vm_page_max_wired == 0) 1824 vm_page_max_wired = cnt.v_free_count / 3; 1825} 1826 1827/* 1828 * vm_pageout is the high level pageout daemon. 1829 */ 1830static void 1831vm_pageout(void) 1832{ 1833 int error; 1834#if MAXMEMDOM > 1 1835 int i; 1836#endif 1837 1838 swap_pager_swap_init(); 1839#if MAXMEMDOM > 1 1840 for (i = 1; i < vm_ndomains; i++) { 1841 error = kthread_add(vm_pageout_worker, (void *)(uintptr_t)i, 1842 curproc, NULL, 0, 0, "dom%d", i); 1843 if (error != 0) { 1844 panic("starting pageout for domain %d, error %d\n", 1845 i, error); 1846 } 1847 } 1848#endif 1849 error = kthread_add(uma_reclaim_worker, NULL, curproc, NULL, 1850 0, 0, "uma"); 1851 if (error != 0) 1852 panic("starting uma_reclaim helper, error %d\n", error); 1853 vm_pageout_worker((void *)(uintptr_t)0); 1854} 1855 1856/* 1857 * Unless the free page queue lock is held by the caller, this function 1858 * should be regarded as advisory. Specifically, the caller should 1859 * not msleep() on &cnt.v_free_count following this function unless 1860 * the free page queue lock is held until the msleep() is performed. 
1861 */ 1862void 1863pagedaemon_wakeup(void) 1864{ 1865 1866 if (!vm_pages_needed && curthread->td_proc != pageproc) { 1867 vm_pages_needed = 1; 1868 wakeup(&vm_pages_needed); 1869 } 1870} 1871 1872#if !defined(NO_SWAPPING) 1873static void 1874vm_req_vmdaemon(int req) 1875{ 1876 static int lastrun = 0; 1877 1878 mtx_lock(&vm_daemon_mtx); 1879 vm_pageout_req_swapout |= req; 1880 if ((ticks > (lastrun + hz)) || (ticks < lastrun)) { 1881 wakeup(&vm_daemon_needed); 1882 lastrun = ticks; 1883 } 1884 mtx_unlock(&vm_daemon_mtx); 1885} 1886 1887static void 1888vm_daemon(void) 1889{ 1890 struct rlimit rsslim; 1891 struct proc *p; 1892 struct thread *td; 1893 struct vmspace *vm; 1894 int breakout, swapout_flags, tryagain, attempts; 1895#ifdef RACCT 1896 uint64_t rsize, ravailable; 1897#endif 1898 1899 while (TRUE) { 1900 mtx_lock(&vm_daemon_mtx); 1901 msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", 1902#ifdef RACCT 1903 racct_enable ? hz : 0 1904#else 1905 0 1906#endif 1907 ); 1908 swapout_flags = vm_pageout_req_swapout; 1909 vm_pageout_req_swapout = 0; 1910 mtx_unlock(&vm_daemon_mtx); 1911 if (swapout_flags) 1912 swapout_procs(swapout_flags); 1913 1914 /* 1915 * scan the processes for exceeding their rlimits or if 1916 * process is swapped out -- deactivate pages 1917 */ 1918 tryagain = 0; 1919 attempts = 0; 1920again: 1921 attempts++; 1922 sx_slock(&allproc_lock); 1923 FOREACH_PROC_IN_SYSTEM(p) { 1924 vm_pindex_t limit, size; 1925 1926 /* 1927 * if this is a system process or if we have already 1928 * looked at this process, skip it. 1929 */ 1930 PROC_LOCK(p); 1931 if (p->p_state != PRS_NORMAL || 1932 p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) { 1933 PROC_UNLOCK(p); 1934 continue; 1935 } 1936 /* 1937 * if the process is in a non-running type state, 1938 * don't touch it. 1939 */ 1940 breakout = 0; 1941 FOREACH_THREAD_IN_PROC(p, td) { 1942 thread_lock(td); 1943 if (!TD_ON_RUNQ(td) && 1944 !TD_IS_RUNNING(td) && 1945 !TD_IS_SLEEPING(td) && 1946 !TD_IS_SUSPENDED(td)) { 1947 thread_unlock(td); 1948 breakout = 1; 1949 break; 1950 } 1951 thread_unlock(td); 1952 } 1953 if (breakout) { 1954 PROC_UNLOCK(p); 1955 continue; 1956 } 1957 /* 1958 * get a limit 1959 */ 1960 lim_rlimit(p, RLIMIT_RSS, &rsslim); 1961 limit = OFF_TO_IDX( 1962 qmin(rsslim.rlim_cur, rsslim.rlim_max)); 1963 1964 /* 1965 * let processes that are swapped out really be 1966 * swapped out set the limit to nothing (will force a 1967 * swap-out.) 1968 */ 1969 if ((p->p_flag & P_INMEM) == 0) 1970 limit = 0; /* XXX */ 1971 vm = vmspace_acquire_ref(p); 1972 PROC_UNLOCK(p); 1973 if (vm == NULL) 1974 continue; 1975 1976 size = vmspace_resident_count(vm); 1977 if (size >= limit) { 1978 vm_pageout_map_deactivate_pages( 1979 &vm->vm_map, limit); 1980 } 1981#ifdef RACCT 1982 if (racct_enable) { 1983 rsize = IDX_TO_OFF(size); 1984 PROC_LOCK(p); 1985 racct_set(p, RACCT_RSS, rsize); 1986 ravailable = racct_get_available(p, RACCT_RSS); 1987 PROC_UNLOCK(p); 1988 if (rsize > ravailable) { 1989 /* 1990 * Don't be overly aggressive; this 1991 * might be an innocent process, 1992 * and the limit could've been exceeded 1993 * by some memory hog. Don't try 1994 * to deactivate more than 1/4th 1995 * of process' resident set size. 1996 */ 1997 if (attempts <= 8) { 1998 if (ravailable < rsize - 1999 (rsize / 4)) { 2000 ravailable = rsize - 2001 (rsize / 4); 2002 } 2003 } 2004 vm_pageout_map_deactivate_pages( 2005 &vm->vm_map, 2006 OFF_TO_IDX(ravailable)); 2007 /* Update RSS usage after paging out. 
*/ 2008 size = vmspace_resident_count(vm); 2009 rsize = IDX_TO_OFF(size); 2010 PROC_LOCK(p); 2011 racct_set(p, RACCT_RSS, rsize); 2012 PROC_UNLOCK(p); 2013 if (rsize > ravailable) 2014 tryagain = 1; 2015 } 2016 } 2017#endif 2018 vmspace_free(vm); 2019 } 2020 sx_sunlock(&allproc_lock); 2021 if (tryagain != 0 && attempts <= 10) 2022 goto again; 2023 } 2024} 2025#endif /* !defined(NO_SWAPPING) */ 2026
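
The SYSCTL_INT() declarations near the top of this file export the pageout tunables under the vm tree (vm.pageout_wakeup_thresh, vm.max_launder, vm.pageout_update_period, vm.lowmem_period, vm.pageout_oom_seq, vm.max_wired). As a rough illustration only, the standalone userland sketch below reads a few of them with sysctlbyname(3); it is not part of vm_pageout.c and assumes a FreeBSD system with these knobs present.

/*
 * Illustrative userland sketch: read the pageout tunables exported by
 * the SYSCTL_INT() declarations in vm_pageout.c.  Error handling is
 * intentionally minimal.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	const char *names[] = {
		"vm.pageout_wakeup_thresh",	/* free page wakeup threshold */
		"vm.max_launder",		/* dirty flush limit per scan */
		"vm.pageout_update_period",	/* active LRU update period */
		"vm.lowmem_period",		/* vm_lowmem callback period */
		"vm.pageout_oom_seq",		/* failed scans before OOM */
		"vm.max_wired",			/* system-wide wired page limit */
	};
	int value;
	size_t len;
	unsigned i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		len = sizeof(value);
		if (sysctlbyname(names[i], &value, &len, NULL, 0) == 0)
			printf("%s = %d\n", names[i], value);
		else
			printf("%s: unavailable\n", names[i]);
	}
	return (0);
}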
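vm_pageout_init() derives the free-page thresholds from cnt.v_page_count at boot. The sketch below replays that arithmetic for a hypothetical configuration; the 4 KB page size, 64 KB MAXBSIZE, and 1 GB of managed memory are assumed values chosen for illustration, not taken from this file.

/*
 * Standalone sketch of the threshold arithmetic in vm_pageout_init().
 * All inputs are illustrative assumptions; the kernel uses cnt.*.
 */
#include <stdio.h>

int
main(void)
{
	long page_count = (1L << 30) / 4096;	/* hypothetical: 1 GB / 4 KB */
	long interrupt_free_min = 2;
	long pageout_page_count = 16;		/* VM_PAGEOUT_PAGE_COUNT */
	long maxbsize = 65536, page_size = 4096;
	long free_min, pageout_free_min, free_reserved, free_severe;
	long free_target, wakeup_thresh;

	free_min = page_count > 1024 ? 4 + (page_count - 1024) / 200 : 4;
	pageout_free_min = (2 * maxbsize) / page_size + interrupt_free_min;
	free_reserved = pageout_page_count + pageout_free_min + page_count / 768;
	free_severe = free_min / 2;
	free_target = 4 * free_min + free_reserved;
	free_min += free_reserved;
	free_severe += free_reserved;
	wakeup_thresh = (free_min / 10) * 11;	/* 10% above v_free_min */

	printf("v_free_min      %ld\n", free_min);
	printf("v_free_target   %ld\n", free_target);
	printf("v_free_severe   %ld\n", free_severe);
	printf("v_free_reserved %ld\n", free_reserved);
	printf("wakeup_thresh   %ld\n", wakeup_thresh);
	return (0);
}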
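The active-queue loop in vm_pageout_scan() paces itself so that, even with no page shortage, every active page is visited roughly once per vm.pageout_update_period seconds. A small sketch of that pacing arithmetic; the hz value, queue length, and wakeup interval below are assumed, illustrative numbers.

/*
 * Sketch of the min_scan computation from vm_pageout_scan(): scale the
 * active queue length by the fraction of the update period that has
 * elapsed since the last active scan.
 */
#include <stdio.h>

int
main(void)
{
	long hz = 1000;			/* ticks per second (assumed) */
	long update_period = 600;	/* vm.pageout_update_period default */
	long pq_cnt = 500000;		/* hypothetical active queue length */
	long ticks_since_last = hz;	/* woke up one second later */
	long min_scan;

	min_scan = pq_cnt;
	min_scan *= ticks_since_last;
	min_scan /= hz * update_period;
	printf("pages scanned this wakeup: %ld\n", min_scan);
	printf("full queue visited in about %ld wakeups\n",
	    min_scan > 0 ? (pq_cnt + min_scan - 1) / min_scan : 0);
	return (0);
}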