Lines Matching refs:page

63 #define PAGE_ASSERT(page, condition)	\
64 ASSERT_PRINT((condition), "page: %p", (page))
67 // this many pages will be cleared at once in the page scrubber thread
70 // maximum I/O priority of the page writer
76 // The page reserve that an allocation of a certain priority must not touch.
83 // Minimum number of free pages the page daemon will try to achieve.
88 // Wait interval between page daemon runs.
98 // vm_page::usage_count bonus an accessed page receives in a scan.
100 // vm_page::usage_count penalty an unaccessed page receives in a scan.
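
The definitions these two comments annotate are cut off by the listing. For reference, a hedged reconstruction using the names the scan code below relies on (kPageUsageAdvance and kPageUsageDecline appear in the matches; kPageUsageMax is assumed); the values are recalled from the Haiku sources and should be treated as illustrative:

	// Illustrative values -- not shown in the listing.
	static const uint32 kPageUsageMax = 64;
		// upper clamp for vm_page::usage_count
	static const uint32 kPageUsageAdvance = 3;
		// bonus an accessed page receives in a scan
	static const uint32 kPageUsageDecline = 1;
		// penalty an unaccessed page receives in a scan
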
127 static mutex sPageDeficitLock = MUTEX_INITIALIZER("page deficit");
129 // This lock must be used whenever the free or clear page queues are changed.
134 = RW_LOCK_INITIALIZER("free/clear page queues");
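
A minimal sketch of the convention described at 129, assuming the rw_lock initialized at 134 is named sFreePageQueuesLock (the declaration line itself is not among the matches) and using Haiku's ReadLocker/WriteLocker RAII helpers:

	// Read-locking suffices when only one of the two queues is changed
	// (each queue carries its own internal lock); write-locking is needed
	// when the free and clear queues must stay consistent with each other.
	{
		ReadLocker locker(sFreePageQueuesLock);
		vm_page* page = sFreePageQueue.RemoveHeadUnlocked();
		// ...
	}
	{
		WriteLocker locker(sFreePageQueuesLock);
		// e.g. move pages between sFreePageQueue and sClearPageQueue
	}
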
275 out.Print("page reserve: %" B_PRIu32, fCount);
294 out.Print("page unreserve: %" B_PRId32, fCount);
315 out.Print("page alloc: %#" B_PRIxPHYSADDR, fPageNumber);
337 out.Print("page alloc run: start %#" B_PRIxPHYSADDR " length: %"
360 out.Print("page free: %#" B_PRIxPHYSADDR, fPageNumber);
379 out.Print("page scrubbing: %" B_PRId32, fCount);
398 out.Print("page scrubbed: %" B_PRId32, fCount);
415 out.Print("page stolen");
434 ActivatePage(vm_page* page)
436 fCache(page->cache),
437 fPage(page)
444 out.Print("page activated: %p, cache: %p", fPage, fCache);
455 DeactivatePage(vm_page* page)
457 fCache(page->cache),
458 fPage(page)
465 out.Print("page deactivated: %p, cache: %p", fPage, fCache);
476 FreedPageSwap(vm_page* page)
478 fCache(page->cache),
479 fPage(page)
486 out.Print("page swap freed: %p, cache: %p", fPage, fCache);
509 WritePage(vm_page* page)
511 fCache(page->Cache()),
512 fPage(page)
519 out.Print("page write: %p, cache: %p", fPage, fCache);
542 SetPageState(vm_page* page, uint8 newState)
544 fPage(page),
545 fOldState(page->State()),
547 fBusy(page->busy),
548 fWired(page->WiredCount() > 0),
549 fMapped(!page->mappings.IsEmpty()),
550 fAccessed(page->accessed),
551 fModified(page->modified)
571 out.Print("page set state: %p (%c%c%c%c%c): %s -> %s", fPage,
699 kprintf("page number %#" B_PRIxPHYSADDR, pageNumber);
749 kprintf("page %#" B_PRIxPHYSADDR "\n", pageNumber);
764 list_page(vm_page* page)
767 (addr_t)(page->physical_page_number * B_PAGE_SIZE));
768 switch (page->State()) {
779 if (page->busy) kprintf("B"); else kprintf("-");
780 if (page->busy_writing) kprintf("W"); else kprintf("-");
781 if (page->accessed) kprintf("A"); else kprintf("-");
782 if (page->modified) kprintf("M"); else kprintf("-");
783 if (page->unused) kprintf("U"); else kprintf("-");
785 kprintf(" usage:%3u", page->usage_count);
786 kprintf(" wired:%5u", page->WiredCount());
789 vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
807 kprintf("page table:\n");
814 kprintf("end of page table\n");
823 struct vm_page *page;
850 page = (vm_page*)address;
855 if (p == page) {
856 kprintf("found page %p in queue %p (%s)\n", page,
863 kprintf("page %p isn't in any queue\n", page);
932 struct vm_page* page;
935 page = (struct vm_page *)(addr_t)pageAddress;
948 kprintf("Virtual address not mapped to a physical page in this "
955 page = vm_lookup_page(pageAddress / B_PAGE_SIZE);
958 const page_num_t expected = sPhysicalPageOffset + (page - sPages);
960 kprintf("PAGE: %p\n", page);
961 kprintf("queue_next,prev: %p, %p\n", page->queue_link.next,
962 page->queue_link.previous);
963 kprintf("physical_number: %#" B_PRIxPHYSADDR "\n", page->physical_page_number);
964 if (page->physical_page_number != expected)
966 kprintf("cache: %p\n", page->Cache());
967 kprintf("cache_offset: %" B_PRIuPHYSADDR "\n", page->cache_offset);
968 kprintf("cache_next: %p\n", page->cache_next);
969 kprintf("state: %s\n", page_state_to_string(page->State()));
970 kprintf("wired_count: %d\n", page->WiredCount());
971 kprintf("usage_count: %d\n", page->usage_count);
972 kprintf("busy: %d\n", page->busy);
973 kprintf("busy_writing: %d\n", page->busy_writing);
974 kprintf("accessed: %d\n", page->accessed);
975 kprintf("modified: %d\n", page->modified);
977 kprintf("queue: %p\n", page->queue);
980 kprintf("accessor: %" B_PRId32 "\n", page->accessing_thread);
984 vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
1004 == page->physical_page_number) {
1019 set_debug_variable("_cache", (addr_t)page->Cache());
1021 set_debug_variable("_accessor", page->accessing_thread);
1062 struct vm_page *page = queue->Head();
1064 kprintf("page cache type state wired usage\n");
1065 for (page_num_t i = 0; page; i++, page = queue->Next(page)) {
1066 kprintf("%p %p %-7s %8s %5d %5d\n", page, page->Cache(),
1067 vm_cache_type_to_string(page->Cache()->type),
1068 page_state_to_string(page->State()),
1069 page->WiredCount(), page->usage_count);
1101 panic("page %" B_PRIuPHYSADDR " at %p has invalid state!\n", i,
1138 kprintf("page stats:\n");
1157 kprintf("unsatisfied page reservations: %" B_PRId32 "\n",
1296 kprintf("\ntotal page allocations: %" B_PRIuSIZE "\n",
1362 track_page_usage(vm_page* page)
1364 if (page->WiredCount() == 0) {
1365 sNextPageUsage[(int32)page->usage_count + 128]++;
1386 TRACE_DAEMON("average page usage: %f (%lu pages)\n",
1395 kprintf("distribution of page usage counts (%lu pages):",
1525 free_page(vm_page* page, bool clear)
1527 DEBUG_PAGE_ACCESS_CHECK(page);
1529 PAGE_ASSERT(page, !page->IsMapped());
1533 switch (page->State()) {
1548 panic("free_page(): page %p already free", page);
1555 panic("free_page(): page %p in invalid state %d",
1556 page, page->State());
1560 if (page->CacheRef() != NULL)
1561 panic("to be freed page %p has cache", page);
1562 if (page->IsMapped())
1563 panic("to be freed page %p has mappings", page);
1566 fromQueue->RemoveUnlocked(page);
1568 TA(FreePage(page->physical_page_number));
1571 page->allocation_tracking_info.Clear();
1576 DEBUG_PAGE_ACCESS_END(page);
1579 page->SetState(PAGE_STATE_CLEAR);
1580 sClearPageQueue.PrependUnlocked(page);
1582 page->SetState(PAGE_STATE_FREE);
1583 sFreePageQueue.PrependUnlocked(page);
1591 /*! The caller must make sure that no-one else tries to change the page's state
1592 while the function is called. If the page has a cache, this can be done by
1596 set_page_state(vm_page *page, int pageState)
1598 DEBUG_PAGE_ACCESS_CHECK(page);
1600 if (pageState == page->State())
1605 switch (page->State()) {
1620 panic("set_page_state(): page %p is free/clear", page);
1627 panic("set_page_state(): page %p in invalid state %d",
1628 page, page->State());
1645 PAGE_ASSERT(page, !page->IsMapped());
1646 PAGE_ASSERT(page, !page->modified);
1662 VMCache* cache = page->Cache();
1666 else if (page->State() == PAGE_STATE_MODIFIED)
1670 // move the page
1672 // Note: Theoretically we are required to lock when changing the page
1675 // page states and active pages have a cache that must be locked at
1677 // before trying to change/interpret the page state.
1678 PAGE_ASSERT(page, cache != NULL);
1680 page->SetState(pageState);
1683 fromQueue->RemoveUnlocked(page);
1685 page->SetState(pageState);
1688 toQueue->AppendUnlocked(page);
1693 /*! Moves a previously modified page into a now appropriate queue.
1694 The page queues must not be locked.
1697 move_page_to_appropriate_queue(vm_page *page)
1699 DEBUG_PAGE_ACCESS_CHECK(page);
1701 // Note, this logic must be in sync with what the page daemon does.
1703 if (page->IsMapped())
1705 else if (page->modified)
1711 // page.
1712 set_page_state(page, state);
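
Only two of the branches match the search; the full state selection presumably reads as follows (a hedged reconstruction from the visible conditions and the final call):

	int32 state;
	if (page->IsMapped())
		state = PAGE_STATE_ACTIVE;
	else if (page->modified)
		state = PAGE_STATE_MODIFIED;
	else
		state = PAGE_STATE_CACHED;

	set_page_state(page, state);
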
1717 clear_page(struct vm_page *page)
1719 vm_memset_physical(page->physical_page_number << PAGE_SHIFT, 0,
1732 "): start page is before free list\n", startPage, length);
1752 vm_page *page = &sPages[startPage + i];
1753 switch (page->State()) {
1757 // TODO: This violates the page reservation policy, since we remove pages from
1760 DEBUG_PAGE_ACCESS_START(page);
1761 VMPageQueue& queue = page->State() == PAGE_STATE_FREE
1763 queue.Remove(page);
1764 page->SetState(wired ? PAGE_STATE_WIRED : PAGE_STATE_UNUSED);
1765 page->busy = false;
1767 DEBUG_PAGE_ACCESS_END(page);
1779 dprintf("mark_page_range_in_use: page %#" B_PRIxPHYSADDR
1780 " in non-free state %d!\n", startPage + i, page->State());
1812 // we must make sure we don't cause a violation of the page
1824 vm_page *page[SCRUB_SIZE];
1827 page[i] = sFreePageQueue.RemoveHeadUnlocked();
1828 if (page[i] == NULL)
1831 DEBUG_PAGE_ACCESS_START(page[i]);
1833 page[i]->SetState(PAGE_STATE_ACTIVE);
1834 page[i]->busy = true;
1849 clear_page(page[i]);
1856 page[i]->SetState(PAGE_STATE_CLEAR);
1857 page[i]->busy = false;
1858 DEBUG_PAGE_ACCESS_END(page[i]);
1859 sClearPageQueue.PrependUnlocked(page[i]);
1909 vm_page* page = sModifiedPageQueue.Head();
1910 if (page == NULL)
1913 sModifiedPageQueue.Requeue(page, true);
1917 if (!page->busy)
1918 return page;
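
Pieced together from the matches at 1909-1918, next_modified_page() evidently rotates the head of the modified queue to its tail until a non-busy page turns up; a hedged reconstruction (the locking is an assumption on my part):

	static vm_page*
	next_modified_page(page_num_t& maxPagesToSee)
	{
		InterruptsSpinLocker locker(sModifiedPageQueue.GetLock());

		while (maxPagesToSee > 0) {
			vm_page* page = sModifiedPageQueue.Head();
			if (page == NULL)
				return NULL;

			// requeue at the tail so repeated calls walk the whole queue
			sModifiedPageQueue.Requeue(page, true);
			maxPagesToSee--;

			if (!page->busy)
				return page;
		}

		return NULL;
	}
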
1937 void AddPage(vm_page* page);
1956 void SetTo(PageWriterRun* run, vm_page* page, int32 maxPages);
1957 bool AddPage(vm_page* page);
1985 void SetTo(vm_page* page);
2005 panic("page write wrapper going out of scope but isn't completed");
2009 /*! The page's cache must be locked.
2012 PageWriteWrapper::SetTo(vm_page* page)
2014 DEBUG_PAGE_ACCESS_CHECK(page);
2016 if (page->busy)
2017 panic("setting page write wrapper to busy page");
2020 panic("re-setting page write wrapper that isn't completed");
2022 fPage = page;
2023 fCache = page->Cache();
2029 // We have a modified page -- however, while we're writing it back,
2030 // the page might still be mapped. In order not to lose any changes to the
2031 // page, we mark it clean before actually writing it back; if
2032 // writing the page fails for some reason, we'll just keep it in the
2033 // modified page list, but that should happen only rarely.
2035 // If the page is changed after we cleared the dirty flag, but before we
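
In code, the ordering this comment describes presumably boils down to something like the following sketch (that vm_clear_map_flags() is the helper clearing the mapping dirty bits is my assumption):

	page->busy = true;

	// Mark the page clean *before* the write starts. If a mapping
	// dirties it again while the I/O is in flight, it is simply
	// written once more later.
	page->modified = false;
	vm_clear_map_flags(page, PAGE_MODIFIED);
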
2043 /*! The page's cache must be locked.
2044 The page queues must not be locked.
2045 \return \c true if the page was written successfully or could otherwise be
2052 panic("completing page write wrapper that is not active");
2057 // Set unbusy and notify later by hand, since we might free the page.
2067 // Writing the page failed. One reason would be that the cache has been
2068 // shrunk and the page no longer belongs to the file. Otherwise the
2069 // actual I/O failed, in which case we'll simply keep the page modified.
2073 // shrunk while we were trying to write the page and we have to free
2081 // Writing the page failed -- mark the page modified and move it to
2086 dprintf("PageWriteWrapper: Failed to write page %p: %s\n", fPage,
2111 /*! The page's cache must be locked.
2114 PageWriteTransfer::SetTo(PageWriterRun* run, vm_page* page, int32 maxPages)
2117 fCache = page->Cache();
2118 fOffset = page->cache_offset;
2123 fVecs[0].base = (phys_addr_t)page->physical_page_number << PAGE_SHIFT;
2129 /*! The page's cache must be locked.
2132 PageWriteTransfer::AddPage(vm_page* page)
2134 if (page->Cache() != fCache
2141 if ((phys_addr_t)page->physical_page_number << PAGE_SHIFT == nextBase
2142 && (off_t)page->cache_offset == fOffset + fPageCount) {
2150 if ((phys_addr_t)page->physical_page_number << PAGE_SHIFT == nextBase
2151 && (off_t)page->cache_offset == fOffset - 1) {
2155 fOffset = page->cache_offset;
2160 if (((off_t)page->cache_offset == fOffset + fPageCount
2161 || (off_t)page->cache_offset == fOffset - 1)
2165 if ((off_t)page->cache_offset < fOffset) {
2170 fOffset = page->cache_offset;
2176 = (phys_addr_t)page->physical_page_number << PAGE_SHIFT;
2211 // and the last page has at least been written partially
2254 /*! The page's cache must be locked.
2257 PageWriterRun::AddPage(vm_page* page)
2259 fWrappers[fWrapperCount++].SetTo(page);
2261 if (fTransferCount == 0 || !fTransfers[fTransferCount - 1].AddPage(page)) {
2262 fTransfers[fTransferCount++].SetTo(this, page,
2263 page->Cache()->MaxPagesPerAsyncWrite());
2276 fAllFinishedCondition.Init(this, "page writer wait for I/O");
2309 // We've acquired a reference for each page
2331 /*! The page writer continuously takes some pages from the modified
2350 panic("page writer: Failed to init PageWriterRun!");
2369 // single page. Take a break.
2409 vm_page *page = next_modified_page(maxPagesToSee);
2410 if (page == NULL)
2413 PageCacheLocker cacheLocker(page, false);
2417 VMCache *cache = page->Cache();
2419 // If the page is busy or its state has changed while we were
2421 if (page->busy || page->State() != PAGE_STATE_MODIFIED)
2424 DEBUG_PAGE_ACCESS_START(page);
2427 if (page->WiredCount() > 0) {
2428 set_page_state(page, PAGE_STATE_ACTIVE);
2429 DEBUG_PAGE_ACCESS_END(page);
2438 (off_t)page->cache_offset << PAGE_SHIFT))
2441 // We can't/don't want to do anything with this page, so move it
2443 if (page->mappings.IsEmpty())
2444 set_page_state(page, PAGE_STATE_INACTIVE);
2446 set_page_state(page, PAGE_STATE_ACTIVE);
2448 DEBUG_PAGE_ACCESS_END(page);
2455 DEBUG_PAGE_ACCESS_END(page);
2461 run.AddPage(page);
2465 // we could mark a page busy that would need to be accessed
2466 // when writing back another page, thus causing a deadlock.
2468 DEBUG_PAGE_ACCESS_END(page);
2470 //dprintf("write page %p, cache %p (%ld)\n", page, page->cache, page->cache->ref_count);
2471 TPW(WritePage(page));
2495 TRACE(("page writer: wrote 1024 pages (total: %" B_PRIu64 " ms, "
2520 // TODO: This should be done in the page daemon!
2526 vm_page *page = vm_page_at_index(index);
2527 PageCacheLocker locker(page);
2531 DEBUG_PAGE_ACCESS_START(page);
2533 VMCache* cache = page->Cache();
2534 if (cache->temporary && page->WiredCount() == 0
2535 && cache->HasPage(page->cache_offset << PAGE_SHIFT)
2536 && page->usage_count > 0) {
2537 // TODO: how to judge whether a page is highly active?
2538 if (swap_free_page_swap_space(page)) {
2539 // We need to mark the page modified, since otherwise it could be
2541 vm_page_set_state(page, PAGE_STATE_MODIFIED);
2542 TD(FreedPageSwap(page));
2543 DEBUG_PAGE_ACCESS_END(page);
2547 DEBUG_PAGE_ACCESS_END(page);
2560 vm_page *page;
2564 page = sCachedPageQueue.Head();
2566 // Get the next page of the current queue
2572 page = sCachedPageQueue.Next(&marker);
2577 while (page != NULL) {
2578 if (!page->busy) {
2581 sCachedPageQueue.InsertAfter(page, &marker);
2582 return page;
2585 page = sCachedPageQueue.Next(page);
2593 free_cached_page(vm_page *page, bool dontWait)
2595 // try to lock the page's cache
2596 if (vm_cache_acquire_locked_page_cache(page, dontWait) == NULL)
2598 VMCache* cache = page->Cache();
2603 // check again if that page is still a candidate
2604 if (page->busy || page->State() != PAGE_STATE_CACHED)
2607 DEBUG_PAGE_ACCESS_START(page);
2609 PAGE_ASSERT(page, !page->IsMapped());
2610 PAGE_ASSERT(page, !page->modified);
2612 // we can now steal this page
2614 cache->RemovePage(page);
2615 // Now the page doesn't have a cache anymore, so no one else (e.g.
2619 sCachedPageQueue.RemoveUnlocked(page);
2633 vm_page *page = find_cached_page_candidate(marker);
2634 if (page == NULL)
2637 if (free_cached_page(page, dontWait)) {
2639 page->SetState(PAGE_STATE_FREE);
2640 DEBUG_PAGE_ACCESS_END(page);
2641 sFreePageQueue.PrependUnlocked(page);
2669 // Get the next page. Note that we don't bother to lock here. We go with
2672 // queue anyway to lock the page's cache, and we'll recheck afterwards.
2673 vm_page* page = queue.Head();
2674 if (page == NULL)
2677 // lock the page's cache
2678 VMCache* cache = vm_cache_acquire_locked_page_cache(page, true);
2682 if (page->State() != PAGE_STATE_ACTIVE) {
2683 // page is no longer in the cache or in this queue
2688 if (page->busy) {
2689 // page is busy -- requeue at the end
2690 vm_page_requeue(page, true);
2695 DEBUG_PAGE_ACCESS_START(page);
2697 // Get the page active/modified flags and update the page's usage count.
2700 // via page fault whenever such an inactive page is used again.
2705 if (page->WiredCount() > 0 || page->usage_count > 0
2707 usageCount = vm_clear_page_mapping_accessed_flags(page);
2709 usageCount = vm_remove_all_page_mappings_if_unaccessed(page);
2712 usageCount += page->usage_count + kPageUsageAdvance;
2717 usageCount += page->usage_count - (int32)kPageUsageDecline;
2720 set_page_state(page, PAGE_STATE_INACTIVE);
2724 page->usage_count = usageCount;
2726 DEBUG_PAGE_ACCESS_END(page);
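
The clamping between the advance and decline branches falls outside the matches; presumably it follows the standard pattern (hedged, with kPageUsageMax as assumed near the top of the file):

	if (usageCount > 0) {
		usageCount += page->usage_count + kPageUsageAdvance;
		if (usageCount > kPageUsageMax)
			usageCount = kPageUsageMax;
	} else {
		usageCount += page->usage_count - (int32)kPageUsageDecline;
		if (usageCount < 0) {
			usageCount = 0;
			set_page_state(page, PAGE_STATE_INACTIVE);
		}
	}

	page->usage_count = usageCount;
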
2749 // it is relatively expensive to page out pages, so we do that on a grander
2765 // get the next page
2766 vm_page* page = nextPage;
2767 if (page == NULL)
2769 nextPage = queue.Next(page);
2771 if (page->busy)
2775 queue.InsertAfter(page, &marker);
2778 // lock the page's cache
2779 VMCache* cache = vm_cache_acquire_locked_page_cache(page, true);
2780 if (cache == NULL || page->busy
2781 || page->State() != PAGE_STATE_INACTIVE) {
2792 DEBUG_PAGE_ACCESS_START(page);
2795 // unmap the page, if it hasn't been accessed.
2797 if (page->WiredCount() > 0)
2798 usageCount = vm_clear_page_mapping_accessed_flags(page);
2800 usageCount = vm_remove_all_page_mappings_if_unaccessed(page);
2804 usageCount += page->usage_count + kPageUsageAdvance;
2808 usageCount += page->usage_count - (int32)kPageUsageDecline;
2813 page->usage_count = usageCount;
2822 // page out memory as well.
2823 bool isMapped = page->IsMapped();
2826 set_page_state(page, PAGE_STATE_ACTIVE);
2829 vm_page_requeue(page, true);
2831 vm_page_requeue(page, true);
2832 } else if (!page->modified) {
2833 set_page_state(page, PAGE_STATE_CACHED);
2837 set_page_state(page, PAGE_STATE_MODIFIED);
2841 vm_page_requeue(page, true);
2843 DEBUG_PAGE_ACCESS_END(page);
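
Taken together, the matches between 2823 and 2841 form a decision tree for each inactive page; filling the gaps (a hedged reconstruction, keeping the branch order visible above):

	bool isMapped = page->IsMapped();
	if (usageCount > 0) {
		// accessed again -- promote back to the active queue
		set_page_state(page, PAGE_STATE_ACTIVE);
	} else if (isMapped) {
		// idle but still mapped -- keep it in the inactive queue
		vm_page_requeue(page, true);
	} else if (!page->modified) {
		// clean and unmapped -- immediately reclaimable
		set_page_state(page, PAGE_STATE_CACHED);
	} else {
		// must be written back before it can be reused
		set_page_state(page, PAGE_STATE_MODIFIED);
	}
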
2861 // wake up the page writer, if we tossed it some pages
2894 // get the next page
2895 vm_page* page = nextPage;
2896 if (page == NULL)
2898 nextPage = queue.Next(page);
2900 if (page->busy)
2904 queue.InsertAfter(page, &marker);
2907 // lock the page's cache
2908 VMCache* cache = vm_cache_acquire_locked_page_cache(page, true);
2909 if (cache == NULL || page->busy || page->State() != PAGE_STATE_ACTIVE) {
2920 DEBUG_PAGE_ACCESS_START(page);
2922 // Get the page active/modified flags and update the page's usage count.
2923 int32 usageCount = vm_clear_page_mapping_accessed_flags(page);
2926 usageCount += page->usage_count + kPageUsageAdvance;
2932 usageCount += page->usage_count - (int32)kPageUsageDecline;
2935 set_page_state(page, PAGE_STATE_INACTIVE);
2940 page->usage_count = usageCount;
2942 DEBUG_PAGE_ACCESS_END(page);
2962 TRACE_DAEMON("page daemon: idle run\n");
2983 TRACE_DAEMON("page daemon: full run: free: %" B_PRIu32 ", cached: %"
3123 \param firstPage Offset (in page size units) of the first page in the range.
3124 \param endPage End offset (in page size units) of the page range. The page
3164 vm_page* page = it.Next();
3165 if (page == NULL || page->cache_offset >= endPage) {
3169 page = NULL;
3172 if (page != NULL) {
3173 if (page->busy
3174 || (page->State() != PAGE_STATE_MODIFIED
3175 && !vm_test_map_modification(page))) {
3176 page = NULL;
3181 if (page != NULL) {
3186 DEBUG_PAGE_ACCESS_START(page);
3188 wrapper->SetTo(page);
3190 if (transferEmpty || transfer.AddPage(page)) {
3192 transfer.SetTo(NULL, page, maxPages);
3196 DEBUG_PAGE_ACCESS_END(page);
3202 DEBUG_PAGE_ACCESS_END(page);
3217 if (page != NULL) {
3218 transfer.SetTo(NULL, page, maxPages);
3244 /*! Schedules the page writer to write back the specified \a page.
3246 take several seconds until the page is actually written out.
3249 vm_page_schedule_write_page(vm_page *page)
3251 PAGE_ASSERT(page, page->State() == PAGE_STATE_MODIFIED);
3253 vm_page_requeue(page, false);
3268 vm_page *page = it.Next();) {
3269 if (page->cache_offset >= endPage)
3272 if (!page->busy && page->State() == PAGE_STATE_MODIFIED) {
3273 DEBUG_PAGE_ACCESS_START(page);
3274 vm_page_requeue(page, false);
3276 DEBUG_PAGE_ACCESS_END(page);
3314 TRACE(("first phys page = %#" B_PRIxPHYSADDR ", end %#" B_PRIxPHYSADDR "\n",
3326 // init page queues
3336 // map in the new free page table
3344 // initialize the free page table
3370 // mark the allocated physical page ranges wired
3382 // a cached page with each allocation.
3410 create_area("page structures", &dummy, B_EXACT_ADDRESS,
3417 "Dump statistics about page usage");
3418 add_debugger_command_etc("page", &dump_page_long,
3419 "Dump page info",
3421 "Prints information for the physical page. If neither \"-p\" nor\n"
3423 "the vm_page data structure for the page in question. If \"-p\" is\n"
3424 "given, the address is the physical address of the page. If \"-v\" is\n"
3426 "thread's address space and for the page it is mapped to (if any)\n"
3428 "search all known address spaces for mappings to that page and print\n"
3430 add_debugger_command("page_queue", &dump_page_queue, "Dump page queue");
3432 "Find out which queue a page is actually in");
3436 "Dumps statistics about page usage counts",
3438 "Dumps statistics about page usage counts.\n",
3445 "Dump current page allocations summed up per caller",
3456 "Dump current page allocations",
3457 "[ --stacktrace ] [ -p <page number> ] [ --team <team ID> ] "
3460 "The optional \"-p\" page number filters for a specific page,\n"
3479 thread_id thread = spawn_kernel_thread(&page_scrubber, "page scrubber",
3483 // start page writer
3485 sPageWriterCondition.Init("page writer");
3487 thread = spawn_kernel_thread(&page_writer, "page writer",
3491 // start page daemon
3493 sPageDaemonCondition.Init("page daemon");
3495 thread = spawn_kernel_thread(&page_daemon, "page daemon",
3504 vm_mark_page_inuse(page_num_t page)
3506 return vm_mark_page_range_inuse(page, 1);
3600 vm_page* page = queue->RemoveHeadUnlocked();
3601 if (page == NULL) {
3602 // if the primary queue was empty, grab the page from the
3604 page = otherQueue->RemoveHeadUnlocked();
3606 if (page == NULL) {
3607 // Unlikely, but possible: the page we have reserved has moved
3613 page = queue->RemoveHead();
3614 if (page == NULL)
3617 if (page == NULL) {
3618 panic("Had reserved page, but there is none!");
3627 if (page->CacheRef() != NULL)
3628 panic("supposed to be free page %p has cache\n", page);
3630 DEBUG_PAGE_ACCESS_START(page);
3632 int oldPageState = page->State();
3633 page->SetState(pageState);
3634 page->busy = (flags & VM_PAGE_ALLOC_BUSY) != 0;
3635 page->usage_count = 0;
3636 page->accessed = false;
3637 page->modified = false;
3642 sPageQueues[pageState].AppendUnlocked(page);
3644 // clear the page, if we had to take it from the free queue and a clear
3645 // page was requested
3647 clear_page(page);
3650 page->allocation_tracking_info.Init(
3651 TA(AllocatePage(page->physical_page_number)));
3653 TA(AllocatePage(page->physical_page_number));
3656 return page;
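
A hedged usage sketch for the allocator whose fast path appears above, assuming the reserve/allocate/unreserve API suggested by the tracing entries at 275 and 294:

	vm_page_reservation reservation;
	vm_page_reserve_pages(&reservation, 1, VM_PRIORITY_USER);

	// Takes a page from the free (or clear) queue; with
	// VM_PAGE_ALLOC_CLEAR the page is zeroed if it wasn't already.
	vm_page* page = vm_page_allocate_page(&reservation,
		PAGE_STATE_ACTIVE | VM_PAGE_ALLOC_CLEAR);

	// ... use the page ...

	vm_page_unreserve_pages(&reservation);
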
3666 while (vm_page* page = freePages.RemoveTail()) {
3667 page->busy = false;
3668 page->SetState(PAGE_STATE_FREE);
3669 DEBUG_PAGE_ACCESS_END(page);
3670 sFreePageQueue.PrependUnlocked(page);
3673 while (vm_page* page = clearPages.RemoveTail()) {
3674 page->busy = false;
3675 page->SetState(PAGE_STATE_CLEAR);
3676 DEBUG_PAGE_ACCESS_END(page);
3677 sClearPageQueue.PrependUnlocked(page);
3687 The caller must have write-locked the free/clear page queues. The function
3699 \param freeClearQueueLocker Locked WriteLocker for the free/clear page
3701 \return The index of the first page that could not be allocated. \a length
3722 vm_page& page = sPages[start + i];
3723 switch (page.State()) {
3725 DEBUG_PAGE_ACCESS_START(&page);
3726 sClearPageQueue.Remove(&page);
3727 clearPages.Add(&page);
3730 DEBUG_PAGE_ACCESS_START(&page);
3731 sFreePageQueue.Remove(&page);
3732 freePages.Add(&page);
3741 // Probably a page was cached when our caller checked. Now it's
3751 page.SetState(flags & VM_PAGE_ALLOC_STATE);
3752 page.busy = (flags & VM_PAGE_ALLOC_BUSY) != 0;
3753 page.usage_count = 0;
3754 page.accessed = false;
3755 page.modified = false;
3760 // failed to allocate a page -- free all that we've got
3774 // skip, if we've already got the page
3786 // free the page, if it is still cached
3787 vm_page& page = sPages[nextIndex];
3788 if (!free_cached_page(&page, false)) {
3789 // TODO: if the page turns out to have been freed already,
3794 page.SetState(flags & VM_PAGE_ALLOC_STATE);
3795 page.busy = (flags & VM_PAGE_ALLOC_BUSY) != 0;
3796 page.usage_count = 0;
3797 page.accessed = false;
3798 page.modified = false;
3800 freePages.InsertBefore(freePage, &page);
3823 vm_page* page = it.Next();) {
3824 clear_page(page);
3858 \param restrictions Restrictions to the physical addresses of the page run
3860 address where the page run may start, \c high_address, the last
3861 acceptable physical address where the page run may end (i.e. it must
3863 \c alignment, the alignment of the page run start address, and
3864 \c boundary, multiples of which the page run must not cross.
3866 \param priority The page reservation priority (as passed to
3868 \return The first page of the allocated page run on success; \c NULL
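
A hedged usage sketch matching the documented restrictions, assuming Haiku's physical_address_restrictions structure and the vm_page_allocate_page_run() signature; the concrete limits are made up for illustration:

	physical_address_restrictions restrictions = {};
	restrictions.low_address = 0;
	restrictions.high_address = 16 * 1024 * 1024;
		// hypothetical: run must end below 16 MB
	restrictions.alignment = 64 * 1024;
		// hypothetical: run must start 64 KB aligned
	restrictions.boundary = 0;
		// no boundary-crossing constraint

	vm_page* firstPage = vm_page_allocate_page_run(
		PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR, 4, &restrictions,
		VM_PRIORITY_SYSTEM);
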
3875 // compute start and end page index
3974 // apparently a cached page couldn't be allocated -- skip it and
4006 vm_page_is_dummy(struct vm_page *page)
4008 return page < sPages || page >= sPages + sNumPages;
4012 /*! Free the page that belonged to a certain cache.
4014 if the page's state does not equal PAGE_STATE_MODIFIED.
4016 \param cache The cache the page was previously owned by or NULL. The page
4019 \param page The page to free.
4020 \param reservation If not NULL, the page count of the reservation will be
4021 incremented, thus allowing the caller to allocate another page for the freed one at
4025 vm_page_free_etc(VMCache* cache, vm_page* page,
4028 PAGE_ASSERT(page, page->State() != PAGE_STATE_FREE
4029 && page->State() != PAGE_STATE_CLEAR);
4031 if (page->State() == PAGE_STATE_MODIFIED && cache->temporary)
4034 free_page(page, false);
4041 vm_page_set_state(vm_page *page, int pageState)
4043 PAGE_ASSERT(page, page->State() != PAGE_STATE_FREE
4044 && page->State() != PAGE_STATE_CLEAR);
4047 free_page(page, pageState == PAGE_STATE_CLEAR);
4050 set_page_state(page, pageState);
4054 /*! Moves a page to either the tail or the head of its current queue,
4056 The page must have a cache and the cache must be locked!
4059 vm_page_requeue(struct vm_page *page, bool tail)
4061 PAGE_ASSERT(page, page->Cache() != NULL);
4062 page->Cache()->AssertLocked();
4063 // DEBUG_PAGE_ACCESS_CHECK(page);
4073 switch (page->State()) {
4088 panic("vm_page_requeue() called for free/clear page %p", page);
4095 page, page->State());
4099 queue->RequeueUnlocked(page, tail);
4110 /*! There is a subtle distinction between the page counts returned by
4186 // TODO: We don't consider pages used for page directories/tables yet.
4190 /*! Returns the greatest address within the last page of accessible physical
4193 means that the last page ends at exactly 4 GB.
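
Concretely: with a 32-bit phys_addr_t and accessible physical memory ending at exactly 4 GB, the returned value would be 0xffffffff, the last byte of that last page.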