Lines Matching refs:page

306 		out.Print("page fault %#lx %s %s, pc: %#lx", fAddress,
318 // page fault errors
344 out.Print("page fault error: no area");
347 out.Print("page fault error: area: %ld, kernel only", fArea);
350 out.Print("page fault error: area: %ld, write protected",
354 out.Print("page fault error: area: %ld, read protected", fArea);
357 out.Print("page fault error: area: %ld, execute protected",
361 out.Print("page fault error: kernel touching bad user memory");
364 out.Print("page fault error: no address space");
367 out.Print("page fault error: area: %ld, error: %s", fArea,
382 vm_page* page)
387 fPage(page)
394 out.Print("page fault done: area: %ld, top cache: %p, cache: %p, "
395 "page: %p", fArea, fTopCache, fCache, fPage);
416 /*! The page's cache must be locked.
419 increment_page_wired_count(vm_page* page)
421 if (!page->IsMapped())
423 page->IncrementWiredCount();
427 /*! The page's cache must be locked.
430 decrement_page_wired_count(vm_page* page)
432 page->DecrementWiredCount();
433 if (!page->IsMapped())
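
The increment_page_wired_count()/decrement_page_wired_count() fragments above only show their IsMapped() checks; the lines elided by the search presumably update a global mapped-pages statistic. A minimal, self-contained sketch of the invariant they maintain, with simplified stand-in types (SketchPage and gMappedPages are illustrations, not Haiku's identifiers):

    // Sketch only -- SketchPage and gMappedPages are simplified stand-ins, not
    // Haiku's vm_page or its statistics. Both helpers assume the page's cache
    // is locked by the caller, as the doc comments above require.
    #include <cassert>

    struct SketchPage {
        int wiredCount = 0;
        int mappingCount = 0;
        bool IsMapped() const { return wiredCount > 0 || mappingCount > 0; }
    };

    static int gMappedPages = 0;  // stand-in for a global mapped-pages counter

    void incrementPageWiredCount(SketchPage& page)
    {
        if (!page.IsMapped())
            ++gMappedPages;       // the page becomes "mapped" purely by being wired
        ++page.wiredCount;
    }

    void decrementPageWiredCount(SketchPage& page)
    {
        assert(page.wiredCount > 0);
        --page.wiredCount;
        if (!page.IsMapped())
            --gMappedPages;       // last mapping/wiring is gone
    }

The point of the ordering is that a page counts as mapped while it has either mappings or a non-zero wired count, so the counter only moves on the first wire of an otherwise unmapped page and on the last unwire.
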
439 virtual_page_address(VMArea* area, vm_page* page)
442 + ((page->cache_offset << PAGE_SHIFT) - area->cache_offset);
447 is_page_in_area(VMArea* area, vm_page* page)
449 off_t pageCacheOffsetBytes = (off_t)(page->cache_offset << PAGE_SHIFT);
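
The cache-offset arithmetic in virtual_page_address() and is_page_in_area() above is easy to misread: the page's cache offset is counted in pages, while the area's cache offset is in bytes. A self-contained sketch of the same math with hypothetical stand-in structs (SimpleArea/SimplePage, 4 KiB pages assumed):

    // Sketch only: SimpleArea and SimplePage are hypothetical stand-ins.
    // page.cacheOffset is counted in pages (like vm_page::cache_offset);
    // area.cacheOffset is the area's byte offset into its cache.
    #include <cstdint>
    #include <cstdio>

    constexpr int kPageShift = 12;                 // assumption: 4 KiB pages

    struct SimpleArea { uint64_t base, cacheOffset, size; };
    struct SimplePage { uint64_t cacheOffset; };

    uint64_t virtualPageAddress(const SimpleArea& area, const SimplePage& page)
    {
        return area.base + ((page.cacheOffset << kPageShift) - area.cacheOffset);
    }

    bool isPageInArea(const SimpleArea& area, const SimplePage& page)
    {
        uint64_t offsetBytes = page.cacheOffset << kPageShift;
        return offsetBytes >= area.cacheOffset
            && offsetBytes < area.cacheOffset + area.size;
    }

    int main()
    {
        SimpleArea area{0x100000, 0x2000, 0x10000};
        SimplePage page{3};                        // fourth page of the cache
        if (isPageInArea(area, page)) {
            std::printf("page maps at %#llx\n",
                (unsigned long long)virtualPageAddress(area, page));
        }
        return 0;
    }
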
474 // In the page protections we store only the three user protections,
475 // so we use 4 bits per page.
490 // init the page protections for all pages to that of the area
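
The comment above notes that only the three user protection bits are stored, four bits per page, i.e. two pages per byte. A sketch of that nibble packing (the helper names, the mask constant, and the choice of which nibble an even index uses are assumptions for illustration, not the accessors in vm.cpp):

    // Sketch only: two pages share one byte; in this sketch even page indices
    // use the low nibble, odd ones the high nibble.
    #include <cstdint>
    #include <vector>

    constexpr uint8_t kUserProtectionMask = 0x7;   // user read | write | execute

    void setPageProtection(std::vector<uint8_t>& protections, size_t pageIndex,
        uint8_t protection)
    {
        protection &= kUserProtectionMask;
        uint8_t& entry = protections[pageIndex / 2];
        if (pageIndex % 2 == 0)
            entry = (entry & 0xf0) | protection;
        else
            entry = (entry & 0x0f) | (protection << 4);
    }

    uint8_t getPageProtection(const std::vector<uint8_t>& protections,
        size_t pageIndex)
    {
        uint8_t entry = protections[pageIndex / 2];
        return (pageIndex % 2 == 0 ? entry : uint8_t(entry >> 4))
            & kUserProtectionMask;
    }

Initializing "the page protections for all pages to that of the area" then presumably amounts to filling the array with the area's protection replicated into both nibbles.
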
549 implementation might need to map this page.
550 The page's cache must be locked.
553 map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection,
558 bool wasMapped = page->IsMapped();
561 DEBUG_PAGE_ACCESS_CHECK(page);
571 mapping->page = page;
576 map->Map(address, page->physical_page_number * B_PAGE_SIZE, protection,
580 if (!page->IsMapped())
583 page->mappings.Add(mapping);
588 DEBUG_PAGE_ACCESS_CHECK(page);
591 map->Map(address, page->physical_page_number * B_PAGE_SIZE, protection,
595 increment_page_wired_count(page);
599 		// The page is mapped now, so it must not remain in the cached queue.
601 // otherwise the page daemon wouldn't come to keep track of it (in idle
602 // mode) -- if the page isn't touched, it will be deactivated after a
604 if (page->State() == PAGE_STATE_CACHED
605 || page->State() == PAGE_STATE_INACTIVE) {
606 vm_page_set_state(page, PAGE_STATE_ACTIVE);
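
Pieced together from the map_page() fragments above, the ordering is: remember whether the page was already mapped, install the hardware translation, record the mapping (or, for wired areas, bump the wired count), and finally move a cached/inactive page to the active state so the page daemon keeps tracking it. A simplified, self-contained sketch of that flow; the types are stand-ins, and the real code additionally locks the translation map, allocates the mapping from gPageMappingsObjectCache, and links it into the area's list as well:

    // Sketch only: the types and the areaIsWired flag are simplified stand-ins.
    #include <list>
    #include <new>

    enum SketchPageState { kStateCached, kStateInactive, kStateActive };

    struct SketchMapping;

    struct SketchPage {
        std::list<SketchMapping*> mappings;
        int wiredCount = 0;
        SketchPageState state = kStateCached;
        bool IsMapped() const { return wiredCount > 0 || !mappings.empty(); }
    };

    struct SketchMapping { SketchPage* page; /* area pointer elided */ };

    bool sketchMapPage(SketchPage& page, bool areaIsWired)
    {
        bool wasMapped = page.IsMapped();

        // ... map->Map(address, page->physical_page_number * B_PAGE_SIZE, ...)
        // would install the hardware translation at this point ...

        if (!areaIsWired) {
            SketchMapping* mapping = new (std::nothrow) SketchMapping{&page};
            if (mapping == nullptr)
                return false;                 // mapping allocation can fail
            page.mappings.push_back(mapping); // the list owns the mapping here
        } else {
            ++page.wiredCount;                // wired areas track no mapping object
        }

        // In this sketch the queue move is skipped when the page was already
        // mapped; either way the goal is to keep the page daemon tracking it.
        if (!wasMapped
            && (page.state == kStateCached || page.state == kStateInactive)) {
            page.state = kStateActive;
        }
        return true;
    }
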
615 page's cache.
932 // Set the correct page protections for the second area.
937 vm_page* page = it.Next();) {
938 if (is_page_in_area(secondArea, page)) {
939 addr_t address = virtual_page_address(secondArea, page);
1359 panic("area has no page protections");
1369 // And set the proper page protections so that the fault case will actually
1370 // fail and not simply try to map a new page.
1467 vm_page* page = NULL;
1566 // Optimization: For a single-page contiguous allocation without low/high
1605 // TODO: We don't reserve the memory for the pages for the page
1637 // we try to allocate the page run here upfront as this may easily
1639 page = vm_page_allocate_page_run(PAGE_STATE_WIRED | pageAllocFlags,
1641 if (page == NULL) {
1713 vm_page* page = vm_page_allocate_page(&reservation,
1715 cache->InsertPage(page, offset);
1716 map_page(area, page, address, protection, &reservation);
1718 DEBUG_PAGE_ACCESS_END(page);
1747 page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
1748 if (page == NULL) {
1749 panic("looking up page failed for pa %#" B_PRIxPHYSADDR
1753 DEBUG_PAGE_ACCESS_START(page);
1755 cache->InsertPage(page, offset);
1756 increment_page_wired_count(page);
1757 vm_page_set_state(page, PAGE_STATE_WIRED);
1758 page->busy = false;
1760 DEBUG_PAGE_ACCESS_END(page);
1773 = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
1782 page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
1783 if (page == NULL)
1784 panic("couldn't lookup physical page just allocated\n");
1789 panic("couldn't map physical page in page run\n");
1791 cache->InsertPage(page, offset);
1792 increment_page_wired_count(page);
1794 DEBUG_PAGE_ACCESS_END(page);
1818 phys_addr_t pageNumber = page->physical_page_number;
1821 page = vm_lookup_page(pageNumber);
1822 if (page == NULL)
1823 panic("couldn't lookup physical page just allocated\n");
1825 vm_page_set_state(page, PAGE_STATE_FREE);
1860 	// if the physical address is not page aligned,
1861 // move the actual area down to align on a page boundary
1946 TODO: This function was introduced to map physical page vecs to
2121 vm_page* page = it.Next();) {
2122 if (page->cache_offset >= endPage)
2126 if (page->busy || page->usage_count == 0)
2129 DEBUG_PAGE_ACCESS_START(page);
2130 map_page(area, page,
2131 baseAddress + (page->cache_offset * B_PAGE_SIZE - cacheOffset),
2133 DEBUG_PAGE_ACCESS_END(page);
2140 \a offset and \a size arguments have to be page aligned.
2466 vm_page* page = it.Next();) {
2467 if (!page->busy) {
2468 DEBUG_PAGE_ACCESS_START(page);
2469 map_page(newArea, page,
2470 newArea->Base() + ((page->cache_offset << PAGE_SHIFT)
2473 DEBUG_PAGE_ACCESS_END(page);
2525 // and dirty flags of the top cache page mappings.
2586 - Either the cache must not have any wired ranges or a page reservation for
2592 		has wired pages. The wired pages are copied in this case.
2633 vm_page* page = it.Next();) {
2634 if (page->WiredCount() > 0) {
2635 // allocate a new page and copy the wired one
2641 page->physical_page_number * B_PAGE_SIZE);
2643 // move the wired page to the upper cache (note: removing is OK
2645 upperCache->MovePage(page);
2647 page->cache_offset * B_PAGE_SIZE);
2651 // Change the protection of this page in all areas.
2654 if (!is_page_in_area(tempArea, page))
2659 addr_t address = virtual_page_address(tempArea, page);
2686 vm_page* page = it.Next();) {
2687 if (!is_page_in_area(tempArea, page))
2692 addr_t address = virtual_page_address(tempArea, page);
3028 vm_page* page = it.Next();) {
3029 if (page->cache_offset >= firstPageOffset
3030 && page->cache_offset <= lastPageOffset) {
3031 addr_t address = virtual_page_address(area, page);
3067 /*! The page's cache must be locked.
3070 vm_test_map_modification(vm_page* page)
3072 if (page->modified)
3075 vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
3084 map->Query(virtual_page_address(area, page), &physicalAddress, &flags);
3095 /*! The page's cache must be locked.
3098 vm_clear_map_flags(vm_page* page, uint32 flags)
3101 page->accessed = false;
3103 page->modified = false;
3105 vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
3112 map->ClearFlags(virtual_page_address(area, page), flags);
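
vm_test_map_modification() and vm_clear_map_flags() above share one pattern: combine the software accessed/modified flags kept on the vm_page with the hardware bits queried or cleared per mapping through each area's translation map. A compressed sketch of that iteration with hypothetical stand-in types (the real code calls map->Query() and map->ClearFlags() on virtual_page_address(area, page)):

    // Sketch only: FlagMapping models what the real code learns from
    // map->Query() / clears with map->ClearFlags() for one area mapping.
    #include <vector>

    struct FlagMapping {
        bool hardwareAccessed = false;
        bool hardwareModified = false;
    };

    struct FlagPage {
        bool accessed = false;
        bool modified = false;
        std::vector<FlagMapping> mappings;    // one entry per mapping of the page
    };

    bool testMapModification(const FlagPage& page)
    {
        if (page.modified)                    // software flag short-circuits
            return true;
        for (const FlagMapping& mapping : page.mappings) {
            if (mapping.hardwareModified)     // map->Query() in the real code
                return true;
        }
        return false;
    }

    void clearMapFlags(FlagPage& page, bool clearAccessed, bool clearModified)
    {
        if (clearAccessed)
            page.accessed = false;
        if (clearModified)
            page.modified = false;
        for (FlagMapping& mapping : page.mappings) {  // map->ClearFlags() per mapping
            if (clearAccessed)
                mapping.hardwareAccessed = false;
            if (clearModified)
                mapping.hardwareModified = false;
        }
    }
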
3118 /*! Removes all mappings from a page.
3119 After you've called this function, the page is unmapped from memory and
3120 the page's \c accessed and \c modified flags have been updated according
3122 The page's cache must be locked.
3125 vm_remove_all_page_mappings(vm_page* page)
3127 while (vm_page_mapping* mapping = page->mappings.Head()) {
3130 addr_t address = virtual_page_address(area, page);
3137 vm_clear_page_mapping_accessed_flags(struct vm_page *page)
3141 vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
3149 virtual_page_address(area, page), false, modified)) {
3153 page->modified |= modified;
3157 if (page->accessed) {
3159 page->accessed = false;
3166 /*! Removes all mappings of a page and/or clears the accessed bits of the
3168 The function iterates through the page mappings and removes them until
3170 iterate, but only clear the accessed flag of the mapping. The page's
3174 \c accessed bit of the page itself. If \c 0 is returned, all mappings
3175 of the page have been removed.
3178 vm_remove_all_page_mappings_if_unaccessed(struct vm_page *page)
3180 ASSERT(page->WiredCount() == 0);
3182 if (page->accessed)
3183 return vm_clear_page_mapping_accessed_flags(page);
3185 while (vm_page_mapping* mapping = page->mappings.Head()) {
3188 addr_t address = virtual_page_address(area, page);
3191 page->accessed = true;
3192 page->modified |= modified;
3193 return vm_clear_page_mapping_accessed_flags(page);
3195 page->modified |= modified;
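
The doc comment and loop above describe vm_remove_all_page_mappings_if_unaccessed(): mappings are removed one at a time until one reports its accessed bit set, at which point the function keeps the remaining mappings, records the flags on the page, and falls back to only clearing accessed flags. A simplified sketch of that control flow (stand-in types; the real code uses VMTranslationMap::ClearAccessedAndModified()):

    // Sketch only. Each stand-in mapping carries the hardware accessed/modified
    // bits the real code obtains from ClearAccessedAndModified(); unmapping is
    // modeled by dropping the mapping from the list.
    #include <cassert>
    #include <list>

    struct UnmapMapping { bool accessed = false; bool modified = false; };

    struct UnmapPage {
        std::list<UnmapMapping> mappings;
        bool accessed = false;
        bool modified = false;
        int wiredCount = 0;
    };

    // Counterpart to vm_clear_page_mapping_accessed_flags(): clear accessed
    // bits everywhere and return how many were set.
    int clearAccessedFlagsOnly(UnmapPage& page)
    {
        int count = 0;
        for (UnmapMapping& mapping : page.mappings) {
            if (mapping.accessed)
                count++;
            page.modified |= mapping.modified;
            mapping.accessed = false;
        }
        if (page.accessed) {
            count++;
            page.accessed = false;
        }
        return count;
    }

    // Returns the number of accessed references that prevented the unmap, or 0
    // if every mapping was removed.
    int removeAllMappingsIfUnaccessed(UnmapPage& page)
    {
        assert(page.wiredCount == 0);          // wired pages are never stolen

        if (page.accessed)                     // software flag already set
            return clearAccessedFlagsOnly(page);

        while (!page.mappings.empty()) {
            UnmapMapping mapping = page.mappings.front();
            if (mapping.accessed) {
                // The mapping was used since the last sweep: keep it (and all
                // remaining ones), remember the flags, and only clear bits.
                page.accessed = true;
                page.modified |= mapping.modified;
                return clearAccessedFlagsOnly(page);
            }
            page.modified |= mapping.modified;
            page.mappings.pop_front();         // unmap this mapping
        }
        return 0;                              // fully unmapped
    }
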
3228 " -p or --physical only allows memory from a single page to be "
3268 kprintf("NOTE: number of bytes has been cut to page size\n");
3275 kprintf("getting the hardware page failed.");
3573 "page count" : "committed size");
3651 kprintf("page mappings:\n");
3654 kprintf(" %p", mapping->page);
3662 kprintf("page mappings:\t%" B_PRIu32 "\n", count);
3823 vm_page* page = (vm_page*)(addr_t)addressValue;
3824 physicalAddress = page->physical_page_number * B_PAGE_SIZE;
3985 vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
3986 if (page != NULL && page->State() != PAGE_STATE_FREE
3987 && page->State() != PAGE_STATE_CLEAR
3988 && page->State() != PAGE_STATE_UNUSED) {
3989 DEBUG_PAGE_ACCESS_START(page);
3990 vm_page_set_state(page, PAGE_STATE_FREE);
4222 // TODO: horrible brute-force method of determining if the page can be
4238 panic("early physical page allocations no longer possible!");
4247 // see if the page after the next allocated paddr run can be allocated
4250 // see if the next page will collide with the next allocated range
4254 // see if the next physical page fits in the memory block
4267 // see if the page after the prev allocated paddr run can be allocated
4269 // see if the next page will collide with the next allocated range
4274 // see if the next physical page fits in the memory block
4310 panic("error allocating early page!\n");
4319 panic("error mapping early page!");
4360 // initialize the free page list and physical page mapper
4424 // create the object cache for the page mappings
4425 gPageMappingsObjectCache = create_object_cache_etc("page mappings",
4429 panic("failed to create page mappings object cache");
4475 "Prints low-level page mapping information for a given address. If\n"
4484 "page.\n",
4557 FTRACE(("vm_page_fault: page fault at 0x%lx, ip 0x%lx\n", address,
4610 		// this will cause the arch-dependent page fault handler to
4615 // unhandled page fault in the kernel
4616 panic("vm_page_fault: unhandled page fault in kernel space at "
4678 vm_page* page;
4701 page = NULL;
4717 /*! Gets the page that should be mapped into the area.
4718 Returns an error code other than \c B_OK, if the page couldn't be found or
4724 Returns \c B_OK with \c context.restart set to \c false, if the page was
4725 found. It is returned in \c context.page. The address space will still be
4727 cache the page lives in.
4734 vm_page* page = NULL;
4741 page = cache->LookupPage(context.cacheOffset);
4742 if (page != NULL && page->busy) {
4743 // page must be busy -- wait for it to become unbusy
4746 cache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, false);
4753 if (page != NULL)
4756 // The current cache does not contain the page we're looking for.
4760 // insert a fresh page and mark it busy -- we're going to read it in
4761 page = vm_page_allocate_page(&context.reservation,
4763 cache->InsertPage(page, context.cacheOffset);
4766 // the page in. Keep a reference to the cache around.
4770 // read the page in
4772 vec.base = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
4781 // on error remove and free the page
4782 dprintf("reading page from cache %p returned: %s!\n",
4785 cache->NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
4786 cache->RemovePage(page);
4787 vm_page_set_state(page, PAGE_STATE_FREE);
4793 // mark the page unbusy again
4794 cache->MarkPageUnbusy(page);
4796 DEBUG_PAGE_ACCESS_END(page);
4809 if (page == NULL) {
4810 // There was no adequate page, determine the cache for a clean one.
4815 // allocate a clean page
4816 page = vm_page_allocate_page(&context.reservation,
4818 FTRACE(("vm_soft_fault: just allocated page 0x%" B_PRIxPHYSADDR "\n",
4819 page->physical_page_number));
4821 // insert the new page into our cache
4822 cache->InsertPage(page, context.cacheOffset);
4824 } else if (page->Cache() != context.topCache && context.isWrite) {
4825 // We have a page that has the data we want, but in the wrong cache
4827 vm_page* sourcePage = page;
4829 // TODO: If memory is low, it might be a good idea to steal the page
4831 FTRACE(("get new page, copy it, and put it into the topmost cache\n"));
4832 page = vm_page_allocate_page(&context.reservation, PAGE_STATE_ACTIVE);
4835 // one while copying the page. Lacking another mechanism to ensure that
4836 // the source page doesn't disappear, we mark it busy.
4840 // copy the page
4841 vm_memcpy_physical_page(page->physical_page_number * B_PAGE_SIZE,
4847 // insert the new page into our cache
4848 context.topCache->InsertPage(page, context.cacheOffset);
4851 DEBUG_PAGE_ACCESS_START(page);
4853 context.page = page;
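
The fault_get_page() fragments above outline the cache-chain walk: look the page up in each cache from the top down, wait and restart when it is busy, read it in when the cache's backing store has it, fall back to allocating a clean page when nothing is found, and copy the page into the top cache when a write fault hit a lower cache. A condensed, self-contained sketch of that decision flow; SketchCache and its methods are stand-ins for VMCache, and the locking, reservation, and restart machinery is reduced to comments:

    // Sketch only: SketchCache and its methods stand in for VMCache.
    #include <cstdint>
    #include <cstring>

    struct FaultPage { bool busy = false; uint8_t data[4096] = {}; };

    struct SketchCache {
        SketchCache* source = nullptr;                   // next lower cache
        virtual FaultPage* Lookup(uint64_t offset) = 0;
        virtual bool HasPage(uint64_t offset) = 0;       // backing store has it?
        virtual bool Read(FaultPage* page, uint64_t offset) = 0;
        virtual void Insert(FaultPage* page, uint64_t offset) = 0;
        virtual void Remove(FaultPage* page) = 0;
        virtual ~SketchCache() {}
    };

    FaultPage* faultGetPage(SketchCache* topCache, uint64_t offset, bool isWrite)
    {
        SketchCache* cache = topCache;
        FaultPage* page = nullptr;

        for (; cache != nullptr; cache = cache->source) {
            page = cache->Lookup(offset);
            if (page != nullptr) {
                // Real code: if page->busy, unlock, wait for
                // PAGE_EVENT_NOT_BUSY and restart the whole fault.
                break;
            }
            if (cache->HasPage(offset)) {
                // Insert a fresh busy page and read it in from the store.
                page = new FaultPage;
                page->busy = true;
                cache->Insert(page, offset);
                if (!cache->Read(page, offset)) {
                    cache->Remove(page);          // on error remove and free it
                    delete page;
                    return nullptr;
                }
                page->busy = false;               // mark the page unbusy again
                break;
            }
        }

        if (page == nullptr) {
            // No cache had the page: provide a clean one. (The real code picks
            // the cache depending on whether this is a write fault; the sketch
            // always uses the top cache.)
            page = new FaultPage;
            topCache->Insert(page, offset);
        } else if (cache != topCache && isWrite) {
            // Copy-on-write: copy the lower cache's page into the top cache.
            FaultPage* copy = new FaultPage;
            std::memcpy(copy->data, page->data, sizeof(copy->data));
            topCache->Insert(copy, offset);
            page = copy;
        }
        return page;
    }
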
4861 \param originalAddress The address. Doesn't need to be page aligned.
4864 \param wirePage On success, if non \c NULL, the wired count of the page
4865 mapped at the given address is incremented and the page is returned
4886 // page daemon/thief can do their job without problems.
4946 // page fault now.
4955 // Note, since the page fault is resolved with interrupts enabled,
4964 // its sources already have the page we're searching for (we're going
4975 // All went fine, all there is left to do is to map the page into the
4977 TPF(PageFaultDone(area->id, context.topCache, context.page->Cache(),
4978 context.page));
4980 // If the page doesn't reside in the area's cache, we need to make sure
4984 if (context.page->Cache() != context.topCache && !isWrite)
4990 // check whether there's already a page mapped at the address
5000 // Yep there's already a page. If it's ours, we can simply adjust
5002 if (mappedPage == context.page) {
5005 // the page isn't temporarily unmapped), otherwise we'd have
5015 // If the page is wired, we can't unmap it. Wait until it is unwired
5016 // again and restart. Note that the page cannot be wired for
5026 // ... but since we allocated a page and inserted it into
5028 // have a page from a lower cache mapped while an upper
5029 // cache has a page that would shadow it.
5030 context.topCache->RemovePage(context.page);
5031 vm_page_free_etc(context.topCache, context.page,
5034 DEBUG_PAGE_ACCESS_END(context.page);
5041 // Note: The mapped page is a page of a lower cache. We are
5042 			// guaranteed to have that cache locked, our new page is a copy of
5043 // that page, and the page is not busy. The logic for that guarantee
5044 // is as follows: Since the page is mapped, it must live in the top
5046 // (was before the new page was inserted) no other page in any
5047 // cache between the top cache and the page's cache (otherwise that
5056 if (map_page(area, context.page, address, newProtection,
5058 // Mapping can only fail, when the page mapping object couldn't
5060 // fine, though. If this was a regular page fault, we'll simply
5064 DEBUG_PAGE_ACCESS_END(context.page);
5074 // The caller expects us to wire the page. Since
5082 } else if (context.page->State() == PAGE_STATE_INACTIVE)
5083 vm_page_set_state(context.page, PAGE_STATE_ACTIVE);
5085 // also wire the page, if requested
5087 increment_page_wired_count(context.page);
5088 *wirePage = context.page;
5091 DEBUG_PAGE_ACCESS_END(context.page);
5443 // Shrink or grow individual page protections if in use.
5454 // init the additional page protections to that of the area
5522 /*! Copies a range of memory directly from/to a page that might not be mapped
5526 walks through the respective area's cache chain to find the physical page
5529 must not cross a page boundary.
5573 // search the page
5577 vm_page* page = NULL;
5579 page = cache->DebugLookupPage(cacheOffset);
5580 if (page != NULL)
5591 if (page == NULL)
5595 phys_addr_t physicalAddress = page->physical_page_number * B_PAGE_SIZE
5599 if (page->Cache() != area->cache)
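
The debug copy helper described above finds the backing page by walking the area's cache chain (DebugLookupPage() in the fragments) and then forms a physical address from the page's frame number plus the offset within the page, which is why the copied range must not cross a page boundary. A small sketch of that lookup and address math with stand-in types, 4 KiB pages assumed:

    // Sketch only: ChainCache/ChainPage are stand-ins, 4 KiB pages assumed.
    #include <cstdint>

    constexpr uint64_t kPageSize = 4096;

    struct ChainPage { uint64_t physicalPageNumber; };

    struct ChainCache {
        ChainCache* source = nullptr;                    // next lower cache
        virtual ChainPage* DebugLookup(uint64_t cacheOffset) = 0;
        virtual ~ChainCache() {}
    };

    // Returns the physical address backing 'cacheOffset', or 0 if no cache in
    // the chain holds a page there. Because the result is a single physical
    // page, the range to copy must not cross a page boundary.
    uint64_t debugFindPhysicalAddress(ChainCache* cache, uint64_t cacheOffset)
    {
        uint64_t pageOffset = cacheOffset % kPageSize;
        for (; cache != nullptr; cache = cache->source) {
            ChainPage* page = cache->DebugLookup(cacheOffset - pageOffset);
            if (page != nullptr)
                return page->physicalPageNumber * kPageSize + pageOffset;
        }
        return 0;                                        // sketch: 0 == not found
    }
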
5693 /*! Wires a single page at the given address.
5699 be page aligned.
5700 \param writable If \c true the page shall be writable.
5703 \return \c B_OK, when the page could be wired, another error code otherwise.
5712 // compute the page protection that is required
5750 // up the page and play with its wired count.
5756 vm_page* page;
5759 && (page = vm_lookup_page(physicalAddress / B_PAGE_SIZE))
5762 // the page's wired count.
5763 increment_page_wired_count(page);
5769 // Let vm_soft_fault() map the page for us, if possible. We need
5778 isUser, &page);
5781 // The page could not be mapped -- clean up.
5791 = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE
5793 info->page = page;
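
The vm_wire_page() fragments above show two paths: if the translation map already holds the page with the required protection, look the vm_page up by physical frame and increment its wired count; otherwise let vm_soft_fault() map the page and wire it via its wirePage argument. A condensed sketch of that decision; WireBackend and its methods are hypothetical stand-ins for VMTranslationMap::Query(), vm_lookup_page() and vm_soft_fault():

    // Sketch only: WireBackend's methods are hypothetical stand-ins. 4 KiB
    // pages assumed.
    #include <cstdint>

    struct WirePage { int wiredCount = 0; };

    struct WireBackend {
        // true if the address is already mapped with the needed protection
        virtual bool QueryMapped(uint64_t address, bool writable,
            uint64_t& physicalAddress) = 0;
        virtual WirePage* LookupPage(uint64_t pageFrame) = 0;
        // fault the page in; on success it comes back already wired
        virtual bool SoftFaultAndWire(uint64_t address, bool writable,
            WirePage*& page, uint64_t& physicalAddress) = 0;
        virtual ~WireBackend() {}
    };

    struct WireResult {
        bool ok = false;
        WirePage* page = nullptr;
        uint64_t physicalAddress = 0;
    };

    WireResult wireSinglePage(WireBackend& backend, uint64_t pageAlignedAddress,
        bool writable)
    {
        WireResult result;
        uint64_t physicalAddress = 0;

        if (backend.QueryMapped(pageAlignedAddress, writable, physicalAddress)) {
            // Fast path: the translation already exists with the required
            // protection, so only the page's wired count needs to change.
            if (WirePage* page = backend.LookupPage(physicalAddress / 4096)) {
                ++page->wiredCount;
                result.ok = true;
                result.page = page;
                result.physicalAddress = physicalAddress;
                return result;
            }
        }

        // Slow path: let the soft-fault path map (and wire) the page.
        WirePage* page = nullptr;
        if (!backend.SoftFaultAndWire(pageAlignedAddress, writable, page,
                physicalAddress))
            return result;                     // page could not be mapped

        result.ok = true;
        result.page = page;
        result.physicalAddress = physicalAddress;
        return result;
    }
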
5799 /*! Unwires a single page previously wired via vm_wire_page().
5815 if (info->page->Cache() != cache) {
5816 // The page is not in the top cache, so we lock the whole cache chain
5817 // before touching the page's wired count.
5821 decrement_page_wired_count(info->page);
5862 // compute the page protection that is required
5940 vm_page* page;
5943 && (page = vm_lookup_page(physicalAddress / B_PAGE_SIZE))
5946 // the page's wired count.
5947 increment_page_wired_count(page);
5949 // Let vm_soft_fault() map the page for us, if possible. We need
5958 false, isUser, &page);
5992 // even if not a single page was wired, unlock_memory_etc() is called
6021 // compute the page protection that is required
6097 vm_page* page;
6100 && (page = vm_lookup_page(physicalAddress / B_PAGE_SIZE))
6103 // the page's wired count.
6104 decrement_page_wired_count(page);
6106 panic("unlock_memory_etc(): Failed to unwire page: address "
6227 // page does fit in current entry
6862 // page protection array and re-map mapped pages.
6907 // This page should already be accounted for in the commitment.
6944 vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
6945 if (page == NULL) {
6946 panic("area %p looking up page failed for pa %#" B_PRIxPHYSADDR
6952 // If the page is not in the topmost cache and write access is
6955 bool unmapPage = page->Cache() != topCache
6964 DEBUG_PAGE_ACCESS_START(page);
6966 DEBUG_PAGE_ACCESS_END(page);