Lines Matching refs:page

159 		vm_page* page = vm_page_allocate_page(reservation,
162 fCache->InsertPage(page, fOffset + pos);
165 page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE);
166 fPages[i++] = page;
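
The matches at 159-166 show the pattern used when cache pages are created for a read-ahead: a page is allocated from a reservation, hung into the cache at the file offset, its physical frame is appended to an I/O vector (that is what the "physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE" arguments feed), and the page pointer is remembered for later. A minimal sketch of that pattern follows; the function name, the allocation flags, and the direct generic_io_vec handling are assumptions, not taken from the file.

// Sketch only: populate a cache range with freshly allocated (busy) pages
// and build a physical I/O vector describing them.
static uint32
prepare_cache_pages(VMCache* cache, off_t offset, size_t numBytes,
	vm_page_reservation* reservation, vm_page** pages, generic_io_vec* vecs)
{
	uint32 count = 0;
	for (size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) {
		// VM_PAGE_ALLOC_BUSY (assumed flag) keeps other threads away from
		// the page until the pending read has filled it
		vm_page* page = vm_page_allocate_page(reservation,
			PAGE_STATE_CACHED | VM_PAGE_ALLOC_BUSY);
		cache->InsertPage(page, offset + pos);

		// describe the frame for the upcoming disk transfer
		vecs[count].base
			= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
		vecs[count].length = B_PAGE_SIZE;
		pages[count++] = page;
	}
	return count;
}
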
204 // clear partial page
298 // have been written back in order to relieve the page pressure
312 vm_page* page;
314 (page = it.Next()) != NULL && left > 0;) {
315 if (page->State() == PAGE_STATE_CACHED && !page->busy) {
316 DEBUG_PAGE_ACCESS_START(page);
317 ASSERT(!page->IsMapped());
318 ASSERT(!page->modified);
319 cache->RemovePage(page);
320 vm_page_set_state(page, PAGE_STATE_FREE);
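
The block at 312-320 walks a cache and gives back clean pages to relieve page pressure once their contents have been written back: only pages that are in the cached state, not busy, not mapped, and not modified are removed and freed. A sketch of that loop; whether "left" counts pages or bytes is not visible in the fragments, so a page count is assumed, and the free path is assumed to end the page access that DEBUG_PAGE_ACCESS_START began.

// Sketch: steal up to `left` clean, unmapped pages back from `cache`
// (assumes the cache is locked and its dirty pages were written back).
static void
reclaim_clean_pages(VMCache* cache, size_t left)
{
	vm_page* page;
	for (VMCachePagesTree::Iterator it = cache->pages.GetIterator();
			(page = it.Next()) != NULL && left > 0;) {
		if (page->State() != PAGE_STATE_CACHED || page->busy)
			continue;

		DEBUG_PAGE_ACCESS_START(page);
		ASSERT(!page->IsMapped());
		ASSERT(!page->modified);
		cache->RemovePage(page);
		vm_page_set_state(page, PAGE_STATE_FREE);
			// freeing the page is assumed to end the page access
		left--;
	}
}
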
395 vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
398 cache->InsertPage(page, offset + pos);
401 page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE);
492 though, if only a partial page gets written.
519 vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
524 page->modified = !writeThrough;
526 ref->cache->InsertPage(page, offset + pos);
529 page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE);
539 // This is only a partial write, so we have to read the rest of the page
553 // get the last page in the I/O vectors
558 // the space in the page after this write action needs to be cleaned
562 // the end of this write does not happen on a page boundary, so we
563 // need to fetch the last page before we can update it
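
The write path at 492-563 deals with writes that do not start or end on a page boundary: freshly allocated pages are marked modified unless the write is write-through, and a partial first or last page forces either a read of the old contents or a clear of the bytes past the end of the write. A sketch of the last-page handling; read_page_into_cache() is a placeholder, and the parameter names are illustrative only.

// Sketch: after a write that ends mid-page, either fetch the old tail of
// the last page from disk or clear it, so no stale data is left behind.
static status_t
fixup_last_page(file_cache_ref* ref, vm_page* lastPage, off_t pageOffset,
	size_t endInPage, off_t fileSize, bool writeThrough)
{
	lastPage->modified = !writeThrough;
		// write-through data is flushed right away, so the page stays clean

	if (endInPage == 0 || endInPage == B_PAGE_SIZE)
		return B_OK;		// the write ended on a page boundary

	if (pageOffset + (off_t)B_PAGE_SIZE <= fileSize) {
		// the file already has data behind the write, so fetch the last
		// page before we can update it (placeholder helper)
		return read_page_into_cache(ref, lastPage, pageOffset);
	}

	// beyond the end of the file: the space in the page after this write
	// only needs to be cleared
	vm_memset_physical(
		(phys_addr_t)lastPage->physical_page_number * B_PAGE_SIZE + endInPage,
		0, B_PAGE_SIZE - endInPage);
	return B_OK;
}
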
780 // check if this page is already in memory
781 vm_page* page = cache->LookupPage(offset);
782 if (page != NULL) {
783 // The page may be busy - since we need to unlock the cache sometime
795 // the page again.
796 page = cache->LookupPage(offset);
797 if (page != NULL && page->busy) {
798 cache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
805 TRACE(("lookup page from offset %lld: %p, size = %lu, pageOffset "
806 "= %lu\n", offset, page, bytesLeft, pageOffset));
808 if (page != NULL) {
810 // Since the following user_mem{cpy,set}() might cause a page
813 // deadlock. To make sure that our page doesn't go away, we mark
815 page->busy = true;
818 // copy the contents of the page already in memory
820 = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE
838 DEBUG_PAGE_ACCESS_START(page);
840 page->modified = true;
842 if (page->State() != PAGE_STATE_MODIFIED)
843 vm_page_set_state(page, PAGE_STATE_MODIFIED);
845 DEBUG_PAGE_ACCESS_END(page);
848 cache->MarkPageUnbusy(page);
851 // If it is cached only, requeue the page, so the respective queue
853 if (page->State() == PAGE_STATE_CACHED
854 || page->State() == PAGE_STATE_MODIFIED) {
855 DEBUG_PAGE_ACCESS_START(page);
856 vm_page_requeue(page, true);
857 DEBUG_PAGE_ACCESS_END(page);
861 // we've read the last page, so we're done!
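
Lines 780-861 are the core of the cached read/write loop: look the page up, wait if another thread has it marked busy, then mark it busy ourselves so it cannot go away while the cache lock is dropped around user_mem{cpy,set}(), which may fault and re-enter the VM (and would otherwise deadlock on the cache lock). Afterwards the page is marked modified on a write, or simply requeued so the cached queue reflects the recent access. A condensed sketch of that sequence; the function name and parameters are illustrative, and the use of vm_memcpy_to_physical() for the copy is an assumption.

// Sketch: copy user data into one cached page at `offset`.
// Assumes `cache` is locked on entry and left locked on return.
static status_t
write_to_cached_page(VMCache* cache, off_t offset, uint32 pageOffset,
	const void* userBuffer, size_t length, bool isWrite)
{
	vm_page* page = cache->LookupPage(offset);
	while (page != NULL && page->busy) {
		// someone else is doing I/O on this page; wait, then look it up again
		cache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
		page = cache->LookupPage(offset);
	}
	if (page == NULL)
		return B_ENTRY_NOT_FOUND;

	// The copy below may cause a page fault and re-enter the VM, so the
	// cache has to be unlocked. Marking the page busy keeps it from being
	// stolen or freed in the meantime.
	page->busy = true;
	cache->Unlock();

	phys_addr_t base = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE
		+ pageOffset;
	status_t status = vm_memcpy_to_physical(base, userBuffer, length, true);

	cache->Lock();
	cache->MarkPageUnbusy(page);

	if (status == B_OK && isWrite) {
		DEBUG_PAGE_ACCESS_START(page);
		page->modified = true;
		if (page->State() != PAGE_STATE_MODIFIED)
			vm_page_set_state(page, PAGE_STATE_MODIFIED);
		DEBUG_PAGE_ACCESS_END(page);
	} else if (page->State() == PAGE_STATE_CACHED) {
		// requeue the page so the cached queue keeps its LRU ordering
		DEBUG_PAGE_ACCESS_START(page);
		vm_page_requeue(page, true);
		DEBUG_PAGE_ACCESS_END(page);
	}
	return status;
}
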
996 // check if this page is already in memory
998 vm_page* page = cache->LookupPage(offset);
1003 if (page == NULL) {
1009 // read the part before the current page (or the end of the request)
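
Around 996-1009 the same lookup is used to split a larger request: offsets whose pages are missing are accumulated, and once a cached page (or the end of the request) is reached, the accumulated "part before the current page" is read or written in one piece. A rough sketch of that coalescing idea; the callback typedef and function are invented for illustration and are not the file's API.

// Sketch: walk [offset, offset + size) and hand every contiguous range
// that is not in the cache to `ioRange()` as one request.
typedef status_t (*io_range_func)(VMCache* cache, off_t start, size_t length);

static status_t
for_each_missing_range(VMCache* cache, off_t offset, size_t size,
	io_range_func ioRange)
{
	off_t gapStart = offset;
	for (off_t pos = offset; pos < offset + (off_t)size; pos += B_PAGE_SIZE) {
		// check if this page is already in memory
		vm_page* page = cache->LookupPage(pos);
		if (page == NULL)
			continue;

		if (pos > gapStart) {
			// handle the part before the current page in one go
			status_t status = ioRange(cache, gapStart, pos - gapStart);
			if (status != B_OK)
				return status;
		}
		gapStart = pos + B_PAGE_SIZE;
	}

	if (offset + (off_t)size > gapStart)
		return ioRange(cache, gapStart, offset + (off_t)size - gapStart);
	return B_OK;
}
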
1116 // allocate a clean page we can use for writing zeroes
1119 vm_page* page = vm_page_allocate_page(&reservation,
1123 sZeroPage = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
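
1116-1123 set up the shared zero page: once, at initialization, a cleared page is allocated and its physical address stored in sZeroPage, so later "write zeroes" requests can point their I/O vectors at this one frame instead of clearing a temporary buffer each time. A sketch of that initialization; the reservation handling, priority, and allocation flags are assumptions.

// Sketch: one-time allocation of the global zero page used for writing
// zero-filled ranges.
static phys_addr_t sZeroPage;

static void
init_zero_page()
{
	vm_page_reservation reservation;
	vm_page_reserve_pages(&reservation, 1, VM_PRIORITY_SYSTEM);

	// VM_PAGE_ALLOC_CLEAR (assumed flag) hands back an already zeroed
	// frame; wiring it keeps it from ever being reclaimed
	vm_page* page = vm_page_allocate_page(&reservation,
		PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
	vm_page_unreserve_pages(&reservation);

	sZeroPage = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
}
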
1217 // VM (on page fault) can still add pages, if the file is mmap()ed. We
1219 // the page fault code to deal correctly with private mappings (i.e. only
1270 // We may have a new partial page at the end of the cache that must be
1274 vm_page* page = cache->LookupPage(newSize - partialBytes);
1275 if (page != NULL) {
1276 vm_memset_physical(page->physical_page_number * B_PAGE_SIZE
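
Finally, 1270-1276 handle cache resizing: when the new file size ends inside a page, the bytes of that page beyond the new end are cleared so that a later size increase cannot expose stale data. A sketch of that step, with illustrative parameter names; it assumes the cache is locked.

// Sketch: after resizing the cache to newSize, wipe the tail of the page
// that now contains the end of the file.
static void
clear_partial_end_page(VMCache* cache, off_t newSize)
{
	uint32 partialBytes = newSize % B_PAGE_SIZE;
	if (partialBytes == 0)
		return;		// the new size ends exactly on a page boundary

	vm_page* page = cache->LookupPage(newSize - partialBytes);
	if (page == NULL)
		return;		// the last page isn't cached, nothing to clear

	vm_memset_physical(
		(phys_addr_t)page->physical_page_number * B_PAGE_SIZE + partialBytes,
		0, B_PAGE_SIZE - partialBytes);
}
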