Lines Matching refs:page

112 out.Print("guarded heap allocate: heap: %p; page: %p; "
138 out.Print("guarded heap free: heap: %p; page: %p", fHeap,
180 guarded_heap_page& page = area.pages[startPageIndex + i];
181 page.flags = GUARDED_HEAP_PAGE_FLAG_USED;
183 page.team = (gKernelStartup ? 0 : team_get_current_team_id());
184 page.thread = find_thread(NULL);
186 page.stack_trace_depth = arch_debug_get_stack_trace(
187 page.stack_trace, GUARDED_HEAP_STACK_TRACE_DEPTH, 0, 4,
190 page.allocation_size = allocationSize;
191 page.allocation_base = allocationBase;
192 page.alignment = alignment;
193 page.flags |= GUARDED_HEAP_PAGE_FLAG_FIRST;
194 firstPage = &page;
196 page.team = firstPage->team;
197 page.thread = firstPage->thread;
199 page.stack_trace_depth = 0;
201 page.allocation_size = allocationSize;
202 page.allocation_base = allocationBase;
203 page.alignment = alignment;
206 list_remove_item(&area.free_list, &page);
209 page.flags |= GUARDED_HEAP_PAGE_FLAG_GUARD;
218 page.flags));
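
File lines 180-218 above are the allocation bookkeeping: every page of the run is flagged used, the first page carries the full metadata, follow-up pages copy team and thread from it, and the run ends in a guard page. A minimal user-space sketch of that loop; the flag and field names come from the listing, while the bit values, types and function shape are assumptions:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Assumed bit values; only the names appear in the listing.
    constexpr uint32_t GUARDED_HEAP_PAGE_FLAG_USED  = 1 << 0;
    constexpr uint32_t GUARDED_HEAP_PAGE_FLAG_FIRST = 1 << 1;
    constexpr uint32_t GUARDED_HEAP_PAGE_FLAG_GUARD = 1 << 2;

    struct guarded_heap_page {
        uint32_t flags = 0;
        size_t   allocation_size = 0;
        void*    allocation_base = nullptr;
        size_t   alignment = 0;
        int32_t  team = -1;
        int32_t  thread = -1;
    };

    // Mark pagesNeeded consecutive pages for one allocation. The first
    // page gets FLAG_FIRST plus the metadata, later pages copy team and
    // thread from it, and the last page becomes the guard page.
    void mark_allocation(std::vector<guarded_heap_page>& pages,
        size_t startPageIndex, size_t pagesNeeded, size_t allocationSize,
        void* allocationBase, size_t alignment)
    {
        guarded_heap_page* firstPage = nullptr;
        for (size_t i = 0; i < pagesNeeded; i++) {
            guarded_heap_page& page = pages[startPageIndex + i];
            page.flags = GUARDED_HEAP_PAGE_FLAG_USED;

            if (i == 0) {
                page.flags |= GUARDED_HEAP_PAGE_FLAG_FIRST;
                firstPage = &page;
            } else {
                page.team = firstPage->team;
                page.thread = firstPage->thread;
            }

            page.allocation_size = allocationSize;
            page.allocation_base = allocationBase;
            page.alignment = alignment;

            if (i == pagesNeeded - 1)
                page.flags |= GUARDED_HEAP_PAGE_FLAG_GUARD;
        }
    }
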
227 guarded_heap_page& page = area.pages[pageIndex];
231 page.flags = 0;
233 page.flags |= GUARDED_HEAP_PAGE_FLAG_DEAD;
235 page.flags = 0;
238 page.allocation_size = 0;
239 page.team = (gKernelStartup ? 0 : team_get_current_team_id());
240 page.thread = find_thread(NULL);
243 page.stack_trace_depth = arch_debug_get_stack_trace(page.stack_trace,
247 list_add_item(&area.free_list, &page);
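
File lines 227-247 are the inverse operation. The two assignments to page.flags suggest two policies: clear the flags when the page may be reused, or keep it around as a dead page so stale accesses keep faulting; either way the freeing team, thread and stack trace are recorded and the page goes back on the free list. A sketch under that (assumed) reading:

    #include <cstddef>
    #include <cstdint>
    #include <list>

    constexpr uint32_t GUARDED_HEAP_PAGE_FLAG_DEAD = 1 << 3;

    struct guarded_heap_page {
        uint32_t flags = 0;
        size_t   allocation_size = 0;
        int32_t  team = -1;
        int32_t  thread = -1;
    };

    // Free one page: either make it reusable or poison it as DEAD, then
    // record who freed it and append it to the FIFO free list.
    void free_page(guarded_heap_page& page,
        std::list<guarded_heap_page*>& freeList, bool reuseMemory,
        int32_t team, int32_t thread)
    {
        if (reuseMemory)
            page.flags = 0;
        else
            page.flags |= GUARDED_HEAP_PAGE_FLAG_DEAD;

        page.allocation_size = 0;
        page.team = team;      // now: the team/thread that freed it,
        page.thread = thread;  // handy when debugging a use-after-free
        // (the real code also captures a stack trace of the free here)

        freeList.push_back(&page);
    }
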
280 // We use the free list this way so that the page that has been free for
283 guarded_heap_page* page
286 for (; page != NULL;
287 page = (guarded_heap_page*)list_get_next_item(&area.free_list, page)) {
289 if ((page->flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
292 size_t pageIndex = page - area.pages;
297 // (including the guard page).
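
The truncated comment at file line 280 appears to describe the reuse policy: scanning the free list from the head means the page that has been free the longest is picked first, which keeps recently freed pages out of circulation as long as possible and so maximizes the window for catching use-after-free. That reading, and the simplified candidate check, are assumptions in this sketch:

    #include <cstdint>
    #include <list>

    constexpr uint32_t GUARDED_HEAP_PAGE_FLAG_USED = 1 << 0;

    struct guarded_heap_page {
        uint32_t flags = 0;
    };

    // The list head is the page that has been free the longest; skip
    // entries that were grabbed in the meantime. The real scan also
    // verifies that enough consecutive pages are free, including one
    // for the guard page.
    guarded_heap_page* find_free_page(
        std::list<guarded_heap_page*>& freeList)
    {
        for (guarded_heap_page* page : freeList) {
            if ((page->flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
                continue;
            return page;
        }
        return nullptr;
    }
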
470 guarded_heap_page& page = area.pages[pageIndex];
471 if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0) {
472 panic("tried to free %p which points at page %" B_PRIuSIZE
477 if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0) {
478 panic("tried to free %p which points at page %" B_PRIuSIZE
479 " which is a guard page", address, pageIndex);
483 if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0) {
484 panic("tried to free %p which points at page %" B_PRIuSIZE
485 " which is not an allocation first page", address, pageIndex);
489 if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0) {
490 panic("tried to free %p which points at page %" B_PRIuSIZE
491 " which is a dead page", address, pageIndex);
507 guarded_heap_page* page = &area.pages[pageIndex];
508 while ((page->flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0) {
509 // Mark the allocation page as free.
514 page = &area.pages[pageIndex];
517 // Mark the guard page as free as well.
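
File lines 470-517 form the free path: four sanity panics (the page must be in use, not a guard page, the allocation's first page, and not already dead), then a walk that frees each page of the allocation up to and including the trailing guard page. A condensed model with abort() standing in for panic(); the panic reason for the first check is inferred, since the listing truncates it:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <vector>

    constexpr uint32_t GUARDED_HEAP_PAGE_FLAG_USED  = 1 << 0;
    constexpr uint32_t GUARDED_HEAP_PAGE_FLAG_FIRST = 1 << 1;
    constexpr uint32_t GUARDED_HEAP_PAGE_FLAG_GUARD = 1 << 2;
    constexpr uint32_t GUARDED_HEAP_PAGE_FLAG_DEAD  = 1 << 3;

    struct guarded_heap_page {
        uint32_t flags = 0;
    };

    static void panic_model(const void* address, size_t pageIndex,
        const char* reason)
    {
        fprintf(stderr, "tried to free %p which points at page %zu %s\n",
            address, pageIndex, reason);
        abort();
    }

    // Validate the first page of the allocation, then clear every page
    // until the guard page is hit, and finally the guard page as well.
    // Assumes each allocation is terminated by a guard page, as in the
    // marking sketch further up.
    void free_allocation(std::vector<guarded_heap_page>& pages,
        size_t pageIndex, const void* address)
    {
        guarded_heap_page& page = pages[pageIndex];
        if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0)
            panic_model(address, pageIndex, "which is not marked in use");
        if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
            panic_model(address, pageIndex, "which is a guard page");
        if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0)
            panic_model(address, pageIndex,
                "which is not an allocation first page");
        if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
            panic_model(address, pageIndex, "which is a dead page");

        guarded_heap_page* current = &pages[pageIndex];
        while ((current->flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0) {
            current->flags = 0;                // allocation page
            current = &pages[++pageIndex];
        }
        current->flags = 0;                    // the guard page itself
    }
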
558 guarded_heap_page& page = area->pages[pageIndex];
559 size_t oldSize = page.allocation_size;
581 dump_guarded_heap_stack_trace(guarded_heap_page& page)
585 for (size_t i = 0; i < page.stack_trace_depth; i++) {
586 addr_t address = page.stack_trace[i];
615 // Find the area that contains this page.
635 guarded_heap_page& page = area->pages[pageIndex];
637 kprintf("page index: %" B_PRIuSIZE "\n", pageIndex);
639 if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
641 if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
643 if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
645 if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
649 kprintf("allocation size: %" B_PRIuSIZE "\n", page.allocation_size);
650 kprintf("allocation base: %p\n", page.allocation_base);
651 kprintf("alignment: %" B_PRIuSIZE "\n", page.alignment);
652 kprintf("allocating team: %" B_PRId32 "\n", page.team);
653 kprintf("allocating thread: %" B_PRId32 "\n", page.thread);
655 dump_guarded_heap_stack_trace(page);
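
File lines 615-655 implement the KDL command that dumps a single page. A user-space approximation of its output, with printf in place of kprintf and plain format specifiers in place of B_PRIuSIZE/B_PRId32; the flag-to-text mapping is inferred from the flag checks above:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t GUARDED_HEAP_PAGE_FLAG_USED  = 1 << 0;
    constexpr uint32_t GUARDED_HEAP_PAGE_FLAG_FIRST = 1 << 1;
    constexpr uint32_t GUARDED_HEAP_PAGE_FLAG_GUARD = 1 << 2;
    constexpr uint32_t GUARDED_HEAP_PAGE_FLAG_DEAD  = 1 << 3;

    struct guarded_heap_page {
        uint32_t flags = 0;
        size_t   allocation_size = 0;
        void*    allocation_base = nullptr;
        size_t   alignment = 0;
        int32_t  team = -1;
        int32_t  thread = -1;
    };

    // Print one page's bookkeeping, mirroring the kprintf lines above.
    void dump_page_info(const guarded_heap_page& page, size_t pageIndex)
    {
        printf("page index: %zu\n", pageIndex);

        printf("flags:");
        if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
            printf(" used");
        if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
            printf(" first");
        if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
            printf(" guard");
        if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
            printf(" dead");
        printf("\n");

        printf("allocation size: %zu\n", page.allocation_size);
        printf("allocation base: %p\n", page.allocation_base);
        printf("alignment: %zu\n", page.alignment);
        printf("allocating team: %d\n", (int)page.team);
        printf("allocating thread: %d\n", (int)page.thread);
    }
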
670 // Find the area that contains this page.
697 kprintf("page count: %" B_PRIuSIZE "\n", area->page_count);
709 kprintf("free list broken, page %p not actually free\n", item);
722 guarded_heap_page& page = area->pages[i];
724 || (page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0) {
767 kprintf("page count: %" B_PRIuSIZE "\n", heap->page_count);
819 guarded_heap_page& page = area->pages[i];
820 if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0)
823 if ((team < 0 || page.team == team)
824 && (thread < 0 || page.thread == thread)
825 && (address == 0 || (addr_t)page.allocation_base == address)) {
830 " bytes\n", page.team, page.thread,
831 (addr_t)page.allocation_base, page.allocation_size);
834 dump_guarded_heap_stack_trace(page);
837 totalSize += page.allocation_size;
886 guarded_heap_page& page = area->pages[i];
887 if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0
888 && (page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0
889 && (page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) == 0) {
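
File lines 819-837 (and the similar scan at 886-889) walk all pages to list allocations, optionally filtered by team, thread or base address; only pages flagged FIRST are considered, so each allocation is counted exactly once, and the matching sizes are summed. A sketch of that filter, with the "match anything" conventions taken from the checks above:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    constexpr uint32_t GUARDED_HEAP_PAGE_FLAG_FIRST = 1 << 1;

    struct guarded_heap_page {
        uint32_t flags = 0;
        size_t   allocation_size = 0;
        void*    allocation_base = nullptr;
        int32_t  team = -1;
        int32_t  thread = -1;
    };

    // List matching allocations and return their total size. A negative
    // team/thread and a zero address mean "match anything".
    size_t dump_allocations(const std::vector<guarded_heap_page>& pages,
        int32_t team, int32_t thread, uintptr_t address)
    {
        size_t totalSize = 0;
        for (const guarded_heap_page& page : pages) {
            if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0)
                continue;  // only first pages carry the metadata

            if ((team < 0 || page.team == team)
                && (thread < 0 || page.thread == thread)
                && (address == 0
                    || (uintptr_t)page.allocation_base == address)) {
                printf("team: %d; thread: %d; address: %p; size: %zu"
                    " bytes\n", (int)page.team, (int)page.thread,
                    page.allocation_base, page.allocation_size);
                totalSize += page.allocation_size;
            }
        }
        return totalSize;
    }
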
904 "Dump info about a guarded heap page",
905 "<address>\nDump info about guarded heap page containing address.\n",
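
The final two lines are the description and usage strings passed when the command is registered with the kernel debugger (Haiku exposes this through its add_debugger_command* functions). Purely to illustrate the shape of such a registration, a stand-in table; the command name and hook here are hypothetical:

    #include <cstdio>
    #include <map>
    #include <string>

    // Toy registry: command name -> { hook, description, usage }.
    struct DebuggerCommand {
        int (*hook)(int argc, char** argv);
        const char* description;
        const char* usage;
    };

    static std::map<std::string, DebuggerCommand> sCommands;

    static int
    dump_guarded_heap_page(int argc, char** argv)
    {
        if (argc < 2) {
            printf("usage: %s <address>\n", argv[0]);
            return 1;
        }
        // ... locate the page containing the address and dump it ...
        return 0;
    }

    static void
    register_commands()
    {
        sCommands["guarded_heap_page"] = { dump_guarded_heap_page,
            "Dump info about a guarded heap page",
            "<address>\nDump info about guarded heap page containing "
            "address.\n" };
    }
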