Lines Matching refs:address

203 static void dump_guarded_heap_page(void* address, bool doPanic = false);
228 addr_t address = area.base + pageIndex * B_PAGE_SIZE;
229 mprotect((void*)address, B_PAGE_SIZE, protection);
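
The two fragments above (lines 228-229) are the core of the guard-page mechanism: a page is addressed by its index within a heap area and its protection is changed with mprotect(). A minimal sketch of that helper, assuming a simplified area record with just a base and a size (the real guarded_heap_area carries much more bookkeeping):

#include <OS.h>          // addr_t, B_PAGE_SIZE
#include <sys/mman.h>    // mprotect(), PROT_*

// Simplified stand-in for guarded_heap_area; only the fields the helper needs.
struct area_range {
	addr_t base;    // page-aligned start of the area
	size_t size;    // total size in bytes
};

// Change the protection of a single page of the area, selected by index.
// Passing PROT_NONE turns the page into a guard page; PROT_READ | PROT_WRITE
// makes it usable again.
static bool
set_page_protection(const area_range& area, size_t pageIndex, int protection)
{
	addr_t address = area.base + pageIndex * B_PAGE_SIZE;
	return mprotect((void*)address, B_PAGE_SIZE, protection) == 0;
}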
242 addr_t address = stackTrace[i];
245 (void*)address, NULL, NULL, &imageName, &symbolName, NULL,
248 print_stdout("\t%#" B_PRIxADDR " (lookup failed: %s)\n", address,
254 address - (addr_t)location, exactMatch ? "" : " (nearest)");
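
Lines 242-254 print each stack trace entry together with the image and symbol it falls into, or a lookup-failure note. The original resolves symbols through the runtime loader; the sketch below uses dladdr() as a rough, portable approximation, so the output is only similar in spirit:

#include <dlfcn.h>   // dladdr(), Dl_info
#include <stdio.h>
#include <OS.h>      // addr_t, B_PRIxADDR

static void
print_stack_address(addr_t address)
{
	Dl_info info;
	if (dladdr((void*)address, &info) == 0 || info.dli_sname == NULL) {
		printf("\t%#" B_PRIxADDR " (lookup failed)\n", address);
		return;
	}

	// Offset of the address into the nearest preceding symbol, comparable
	// to the "(nearest)" annotation in the original output.
	addr_t offset = address - (addr_t)info.dli_saddr;
	printf("\t%#" B_PRIxADDR " %s + %#" B_PRIxADDR " (%s)\n", address,
		info.dli_sname, offset, info.dli_fname);
}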
513 void* address = NULL;
514 area_id area = create_area("guarded_heap_huge_allocation", &address,
524 guarded_heap_page* page = (guarded_heap_page*)address;
528 page->allocation_base = (void*)(((addr_t)address
539 mprotect((void*)((addr_t)address + pagesNeeded * B_PAGE_SIZE),
543 addr_t protectedStart = (addr_t)address + B_PAGE_SIZE;
552 protectedSize = (addr_t)address + (pagesNeeded + 1) * B_PAGE_SIZE
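
Lines 513-552 handle allocations too large for the regular heap areas: a dedicated area is created with create_area(), the block is placed so that it ends exactly where a trailing guard page begins, and that page is protected. The sketch below shows only the placement and protection, assuming a power-of-two alignment and omitting the guarded_heap_page header the real code stores at the start of the area:

#include <OS.h>          // create_area(), addr_t, B_PAGE_SIZE
#include <sys/mman.h>    // mprotect(), PROT_NONE

static void*
huge_allocation_sketch(size_t size, size_t alignment)
{
	if (alignment == 0)
		alignment = 1;      // alignment is assumed to be a power of two

	// Pages needed for the block plus worst-case alignment slack; one more
	// page for the guard is added when the area is created below.
	size_t pagesNeeded
		= (size + alignment - 1 + B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	void* address = NULL;
	area_id area = create_area("guarded_heap_huge_allocation", &address,
		B_ANY_ADDRESS, (pagesNeeded + 1) * B_PAGE_SIZE, B_NO_LOCK,
		B_READ_AREA | B_WRITE_AREA);
	if (area < 0)
		return NULL;

	// Place the block so its end coincides with the start of the guard page,
	// rounded down to the requested alignment.
	void* allocationBase = (void*)(((addr_t)address
		+ pagesNeeded * B_PAGE_SIZE - size) & ~((addr_t)alignment - 1));

	// The trailing page stays mapped but inaccessible, so any overrun of the
	// block faults immediately.
	mprotect((void*)((addr_t)address + pagesNeeded * B_PAGE_SIZE),
		B_PAGE_SIZE, PROT_NONE);

	return allocationBase;
}

Freeing such a block later only needs area_for() on the returned pointer, which is what the fragments around lines 698-727 rely on.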
606 guarded_heap_get_locked_area_for(guarded_heap& heap, void* address)
611 if ((addr_t)address < area->base)
614 if ((addr_t)address >= area->base + area->size)
626 guarded_heap_area_page_index_for(guarded_heap_area& area, void* address)
628 size_t pageIndex = ((addr_t)address - area.base) / B_PAGE_SIZE;
632 " which is not marked in use", address, pageIndex);
638 " which is a guard page", address, pageIndex);
644 " which is not an allocation first page", address, pageIndex);
650 " which is a dead page", address, pageIndex);
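
Lines 606-614 and 626-650 together locate the bookkeeping for a freed pointer: a plain range check finds the owning area, then the page index is derived by dividing the offset by B_PAGE_SIZE and validated against the page's state (in use, not a guard page, the first page of its allocation, not already dead). A sketch of that validation, with guessed flag names and the panics of the original reduced to error reports:

#include <OS.h>          // addr_t, uint32, B_PAGE_SIZE
#include <sys/types.h>   // ssize_t
#include <stdio.h>

// Guessed per-page state; the real guarded_heap_page tracks more than this.
enum {
	PAGE_USED  = 0x01,   // page belongs to a live allocation
	PAGE_FIRST = 0x02,   // first page of that allocation
	PAGE_GUARD = 0x04,   // inaccessible guard page
	PAGE_DEAD  = 0x08    // freed and kept protected to catch reuse
};

struct page_state {
	uint32 flags;
};

struct area_state {
	addr_t base;
	size_t size;
	page_state* pages;   // one entry per page of the area
};

// Range check used to pick the owning area (compare lines 611 and 614).
static bool
area_contains(const area_state& area, void* address)
{
	if ((addr_t)address < area.base)
		return false;
	if ((addr_t)address >= area.base + area.size)
		return false;
	return true;
}

// Translate an address into its page index and reject the same invalid
// states the original panics about (lines 632-650).
static ssize_t
page_index_for(const area_state& area, void* address)
{
	size_t pageIndex = ((addr_t)address - area.base) / B_PAGE_SIZE;
	uint32 flags = area.pages[pageIndex].flags;

	if ((flags & PAGE_USED) == 0) {
		fprintf(stderr, "%p: page %zu is not marked in use\n", address,
			pageIndex);
	} else if ((flags & PAGE_GUARD) != 0) {
		fprintf(stderr, "%p: page %zu is a guard page\n", address,
			pageIndex);
	} else if ((flags & PAGE_FIRST) == 0) {
		fprintf(stderr, "%p: page %zu is not an allocation first page\n",
			address, pageIndex);
	} else if ((flags & PAGE_DEAD) != 0) {
		fprintf(stderr, "%p: page %zu is a dead page\n", address,
			pageIndex);
	} else
		return (ssize_t)pageIndex;

	return -1;
}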
659 guarded_heap_area_free(guarded_heap_area& area, void* address)
661 size_t pageIndex = guarded_heap_area_page_index_for(area, address);
698 guarded_heap_area_allocation_for(void* address, area_id& allocationArea)
700 allocationArea = area_for(address);
708 guarded_heap_page* page = (guarded_heap_page*)areaInfo.address;
714 if (page->allocation_base != address)
724 guarded_heap_free_area_allocation(void* address)
727 if (guarded_heap_area_allocation_for(address, allocationArea) == NULL)
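
Lines 698-727 cover freeing a huge allocation: area_for() maps the pointer back to its dedicated area, get_area_info() yields the area's start address where the real code keeps the guarded_heap_page header whose allocation_base must match the freed pointer, and the whole area is then deleted. A sketch of that plumbing, with the header check reduced to a comment:

#include <OS.h>   // area_for(), get_area_info(), delete_area()

static bool
free_huge_allocation_sketch(void* address)
{
	area_id allocationArea = area_for(address);
	if (allocationArea < 0)
		return false;

	area_info areaInfo;
	if (get_area_info(allocationArea, &areaInfo) != B_OK)
		return false;

	// The real code reads the guarded_heap_page stored at areaInfo.address
	// and bails out if its allocation_base does not equal the freed pointer
	// (line 714); that validation is omitted here.

	return delete_area(allocationArea) == B_OK;
}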
736 guarded_heap_free(void* address)
738 if (address == NULL)
742 address);
744 return guarded_heap_free_area_allocation(address);
747 return guarded_heap_area_free(*area, address);
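
guarded_heap_free() (lines 736-747) is only a dispatcher: NULL is a no-op, addresses inside one of the tracked heap areas are freed in place, and everything else is treated as an area-backed huge allocation. A self-contained sketch of that decision, with the two callees reduced to stand-ins:

#include <OS.h>     // addr_t, area_for(), delete_area()
#include <stdio.h>

// One tracked heap area, reduced to its address range for this sketch.
static addr_t sHeapAreaBase;
static size_t sHeapAreaSize;

// Stand-in for guarded_heap_area_free(); the real code validates the page
// index and re-protects the pages of the allocation.
static bool
free_in_area_standin(void* address)
{
	printf("in-area free of %p\n", address);
	return true;
}

// Stand-in for guarded_heap_free_area_allocation(): drop the dedicated area.
static bool
free_area_allocation_standin(void* address)
{
	area_id area = area_for(address);
	return area >= 0 && delete_area(area) == B_OK;
}

static bool
guarded_free_sketch(void* address)
{
	if (address == NULL)
		return true;

	if ((addr_t)address >= sHeapAreaBase
		&& (addr_t)address < sHeapAreaBase + sHeapAreaSize)
		return free_in_area_standin(address);

	return free_area_allocation_standin(address);
}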
752 guarded_heap_realloc(void* address, size_t newSize)
755 address);
761 size_t pageIndex = guarded_heap_area_page_index_for(*area, address);
769 guarded_heap_page* page = guarded_heap_area_allocation_for(address,
778 return address;
785 memcpy(newBlock, address, min_c(oldSize, newSize));
791 guarded_heap_area_free(*area, address);
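
The realloc path (lines 752-791) follows the classic pattern: if the existing block is already large enough it is returned unchanged (line 778), otherwise a new block is allocated, min(oldSize, newSize) bytes are copied (line 785) and the old block is freed (line 791). The NULL and zero-size cases are handled one level up, in heap_realloc() (lines 1192-1202). A sketch of that flow; old_allocation_size() is a hypothetical stand-in for reading the stored size out of the allocation's first page, and plain malloc()/free() stand in for the guarded allocator:

#include <stdlib.h>
#include <string.h>
#include <algorithm>   // std::min

// Hypothetical: the real code reads the size recorded in the allocation's
// guarded_heap_page; this stub only keeps the sketch compilable.
static size_t
old_allocation_size(void* address)
{
	(void)address;
	return 0;
}

static void*
guarded_realloc_sketch(void* address, size_t newSize)
{
	size_t oldSize = old_allocation_size(address);
	if (newSize <= oldSize)
		return address;            // still fits, nothing to do

	void* newBlock = malloc(newSize);
	if (newBlock == NULL)
		return NULL;

	memcpy(newBlock, address, std::min(oldSize, newSize));
	free(address);
	return newBlock;
}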
824 dump_guarded_heap_page(void* address, bool doPanic)
831 if ((addr_t)address < candidate->base)
833 if ((addr_t)address >= candidate->base + candidate->size)
841 panic("didn't find area for address %p\n", address);
845 size_t pageIndex = ((addr_t)address - area->base) / B_PAGE_SIZE;
867 panic("thread %" B_PRId32 " tried accessing address %p which is " \
871 find_thread(NULL), address, page.allocation_base, \
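
dump_guarded_heap_page() (lines 824-871) is the diagnostic side: it walks the areas to find the one containing the faulting address, derives the page index and, in the quoted panic, reports which thread touched which address and which allocation base the page belongs to. In a test program the same guard-page hits can be observed with an ordinary SIGSEGV handler, sketched below; this is not how the malloc_debug hooks themselves report the fault, only a way to see the faulting address and thread:

#include <OS.h>       // find_thread(), B_PRId32
#include <signal.h>   // sigaction(), SA_SIGINFO, siginfo_t
#include <stdio.h>
#include <stdlib.h>

static void
report_fault(int signalNumber, siginfo_t* info, void* context)
{
	(void)signalNumber;
	(void)context;

	// fprintf() is not async-signal-safe; tolerable only because the process
	// aborts right afterwards.
	fprintf(stderr, "thread %" B_PRId32 " tried accessing address %p\n",
		find_thread(NULL), info->si_addr);
	abort();
}

static void
install_fault_reporter()
{
	struct sigaction action = {};
	sigemptyset(&action.sa_mask);
	action.sa_sigaction = report_fault;
	action.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &action, NULL);
}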
1184 heap_free(void* address)
1186 if (!guarded_heap_free(address))
1187 panic("free failed for address %p", address);
1192 heap_realloc(void* address, size_t newSize)
1195 free(address);
1199 if (address == NULL)
1202 return guarded_heap_realloc(address, newSize);
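
Finally, the hooks at lines 1184-1202 wire the guarded heap into the standard allocator interface: free() cannot report failure, so the hook panics when the guarded free rejects the pointer, and realloc() resolves its two special cases (newSize == 0 frees and returns NULL, address == NULL behaves like malloc) before delegating. A self-contained sketch of that contract, with the system allocator standing in for the guarded heap:

#include <stdio.h>
#include <stdlib.h>

// Stand-ins for the guarded heap entry points; they forward to the system
// allocator so the sketch compiles and runs on its own.
static bool
guarded_free_standin(void* address)
{
	free(address);
	return true;
}

static void*
guarded_realloc_standin(void* address, size_t newSize)
{
	return realloc(address, newSize);
}

static void
hook_free(void* address)
{
	if (!guarded_free_standin(address)) {
		// The real hook panics into the debugger instead of returning.
		fprintf(stderr, "free failed for address %p\n", address);
		abort();
	}
}

static void*
hook_realloc(void* address, size_t newSize)
{
	if (newSize == 0) {
		hook_free(address);
		return NULL;
	}

	if (address == NULL)
		return malloc(newSize);

	return guarded_realloc_standin(address, newSize);
}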