Lines matching refs:base (cross-reference hits for the identifier 'base'; the leading number on each hit is the line number in the searched source file)

114 	uint64 base;
117 size, &base)) {
122 insert_physical_allocated_range(base, size);
123 sNextPhysicalAddress = base + size;
126 return base;
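
The hits at lines 114-126 outline a bump-style physical allocator: it looks for a free stretch of physical memory at or above sNextPhysicalAddress, records it in the allocated-range list, advances the cursor, and returns the start address. A minimal sketch of that pattern follows; the enclosing function and the range-search helper carry assumed names, since the call they belong to is truncated at line 117.

// Sketch only: allocate_next_physical() and find_free_physical_range() are
// assumed names; the bookkeeping mirrors the matches at lines 122-126.
static uint64
allocate_next_physical(size_t size)
{
	uint64 base;
	if (!find_free_physical_range(sNextPhysicalAddress, size, &base)) {
		// no gap large enough among the already allocated physical ranges
		return 0;
	}

	insert_physical_allocated_range(base, size);
		// record the claim, as at line 122
	sNextPhysicalAddress = base + size;
		// bump the cursor, as at line 123

	return base;
}
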
160 /*! Adds a new page table for the specified base address */
162 add_page_table(addr_t base)
169 base = ROUNDDOWN(base, B_PAGE_SIZE * 1024);
179 TRACE("add_page_table(base = %p), got page: %p\n", (void*)base, pageTable);
188 sPageDirectory[base / (4 * 1024 * 1024)]
192 base += B_PAGE_SIZE * 1024;
193 if (base > gKernelArgs.arch_args.virtual_end)
194 gKernelArgs.arch_args.virtual_end = base;
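
Lines 160-194 belong to add_page_table(). One 32-bit page table maps 1024 pages, i.e. 4 MB of virtual address space, which explains both the ROUNDDOWN granularity at line 169 and the page-directory index at line 188. A compressed sketch, with the page allocation and the table flags marked as assumptions:

// Sketch: one 32-bit page table maps 1024 * B_PAGE_SIZE = 4 MB, so 'base'
// directly selects a page-directory slot. The page allocation helper and the
// flag constant are assumptions, not part of the matches.
static void
add_page_table(addr_t base)
{
	base = ROUNDDOWN(base, B_PAGE_SIZE * 1024);

	uint32* pageTable = (uint32*)allocate_physical_page();
		// hypothetical helper standing in for the elided allocation
	TRACE("add_page_table(base = %p), got page: %p\n", (void*)base, pageTable);

	sPageDirectory[base / (4 * 1024 * 1024)]
		= (uint32)pageTable | kPageTableFlags;
		// kPageTableFlags: assumed present/writable flags

	// keep the recorded end of the mapped virtual range up to date
	base += B_PAGE_SIZE * 1024;
	if (base > gKernelArgs.arch_args.virtual_end)
		gKernelArgs.arch_args.virtual_end = base;
}
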
307 dprintf(" base 0x%08Lx, len 0x%08Lx, type %lu (%s)\n",
401 // 'kMaxKernelSize' bytes following the kernel base address.
452 mmu_allocate_physical(addr_t base, size_t size)
456 gKernelArgs.num_physical_memory_ranges, base, size)) {
463 gKernelArgs.num_physical_allocated_ranges, base, size, &foundBase)
464 || foundBase != base) {
468 return insert_physical_allocated_range(base, size) == B_OK;
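
The matches at lines 452-468 show the two checks mmu_allocate_physical() performs before claiming a caller-chosen physical range: the range must be covered by the known physical memory ranges, and a search of the allocated-range list for a free block of that size at that address must come back with exactly 'base' (any other foundBase means part of the range is already taken). A sketch with the two truncated helper calls given assumed names:

// Sketch: is_range_covered() and find_free_range() are assumed names for the
// two truncated helper calls; the argument lists follow lines 456 and 463.
bool
mmu_allocate_physical(addr_t base, size_t size)
{
	// the requested range has to be backed by real, known physical memory
	if (!is_range_covered(gKernelArgs.physical_memory_range,
			gKernelArgs.num_physical_memory_ranges, base, size)) {
		return false;
	}

	// and it must not intersect anything already handed out: a free block
	// found at any address other than 'base' means the range is taken
	uint64 foundBase;
	if (!find_free_range(gKernelArgs.physical_allocated_range,
			gKernelArgs.num_physical_allocated_ranges, base, size, &foundBase)
		|| foundBase != base) {
		return false;
	}

	return insert_physical_allocated_range(base, size) == B_OK;
}
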
575 gdtDescriptor.base = gBootGDT;
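
Line 575 fills the base field of a GDT descriptor with the boot GDT before it is loaded. For orientation only, here is a generic sketch of that step; the two-field layout is the architectural lgdt operand (16-bit limit plus linear base address), and everything except the matched assignment is an assumption:

// Generic sketch; only the 'base = gBootGDT' assignment comes from the match.
struct gdt_descr {
	uint16	limit;	// size of the table in bytes, minus one
	void*	base;	// linear address of the first descriptor
} _PACKED;

static void
load_boot_gdt()
{
	gdt_descr gdtDescriptor;
	gdtDescriptor.limit = sizeof(gBootGDT) - 1;
		// assumes gBootGDT is a statically sized descriptor array
	gdtDescriptor.base = gBootGDT;
	asm volatile("lgdt %0" : : "m" (gdtDescriptor));
}
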
602 dprintf(" base %#018" B_PRIx64 ", length %#018" B_PRIx64 "\n",
609 dprintf(" base %#018" B_PRIx64 ", length %#018" B_PRIx64 "\n",
616 dprintf(" base %#018" B_PRIx64 ", length %#018" B_PRIx64 "\n",
673 uint64 base = extMemoryBlock[i].base_addr;
675 uint64 end = base + length;
679 base = ROUNDUP(base, B_PAGE_SIZE);
694 if (base < 0x100000)
695 base = 0x100000;
698 += length - (max_c(end, base) - base);
700 if (end <= base)
703 status_t status = insert_physical_memory_range(base, end - base);
707 "used already!\n", base, end, MAX_PHYSICAL_MEMORY_RANGE);
710 "%#" B_PRIx64 " - %#" B_PRIx64 "\n", base, end);
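
Lines 673-710 form the first pass over the BIOS (E820-style) memory map: each entry is shrunk to whole pages, clamped so nothing below 1 MB enters the usable map, the clipped-off bytes are added to an ignored-memory counter, and what remains is handed to insert_physical_memory_range(). A condensed sketch of that pass; the entry struct, the wrapper function, and the names extMemoryCount and ignoredMemory are stand-ins for identifiers the matches truncate, and the two distinct error paths behind lines 707/710 are collapsed into one panic:

// Minimal stand-in for one BIOS memory map entry; base_addr and length appear
// in the matches at lines 673 and 722, 'type' in the dprintf at line 307.
struct extended_memory {
	uint64	base_addr;
	uint64	length;
	uint32	type;
};

// Condensed sketch of the first pass. The rounding of 'end' is implied by the
// accounting at line 698 rather than matched directly.
static void
register_usable_memory(const extended_memory* extMemoryBlock,
	uint32 extMemoryCount, uint64& ignoredMemory)
{
	for (uint32 i = 0; i < extMemoryCount; i++) {
		uint64 base = extMemoryBlock[i].base_addr;
		uint64 length = extMemoryBlock[i].length;
		uint64 end = base + length;

		// shrink the entry to whole pages and keep everything below 1 MB
		// out of the usable map
		base = ROUNDUP(base, B_PAGE_SIZE);
		end = ROUNDDOWN(end, B_PAGE_SIZE);
		if (base < 0x100000)
			base = 0x100000;

		// account for whatever the alignment and clamping cut off
		ignoredMemory += length - (max_c(end, base) - base);

		if (end <= base)
			continue;

		if (insert_physical_memory_range(base, end - base) != B_OK) {
			panic("failed to add physical memory range %#" B_PRIx64 " - %#"
				B_PRIx64 "\n", base, end);
		}
	}
}
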
721 uint64 base = extMemoryBlock[i].base_addr;
722 uint64 end = ROUNDUP(base + extMemoryBlock[i].length, B_PAGE_SIZE);
723 base = ROUNDDOWN(base, B_PAGE_SIZE);
725 status_t status = remove_physical_memory_range(base, end - base);
728 "%#" B_PRIx64 " - %#" B_PRIx64 "\n", base, end);
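
Lines 721-728 are the complementary second pass: entries that must not be used are rounded outward to page borders and removed from the map again, so a reserved region that shares a page with usable memory takes that whole page with it. A sketch using the same stand-ins as above; the test that selects the non-usable entries is not among the matches and is elided:

// Condensed sketch of the second pass, reusing the stand-ins from above.
// Note the opposite rounding: the range to remove is grown to page borders,
// so no partially covered page stays marked usable.
static void
unregister_reserved_memory(const extended_memory* extMemoryBlock,
	uint32 extMemoryCount)
{
	for (uint32 i = 0; i < extMemoryCount; i++) {
		// elided: skip entries whose 'type' marks them as usable memory

		uint64 base = extMemoryBlock[i].base_addr;
		uint64 end = ROUNDUP(base + extMemoryBlock[i].length, B_PAGE_SIZE);
		base = ROUNDDOWN(base, B_PAGE_SIZE);

		if (remove_physical_memory_range(base, end - base) != B_OK) {
			panic("failed to remove physical memory range %#" B_PRIx64
				" - %#" B_PRIx64 "\n", base, end);
		}
	}
}
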
813 platform_release_heap(struct stage2_args *args, void *base)