Lines Matching refs:address

106 addr_t address = sNextVirtualAddress;
109 TRACE(("%s(%d): %08x\n", __FUNCTION__, size, address));
110 return address;
117 addr_t address = sNextPhysicalAddress;
120 TRACE(("%s(%d): %08x\n", __FUNCTION__, size, address));
121 return address;
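
The matches at 106-121 come from two parallel bump allocators: get_next_virtual_address() and get_next_physical_address() both hand out space by advancing a cursor that only ever moves forward. A minimal sketch of that pattern, with stand-in types for Haiku's addr_t and B_PAGE_SIZE (the sketches further down reuse these):

#include <stddef.h>
#include <stdint.h>

typedef uintptr_t addr_t;			// stand-in for Haiku's addr_t
static const size_t B_PAGE_SIZE = 4096;	// stand-in; the real constant comes from OS headers

static addr_t sNextVirtualAddress;
static addr_t sNextPhysicalAddress;

// Hand out the next `size` bytes of loader virtual address space.
static addr_t
get_next_virtual_address(size_t size)
{
	addr_t address = sNextVirtualAddress;
	sNextVirtualAddress += size;
	return address;
}

// Same pattern for physical memory: nothing is ever handed back,
// the cursor only advances.
static addr_t
get_next_physical_address(size_t size)
{
	addr_t address = sNextPhysicalAddress;
	sNextPhysicalAddress += size;
	return address;
}
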
149 addr_t address = sNextPageTableAddress;
150 if (address >= kPageTableRegionEnd)
154 return (uint32 *)address;
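
Lines 149-154 are the page-table allocator: tables come from a reserved region, and the check against kPageTableRegionEnd guards its exhaustion. The lines between 150 and 154 are not in the matches, so what happens past the end of the region is not visible; falling back to get_next_physical_page() (the helper seen at line 346) is an assumption in this sketch, which reuses the stand-ins from the first sketch:

extern addr_t get_next_physical_page();	// seen at line 346

static addr_t sNextPageTableAddress;
extern const addr_t kPageTableRegionEnd;

static uint32_t *
get_next_page_table()
{
	addr_t address = sNextPageTableAddress;
	if (address >= kPageTableRegionEnd)
		return (uint32_t *)get_next_physical_page();	// assumed fallback

	sNextPageTableAddress += B_PAGE_SIZE;
	return (uint32_t *)address;
}
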
172 /** Adds a new page table for the specified base address */
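
The doc comment at 172 names the job of add_page_table(). On x86 that means: take a fresh page table, zero it, and point the page directory entry for the 4 MB slot containing the base address at it. A hypothetical sketch; sPageDirectory and the use of kDefaultPageFlags here are assumptions. Note the boot loader identity-maps this memory (per the comment at line 271), so the table's pointer can double as its physical address:

extern uint32_t *sPageDirectory;		// assumed name for the page directory
extern const uint32_t kDefaultPageFlags;	// seen at line 346

static void
add_page_table(addr_t base)
{
	uint32_t *pageTable = get_next_page_table();

	// clear out all 1024 entries so no stale mappings leak through
	for (int i = 0; i < 1024; i++)
		pageTable[i] = 0;

	// one directory entry covers 1024 * 4 KB = 4 MB of address space
	sPageDirectory[base / (1024 * B_PAGE_SIZE)]
		= (uint32_t)(addr_t)pageTable | kDefaultPageFlags;
}
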
271 // physical and virtual address are the same.
310 addr_t address = sNextVirtualAddress;
319 return address + pageOffset;
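
Lines 310 and 319 bracket what is evidently a physical-memory mapping routine: the caller's range need not be page aligned, so the in-page offset is saved up front and re-applied to the returned virtual address. A sketch of that logic; the function name and signature are inferred, and get_next_virtual_page() is assumed as the per-page counterpart of get_next_virtual_address():

extern addr_t get_next_virtual_page();	// assumed helper
extern void map_page(addr_t virtualAddress, addr_t physicalAddress,
	uint32_t flags);	// seen at line 346

addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32_t flags)
{
	addr_t address = sNextVirtualAddress;
	addr_t pageOffset = physicalAddress % B_PAGE_SIZE;

	// align the physical start down to a page and widen the size to match
	physicalAddress -= pageOffset;
	size += pageOffset;

	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE)
		map_page(get_next_virtual_page(), physicalAddress + offset, flags);

	// the caller gets back the same in-page offset it passed in
	return address + pageOffset;
}
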
335 // 1 MB following the kernel base address.
338 addr_t address = (addr_t)virtualAddress;
340 // is the address within the valid range?
341 if (address < KERNEL_LOAD_BASE || address + size * B_PAGE_SIZE
346 map_page(address, get_next_physical_page(), kDefaultPageFlags);
347 address += B_PAGE_SIZE;
354 void *address = (void *)sNextVirtualAddress;
360 TRACE(("mmu_allocate(NULL, %d): %p\n", size, address));
361 return address;
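
Lines 335-361 are the two halves of mmu_allocate(). With an explicit virtual address the request is bounds-checked against the kernel's load window and backed page by page with fresh physical pages; with NULL, the next free virtual pages are taken. The match at 341 cuts the condition off, and the comment at 335 only hints at the window's extent, so the kMaxKernelSize bound below is an assumption:

extern const addr_t KERNEL_LOAD_BASE;
extern const size_t kMaxKernelSize;	// assumed name for the window size

void *
mmu_allocate(void *virtualAddress, size_t size)
{
	// convert the byte count into a page count
	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	if (virtualAddress != NULL) {
		addr_t address = (addr_t)virtualAddress;

		// is the address within the valid range?
		if (address < KERNEL_LOAD_BASE || address + size * B_PAGE_SIZE
				>= KERNEL_LOAD_BASE + kMaxKernelSize)
			return NULL;

		for (size_t i = 0; i < size; i++) {
			map_page(address, get_next_physical_page(), kDefaultPageFlags);
			address += B_PAGE_SIZE;
		}
		return virtualAddress;
	}

	void *address = (void *)sNextVirtualAddress;
	for (size_t i = 0; i < size; i++) {
		map_page(get_next_virtual_page(), get_next_physical_page(),
			kDefaultPageFlags);
	}
	return address;
}
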
366 * address space. It might not actually free memory (as its implementation
375 addr_t address = (addr_t)virtualAddress;
376 addr_t pageOffset = address % B_PAGE_SIZE;
377 address -= pageOffset;
380 // is the address within the valid range?
381 if (address < KERNEL_LOAD_BASE || address + size > sNextVirtualAddress) {
383 (void *)address, size);
388 unmap_page(address);
389 address += B_PAGE_SIZE;
392 if (address == sNextVirtualAddress) {
393 // we can actually reuse the virtual address space
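
Lines 366-393 sketch out mmu_free(): as the truncated comment at 366 says, it might not actually free memory. It always unmaps the pages, but the virtual address range itself is only recovered when it ends exactly at the allocation cursor (the test at 392); anything freed in the middle of the allocated span stays burned. A sketch of the visible logic, with the size rounding assumed:

extern void unmap_page(addr_t virtualAddress);	// seen at line 388
extern void panic(const char *format, ...);

void
mmu_free(void *virtualAddress, size_t size)
{
	addr_t address = (addr_t)virtualAddress;
	addr_t pageOffset = address % B_PAGE_SIZE;
	address -= pageOffset;
	// round the range out to whole pages (assumed)
	size = (size + pageOffset + B_PAGE_SIZE - 1) / B_PAGE_SIZE * B_PAGE_SIZE;

	// is the address within the valid range?
	if (address < KERNEL_LOAD_BASE || address + size > sNextVirtualAddress) {
		panic("mmu_free: asked to unmap out of range region (%p, size %lx)\n",
			(void *)address, size);
	}

	// unmap all pages within the range
	for (size_t i = 0; i < size; i += B_PAGE_SIZE) {
		unmap_page(address);
		address += B_PAGE_SIZE;
	}

	if (address == sNextVirtualAddress) {
		// the freed region was the last allocation: wind the cursor back
		sNextVirtualAddress -= size;
	}
}
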
445 TRACE(("idt at virtual address 0x%lx\n", gKernelArgs.arch_args.vir_idt));
494 TRACE(("gdt at virtual address %p\n", (void *)gKernelArgs.arch_args.vir_gdt));
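
The traces at 445 and 494 come from the IDT and GDT setup. The vir_ prefix on the kernel-args fields suggests the usual pattern: grab a physical page, grab a virtual page, map one to the other, and record both sides for the kernel. A hypothetical sketch of that pattern only; the phys_ counterpart fields and the helper below are assumptions, not the file's own code:

// assumed shape: both the physical and the virtual address of each
// descriptor table are recorded for the kernel's use
static void
allocate_descriptor_table(uint32_t &physAddress, uint32_t &virtAddress)
{
	addr_t physical = get_next_physical_page();
	addr_t virtualPage = get_next_virtual_page();

	map_page(virtualPage, physical, kDefaultPageFlags);

	physAddress = (uint32_t)physical;	// e.g. a hypothetical phys_idt
	virtAddress = (uint32_t)virtualPage;	// e.g. vir_idt, seen at line 445
}
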
506 // sort the address ranges
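
The comment at 506 marks where the collected memory ranges are sorted before being handed off to the kernel. Only the comment matched, so everything below is an assumed but conventional way to sort such ranges by start address; the struct is modeled on Haiku's kernel-args ranges:

struct addr_range {
	uint64_t start;
	uint64_t size;
};	// assumed shape

static void
sort_address_ranges(addr_range *ranges, uint32_t count)
{
	// insertion sort by start address; range counts here are tiny
	for (uint32_t i = 1; i < count; i++) {
		addr_range temp = ranges[i];
		uint32_t j = i;
		while (j > 0 && ranges[j - 1].start > temp.start) {
			ranges[j] = ranges[j - 1];
			j--;
		}
		ranges[j] = temp;
	}
}
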
651 void *address = mmu_allocate(*_address, size);
652 if (address == NULL)
655 *_address = address;
661 platform_free_region(void *address, size_t size)
663 mmu_free(address, size);
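
Lines 651-663 show that the boot platform API is a thin shim over the MMU code: platform_allocate_region() forwards to mmu_allocate() and writes the result back through _address, and platform_free_region() forwards to mmu_free(). A sketch of the pair, reusing the functions sketched above; the protection parameter and the B_* status values are assumptions based on Haiku's usual conventions:

typedef int32_t status_t;		// stand-in for Haiku's status_t
static const status_t B_OK = 0;		// stand-in values
static const status_t B_NO_MEMORY = -1;

status_t
platform_allocate_region(void **_address, size_t size, uint8_t protection)
{
	// `protection` is ignored here; the loader maps with default flags
	void *address = mmu_allocate(*_address, size);
	if (address == NULL)
		return B_NO_MEMORY;

	*_address = address;
	return B_OK;
}

status_t
platform_free_region(void *address, size_t size)
{
	mmu_free(address, size);
	return B_OK;
}
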