1/*
2 * Copyright 2019-2022 Haiku, Inc. All rights reserved.
3 * Released under the terms of the MIT License.
4 */
5
6
7#include <kernel.h>
8#include <boot/arch/arm/arch_cpu.h>
9#include <boot/platform.h>
10#include <boot/stage2.h>
11#include <boot/stdio.h>
12
13#include "efi_platform.h"
14#include "generic_mmu.h"
15#include "mmu.h"
16#include "serial.h"
17#include "smp.h"
18
19//#define TRACE_ARCH_START
20#ifdef TRACE_ARCH_START
21#	define TRACE(x...) dprintf(x)
22#else
23#	define TRACE(x...) ;
24#endif
25
26
27#define ALIGN_MEMORY_MAP	4
28
29
30extern "C" void clean_dcache_all(void);
31extern "C" void invalidate_icache_all(void);
32
33extern "C" typedef void (*arch_enter_kernel_t)(uint32_t, addr_t, addr_t, addr_t);
34
35
36// From entry.S
37extern "C" void arch_enter_kernel(uint32_t ttbr, addr_t kernelArgs,
38	addr_t kernelEntry, addr_t kernelStackTop);
39
40// From arch_mmu.cpp
41extern void arch_mmu_post_efi_setup(size_t memoryMapSize,
42	efi_memory_descriptor *memoryMap, size_t descriptorSize,
43	uint32_t descriptorVersion);
44
45extern uint32_t arch_mmu_generate_post_efi_page_tables(size_t memoryMapSize,
46	efi_memory_descriptor *memoryMap, size_t descriptorSize,
47	uint32_t descriptorVersion);
48
49
void
arch_convert_kernel_args(void)
{
	// Convert the FDT pointer stored in the kernel args from a
	// bootloader-space address to its kernel-space equivalent.
	// NOTE(review): presumably fix_address() rebases the pointer into the
	// kernel's virtual address space — confirm against its definition.
	fix_address(gKernelArgs.arch_args.fdt);
}
55
56
// Allocate a single identity-mapped page to hold the entry.S trampoline
// (arch_enter_kernel is copied there so it can run while the MMU is being
// switched over to the kernel's page tables).
//
// Three strategies are tried in order:
//   1. Allocate from low memory (platform_allocate_lomem).
//   2. Allocate an identity-mapped region at the next free virtual address.
//   3. Allocate anywhere, free it, then immediately re-allocate with the
//      identity-mapped flag set — relying on the allocator handing back a
//      (higher) region that can be identity mapped.
//
// Returns a pointer to the page, or NULL if all strategies fail.
static void *
allocate_trampoline_page(void)
{
	void *trampolinePage = NULL;
	// Strategy 1: low-memory allocation.
	if (platform_allocate_lomem(&trampolinePage, B_PAGE_SIZE) == B_OK)
		return trampolinePage;

	// Strategy 2: identity-mapped region at the next virtual address
	// (the `true` argument requests an exact/identity placement —
	// NOTE(review): confirm flag semantics against platform_allocate_region).
	trampolinePage = (void *)get_next_virtual_address(B_PAGE_SIZE);
	if (platform_allocate_region(&trampolinePage, B_PAGE_SIZE, 0, true) == B_OK)
		return trampolinePage;

	// Strategy 3: let the allocator pick any address first...
	trampolinePage = NULL;
	if (platform_allocate_region(&trampolinePage, B_PAGE_SIZE, 0, false) != B_OK)
		return NULL;

	// ...then release it and re-allocate the same spot identity-mapped.
	if (platform_free_region(trampolinePage, B_PAGE_SIZE) != B_OK)
		return NULL;

	if (platform_allocate_region(&trampolinePage, B_PAGE_SIZE, 0, true) != B_OK)
		return NULL;

	// Sanity check: the fallback path is expected to yield a high address.
	// NOTE(review): 0x88000000 looks like a platform-specific boundary —
	// verify where this constant comes from.
	ASSERT_ALWAYS((uint32_t)trampolinePage >= 0x88000000);
	return trampolinePage;
}
81
82
83void
84arch_start_kernel(addr_t kernelEntry)
85{
86	// Allocate virtual memory for kernel args
87	struct kernel_args *kernelArgs = NULL;
88	if (platform_allocate_region((void **)&kernelArgs,
89			sizeof(struct kernel_args), 0, false) != B_OK)
90		panic("Failed to allocate kernel args.");
91
92	addr_t virtKernelArgs;
93	platform_bootloader_address_to_kernel_address((void*)kernelArgs,
94		&virtKernelArgs);
95
96	// Allocate identity mapped region for entry.S trampoline
97	void *trampolinePage = allocate_trampoline_page();
98	if (trampolinePage == NULL)
99		panic("Failed to allocate trampoline page.");
100
101	memcpy(trampolinePage, (void *)arch_enter_kernel, B_PAGE_SIZE);
102	arch_enter_kernel_t enter_kernel = (arch_enter_kernel_t)trampolinePage;
103
104	// Prepare to exit EFI boot services.
105	// Read the memory map.
106	// First call is to determine the buffer size.
107	size_t memoryMapSize = 0;
108	efi_memory_descriptor dummy;
109	size_t mapKey;
110	size_t descriptorSize;
111	uint32_t descriptorVersion;
112	if (kBootServices->GetMemoryMap(&memoryMapSize, &dummy, &mapKey,
113			&descriptorSize, &descriptorVersion) != EFI_BUFFER_TOO_SMALL) {
114		panic("Unable to determine size of system memory map");
115	}
116
117	// Allocate a buffer twice as large as needed just in case it gets bigger
118	// between calls to ExitBootServices.
119	size_t actualMemoryMapSize = memoryMapSize * 2;
120	efi_memory_descriptor *memoryMap
121		= (efi_memory_descriptor *)kernel_args_malloc(actualMemoryMapSize +
122			ALIGN_MEMORY_MAP);
123
124	// align memory_map to 4-byte boundary
125	// otherwise we get alignment exception when calling GetMemoryMap below
126	memoryMap = (efi_memory_descriptor *)ROUNDUP((uint32_t)memoryMap, ALIGN_MEMORY_MAP);
127
128	if (memoryMap == NULL)
129		panic("Unable to allocate memory map.");
130
131	// Read (and print) the memory map.
132	memoryMapSize = actualMemoryMapSize;
133	if (kBootServices->GetMemoryMap(&memoryMapSize, memoryMap, &mapKey,
134			&descriptorSize, &descriptorVersion) != EFI_SUCCESS) {
135		panic("Unable to fetch system memory map.");
136	}
137
138	addr_t addr = (addr_t)memoryMap;
139	dprintf("System provided memory map:\n");
140	for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
141		efi_memory_descriptor *entry
142			= (efi_memory_descriptor *)(addr + i * descriptorSize);
143		dprintf("  phys: 0x%08" PRIx64 "-0x%08" PRIx64
144			", virt: 0x%08" PRIx64 "-0x%08" PRIx64
145			", type: %s (%#x), attr: %#" PRIx64 "\n",
146			entry->PhysicalStart,
147			entry->PhysicalStart + entry->NumberOfPages * B_PAGE_SIZE,
148			entry->VirtualStart,
149			entry->VirtualStart + entry->NumberOfPages * B_PAGE_SIZE,
150			memory_region_type_str(entry->Type), entry->Type,
151			entry->Attribute);
152	}
153
154	// Generate page tables for use after ExitBootServices.
155	uint32_t final_ttbr0 = arch_mmu_generate_post_efi_page_tables(
156		memoryMapSize, memoryMap, descriptorSize, descriptorVersion);
157
158	// Attempt to fetch the memory map and exit boot services.
159	// This needs to be done in a loop, as ExitBootServices can change the
160	// memory map.
161	// Even better: Only GetMemoryMap and ExitBootServices can be called after
162	// the first call to ExitBootServices, as the firmware is permitted to
163	// partially exit. This is why twice as much space was allocated for the
164	// memory map, as it's impossible to allocate more now.
165	// A changing memory map shouldn't affect the generated page tables, as
166	// they only needed to know about the maximum address, not any specific
167	// entry.
168
169	dprintf("Calling ExitBootServices. So long, EFI!\n");
170	serial_disable();
171	while (true) {
172		if (kBootServices->ExitBootServices(kImage, mapKey) == EFI_SUCCESS) {
173			// Disconnect from EFI serial_io / stdio services
174			serial_kernel_handoff();
175			dprintf("Unhooked from EFI serial services\n");
176			break;
177		}
178
179		memoryMapSize = actualMemoryMapSize;
180		if (kBootServices->GetMemoryMap(&memoryMapSize, memoryMap, &mapKey,
181				&descriptorSize, &descriptorVersion) != EFI_SUCCESS) {
182			panic("Unable to fetch system memory map.");
183		}
184	}
185
186	// Update EFI, generate final kernel physical memory map, etc.
187	arch_mmu_post_efi_setup(memoryMapSize, memoryMap,
188		descriptorSize, descriptorVersion);
189
190	// Re-init and activate serial in a horrific post-EFI landscape. Clowns roam the land freely.
191	serial_init();
192	serial_enable();
193
194	// Copy final kernel args
195	// This should be the last step before jumping to the kernel
196	// as there are some fixups happening to kernel_args even in the last minute
197	memcpy(kernelArgs, &gKernelArgs, sizeof(struct kernel_args));
198
199	//smp_boot_other_cpus(final_ttbr0, kernelEntry, (addr_t)&gKernelArgs);
200
201	TRACE("CPSR = 0x%08" B_PRIx32 "\n", cpu_read_CPSR());
202	TRACE("SCTLR = 0x%08" B_PRIx32 "\n", mmu_read_SCTLR());
203	TRACE("TTBR0 = 0x%08" B_PRIx32 ", TTBR1 = 0x%08" B_PRIx32 ", TTBCR = 0x%08" B_PRIx32 "\n",
204		mmu_read_TTBR0(), mmu_read_TTBR1(), mmu_read_TTBCR());
205	TRACE("DACR = 0x%08" B_PRIx32 "\n",
206		mmu_read_DACR());
207
208	clean_dcache_all();
209	invalidate_icache_all();
210
211	// Enter the kernel!
212	dprintf("enter_kernel(ttbr0: 0x%08x, kernelArgs: 0x%08x, "
213		"kernelEntry: 0x%08x, sp: 0x%08x)\n",
214		final_ttbr0, (uint32_t)virtKernelArgs, (uint32_t)kernelEntry,
215		(uint32_t)(gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size));
216
217	enter_kernel(final_ttbr0, virtKernelArgs, kernelEntry,
218		gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size);
219}
220