1/*-
2 * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
3 * Copyright (c) 2014 The FreeBSD Foundation
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#define __ELF_WORD_SIZE 64
29#include <sys/param.h>
30#include <sys/exec.h>
31#include <sys/linker.h>
32#include <string.h>
33#include <machine/elf.h>
34#include <stand.h>
35#include <vm/vm.h>
36#include <vm/pmap.h>
37
38#include <efi.h>
39#include <efilib.h>
40
41#include "bootstrap.h"
42
43#include "loader_efi.h"
44
/*
 * Build the kernel's bootinfo/module metadata image, returning the
 * module pointer and end-of-kernel addresses.  Defined elsewhere in the
 * loader.  NOTE(review): exit_bs presumably controls whether EFI boot
 * services are exited as part of the load — confirm against bi_load()'s
 * definition; elf64_exec() below quiesces EFI time services around a
 * call with exit_bs == true.
 */
extern int bi_load(char *args, vm_offset_t *modulep, vm_offset_t *kernendp,
    bool exit_bs);

/* exec handlers registered in the file_format tables below. */
static int	elf64_exec(struct preloaded_file *amp);
static int	elf64_obj_exec(struct preloaded_file *amp);
50
/* Native amd64 ELF executable kernels. */
static struct file_format amd64_elf = {
	.l_load = elf64_loadfile,
	.l_exec = elf64_exec,
};
/* Relocatable ELF object (.o) kernels; exec is rejected below. */
static struct file_format amd64_elf_obj = {
	.l_load = elf64_obj_loadfile,
	.l_exec = elf64_obj_exec,
};
59
/* multiboot2 handlers, defined elsewhere in the loader. */
extern struct file_format multiboot2;
extern struct file_format multiboot2_obj;

/*
 * Kernel formats known to this loader.  NOTE(review): entries are
 * presumably probed in array order by the generic load code, so
 * multiboot2 images are claimed before falling back to the native
 * ELF handlers — confirm against the loadfile() caller.
 */
struct file_format *file_formats[] = {
	&multiboot2,
	&multiboot2_obj,
	&amd64_elf,
	&amd64_elf_obj,
	NULL
};
70
/*
 * Page tables built for the trampoline.  The copy-staging case uses a
 * single PT4/PT3/PT2 chain where every 1GB slot maps the first 1GB of
 * physical memory; the non-copying case builds a 1:1 map of the low 4G
 * (PT3_l, PT2_l0..PT2_l3) plus a kernel mapping 2G below the top of the
 * address space (PT3_u, PT2_u0/PT2_u1).  See elf64_exec() below.
 */
static pml4_entry_t *PT4;
static pdp_entry_t *PT3;
static pdp_entry_t *PT3_l, *PT3_u;
static pd_entry_t *PT2;
static pd_entry_t *PT2_l0, *PT2_l1, *PT2_l2, *PT2_l3, *PT2_u0, *PT2_u1;

/* Physical address of the staged kernel image (set by the copy code). */
extern EFI_PHYSICAL_ADDRESS staging;

/*
 * Entry point of the relocated trampoline code; invoked as the last
 * step of elf64_exec() and does not return (panic() follows the call).
 */
static void (*trampoline)(uint64_t stack, void *copy_finish, uint64_t kernend,
    uint64_t modulep, pml4_entry_t *pagetable, uint64_t entry);

/* Trampoline machine-code blob and its size (assembly stub). */
extern uintptr_t amd64_tramp;
extern uint32_t amd64_tramp_size;
84
85/*
86 * There is an ELF kernel and one or more ELF modules loaded.
87 * We wish to start executing the kernel image, so make such
88 * preparations as are required, and do so.
89 */
90static int
91elf64_exec(struct preloaded_file *fp)
92{
93	struct file_metadata	*md;
94	Elf_Ehdr 		*ehdr;
95	vm_offset_t		modulep, kernend, trampcode, trampstack;
96	int			err, i;
97	bool			copy_auto;
98
99	copy_auto = copy_staging == COPY_STAGING_AUTO;
100	if (copy_auto)
101		copy_staging = fp->f_kernphys_relocatable ?
102		    COPY_STAGING_DISABLE : COPY_STAGING_ENABLE;
103
104	if ((md = file_findmetadata(fp, MODINFOMD_ELFHDR)) == NULL)
105		return (EFTYPE);
106	ehdr = (Elf_Ehdr *)&(md->md_data);
107
108	trampcode = copy_staging == COPY_STAGING_ENABLE ?
109	    (vm_offset_t)0x0000000040000000 /* 1G */ :
110	    (vm_offset_t)0x0000000100000000; /* 4G */;
111	err = BS->AllocatePages(AllocateMaxAddress, EfiLoaderData, 1,
112	    (EFI_PHYSICAL_ADDRESS *)&trampcode);
113	if (EFI_ERROR(err)) {
114		printf("Unable to allocate trampoline\n");
115		if (copy_auto)
116			copy_staging = COPY_STAGING_AUTO;
117		return (ENOMEM);
118	}
119	bzero((void *)trampcode, EFI_PAGE_SIZE);
120	trampstack = trampcode + EFI_PAGE_SIZE - 8;
121	bcopy((void *)&amd64_tramp, (void *)trampcode, amd64_tramp_size);
122	trampoline = (void *)trampcode;
123
124	if (copy_staging == COPY_STAGING_ENABLE) {
125		PT4 = (pml4_entry_t *)0x0000000040000000; /* 1G */
126		err = BS->AllocatePages(AllocateMaxAddress, EfiLoaderData, 3,
127		    (EFI_PHYSICAL_ADDRESS *)&PT4);
128		if (EFI_ERROR(err)) {
129			printf("Unable to allocate trampoline page table\n");
130			BS->FreePages(trampcode, 1);
131			if (copy_auto)
132				copy_staging = COPY_STAGING_AUTO;
133			return (ENOMEM);
134		}
135		bzero(PT4, 3 * EFI_PAGE_SIZE);
136		PT3 = &PT4[512];
137		PT2 = &PT3[512];
138
139		/*
140		 * This is kinda brutal, but every single 1GB VM
141		 * memory segment points to the same first 1GB of
142		 * physical memory.  But it is more than adequate.
143		 */
144		for (i = 0; i < NPTEPG; i++) {
145			/*
146			 * Each slot of the L4 pages points to the
147			 * same L3 page.
148			 */
149			PT4[i] = (pml4_entry_t)PT3;
150			PT4[i] |= PG_V | PG_RW;
151
152			/*
153			 * Each slot of the L3 pages points to the
154			 * same L2 page.
155			 */
156			PT3[i] = (pdp_entry_t)PT2;
157			PT3[i] |= PG_V | PG_RW;
158
159			/*
160			 * The L2 page slots are mapped with 2MB pages for 1GB.
161			 */
162			PT2[i] = (pd_entry_t)i * (2 * 1024 * 1024);
163			PT2[i] |= PG_V | PG_RW | PG_PS;
164		}
165	} else {
166		PT4 = (pml4_entry_t *)0x0000000100000000; /* 4G */
167		err = BS->AllocatePages(AllocateMaxAddress, EfiLoaderData, 9,
168		    (EFI_PHYSICAL_ADDRESS *)&PT4);
169		if (EFI_ERROR(err)) {
170			printf("Unable to allocate trampoline page table\n");
171			BS->FreePages(trampcode, 9);
172			if (copy_auto)
173				copy_staging = COPY_STAGING_AUTO;
174			return (ENOMEM);
175		}
176
177		bzero(PT4, 9 * EFI_PAGE_SIZE);
178
179		PT3_l = &PT4[NPML4EPG * 1];
180		PT3_u = &PT4[NPML4EPG * 2];
181		PT2_l0 = &PT4[NPML4EPG * 3];
182		PT2_l1 = &PT4[NPML4EPG * 4];
183		PT2_l2 = &PT4[NPML4EPG * 5];
184		PT2_l3 = &PT4[NPML4EPG * 6];
185		PT2_u0 = &PT4[NPML4EPG * 7];
186		PT2_u1 = &PT4[NPML4EPG * 8];
187
188		/* 1:1 mapping of lower 4G */
189		PT4[0] = (pml4_entry_t)PT3_l | PG_V | PG_RW;
190		PT3_l[0] = (pdp_entry_t)PT2_l0 | PG_V | PG_RW;
191		PT3_l[1] = (pdp_entry_t)PT2_l1 | PG_V | PG_RW;
192		PT3_l[2] = (pdp_entry_t)PT2_l2 | PG_V | PG_RW;
193		PT3_l[3] = (pdp_entry_t)PT2_l3 | PG_V | PG_RW;
194		for (i = 0; i < 4 * NPDEPG; i++) {
195			PT2_l0[i] = ((pd_entry_t)i << PDRSHIFT) | PG_V |
196			    PG_RW | PG_PS;
197		}
198
199		/* mapping of kernel 2G below top */
200		PT4[NPML4EPG - 1] = (pml4_entry_t)PT3_u | PG_V | PG_RW;
201		PT3_u[NPDPEPG - 2] = (pdp_entry_t)PT2_u0 | PG_V | PG_RW;
202		PT3_u[NPDPEPG - 1] = (pdp_entry_t)PT2_u1 | PG_V | PG_RW;
203		/* compat mapping of phys @0 */
204		PT2_u0[0] = PG_PS | PG_V | PG_RW;
205		/* this maps past staging area */
206		for (i = 1; i < 2 * NPDEPG; i++) {
207			PT2_u0[i] = ((pd_entry_t)staging +
208			    ((pd_entry_t)i - 1) * NBPDR) |
209			    PG_V | PG_RW | PG_PS;
210		}
211	}
212
213	printf("staging %#lx (%scopying) tramp %p PT4 %p\n",
214	    staging, copy_staging == COPY_STAGING_ENABLE ? "" : "not ",
215	    trampoline, PT4);
216	printf("Start @ 0x%lx ...\n", ehdr->e_entry);
217
218	efi_time_fini();
219	err = bi_load(fp->f_args, &modulep, &kernend, true);
220	if (err != 0) {
221		efi_time_init();
222		if (copy_auto)
223			copy_staging = COPY_STAGING_AUTO;
224		return (err);
225	}
226
227	dev_cleanup();
228
229	trampoline(trampstack, copy_staging == COPY_STAGING_ENABLE ?
230	    efi_copy_finish : efi_copy_finish_nop, kernend, modulep,
231	    PT4, ehdr->e_entry);
232
233	panic("exec returned");
234}
235
236static int
237elf64_obj_exec(struct preloaded_file *fp)
238{
239
240	return (EFTYPE);
241}
242