/*-
 * Copyright (c) 2006 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/lib/libkvm/kvm_minidump_amd64.c r261799
 */

#include <sys/cdefs.h>
/*
 * ARM64 (AArch64) machine dependent routines for kvm and minidumps.
 */

#include <sys/param.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <vm/vm.h>
#include <kvm.h>

#include "../../sys/arm64/include/minidump.h"

#include <limits.h>

#include "kvm_private.h"
#include "kvm_aarch64.h"

#define	aarch64_round_page(x, size)	roundup2((kvaddr_t)(x), size)
#define	aarch64_trunc_page(x, size)	rounddown2((kvaddr_t)(x), size)

struct vmstate {
	struct minidumphdr hdr;
	size_t page_size;
	u_int l3_shift;
};

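/*
 * Look up the page table entry at the given index in the cached pmap
 * pages and convert it from the dump's little-endian byte order to
 * host byte order.
 */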
static aarch64_pte_t
_aarch64_pte_get(kvm_t *kd, u_long pteindex)
{
	aarch64_pte_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte));

	return le64toh(*pte);
}

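/* Match ELF64 AArch64 kernel dumps that carry the minidump magic. */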
static int
_aarch64_minidump_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_AARCH64) &&
	    _kvm_is_minidump(kd));
}

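/* Tear down the vmstate allocated by _aarch64_minidump_initvtop(). */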
static void
_aarch64_minidump_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	free(vm);
	kd->vmst = NULL;
}

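/*
 * Parse and sanity-check the minidump header, converting all fields
 * from little-endian, then locate the file sections.  The dump layout
 * is: one header page, the message buffer, dump_avail (version 2+),
 * the page bitmap, the L3 page table pages, and finally the sparse
 * page data, with each section rounded up to a page boundary.
 */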
static int
_aarch64_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	off_t off, dump_avail_off, sparse_off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vmst;
	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
	    sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}
	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
	    sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}

	vmst->hdr.version = le32toh(vmst->hdr.version);
	if (vmst->hdr.version > MINIDUMP_VERSION || vmst->hdr.version < 1) {
		_kvm_err(kd, kd->program, "wrong minidump version. "
		    "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}
	vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize);
	vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize);
	vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize);
	vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase);
	vmst->hdr.dmapphys = le64toh(vmst->hdr.dmapphys);
	vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
	vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);
	/* dumpavailsize added in version 2 */
	if (vmst->hdr.version >= 2) {
		vmst->hdr.dumpavailsize = le32toh(vmst->hdr.dumpavailsize);
	} else {
		vmst->hdr.dumpavailsize = 0;
	}
	/* flags added in version 3 */
	if (vmst->hdr.version >= 3) {
		vmst->hdr.flags = le32toh(vmst->hdr.flags);
	} else {
		vmst->hdr.flags = MINIDUMP_FLAG_PS_4K;
	}

	switch (vmst->hdr.flags & MINIDUMP_FLAG_PS_MASK) {
	case MINIDUMP_FLAG_PS_4K:
		vmst->page_size = AARCH64_PAGE_SIZE_4K;
		vmst->l3_shift = AARCH64_L3_SHIFT_4K;
		break;
	case MINIDUMP_FLAG_PS_16K:
		vmst->page_size = AARCH64_PAGE_SIZE_16K;
		vmst->l3_shift = AARCH64_L3_SHIFT_16K;
		break;
	default:
		_kvm_err(kd, kd->program, "unknown page size flag %x",
		    vmst->hdr.flags & MINIDUMP_FLAG_PS_MASK);
		return (-1);
	}

	/* Skip header and msgbuf */
	dump_avail_off = vmst->page_size +
	    aarch64_round_page(vmst->hdr.msgbufsize, vmst->page_size);

	/* Skip dump_avail */
	off = dump_avail_off +
	    aarch64_round_page(vmst->hdr.dumpavailsize, vmst->page_size);

	/* build physical address lookup table for sparse pages */
	sparse_off = off +
	    aarch64_round_page(vmst->hdr.bitmapsize, vmst->page_size) +
	    aarch64_round_page(vmst->hdr.pmapsize, vmst->page_size);
	if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
	    vmst->hdr.bitmapsize, off, sparse_off, vmst->page_size) == -1) {
		return (-1);
	}
	off += aarch64_round_page(vmst->hdr.bitmapsize, vmst->page_size);

	if (_kvm_pmap_init(kd, vmst->hdr.pmapsize, off) == -1) {
		return (-1);
	}
	off += aarch64_round_page(vmst->hdr.pmapsize, vmst->page_size);

	return (0);
}

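/*
 * Translate a kernel virtual address into a file offset within the
 * minidump.  Direct map addresses are converted arithmetically to
 * physical addresses; addresses at or above kernbase go through the
 * dumped L3 page table entries.  On success the number of contiguous
 * bytes readable at *pa is returned; on failure, 0.
 */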
static int
_aarch64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm;
	aarch64_physaddr_t offset;
	aarch64_pte_t l3;
	kvaddr_t l3_index;
	aarch64_physaddr_t a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & (kd->vmst->page_size - 1);

	if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
		a = aarch64_trunc_page(va - vm->hdr.dmapbase + vm->hdr.dmapphys,
		    kd->vmst->page_size);
		ofs = _kvm_pt_find(kd, a, kd->vmst->page_size);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
			    "direct map address 0x%jx not in minidump",
			    (uintmax_t)va);
			goto invalid;
		}
		*pa = ofs + offset;
		return (kd->vmst->page_size - offset);
	} else if (va >= vm->hdr.kernbase) {
		l3_index = (va - vm->hdr.kernbase) >> kd->vmst->l3_shift;
		if (l3_index >= vm->hdr.pmapsize / sizeof(l3))
			goto invalid;
		l3 = _aarch64_pte_get(kd, l3_index);
		if ((l3 & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE) {
			_kvm_err(kd, kd->program,
			    "_aarch64_minidump_vatop: pde not valid");
			goto invalid;
		}
		a = l3 & ~AARCH64_ATTR_MASK;
		ofs = _kvm_pt_find(kd, a, kd->vmst->page_size);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
			    "physical address 0x%jx not in minidump",
			    (uintmax_t)a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (kd->vmst->page_size - offset);
	} else {
		_kvm_err(kd, kd->program,
		    "_aarch64_minidump_vatop: virtual address 0x%jx not minidumped",
		    (uintmax_t)va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}

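/*
 * kvatop entry point.  Minidump handlers only apply to crash dumps,
 * so refuse to translate addresses for a live kernel.
 */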
static int
_aarch64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0,
		    "_aarch64_minidump_kvatop called in live kernel!");
		return (0);
	}
	return (_aarch64_minidump_vatop(kd, va, pa));
}

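/* True only when libkvm itself was built for aarch64. */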
static int
_aarch64_native(kvm_t *kd __unused)
{

#ifdef __aarch64__
	return (1);
#else
	return (0);
#endif
}

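/*
 * Convert page table entry attribute bits to VM protection flags:
 * writable unless the AP bits say read-only, executable unless the
 * execute-never bit is set.
 */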
static vm_prot_t
_aarch64_entry_to_prot(aarch64_pte_t pte)
{
	vm_prot_t prot = VM_PROT_READ;

	/* Source: arm64/arm64/pmap.c:pmap_protect() */
	if ((pte & AARCH64_ATTR_AP(AARCH64_ATTR_AP_RO)) == 0)
		prot |= VM_PROT_WRITE;
	if ((pte & AARCH64_ATTR_XN) == 0)
		prot |= VM_PROT_EXECUTE;
	return prot;
}

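/*
 * Visit every page recorded in the minidump.  The dumped L3 tables are
 * walked first so mapped pages report their kernel VA, physical and
 * direct map addresses, and protection; the page bitmap is then walked
 * to report each dumped page through its direct map address alone.
 */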
static int
_aarch64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
	struct vmstate *vm = kd->vmst;
	u_long nptes = vm->hdr.pmapsize / sizeof(aarch64_pte_t);
	u_long bmindex, dva, pa, pteindex, va;
	struct kvm_bitmap bm;
	vm_prot_t prot;
	int ret = 0;

	if (!_kvm_bitmap_init(&bm, vm->hdr.bitmapsize, &bmindex))
		return (0);

	for (pteindex = 0; pteindex < nptes; pteindex++) {
		aarch64_pte_t pte = _aarch64_pte_get(kd, pteindex);

		if ((pte & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE)
			continue;

		va = vm->hdr.kernbase + (pteindex << kd->vmst->l3_shift);
		pa = pte & ~AARCH64_ATTR_MASK;
		dva = vm->hdr.dmapbase + pa;
		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    _aarch64_entry_to_prot(pte), kd->vmst->page_size, 0)) {
			goto out;
		}
	}

	while (_kvm_bitmap_next(&bm, &bmindex)) {
		pa = _kvm_bit_id_pa(kd, bmindex, kd->vmst->page_size);
		if (pa == _KVM_PA_INVALID)
			break;
		dva = vm->hdr.dmapbase + pa;
		if (vm->hdr.dmapend < (dva + kd->vmst->page_size))
			break;
		va = 0;
		prot = VM_PROT_READ | VM_PROT_WRITE;
		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    prot, kd->vmst->page_size, 0)) {
			goto out;
		}
	}
	ret = 1;

out:
	_kvm_bitmap_deinit(&bm);
	return (ret);
}

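/*
 * Register this handler with libkvm; kvm_open() probes each registered
 * architecture until one recognizes the dump.
 */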
static struct kvm_arch kvm_aarch64_minidump = {
	.ka_probe = _aarch64_minidump_probe,
	.ka_initvtop = _aarch64_minidump_initvtop,
	.ka_freevtop = _aarch64_minidump_freevtop,
	.ka_kvatop = _aarch64_minidump_kvatop,
	.ka_native = _aarch64_native,
	.ka_walk_pages = _aarch64_minidump_walk_pages,
};

KVM_ARCH(kvm_aarch64_minidump);