/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2006 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * AMD64 machine dependent routines for kvm and minidumps.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <vm/vm.h>
#include <kvm.h>

#include "../../sys/amd64/include/minidump.h"

#include <limits.h>

#include "kvm_private.h"
#include "kvm_amd64.h"

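/*
 * amd64_round_page() rounds a byte count up to a 4K page boundary;
 * VA_OFF() extracts the byte offset of a virtual address within its page.
 */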
#define	amd64_round_page(x)	roundup2((kvaddr_t)(x), AMD64_PAGE_SIZE)
#define	VM_IS_V1(vm)		(vm->hdr.version == 1)
#define	VA_OFF(vm, va)		\
	(VM_IS_V1(vm) ? ((va) & (AMD64_PAGE_SIZE - 1)) : ((va) & AMD64_PAGE_MASK))

struct vmstate {
	struct minidumphdr hdr;
};

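/*
 * Derive page protections from a page table or page directory entry:
 * dumped pages are always readable, writable if PG_RW is set, and
 * executable unless PG_NX is set.
 */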
static vm_prot_t
_amd64_entry_to_prot(uint64_t entry)
{
	vm_prot_t prot = VM_PROT_READ;

	if ((entry & AMD64_PG_RW) != 0)
		prot |= VM_PROT_WRITE;
	if ((entry & AMD64_PG_NX) == 0)
		prot |= VM_PROT_EXECUTE;
	return prot;
}

/*
 * Version 2 minidumps use page directory entries, while version 1 uses page
 * table entries.
 */

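/*
 * These helpers fetch the n'th entry from the dumped page table (pmap)
 * region and convert it from the dump's little-endian layout to host
 * byte order.
 */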
static amd64_pde_t
_amd64_pde_get(kvm_t *kd, u_long pdeindex)
{
	amd64_pde_t *pde = _kvm_pmap_get(kd, pdeindex, sizeof(*pde));

	return le64toh(*pde);
}

static amd64_pte_t
_amd64_pte_get(kvm_t *kd, u_long pteindex)
{
	amd64_pte_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte));

	return le64toh(*pte);
}

/* Get the first page table entry for a given page directory index. */
static amd64_pte_t *
_amd64_pde_first_pte(kvm_t *kd, u_long pdeindex)
{
	u_long *pa;

	pa = _kvm_pmap_get(kd, pdeindex, sizeof(amd64_pde_t));
	if (pa == NULL)
		return NULL;
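	/* Map the 4K page of PTEs referenced by this PDE from the dump. */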
	return _kvm_map_get(kd, *pa & AMD64_PG_FRAME, AMD64_PAGE_SIZE);
}

static int
_amd64_minidump_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_X86_64) &&
	    _kvm_is_minidump(kd));
}

static void
_amd64_minidump_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	free(vm);
	kd->vmst = NULL;
}

static int
_amd64_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	off_t off, dump_avail_off, sparse_off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vmst;
	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
	    sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}
	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}

	/*
	 * NB: amd64 minidump header is binary compatible between version 1
	 * and version 2; version 3 adds the dumpavailsize field
	 */
	vmst->hdr.version = le32toh(vmst->hdr.version);
	if (vmst->hdr.version > MINIDUMP_VERSION || vmst->hdr.version < 1) {
		_kvm_err(kd, kd->program, "wrong minidump version. expected %d got %d",
		    MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}
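	/* The dump is little-endian; convert header fields to host order. */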
	vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize);
	vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize);
	vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize);
	vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase);
	vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
	vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);
	vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
	    le32toh(vmst->hdr.dumpavailsize) : 0;

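	/*
	 * Compute file offsets from the page-aligned region sizes in the
	 * header.  The regions appear in this order: header page, msgbuf,
	 * dump_avail (version 3 only), page bitmap, page table pages, and
	 * finally the dumped page contents.
	 */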
	/* Skip header and msgbuf */
	dump_avail_off = AMD64_PAGE_SIZE + amd64_round_page(vmst->hdr.msgbufsize);

	/* Skip dump_avail */
	off = dump_avail_off + amd64_round_page(vmst->hdr.dumpavailsize);

	sparse_off = off + amd64_round_page(vmst->hdr.bitmapsize) +
	    amd64_round_page(vmst->hdr.pmapsize);
	if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
	    vmst->hdr.bitmapsize, off, sparse_off, AMD64_PAGE_SIZE) == -1) {
		return (-1);
	}
	off += amd64_round_page(vmst->hdr.bitmapsize);

	if (_kvm_pmap_init(kd, vmst->hdr.pmapsize, off) == -1) {
		return (-1);
	}
	off += amd64_round_page(vmst->hdr.pmapsize);

	return (0);
}

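/*
 * Version 1 address translation: the dumped pmap region is a flat array of
 * 4K page table entries indexed by (va - kernbase) >> AMD64_PAGE_SHIFT.
 * Kernel map addresses are translated through that array; direct map
 * addresses are translated by their offset from dmapbase.
 */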
static int
_amd64_minidump_vatop_v1(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm;
	amd64_physaddr_t offset;
	amd64_pte_t pte;
	kvaddr_t pteindex;
	amd64_physaddr_t a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & AMD64_PAGE_MASK;

	if (va >= vm->hdr.kernbase) {
		pteindex = (va - vm->hdr.kernbase) >> AMD64_PAGE_SHIFT;
		if (pteindex >= vm->hdr.pmapsize / sizeof(pte))
			goto invalid;
		pte = _amd64_pte_get(kd, pteindex);
		if ((pte & AMD64_PG_V) == 0) {
			_kvm_err(kd, kd->program,
			    "_amd64_minidump_vatop_v1: pte not valid");
			goto invalid;
		}
		a = pte & AMD64_PG_FRAME;
		ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
		if (ofs == -1) {
			_kvm_err(kd, kd->program,
	    "_amd64_minidump_vatop_v1: physical address 0x%jx not in minidump",
			    (uintmax_t)a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AMD64_PAGE_SIZE - offset);
	} else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
		a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK;
		ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
		if (ofs == -1) {
			_kvm_err(kd, kd->program,
    "_amd64_minidump_vatop_v1: direct map address 0x%jx not in minidump",
			    (uintmax_t)va);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AMD64_PAGE_SIZE - offset);
	} else {
		_kvm_err(kd, kd->program,
	    "_amd64_minidump_vatop_v1: virtual address 0x%jx not minidumped",
		    (uintmax_t)va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}

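/*
 * Version 2 and later address translation: the dumped pmap region is an
 * array of 2M page directory entries indexed by
 * (va - kernbase) >> AMD64_PDRSHIFT.  A valid PDE either maps a 2M page
 * directly (PG_PS set) or points to a 4K page of PTEs that must itself be
 * located in and read from the dump.
 */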
static int
_amd64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	amd64_pte_t pt[AMD64_NPTEPG];
	struct vmstate *vm;
	amd64_physaddr_t offset;
	amd64_pde_t pde;
	amd64_pte_t pte;
	kvaddr_t pteindex;
	kvaddr_t pdeindex;
	amd64_physaddr_t a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & AMD64_PAGE_MASK;

	if (va >= vm->hdr.kernbase) {
		pdeindex = (va - vm->hdr.kernbase) >> AMD64_PDRSHIFT;
		if (pdeindex >= vm->hdr.pmapsize / sizeof(pde))
			goto invalid;
		pde = _amd64_pde_get(kd, pdeindex);
		if ((pde & AMD64_PG_V) == 0) {
			_kvm_err(kd, kd->program,
			    "_amd64_minidump_vatop: pde not valid");
			goto invalid;
		}
		if ((pde & AMD64_PG_PS) == 0) {
			a = pde & AMD64_PG_FRAME;
			/* TODO: Just read the single PTE */
			ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
			if (ofs == -1) {
				_kvm_err(kd, kd->program,
				    "cannot find page table entry for %ju",
				    (uintmax_t)a);
				goto invalid;
			}
			if (pread(kd->pmfd, &pt, AMD64_PAGE_SIZE, ofs) !=
			    AMD64_PAGE_SIZE) {
				_kvm_err(kd, kd->program,
				    "cannot read page table entry for %ju",
				    (uintmax_t)a);
				goto invalid;
			}
			pteindex = (va >> AMD64_PAGE_SHIFT) &
			    (AMD64_NPTEPG - 1);
			pte = le64toh(pt[pteindex]);
			if ((pte & AMD64_PG_V) == 0) {
				_kvm_err(kd, kd->program,
				    "_amd64_minidump_vatop: pte not valid");
				goto invalid;
			}
			a = pte & AMD64_PG_FRAME;
		} else {
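			/*
			 * 2M page: (va & AMD64_PDRMASK) ^ offset is the
			 * offset within the large page with the low 12 bits
			 * cleared, i.e. the 4K frame inside the 2M page.
			 */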
			a = pde & AMD64_PG_PS_FRAME;
			a += (va & AMD64_PDRMASK) ^ offset;
		}
		ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
		if (ofs == -1) {
			_kvm_err(kd, kd->program,
	    "_amd64_minidump_vatop: physical address 0x%jx not in minidump",
			    (uintmax_t)a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AMD64_PAGE_SIZE - offset);
	} else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
		a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK;
		ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
		if (ofs == -1) {
			_kvm_err(kd, kd->program,
	    "_amd64_minidump_vatop: direct map address 0x%jx not in minidump",
			    (uintmax_t)va);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AMD64_PAGE_SIZE - offset);
	} else {
		_kvm_err(kd, kd->program,
	    "_amd64_minidump_vatop: virtual address 0x%jx not minidumped",
		    (uintmax_t)va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}

static int
_amd64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0,
		    "_amd64_minidump_kvatop called in live kernel!");
		return (0);
	}
	if (((struct vmstate *)kd->vmst)->hdr.version == 1)
		return (_amd64_minidump_vatop_v1(kd, va, pa));
	else
		return (_amd64_minidump_vatop(kd, va, pa));
}

static int
_amd64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
	struct vmstate *vm = kd->vmst;
	u_long npdes = vm->hdr.pmapsize / sizeof(amd64_pde_t);
	u_long bmindex, dva, pa, pdeindex, va;
	struct kvm_bitmap bm;
	int ret = 0;
	vm_prot_t prot;
	unsigned int pgsz = AMD64_PAGE_SIZE;

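	/* The walk relies on the version 2+ PDE array; v1 is unsupported. */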
	if (vm->hdr.version < 2)
		return (0);

	if (!_kvm_bitmap_init(&bm, vm->hdr.bitmapsize, &bmindex))
		return (0);

	for (pdeindex = 0; pdeindex < npdes; pdeindex++) {
		amd64_pde_t pde = _amd64_pde_get(kd, pdeindex);
		amd64_pte_t *ptes;
		u_long i;

		va = vm->hdr.kernbase + (pdeindex << AMD64_PDRSHIFT);
		if ((pde & AMD64_PG_V) == 0)
			continue;

		if ((pde & AMD64_PG_PS) != 0) {
			/*
			 * Large page.  Iterate on each 4K page section
			 * within this page.  This differs from 4K pages in
			 * that every page here uses the same PDE to
			 * generate permissions.
			 */
			pa = (pde & AMD64_PG_PS_FRAME) +
			    ((va & AMD64_PDRMASK) ^ VA_OFF(vm, va));
			dva = vm->hdr.dmapbase + pa;
			_kvm_bitmap_set(&bm, _kvm_pa_bit_id(kd, pa, AMD64_PAGE_SIZE));
			if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
			    _amd64_entry_to_prot(pde), AMD64_NBPDR, pgsz)) {
				goto out;
			}
			continue;
		}

		/* 4K pages: pde references another page of entries. */
		ptes = _amd64_pde_first_pte(kd, pdeindex);
		/* Ignore page directory pages that were not dumped. */
		if (ptes == NULL)
			continue;

		for (i = 0; i < AMD64_NPTEPG; i++) {
			amd64_pte_t pte = le64toh(ptes[i]);

			pa = pte & AMD64_PG_FRAME;
			dva = vm->hdr.dmapbase + pa;
			if ((pte & AMD64_PG_V) != 0) {
				_kvm_bitmap_set(&bm,
				    _kvm_pa_bit_id(kd, pa, AMD64_PAGE_SIZE));
				if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
				    _amd64_entry_to_prot(pte), pgsz, 0)) {
					goto out;
				}
			}
			va += AMD64_PAGE_SIZE;
		}
	}

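	/*
	 * Report any remaining dumped pages, those not reached through the
	 * kernel page table above, via the direct map only.
	 */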
	while (_kvm_bitmap_next(&bm, &bmindex)) {
		pa = _kvm_bit_id_pa(kd, bmindex, AMD64_PAGE_SIZE);
		if (pa == _KVM_PA_INVALID)
			break;
		dva = vm->hdr.dmapbase + pa;
		if (vm->hdr.dmapend < (dva + pgsz))
			break;
		va = 0;
		/* amd64/pmap.c: create_pagetables(): dmap always R|W. */
		prot = VM_PROT_READ | VM_PROT_WRITE;
		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, prot, pgsz, 0)) {
			goto out;
		}
	}

	ret = 1;

out:
	_kvm_bitmap_deinit(&bm);
	return (ret);
}

static struct kvm_arch kvm_amd64_minidump = {
	.ka_probe = _amd64_minidump_probe,
	.ka_initvtop = _amd64_minidump_initvtop,
	.ka_freevtop = _amd64_minidump_freevtop,
	.ka_kvatop = _amd64_minidump_kvatop,
	.ka_native = _amd64_native,
	.ka_walk_pages = _amd64_minidump_walk_pages,
};

KVM_ARCH(kvm_amd64_minidump);