/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2008, Juniper Networks, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kerneldump.h>
#include <sys/mman.h>

#include <elf.h>
#include <kvm.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "kvm_private.h"

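/*
 * Per-core-file state: a read-only mapping of the file's headers, the
 * size of that mapping, the size of any leading kerneldump header, and
 * pointers to the ELF executable and program headers within the mapping.
 */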
struct vmstate {
	void		*map;
	size_t		mapsz;
	size_t		dmphdrsz;
	Elf64_Ehdr	*eh;
	Elf64_Phdr	*ph;
};

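/*
 * Sanity-check that the mapped file starts with an ELF64 core header
 * for powerpc64, in either byte order.
 */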
static int
valid_elf_header(kvm_t *kd, Elf64_Ehdr *eh)
{

	if (!IS_ELF(*eh))
		return (0);
	if (eh->e_ident[EI_CLASS] != ELFCLASS64)
		return (0);
	if (eh->e_ident[EI_DATA] != ELFDATA2MSB &&
	    eh->e_ident[EI_DATA] != ELFDATA2LSB)
		return (0);
	if (eh->e_ident[EI_VERSION] != EV_CURRENT)
		return (0);
	if (eh->e_ident[EI_OSABI] != ELFOSABI_STANDALONE)
		return (0);
	if (_kvm16toh(kd, eh->e_type) != ET_CORE)
		return (0);
	if (_kvm16toh(kd, eh->e_machine) != EM_PPC64)
		return (0);
	/* Can't think of anything else to check... */
	return (1);
}

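/*
 * If the buffer holds a kerneldump header for a powerpc64 dump (either
 * byte order), return its size so it can be skipped; otherwise return 0.
 */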
static size_t
dump_header_size(struct kerneldumpheader *dh)
{

	if (strcmp(dh->magic, KERNELDUMPMAGIC) != 0)
		return (0);
	if (strcmp(dh->architecture, "powerpc64") != 0 &&
	    strcmp(dh->architecture, "powerpc64le") != 0)
		return (0);
	/* That should do it... */
	return (sizeof(*dh));
}

/*
 * Map the ELF headers into the process' address space. We do this in
 * two steps: first the ELF header itself, and then, using that
 * information, the whole set of headers.
 */
static int
powerpc_maphdrs(kvm_t *kd)
{
	struct vmstate *vm;
	size_t mapsz;

	vm = kd->vmst;

	vm->mapsz = sizeof(*vm->eh) + sizeof(struct kerneldumpheader);
	vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
	if (vm->map == MAP_FAILED) {
		_kvm_err(kd, kd->program, "cannot map corefile");
		return (-1);
	}
	vm->dmphdrsz = 0;
	vm->eh = vm->map;
	if (!valid_elf_header(kd, vm->eh)) {
		/*
		 * Hmmm, no ELF header. Maybe we still have a dump header.
		 * This is normal when the core file wasn't created by
		 * savecore(8), but instead was dumped over TFTP. We can
		 * easily skip the dump header...
		 */
		vm->dmphdrsz = dump_header_size(vm->map);
		if (vm->dmphdrsz == 0)
			goto inval;
		vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz);
		if (!valid_elf_header(kd, vm->eh))
			goto inval;
	}
	mapsz = _kvm16toh(kd, vm->eh->e_phentsize) *
	    _kvm16toh(kd, vm->eh->e_phnum) + _kvm64toh(kd, vm->eh->e_phoff);
	munmap(vm->map, vm->mapsz);

	/* Map all headers. */
	vm->mapsz = vm->dmphdrsz + mapsz;
	vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
	if (vm->map == MAP_FAILED) {
		_kvm_err(kd, kd->program, "cannot map corefile headers");
		return (-1);
	}
	vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz);
	vm->ph = (void *)((uintptr_t)vm->eh +
	    (uintptr_t)_kvm64toh(kd, vm->eh->e_phoff));
	return (0);

 inval:
	_kvm_err(kd, kd->program, "invalid corefile");
	return (-1);
}

/*
 * Determine the offset within the corefile corresponding to the given
 * virtual address. Return the number of contiguous bytes in the
 * corefile, or 0 when the virtual address is invalid.
 */
static size_t
powerpc64_va2off(kvm_t *kd, kvaddr_t va, off_t *ofs)
{
	struct vmstate *vm = kd->vmst;
	Elf64_Phdr *ph;
	int nph;

	ph = vm->ph;
	nph = _kvm16toh(kd, vm->eh->e_phnum);
	while (nph && (va < _kvm64toh(kd, ph->p_vaddr) ||
	    va >= _kvm64toh(kd, ph->p_vaddr) + _kvm64toh(kd, ph->p_memsz))) {
		nph--;
		ph = (void *)((uintptr_t)ph +
		    _kvm16toh(kd, vm->eh->e_phentsize));
	}
	if (nph == 0)
		return (0);

	/* Segment found. Return file offset and range. */
	*ofs = vm->dmphdrsz + _kvm64toh(kd, ph->p_offset) +
	    (va - _kvm64toh(kd, ph->p_vaddr));
	return (_kvm64toh(kd, ph->p_memsz) -
	    (va - _kvm64toh(kd, ph->p_vaddr)));
}

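/*
 * Release the header mapping and the per-core-file state.
 */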
static void
_powerpc64_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	/*
	 * Unmap from the base of the mapping: the ELF header may sit
	 * dmphdrsz bytes past it, in which case it is not page-aligned
	 * and munmap(2) on it would fail.
	 */
	if (vm->map != MAP_FAILED)
		munmap(vm->map, vm->mapsz);
	free(vm);
	kd->vmst = NULL;
}

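/*
 * Probe for a big-endian powerpc64 kernel: the image must be ELF64
 * for EM_PPC64 with MSB byte order.
 */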
static int
_powerpc64_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_PPC64) &&
	    kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB);
}

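/*
 * Likewise, but for a little-endian powerpc64 kernel.
 */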
static int
_powerpc64le_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_PPC64) &&
	    kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB);
}

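/*
 * Allocate the per-core-file state and map the core file's headers.
 */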
static int
_powerpc64_initvtop(kvm_t *kd)
{

	kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
	if (kd->vmst == NULL)
		return (-1);

	if (powerpc_maphdrs(kd) == -1)
		return (-1);

	return (0);
}

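/*
 * Translate a kernel virtual address into a core file offset. Only
 * virtual-mode dumps, marked by an all-ones p_paddr in the first
 * program header, are supported.
 */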
static int
_powerpc64_kvatop(kvm_t *kd, kvaddr_t va, off_t *ofs)
{
	struct vmstate *vm;

	vm = kd->vmst;
	if (_kvm64toh(kd, vm->ph->p_paddr) == 0xffffffffffffffff)
		return ((int)powerpc64_va2off(kd, va, ofs));

	_kvm_err(kd, kd->program, "Raw corefile not supported");
	return (0);
}

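/*
 * True when compiled for a big-endian 64-bit powerpc host.
 */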
static int
_powerpc64_native(kvm_t *kd __unused)
{

#if defined(__powerpc64__) && BYTE_ORDER == BIG_ENDIAN
	return (1);
#else
	return (0);
#endif
}

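/*
 * True when compiled for a little-endian 64-bit powerpc host.
 */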
static int
_powerpc64le_native(kvm_t *kd __unused)
{

#if defined(__powerpc64__) && BYTE_ORDER == LITTLE_ENDIAN
	return (1);
#else
	return (0);
#endif
}

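/*
 * Method tables for the two byte orders. They share the vtop machinery
 * above and differ only in the probe and nativeness checks.
 */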
static struct kvm_arch kvm_powerpc64 = {
	.ka_probe = _powerpc64_probe,
	.ka_initvtop = _powerpc64_initvtop,
	.ka_freevtop = _powerpc64_freevtop,
	.ka_kvatop = _powerpc64_kvatop,
	.ka_native = _powerpc64_native,
};

static struct kvm_arch kvm_powerpc64le = {
	.ka_probe = _powerpc64le_probe,
	.ka_initvtop = _powerpc64_initvtop,
	.ka_freevtop = _powerpc64_freevtop,
	.ka_kvatop = _powerpc64_kvatop,
	.ka_native = _powerpc64le_native,
};

KVM_ARCH(kvm_powerpc64);
KVM_ARCH(kvm_powerpc64le);
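
/*
 * A minimal consumer-side sketch, for illustration only: the public
 * libkvm entry points below dispatch to the handlers registered above
 * once _powerpc64_probe() or _powerpc64le_probe() matches the core.
 * The file paths and the address are placeholders; on a non-native
 * host, kvm_open2(3) with a symbol resolver would be used instead of
 * kvm_openfiles(3).
 *
 *	#include <sys/param.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <kvm.h>
 *	#include <limits.h>
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	char buf[256];
 *	kvm_t *kd;
 *
 *	kd = kvm_openfiles("/boot/kernel/kernel", "/var/crash/vmcore.0",
 *	    NULL, O_RDONLY, errbuf);
 *	if (kd == NULL)
 *		errx(1, "kvm_openfiles: %s", errbuf);
 *	if (kvm_read2(kd, (kvaddr_t)0xc000000000000000, buf,
 *	    sizeof(buf)) == -1)
 *		warnx("kvm_read2: %s", kvm_geterr(kd));
 *	kvm_close(kd);
 */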