/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2005 Olivier Houchard
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * ARM machine dependent routines for kvm.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <kvm.h>
#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#ifdef __arm__
#include <machine/vmparam.h>
#endif

#include "kvm_private.h"
#include "kvm_arm.h"

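/*
 * Per-descriptor translation state: a cached copy of the kernel's L1 page
 * table and the program headers of the crash-dump core file.
 */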
struct vmstate {
	arm_pd_entry_t *l1pt;
	size_t phnum;
	GElf_Phdr *phdr;
};

/*
 * Translate a physical memory address to a file-offset in the crash-dump.
 * Returns the number of bytes readable contiguously from *ofs: the rest of
 * the containing segment when pgsz is 0, otherwise the rest of the enclosing
 * pgsz-sized page.  Returns 0 if no segment contains pa.
 */
static size_t
_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz)
{
	struct vmstate *vm = kd->vmst;
	GElf_Phdr *p;
	size_t n;

	p = vm->phdr;
	n = vm->phnum;
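	/* Find the program header whose physical range covers pa. */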
	while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
		p++, n--;
	if (n == 0)
		return (0);

	*ofs = (pa - p->p_paddr) + p->p_offset;
	if (pgsz == 0)
		return (p->p_memsz - (pa - p->p_paddr));
	return (pgsz - ((size_t)pa & (pgsz - 1)));
}

static void
_arm_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	free(vm->phdr);
	free(vm);
	kd->vmst = NULL;
}

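/*
 * Claim only 32-bit ARM ELF cores that are not minidumps; minidump cores
 * are handled by the arm minidump backend.
 */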
static int
_arm_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_ARM) &&
	    !_kvm_is_minidump(kd));
}

static int
_arm_initvtop(kvm_t *kd)
{
	struct vmstate *vm;
	struct kvm_nlist nl[2];
	kvaddr_t kernbase;
	arm_physaddr_t physaddr, pa;
	arm_pd_entry_t *l1pt;
	size_t i;
	int found;

	if (kd->rawdump) {
		_kvm_err(kd, kd->program, "raw dumps not supported on arm");
		return (-1);
	}

	vm = _kvm_malloc(kd, sizeof(*vm));
	if (vm == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vm;
	vm->l1pt = NULL;

	if (_kvm_read_core_phdrs(kd, &vm->phnum, &vm->phdr) == -1)
		return (-1);

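	/*
	 * If the dump recorded a PT_DUMP_DELTA header, it supplies the
	 * kernel's virtual base and physical load address directly.
	 */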
	found = 0;
	for (i = 0; i < vm->phnum; i++) {
		if (vm->phdr[i].p_type == PT_DUMP_DELTA) {
			kernbase = vm->phdr[i].p_vaddr;
			physaddr = vm->phdr[i].p_paddr;
			found = 1;
			break;
		}
	}

	nl[1].n_name = NULL;
	if (!found) {
		nl[0].n_name = "kernbase";
		if (kvm_nlist2(kd, nl) != 0) {
#ifdef __arm__
			kernbase = KERNBASE;
#else
			_kvm_err(kd, kd->program, "cannot resolve kernbase");
			return (-1);
#endif
		} else
			kernbase = nl[0].n_value;

		nl[0].n_name = "physaddr";
		if (kvm_nlist2(kd, nl) != 0) {
			_kvm_err(kd, kd->program, "couldn't get phys addr");
			return (-1);
		}
		physaddr = nl[0].n_value;
	}
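
	/*
	 * Resolve kernel_l1pa and pull in a copy of the kernel's L1 page
	 * table so that _arm_kvatop() can walk it without further symbol
	 * lookups.
	 */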
	nl[0].n_name = "kernel_l1pa";
	if (kvm_nlist2(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	if (kvm_read2(kd, (nl[0].n_value - kernbase + physaddr), &pa,
	    sizeof(pa)) != sizeof(pa)) {
		_kvm_err(kd, kd->program, "cannot read kernel_l1pa");
		return (-1);
	}
	l1pt = _kvm_malloc(kd, ARM_L1_TABLE_SIZE);
	if (l1pt == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate l1pt");
		return (-1);
	}
	if (kvm_read2(kd, pa, l1pt, ARM_L1_TABLE_SIZE) != ARM_L1_TABLE_SIZE) {
		_kvm_err(kd, kd->program, "cannot read l1pt");
		free(l1pt);
		return (-1);
	}
	vm->l1pt = l1pt;
	return (0);
}

/* from arm/pmap.c */
#define	ARM_L1_IDX(va)		((va) >> ARM_L1_S_SHIFT)

#define	l1pte_section_p(pde)	(((pde) & ARM_L1_TYPE_MASK) == ARM_L1_TYPE_S)
#define	l1pte_valid(pde)	((pde) != 0)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_index(v)		(((v) & ARM_L1_S_OFFSET) >> ARM_L2_S_SHIFT)

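/*
 * Translate a kernel virtual address by walking the cached L1 table.
 * 1MB section mappings translate directly; coarse mappings require reading
 * the L2 PTE from the core to find a 64KB large page or 4KB small page.
 */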
static int
_arm_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm = kd->vmst;
	arm_pd_entry_t pd;
	arm_pt_entry_t pte;
	arm_physaddr_t pte_pa;
	off_t pte_off;

	if (vm->l1pt == NULL)
		return (_kvm_pa2off(kd, va, pa, ARM_PAGE_SIZE));
	pd = _kvm32toh(kd, vm->l1pt[ARM_L1_IDX(va)]);
	if (!l1pte_valid(pd))
		goto invalid;
	if (l1pte_section_p(pd)) {
		/* 1MB section mapping. */
		*pa = (pd & ARM_L1_S_ADDR_MASK) + (va & ARM_L1_S_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, ARM_L1_S_SIZE));
	}
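	/* Coarse mapping: locate and read the L2 PTE from the core file. */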
	pte_pa = (pd & ARM_L1_C_ADDR_MASK) + l2pte_index(va) * sizeof(pte);
	_kvm_pa2off(kd, pte_pa, &pte_off, ARM_L1_S_SIZE);
	if (pread(kd->pmfd, &pte, sizeof(pte), pte_off) != sizeof(pte)) {
		_kvm_syserr(kd, kd->program, "_arm_kvatop: pread");
		goto invalid;
	}
	pte = _kvm32toh(kd, pte);
	if (!l2pte_valid(pte)) {
		goto invalid;
	}
	if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) {
		*pa = (pte & ARM_L2_L_FRAME) | (va & ARM_L2_L_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, ARM_L2_L_SIZE));
	}
	*pa = (pte & ARM_L2_S_FRAME) | (va & ARM_L2_S_OFFSET);
	return (_kvm_pa2off(kd, *pa, pa, ARM_PAGE_SIZE));
invalid:
	_kvm_err(kd, 0, "Invalid address (%jx)", (uintmax_t)va);
	return (0);
}

/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (e.g. m68k)
 */
#ifdef FBSD_NOT_YET
int
_kvm_mdopen(kvm_t *kd)
{

	kd->usrstack = USRSTACK;
	kd->min_uva = VM_MIN_ADDRESS;
	kd->max_uva = VM_MAXUSER_ADDRESS;

	return (0);
}
#endif

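/*
 * A dump is native if its ELF byte order matches the host's; when built
 * for a non-arm host, ARM dumps are never considered native.
 */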
int
#ifdef __arm__
_arm_native(kvm_t *kd)
#else
_arm_native(kvm_t *kd __unused)
#endif
{

#ifdef __arm__
#if _BYTE_ORDER == _LITTLE_ENDIAN
	return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB);
#else
	return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB);
#endif
#else
	return (0);
#endif
}

static struct kvm_arch kvm_arm = {
	.ka_probe = _arm_probe,
	.ka_initvtop = _arm_initvtop,
	.ka_freevtop = _arm_freevtop,
	.ka_kvatop = _arm_kvatop,
	.ka_native = _arm_native,
};

KVM_ARCH(kvm_arm);