/*-
 * Copyright (c) 2006 Peter Wemm
 * Copyright (c) 2019 Leandro Lupori
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/lib/libkvm/kvm_minidump_riscv.c
 */

#include <sys/param.h>
#include <vm/vm.h>

#include <kvm.h>

#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "../../sys/powerpc/include/minidump.h"
#include "kvm_private.h"
#include "kvm_powerpc64.h"

/*
 * PowerPC64 HPT machine-dependent routines for kvm and minidumps.
 *
 * Address Translation parameters:
 *
 * b = 12 (SLB base page size: 4 KB)
 * b = 24 (SLB base page size: 16 MB)
 * p = 12 (page size: 4 KB)
 * p = 24 (page size: 16 MB)
 * s = 28 (segment size: 256 MB)
 */

/* Large (huge) page params */
#define	LP_PAGE_SHIFT		24
#define	LP_PAGE_SIZE		(1ULL << LP_PAGE_SHIFT)
#define	LP_PAGE_MASK		0x00ffffffULL

/* SLB */

#define	SEGMENT_LENGTH		0x10000000ULL

#define	round_seg(x)		roundup2((uint64_t)(x), SEGMENT_LENGTH)

/* Virtual real-mode VSID in LPARs */
#define	VSID_VRMA		0x1ffffffULL

#define	SLBV_L			0x0000000000000100ULL /* Large page selector */
#define	SLBV_CLASS		0x0000000000000080ULL /* Class selector */
#define	SLBV_LP_MASK		0x0000000000000030ULL
#define	SLBV_VSID_MASK		0x3ffffffffffff000ULL /* Virtual SegID mask */
#define	SLBV_VSID_SHIFT		12

#define	SLBE_B_MASK		0x0000000006000000ULL
#define	SLBE_B_256MB		0x0000000000000000ULL
#define	SLBE_VALID		0x0000000008000000ULL /* SLB entry valid */
#define	SLBE_INDEX_MASK		0x0000000000000fffULL /* SLB index mask */
#define	SLBE_ESID_MASK		0xfffffffff0000000ULL /* Effective SegID mask */
#define	SLBE_ESID_SHIFT		28

/* PTE */

#define	LPTEH_VSID_SHIFT	12
#define	LPTEH_AVPN_MASK		0xffffffffffffff80ULL
#define	LPTEH_B_MASK		0xc000000000000000ULL
#define	LPTEH_B_256MB		0x0000000000000000ULL
#define	LPTEH_BIG		0x0000000000000004ULL	/* 4KB/16MB page */
#define	LPTEH_HID		0x0000000000000002ULL
#define	LPTEH_VALID		0x0000000000000001ULL

#define	LPTEL_RPGN		0xfffffffffffff000ULL
#define	LPTEL_LP_MASK		0x00000000000ff000ULL
#define	LPTEL_NOEXEC		0x0000000000000004ULL

/* Both Read-Write   (U: RW, S: RW) */
#define	LPTEL_BW		0x0000000000000002ULL

/* Both Read Only    (U: RO, S: RO) */
#define	LPTEL_BR		0x0000000000000003ULL

#define	LPTEL_RW		LPTEL_BW
#define	LPTEL_RO		LPTEL_BR

/*
 * PTE AVA field manipulation macros.
 *
 * AVA[0:54] = PTEH[2:56]
 * AVA[VSID] = AVA[0:49] = PTEH[2:51]
 * AVA[PAGE] = AVA[50:54] = PTEH[52:56]
 */
#define	PTEH_AVA_VSID_MASK	0x3ffffffffffff000UL
#define	PTEH_AVA_VSID_SHIFT	12
#define	PTEH_AVA_VSID(p) \
	(((p) & PTEH_AVA_VSID_MASK) >> PTEH_AVA_VSID_SHIFT)

#define	PTEH_AVA_PAGE_MASK	0x0000000000000f80UL
#define	PTEH_AVA_PAGE_SHIFT	7
#define	PTEH_AVA_PAGE(p) \
	(((p) & PTEH_AVA_PAGE_MASK) >> PTEH_AVA_PAGE_SHIFT)

/* Masks to obtain the Physical Address from PTE low 64-bit word. */
#define	PTEL_PA_MASK		0x0ffffffffffff000UL
#define	PTEL_LP_PA_MASK		0x0fffffffff000000UL

#define	PTE_HASH_MASK		0x0000007fffffffffUL

/*
 * Number of AVA/VA page bits to shift right, in order to leave only the
 * ones that should be considered.
 *
 * q = MIN(54, 77-b) (PowerISA v2.07B, 5.7.7.3)
 * n = q + 1 - 50 (VSID size in bits)
 * s(ava) = 5 - n
 * s(va) = (28 - b) - n
 *
 * q: bit number of lower limit of VA/AVA bits to compare
 * n: number of AVA/VA page bits to compare
 * s: shift amount
 * 28 - b: VA page size in bits
 */
#define	AVA_PAGE_SHIFT(b)	(5 - (MIN(54, 77-(b)) + 1 - 50))
#define	VA_PAGE_SHIFT(b)	(28 - (b) - (MIN(54, 77-(b)) + 1 - 50))

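/*
 * Worked examples, derived from the formulas above:
 *
 *   b = 12: q = 54, n = 5, s(ava) = 0, s(va) = 11
 *   b = 24: q = 53, n = 4, s(ava) = 1, s(va) = 0
 */
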
/* Kernel ESID -> VSID mapping */
#define	KERNEL_VSID_BIT	0x0000001000000000UL /* Bit set in all kernel VSIDs */
#define	KERNEL_VSID(esid) ((((((uint64_t)esid << 8) | ((uint64_t)esid >> 28)) \
				* 0x13bbUL) & (KERNEL_VSID_BIT - 1)) | \
				KERNEL_VSID_BIT)

/* Types */

typedef uint64_t	ppc64_physaddr_t;

typedef struct {
	uint64_t slbv;
	uint64_t slbe;
} ppc64_slb_entry_t;

typedef struct {
	uint64_t pte_hi;
	uint64_t pte_lo;
} ppc64_pt_entry_t;

struct hpt_data {
	ppc64_slb_entry_t *slbs;
	uint32_t slbsize;
};

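/*
 * Fill in an SLB entry for effective address 'ea', deriving its VSID with
 * the kernel ESID-to-VSID mapping and marking the entry valid.
 */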
static void
slb_fill(ppc64_slb_entry_t *slb, uint64_t ea, uint64_t i)
{
	uint64_t esid;

	esid = ea >> SLBE_ESID_SHIFT;
	slb->slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	slb->slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID | i;
}

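/*
 * Pre-generate the SLB entries that cover the kernel VM and DMAP areas,
 * so that both forward translation and the reverse lookup by VSID used by
 * walk_pages can find them.
 */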
static int
slb_init(kvm_t *kd)
{
	struct minidumphdr *hdr;
	struct hpt_data *data;
	ppc64_slb_entry_t *slb;
	uint32_t slbsize;
	uint64_t ea, i, maxmem;

	hdr = &kd->vmst->hdr;
	data = PPC64_MMU_DATA(kd);

	/* Alloc SLBs */
	maxmem = hdr->bitmapsize * 8 * PPC64_PAGE_SIZE;
	slbsize = round_seg(hdr->kernend + 1 - hdr->kernbase + maxmem) /
	    SEGMENT_LENGTH * sizeof(ppc64_slb_entry_t);
	data->slbs = _kvm_malloc(kd, slbsize);
	if (data->slbs == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate slbs");
		return (-1);
	}
	data->slbsize = slbsize;

	dprintf("%s: maxmem=0x%jx, segs=%jd, slbsize=0x%jx\n",
	    __func__, (uintmax_t)maxmem,
	    (uintmax_t)slbsize / sizeof(ppc64_slb_entry_t), (uintmax_t)slbsize);

	/*
	 * Generate the needed SLB entries.
	 *
	 * When translating addresses from EA to VA to PA, the needed SLB
	 * entry could be generated on the fly, but that is not possible for
	 * the walk_pages method, which needs to search the SLB entries by
	 * VSID in order to recover the EA that corresponds to a PTE.
	 */

	/* VM area */
	for (ea = hdr->kernbase, i = 0, slb = data->slbs;
	    ea < hdr->kernend; ea += SEGMENT_LENGTH, i++, slb++)
		slb_fill(slb, ea, i);

	/* DMAP area */
	for (ea = hdr->dmapbase;
	    ea < MIN(hdr->dmapend, hdr->dmapbase + maxmem);
	    ea += SEGMENT_LENGTH, i++, slb++) {
		slb_fill(slb, ea, i);
		if (hdr->hw_direct_map)
			slb->slbv |= SLBV_L;
	}

	return (0);
}

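/* Release the SLB array and the MMU private data. */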
static void
ppc64mmu_hpt_cleanup(kvm_t *kd)
{
	struct hpt_data *data;

	if (kd->vmst == NULL)
		return;

	data = PPC64_MMU_DATA(kd);
	free(data->slbs);
	free(data);
	PPC64_MMU_DATA(kd) = NULL;
}

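/* Allocate the MMU private data and build the SLB entries. */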
static int
ppc64mmu_hpt_init(kvm_t *kd)
{
	struct hpt_data *data;

	/* Alloc MMU data */
	data = _kvm_malloc(kd, sizeof(*data));
	if (data == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate MMU data");
		return (-1);
	}
	data->slbs = NULL;
	PPC64_MMU_DATA(kd) = data;

	if (slb_init(kd) == -1)
		goto failed;

	return (0);

failed:
	ppc64mmu_hpt_cleanup(kd);
	return (-1);
}

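/* Find the SLB entry that maps effective address 'ea', by comparing ESIDs. */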
static ppc64_slb_entry_t *
slb_search(kvm_t *kd, kvaddr_t ea)
{
	struct hpt_data *data;
	ppc64_slb_entry_t *slb;
	int i, n;

	data = PPC64_MMU_DATA(kd);
	slb = data->slbs;
	n = data->slbsize / sizeof(ppc64_slb_entry_t);

	/* SLB search */
	for (i = 0; i < n; i++, slb++) {
		if ((slb->slbe & SLBE_VALID) == 0)
			continue;

		/* Compare 36-bit ESID of EA with segment one (64-s) */
		if ((slb->slbe & SLBE_ESID_MASK) != (ea & SLBE_ESID_MASK))
			continue;

		/* Match found */
		dprintf("SEG#%02d: slbv=0x%016jx, slbe=0x%016jx\n",
		    i, (uintmax_t)slb->slbv, (uintmax_t)slb->slbe);
		break;
	}

	/* SLB not found */
	if (i == n) {
		_kvm_err(kd, kd->program, "%s: segment not found for EA 0x%jx",
		    __func__, (uintmax_t)ea);
		return (NULL);
	}
	return (slb);
}

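/*
 * Fetch the PTE at index 'ptex' from the dump's page table area and convert
 * it from big-endian (the in-memory HPT format) to host byte order.
 */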
static ppc64_pt_entry_t
pte_get(kvm_t *kd, u_long ptex)
{
	ppc64_pt_entry_t pte, *p;

	p = _kvm_pmap_get(kd, ptex, sizeof(pte));
	pte.pte_hi = be64toh(p->pte_hi);
	pte.pte_lo = be64toh(p->pte_lo);
	return (pte);
}

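/*
 * Search the PTE group selected by the hash of 'ea' for a matching entry.
 * The primary hash is tried first (hid == 0); on a miss the function recurses
 * once with LPTEH_HID to try the secondary hash.
 */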
static int
pte_search(kvm_t *kd, ppc64_slb_entry_t *slb, uint64_t hid, kvaddr_t ea,
    ppc64_pt_entry_t *p)
{
	uint64_t hash, hmask;
	uint64_t pteg, ptex;
	uint64_t va_vsid, va_page;
	int b;
	int ava_pg_shift, va_pg_shift;
	ppc64_pt_entry_t pte;

	/*
	 * Get VA:
	 *
	 * va(78) = va_vsid(50) || va_page(s-b) || offset(b)
	 *
	 * va_vsid: 50-bit VSID (78-s)
	 * va_page: (s-b)-bit VA page
	 */
	b = slb->slbv & SLBV_L ? LP_PAGE_SHIFT : PPC64_PAGE_SHIFT;
	va_vsid = (slb->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT;
	va_page = (ea & ~SLBE_ESID_MASK) >> b;

	dprintf("%s: hid=0x%jx, ea=0x%016jx, b=%d, va_vsid=0x%010jx, "
	    "va_page=0x%04jx\n",
	    __func__, (uintmax_t)hid, (uintmax_t)ea, b,
	    (uintmax_t)va_vsid, (uintmax_t)va_page);

	/*
	 * Get hash:
	 *
	 * Primary hash: va_vsid(11:49) ^ va_page(s-b)
	 * Secondary hash: ~primary_hash
	 */
	hash = (va_vsid & PTE_HASH_MASK) ^ va_page;
	if (hid)
		hash = ~hash & PTE_HASH_MASK;

	/*
	 * Get PTEG:
	 *
	 * pteg = (hash(0:38) & hmask) << 3
	 *
	 * hmask (hash mask): mask generated from HTABSIZE || 11*0b1
	 * hmask = number_of_ptegs - 1
	 */
	hmask = kd->vmst->hdr.pmapsize / (8 * sizeof(ppc64_pt_entry_t)) - 1;
	pteg = (hash & hmask) << 3;

	ava_pg_shift = AVA_PAGE_SHIFT(b);
	va_pg_shift = VA_PAGE_SHIFT(b);

	dprintf("%s: hash=0x%010jx, hmask=0x%010jx, (hash & hmask)=0x%010jx, "
	    "pteg=0x%011jx, ava_pg_shift=%d, va_pg_shift=%d\n",
	    __func__, (uintmax_t)hash, (uintmax_t)hmask,
	    (uintmax_t)(hash & hmask), (uintmax_t)pteg,
	    ava_pg_shift, va_pg_shift);

	/* Search PTEG */
	for (ptex = pteg; ptex < pteg + 8; ptex++) {
		pte = pte_get(kd, ptex);

		/* Check H, V and B */
		if ((pte.pte_hi & LPTEH_HID) != hid ||
		    (pte.pte_hi & LPTEH_VALID) == 0 ||
		    (pte.pte_hi & LPTEH_B_MASK) != LPTEH_B_256MB)
			continue;

		/* Compare AVA with VA */
		if (PTEH_AVA_VSID(pte.pte_hi) != va_vsid ||
		    (PTEH_AVA_PAGE(pte.pte_hi) >> ava_pg_shift) !=
		    (va_page >> va_pg_shift))
			continue;

		/*
		 * Check if PTE[L] matches SLBV[L].
		 *
		 * Note: this check ignores PTE[LP], as does the kernel.
		 */
		if (b == PPC64_PAGE_SHIFT) {
			if (pte.pte_hi & LPTEH_BIG)
				continue;
		} else if ((pte.pte_hi & LPTEH_BIG) == 0)
			continue;

		/* Match found */
		dprintf("%s: PTE found: ptex=0x%jx, pteh=0x%016jx, "
		    "ptel=0x%016jx\n",
		    __func__, (uintmax_t)ptex, (uintmax_t)pte.pte_hi,
		    (uintmax_t)pte.pte_lo);
		break;
	}

	/* Not found? */
	if (ptex == pteg + 8) {
		/* Try the secondary hash before giving up */
		if (hid == 0)
			return (pte_search(kd, slb, LPTEH_HID, ea, p));
		_kvm_err(kd, kd->program,
		    "%s: pte not found", __func__);
		return (-1);
	}

	/* PTE found */
	*p = pte;
	return (0);
}

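/* Translate 'ea' to a PTE: find its SLB entry, then search the HPT. */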
static int
pte_lookup(kvm_t *kd, kvaddr_t ea, ppc64_pt_entry_t *pte)
{
	ppc64_slb_entry_t *slb;

	/* First, find SLB */
	if ((slb = slb_search(kd, ea)) == NULL)
		return (-1);

	/* Next, find PTE */
	return (pte_search(kd, slb, 0, ea, pte));
}

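/*
 * Translate a kernel virtual address: on success, store the translated
 * offset in *pa and return the number of bytes mapped contiguously after it;
 * return 0 on error.
 */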
static int
ppc64mmu_hpt_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct minidumphdr *hdr;
	struct vmstate *vm;
	ppc64_pt_entry_t pte;
	ppc64_physaddr_t pgoff, pgpa;
	off_t ptoff;
	int err;

	vm = kd->vmst;
	hdr = &vm->hdr;
	pgoff = va & PPC64_PAGE_MASK;

	dprintf("%s: va=0x%016jx\n", __func__, (uintmax_t)va);

	/*
	 * A common use case of libkvm is to first find a symbol's address
	 * in the kernel image and then use kvatop to translate it, so that
	 * the corresponding data can be fetched.
	 *
	 * The problem is that, in the PowerPC64 case, the addresses of
	 * relocated data won't match those in the kernel image. This is
	 * handled here by adding the relocation offset to those addresses.
	 */
	if (va < hdr->dmapbase)
		va += hdr->startkernel - PPC64_KERNBASE;

	/* Handle DMAP */
	if (va >= hdr->dmapbase && va <= hdr->dmapend) {
		pgpa = (va & ~hdr->dmapbase) & ~PPC64_PAGE_MASK;
		ptoff = _kvm_pt_find(kd, pgpa, PPC64_PAGE_SIZE);
		if (ptoff == -1) {
			_kvm_err(kd, kd->program, "%s: "
			    "direct map address 0x%jx not in minidump",
			    __func__, (uintmax_t)va);
			goto invalid;
		}
		*pa = ptoff + pgoff;
		return (PPC64_PAGE_SIZE - pgoff);
	/* Translate VA to PA */
	} else if (va >= hdr->kernbase) {
		if ((err = pte_lookup(kd, va, &pte)) == -1) {
			_kvm_err(kd, kd->program,
			    "%s: pte not valid", __func__);
			goto invalid;
		}

		if (pte.pte_hi & LPTEH_BIG)
			pgpa = (pte.pte_lo & PTEL_LP_PA_MASK) |
			    (va & ~PPC64_PAGE_MASK & LP_PAGE_MASK);
		else
			pgpa = pte.pte_lo & PTEL_PA_MASK;
		dprintf("%s: pgpa=0x%016jx\n", __func__, (uintmax_t)pgpa);

		ptoff = _kvm_pt_find(kd, pgpa, PPC64_PAGE_SIZE);
		if (ptoff == -1) {
			_kvm_err(kd, kd->program, "%s: "
			    "physical address 0x%jx not in minidump",
			    __func__, (uintmax_t)pgpa);
			goto invalid;
		}
		*pa = ptoff + pgoff;
		return (PPC64_PAGE_SIZE - pgoff);
	} else {
		_kvm_err(kd, kd->program,
		    "%s: virtual address 0x%jx not minidumped",
		    __func__, (uintmax_t)va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}

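/* Convert the protection bits of a PTE into a vm_prot_t. */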
static vm_prot_t
entry_to_prot(ppc64_pt_entry_t *pte)
{
	vm_prot_t prot = VM_PROT_READ;

	if (pte->pte_lo & LPTEL_RW)
		prot |= VM_PROT_WRITE;
	if ((pte->pte_lo & LPTEL_NOEXEC) == 0)
		prot |= VM_PROT_EXECUTE;
	return (prot);
}

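/*
 * Find the SLB entry whose VSID matches 'vsid'; this is the reverse of
 * slb_search() and is used when walking PTEs.
 */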
static ppc64_slb_entry_t *
slb_vsid_search(kvm_t *kd, uint64_t vsid)
{
	struct hpt_data *data;
	ppc64_slb_entry_t *slb;
	int i, n;

	data = PPC64_MMU_DATA(kd);
	slb = data->slbs;
	n = data->slbsize / sizeof(ppc64_slb_entry_t);
	vsid <<= SLBV_VSID_SHIFT;

	/* SLB search */
	for (i = 0; i < n; i++, slb++) {
		/* Check if valid and compare VSID */
		if ((slb->slbe & SLBE_VALID) &&
		    (slb->slbv & SLBV_VSID_MASK) == vsid)
			break;
	}

	/* SLB not found */
	if (i == n) {
		_kvm_err(kd, kd->program,
		    "%s: segment not found for VSID 0x%jx",
		    __func__, (uintmax_t)vsid >> SLBV_VSID_SHIFT);
		return (NULL);
	}
	return (slb);
}

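/*
 * Reconstruct the effective address mapped by the PTE at index 'ptex',
 * using its VSID to locate the owning SLB entry and the PTEG index to
 * recover the hashed VA page bits.
 */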
static u_long
get_ea(kvm_t *kd, ppc64_pt_entry_t *pte, u_long ptex)
{
	ppc64_slb_entry_t *slb;
	uint64_t ea, hash, vsid;
	int b, shift;

	/* Find SLB */
	vsid = PTEH_AVA_VSID(pte->pte_hi);
	if ((slb = slb_vsid_search(kd, vsid)) == NULL)
		return (~0UL);

	/* Get ESID part of EA */
	ea = slb->slbe & SLBE_ESID_MASK;

	b = slb->slbv & SLBV_L ? LP_PAGE_SHIFT : PPC64_PAGE_SHIFT;

	/*
	 * If there are fewer than 64K PTEGs (a 16-bit hash), the upper bits
	 * of the EA page must be obtained from the PTEH's AVA.
	 */
	if (kd->vmst->hdr.pmapsize / (8 * sizeof(ppc64_pt_entry_t)) <
	    0x10000U) {
		/*
		 * Add 0 to 5 EA bits, right after the VSID.
		 * b == 12: 5 bits
		 * b == 24: 4 bits
		 */
		shift = AVA_PAGE_SHIFT(b);
		ea |= (PTEH_AVA_PAGE(pte->pte_hi) >> shift) <<
		    (SLBE_ESID_SHIFT - 5 + shift);
	}

	/* Get VA page from hash and add to EA. */
	hash = (ptex & ~7) >> 3;
	if (pte->pte_hi & LPTEH_HID)
		hash = ~hash & PTE_HASH_MASK;
	ea |= ((hash ^ (vsid & PTE_HASH_MASK)) << b) & ~SLBE_ESID_MASK;
	return (ea);
}

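/*
 * Walk every valid kernel PTE in the dump, reconstruct its EA, PA, DMAP
 * address, protection and page size, and invoke the callback for each page.
 */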
static int
ppc64mmu_hpt_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
	struct vmstate *vm;
	int ret;
	unsigned int pagesz;
	u_long dva, pa, va;
	u_long ptex, nptes;
	uint64_t vsid;

	ret = 0;
	vm = kd->vmst;
	nptes = vm->hdr.pmapsize / sizeof(ppc64_pt_entry_t);

	/* Walk through PTEs */
	for (ptex = 0; ptex < nptes; ptex++) {
		ppc64_pt_entry_t pte = pte_get(kd, ptex);
		if ((pte.pte_hi & LPTEH_VALID) == 0)
			continue;

		/* Skip non-kernel related pages, as well as VRMA ones */
		vsid = PTEH_AVA_VSID(pte.pte_hi);
		if ((vsid & KERNEL_VSID_BIT) == 0 ||
		    (vsid >> PPC64_PAGE_SHIFT) == VSID_VRMA)
			continue;

		/* Retrieve the page's VA (called an EA in PPC64 terminology) */
		if ((va = get_ea(kd, &pte, ptex)) == ~0UL)
			goto out;

		/* Get PA and page size */
		if (pte.pte_hi & LPTEH_BIG) {
			pa = pte.pte_lo & PTEL_LP_PA_MASK;
			pagesz = LP_PAGE_SIZE;
		} else {
			pa = pte.pte_lo & PTEL_PA_MASK;
			pagesz = PPC64_PAGE_SIZE;
		}

		/* Get DMAP address */
		dva = vm->hdr.dmapbase + pa;

		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    entry_to_prot(&pte), pagesz, 0))
			goto out;
	}
	ret = 1;

out:
	return (ret);
}

static struct ppc64_mmu_ops ops = {
	.init		= ppc64mmu_hpt_init,
	.cleanup	= ppc64mmu_hpt_cleanup,
	.kvatop		= ppc64mmu_hpt_kvatop,
	.walk_pages	= ppc64mmu_hpt_walk_pages,
};
struct ppc64_mmu_ops *ppc64_mmu_ops_hpt = &ops;