/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2014 Ian Lepore <ian@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifdef _KERNEL
#include "opt_acpi.h"
#include "opt_ddb.h"
#endif

/*
 * Routines for describing and initializing anything related to physical
 * memory.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/physmem.h>

#ifdef _KERNEL
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_dumpset.h>

#include <machine/md_var.h>
#include <machine/resource.h>
#else
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#endif

/*
 * These structures are used internally to keep track of regions of physical
 * ram, and regions within the physical ram that need to be excluded.  An
 * exclusion region can be excluded from crash dumps, from the vm pool of pages
 * that can be allocated, or both, depending on the exclusion flags associated
 * with the region.
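 *
 * For example (a hypothetical setup): a board might register all of DRAM as
 * a hardware region, then exclude the kernel image with EXFLAG_NOALLOC and
 * a firmware-reserved range with EXFLAG_NOALLOC | EXFLAG_NODUMP.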
 */
#ifdef DEV_ACPI
#define	MAX_HWCNT	32	/* ACPI needs more regions */
#define	MAX_EXCNT	32
#else
#define	MAX_HWCNT	16
#define	MAX_EXCNT	16
#endif

#if defined(__arm__)
#define	MAX_PHYS_ADDR	0xFFFFFFFFull
#elif defined(__aarch64__) || defined(__amd64__) || defined(__riscv)
#define	MAX_PHYS_ADDR	0xFFFFFFFFFFFFFFFFull
#endif

struct region {
	vm_paddr_t	addr;
	vm_size_t	size;
	uint32_t	flags;
};

static struct region hwregions[MAX_HWCNT];
static struct region exregions[MAX_EXCNT];

static size_t hwcnt;
static size_t excnt;

/*
 * realmem is the total number of hardware pages, excluded or not.
 * Maxmem is one greater than the last physical page number.
 */
long realmem;
long Maxmem;

#ifndef _KERNEL
static void
panic(const char *fmt, ...)
{
	va_list va;

	va_start(va, fmt);
	vfprintf(stderr, fmt, va);
	fprintf(stderr, "\n");
	va_end(va);
	__builtin_trap();
}
#endif

/*
 * Print the contents of the physical and excluded region tables using the
 * provided printf-like output function (which will be either printf or
 * db_printf).
 */
static void
physmem_dump_tables(int (*prfunc)(const char *, ...) __printflike(1, 2))
{
	size_t i;
	int flags;
	uintmax_t addr, size;
	const unsigned int mbyte = 1024 * 1024;

	prfunc("Physical memory chunk(s):\n");
	for (i = 0; i < hwcnt; ++i) {
		addr = hwregions[i].addr;
		size = hwregions[i].size;
		prfunc("  0x%08jx - 0x%08jx, %5ju MB (%7ju pages)\n", addr,
		    addr + size - 1, size / mbyte, size / PAGE_SIZE);
	}

	prfunc("Excluded memory regions:\n");
	for (i = 0; i < excnt; ++i) {
		addr  = exregions[i].addr;
		size  = exregions[i].size;
		flags = exregions[i].flags;
		prfunc("  0x%08jx - 0x%08jx, %5ju MB (%7ju pages) %s %s\n",
		    addr, addr + size - 1, size / mbyte, size / PAGE_SIZE,
		    (flags & EXFLAG_NOALLOC) ? "NoAlloc" : "",
		    (flags & EXFLAG_NODUMP)  ? "NoDump" : "");
	}

#ifdef DEBUG
	prfunc("Avail lists:\n");
	for (i = 0; phys_avail[i] != 0; ++i) {
		prfunc("  phys_avail[%zu] 0x%08jx\n", i,
		    (uintmax_t)phys_avail[i]);
	}
	for (i = 0; dump_avail[i] != 0; ++i) {
		prfunc("  dump_avail[%zu] 0x%08jx\n", i,
		    (uintmax_t)dump_avail[i]);
	}
#endif
}

/*
 * Print the contents of the physical and excluded region tables.  Used for
 * bootverbose.
 */
void
physmem_print_tables(void)
{

	physmem_dump_tables(printf);
}

/*
 * Walk the list of hardware regions, processing it against the list of
 * exclusions that contain the given exflags, and generating an "avail list".
 *
 * If maxphyssz is not zero it sets an upper limit, in bytes, on the total
 * size of the "avail list".  The walk stops once the limit is reached, and
 * the last region is cut short if necessary.
 *
 * Updates the value at *pavail with the number of non-excluded pages placed
 * in the avail list, and the value at *prealmem with the total number of
 * pages in all hw regions.
 *
 * Returns the number of entries used in the avail array.
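 *
 * For example (a hypothetical layout, assuming 4K pages): a single hw
 * region [0x40000000, 0x50000000) with one exclusion matching exflags at
 * [0x44000000, 0x48000000) would yield
 *
 *	avail[] = { 0x40000000, 0x44000000, 0x48000000, 0x50000000, 0, ... }
 *
 * i.e., start/end pairs, with the zeroed tail acting as a terminator.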
 */
static size_t
regions_to_avail(vm_paddr_t *avail, uint32_t exflags, size_t maxavail,
    uint64_t maxphyssz, long *pavail, long *prealmem)
{
	size_t acnt, exi, hwi;
	uint64_t adj, end, start, xend, xstart;
	long availmem, totalmem;
	const struct region *exp, *hwp;
	uint64_t availsz;

	bzero(avail, maxavail * sizeof(vm_paddr_t));

	totalmem = 0;
	availmem = 0;
	availsz = 0;
	acnt = 0;
	for (hwi = 0, hwp = hwregions; hwi < hwcnt; ++hwi, ++hwp) {
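		/*
		 * Round the hw region inward to whole pages: the start is
		 * rounded up and the end is rounded down.
		 */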
		adj   = round_page(hwp->addr) - hwp->addr;
		start = round_page(hwp->addr);
		end   = trunc_page(hwp->size - adj) + start;
		totalmem += atop((vm_offset_t)(end - start));
		for (exi = 0, exp = exregions; exi < excnt; ++exi, ++exp) {
			/*
			 * If the excluded region does not match given flags,
			 * continue checking with the next excluded region.
			 */
			if ((exp->flags & exflags) == 0)
				continue;
			xstart = exp->addr;
			xend   = exp->size + xstart;
			/*
			 * If the excluded region ends before this hw region,
			 * continue checking with the next excluded region.
			 */
			if (xend <= start)
				continue;
			/*
			 * If the excluded region begins after this hw region
			 * we're done because both lists are sorted.
			 */
			if (xstart >= end)
				break;
			/*
			 * If the excluded region completely covers this hw
			 * region, shrink this hw region to zero size.
			 */
			if ((start >= xstart) && (end <= xend)) {
				start = xend;
				end = xend;
				break;
			}
			/*
			 * If the excluded region falls wholly within this hw
			 * region without abutting or overlapping the beginning
			 * or end, create an available entry from the leading
			 * fragment, then adjust the start of this hw region to
			 * the end of the excluded region, and continue checking
			 * the next excluded region because another exclusion
			 * could affect the remainder of this hw region.
			 */
			if ((xstart > start) && (xend < end)) {
				if ((maxphyssz != 0) &&
				    (availsz + xstart - start > maxphyssz)) {
					xstart = maxphyssz + start - availsz;
				}
				if (xstart <= start)
					continue;
				if (acnt > 0 &&
				    avail[acnt - 1] == (vm_paddr_t)start) {
					avail[acnt - 1] = (vm_paddr_t)xstart;
				} else {
					avail[acnt++] = (vm_paddr_t)start;
					avail[acnt++] = (vm_paddr_t)xstart;
				}
				availsz += (xstart - start);
				availmem += atop((vm_offset_t)(xstart - start));
				start = xend;
				continue;
			}
			/*
			 * The excluded region overlaps either the start or
			 * end of this hardware region (but not both); trim
			 * the excluded portion off the appropriate end.
			 */
			if (xstart <= start)
				start = xend;
			else
				end = xstart;
		}
		/*
		 * If the trimming actions above left a non-zero size, create an
		 * available entry for it.
		 */
		if (end > start) {
			if ((maxphyssz != 0) &&
			    (availsz + end - start > maxphyssz)) {
				end = maxphyssz + start - availsz;
			}
			if (end <= start)
				break;

			if (acnt > 0 && avail[acnt - 1] == (vm_paddr_t)start) {
				avail[acnt - 1] = (vm_paddr_t)end;
			} else {
				avail[acnt++] = (vm_paddr_t)start;
				avail[acnt++] = (vm_paddr_t)end;
			}
			availsz += end - start;
			availmem += atop((vm_offset_t)(end - start));
		}
		if (acnt >= maxavail)
			panic("Not enough space in the dump/phys_avail arrays");
	}

	if (pavail != NULL)
		*pavail = availmem;
	if (prealmem != NULL)
		*prealmem = totalmem;
	return (acnt);
}

/*
 * Merge the region at idx with any following regions that it now overlaps
 * or abuts and that have the same flags; returns the new region count.
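 *
 * For example (hypothetical values): if regions[idx] was just grown to
 * [0x1000, 0x5000) and regions[idx + 1] is [0x3000, 0x6000) with the same
 * flags, the two collapse into the single entry [0x1000, 0x6000).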
 */
static size_t
merge_upper_regions(struct region *regions, size_t rcnt, size_t idx)
{
	struct region *lower, *upper;
	vm_paddr_t lend, uend;
	size_t i, mergecnt, movecnt;

	lower = &regions[idx];
	lend = lower->addr + lower->size;

	/*
	 * Continue merging in upper entries as long as we have entries to
	 * merge; the new block could have spanned more than one, although one
	 * is likely the common case.
	 */
	for (i = idx + 1; i < rcnt; i++) {
		upper = &regions[i];
		if (lend < upper->addr || lower->flags != upper->flags)
			break;

		uend = upper->addr + upper->size;
		if (uend > lend) {
			lower->size += uend - lend;
			lend = lower->addr + lower->size;
		}

		if (uend >= lend) {
			/*
			 * If we didn't move past the end of the upper region,
			 * then we don't need to bother checking for another
			 * merge because it would have been done already.  Just
			 * increment i once more to maintain the invariant that
			 * i is one past the last entry merged.
			 */
			i++;
			break;
		}
	}

	/*
	 * We merged in the entries from [idx + 1, i); physically move the tail
	 * end at [i, rcnt) if we need to.
	 */
	mergecnt = i - (idx + 1);
	if (mergecnt > 0) {
		movecnt = rcnt - i;
		if (movecnt == 0) {
			/* Merged all the way to the end, just decrease rcnt. */
			rcnt = idx + 1;
		} else {
			memmove(&regions[idx + 1], &regions[idx + mergecnt + 1],
			    movecnt * sizeof(*regions));
			rcnt -= mergecnt;
		}
	}
	return (rcnt);
}

/*
 * Insertion-sort a new entry into a regions list; sorted by start address.
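 *
 * For example (hypothetical values): inserting [0x2000, 0x6000) into a list
 * whose only entry is [0x4000, 0x8000), with identical flags, coalesces the
 * two into the single entry [0x2000, 0x8000) rather than adding a new one.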
 */
static size_t
insert_region(struct region *regions, size_t rcnt, vm_paddr_t addr,
    vm_size_t size, uint32_t flags)
{
	size_t i;
	vm_paddr_t nend, rend;
	struct region *ep, *rp;

	nend = addr + size;
	ep = regions + rcnt;
	for (i = 0, rp = regions; i < rcnt; ++i, ++rp) {
		rend = rp->addr + rp->size;
		if (flags == rp->flags) {
			if (addr <= rp->addr && nend >= rp->addr) {
				/*
				 * New mapping overlaps the start of this
				 * region: grow it downward to the new start
				 * address, then extend its end if the new
				 * mapping reaches past it.
				 */
				rp->size += rp->addr - addr;
				rp->addr = addr;
				if (nend > rend) {
					rp->size += nend - rend;
					rcnt = merge_upper_regions(regions,
					    rcnt, i);
				}
				return (rcnt);
			} else if (addr <= rend && nend > rp->addr) {
				/*
				 * New mapping is either entirely contained
				 * within this region, or overlaps at its end.
				 */
				if (nend > rend) {
					rp->size += nend - rend;
					rcnt = merge_upper_regions(regions,
					    rcnt, i);
				}
				return (rcnt);
			}
		} else if ((flags != 0) && (rp->flags != 0)) {
			/*
			 * If we're duplicating an entry that already exists
			 * exactly, just upgrade its flags as needed.  We could
			 * do more if we find differently-specified flags
			 * clipping existing exclusion regions, but that's
			 * probably rare.
			 */
			if (addr == rp->addr && nend == rend) {
				rp->flags |= flags;
				return (rcnt);
			}
		}

		if (addr < rp->addr) {
			bcopy(rp, rp + 1, (ep - rp) * sizeof(*rp));
			break;
		}
	}
	rp->addr  = addr;
	rp->size  = size;
	rp->flags = flags;
	rcnt++;

	return (rcnt);
}

/*
 * Add a hardware memory region.
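 *
 * Typically called from early machine-dependent startup code, e.g. with
 * ranges parsed from an FDT /memory node or from the EFI memory map.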
 */
void
physmem_hardware_region(uint64_t pa, uint64_t sz)
{
	/*
	 * Filter out the page at PA 0x00000000.  The VM can't handle it, as
	 * pmap_extract() == 0 means failure.
	 */
	if (pa == 0) {
		if (sz <= PAGE_SIZE)
			return;
		pa  = PAGE_SIZE;
		sz -= PAGE_SIZE;
	} else if (pa > MAX_PHYS_ADDR) {
		/* This range is past usable memory, ignore it */
		return;
	}

	/*
	 * Also filter out the page at the end of the physical address space --
	 * if addr is non-zero and addr+size is zero we wrapped to the next byte
	 * beyond what vm_paddr_t can express.  That leads to a NULL pointer
	 * deref early in startup; work around it by leaving the last page out.
	 *
	 * XXX This just in:  subtract out a whole megabyte, not just 1 page.
	 * Reducing the size by anything less than 1MB results in the NULL
	 * pointer deref in _vm_map_lock_read().  Better to give up a megabyte
	 * than leave some folks with an unusable system while we investigate.
	 */
	if ((pa + sz) > (MAX_PHYS_ADDR - 1024 * 1024)) {
		sz = MAX_PHYS_ADDR - pa + 1;
		if (sz <= 1024 * 1024)
			return;
		sz -= 1024 * 1024;
	}

	if (sz > 0 && hwcnt < nitems(hwregions))
		hwcnt = insert_region(hwregions, hwcnt, pa, sz, 0);
}

/*
 * Add an exclusion region.
 */
void
physmem_exclude_region(vm_paddr_t pa, vm_size_t sz, uint32_t exflags)
{
	vm_offset_t adj;

	/*
	 * Truncate the starting address down to a page boundary, and round the
	 * ending page up to a page boundary.
	 */
	adj = pa - trunc_page(pa);
	pa  = trunc_page(pa);
	sz  = round_page(sz + adj);

	if (excnt >= nitems(exregions))
		panic("failed to exclude region %#jx-%#jx", (uintmax_t)pa,
		    (uintmax_t)(pa + sz));
	excnt = insert_region(exregions, excnt, pa, sz, exflags);
}

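/*
 * Fill in avail[] with start/end pairs of allocatable physical memory,
 * i.e. all hw regions minus the exclusions flagged EXFLAG_NOALLOC; returns
 * the number of array entries used.
 */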
size_t
physmem_avail(vm_paddr_t *avail, size_t maxavail)
{

	return (regions_to_avail(avail, EXFLAG_NOALLOC, maxavail, 0, NULL,
	    NULL));
}

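/*
 * Return true if the entire range [pa, pa + sz) lies within a single
 * excluded region, false otherwise.
 */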
bool
physmem_excluded(vm_paddr_t pa, vm_size_t sz)
{
	const struct region *exp;
	size_t exi;

	for (exi = 0, exp = exregions; exi < excnt; ++exi, ++exp) {
		if (pa < exp->addr || pa + sz > exp->addr + exp->size)
			continue;
		return (true);
	}
	return (false);
}

#ifdef _KERNEL
/*
 * Process all the regions added earlier into the global avail lists.
 *
 * Updates the kernel global 'physmem' with the number of physical pages
 * available for use (all pages not in any exclusion region).
 *
 * Updates the kernel global 'Maxmem' with the page number one greater than
 * the last page of physical memory in the system.
 */
void
physmem_init_kernel_globals(void)
{
	size_t nextidx;
	u_long hwphyssz;

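	/* The hw.physmem tunable, if set, caps usable memory, in bytes. */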
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", &hwphyssz);

	regions_to_avail(dump_avail, EXFLAG_NODUMP, PHYS_AVAIL_ENTRIES,
	    hwphyssz, NULL, NULL);
	nextidx = regions_to_avail(phys_avail, EXFLAG_NOALLOC,
	    PHYS_AVAIL_ENTRIES, hwphyssz, &physmem, &realmem);
	if (nextidx == 0)
		panic("No memory entries in phys_avail");
	Maxmem = atop(phys_avail[nextidx - 1]);
}

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND_FLAGS(physmem, db_show_physmem, DB_CMD_MEMSAFE)
{

	physmem_dump_tables(db_printf);
}

#endif /* DDB */

/*
 * ram pseudo driver - this reserves I/O space resources corresponding to
 * physical memory regions.
 */

static void
ram_identify(driver_t *driver, device_t parent)
{

	if (resource_disabled("ram", 0))
		return;
	if (BUS_ADD_CHILD(parent, 0, "ram", 0) == NULL)
		panic("ram_identify");
}

static int
ram_probe(device_t dev)
{

	device_quiet(dev);
	device_set_desc(dev, "System RAM");
	return (BUS_PROBE_SPECIFIC);
}

static int
ram_attach(device_t dev)
{
	vm_paddr_t avail_list[PHYS_AVAIL_COUNT];
	rman_res_t start, end;
	int rid, i;

	rid = 0;

	/* Get the avail list. */
	regions_to_avail(avail_list, EXFLAG_NOALLOC | EXFLAG_NODUMP,
	    PHYS_AVAIL_COUNT, 0, NULL, NULL);

	/* Reserve all memory regions. */
	for (i = 0; avail_list[i + 1] != 0; i += 2) {
		start = avail_list[i];
		end = avail_list[i + 1];

		if (bootverbose)
			device_printf(dev,
			    "reserving memory region:   %jx-%jx\n",
			    (uintmax_t)start, (uintmax_t)end);

		if (bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, start, end,
		    end - start, 0) == NULL)
			panic("ram_attach: resource %d failed to attach", rid);
		rid++;
	}

	return (0);
}

static device_method_t ram_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	ram_identify),
	DEVMETHOD(device_probe,		ram_probe),
	DEVMETHOD(device_attach,	ram_attach),

	DEVMETHOD_END
};

DEFINE_CLASS_0(ram, ram_driver, ram_methods, /* no softc */ 1);
DRIVER_MODULE(ram, nexus, ram_driver, 0, 0);
#endif /* _KERNEL */
