/*-
 * Copyright (c) 2005, Bosko Milekic <bmilekic@FreeBSD.org>.
 * Copyright (c) 2010 Isilon Systems, Inc. (http://www.isilon.com/)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/vm/memguard.c 325037 2017-10-27 14:23:53Z markj $");

/*
 * MemGuard is a simple replacement allocator for debugging only
 * which provides ElectricFence-style memory barrier protection on
 * objects being allocated, and is used to detect tampering-after-free
 * scenarios.
 *
 * See the memguard(9) man page for more information on using MemGuard.
 */
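
/*
 * Illustrative usage (a sketch; see memguard(9) for the authoritative
 * procedure): build a kernel with "options DEBUG_MEMGUARD", optionally
 * set the loader tunable vm.memguard.divisor to size the reservation,
 * then point MemGuard at a malloc(9) type by its short description at
 * runtime, for example:
 *
 *	sysctl vm.memguard.desc=ithread
 *
 * The "ithread" name is purely an example; any ks_shortdesc string, or a
 * uma(9) zone name (see memguard_cmp_zone() below), may be given.
 */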

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma_int.h>
#include <vm/memguard.h>

static SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
/*
 * The vm_memguard_divisor variable controls how much of kmem_map should be
 * reserved for MemGuard.
 */
static u_int vm_memguard_divisor;
SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN,
    &vm_memguard_divisor,
    0, "(kmem_size/memguard_divisor) == memguard submap size");

/*
 * Short description (ks_shortdesc) of memory type to monitor.
 */
static char vm_memguard_desc[128] = "";
static struct malloc_type *vm_memguard_mtype = NULL;
TUNABLE_STR("vm.memguard.desc", vm_memguard_desc, sizeof(vm_memguard_desc));
static int
memguard_sysctl_desc(SYSCTL_HANDLER_ARGS)
{
	char desc[sizeof(vm_memguard_desc)];
	int error;

	strlcpy(desc, vm_memguard_desc, sizeof(desc));
	error = sysctl_handle_string(oidp, desc, sizeof(desc), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&malloc_mtx);
	/* If mtp is NULL, it will be initialized in memguard_cmp_mtp(). */
	vm_memguard_mtype = malloc_desc2type(desc);
	strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc));
	mtx_unlock(&malloc_mtx);
	return (error);
}
SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    memguard_sysctl_desc, "A", "Short description of memory type to monitor");

static vm_offset_t memguard_cursor;
static vm_offset_t memguard_base;
static vm_size_t memguard_mapsize;
static vm_size_t memguard_physlimit;
static u_long memguard_wasted;
static u_long memguard_wrap;
static u_long memguard_succ;
static u_long memguard_fail_kva;
static u_long memguard_fail_pgs;

SYSCTL_ULONG(_vm_memguard, OID_AUTO, cursor, CTLFLAG_RD,
    &memguard_cursor, 0, "MemGuard cursor");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, mapsize, CTLFLAG_RD,
    &memguard_mapsize, 0, "MemGuard private arena size");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, phys_limit, CTLFLAG_RD,
    &memguard_physlimit, 0, "Limit on MemGuard memory consumption");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wasted, CTLFLAG_RD,
    &memguard_wasted, 0, "Excess memory used through page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wrapcnt, CTLFLAG_RD,
    &memguard_wrap, 0, "MemGuard cursor wrap count");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, numalloc, CTLFLAG_RD,
    &memguard_succ, 0, "Count of successful MemGuard allocations");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_kva, CTLFLAG_RD,
    &memguard_fail_kva, 0, "MemGuard failures due to lack of KVA");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD,
    &memguard_fail_pgs, 0, "MemGuard failures due to lack of pages");

#define MG_GUARD_AROUND		0x001
#define MG_GUARD_ALLLARGE	0x002
#define MG_GUARD_NOFREE		0x004
static int memguard_options = MG_GUARD_AROUND;
TUNABLE_INT("vm.memguard.options", &memguard_options);
SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RW,
    &memguard_options, 0,
    "MemGuard options:\n"
    "\t0x001 - add guard pages around each allocation\n"
    "\t0x002 - always use MemGuard for allocations over a page\n"
    "\t0x004 - guard uma(9) zones with UMA_ZONE_NOFREE flag");

static u_int memguard_minsize;
static u_long memguard_minsize_reject;
SYSCTL_UINT(_vm_memguard, OID_AUTO, minsize, CTLFLAG_RW,
    &memguard_minsize, 0, "Minimum size for page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, minsize_reject, CTLFLAG_RD,
    &memguard_minsize_reject, 0, "# times rejected for size");

static u_int memguard_frequency;
static u_long memguard_frequency_hits;
TUNABLE_INT("vm.memguard.frequency", &memguard_frequency);
SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RW,
    &memguard_frequency, 0, "Times in 100000 that MemGuard will randomly run");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
    &memguard_frequency_hits, 0, "# times MemGuard randomly chose");


/*
 * Return a fudged value to be used for vm_kmem_size for allocating
 * the kmem_map.  The memguard memory will be a submap.
 */
unsigned long
memguard_fudge(unsigned long km_size, const struct vm_map *parent_map)
{
	u_long mem_pgs, parent_size;

	vm_memguard_divisor = 10;
	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);

	parent_size = vm_map_max(parent_map) - vm_map_min(parent_map) +
	    PAGE_SIZE;
	/* Pick a conservative value if provided value sucks. */
	if ((vm_memguard_divisor <= 0) ||
	    ((parent_size / vm_memguard_divisor) == 0))
		vm_memguard_divisor = 10;
	/*
	 * Limit consumption of physical pages to
	 * 1/vm_memguard_divisor of system memory.  If the KVA is
	 * smaller than this then the KVA limit comes into play first.
	 * This prevents memguard's page promotions from completely
	 * using up memory, since most malloc(9) calls are sub-page.
	 */
	mem_pgs = cnt.v_page_count;
	memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
	/*
	 * We want as much KVA as we can take safely.  Use at most our
	 * allotted fraction of the parent map's size.  Limit this to
	 * twice the physical memory to avoid using too much memory as
	 * pagetable pages (size must be multiple of PAGE_SIZE).
	 */
	memguard_mapsize = round_page(parent_size / vm_memguard_divisor);
	if (memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
		memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
	if (km_size + memguard_mapsize > parent_size)
		memguard_mapsize = 0;
	return (km_size + memguard_mapsize);
}
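
/*
 * Worked example (numbers purely illustrative): with a 16 GB parent map,
 * 4 GB of RAM (roughly 1M 4 KB pages) and the default divisor of 10, the
 * code above sets memguard_physlimit to about 400 MB and reserves
 * round_page(16 GB / 10), roughly 1.6 GB, of KVA; the 2 * physical-memory
 * cap (8 GB here) does not kick in.
 */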

/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single VM map (contiguous chunk of address space).
 */
void
memguard_init(vmem_t *parent)
{
	vm_offset_t base;

	vmem_alloc(parent, memguard_mapsize, M_BESTFIT | M_WAITOK, &base);
	vmem_init(memguard_arena, "memguard arena", base, memguard_mapsize,
	    PAGE_SIZE, 0, M_WAITOK);
	memguard_cursor = base;
	memguard_base = base;

	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
	printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
	printf("\tMEMGUARD map size: %jd KBytes\n",
	    (uintmax_t)memguard_mapsize >> 10);
}

/*
 * Run things that can't be done as early as memguard_init().
 */
static void
memguard_sysinit(void)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_STATIC_CHILDREN(_vm_memguard);

	SYSCTL_ADD_UAUTO(NULL, parent, OID_AUTO, "mapstart", CTLFLAG_RD,
	    &memguard_base, "MemGuard KVA base");
	SYSCTL_ADD_UAUTO(NULL, parent, OID_AUTO, "maplimit", CTLFLAG_RD,
	    &memguard_mapsize, "MemGuard KVA size");
#if 0
	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapused", CTLFLAG_RD,
	    &memguard_map->size, "MemGuard KVA used");
#endif
}
SYSINIT(memguard, SI_SUB_KLD, SI_ORDER_ANY, memguard_sysinit, NULL);

/*
 * v2sizep() converts a virtual address of the first page allocated for
 * an item to a pointer to u_long recording the size of the original
 * allocation request.
 *
 * This routine is very similar to those defined by UMA in uma_int.h.
 * The difference is that this routine stores the originally allocated
 * size in one of the page's fields that is unused when the page is
 * wired rather than the object field, which is used.
 */
static u_long *
v2sizep(vm_offset_t va)
{
	vm_paddr_t pa;
	struct vm_page *p;

	pa = pmap_kextract(va);
	if (pa == 0)
		panic("MemGuard detected double-free of %p", (void *)va);
	p = PHYS_TO_VM_PAGE(pa);
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
	return (&p->plinks.memguard.p);
}

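/*
 * v2sizev() likewise maps the first page of an allocation to the u_long
 * recording the size of the backing KVA reservation (the page-rounded
 * request plus any guard pages), kept in the companion plinks.memguard
 * field.
 */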
static u_long *
v2sizev(vm_offset_t va)
{
	vm_paddr_t pa;
	struct vm_page *p;

	pa = pmap_kextract(va);
	if (pa == 0)
		panic("MemGuard detected double-free of %p", (void *)va);
	p = PHYS_TO_VM_PAGE(pa);
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
	return (&p->plinks.memguard.v);
}

/*
 * Allocate a single object of specified size with specified flags
 * (either M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long req_size, int flags)
{
	vm_offset_t addr, origaddr;
	u_long size_p, size_v;
	int do_guard, rv;

	size_p = round_page(req_size);
	if (size_p == 0)
		return (NULL);
	/*
	 * To ensure there are holes on both sides of the allocation,
	 * request 2 extra pages of KVA.  We will only actually add a
	 * vm_map_entry and get pages for the original request.  Save
	 * the value of memguard_options so we have a consistent
	 * value.
	 */
	size_v = size_p;
	do_guard = (memguard_options & MG_GUARD_AROUND) != 0;
	if (do_guard)
		size_v += 2 * PAGE_SIZE;

	/*
	 * When we pass our memory limit, reject sub-page allocations.
	 * Page-size and larger allocations will use the same amount
	 * of physical memory whether we allocate or hand off to
	 * uma_large_alloc(), so keep those.
	 */
	if (vmem_size(memguard_arena, VMEM_ALLOC) >= memguard_physlimit &&
	    req_size < PAGE_SIZE) {
		addr = (vm_offset_t)NULL;
		memguard_fail_pgs++;
		goto out;
	}
	/*
	 * Keep a moving cursor so we don't recycle KVA as long as
	 * possible.  It's not perfect, since we don't know in what
	 * order previous allocations will be free'd, but it's simple
	 * and fast, and requires O(1) additional storage if guard
	 * pages are not used.
	 *
	 * XXX This scheme will lead to greater fragmentation of the
	 * map, unless vm_map_findspace() is tweaked.
	 */
	for (;;) {
		if (vmem_xalloc(memguard_arena, size_v, 0, 0, 0,
		    memguard_cursor, VMEM_ADDR_MAX,
		    M_BESTFIT | M_NOWAIT, &origaddr) == 0)
			break;
		/*
		 * The map has no space.  This may be due to
		 * fragmentation, or because the cursor is near the
		 * end of the map.
		 */
		if (memguard_cursor == memguard_base) {
			memguard_fail_kva++;
			addr = (vm_offset_t)NULL;
			goto out;
		}
		memguard_wrap++;
		memguard_cursor = memguard_base;
	}
	addr = origaddr;
	if (do_guard)
		addr += PAGE_SIZE;
	rv = kmem_back(kmem_object, addr, size_p, flags);
	if (rv != KERN_SUCCESS) {
		vmem_xfree(memguard_arena, origaddr, size_v);
		memguard_fail_pgs++;
		addr = (vm_offset_t)NULL;
		goto out;
	}
	memguard_cursor = addr + size_v;
	*v2sizep(trunc_page(addr)) = req_size;
	*v2sizev(trunc_page(addr)) = size_v;
	memguard_succ++;
	if (req_size < PAGE_SIZE) {
		memguard_wasted += (PAGE_SIZE - req_size);
		if (do_guard) {
			/*
			 * Align the request to 16 bytes, and return
			 * an address near the end of the page, to
			 * better detect array overrun.
			 */
			req_size = roundup2(req_size, 16);
			addr += (PAGE_SIZE - req_size);
		}
	}
out:
	return ((void *)addr);
}
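
/*
 * A minimal caller sketch (an assumption, not part of this file: it
 * approximates how malloc(9) diverts a request to MemGuard when the
 * kernel is built with DEBUG_MEMGUARD; the real hook lives in
 * kern_malloc.c):
 */
#if 0
	if (memguard_cmp_mtp(mtp, size)) {
		va = memguard_alloc(size, flags);
		if (va != NULL)
			return (va);
		/* Otherwise fall through to the regular allocator. */
	}
#endif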

int
is_memguard_addr(void *addr)
{
	vm_offset_t a = (vm_offset_t)(uintptr_t)addr;

	return (a >= memguard_base && a < memguard_base + memguard_mapsize);
}

/*
 * Free specified single object.
 */
void
memguard_free(void *ptr)
{
	vm_offset_t addr;
	u_long req_size, size, sizev;
	char *temp;
	int i;

	addr = trunc_page((uintptr_t)ptr);
	req_size = *v2sizep(addr);
	sizev = *v2sizev(addr);
	size = round_page(req_size);

	/*
	 * Page should not be guarded right now, so force a write.
	 * The purpose of this is to increase the likelihood of
	 * catching a double-free, but not necessarily a
	 * tamper-after-free (the second thread freeing might not
	 * write before freeing, so this forces it to, and the write
	 * will fault if the page has already been unmapped).
	 */
	temp = ptr;
	for (i = 0; i < size; i += PAGE_SIZE)
		temp[i] = 'M';

	/*
	 * This requires carnal knowledge of the implementation of
	 * kmem_free(), but since we've already replaced kmem_malloc()
	 * above, it's not really any worse.  We want to use the
	 * vm_map lock to serialize updates to memguard_wasted, since
	 * we had the lock at increment.
	 */
	kmem_unback(kmem_object, addr, size);
	if (sizev > size)
		addr -= PAGE_SIZE;
	vmem_xfree(memguard_arena, addr, sizev);
	if (req_size < PAGE_SIZE)
		memguard_wasted -= (PAGE_SIZE - req_size);
}
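
/*
 * A minimal caller sketch (an assumption, not part of this file: it
 * approximates how free(9) hands a MemGuard-backed pointer back here; the
 * real hook lives in kern_malloc.c):
 */
#if 0
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return;
	}
#endif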

/*
 * Re-allocate an allocation that was originally guarded.
 */
void *
memguard_realloc(void *addr, unsigned long size, struct malloc_type *mtp,
    int flags)
{
	void *newaddr;
	u_long old_size;

	/*
	 * Allocate the new block.  Force the allocation to be guarded
	 * as the original may have been guarded through random
	 * chance, and that should be preserved.
	 */
	if ((newaddr = memguard_alloc(size, flags)) == NULL)
		return (NULL);

	/* Copy over original contents. */
	old_size = *v2sizep(trunc_page((uintptr_t)addr));
	bcopy(addr, newaddr, min(size, old_size));
	memguard_free(addr);
	return (newaddr);
}

static int
memguard_cmp(unsigned long size)
{

	if (size < memguard_minsize) {
		memguard_minsize_reject++;
		return (0);
	}
	if ((memguard_options & MG_GUARD_ALLLARGE) != 0 && size >= PAGE_SIZE)
		return (1);
	if (memguard_frequency > 0 &&
	    (random() % 100000) < memguard_frequency) {
		memguard_frequency_hits++;
		return (1);
	}

	return (0);
}

int
memguard_cmp_mtp(struct malloc_type *mtp, unsigned long size)
{

	if (memguard_cmp(size))
		return (1);

#if 1
	/*
	 * The safest comparison is to compare the short description string
	 * of the memory type, but it is also the slowest.
	 */
	return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0);
#else
	/*
	 * If we compare pointers, there are two possible problems:
	 * 1. Memory type was unloaded and new memory type was allocated at the
	 *    same address.
	 * 2. Memory type was unloaded and loaded again, but allocated at a
	 *    different address.
	 */
	if (vm_memguard_mtype != NULL)
		return (mtp == vm_memguard_mtype);
	if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) {
		vm_memguard_mtype = mtp;
		return (1);
	}
	return (0);
#endif
}

int
memguard_cmp_zone(uma_zone_t zone)
{

	if ((memguard_options & MG_GUARD_NOFREE) == 0 &&
	    zone->uz_flags & UMA_ZONE_NOFREE)
		return (0);

	if (memguard_cmp(zone->uz_size))
		return (1);

	/*
	 * The safest comparison is to compare the zone name, but it is also
	 * the slowest.
	 */
	return (strcmp(zone->uz_name, vm_memguard_desc) == 0);
}
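
/*
 * A minimal caller sketch (an assumption, not part of this file: it
 * approximates how the uma(9) allocation path can divert a zone to
 * MemGuard under DEBUG_MEMGUARD; the real hook lives in uma_core.c):
 */
#if 0
	if (memguard_cmp_zone(zone)) {
		item = memguard_alloc(zone->uz_size, flags);
		if (item != NULL)
			return (item);
		/* Otherwise fall back to the normal zone allocator. */
	}
#endif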