/*
 * This module derived from code donated to the FreeBSD Project by
 * Matthew Dillon <dillon@backplane.com>
 *
 * Copyright (c) 1998 The FreeBSD Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * LIB/MEMORY/ZALLOC.C	- self-contained low-overhead memory pool/allocation
 *			  subsystem
 *
 *	This subsystem implements memory pools and memory allocation
 *	routines.
 *
 *	Pools are managed via a linked list of 'free' areas.  Allocating
 *	memory creates holes in the freelist, freeing memory fills them.
 *	Since the freelist consists only of free memory areas, it is possible
 *	to allocate the entire pool without incurring any structural overhead.
 *
 *	The system works best when allocating similarly-sized chunks of
 *	memory.  Care must be taken to avoid fragmentation when
 *	allocating/deallocating dissimilar chunks.
 *
 *	When a memory pool is first allocated, the entire pool is marked as
 *	allocated.  This is done mainly because we do not want to modify any
 *	portion of a pool's data area until we are given permission.  The
 *	caller must explicitly deallocate portions of the pool to make them
 *	available.
 *
 *	z[n]xalloc() works like z[n]alloc() but the allocation is made from
 *	within the specified address range.  If the segment could not be
 *	allocated, NULL is returned.  WARNING!  The address range will be
 *	aligned to an 8 or 16 byte boundary depending on the cpu, so if you
 *	give an unaligned address range, unexpected results may occur.
 *
 *	If a standard allocation fails, the reclaim function will be called
 *	to recover some space.  This usually causes other portions of the
 *	same pool to be released.  Memory allocations at this low level
 *	should not block, but you can do that in your reclaim function
 *	if you want.  Reclaim does not function when z[n]xalloc() is used,
 *	only for z[n]alloc().
 *
 *	Allocations and frees of 0 bytes are valid operations.
 */
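
/*
 * A minimal usage sketch (illustrative only; the pool and buffer names
 * are assumptions, and higher-level malloc-style wrappers may sit on
 * top of these routines):
 *
 *	static MemPool pool;			// zero-filled, so mp_Size == 0
 *	static char buf[65536];
 *	void *p;
 *
 *	zextendPool(&pool, buf, sizeof(buf));	// pool starts fully allocated
 *	zfree(&pool, buf, sizeof(buf));		// release it to the freelist
 *	p = znalloc(&pool, 128);		// NULL if no fit is found
 *	zfree(&pool, p, 128);			// caller must track the size
 */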

#include "zalloc_defs.h"

7339665Smsmith/*
74269101Sian * Objects in the pool must be aligned to at least the size of struct MemNode.
75269101Sian * They must also be aligned to MALLOCALIGN, which should normally be larger
76269101Sian * than the struct, so assert that to be so at compile time.
77269101Sian */
78269101Siantypedef char assert_align[(sizeof(struct MemNode) <= MALLOCALIGN) ? 1 : -1];
79269101Sian
80269101Sian#define	MEMNODE_SIZE_MASK	MALLOCALIGN_MASK
81269101Sian
82269101Sian/*
8339665Smsmith * znalloc() -	allocate memory (without zeroing) from pool.  Call reclaim
8439665Smsmith *		and retry if appropriate, return NULL if unable to allocate
8539665Smsmith *		memory.
8639665Smsmith */
8739665Smsmith
8839665Smsmithvoid *
89223905Savatarznalloc(MemPool *mp, uintptr_t bytes)
9039665Smsmith{
    /*
     * align according to pool object size (can be 0).  This is
     * inclusive of the MEMNODE_SIZE_MASK minimum alignment.
     */
    bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;
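    /*
     * Worked example (assuming MALLOCALIGN is 16, so MEMNODE_SIZE_MASK
     * is 15): a 20-byte request rounds up to (20 + 15) & ~15 = 32 bytes.
     */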

    if (bytes == 0)
	return((void *)-1);

    /*
     * locate freelist entry big enough to hold the object.  If all objects
     * are the same size, this is a constant-time function.
     */

    if (bytes <= mp->mp_Size - mp->mp_Used) {
	MemNode **pmn;
	MemNode *mn;

	for (pmn = &mp->mp_First; (mn = *pmn) != NULL; pmn = &mn->mr_Next) {
	    if (bytes > mn->mr_Bytes)
		continue;

	    /*
	     * Cut a chunk of memory out of the beginning of this
	     * block and fixup the link appropriately.
	     */

	    {
		char *ptr = (char *)mn;

		if (mn->mr_Bytes == bytes) {
		    *pmn = mn->mr_Next;
		} else {
		    mn = (MemNode *)((char *)mn + bytes);
		    mn->mr_Next  = ((MemNode *)ptr)->mr_Next;
		    mn->mr_Bytes = ((MemNode *)ptr)->mr_Bytes - bytes;
		    *pmn = mn;
		}
		mp->mp_Used += bytes;
		return(ptr);
	    }
	}
    }

    /*
     * Memory pool is full, return NULL.
     */

    return(NULL);
}

/*
 * zfree() - free previously allocated memory
 */

void
zfree(MemPool *mp, void *ptr, uintptr_t bytes)
{
    /*
     * align according to pool object size (can be 0).  This is
     * inclusive of the MEMNODE_SIZE_MASK minimum alignment.
     */
    bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;

    if (bytes == 0)
	return;

    /*
     * panic if illegal pointer
     */

    if ((char *)ptr < (char *)mp->mp_Base ||
	(char *)ptr + bytes > (char *)mp->mp_End ||
	((uintptr_t)ptr & MEMNODE_SIZE_MASK) != 0)
	panic("zfree(%p,%ju): wild pointer", ptr, (uintmax_t)bytes);

    /*
     * free the segment
     */

    {
	MemNode **pmn;
	MemNode *mn;

	mp->mp_Used -= bytes;

	for (pmn = &mp->mp_First; (mn = *pmn) != NULL; pmn = &mn->mr_Next) {
	    /*
	     * If area between last node and current node
	     *  - check range
	     *  - check merge with next area
	     *  - check merge with previous area
	     */
	    if ((char *)ptr <= (char *)mn) {
		/*
		 * range check
		 */
		if ((char *)ptr + bytes > (char *)mn) {
		    panic("zfree(%p,%ju): corrupt memlist1", ptr,
			(uintmax_t)bytes);
		}

		/*
		 * merge against next area or create independent area
		 */

		if ((char *)ptr + bytes == (char *)mn) {
		    ((MemNode *)ptr)->mr_Next = mn->mr_Next;
		    ((MemNode *)ptr)->mr_Bytes = bytes + mn->mr_Bytes;
		} else {
		    ((MemNode *)ptr)->mr_Next = mn;
		    ((MemNode *)ptr)->mr_Bytes = bytes;
		}
		*pmn = mn = (MemNode *)ptr;

		/*
		 * merge against previous area (if there is a previous
		 * area).
		 */

		if (pmn != &mp->mp_First) {
		    if ((char *)pmn + ((MemNode *)pmn)->mr_Bytes == (char *)ptr) {
			((MemNode *)pmn)->mr_Next = mn->mr_Next;
			((MemNode *)pmn)->mr_Bytes += mn->mr_Bytes;
			mn = (MemNode *)pmn;
		    }
		}
		return;
		/* NOT REACHED */
	    }
	    if ((char *)ptr < (char *)mn + mn->mr_Bytes) {
		panic("zfree(%p,%ju): corrupt memlist2", ptr,
		    (uintmax_t)bytes);
	    }
	}
	/*
	 * We are beyond the last MemNode, append new MemNode.  Merge against
	 * previous area if possible.
	 */
	if (pmn == &mp->mp_First ||
	    (char *)pmn + ((MemNode *)pmn)->mr_Bytes != (char *)ptr
	) {
	    ((MemNode *)ptr)->mr_Next = NULL;
	    ((MemNode *)ptr)->mr_Bytes = bytes;
	    *pmn = (MemNode *)ptr;
	    mn = (MemNode *)ptr;
	} else {
	    ((MemNode *)pmn)->mr_Bytes += bytes;
	    mn = (MemNode *)pmn;
	}
    }
}
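
/*
 * Coalescing sketch (illustrative; offsets are relative to an assumed
 * 64-byte pool whose space has already been zfree()d in full, with
 * MALLOCALIGN assumed to be 16).  Freeing adjacent chunks merges them
 * back into a single freelist node:
 *
 *	a = znalloc(&pool, 16);		freelist: [16..64)
 *	b = znalloc(&pool, 16);		freelist: [32..64)
 *	zfree(&pool, a, 16);		freelist: [0..16) -> [32..64)
 *	zfree(&pool, b, 16);		freelist: [0..64)  (fully merged)
 */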

/*
 * zextendPool() - extend memory pool to cover additional space.
 *
 *		   Note: the added memory starts out as allocated; you
 *		   must free it to make it available to the memory subsystem.
 *
 *		   Note: mp_Size may not reflect the (mp_End - mp_Base) range
 *		   due to other parts of the system doing their own sbrk()
 *		   calls.
 */

void
zextendPool(MemPool *mp, void *base, uintptr_t bytes)
{
    if (mp->mp_Size == 0) {
	mp->mp_Base = base;
	mp->mp_Used = bytes;
	mp->mp_End = (char *)base + bytes;
	mp->mp_Size = bytes;
    } else {
	void *pend = (char *)mp->mp_Base + mp->mp_Size;

	if (base < mp->mp_Base) {
	    mp->mp_Size += (char *)mp->mp_Base - (char *)base;
	    mp->mp_Used += (char *)mp->mp_Base - (char *)base;
	    mp->mp_Base = base;
	}
	base = (char *)base + bytes;
	if (base > pend) {
	    mp->mp_Size += (char *)base - (char *)pend;
	    mp->mp_Used += (char *)base - (char *)pend;
	    mp->mp_End = (char *)base;
	}
    }
}
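
/*
 * Growth sketch (illustrative; assumes the new region is obtained via
 * sbrk() and happens to land immediately above the current pool, which
 * is not guaranteed):
 *
 *	void *ext = sbrk(4096);
 *	zextendPool(&pool, ext, 4096);	// pool now covers the new region
 *	zfree(&pool, ext, 4096);	// new space starts out allocated
 */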

#ifdef ZALLOCDEBUG

void
zallocstats(MemPool *mp)
{
    int abytes = 0;
    int hbytes = 0;
    int fcount = 0;
    MemNode *mn;

    printf("%d bytes reserved", (int) mp->mp_Size);

    mn = mp->mp_First;

    if ((void *)mn != (void *)mp->mp_Base) {
	abytes += (char *)mn - (char *)mp->mp_Base;
    }

    while (mn) {
	if ((char *)mn + mn->mr_Bytes != mp->mp_End) {
	    hbytes += mn->mr_Bytes;
	    ++fcount;
	}
	if (mn->mr_Next)
	    abytes += (char *)mn->mr_Next - ((char *)mn + mn->mr_Bytes);
	mn = mn->mr_Next;
    }
    printf(" %d bytes allocated\n%d fragments (%d bytes fragmented)\n",
	abytes,
	fcount,
	hbytes
    );
}

#endif