memstat_malloc.c revision 148357
1/*-
2 * Copyright (c) 2005 Robert N. M. Watson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: head/lib/libmemstat/memstat_malloc.c 148357 2005-07-24 01:28:54Z rwatson $
27 */
28
29#include <sys/param.h>
30#include <sys/malloc.h>
31#include <sys/sysctl.h>
32
33#include <err.h>
34#include <errno.h>
35#include <stdio.h>
36#include <stdlib.h>
37#include <string.h>
38
39#include "memstat.h"
40#include "memstat_internal.h"
41
42/*
43 * Extract malloc(9) statistics from the running kernel, and store all memory
44 * type information in the passed list.  For each type, check the list for an
45 * existing entry with the right name/allocator -- if present, update that
46 * entry.  Otherwise, add a new entry.  On error, the entire list will be
47 * cleared, as entries will be in an inconsistent state.
48 *
49 * To reduce the level of work for a list that starts empty, we keep around a
50 * hint as to whether it was empty when we began, so we can avoid searching
51 * the list for entries to update.  Updates are O(n^2) due to searching for
52 * each entry before adding it.
53 */
int
memstat_sysctl_malloc(struct memory_type_list *list, int flags)
{
	struct malloc_type_stream_header *mtshp;
	struct malloc_type_header *mthp;
	struct malloc_type_stats *mtsp;
	struct memory_type *mtp;
	int count, hint_dontsearch, i, j, maxcpus;
	char *buffer, *p;
	size_t size;

	/* NOTE(review): 'flags' is currently unused; presumably kept for
	 * symmetry with the other memstat_sysctl_*() entry points. */

	hint_dontsearch = LIST_EMPTY(&list->mtl_list);

	/*
	 * Query the number of CPUs, number of malloc types so that we can
	 * guess an initial buffer size.  We loop until we succeed or really
	 * fail.  Note that the value of maxcpus we query using sysctl is not
	 * the version we use when processing the real data -- that is read
	 * from the header.
	 */
retry:
	size = sizeof(maxcpus);
	if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}
	if (size != sizeof(maxcpus)) {
		list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}

	if (maxcpus > MEMSTAT_MAXCPU) {
		list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
		return (-1);
	}

	size = sizeof(count);
	if (sysctlbyname("kern.malloc_count", &count, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			/* A missing sysctl implies an unsupported kernel. */
			list->mtl_error = MEMSTAT_ERROR_VERSION;
		return (-1);
	}
	if (size != sizeof(count)) {
		list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}

	/*
	 * The stream is one stream header followed by 'count' records, each
	 * consisting of a type header plus maxcpus sets of per-CPU stats.
	 * The leading term must therefore be the size of the stream header
	 * (*mtshp), not the type header; if the estimate is still short
	 * (e.g., types registered since the count query), the ENOMEM retry
	 * below picks up the slack.
	 */
	size = sizeof(*mtshp) + count * (sizeof(*mthp) + sizeof(*mtsp) *
	    maxcpus);

	buffer = malloc(size);
	if (buffer == NULL) {
		list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
		return (-1);
	}

	if (sysctlbyname("kern.malloc_stats", buffer, &size, NULL, 0) < 0) {
		/*
		 * XXXRW: ENOMEM is an ambiguous return, we should bound the
		 * number of loops, perhaps.
		 */
		if (errno == ENOMEM) {
			free(buffer);
			goto retry;
		}
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}

	/* An empty stream means no types registered; nothing to do. */
	if (size == 0) {
		free(buffer);
		return (0);
	}

	if (size < sizeof(*mtshp)) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}
	p = buffer;
	mtshp = (struct malloc_type_stream_header *)p;
	p += sizeof(*mtshp);

	if (mtshp->mtsh_version != MALLOC_TYPE_STREAM_VERSION) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}

	if (mtshp->mtsh_maxcpus > MEMSTAT_MAXCPU) {
		list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
		free(buffer);
		return (-1);
	}

	/*
	 * For the remainder of this function, we are quite trusting about
	 * the layout of structures and sizes, since we've determined we have
	 * a matching version and acceptable CPU count.
	 */
	maxcpus = mtshp->mtsh_maxcpus;
	count = mtshp->mtsh_count;
	for (i = 0; i < count; i++) {
		mthp = (struct malloc_type_header *)p;
		p += sizeof(*mthp);

		/*
		 * If the list started non-empty, look for an existing entry
		 * to update; otherwise every type is necessarily new.
		 */
		if (hint_dontsearch == 0) {
			mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC,
			    mthp->mth_name);
		} else
			mtp = NULL;
		if (mtp == NULL)
			mtp = _memstat_mt_allocate(list, ALLOCATOR_MALLOC,
			    mthp->mth_name);
		if (mtp == NULL) {
			/* On allocation failure the list is inconsistent. */
			memstat_mtl_free(list);
			free(buffer);
			list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
			return (-1);
		}

		/*
		 * Reset the statistics on a current node.
		 */
		_memstat_mt_reset_stats(mtp);

		for (j = 0; j < maxcpus; j++) {
			mtsp = (struct malloc_type_stats *)p;
			p += sizeof(*mtsp);

			/*
			 * Summarize raw statistics across CPUs into coalesced
			 * statistics.
			 */
			mtp->mt_memalloced += mtsp->mts_memalloced;
			mtp->mt_memfreed += mtsp->mts_memfreed;
			mtp->mt_numallocs += mtsp->mts_numallocs;
			mtp->mt_numfrees += mtsp->mts_numfrees;
			mtp->mt_sizemask |= mtsp->mts_size;

			/*
			 * Copies of per-CPU statistics.
			 */
			mtp->mt_percpu_alloc[j].mtp_memalloced =
			    mtsp->mts_memalloced;
			mtp->mt_percpu_alloc[j].mtp_memfreed =
			    mtsp->mts_memfreed;
			mtp->mt_percpu_alloc[j].mtp_numallocs =
			    mtsp->mts_numallocs;
			mtp->mt_percpu_alloc[j].mtp_numfrees =
			    mtsp->mts_numfrees;
			mtp->mt_percpu_alloc[j].mtp_sizemask =
			    mtsp->mts_size;
		}

		/*
		 * Derived cross-CPU statistics.
		 */
		mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
		mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
	}

	free(buffer);

	return (0);
}
229