/*-
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MEMSTAT_INTERNAL_H_
#define	_MEMSTAT_INTERNAL_H_

/*
 * memstat maintains its own internal notion of statistics on each memory
 * type, common across UMA and kernel malloc.  Some fields are taken straight
 * from the allocator statistics; others are derived when extracted from the
 * kernel.  A struct memory_type describes each type supported by an
 * allocator.  memory_type structures can be chained into lists.
 */
struct memory_type {
	/*
	 * Static properties of type.
	 */
	int	 mt_allocator;		/* malloc(9), uma(9), etc. */
	char	 mt_name[MEMTYPE_MAXNAME];	/* name of memory type. */

	/*
	 * (Relatively) static zone settings that don't uniquely identify
	 * the zone, but also don't change much.
	 */
	uint64_t	 mt_countlimit;	/* 0, or maximum allocations. */
	uint64_t	 mt_byteslimit;	/* 0, or maximum bytes. */
	uint64_t	 mt_sizemask;	/* malloc: allocated size bitmask. */
	uint64_t	 mt_size;	/* uma: size of objects. */

	/*
	 * Zone or type information that includes all caches and any central
	 * zone state.  Depending on the allocator, this may be synthesized
	 * from several sources, or directly measured.
	 */
	uint64_t	 mt_memalloced;	/* Bytes allocated over lifetime. */
	uint64_t	 mt_memfreed;	/* Bytes freed over lifetime. */
	uint64_t	 mt_numallocs;	/* Allocations over lifetime. */
	uint64_t	 mt_numfrees;	/* Frees over lifetime. */
	uint64_t	 mt_bytes;	/* Bytes currently allocated. */
	uint64_t	 mt_count;	/* Number of current allocations. */
	uint64_t	 mt_free;	/* Number of cached free items. */
	uint64_t	 mt_failures;	/* Number of allocation failures. */
	uint64_t	 mt_sleeps;	/* Number of allocation sleeps. */

	/*
	 * Caller-owned memory.
	 */
	void		*mt_caller_pointer[MEMSTAT_MAXCALLER];	/* Pointers. */
	uint64_t	 mt_caller_uint64[MEMSTAT_MAXCALLER];	/* Integers. */

	/*
	 * For allocators making use of per-CPU caches, we also provide raw
	 * statistics from the central allocator and each per-CPU cache,
	 * which (combined) sometimes make up the above general statistics;
	 * an illustrative reconstruction sketch follows this structure
	 * definition.
	 *
	 * First, central zone/type state, all numbers excluding any items
	 * cached in per-CPU caches.
	 *
	 * XXXRW: Might be desirable to separately expose allocation stats
	 * from zone, which should (combined with per-cpu) add up to the
	 * global stats above.
	 */
	uint64_t	 mt_zonefree;	/* Free items in zone. */
	uint64_t	 mt_kegfree;	/* Free items in keg. */

	/*
	 * Per-CPU measurements fall into two categories: per-CPU allocation
	 * statistics and per-CPU cache state.
	 */
	struct mt_percpu_alloc_s {
		uint64_t	 mtp_memalloced;/* Per-CPU mt_memalloced. */
		uint64_t	 mtp_memfreed;	/* Per-CPU mt_memfreed. */
		uint64_t	 mtp_numallocs;	/* Per-CPU mt_numallocs. */
		uint64_t	 mtp_numfrees;	/* Per-CPU mt_numfrees. */
		uint64_t	 mtp_sizemask;	/* Per-CPU mt_sizemask. */
		void		*mtp_caller_pointer[MEMSTAT_MAXCALLER];
		uint64_t	 mtp_caller_uint64[MEMSTAT_MAXCALLER];
	}	*mt_percpu_alloc;

	struct mt_percpu_cache_s {
		uint64_t	 mtp_free;	/* Per-CPU cache free items. */
	}	*mt_percpu_cache;

	LIST_ENTRY(memory_type)	mt_list;	/* List of types. */
};
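
/*
 * Illustrative sketch only; this helper is not part of the original
 * libmemstat interface.  It shows how the per-CPU allocation counters
 * above relate to the global mt_numallocs field: assuming mt_percpu_alloc[]
 * has been populated for 'maxcpus' CPUs, summing the per-CPU counts
 * approximates the lifetime allocation count (any allocations accounted
 * only in central zone/type state are not visible here).
 */
static inline uint64_t
_memstat_mt_sum_percpu_numallocs(const struct memory_type *mtp, int maxcpus)
{
	uint64_t total;
	int i;

	/* Accumulate the per-CPU allocation counters. */
	total = 0;
	for (i = 0; i < maxcpus; i++)
		total += mtp->mt_percpu_alloc[i].mtp_numallocs;
	return (total);
}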

/*
 * Description of struct memory_type_list is in memstat.h.
 */
struct memory_type_list {
	LIST_HEAD(, memory_type)	mtl_list;
	int				mtl_error;
};
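
/*
 * Illustrative sketch of a hypothetical lookup helper, not declared by
 * libmemstat: consumers walk mtl_list with the <sys/queue.h> macros, for
 * example to find the entry for a given allocator and type name.  Assumes
 * the including file provides <string.h> and <sys/queue.h>, as libmemstat
 * sources already do.
 */
static inline struct memory_type *
_memstat_mt_lookup(struct memory_type_list *list, int allocator,
    const char *name)
{
	struct memory_type *mtp;

	/* Walk the list of types, matching on allocator and name. */
	LIST_FOREACH(mtp, &list->mtl_list, mt_list) {
		if (mtp->mt_allocator == allocator &&
		    strcmp(mtp->mt_name, name) == 0)
			return (mtp);
	}
	return (NULL);
}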

void			 _memstat_mtl_empty(struct memory_type_list *list);
struct memory_type	*_memstat_mt_allocate(struct memory_type_list *list,
			    int allocator, const char *name, int maxcpus);
void			 _memstat_mt_reset_stats(struct memory_type *mtp,
			    int maxcpus);
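
/*
 * Illustrative sketch of a hypothetical wrapper, not part of the original
 * header: an allocator-specific reader might combine the two internal
 * helpers above, obtaining a memory_type entry for 'name' and clearing its
 * global and per-CPU statistics before repopulating them from freshly read
 * kernel data.  The exact division of work between these functions is an
 * assumption here, not documented by this header.
 */
static inline struct memory_type *
_memstat_mt_prepare(struct memory_type_list *list, int allocator,
    const char *name, int maxcpus)
{
	struct memory_type *mtp;

	mtp = _memstat_mt_allocate(list, allocator, name, maxcpus);
	if (mtp != NULL)
		_memstat_mt_reset_stats(mtp, maxcpus);
	return (mtp);
}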

#endif /* !_MEMSTAT_INTERNAL_H_ */