/*
 * Copyright (C) 2009, 2012  Internet Systems Consortium, Inc. ("ISC")
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/* $Id$ */

/*! \file */

#include <config.h>

#include <string.h>

#include <isc/atomic.h>
#include <isc/buffer.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/platform.h>
#include <isc/print.h>
#include <isc/rwlock.h>
#include <isc/stats.h>
#include <isc/util.h>
#define ISC_STATS_MAGIC			ISC_MAGIC('S', 't', 'a', 't')
#define ISC_STATS_VALID(x)		ISC_MAGIC_VALID(x, ISC_STATS_MAGIC)

#ifndef ISC_STATS_USEMULTIFIELDS
#if defined(ISC_RWLOCK_USEATOMIC) && defined(ISC_PLATFORM_HAVEXADD) && !defined(ISC_PLATFORM_HAVEXADDQ)
#define ISC_STATS_USEMULTIFIELDS 1
#else
#define ISC_STATS_USEMULTIFIELDS 0
#endif
#endif	/* ISC_STATS_USEMULTIFIELDS */

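/*
 * Counter representation depends on the available atomic primitives:
 * with only 32-bit atomic addition (ISC_PLATFORM_HAVEXADD), each 64-bit
 * counter is emulated by two 32-bit halves, the full value being
 * reassembled as ((isc_uint64_t)hi << 32) | lo when the counters are
 * copied out (see copy_counters() below); with 64-bit atomic addition
 * (ISC_PLATFORM_HAVEXADDQ), a plain 64-bit integer is used; otherwise
 * counters are updated without atomic operations.  For example, hi = 2,
 * lo = 0xffffffff encodes the value 0x2ffffffff; one more increment
 * wraps lo to 0 and carries into hi, giving 0x300000000.
 */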
#if ISC_STATS_USEMULTIFIELDS
typedef struct {
	isc_uint32_t hi;
	isc_uint32_t lo;
} isc_stat_t;
#else
typedef isc_uint64_t isc_stat_t;
#endif

struct isc_stats {
	/*% Unlocked */
	unsigned int	magic;
	isc_mem_t	*mctx;
	int		ncounters;

	isc_mutex_t	lock;
	unsigned int	references; /* locked by lock */

	/*%
	 * Locked by counterlock, or unlocked if an efficient rwlock is not
	 * available.
	 */
#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_t	counterlock;
#endif
	isc_stat_t	*counters;

	/*%
	 * We don't want to lock the counters while we are dumping, so we
	 * first copy the current counter values into a local array.  This
	 * buffer serves as the copy destination.  It is allocated when the
	 * stats structure is created so that the dump operation cannot fail
	 * due to a memory allocation failure.
	 * XXX: this approach is wasteful for non-threaded builds, where the
	 * extra memory and the copy overhead could be avoided.  We prefer
	 * simplicity here, however, on the assumption that dumping is rare.
	 */
	isc_uint64_t	*copiedcounters;
};

static isc_result_t
create_stats(isc_mem_t *mctx, int ncounters, isc_stats_t **statsp) {
	isc_stats_t *stats;
	isc_result_t result = ISC_R_SUCCESS;

	REQUIRE(statsp != NULL && *statsp == NULL);

	stats = isc_mem_get(mctx, sizeof(*stats));
	if (stats == NULL)
		return (ISC_R_NOMEMORY);

	result = isc_mutex_init(&stats->lock);
	if (result != ISC_R_SUCCESS)
		goto clean_stats;

	stats->counters = isc_mem_get(mctx, sizeof(isc_stat_t) * ncounters);
	if (stats->counters == NULL) {
		result = ISC_R_NOMEMORY;
		goto clean_mutex;
	}
	stats->copiedcounters = isc_mem_get(mctx,
					    sizeof(isc_uint64_t) * ncounters);
	if (stats->copiedcounters == NULL) {
		result = ISC_R_NOMEMORY;
		goto clean_counters;
	}

#ifdef ISC_RWLOCK_USEATOMIC
	result = isc_rwlock_init(&stats->counterlock, 0, 0);
	if (result != ISC_R_SUCCESS)
		goto clean_copiedcounters;
#endif

	stats->references = 1;
	memset(stats->counters, 0, sizeof(isc_stat_t) * ncounters);
	stats->mctx = NULL;
	isc_mem_attach(mctx, &stats->mctx);
	stats->ncounters = ncounters;
	stats->magic = ISC_STATS_MAGIC;

	*statsp = stats;

	return (result);

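/*
 * Cleanup labels release resources in reverse order of allocation and
 * deliberately fall through into the labels below them, so a goto to
 * the earliest failed step frees everything allocated before it.
 */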
#ifdef ISC_RWLOCK_USEATOMIC
clean_copiedcounters:
	isc_mem_put(mctx, stats->copiedcounters,
		    sizeof(isc_uint64_t) * ncounters);
#endif

clean_counters:
	isc_mem_put(mctx, stats->counters, sizeof(isc_stat_t) * ncounters);

clean_mutex:
	DESTROYLOCK(&stats->lock);

clean_stats:
	isc_mem_put(mctx, stats, sizeof(*stats));

	return (result);
}

void
isc_stats_attach(isc_stats_t *stats, isc_stats_t **statsp) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(statsp != NULL && *statsp == NULL);

	LOCK(&stats->lock);
	stats->references++;
	UNLOCK(&stats->lock);

	*statsp = stats;
}

void
isc_stats_detach(isc_stats_t **statsp) {
	isc_stats_t *stats;
	unsigned int refs;

	REQUIRE(statsp != NULL && ISC_STATS_VALID(*statsp));

	stats = *statsp;
	*statsp = NULL;

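	/*
	 * Capture the new reference count while still holding the lock;
	 * re-reading stats->references after UNLOCK would race with a
	 * concurrent detach and could lead to a double free.
	 */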
	LOCK(&stats->lock);
	refs = --stats->references;
	UNLOCK(&stats->lock);

	if (refs == 0) {
		isc_mem_put(stats->mctx, stats->copiedcounters,
			    sizeof(isc_uint64_t) * stats->ncounters);
		isc_mem_put(stats->mctx, stats->counters,
			    sizeof(isc_stat_t) * stats->ncounters);
		DESTROYLOCK(&stats->lock);
#ifdef ISC_RWLOCK_USEATOMIC
		isc_rwlock_destroy(&stats->counterlock);
#endif
		isc_mem_putanddetach(&stats->mctx, stats, sizeof(*stats));
	}
}

int
isc_stats_ncounters(isc_stats_t *stats) {
	REQUIRE(ISC_STATS_VALID(stats));

	return (stats->ncounters);
}

static inline void
incrementcounter(isc_stats_t *stats, int counter) {
	isc_int32_t prev;

#ifdef ISC_RWLOCK_USEATOMIC
	/*
	 * We take a "read" lock so that many threads can update counters
	 * concurrently, while preventing the dumping thread, which takes
	 * the "write" (exclusive) lock, from reading a counter while it is
	 * being modified.  The update itself is made safe by the atomic
	 * operation.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, 1);
	/*
	 * If the lower 32-bit field overflows, increment the higher field.
	 * Note that it's *theoretically* possible for the lower field to
	 * overflow again before the higher field is incremented.  It doesn't
	 * matter, however, because we don't read the value until
	 * copy_counters() is called, where the whole process is protected
	 * by the write (exclusive) lock.
	 */
	if (prev == (isc_int32_t)0xffffffff)
		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi, 1);
#elif defined(ISC_PLATFORM_HAVEXADDQ)
	UNUSED(prev);
	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], 1);
#else
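	/*
	 * No usable atomics: a plain, non-atomic increment.  This path is
	 * only safe when counters are not updated concurrently, e.g. in
	 * non-threaded builds (see the struct isc_stats comment above).
	 */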
	UNUSED(prev);
	stats->counters[counter]++;
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}

static inline void
decrementcounter(isc_stats_t *stats, int counter) {
	isc_int32_t prev;

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, -1);
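	/*
	 * If the lower 32-bit field underflows (prev == 0, so it wraps to
	 * 0xffffffff), borrow from the higher field.  As in the increment
	 * case, the combined value is only read under the exclusive lock
	 * in copy_counters().
	 */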
	if (prev == 0)
		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi,
				-1);
#elif defined(ISC_PLATFORM_HAVEXADDQ)
	UNUSED(prev);
	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], -1);
#else
	UNUSED(prev);
	stats->counters[counter]--;
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}

static void
copy_counters(isc_stats_t *stats) {
	int i;

#ifdef ISC_RWLOCK_USEATOMIC
	/*
	 * We take the "write" (exclusive) lock before reading the counters,
	 * ensuring that no update is in progress while we copy them out.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_write);
#endif

#if ISC_STATS_USEMULTIFIELDS
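	/* Reassemble each 64-bit value from its 32-bit halves. */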
	for (i = 0; i < stats->ncounters; i++) {
		stats->copiedcounters[i] =
				(isc_uint64_t)(stats->counters[i].hi) << 32 |
				stats->counters[i].lo;
	}
#else
	UNUSED(i);
	memcpy(stats->copiedcounters, stats->counters,
	       stats->ncounters * sizeof(isc_stat_t));
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_write);
#endif
}

isc_result_t
isc_stats_create(isc_mem_t *mctx, isc_stats_t **statsp, int ncounters) {
	REQUIRE(statsp != NULL && *statsp == NULL);

	return (create_stats(mctx, ncounters, statsp));
}

void
isc_stats_increment(isc_stats_t *stats, isc_statscounter_t counter) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	incrementcounter(stats, (int)counter);
}

void
isc_stats_decrement(isc_stats_t *stats, isc_statscounter_t counter) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	decrementcounter(stats, (int)counter);
}

void
isc_stats_dump(isc_stats_t *stats, isc_stats_dumper_t dump_fn,
	       void *arg, unsigned int options)
{
	int i;

	REQUIRE(ISC_STATS_VALID(stats));

	copy_counters(stats);

	for (i = 0; i < stats->ncounters; i++) {
		if ((options & ISC_STATSDUMP_VERBOSE) == 0 &&
		    stats->copiedcounters[i] == 0)
			continue;
		dump_fn((isc_statscounter_t)i, stats->copiedcounters[i], arg);
	}
}
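
/*
 * Usage sketch (a hypothetical caller, not part of this module): a
 * counter set is created with isc_stats_create(), updated with
 * isc_stats_increment()/isc_stats_decrement(), and walked with
 * isc_stats_dump(), which invokes the callback once per counter,
 * skipping zero-valued counters unless ISC_STATSDUMP_VERBOSE is set.
 * The memory context "mctx" and the counter index are assumed to be
 * supplied by the caller.
 *
 *	static void
 *	print_counter(isc_statscounter_t counter, isc_uint64_t value,
 *		      void *arg)
 *	{
 *		UNUSED(arg);
 *		printf("counter %d: %llu\n", (int)counter,
 *		       (unsigned long long)value);
 *	}
 *
 *	isc_stats_t *stats = NULL;
 *	RUNTIME_CHECK(isc_stats_create(mctx, &stats, 16) == ISC_R_SUCCESS);
 *	isc_stats_increment(stats, 3);
 *	isc_stats_dump(stats, print_counter, NULL, 0);
 *	isc_stats_detach(&stats);
 */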
327