1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2003-2008 Joseph Koshy
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29#include <sys/types.h>
30#include <sys/param.h>
31#include <sys/module.h>
32#include <sys/pmc.h>
33#include <sys/syscall.h>
34
35#include <ctype.h>
36#include <errno.h>
37#include <err.h>
38#include <fcntl.h>
39#include <pmc.h>
40#include <stdio.h>
41#include <stdlib.h>
42#include <string.h>
43#include <strings.h>
44#include <sysexits.h>
45#include <unistd.h>
46
47#include "libpmcinternal.h"
48
49/* Function prototypes */
50#if defined(__amd64__) || defined(__i386__)
51static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
52    struct pmc_op_pmcallocate *_pmc_config);
53#endif
54#if defined(__amd64__) || defined(__i386__)
55static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
56    struct pmc_op_pmcallocate *_pmc_config);
57#endif
58#if defined(__arm__)
59static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
60    struct pmc_op_pmcallocate *_pmc_config);
61#endif
62#if defined(__aarch64__)
63static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
64    struct pmc_op_pmcallocate *_pmc_config);
65static int cmn600_pmu_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
66    struct pmc_op_pmcallocate *_pmc_config);
67static int dmc620_pmu_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
68    struct pmc_op_pmcallocate *_pmc_config);
69#endif
70static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
71    struct pmc_op_pmcallocate *_pmc_config);
72
73#if defined(__powerpc__)
74static int powerpc_allocate_pmc(enum pmc_event _pe, char* ctrspec,
75			     struct pmc_op_pmcallocate *_pmc_config);
76#endif /* __powerpc__ */
77
78#define PMC_CALL(op, params)	syscall(pmc_syscall, (op), (params))
79
80/*
81 * Event aliases provide a way for the user to ask for generic events
82 * like "cache-misses", or "instructions-retired".  These aliases are
83 * mapped to the appropriate canonical event descriptions using a
84 * lookup table.
85 */
struct pmc_event_alias {
	const char	*pm_alias;	/* user-visible alias, e.g. "cache-misses" */
	const char	*pm_spec;	/* canonical event specification it expands to */
};
90
91static const struct pmc_event_alias *pmc_mdep_event_aliases;
92
93/*
94 * The pmc_event_descr structure maps symbolic names known to the user
95 * to integer codes used by the PMC KLD.
96 */
struct pmc_event_descr {
	const char	*pm_ev_name;	/* symbolic event name */
	enum pmc_event	pm_ev_code;	/* numeric code used by the PMC KLD */
};
101
102/*
103 * The pmc_class_descr structure maps class name prefixes for
104 * event names to event tables and other PMC class data.
105 */
struct pmc_class_descr {
	const char	*pm_evc_name;		/* class prefix incl. '-', e.g. "TSC-" */
	size_t		pm_evc_name_size;	/* prefix length, excluding NUL */
	enum pmc_class	pm_evc_class;		/* PMC_CLASS_* identifier */
	const struct pmc_event_descr *pm_evc_event_table; /* events in this class */
	size_t		pm_evc_event_table_size; /* number of table entries */
	/* class-specific parser for the counter specification string */
	int		(*pm_evc_allocate_pmc)(enum pmc_event _pe,
			    char *_ctrspec, struct pmc_op_pmcallocate *_pa);
};
115
116#define	PMC_TABLE_SIZE(N)	(sizeof(N)/sizeof(N[0]))
117#define	PMC_EVENT_TABLE_SIZE(N)	PMC_TABLE_SIZE(N##_event_table)
118
119#undef	__PMC_EV
120#define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },
121
122/*
123 * PMC_CLASSDEP_TABLE(NAME, CLASS)
124 *
125 * Define a table mapping event names and aliases to HWPMC event IDs.
126 */
127#define	PMC_CLASSDEP_TABLE(N, C)				\
128	static const struct pmc_event_descr N##_event_table[] =	\
129	{							\
130		__PMC_EV_##C()					\
131	}
132
133PMC_CLASSDEP_TABLE(iaf, IAF);
134PMC_CLASSDEP_TABLE(k8, K8);
135PMC_CLASSDEP_TABLE(armv7, ARMV7);
136PMC_CLASSDEP_TABLE(armv8, ARMV8);
137PMC_CLASSDEP_TABLE(cmn600_pmu, CMN600_PMU);
138PMC_CLASSDEP_TABLE(dmc620_pmu_cd2, DMC620_PMU_CD2);
139PMC_CLASSDEP_TABLE(dmc620_pmu_c, DMC620_PMU_C);
140PMC_CLASSDEP_TABLE(ppc7450, PPC7450);
141PMC_CLASSDEP_TABLE(ppc970, PPC970);
142PMC_CLASSDEP_TABLE(e500, E500);
143
144static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT];
145
146#undef	__PMC_EV_ALIAS
147#define	__PMC_EV_ALIAS(N,CODE) 	{ N, PMC_EV_##CODE },
148
149/*
150 * TODO: Factor out the __PMC_EV_ARMV7/8 list into a single separate table
151 * rather than duplicating for each core.
152 */
153
154static const struct pmc_event_descr cortex_a8_event_table[] =
155{
156	__PMC_EV_ALIAS_ARMV7_CORTEX_A8()
157	__PMC_EV_ARMV7()
158};
159
160static const struct pmc_event_descr cortex_a9_event_table[] =
161{
162	__PMC_EV_ALIAS_ARMV7_CORTEX_A9()
163	__PMC_EV_ARMV7()
164};
165
166static const struct pmc_event_descr cortex_a53_event_table[] =
167{
168	__PMC_EV_ALIAS_ARMV8_CORTEX_A53()
169	__PMC_EV_ARMV8()
170};
171
172static const struct pmc_event_descr cortex_a57_event_table[] =
173{
174	__PMC_EV_ALIAS_ARMV8_CORTEX_A57()
175	__PMC_EV_ARMV8()
176};
177
178static const struct pmc_event_descr cortex_a76_event_table[] =
179{
180	__PMC_EV_ALIAS_ARMV8_CORTEX_A76()
181	__PMC_EV_ARMV8()
182};
183
184static const struct pmc_event_descr tsc_event_table[] =
185{
186	__PMC_EV_ALIAS_TSC()
187};
188
189#undef	PMC_CLASS_TABLE_DESC
190#define	PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)	\
191static const struct pmc_class_descr NAME##_class_table_descr =	\
192	{							\
193		.pm_evc_name  = #CLASS "-",			\
194		.pm_evc_name_size = sizeof(#CLASS "-") - 1,	\
195		.pm_evc_class = PMC_CLASS_##CLASS ,		\
196		.pm_evc_event_table = EVENTS##_event_table ,	\
197		.pm_evc_event_table_size = 			\
198			PMC_EVENT_TABLE_SIZE(EVENTS),		\
199		.pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc	\
200	}
201
202#if	defined(__i386__) || defined(__amd64__)
203PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
204#endif
205#if	defined(__i386__) || defined(__amd64__)
206PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
207#endif
208#if	defined(__arm__)
209PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7);
210PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7);
211#endif
212#if	defined(__aarch64__)
213PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64);
214PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64);
215PMC_CLASS_TABLE_DESC(cortex_a76, ARMV8, cortex_a76, arm64);
216PMC_CLASS_TABLE_DESC(cmn600_pmu, CMN600_PMU, cmn600_pmu, cmn600_pmu);
217PMC_CLASS_TABLE_DESC(dmc620_pmu_cd2, DMC620_PMU_CD2, dmc620_pmu_cd2, dmc620_pmu);
218PMC_CLASS_TABLE_DESC(dmc620_pmu_c, DMC620_PMU_C, dmc620_pmu_c, dmc620_pmu);
219#endif
220#if defined(__powerpc__)
221PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc);
222PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc);
223PMC_CLASS_TABLE_DESC(e500, E500, e500, powerpc);
224#endif
225
/*
 * Class descriptor for dynamic software (SOFT) events.  The event
 * table starts out empty; presumably it is pointed at soft_event_table
 * once the kernel's dynamic event list is known — TODO confirm against
 * the initialization code.
 */
static struct pmc_class_descr soft_class_table_descr =
{
	.pm_evc_name  = "SOFT-",
	.pm_evc_name_size = sizeof("SOFT-") - 1,
	.pm_evc_class = PMC_CLASS_SOFT,
	.pm_evc_event_table = NULL,
	.pm_evc_event_table_size = 0,
	.pm_evc_allocate_pmc = soft_allocate_pmc
};
235
236#undef	PMC_CLASS_TABLE_DESC
237
238static const struct pmc_class_descr **pmc_class_table;
239#define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass
240
241/*
242 * Mapping tables, mapping enumeration values to human readable
243 * strings.
244 */
245
246static const char * pmc_capability_names[] = {
247#undef	__PMC_CAP
248#define	__PMC_CAP(N,V,D)	#N ,
249	__PMC_CAPS()
250};
251
struct pmc_class_map {
	enum pmc_class	pm_class;	/* class enumeration value */
	const char	*pm_name;	/* human readable class name */
};
256
257static const struct pmc_class_map pmc_class_names[] = {
258#undef	__PMC_CLASS
259#define __PMC_CLASS(S,V,D) { .pm_class = PMC_CLASS_##S, .pm_name = #S } ,
260	__PMC_CLASSES()
261};
262
struct pmc_cputype_map {
	enum pmc_cputype pm_cputype;	/* CPU type enumeration value */
	const char	*pm_name;	/* human readable CPU type name */
};
267
268static const struct pmc_cputype_map pmc_cputype_names[] = {
269#undef	__PMC_CPU
270#define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
271	__PMC_CPUS()
272};
273
274static const char * pmc_disposition_names[] = {
275#undef	__PMC_DISP
276#define	__PMC_DISP(D)	#D ,
277	__PMC_DISPOSITIONS()
278};
279
280static const char * pmc_mode_names[] = {
281#undef  __PMC_MODE
282#define __PMC_MODE(M,N)	#M ,
283	__PMC_MODES()
284};
285
286static const char * pmc_state_names[] = {
287#undef  __PMC_STATE
288#define __PMC_STATE(S) #S ,
289	__PMC_STATES()
290};
291
292/*
293 * Filled in by pmc_init().
294 */
295static int pmc_syscall = -1;
296static struct pmc_cpuinfo cpu_info;
297static struct pmc_op_getdyneventinfo soft_event_info;
298
299/* Event masks for events */
struct pmc_masks {
	const char	*pm_name;	/* mask keyword matched against user input */
	const uint64_t	pm_value;	/* bit value OR-ed into the event mask */
};
304#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
305#define	NULLMASK	{ .pm_name = NULL }
306
307#if defined(__amd64__) || defined(__i386__)
308static int
309pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask)
310{
311	const struct pmc_masks *pm;
312	char *q, *r;
313	int c;
314
315	if (pmask == NULL)	/* no mask keywords */
316		return (-1);
317	q = strchr(p, '=');	/* skip '=' */
318	if (*++q == '\0')	/* no more data */
319		return (-1);
320	c = 0;			/* count of mask keywords seen */
321	while ((r = strsep(&q, "+")) != NULL) {
322		for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
323		    pm++)
324			;
325		if (pm->pm_name == NULL) /* not found */
326			return (-1);
327		*evmask |= pm->pm_value;
328		c++;
329	}
330	return (c);
331}
332#endif
333
334#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
335#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
336#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
337
338#if defined(__amd64__) || defined(__i386__)
339/*
340 * AMD K8 PMCs.
341 *
342 */
343
/* Aliases mapping generic event names to K8 event specifiers. */
static struct pmc_event_alias k8_aliases[] = {
	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
	EV_ALIAS("branch-mispredicts",
	    "k8-fr-retired-taken-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k8-dc-miss"),
	EV_ALIAS("ic-misses",		"k8-ic-miss"),
	EV_ALIAS("instructions",	"k8-fr-retired-x86-instructions"),
	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",	"k8-bu-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)	/* end-of-table marker */
};
356
357#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))
358
359/*
360 * Parsing tables
361 */
362
363/* fp dispatched fpu ops */
364static const struct pmc_masks k8_mask_fdfo[] = {
365	__K8MASK(add-pipe-excluding-junk-ops,	0),
366	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
367	__K8MASK(store-pipe-excluding-junk-ops,	2),
368	__K8MASK(add-pipe-junk-ops,		3),
369	__K8MASK(multiply-pipe-junk-ops,	4),
370	__K8MASK(store-pipe-junk-ops,		5),
371	NULLMASK
372};
373
374/* ls segment register loads */
375static const struct pmc_masks k8_mask_lsrl[] = {
376	__K8MASK(es,	0),
377	__K8MASK(cs,	1),
378	__K8MASK(ss,	2),
379	__K8MASK(ds,	3),
380	__K8MASK(fs,	4),
381	__K8MASK(gs,	5),
382	__K8MASK(hs,	6),
383	NULLMASK
384};
385
386/* ls locked operation */
387static const struct pmc_masks k8_mask_llo[] = {
388	__K8MASK(locked-instructions,	0),
389	__K8MASK(cycles-in-request,	1),
390	__K8MASK(cycles-to-complete,	2),
391	NULLMASK
392};
393
394/* dc refill from {l2,system} and dc copyback */
395static const struct pmc_masks k8_mask_dc[] = {
396	__K8MASK(invalid,	0),
397	__K8MASK(shared,	1),
398	__K8MASK(exclusive,	2),
399	__K8MASK(owner,		3),
400	__K8MASK(modified,	4),
401	NULLMASK
402};
403
404/* dc one bit ecc error */
405static const struct pmc_masks k8_mask_dobee[] = {
406	__K8MASK(scrubber,	0),
407	__K8MASK(piggyback,	1),
408	NULLMASK
409};
410
411/* dc dispatched prefetch instructions */
412static const struct pmc_masks k8_mask_ddpi[] = {
413	__K8MASK(load,	0),
414	__K8MASK(store,	1),
415	__K8MASK(nta,	2),
416	NULLMASK
417};
418
419/* dc dcache accesses by locks */
420static const struct pmc_masks k8_mask_dabl[] = {
421	__K8MASK(accesses,	0),
422	__K8MASK(misses,	1),
423	NULLMASK
424};
425
426/* bu internal l2 request */
427static const struct pmc_masks k8_mask_bilr[] = {
428	__K8MASK(ic-fill,	0),
429	__K8MASK(dc-fill,	1),
430	__K8MASK(tlb-reload,	2),
431	__K8MASK(tag-snoop,	3),
432	__K8MASK(cancelled,	4),
433	NULLMASK
434};
435
436/* bu fill request l2 miss */
437static const struct pmc_masks k8_mask_bfrlm[] = {
438	__K8MASK(ic-fill,	0),
439	__K8MASK(dc-fill,	1),
440	__K8MASK(tlb-reload,	2),
441	NULLMASK
442};
443
444/* bu fill into l2 */
445static const struct pmc_masks k8_mask_bfil[] = {
446	__K8MASK(dirty-l2-victim,	0),
447	__K8MASK(victim-from-l2,	1),
448	NULLMASK
449};
450
451/* fr retired fpu instructions */
452static const struct pmc_masks k8_mask_frfi[] = {
453	__K8MASK(x87,			0),
454	__K8MASK(mmx-3dnow,		1),
455	__K8MASK(packed-sse-sse2,	2),
456	__K8MASK(scalar-sse-sse2,	3),
457	NULLMASK
458};
459
460/* fr retired fastpath double op instructions */
461static const struct pmc_masks k8_mask_frfdoi[] = {
462	__K8MASK(low-op-pos-0,		0),
463	__K8MASK(low-op-pos-1,		1),
464	__K8MASK(low-op-pos-2,		2),
465	NULLMASK
466};
467
468/* fr fpu exceptions */
469static const struct pmc_masks k8_mask_ffe[] = {
470	__K8MASK(x87-reclass-microfaults,	0),
471	__K8MASK(sse-retype-microfaults,	1),
472	__K8MASK(sse-reclass-microfaults,	2),
473	__K8MASK(sse-and-x87-microtraps,	3),
474	NULLMASK
475};
476
477/* nb memory controller page access event */
478static const struct pmc_masks k8_mask_nmcpae[] = {
479	__K8MASK(page-hit,	0),
480	__K8MASK(page-miss,	1),
481	__K8MASK(page-conflict,	2),
482	NULLMASK
483};
484
485/* nb memory controller turnaround */
486static const struct pmc_masks k8_mask_nmct[] = {
487	__K8MASK(dimm-turnaround,		0),
488	__K8MASK(read-to-write-turnaround,	1),
489	__K8MASK(write-to-read-turnaround,	2),
490	NULLMASK
491};
492
493/* nb memory controller bypass saturation */
494static const struct pmc_masks k8_mask_nmcbs[] = {
495	__K8MASK(memory-controller-hi-pri-bypass,	0),
496	__K8MASK(memory-controller-lo-pri-bypass,	1),
497	__K8MASK(dram-controller-interface-bypass,	2),
498	__K8MASK(dram-controller-queue-bypass,		3),
499	NULLMASK
500};
501
502/* nb sized commands */
503static const struct pmc_masks k8_mask_nsc[] = {
504	__K8MASK(nonpostwrszbyte,	0),
505	__K8MASK(nonpostwrszdword,	1),
506	__K8MASK(postwrszbyte,		2),
507	__K8MASK(postwrszdword,		3),
508	__K8MASK(rdszbyte,		4),
509	__K8MASK(rdszdword,		5),
510	__K8MASK(rdmodwr,		6),
511	NULLMASK
512};
513
514/* nb probe result */
515static const struct pmc_masks k8_mask_npr[] = {
516	__K8MASK(probe-miss,		0),
517	__K8MASK(probe-hit,		1),
518	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
519	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
520	NULLMASK
521};
522
523/* nb hypertransport bus bandwidth */
524static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
525	__K8MASK(command,	0),
526	__K8MASK(data,	1),
527	__K8MASK(buffer-release, 2),
528	__K8MASK(nop,	3),
529	NULLMASK
530};
531
532#undef	__K8MASK
533
534#define	K8_KW_COUNT	"count"
535#define	K8_KW_EDGE	"edge"
536#define	K8_KW_INV	"inv"
537#define	K8_KW_MASK	"mask"
538#define	K8_KW_OS	"os"
539#define	K8_KW_USR	"usr"
540
/*
 * Parse the counter specification for an AMD K8 PMC.
 *
 * `pe' selects the event and hence the set of valid "mask=" keywords;
 * `ctrspec' is a comma-separated list of modifiers (count=N, edge,
 * inv, mask=..., os, usr) which is consumed destructively by strsep().
 * The result is written into pmc_config->pm_md.pm_amd and pm_caps.
 *
 * Returns 0 on success, -1 on a malformed specification.
 */
static int
k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char		*e, *p, *q;
	int		n;
	uint32_t	count;
	uint64_t	evmask;
	const struct pmc_masks	*pm, *pmask;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_amd.pm_amd_config = 0;

	pmask = NULL;
	evmask = 0;

#define	__K8SETMASK(M) pmask = k8_mask_##M

	/* setup parsing tables */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
		__K8SETMASK(fdfo);
		break;
	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
		__K8SETMASK(lsrl);
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		__K8SETMASK(llo);
		break;
	case PMC_EV_K8_DC_REFILL_FROM_L2:
	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
	case PMC_EV_K8_DC_COPYBACK:
		__K8SETMASK(dc);
		break;
	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
		__K8SETMASK(dobee);
		break;
	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
		__K8SETMASK(ddpi);
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		__K8SETMASK(dabl);
		break;
	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
		__K8SETMASK(bilr);
		break;
	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
		__K8SETMASK(bfrlm);
		break;
	case PMC_EV_K8_BU_FILL_INTO_L2:
		__K8SETMASK(bfil);
		break;
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
		__K8SETMASK(frfi);
		break;
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
		__K8SETMASK(frfdoi);
		break;
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		__K8SETMASK(ffe);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
		__K8SETMASK(nmcpae);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
		__K8SETMASK(nmct);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
		__K8SETMASK(nmcbs);
		break;
	case PMC_EV_K8_NB_SIZED_COMMANDS:
		__K8SETMASK(nsc);
		break;
	case PMC_EV_K8_NB_PROBE_RESULT:
		__K8SETMASK(npr);
		break;
	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
		__K8SETMASK(nhbb);
		break;

	default:
		break;		/* no options defined */
	}

	/* parse the comma-separated list of modifiers */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);

			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_amd.pm_amd_config |=
			    AMD_PMC_TO_COUNTER(count);

		} else if (KWMATCH(p, K8_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, K8_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, K8_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWMATCH(p, K8_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* other post processing */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		/* XXX only available in rev B and later */
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		/* XXX only available in rev C and later */
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		/* XXX CPU Rev A,B evmask is to be zero */
		if (evmask & (evmask - 1)) /* > 1 bit set */
			return (-1);
		if (evmask == 0) {
			evmask = 0x01; /* Rev C and later: #instrs */
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
		break;
	default:
		/* if no unit mask was given, default to all mask bits */
		if (evmask == 0 && pmask != NULL) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
	}

	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_amd.pm_amd_config =
		    AMD_PMC_TO_UNITMASK(evmask);

	return (0);
}
693
694#endif
695
696#if	defined(__i386__) || defined(__amd64__)
697static int
698tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
699    struct pmc_op_pmcallocate *pmc_config)
700{
701	if (pe != PMC_EV_TSC_TSC)
702		return (-1);
703
704	/* TSC events must be unqualified. */
705	if (ctrspec && *ctrspec != '\0')
706		return (-1);
707
708	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
709	pmc_config->pm_caps |= PMC_CAP_READ;
710
711	return (0);
712}
713#endif
714
/*
 * Generic event aliases.
 * NOTE(review): "instructions" is mapped onto the SOFT-CLOCK.HARD
 * software clock event here — confirm this is intentional.
 */
static struct pmc_event_alias generic_aliases[] = {
	EV_ALIAS("instructions",		"SOFT-CLOCK.HARD"),
	EV_ALIAS(NULL, NULL)	/* end-of-table marker */
};
719
720static int
721soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
722    struct pmc_op_pmcallocate *pmc_config)
723{
724	(void)ctrspec;
725	(void)pmc_config;
726
727	if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST)
728		return (-1);
729
730	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
731	return (0);
732}
733
734#if	defined(__arm__)
/* Aliases mapping generic event names to Cortex-A8 events. */
static struct pmc_event_alias cortex_a8_aliases[] = {
	EV_ALIAS("dc-misses",		"L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses",		"L1_ICACHE_REFILL"),
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)	/* end-of-table marker */
};

/* Aliases mapping generic event names to Cortex-A9 events. */
static struct pmc_event_alias cortex_a9_aliases[] = {
	EV_ALIAS("dc-misses",		"L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses",		"L1_ICACHE_REFILL"),
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)	/* end-of-table marker */
};
748
749static int
750armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
751    struct pmc_op_pmcallocate *pmc_config __unused)
752{
753	switch (pe) {
754	default:
755		break;
756	}
757
758	return (0);
759}
760#endif
761
762#if	defined(__aarch64__)
/* No aliases are defined for the ARMv8 cores; tables hold only the
 * end-of-table marker. */
static struct pmc_event_alias cortex_a53_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
static struct pmc_event_alias cortex_a57_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
static struct pmc_event_alias cortex_a76_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
772
773static int
774arm64_allocate_pmc(enum pmc_event pe, char *ctrspec,
775    struct pmc_op_pmcallocate *pmc_config)
776{
777	char *p;
778
779	while ((p = strsep(&ctrspec, ",")) != NULL) {
780		if (KWMATCH(p, "os"))
781			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
782		else if (KWMATCH(p, "usr"))
783			pmc_config->pm_caps |= PMC_CAP_USER;
784		else
785			return (-1);
786	}
787
788	return (0);
789}
790
791static int
792cmn600_pmu_allocate_pmc(enum pmc_event pe, char *ctrspec,
793    struct pmc_op_pmcallocate *pmc_config)
794{
795	uint32_t nodeid, occupancy, xpport, xpchannel;
796	char *e, *p, *q;
797	unsigned int i;
798	char *xpport_names[] = { "East", "West", "North", "South", "devport0",
799	    "devport1" };
800	char *xpchannel_names[] = { "REQ", "RSP", "SNP", "DAT" };
801
802	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
803	pmc_config->pm_caps |= PMC_CAP_SYSTEM;
804	pmc_config->pm_md.pm_cmn600.pma_cmn600_config = 0;
805	/*
806	 * CMN600 extra fields:
807	 * * nodeid - node coordinates x[2-3],y[2-3],p[1],s[2]
808	 * 		width of x and y fields depend on matrix size.
809	 * * occupancy - numeric value to select desired filter.
810	 * * xpport - East, West, North, South, devport0, devport1 (or 0, 1, ..., 5)
811	 * * xpchannel - REQ, RSP, SNP, DAT (or 0, 1, 2, 3)
812	 */
813
814	while ((p = strsep(&ctrspec, ",")) != NULL) {
815		if (KWPREFIXMATCH(p, "nodeid=")) {
816			q = strchr(p, '=');
817			if (*++q == '\0') /* skip '=' */
818				return (-1);
819
820			nodeid = strtol(q, &e, 0);
821			if (e == q || *e != '\0')
822				return (-1);
823
824			pmc_config->pm_md.pm_cmn600.pma_cmn600_nodeid |= nodeid;
825
826		} else if (KWPREFIXMATCH(p, "occupancy=")) {
827			q = strchr(p, '=');
828			if (*++q == '\0') /* skip '=' */
829				return (-1);
830
831			occupancy = strtol(q, &e, 0);
832			if (e == q || *e != '\0')
833				return (-1);
834
835			pmc_config->pm_md.pm_cmn600.pma_cmn600_occupancy = occupancy;
836		} else if (KWPREFIXMATCH(p, "xpport=")) {
837			q = strchr(p, '=');
838			if (*++q == '\0') /* skip '=' */
839				return (-1);
840
841			xpport = strtol(q, &e, 0);
842			if (e == q || *e != '\0') {
843				for (i = 0; i < nitems(xpport_names); i++) {
844					if (strcasecmp(xpport_names[i], q) == 0) {
845						xpport = i;
846						break;
847					}
848				}
849				if (i == nitems(xpport_names))
850					return (-1);
851			}
852
853			pmc_config->pm_md.pm_cmn600.pma_cmn600_config |= xpport << 2;
854		} else if (KWPREFIXMATCH(p, "xpchannel=")) {
855			q = strchr(p, '=');
856			if (*++q == '\0') /* skip '=' */
857				return (-1);
858
859			xpchannel = strtol(q, &e, 0);
860			if (e == q || *e != '\0') {
861				for (i = 0; i < nitems(xpchannel_names); i++) {
862					if (strcasecmp(xpchannel_names[i], q) == 0) {
863						xpchannel = i;
864						break;
865					}
866				}
867				if (i == nitems(xpchannel_names))
868					return (-1);
869			}
870
871			pmc_config->pm_md.pm_cmn600.pma_cmn600_config |= xpchannel << 5;
872		} else
873			return (-1);
874	}
875
876	return (0);
877}
878
879static int
880dmc620_pmu_allocate_pmc(enum pmc_event pe, char *ctrspec,
881    struct pmc_op_pmcallocate *pmc_config)
882{
883	char		*e, *p, *q;
884	uint64_t	match, mask;
885	uint32_t	count;
886
887	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
888	pmc_config->pm_caps |= PMC_CAP_SYSTEM;
889	pmc_config->pm_md.pm_dmc620.pm_dmc620_config = 0;
890
891	while ((p = strsep(&ctrspec, ",")) != NULL) {
892		if (KWPREFIXMATCH(p, "count=")) {
893			q = strchr(p, '=');
894			if (*++q == '\0') /* skip '=' */
895				return (-1);
896
897			count = strtol(q, &e, 0);
898			if (e == q || *e != '\0')
899				return (-1);
900
901			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
902			pmc_config->pm_md.pm_dmc620.pm_dmc620_config |= count;
903
904		} else if (KWMATCH(p, "inv")) {
905			pmc_config->pm_caps |= PMC_CAP_INVERT;
906		} else if (KWPREFIXMATCH(p, "match=")) {
907			match = strtol(q, &e, 0);
908			if (e == q || *e != '\0')
909				return (-1);
910
911			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
912			pmc_config->pm_md.pm_dmc620.pm_dmc620_match = match;
913		} else if (KWPREFIXMATCH(p, "mask=")) {
914			q = strchr(p, '=');
915			if (*++q == '\0') /* skip '=' */
916				return (-1);
917
918			mask = strtol(q, &e, 0);
919			if (e == q || *e != '\0')
920				return (-1);
921
922			pmc_config->pm_md.pm_dmc620.pm_dmc620_mask = mask;
923			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
924		} else
925			return (-1);
926	}
927
928	return (0);
929}
930#endif
931
932#if defined(__powerpc__)
933
/* Aliases mapping generic event names to PPC7450 events. */
static struct pmc_event_alias ppc7450_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_COMPLETED"),
	EV_ALIAS("branches",		"BRANCHES_COMPLETED"),
	EV_ALIAS("branch-mispredicts",	"MISPREDICTED_BRANCHES"),
	EV_ALIAS(NULL, NULL)	/* end-of-table marker */
};

/* Aliases mapping generic event names to PPC970 events. */
static struct pmc_event_alias ppc970_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles",       "CYCLES"),
	EV_ALIAS(NULL, NULL)	/* end-of-table marker */
};

/* Aliases mapping generic event names to e500 events. */
static struct pmc_event_alias e500_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles",       "CYCLES"),
	EV_ALIAS(NULL, NULL)	/* end-of-table marker */
};
952
953#define	POWERPC_KW_OS		"os"
954#define	POWERPC_KW_USR		"usr"
955#define	POWERPC_KW_ANYTHREAD	"anythread"
956
957static int
958powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
959		     struct pmc_op_pmcallocate *pmc_config __unused)
960{
961	char *p;
962
963	(void) pe;
964
965	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
966
967	while ((p = strsep(&ctrspec, ",")) != NULL) {
968		if (KWMATCH(p, POWERPC_KW_OS))
969			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
970		else if (KWMATCH(p, POWERPC_KW_USR))
971			pmc_config->pm_caps |= PMC_CAP_USER;
972		else if (KWMATCH(p, POWERPC_KW_ANYTHREAD))
973			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
974		else
975			return (-1);
976	}
977
978	return (0);
979}
980
981#endif /* __powerpc__ */
982
983
984/*
985 * Match an event name `name' with its canonical form.
986 *
987 * Matches are case insensitive and spaces, periods, underscores and
988 * hyphen characters are considered to match each other.
989 *
990 * Returns 1 for a match, 0 otherwise.
991 */
992
/*
 * Match an event name against its canonical form.
 *
 * The comparison is case insensitive, and the separator characters
 * space, period, underscore and hyphen are all treated as equivalent
 * to one another.
 *
 * Returns 1 on a match, 0 otherwise.
 */
static int
pmc_match_event_name(const char *name, const char *canonicalname)
{
	const unsigned char *p, *q;
	unsigned char pc, qc;
	int psep, qsep;

	p = (const unsigned char *)name;
	q = (const unsigned char *)canonicalname;

	while ((pc = *p) != '\0' && (qc = *q) != '\0') {
		psep = pc == ' ' || pc == '_' || pc == '-' || pc == '.';
		qsep = qc == ' ' || qc == '_' || qc == '-' || qc == '.';
		if (!(psep && qsep) && toupper(pc) != toupper(qc))
			return (0);
		p++;
		q++;
	}

	/* both strings must be fully consumed for a match */
	return (*p == '\0' && *q == '\0');
}
1020
1021/*
1022 * Match an event name against all the event named supported by a
1023 * PMC class.
1024 *
1025 * Returns an event descriptor pointer on match or NULL otherwise.
1026 */
1027static const struct pmc_event_descr *
1028pmc_match_event_class(const char *name,
1029    const struct pmc_class_descr *pcd)
1030{
1031	size_t n;
1032	const struct pmc_event_descr *ev;
1033
1034	ev = pcd->pm_evc_event_table;
1035	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
1036		if (pmc_match_event_name(name, ev->pm_ev_name))
1037			return (ev);
1038
1039	return (NULL);
1040}
1041
1042/*
1043 * API entry points
1044 */
1045
1046int
1047pmc_allocate(const char *ctrspec, enum pmc_mode mode,
1048    uint32_t flags, int cpu, pmc_id_t *pmcid,
1049    uint64_t count)
1050{
1051	size_t n;
1052	int retval;
1053	char *r, *spec_copy;
1054	const char *ctrname;
1055	const struct pmc_event_descr *ev;
1056	const struct pmc_event_alias *alias;
1057	struct pmc_op_pmcallocate pmc_config;
1058	const struct pmc_class_descr *pcd;
1059
1060	spec_copy = NULL;
1061	retval    = -1;
1062
1063	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
1064	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
1065		errno = EINVAL;
1066		goto out;
1067	}
1068	bzero(&pmc_config, sizeof(pmc_config));
1069	pmc_config.pm_cpu   = cpu;
1070	pmc_config.pm_mode  = mode;
1071	pmc_config.pm_flags = flags;
1072	pmc_config.pm_count = count;
1073	if (PMC_IS_SAMPLING_MODE(mode))
1074		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
1075
1076	/*
1077	 * Try to pull the raw event ID directly from the pmu-events table. If
1078	 * this is unsupported on the platform, or the event is not found,
1079	 * continue with searching the regular event tables.
1080	 */
1081	r = spec_copy = strdup(ctrspec);
1082	ctrname = strsep(&r, ",");
1083	if (pmc_pmu_enabled()) {
1084		if (pmc_pmu_pmcallocate(ctrname, &pmc_config) == 0)
1085			goto found;
1086	}
1087	free(spec_copy);
1088	spec_copy = NULL;
1089
1090	/* replace an event alias with the canonical event specifier */
1091	if (pmc_mdep_event_aliases)
1092		for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
1093			if (!strcasecmp(ctrspec, alias->pm_alias)) {
1094				spec_copy = strdup(alias->pm_spec);
1095				break;
1096			}
1097
1098	if (spec_copy == NULL)
1099		spec_copy = strdup(ctrspec);
1100
1101	r = spec_copy;
1102	ctrname = strsep(&r, ",");
1103
1104	/*
1105	 * If a explicit class prefix was given by the user, restrict the
1106	 * search for the event to the specified PMC class.
1107	 */
1108	ev = NULL;
1109	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
1110		pcd = pmc_class_table[n];
1111		if (pcd != NULL && strncasecmp(ctrname, pcd->pm_evc_name,
1112		    pcd->pm_evc_name_size) == 0) {
1113			if ((ev = pmc_match_event_class(ctrname +
1114			    pcd->pm_evc_name_size, pcd)) == NULL) {
1115				errno = EINVAL;
1116				goto out;
1117			}
1118			break;
1119		}
1120	}
1121
1122	/*
1123	 * Otherwise, search for this event in all compatible PMC
1124	 * classes.
1125	 */
1126	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
1127		pcd = pmc_class_table[n];
1128		if (pcd != NULL)
1129			ev = pmc_match_event_class(ctrname, pcd);
1130	}
1131
1132	if (ev == NULL) {
1133		errno = EINVAL;
1134		goto out;
1135	}
1136
1137	pmc_config.pm_ev    = ev->pm_ev_code;
1138	pmc_config.pm_class = pcd->pm_evc_class;
1139
1140 	if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
1141		errno = EINVAL;
1142		goto out;
1143	}
1144
1145found:
1146	if (PMC_CALL(PMC_OP_PMCALLOCATE, &pmc_config) == 0) {
1147		*pmcid = pmc_config.pm_pmcid;
1148		retval = 0;
1149	}
1150out:
1151	if (spec_copy)
1152		free(spec_copy);
1153
1154	return (retval);
1155}
1156
1157int
1158pmc_attach(pmc_id_t pmc, pid_t pid)
1159{
1160	struct pmc_op_pmcattach pmc_attach_args;
1161
1162	pmc_attach_args.pm_pmc = pmc;
1163	pmc_attach_args.pm_pid = pid;
1164
1165	return (PMC_CALL(PMC_OP_PMCATTACH, &pmc_attach_args));
1166}
1167
1168int
1169pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
1170{
1171	unsigned int i;
1172	enum pmc_class cl;
1173
1174	cl = PMC_ID_TO_CLASS(pmcid);
1175	for (i = 0; i < cpu_info.pm_nclass; i++)
1176		if (cpu_info.pm_classes[i].pm_class == cl) {
1177			*caps = cpu_info.pm_classes[i].pm_caps;
1178			return (0);
1179		}
1180	errno = EINVAL;
1181	return (-1);
1182}
1183
1184int
1185pmc_configure_logfile(int fd)
1186{
1187	struct pmc_op_configurelog cla;
1188
1189	cla.pm_flags = 0;
1190	cla.pm_logfd = fd;
1191	if (PMC_CALL(PMC_OP_CONFIGURELOG, &cla) < 0)
1192		return (-1);
1193	return (0);
1194}
1195
1196int
1197pmc_cpuinfo(const struct pmc_cpuinfo **pci)
1198{
1199	if (pmc_syscall == -1) {
1200		errno = ENXIO;
1201		return (-1);
1202	}
1203
1204	*pci = &cpu_info;
1205	return (0);
1206}
1207
1208int
1209pmc_detach(pmc_id_t pmc, pid_t pid)
1210{
1211	struct pmc_op_pmcattach pmc_detach_args;
1212
1213	pmc_detach_args.pm_pmc = pmc;
1214	pmc_detach_args.pm_pid = pid;
1215	return (PMC_CALL(PMC_OP_PMCDETACH, &pmc_detach_args));
1216}
1217
1218int
1219pmc_disable(int cpu, int pmc)
1220{
1221	struct pmc_op_pmcadmin ssa;
1222
1223	ssa.pm_cpu = cpu;
1224	ssa.pm_pmc = pmc;
1225	ssa.pm_state = PMC_STATE_DISABLED;
1226	return (PMC_CALL(PMC_OP_PMCADMIN, &ssa));
1227}
1228
1229int
1230pmc_enable(int cpu, int pmc)
1231{
1232	struct pmc_op_pmcadmin ssa;
1233
1234	ssa.pm_cpu = cpu;
1235	ssa.pm_pmc = pmc;
1236	ssa.pm_state = PMC_STATE_FREE;
1237	return (PMC_CALL(PMC_OP_PMCADMIN, &ssa));
1238}
1239
1240/*
1241 * Return a list of events known to a given PMC class.  'cl' is the
1242 * PMC class identifier, 'eventnames' is the returned list of 'const
1243 * char *' pointers pointing to the names of the events. 'nevents' is
1244 * the number of event name pointers returned.
1245 *
1246 * The space for 'eventnames' is allocated using malloc(3).  The caller
1247 * is responsible for freeing this space when done.
1248 */
1249int
1250pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
1251    int *nevents)
1252{
1253	int count;
1254	const char **names;
1255	const struct pmc_event_descr *ev;
1256
1257	switch (cl)
1258	{
1259	case PMC_CLASS_IAF:
1260		ev = iaf_event_table;
1261		count = PMC_EVENT_TABLE_SIZE(iaf);
1262		break;
1263	case PMC_CLASS_TSC:
1264		ev = tsc_event_table;
1265		count = PMC_EVENT_TABLE_SIZE(tsc);
1266		break;
1267	case PMC_CLASS_K8:
1268		ev = k8_event_table;
1269		count = PMC_EVENT_TABLE_SIZE(k8);
1270		break;
1271	case PMC_CLASS_ARMV7:
1272		switch (cpu_info.pm_cputype) {
1273		default:
1274		case PMC_CPU_ARMV7_CORTEX_A8:
1275			ev = cortex_a8_event_table;
1276			count = PMC_EVENT_TABLE_SIZE(cortex_a8);
1277			break;
1278		case PMC_CPU_ARMV7_CORTEX_A9:
1279			ev = cortex_a9_event_table;
1280			count = PMC_EVENT_TABLE_SIZE(cortex_a9);
1281			break;
1282		}
1283		break;
1284	case PMC_CLASS_ARMV8:
1285		switch (cpu_info.pm_cputype) {
1286		default:
1287		case PMC_CPU_ARMV8_CORTEX_A53:
1288			ev = cortex_a53_event_table;
1289			count = PMC_EVENT_TABLE_SIZE(cortex_a53);
1290			break;
1291		case PMC_CPU_ARMV8_CORTEX_A57:
1292			ev = cortex_a57_event_table;
1293			count = PMC_EVENT_TABLE_SIZE(cortex_a57);
1294			break;
1295		case PMC_CPU_ARMV8_CORTEX_A76:
1296			ev = cortex_a76_event_table;
1297			count = PMC_EVENT_TABLE_SIZE(cortex_a76);
1298			break;
1299		}
1300		break;
1301	case PMC_CLASS_CMN600_PMU:
1302		ev = cmn600_pmu_event_table;
1303		count = PMC_EVENT_TABLE_SIZE(cmn600_pmu);
1304		break;
1305	case PMC_CLASS_DMC620_PMU_CD2:
1306		ev = dmc620_pmu_cd2_event_table;
1307		count = PMC_EVENT_TABLE_SIZE(dmc620_pmu_cd2);
1308		break;
1309	case PMC_CLASS_DMC620_PMU_C:
1310		ev = dmc620_pmu_c_event_table;
1311		count = PMC_EVENT_TABLE_SIZE(dmc620_pmu_c);
1312		break;
1313	case PMC_CLASS_PPC7450:
1314		ev = ppc7450_event_table;
1315		count = PMC_EVENT_TABLE_SIZE(ppc7450);
1316		break;
1317	case PMC_CLASS_PPC970:
1318		ev = ppc970_event_table;
1319		count = PMC_EVENT_TABLE_SIZE(ppc970);
1320		break;
1321	case PMC_CLASS_E500:
1322		ev = e500_event_table;
1323		count = PMC_EVENT_TABLE_SIZE(e500);
1324		break;
1325	case PMC_CLASS_SOFT:
1326		ev = soft_event_table;
1327		count = soft_event_info.pm_nevent;
1328		break;
1329	default:
1330		errno = EINVAL;
1331		return (-1);
1332	}
1333
1334	if ((names = malloc(count * sizeof(const char *))) == NULL)
1335		return (-1);
1336
1337	*eventnames = names;
1338	*nevents = count;
1339
1340	for (;count--; ev++, names++)
1341		*names = ev->pm_ev_name;
1342
1343	return (0);
1344}
1345
1346int
1347pmc_flush_logfile(void)
1348{
1349	return (PMC_CALL(PMC_OP_FLUSHLOG, 0));
1350}
1351
1352int
1353pmc_close_logfile(void)
1354{
1355	return (PMC_CALL(PMC_OP_CLOSELOG, 0));
1356}
1357
1358int
1359pmc_get_driver_stats(struct pmc_driverstats *ds)
1360{
1361	struct pmc_op_getdriverstats gms;
1362
1363	if (PMC_CALL(PMC_OP_GETDRIVERSTATS, &gms) < 0)
1364		return (-1);
1365
1366	/* copy out fields in the current userland<->library interface */
1367	ds->pm_intr_ignored    = gms.pm_intr_ignored;
1368	ds->pm_intr_processed  = gms.pm_intr_processed;
1369	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
1370	ds->pm_syscalls        = gms.pm_syscalls;
1371	ds->pm_syscall_errors  = gms.pm_syscall_errors;
1372	ds->pm_buffer_requests = gms.pm_buffer_requests;
1373	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
1374	ds->pm_log_sweeps      = gms.pm_log_sweeps;
1375	return (0);
1376}
1377
1378int
1379pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
1380{
1381	struct pmc_op_getmsr gm;
1382
1383	gm.pm_pmcid = pmc;
1384	if (PMC_CALL(PMC_OP_PMCGETMSR, &gm) < 0)
1385		return (-1);
1386	*msr = gm.pm_msr;
1387	return (0);
1388}
1389
/*
 * Initialize the PMC library.
 *
 * Locates the hwpmc(4) kernel module, retrieves its system call
 * number, checks the kernel module's ABI version against this
 * library's compiled-in version, snapshots the CPU/PMC description,
 * builds the PMC class descriptor table (including dynamically
 * discovered soft events) and selects the machine-dependent event
 * alias table.  Returns 0 on success, or -1 with errno set; most
 * failure paths also reset 'pmc_syscall' to -1 to mark the library
 * as uninitialized.
 */
int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;

	if (pmc_syscall != -1) /* already inited */
		return (0);

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return (-1);

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return (-1);

	pmc_syscall = pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(PMC_OP_GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno  = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	/* cache the kernel's CPU/PMC description in 'cpu_info' */
	bzero(&op_cpu_info, sizeof(op_cpu_info));
	if (PMC_CALL(PMC_OP_GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
	for (n = 0; n < op_cpu_info.pm_nclass; n++)
		memcpy(&cpu_info.pm_classes[n], &op_cpu_info.pm_classes[n],
		    sizeof(cpu_info.pm_classes[n]));

	/* one slot per possible PMC class; unused slots remain NULL */
	pmc_class_table = calloc(PMC_CLASS_TABLE_SIZE,
	    sizeof(struct pmc_class_descr *));

	/*
	 * NOTE(review): on this failure path 'pmc_syscall' is left set,
	 * unlike the other error returns above — verify intentional.
	 */
	if (pmc_class_table == NULL)
		return (-1);

	/*
	 * Get soft events list.
	 */
	soft_event_info.pm_class = PMC_CLASS_SOFT;
	if (PMC_CALL(PMC_OP_GETDYNEVENTINFO, &soft_event_info) < 0)
		return (pmc_syscall = -1);

	/* Map soft events to static list. */
	for (n = 0; n < soft_event_info.pm_nevent; n++) {
		soft_event_table[n].pm_ev_name =
		    soft_event_info.pm_events[n].pm_ev_name;
		soft_event_table[n].pm_ev_code =
		    soft_event_info.pm_events[n].pm_ev_code;
	}
	soft_class_table_descr.pm_evc_event_table_size = \
	    soft_event_info.pm_nevent;
	soft_class_table_descr.pm_evc_event_table = \
	    soft_event_table;

	/*
	 * Fill in the class table.
	 *
	 * 'i' walks the classes reported by the kernel while 'n' is the
	 * next free slot in 'pmc_class_table'.
	 */
	n = 0;
	for (unsigned i = 0; i < PMC_CLASS_TABLE_SIZE; i++) {
		switch (cpu_info.pm_classes[i].pm_class) {
#if defined(__amd64__) || defined(__i386__)
		case PMC_CLASS_TSC:
			pmc_class_table[n++] = &tsc_class_table_descr;
			break;

		case PMC_CLASS_K8:
			pmc_class_table[n++] = &k8_class_table_descr;
			break;
#endif

		case PMC_CLASS_SOFT:
			pmc_class_table[n++] = &soft_class_table_descr;
			break;

#if defined(__arm__)
		case PMC_CLASS_ARMV7:
			switch (cpu_info.pm_cputype) {
			case PMC_CPU_ARMV7_CORTEX_A8:
				pmc_class_table[n++] =
				    &cortex_a8_class_table_descr;
				break;
			case PMC_CPU_ARMV7_CORTEX_A9:
				pmc_class_table[n++] =
				    &cortex_a9_class_table_descr;
				break;
			default:
				errno = ENXIO;
				return (pmc_syscall = -1);
			}
			break;
#endif

#if defined(__aarch64__)
		case PMC_CLASS_ARMV8:
			switch (cpu_info.pm_cputype) {
			case PMC_CPU_ARMV8_CORTEX_A53:
				pmc_class_table[n++] =
				    &cortex_a53_class_table_descr;
				break;
			case PMC_CPU_ARMV8_CORTEX_A57:
				pmc_class_table[n++] =
				    &cortex_a57_class_table_descr;
				break;
			case PMC_CPU_ARMV8_CORTEX_A76:
				pmc_class_table[n++] =
				    &cortex_a76_class_table_descr;
				break;
			default:
				errno = ENXIO;
				return (pmc_syscall = -1);
			}
			break;

		case PMC_CLASS_DMC620_PMU_CD2:
			pmc_class_table[n++] =
			    &dmc620_pmu_cd2_class_table_descr;
			break;

		case PMC_CLASS_DMC620_PMU_C:
			pmc_class_table[n++] = &dmc620_pmu_c_class_table_descr;
			break;

		case PMC_CLASS_CMN600_PMU:
			pmc_class_table[n++] = &cmn600_pmu_class_table_descr;
			break;
#endif

#if defined(__powerpc__)
		case PMC_CLASS_PPC7450:
			pmc_class_table[n++] = &ppc7450_class_table_descr;
			break;

		case PMC_CLASS_PPC970:
			pmc_class_table[n++] = &ppc970_class_table_descr;
			break;

		case PMC_CLASS_E500:
			pmc_class_table[n++] = &e500_class_table_descr;
			break;
#endif

		default:
			/* classes not handled on this architecture */
#if defined(DEBUG)
			printf("pm_class: 0x%x\n",
			    cpu_info.pm_classes[i].pm_class);
#endif
			break;
		}
	}

#define	PMC_MDEP_INIT(C) pmc_mdep_event_aliases = C##_aliases

	/* Configure the event name parser. */
	switch (cpu_info.pm_cputype) {
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_AMD_K8:
		PMC_MDEP_INIT(k8);
		break;
#endif
	case PMC_CPU_GENERIC:
		PMC_MDEP_INIT(generic);
		break;
#if defined(__arm__)
	case PMC_CPU_ARMV7_CORTEX_A8:
		PMC_MDEP_INIT(cortex_a8);
		break;
	case PMC_CPU_ARMV7_CORTEX_A9:
		PMC_MDEP_INIT(cortex_a9);
		break;
#endif
#if defined(__aarch64__)
	case PMC_CPU_ARMV8_CORTEX_A53:
		PMC_MDEP_INIT(cortex_a53);
		break;
	case PMC_CPU_ARMV8_CORTEX_A57:
		PMC_MDEP_INIT(cortex_a57);
		break;
	case PMC_CPU_ARMV8_CORTEX_A76:
		PMC_MDEP_INIT(cortex_a76);
		break;
#endif
#if defined(__powerpc__)
	case PMC_CPU_PPC_7450:
		PMC_MDEP_INIT(ppc7450);
		break;
	case PMC_CPU_PPC_970:
		PMC_MDEP_INIT(ppc970);
		break;
	case PMC_CPU_PPC_E500:
		PMC_MDEP_INIT(e500);
		break;
#endif
	default:
		/*
		 * Some kind of CPU this version of the library knows nothing
		 * about.  This shouldn't happen since the abi version check
		 * should have caught this.
		 */
#if defined(__amd64__) || defined(__i386__) || defined(__powerpc64__)
		break;
#endif
		errno = ENXIO;
		return (pmc_syscall = -1);
	}

	return (0);
}
1613
1614const char *
1615pmc_name_of_capability(enum pmc_caps cap)
1616{
1617	int i;
1618
1619	/*
1620	 * 'cap' should have a single bit set and should be in
1621	 * range.
1622	 */
1623	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
1624	    cap > PMC_CAP_LAST) {
1625		errno = EINVAL;
1626		return (NULL);
1627	}
1628
1629	i = ffs(cap);
1630	return (pmc_capability_names[i - 1]);
1631}
1632
1633const char *
1634pmc_name_of_class(enum pmc_class pc)
1635{
1636	size_t n;
1637
1638	for (n = 0; n < PMC_TABLE_SIZE(pmc_class_names); n++)
1639		if (pc == pmc_class_names[n].pm_class)
1640			return (pmc_class_names[n].pm_name);
1641
1642	errno = EINVAL;
1643	return (NULL);
1644}
1645
1646const char *
1647pmc_name_of_cputype(enum pmc_cputype cp)
1648{
1649	size_t n;
1650
1651	for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
1652		if (cp == pmc_cputype_names[n].pm_cputype)
1653			return (pmc_cputype_names[n].pm_name);
1654
1655	errno = EINVAL;
1656	return (NULL);
1657}
1658
1659const char *
1660pmc_name_of_disposition(enum pmc_disp pd)
1661{
1662	if ((int) pd >= PMC_DISP_FIRST &&
1663	    pd <= PMC_DISP_LAST)
1664		return (pmc_disposition_names[pd]);
1665
1666	errno = EINVAL;
1667	return (NULL);
1668}
1669
/*
 * Return the symbolic name of event 'pe' for CPU type 'cpu', or NULL
 * if the event is not known.  The event code is first mapped to the
 * event table of its PMC class using the PMC_EV_*_FIRST/_LAST code
 * ranges; for the ARM classes the table additionally depends on the
 * CPU type.  The selected table is then scanned for a matching code.
 */
const char *
_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
{
	const struct pmc_event_descr *ev, *evfence;

	/* [ev, evfence) delimits the candidate table; empty => no match */
	ev = evfence = NULL;
	if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
		ev = k8_event_table;
		evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);

	} else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) {
		switch (cpu) {
		case PMC_CPU_ARMV7_CORTEX_A8:
			ev = cortex_a8_event_table;
			evfence = cortex_a8_event_table + PMC_EVENT_TABLE_SIZE(cortex_a8);
			break;
		case PMC_CPU_ARMV7_CORTEX_A9:
			ev = cortex_a9_event_table;
			evfence = cortex_a9_event_table + PMC_EVENT_TABLE_SIZE(cortex_a9);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) {
		switch (cpu) {
		case PMC_CPU_ARMV8_CORTEX_A53:
			ev = cortex_a53_event_table;
			evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53);
			break;
		case PMC_CPU_ARMV8_CORTEX_A57:
			ev = cortex_a57_event_table;
			evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57);
			break;
		case PMC_CPU_ARMV8_CORTEX_A76:
			ev = cortex_a76_event_table;
			evfence = cortex_a76_event_table + PMC_EVENT_TABLE_SIZE(cortex_a76);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_CMN600_PMU_FIRST &&
	    pe <= PMC_EV_CMN600_PMU_LAST) {
		ev = cmn600_pmu_event_table;
		evfence = cmn600_pmu_event_table +
		    PMC_EVENT_TABLE_SIZE(cmn600_pmu);
	} else if (pe >= PMC_EV_DMC620_PMU_CD2_FIRST &&
	    pe <= PMC_EV_DMC620_PMU_CD2_LAST) {
		ev = dmc620_pmu_cd2_event_table;
		evfence = dmc620_pmu_cd2_event_table +
		    PMC_EVENT_TABLE_SIZE(dmc620_pmu_cd2);
	} else if (pe >= PMC_EV_DMC620_PMU_C_FIRST &&
	    pe <= PMC_EV_DMC620_PMU_C_LAST) {
		ev = dmc620_pmu_c_event_table;
		evfence = dmc620_pmu_c_event_table +
		    PMC_EVENT_TABLE_SIZE(dmc620_pmu_c);
	} else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
		ev = ppc7450_event_table;
		evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
	} else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) {
		ev = ppc970_event_table;
		evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970);
	} else if (pe >= PMC_EV_E500_FIRST && pe <= PMC_EV_E500_LAST) {
		ev = e500_event_table;
		evfence = e500_event_table + PMC_EVENT_TABLE_SIZE(e500);
	} else if (pe == PMC_EV_TSC_TSC) {
		ev = tsc_event_table;
		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
	} else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) {
		/* soft event table is sized at pmc_init() time */
		ev = soft_event_table;
		evfence = soft_event_table + soft_event_info.pm_nevent;
	}

	/* scan the selected table for an exact event-code match */
	for (; ev != evfence; ev++)
		if (pe == ev->pm_ev_code)
			return (ev->pm_ev_name);

	return (NULL);
}
1748
1749const char *
1750pmc_name_of_event(enum pmc_event pe)
1751{
1752	const char *n;
1753
1754	if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
1755		return (n);
1756
1757	errno = EINVAL;
1758	return (NULL);
1759}
1760
1761const char *
1762pmc_name_of_mode(enum pmc_mode pm)
1763{
1764	if ((int) pm >= PMC_MODE_FIRST &&
1765	    pm <= PMC_MODE_LAST)
1766		return (pmc_mode_names[pm]);
1767
1768	errno = EINVAL;
1769	return (NULL);
1770}
1771
1772const char *
1773pmc_name_of_state(enum pmc_state ps)
1774{
1775	if ((int) ps >= PMC_STATE_FIRST &&
1776	    ps <= PMC_STATE_LAST)
1777		return (pmc_state_names[ps]);
1778
1779	errno = EINVAL;
1780	return (NULL);
1781}
1782
1783int
1784pmc_ncpu(void)
1785{
1786	if (pmc_syscall == -1) {
1787		errno = ENXIO;
1788		return (-1);
1789	}
1790
1791	return (cpu_info.pm_ncpu);
1792}
1793
1794int
1795pmc_npmc(int cpu)
1796{
1797	if (pmc_syscall == -1) {
1798		errno = ENXIO;
1799		return (-1);
1800	}
1801
1802	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
1803		errno = EINVAL;
1804		return (-1);
1805	}
1806
1807	return (cpu_info.pm_npmc);
1808}
1809
1810int
1811pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
1812{
1813	int nbytes, npmc;
1814	struct pmc_op_getpmcinfo *pmci;
1815
1816	if ((npmc = pmc_npmc(cpu)) < 0)
1817		return (-1);
1818
1819	nbytes = sizeof(struct pmc_op_getpmcinfo) +
1820	    npmc * sizeof(struct pmc_info);
1821
1822	if ((pmci = calloc(1, nbytes)) == NULL)
1823		return (-1);
1824
1825	pmci->pm_cpu  = cpu;
1826
1827	if (PMC_CALL(PMC_OP_GETPMCINFO, pmci) < 0) {
1828		free(pmci);
1829		return (-1);
1830	}
1831
1832	/* kernel<->library, library<->userland interfaces are identical */
1833	*ppmci = (struct pmc_pmcinfo *) pmci;
1834	return (0);
1835}
1836
1837int
1838pmc_read(pmc_id_t pmc, pmc_value_t *value)
1839{
1840	struct pmc_op_pmcrw pmc_read_op;
1841
1842	pmc_read_op.pm_pmcid = pmc;
1843	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
1844	pmc_read_op.pm_value = -1;
1845
1846	if (PMC_CALL(PMC_OP_PMCRW, &pmc_read_op) < 0)
1847		return (-1);
1848
1849	*value = pmc_read_op.pm_value;
1850	return (0);
1851}
1852
1853int
1854pmc_release(pmc_id_t pmc)
1855{
1856	struct pmc_op_simple	pmc_release_args;
1857
1858	pmc_release_args.pm_pmcid = pmc;
1859	return (PMC_CALL(PMC_OP_PMCRELEASE, &pmc_release_args));
1860}
1861
1862int
1863pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
1864{
1865	struct pmc_op_pmcrw pmc_rw_op;
1866
1867	pmc_rw_op.pm_pmcid = pmc;
1868	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
1869	pmc_rw_op.pm_value = newvalue;
1870
1871	if (PMC_CALL(PMC_OP_PMCRW, &pmc_rw_op) < 0)
1872		return (-1);
1873
1874	*oldvaluep = pmc_rw_op.pm_value;
1875	return (0);
1876}
1877
1878int
1879pmc_set(pmc_id_t pmc, pmc_value_t value)
1880{
1881	struct pmc_op_pmcsetcount sc;
1882
1883	sc.pm_pmcid = pmc;
1884	sc.pm_count = value;
1885
1886	if (PMC_CALL(PMC_OP_PMCSETCOUNT, &sc) < 0)
1887		return (-1);
1888	return (0);
1889}
1890
1891int
1892pmc_start(pmc_id_t pmc)
1893{
1894	struct pmc_op_simple	pmc_start_args;
1895
1896	pmc_start_args.pm_pmcid = pmc;
1897	return (PMC_CALL(PMC_OP_PMCSTART, &pmc_start_args));
1898}
1899
1900int
1901pmc_stop(pmc_id_t pmc)
1902{
1903	struct pmc_op_simple	pmc_stop_args;
1904
1905	pmc_stop_args.pm_pmcid = pmc;
1906	return (PMC_CALL(PMC_OP_PMCSTOP, &pmc_stop_args));
1907}
1908
1909int
1910pmc_width(pmc_id_t pmcid, uint32_t *width)
1911{
1912	unsigned int i;
1913	enum pmc_class cl;
1914
1915	cl = PMC_ID_TO_CLASS(pmcid);
1916	for (i = 0; i < cpu_info.pm_nclass; i++)
1917		if (cpu_info.pm_classes[i].pm_class == cl) {
1918			*width = cpu_info.pm_classes[i].pm_width;
1919			return (0);
1920		}
1921	errno = EINVAL;
1922	return (-1);
1923}
1924
1925int
1926pmc_write(pmc_id_t pmc, pmc_value_t value)
1927{
1928	struct pmc_op_pmcrw pmc_write_op;
1929
1930	pmc_write_op.pm_pmcid = pmc;
1931	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
1932	pmc_write_op.pm_value = value;
1933	return (PMC_CALL(PMC_OP_PMCRW, &pmc_write_op));
1934}
1935
1936int
1937pmc_writelog(uint32_t userdata)
1938{
1939	struct pmc_op_writelog wl;
1940
1941	wl.pm_userdata = userdata;
1942	return (PMC_CALL(PMC_OP_WRITELOG, &wl));
1943}
1944