libpmc.c revision 204635
1/*-
2 * Copyright (c) 2003-2008 Joseph Koshy
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/lib/libpmc/libpmc.c 204635 2010-03-03 15:05:58Z gnn $");
29
30#include <sys/types.h>
31#include <sys/module.h>
32#include <sys/pmc.h>
33#include <sys/syscall.h>
34
35#include <ctype.h>
36#include <errno.h>
37#include <fcntl.h>
38#include <pmc.h>
39#include <stdio.h>
40#include <stdlib.h>
41#include <string.h>
42#include <strings.h>
43#include <unistd.h>
44
45#include "libpmcinternal.h"
46
/*
 * Function prototypes.
 *
 * One allocation-time parser exists per PMC class; each translates a
 * class-specific counter specification string into the fields of a
 * struct pmc_op_pmcallocate.  The prototypes are conditional on the
 * target architecture because each class only exists on certain CPUs.
 */
#if defined(__i386__)
/* AMD K7 (Athlon): 32-bit x86 only. */
static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
/* Intel fixed-function (IAF) and programmable (IAP) counters. */
static int iaf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int iap_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
/* AMD K8 and Intel Pentium 4. */
static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__i386__)
/* Intel Pentium (P5) and Pentium Pro/II/III (P6): 32-bit x86 only. */
static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
/* Time stamp counter, modelled as a one-event PMC class. */
static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__XSCALE__)
static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif

#if defined(__mips__)
static int mips24k_allocate_pmc(enum pmc_event _pe, char* ctrspec,
			     struct pmc_op_pmcallocate *_pmc_config);
#endif /* __mips__ */
81
82
/*
 * Invoke an operation on the hwpmc(4) driver through its private
 * system call.  'pmc_syscall' is the syscall number, filled in by
 * pmc_init().
 */
#define PMC_CALL(cmd, params)				\
	syscall(pmc_syscall, PMC_OP_##cmd, (params))

/*
 * Event aliases provide a way for the user to ask for generic events
 * like "cache-misses", or "instructions-retired".  These aliases are
 * mapped to the appropriate canonical event descriptions using a
 * lookup table.
 */
struct pmc_event_alias {
	const char	*pm_alias;	/* generic, user-visible name */
	const char	*pm_spec;	/* canonical event specification */
};

/* Alias table for the current CPU (selected at initialization time). */
static const struct pmc_event_alias *pmc_mdep_event_aliases;

/*
 * The pmc_event_descr structure maps symbolic names known to the user
 * to integer codes used by the PMC KLD.
 */
struct pmc_event_descr {
	const char	*pm_ev_name;
	enum pmc_event	pm_ev_code;
};

/*
 * The pmc_class_descr structure maps class name prefixes for
 * event names to event tables and other PMC class data.
 */
struct pmc_class_descr {
	const char	*pm_evc_name;		/* class prefix, e.g. "IAP-" */
	size_t		pm_evc_name_size;	/* prefix length, sans NUL */
	enum pmc_class	pm_evc_class;
	const struct pmc_event_descr *pm_evc_event_table;
	size_t		pm_evc_event_table_size;
	/* class-specific counter-spec parser (see prototypes above) */
	int		(*pm_evc_allocate_pmc)(enum pmc_event _pe,
			    char *_ctrspec, struct pmc_op_pmcallocate *_pa);
};

/* Number of elements in a statically sized table. */
#define	PMC_TABLE_SIZE(N)	(sizeof(N)/sizeof(N[0]))
#define	PMC_EVENT_TABLE_SIZE(N)	PMC_TABLE_SIZE(N##_event_table)

/* Expand a __PMC_EV() list entry into a name/code initializer. */
#undef	__PMC_EV
#define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },
127
/*
 * PMC_CLASSDEP_TABLE(NAME, CLASS)
 *
 * Define a table mapping event names and aliases to HWPMC event IDs.
 * Expands the per-class __PMC_EV_<CLASS>() event list (from the pmc
 * headers) through the __PMC_EV() macro defined above.
 */
#define	PMC_CLASSDEP_TABLE(N, C)				\
	static const struct pmc_event_descr N##_event_table[] =	\
	{							\
		__PMC_EV_##C()					\
	}

PMC_CLASSDEP_TABLE(iaf, IAF);
PMC_CLASSDEP_TABLE(k7, K7);
PMC_CLASSDEP_TABLE(k8, K8);
PMC_CLASSDEP_TABLE(p4, P4);
PMC_CLASSDEP_TABLE(p5, P5);
PMC_CLASSDEP_TABLE(p6, P6);
PMC_CLASSDEP_TABLE(xscale, XSCALE);
PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
147
/* Expand a __PMC_EV_ALIAS() entry into a name/code initializer. */
#undef	__PMC_EV_ALIAS
#define	__PMC_EV_ALIAS(N,CODE) 	{ N, PMC_EV_##CODE },

/*
 * Per-CPU-model event tables: IAP events under the model-specific
 * names defined by the __PMC_EV_ALIAS_*() lists in the pmc headers.
 */
static const struct pmc_event_descr atom_event_table[] =
{
	__PMC_EV_ALIAS_ATOM()
};

static const struct pmc_event_descr core_event_table[] =
{
	__PMC_EV_ALIAS_CORE()
};


static const struct pmc_event_descr core2_event_table[] =
{
	__PMC_EV_ALIAS_CORE2()
};

static const struct pmc_event_descr corei7_event_table[] =
{
	__PMC_EV_ALIAS_COREI7()
};
171
/*
 * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
 *
 * Map a CPU to the PMC classes it supports.  The primary class is
 * listed first, followed by any additional classes.
 */
#define	PMC_MDEP_TABLE(N,C,...)				\
	static const enum pmc_class N##_pmc_classes[] = {	\
		PMC_CLASS_##C, __VA_ARGS__			\
	}

PMC_MDEP_TABLE(atom, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(core, IAP, PMC_CLASS_TSC);
PMC_MDEP_TABLE(core2, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(corei7, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(k7, K7, PMC_CLASS_TSC);
PMC_MDEP_TABLE(k8, K8, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p4, P4, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p5, P5, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p6, P6, PMC_CLASS_TSC);
PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_XSCALE);
PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_MIPS24K);

/* The TSC is modelled as a PMC class with a single event. */
static const struct pmc_event_descr tsc_event_table[] =
{
	__PMC_EV_TSC()
};
198
/*
 * PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)
 *
 * Build a pmc_class_descr tying the "<CLASS>-" event-name prefix to
 * the EVENTS##_event_table defined above and the ALLOCATOR's
 * counter-spec parser.  Instantiations are gated on the architectures
 * where each class can exist.
 */
#undef	PMC_CLASS_TABLE_DESC
#define	PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)	\
static const struct pmc_class_descr NAME##_class_table_descr =	\
	{							\
		.pm_evc_name  = #CLASS "-",			\
		.pm_evc_name_size = sizeof(#CLASS "-") - 1,	\
		.pm_evc_class = PMC_CLASS_##CLASS ,		\
		.pm_evc_event_table = EVENTS##_event_table ,	\
		.pm_evc_event_table_size = 			\
			PMC_EVENT_TABLE_SIZE(EVENTS),		\
		.pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc	\
	}

#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(iaf, IAF, iaf, iaf);
PMC_CLASS_TABLE_DESC(atom, IAP, atom, iap);
PMC_CLASS_TABLE_DESC(core, IAP, core, iap);
PMC_CLASS_TABLE_DESC(core2, IAP, core2, iap);
PMC_CLASS_TABLE_DESC(corei7, IAP, corei7, iap);
#endif
#if	defined(__i386__)
PMC_CLASS_TABLE_DESC(k7, K7, k7, k7);
#endif
#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
PMC_CLASS_TABLE_DESC(p4, P4, p4, p4);
#endif
#if	defined(__i386__)
PMC_CLASS_TABLE_DESC(p5, P5, p5, p5);
PMC_CLASS_TABLE_DESC(p6, P6, p6, p6);
#endif
#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
#endif
#if	defined(__XSCALE__)
PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
#endif

#if defined(__mips__)
PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips24k);
#endif /* __mips__ */

#undef	PMC_CLASS_TABLE_DESC
242
/*
 * Class descriptor table for the current CPU; the number of entries
 * comes from cpu_info, which is filled in by pmc_init().
 */
static const struct pmc_class_descr **pmc_class_table;
#define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass

/* PMC classes supported by the current CPU (primary class first). */
static const enum pmc_class *pmc_mdep_class_list;
static size_t pmc_mdep_class_list_size;

/*
 * Mapping tables, mapping enumeration values to human readable
 * strings.
 */

static const char * pmc_capability_names[] = {
#undef	__PMC_CAP
#define	__PMC_CAP(N,V,D)	#N ,
	__PMC_CAPS()
};

static const char * pmc_class_names[] = {
#undef	__PMC_CLASS
#define __PMC_CLASS(C)	#C ,
	__PMC_CLASSES()
};

/* Map a CPU type code to its symbolic name. */
struct pmc_cputype_map {
	enum pmc_class	pm_cputype;
	const char	*pm_name;
};

static const struct pmc_cputype_map pmc_cputype_names[] = {
#undef	__PMC_CPU
#define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
	__PMC_CPUS()
};

static const char * pmc_disposition_names[] = {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	#D ,
	__PMC_DISPOSITIONS()
};

static const char * pmc_mode_names[] = {
#undef  __PMC_MODE
#define __PMC_MODE(M,N)	#M ,
	__PMC_MODES()
};

static const char * pmc_state_names[] = {
#undef  __PMC_STATE
#define __PMC_STATE(S) #S ,
	__PMC_STATES()
};

static int pmc_syscall = -1;		/* filled in by pmc_init() */

static struct pmc_cpuinfo cpu_info;	/* filled in by pmc_init() */

/* Event masks for events */
struct pmc_masks {
	const char	*pm_name;	/* mask keyword */
	const uint32_t	pm_value;	/* bits to OR into the event mask */
};
#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
#define	NULLMASK	PMCMASK(NULL,0)	/* table terminator */
306
307#if defined(__amd64__) || defined(__i386__)
308static int
309pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint32_t *evmask)
310{
311	const struct pmc_masks *pm;
312	char *q, *r;
313	int c;
314
315	if (pmask == NULL)	/* no mask keywords */
316		return (-1);
317	q = strchr(p, '=');	/* skip '=' */
318	if (*++q == '\0')	/* no more data */
319		return (-1);
320	c = 0;			/* count of mask keywords seen */
321	while ((r = strsep(&q, "+")) != NULL) {
322		for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
323		    pm++)
324			;
325		if (pm->pm_name == NULL) /* not found */
326			return (-1);
327		*evmask |= pm->pm_value;
328		c++;
329	}
330	return (c);
331}
332#endif
333
/* Keyword matching helpers: exact match, and "keyword=" prefix match. */
#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }

#if defined(__i386__)

/*
 * AMD K7 (Athlon) CPUs.
 */

static struct pmc_event_alias k7_aliases[] = {
	EV_ALIAS("branches",		"k7-retired-branches"),
	EV_ALIAS("branch-mispredicts",	"k7-retired-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k7-dc-misses"),
	EV_ALIAS("ic-misses",		"k7-ic-misses"),
	EV_ALIAS("instructions",	"k7-retired-instructions"),
	EV_ALIAS("interrupts",		"k7-hardware-interrupts"),
	EV_ALIAS(NULL, NULL)
};

/* Event qualifier keywords accepted by k7_allocate_pmc(). */
#define	K7_KW_COUNT	"count"
#define	K7_KW_EDGE	"edge"
#define	K7_KW_INV	"inv"
#define	K7_KW_OS	"os"
#define	K7_KW_UNITMASK	"unitmask"
#define	K7_KW_USR	"usr"
361
362static int
363k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
364    struct pmc_op_pmcallocate *pmc_config)
365{
366	char		*e, *p, *q;
367	int		c, has_unitmask;
368	uint32_t	count, unitmask;
369
370	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
371	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
372
373	if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
374	    pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
375	    pe == PMC_EV_K7_DC_WRITEBACKS) {
376		has_unitmask = 1;
377		unitmask = AMD_PMC_UNITMASK_MOESI;
378	} else
379		unitmask = has_unitmask = 0;
380
381	while ((p = strsep(&ctrspec, ",")) != NULL) {
382		if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
383			q = strchr(p, '=');
384			if (*++q == '\0') /* skip '=' */
385				return (-1);
386
387			count = strtol(q, &e, 0);
388			if (e == q || *e != '\0')
389				return (-1);
390
391			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
392			pmc_config->pm_md.pm_amd.pm_amd_config |=
393			    AMD_PMC_TO_COUNTER(count);
394
395		} else if (KWMATCH(p, K7_KW_EDGE)) {
396			pmc_config->pm_caps |= PMC_CAP_EDGE;
397		} else if (KWMATCH(p, K7_KW_INV)) {
398			pmc_config->pm_caps |= PMC_CAP_INVERT;
399		} else if (KWMATCH(p, K7_KW_OS)) {
400			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
401		} else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
402			if (has_unitmask == 0)
403				return (-1);
404			unitmask = 0;
405			q = strchr(p, '=');
406			if (*++q == '\0') /* skip '=' */
407				return (-1);
408
409			while ((c = tolower(*q++)) != 0)
410				if (c == 'm')
411					unitmask |= AMD_PMC_UNITMASK_M;
412				else if (c == 'o')
413					unitmask |= AMD_PMC_UNITMASK_O;
414				else if (c == 'e')
415					unitmask |= AMD_PMC_UNITMASK_E;
416				else if (c == 's')
417					unitmask |= AMD_PMC_UNITMASK_S;
418				else if (c == 'i')
419					unitmask |= AMD_PMC_UNITMASK_I;
420				else if (c == '+')
421					continue;
422				else
423					return (-1);
424
425			if (unitmask == 0)
426				return (-1);
427
428		} else if (KWMATCH(p, K7_KW_USR)) {
429			pmc_config->pm_caps |= PMC_CAP_USER;
430		} else
431			return (-1);
432	}
433
434	if (has_unitmask) {
435		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
436		pmc_config->pm_md.pm_amd.pm_amd_config |=
437		    AMD_PMC_TO_UNITMASK(unitmask);
438	}
439
440	return (0);
441
442}
443
444#endif
445
#if defined(__amd64__) || defined(__i386__)

/*
 * Intel Core (Family 6, Model E) PMCs.
 */

static struct pmc_event_alias core_aliases[] = {
	EV_ALIAS("branches",		"iap-br-instr-ret"),
	EV_ALIAS("branch-mispredicts",	"iap-br-mispred-ret"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-icache-misses"),
	EV_ALIAS("instructions",	"iap-instr-ret"),
	EV_ALIAS("interrupts",		"iap-core-hw-int-rx"),
	EV_ALIAS("unhalted-cycles",	"iap-unhalted-core-cycles"),
	EV_ALIAS(NULL, NULL)
};

/*
 * Intel Core2 (Family 6, Model F), Core2Extreme (Family 6, Model 17H)
 * and Atom (Family 6, model 1CH) PMCs.
 *
 * We map aliases to events on the fixed-function counters if these
 * are present.  Note that not all CPUs in this family contain fixed-function
 * counters.
 */

static struct pmc_event_alias core2_aliases[] = {
	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
	EV_ALIAS("instructions",	"iaf-instr-retired.any"),
	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
	EV_ALIAS("unhalted-cycles",	"iaf-cpu-clk-unhalted.core"),
	EV_ALIAS(NULL, NULL)
};

/* Fallback aliases for CPUs lacking fixed-function counters. */
static struct pmc_event_alias core2_aliases_without_iaf[] = {
	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
	EV_ALIAS("instructions",	"iap-inst-retired.any_p"),
	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
	EV_ALIAS("unhalted-cycles",	"iap-cpu-clk-unhalted.core_p"),
	EV_ALIAS(NULL, NULL)
};

/* Atom and Core i7 share the Core2 alias tables. */
#define	atom_aliases			core2_aliases
#define	atom_aliases_without_iaf	core2_aliases_without_iaf
#define corei7_aliases			core2_aliases
#define corei7_aliases_without_iaf	core2_aliases_without_iaf

/* Event qualifier keywords for Intel fixed-function counters. */
#define	IAF_KW_OS		"os"
#define	IAF_KW_USR		"usr"
#define	IAF_KW_ANYTHREAD	"anythread"
502
503/*
504 * Parse an event specifier for Intel fixed function counters.
505 */
506static int
507iaf_allocate_pmc(enum pmc_event pe, char *ctrspec,
508    struct pmc_op_pmcallocate *pmc_config)
509{
510	char *p;
511
512	(void) pe;
513
514	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
515	pmc_config->pm_md.pm_iaf.pm_iaf_flags = 0;
516
517	while ((p = strsep(&ctrspec, ",")) != NULL) {
518		if (KWMATCH(p, IAF_KW_OS))
519			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
520		else if (KWMATCH(p, IAF_KW_USR))
521			pmc_config->pm_caps |= PMC_CAP_USER;
522		else if (KWMATCH(p, IAF_KW_ANYTHREAD))
523			pmc_config->pm_md.pm_iaf.pm_iaf_flags |= IAF_ANY;
524		else
525			return (-1);
526	}
527
528	return (0);
529}
530
/*
 * Core/Core2 support.
 */

/* Event qualifier keywords accepted by iap_allocate_pmc(). */
#define	IAP_KW_AGENT		"agent"
#define	IAP_KW_ANYTHREAD	"anythread"
#define	IAP_KW_CACHESTATE	"cachestate"
#define	IAP_KW_CMASK		"cmask"
#define	IAP_KW_CORE		"core"
#define	IAP_KW_EDGE		"edge"
#define	IAP_KW_INV		"inv"
#define	IAP_KW_OS		"os"
#define	IAP_KW_PREFETCH		"prefetch"
#define	IAP_KW_SNOOPRESPONSE	"snoopresponse"
#define	IAP_KW_SNOOPTYPE	"snooptype"
#define	IAP_KW_TRANSITION	"trans"
#define	IAP_KW_USR		"usr"

/* "core=" qualifier: which core(s) to count events on. */
static struct pmc_masks iap_core_mask[] = {
	PMCMASK(all,	(0x3 << 14)),
	PMCMASK(this,	(0x1 << 14)),
	NULLMASK
};

/* "agent=" qualifier. */
static struct pmc_masks iap_agent_mask[] = {
	PMCMASK(this,	0),
	PMCMASK(any,	(0x1 << 13)),
	NULLMASK
};

/* "prefetch=" qualifier: include/exclude hardware prefetches. */
static struct pmc_masks iap_prefetch_mask[] = {
	PMCMASK(both,		(0x3 << 12)),
	PMCMASK(only,		(0x1 << 12)),
	PMCMASK(exclude,	0),
	NULLMASK
};

/* "cachestate=" qualifier: MESI state bits in umask bits 8-11. */
static struct pmc_masks iap_cachestate_mask[] = {
	PMCMASK(i,		(1 <<  8)),
	PMCMASK(s,		(1 <<  9)),
	PMCMASK(e,		(1 << 10)),
	PMCMASK(m,		(1 << 11)),
	NULLMASK
};

/* "snoopresponse=" qualifier. */
static struct pmc_masks iap_snoopresponse_mask[] = {
	PMCMASK(clean,		(1 << 8)),
	PMCMASK(hit,		(1 << 9)),
	PMCMASK(hitm,		(1 << 11)),
	NULLMASK
};

/* "snooptype=" qualifier. */
static struct pmc_masks iap_snooptype_mask[] = {
	PMCMASK(cmp2s,		(1 << 8)),
	PMCMASK(cmp2i,		(1 << 9)),
	NULLMASK
};

/* "trans=" qualifier (Core only). */
static struct pmc_masks iap_transition_mask[] = {
	PMCMASK(any,		0x00),
	PMCMASK(frequency,	0x10),
	NULLMASK
};
594
/*
 * Parse an event specifier for Intel programmable (IAP) counters.
 *
 * Decodes comma-separated qualifiers into capability bits and the
 * IAP config word in '*pmc_config'.  Some qualifiers are only valid
 * on particular CPU models, so cpu_info.pm_cputype is consulted.
 * Returns 0 on success, -1 on a parse error.
 */
static int
iap_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint32_t cachestate, evmask;
	int count, n;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
	    PMC_CAP_QUALIFIER);
	pmc_config->pm_md.pm_iap.pm_iap_config = 0;

	cachestate = evmask = 0;

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {

		n = 0;	/* qualifier values parsed for this keyword */
		if (KWPREFIXMATCH(p, IAP_KW_CMASK "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_iap.pm_iap_config |=
			    IAP_CMASK(count);
		} else if (KWMATCH(p, IAP_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, IAP_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, IAP_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWMATCH(p, IAP_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else if (KWMATCH(p, IAP_KW_ANYTHREAD)) {
			pmc_config->pm_md.pm_iap.pm_iap_config |= IAP_ANY;
		} else if (KWPREFIXMATCH(p, IAP_KW_CORE "=")) {
			n = pmc_parse_mask(iap_core_mask, p, &evmask);
			if (n != 1)	/* exactly one value allowed */
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_AGENT "=")) {
			n = pmc_parse_mask(iap_agent_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_PREFETCH "=")) {
			n = pmc_parse_mask(iap_prefetch_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_CACHESTATE "=")) {
			/* any combination of MESI bits is permitted */
			n = pmc_parse_mask(iap_cachestate_mask, p, &cachestate);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_CORE &&
		    KWPREFIXMATCH(p, IAP_KW_TRANSITION "=")) {
			n = pmc_parse_mask(iap_transition_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2 ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2EXTREME ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_COREI7) {
			if (KWPREFIXMATCH(p, IAP_KW_SNOOPRESPONSE "=")) {
				n = pmc_parse_mask(iap_snoopresponse_mask, p,
				    &evmask);
			} else if (KWPREFIXMATCH(p, IAP_KW_SNOOPTYPE "=")) {
				n = pmc_parse_mask(iap_snooptype_mask, p,
				    &evmask);
			} else
				return (-1);
		} else
			return (-1);

		if (n < 0)	/* Parsing failed. */
			return (-1);
	}

	pmc_config->pm_md.pm_iap.pm_iap_config |= evmask;

	/*
	 * If the event requires a 'cachestate' qualifier but was not
	 * specified by the user, use a sensible default.
	 */
	switch (pe) {
	case PMC_EV_IAP_EVENT_28H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_29H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_2AH: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_2BH: /* Atom, Core2 */
	case PMC_EV_IAP_EVENT_2EH: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_30H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_32H: /* Core */
	case PMC_EV_IAP_EVENT_40H: /* Core */
	case PMC_EV_IAP_EVENT_41H: /* Core */
	case PMC_EV_IAP_EVENT_42H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_77H: /* Core */
		if (cachestate == 0)
			cachestate = (0xF << 8);	/* all MESI states */
		/* FALLTHROUGH */
	default:
		break;
	}

	pmc_config->pm_md.pm_iap.pm_iap_config |= cachestate;

	return (0);
}
699
/*
 * AMD K8 PMCs.
 *
 * These are very similar to AMD K7 PMCs, but support more kinds of
 * events.
 */

static struct pmc_event_alias k8_aliases[] = {
	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
	EV_ALIAS("branch-mispredicts",
	    "k8-fr-retired-taken-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k8-dc-miss"),
	EV_ALIAS("ic-misses",		"k8-ic-miss"),
	EV_ALIAS("instructions",	"k8-fr-retired-x86-instructions"),
	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",	"k8-bu-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};
719
/* Expand to a mask entry whose value is the single bit 'V'. */
#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))

/*
 * Parsing tables
 *
 * One "mask=" table per K8 event that accepts a unit mask; the
 * k8_allocate_pmc() switch below selects the right table for the
 * event being allocated.
 */

/* fp dispatched fpu ops */
static const struct pmc_masks k8_mask_fdfo[] = {
	__K8MASK(add-pipe-excluding-junk-ops,	0),
	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
	__K8MASK(store-pipe-excluding-junk-ops,	2),
	__K8MASK(add-pipe-junk-ops,		3),
	__K8MASK(multiply-pipe-junk-ops,	4),
	__K8MASK(store-pipe-junk-ops,		5),
	NULLMASK
};

/* ls segment register loads */
static const struct pmc_masks k8_mask_lsrl[] = {
	__K8MASK(es,	0),
	__K8MASK(cs,	1),
	__K8MASK(ss,	2),
	__K8MASK(ds,	3),
	__K8MASK(fs,	4),
	__K8MASK(gs,	5),
	__K8MASK(hs,	6),
	NULLMASK
};

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions,	0),
	__K8MASK(cycles-in-request,	1),
	__K8MASK(cycles-to-complete,	2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid,	0),
	__K8MASK(shared,	1),
	__K8MASK(exclusive,	2),
	__K8MASK(owner,		3),
	__K8MASK(modified,	4),
	NULLMASK
};

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber,	0),
	__K8MASK(piggyback,	1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
static const struct pmc_masks k8_mask_ddpi[] = {
	__K8MASK(load,	0),
	__K8MASK(store,	1),
	__K8MASK(nta,	2),
	NULLMASK
};

/* dc dcache accesses by locks */
static const struct pmc_masks k8_mask_dabl[] = {
	__K8MASK(accesses,	0),
	__K8MASK(misses,	1),
	NULLMASK
};

/* bu internal l2 request */
static const struct pmc_masks k8_mask_bilr[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	__K8MASK(tag-snoop,	3),
	__K8MASK(cancelled,	4),
	NULLMASK
};

/* bu fill request l2 miss */
static const struct pmc_masks k8_mask_bfrlm[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	NULLMASK
};

/* bu fill into l2 */
static const struct pmc_masks k8_mask_bfil[] = {
	__K8MASK(dirty-l2-victim,	0),
	__K8MASK(victim-from-l2,	1),
	NULLMASK
};

/* fr retired fpu instructions */
static const struct pmc_masks k8_mask_frfi[] = {
	__K8MASK(x87,			0),
	__K8MASK(mmx-3dnow,		1),
	__K8MASK(packed-sse-sse2,	2),
	__K8MASK(scalar-sse-sse2,	3),
	NULLMASK
};

/* fr retired fastpath double op instructions */
static const struct pmc_masks k8_mask_frfdoi[] = {
	__K8MASK(low-op-pos-0,		0),
	__K8MASK(low-op-pos-1,		1),
	__K8MASK(low-op-pos-2,		2),
	NULLMASK
};

/* fr fpu exceptions */
static const struct pmc_masks k8_mask_ffe[] = {
	__K8MASK(x87-reclass-microfaults,	0),
	__K8MASK(sse-retype-microfaults,	1),
	__K8MASK(sse-reclass-microfaults,	2),
	__K8MASK(sse-and-x87-microtraps,	3),
	NULLMASK
};

/* nb memory controller page access event */
static const struct pmc_masks k8_mask_nmcpae[] = {
	__K8MASK(page-hit,	0),
	__K8MASK(page-miss,	1),
	__K8MASK(page-conflict,	2),
	NULLMASK
};

/* nb memory controller turnaround */
static const struct pmc_masks k8_mask_nmct[] = {
	__K8MASK(dimm-turnaround,		0),
	__K8MASK(read-to-write-turnaround,	1),
	__K8MASK(write-to-read-turnaround,	2),
	NULLMASK
};

/* nb memory controller bypass saturation */
static const struct pmc_masks k8_mask_nmcbs[] = {
	__K8MASK(memory-controller-hi-pri-bypass,	0),
	__K8MASK(memory-controller-lo-pri-bypass,	1),
	__K8MASK(dram-controller-interface-bypass,	2),
	__K8MASK(dram-controller-queue-bypass,		3),
	NULLMASK
};

/* nb sized commands */
static const struct pmc_masks k8_mask_nsc[] = {
	__K8MASK(nonpostwrszbyte,	0),
	__K8MASK(nonpostwrszdword,	1),
	__K8MASK(postwrszbyte,		2),
	__K8MASK(postwrszdword,		3),
	__K8MASK(rdszbyte,		4),
	__K8MASK(rdszdword,		5),
	__K8MASK(rdmodwr,		6),
	NULLMASK
};

/* nb probe result */
static const struct pmc_masks k8_mask_npr[] = {
	__K8MASK(probe-miss,		0),
	__K8MASK(probe-hit,		1),
	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
	NULLMASK
};

/* nb hypertransport bus bandwidth */
static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
	__K8MASK(command,	0),
	__K8MASK(data,	1),
	__K8MASK(buffer-release, 2),
	__K8MASK(nop,	3),
	NULLMASK
};

#undef	__K8MASK

/* Event qualifier keywords accepted by k8_allocate_pmc(). */
#define	K8_KW_COUNT	"count"
#define	K8_KW_EDGE	"edge"
#define	K8_KW_INV	"inv"
#define	K8_KW_MASK	"mask"
#define	K8_KW_OS	"os"
#define	K8_KW_USR	"usr"
903
904static int
905k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
906    struct pmc_op_pmcallocate *pmc_config)
907{
908	char		*e, *p, *q;
909	int		n;
910	uint32_t	count, evmask;
911	const struct pmc_masks	*pm, *pmask;
912
913	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
914	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
915
916	pmask = NULL;
917	evmask = 0;
918
919#define	__K8SETMASK(M) pmask = k8_mask_##M
920
921	/* setup parsing tables */
922	switch (pe) {
923	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
924		__K8SETMASK(fdfo);
925		break;
926	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
927		__K8SETMASK(lsrl);
928		break;
929	case PMC_EV_K8_LS_LOCKED_OPERATION:
930		__K8SETMASK(llo);
931		break;
932	case PMC_EV_K8_DC_REFILL_FROM_L2:
933	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
934	case PMC_EV_K8_DC_COPYBACK:
935		__K8SETMASK(dc);
936		break;
937	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
938		__K8SETMASK(dobee);
939		break;
940	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
941		__K8SETMASK(ddpi);
942		break;
943	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
944		__K8SETMASK(dabl);
945		break;
946	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
947		__K8SETMASK(bilr);
948		break;
949	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
950		__K8SETMASK(bfrlm);
951		break;
952	case PMC_EV_K8_BU_FILL_INTO_L2:
953		__K8SETMASK(bfil);
954		break;
955	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
956		__K8SETMASK(frfi);
957		break;
958	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
959		__K8SETMASK(frfdoi);
960		break;
961	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
962		__K8SETMASK(ffe);
963		break;
964	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
965		__K8SETMASK(nmcpae);
966		break;
967	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
968		__K8SETMASK(nmct);
969		break;
970	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
971		__K8SETMASK(nmcbs);
972		break;
973	case PMC_EV_K8_NB_SIZED_COMMANDS:
974		__K8SETMASK(nsc);
975		break;
976	case PMC_EV_K8_NB_PROBE_RESULT:
977		__K8SETMASK(npr);
978		break;
979	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
980	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
981	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
982		__K8SETMASK(nhbb);
983		break;
984
985	default:
986		break;		/* no options defined */
987	}
988
989	while ((p = strsep(&ctrspec, ",")) != NULL) {
990		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
991			q = strchr(p, '=');
992			if (*++q == '\0') /* skip '=' */
993				return (-1);
994
995			count = strtol(q, &e, 0);
996			if (e == q || *e != '\0')
997				return (-1);
998
999			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1000			pmc_config->pm_md.pm_amd.pm_amd_config |=
1001			    AMD_PMC_TO_COUNTER(count);
1002
1003		} else if (KWMATCH(p, K8_KW_EDGE)) {
1004			pmc_config->pm_caps |= PMC_CAP_EDGE;
1005		} else if (KWMATCH(p, K8_KW_INV)) {
1006			pmc_config->pm_caps |= PMC_CAP_INVERT;
1007		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
1008			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1009				return (-1);
1010			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1011		} else if (KWMATCH(p, K8_KW_OS)) {
1012			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1013		} else if (KWMATCH(p, K8_KW_USR)) {
1014			pmc_config->pm_caps |= PMC_CAP_USER;
1015		} else
1016			return (-1);
1017	}
1018
1019	/* other post processing */
1020	switch (pe) {
1021	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1022	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
1023	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
1024	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1025	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1026	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1027		/* XXX only available in rev B and later */
1028		break;
1029	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1030		/* XXX only available in rev C and later */
1031		break;
1032	case PMC_EV_K8_LS_LOCKED_OPERATION:
1033		/* XXX CPU Rev A,B evmask is to be zero */
1034		if (evmask & (evmask - 1)) /* > 1 bit set */
1035			return (-1);
1036		if (evmask == 0) {
1037			evmask = 0x01; /* Rev C and later: #instrs */
1038			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1039		}
1040		break;
1041	default:
1042		if (evmask == 0 && pmask != NULL) {
1043			for (pm = pmask; pm->pm_name; pm++)
1044				evmask |= pm->pm_value;
1045			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1046		}
1047	}
1048
1049	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1050		pmc_config->pm_md.pm_amd.pm_amd_config =
1051		    AMD_PMC_TO_UNITMASK(evmask);
1052
1053	return (0);
1054}
1055
1056#endif
1057
#if defined(__amd64__) || defined(__i386__)

/*
 * Intel P4 PMCs
 */

static struct pmc_event_alias p4_aliases[] = {
	EV_ALIAS("branches",		"p4-branch-retired,mask=mmtp+mmtm"),
	EV_ALIAS("branch-mispredicts",	"p4-mispred-branch-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("instructions",
	    "p4-instr-retired,mask=nbogusntag+nbogustag"),
	EV_ALIAS("unhalted-cycles",	"p4-global-power-events"),
	EV_ALIAS(NULL, NULL)
};

/* Event qualifier keywords accepted by p4_allocate_pmc(). */
#define	P4_KW_ACTIVE	"active"
#define	P4_KW_ACTIVE_ANY "any"
#define	P4_KW_ACTIVE_BOTH "both"
#define	P4_KW_ACTIVE_NONE "none"
#define	P4_KW_ACTIVE_SINGLE "single"
#define	P4_KW_BUSREQTYPE "busreqtype"
#define	P4_KW_CASCADE	"cascade"
#define	P4_KW_EDGE	"edge"
#define	P4_KW_INV	"complement"
#define	P4_KW_OS	"os"
#define	P4_KW_MASK	"mask"
#define	P4_KW_PRECISE	"precise"
#define	P4_KW_TAG	"tag"
#define	P4_KW_THRESHOLD	"threshold"
#define	P4_KW_USR	"usr"

/* Expand to a mask entry whose value is the single bit 'V'. */
#define	__P4MASK(N,V) PMCMASK(N, (1 << (V)))
1091
/*
 * Per-event tables of allowed event mask ("mask=") qualifier bits for
 * P4 events.  Each table is NULLMASK terminated; p4_allocate_pmc()
 * selects the appropriate table for the event being allocated.
 */
static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
	__P4MASK(dd, 0),
	__P4MASK(db, 1),
	__P4MASK(di, 2),
	__P4MASK(bd, 3),
	__P4MASK(bb, 4),
	__P4MASK(bi, 5),
	__P4MASK(id, 6),
	__P4MASK(ib, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
	__P4MASK(tcmiss, 0),
	NULLMASK,
};

static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
	__P4MASK(hit, 0),
	__P4MASK(miss, 1),
	__P4MASK(hit-uc, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
	__P4MASK(st-rb-full, 2),
	__P4MASK(64k-conf, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
	__P4MASK(lsc, 0),
	__P4MASK(ssc, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
	__P4MASK(split-ld, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
	__P4MASK(split-st, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
	__P4MASK(no-sta, 1),
	__P4MASK(no-std, 3),
	__P4MASK(partial-data, 4),
	__P4MASK(unalgn-addr, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
	__P4MASK(dtmiss, 0),
	__P4MASK(itmiss, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
	__P4MASK(rd-2ndl-hits, 0),
	__P4MASK(rd-2ndl-hite, 1),
	__P4MASK(rd-2ndl-hitm, 2),
	__P4MASK(rd-3rdl-hits, 3),
	__P4MASK(rd-3rdl-hite, 4),
	__P4MASK(rd-3rdl-hitm, 5),
	__P4MASK(rd-2ndl-miss, 8),
	__P4MASK(rd-3rdl-miss, 9),
	__P4MASK(wr-2ndl-miss, 10),
	NULLMASK
};

static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

/* Same bit layout as p4_mask_ia above. */
static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
	__P4MASK(drdy-drv, 0),
	__P4MASK(drdy-own, 1),
	__P4MASK(drdy-other, 2),
	__P4MASK(dbsy-drv, 3),
	__P4MASK(dbsy-own, 4),
	__P4MASK(dbsy-other, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
	__P4MASK(req-type0, 0),
	__P4MASK(req-type1, 1),
	__P4MASK(req-len0, 2),
	__P4MASK(req-len1, 3),
	__P4MASK(req-io-type, 5),
	__P4MASK(req-lock-type, 6),
	__P4MASK(req-cache-type, 7),
	__P4MASK(req-split-type, 8),
	__P4MASK(req-dem-type, 9),
	__P4MASK(req-ord-type, 10),
	__P4MASK(mem-type0, 11),
	__P4MASK(mem-type1, 12),
	__P4MASK(mem-type2, 13),
	NULLMASK
};

static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
	__P4MASK(allp0, 3),
	__P4MASK(allp2, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
	__P4MASK(running, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
	__P4MASK(cisc, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
	__P4MASK(from-tc-build, 0),
	__P4MASK(from-tc-deliver, 1),
	__P4MASK(from-rom, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_rmbt[] = {
	/* retired mispred branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	__P4MASK(return, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	/* NOTE(review): 'retired' here vs 'return' in p4_mask_rmbt above;
	 * verify the intended mask name against the Intel SDM. */
	__P4MASK(retired, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
	__P4MASK(sbfull, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
	__P4MASK(wcb-evicts, 0),
	__P4MASK(wcb-full-evict, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_fee[] = { /* front end event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ee[] = { /* execution event */
	__P4MASK(nbogus0, 0),
	__P4MASK(nbogus1, 1),
	__P4MASK(nbogus2, 2),
	__P4MASK(nbogus3, 3),
	__P4MASK(bogus0, 4),
	__P4MASK(bogus1, 5),
	__P4MASK(bogus2, 6),
	__P4MASK(bogus3, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_re[] = { /* replay event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
	__P4MASK(nbogusntag, 0),
	__P4MASK(nbogustag, 1),
	__P4MASK(bogusntag, 2),
	__P4MASK(bogustag, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ut[] = { /* uop type */
	__P4MASK(tagloads, 1),
	__P4MASK(tagstores, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_br[] = { /* branch retired */
	__P4MASK(mmnp, 0),
	__P4MASK(mmnm, 1),
	__P4MASK(mmtp, 2),
	__P4MASK(mmtm, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
	__P4MASK(nbogus, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
	__P4MASK(fpsu, 0),
	__P4MASK(fpso, 1),
	__P4MASK(poao, 2),
	__P4MASK(poau, 3),
	__P4MASK(prea, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
	__P4MASK(clear, 0),
	__P4MASK(moclear, 2),
	__P4MASK(smclear, 3),
	NULLMASK
};
1383
1384/* P4 event parser */
1385static int
1386p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
1387    struct pmc_op_pmcallocate *pmc_config)
1388{
1389
1390	char	*e, *p, *q;
1391	int	count, has_tag, has_busreqtype, n;
1392	uint32_t evmask, cccractivemask;
1393	const struct pmc_masks *pm, *pmask;
1394
1395	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1396	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
1397	    pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;
1398
1399	pmask   = NULL;
1400	evmask  = 0;
1401	cccractivemask = 0x3;
1402	has_tag = has_busreqtype = 0;
1403
1404#define	__P4SETMASK(M) do {				\
1405	pmask = p4_mask_##M;				\
1406} while (0)
1407
1408	switch (pe) {
1409	case PMC_EV_P4_TC_DELIVER_MODE:
1410		__P4SETMASK(tcdm);
1411		break;
1412	case PMC_EV_P4_BPU_FETCH_REQUEST:
1413		__P4SETMASK(bfr);
1414		break;
1415	case PMC_EV_P4_ITLB_REFERENCE:
1416		__P4SETMASK(ir);
1417		break;
1418	case PMC_EV_P4_MEMORY_CANCEL:
1419		__P4SETMASK(memcan);
1420		break;
1421	case PMC_EV_P4_MEMORY_COMPLETE:
1422		__P4SETMASK(memcomp);
1423		break;
1424	case PMC_EV_P4_LOAD_PORT_REPLAY:
1425		__P4SETMASK(lpr);
1426		break;
1427	case PMC_EV_P4_STORE_PORT_REPLAY:
1428		__P4SETMASK(spr);
1429		break;
1430	case PMC_EV_P4_MOB_LOAD_REPLAY:
1431		__P4SETMASK(mlr);
1432		break;
1433	case PMC_EV_P4_PAGE_WALK_TYPE:
1434		__P4SETMASK(pwt);
1435		break;
1436	case PMC_EV_P4_BSQ_CACHE_REFERENCE:
1437		__P4SETMASK(bcr);
1438		break;
1439	case PMC_EV_P4_IOQ_ALLOCATION:
1440		__P4SETMASK(ia);
1441		has_busreqtype = 1;
1442		break;
1443	case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
1444		__P4SETMASK(iae);
1445		has_busreqtype = 1;
1446		break;
1447	case PMC_EV_P4_FSB_DATA_ACTIVITY:
1448		__P4SETMASK(fda);
1449		break;
1450	case PMC_EV_P4_BSQ_ALLOCATION:
1451		__P4SETMASK(ba);
1452		break;
1453	case PMC_EV_P4_SSE_INPUT_ASSIST:
1454		__P4SETMASK(sia);
1455		break;
1456	case PMC_EV_P4_PACKED_SP_UOP:
1457		__P4SETMASK(psu);
1458		break;
1459	case PMC_EV_P4_PACKED_DP_UOP:
1460		__P4SETMASK(pdu);
1461		break;
1462	case PMC_EV_P4_SCALAR_SP_UOP:
1463		__P4SETMASK(ssu);
1464		break;
1465	case PMC_EV_P4_SCALAR_DP_UOP:
1466		__P4SETMASK(sdu);
1467		break;
1468	case PMC_EV_P4_64BIT_MMX_UOP:
1469		__P4SETMASK(64bmu);
1470		break;
1471	case PMC_EV_P4_128BIT_MMX_UOP:
1472		__P4SETMASK(128bmu);
1473		break;
1474	case PMC_EV_P4_X87_FP_UOP:
1475		__P4SETMASK(xfu);
1476		break;
1477	case PMC_EV_P4_X87_SIMD_MOVES_UOP:
1478		__P4SETMASK(xsmu);
1479		break;
1480	case PMC_EV_P4_GLOBAL_POWER_EVENTS:
1481		__P4SETMASK(gpe);
1482		break;
1483	case PMC_EV_P4_TC_MS_XFER:
1484		__P4SETMASK(tmx);
1485		break;
1486	case PMC_EV_P4_UOP_QUEUE_WRITES:
1487		__P4SETMASK(uqw);
1488		break;
1489	case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
1490		__P4SETMASK(rmbt);
1491		break;
1492	case PMC_EV_P4_RETIRED_BRANCH_TYPE:
1493		__P4SETMASK(rbt);
1494		break;
1495	case PMC_EV_P4_RESOURCE_STALL:
1496		__P4SETMASK(rs);
1497		break;
1498	case PMC_EV_P4_WC_BUFFER:
1499		__P4SETMASK(wb);
1500		break;
1501	case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
1502	case PMC_EV_P4_B2B_CYCLES:
1503	case PMC_EV_P4_BNR:
1504	case PMC_EV_P4_SNOOP:
1505	case PMC_EV_P4_RESPONSE:
1506		break;
1507	case PMC_EV_P4_FRONT_END_EVENT:
1508		__P4SETMASK(fee);
1509		break;
1510	case PMC_EV_P4_EXECUTION_EVENT:
1511		__P4SETMASK(ee);
1512		break;
1513	case PMC_EV_P4_REPLAY_EVENT:
1514		__P4SETMASK(re);
1515		break;
1516	case PMC_EV_P4_INSTR_RETIRED:
1517		__P4SETMASK(insret);
1518		break;
1519	case PMC_EV_P4_UOPS_RETIRED:
1520		__P4SETMASK(ur);
1521		break;
1522	case PMC_EV_P4_UOP_TYPE:
1523		__P4SETMASK(ut);
1524		break;
1525	case PMC_EV_P4_BRANCH_RETIRED:
1526		__P4SETMASK(br);
1527		break;
1528	case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
1529		__P4SETMASK(mbr);
1530		break;
1531	case PMC_EV_P4_X87_ASSIST:
1532		__P4SETMASK(xa);
1533		break;
1534	case PMC_EV_P4_MACHINE_CLEAR:
1535		__P4SETMASK(machclr);
1536		break;
1537	default:
1538		return (-1);
1539	}
1540
1541	/* process additional flags */
1542	while ((p = strsep(&ctrspec, ",")) != NULL) {
1543		if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
1544			q = strchr(p, '=');
1545			if (*++q == '\0') /* skip '=' */
1546				return (-1);
1547
1548			if (strcasecmp(q, P4_KW_ACTIVE_NONE) == 0)
1549				cccractivemask = 0x0;
1550			else if (strcasecmp(q, P4_KW_ACTIVE_SINGLE) == 0)
1551				cccractivemask = 0x1;
1552			else if (strcasecmp(q, P4_KW_ACTIVE_BOTH) == 0)
1553				cccractivemask = 0x2;
1554			else if (strcasecmp(q, P4_KW_ACTIVE_ANY) == 0)
1555				cccractivemask = 0x3;
1556			else
1557				return (-1);
1558
1559		} else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
1560			if (has_busreqtype == 0)
1561				return (-1);
1562
1563			q = strchr(p, '=');
1564			if (*++q == '\0') /* skip '=' */
1565				return (-1);
1566
1567			count = strtol(q, &e, 0);
1568			if (e == q || *e != '\0')
1569				return (-1);
1570			evmask = (evmask & ~0x1F) | (count & 0x1F);
1571		} else if (KWMATCH(p, P4_KW_CASCADE))
1572			pmc_config->pm_caps |= PMC_CAP_CASCADE;
1573		else if (KWMATCH(p, P4_KW_EDGE))
1574			pmc_config->pm_caps |= PMC_CAP_EDGE;
1575		else if (KWMATCH(p, P4_KW_INV))
1576			pmc_config->pm_caps |= PMC_CAP_INVERT;
1577		else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
1578			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1579				return (-1);
1580			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1581		} else if (KWMATCH(p, P4_KW_OS))
1582			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1583		else if (KWMATCH(p, P4_KW_PRECISE))
1584			pmc_config->pm_caps |= PMC_CAP_PRECISE;
1585		else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
1586			if (has_tag == 0)
1587				return (-1);
1588
1589			q = strchr(p, '=');
1590			if (*++q == '\0') /* skip '=' */
1591				return (-1);
1592
1593			count = strtol(q, &e, 0);
1594			if (e == q || *e != '\0')
1595				return (-1);
1596
1597			pmc_config->pm_caps |= PMC_CAP_TAGGING;
1598			pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
1599			    P4_ESCR_TO_TAG_VALUE(count);
1600		} else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
1601			q = strchr(p, '=');
1602			if (*++q == '\0') /* skip '=' */
1603				return (-1);
1604
1605			count = strtol(q, &e, 0);
1606			if (e == q || *e != '\0')
1607				return (-1);
1608
1609			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1610			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
1611			    ~P4_CCCR_THRESHOLD_MASK;
1612			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1613			    P4_CCCR_TO_THRESHOLD(count);
1614		} else if (KWMATCH(p, P4_KW_USR))
1615			pmc_config->pm_caps |= PMC_CAP_USER;
1616		else
1617			return (-1);
1618	}
1619
1620	/* other post processing */
1621	if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
1622	    pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
1623	    pe == PMC_EV_P4_BSQ_ALLOCATION)
1624		pmc_config->pm_caps |= PMC_CAP_EDGE;
1625
1626	/* fill in thread activity mask */
1627	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1628	    P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
1629
1630	if (evmask)
1631		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1632
1633	switch (pe) {
1634	case PMC_EV_P4_FSB_DATA_ACTIVITY:
1635		if ((evmask & 0x06) == 0x06 ||
1636		    (evmask & 0x18) == 0x18)
1637			return (-1); /* can't have own+other bits together */
1638		if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
1639			evmask = 0x1D;
1640		break;
1641	case PMC_EV_P4_MACHINE_CLEAR:
1642		/* only one bit is allowed to be set */
1643		if ((evmask & (evmask - 1)) != 0)
1644			return (-1);
1645		if (evmask == 0) {
1646			evmask = 0x1;	/* 'CLEAR' */
1647			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1648		}
1649		break;
1650	default:
1651		if (evmask == 0 && pmask) {
1652			for (pm = pmask; pm->pm_name; pm++)
1653				evmask |= pm->pm_value;
1654			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1655		}
1656	}
1657
1658	pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
1659	    P4_ESCR_TO_EVENT_MASK(evmask);
1660
1661	return (0);
1662}
1663
1664#endif
1665
1666#if defined(__i386__)
1667
1668/*
1669 * Pentium style PMCs
1670 */
1671
/*
 * Aliases mapping generic counter names to canonical P5 (Pentium)
 * event specifiers; NULL terminated.
 */
static struct pmc_event_alias p5_aliases[] = {
	EV_ALIAS("branches",		"p5-taken-branches"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"p5-data-read-miss-or-write-miss"),
	EV_ALIAS("ic-misses",		"p5-code-cache-miss"),
	EV_ALIAS("instructions",	"p5-instructions-executed"),
	EV_ALIAS("interrupts",		"p5-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",
	    "p5-number-of-cycles-not-in-halt-state"),
	EV_ALIAS(NULL, NULL)
};
1683
1684static int
1685p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
1686    struct pmc_op_pmcallocate *pmc_config)
1687{
1688	return (-1 || pe || ctrspec || pmc_config); /* shut up gcc */
1689}
1690
1691/*
1692 * Pentium Pro style PMCs.  These PMCs are found in Pentium II, Pentium III,
1693 * and Pentium M CPUs.
1694 */
1695
/*
 * Aliases mapping generic counter names to canonical P6 family event
 * specifiers; NULL terminated.
 */
static struct pmc_event_alias p6_aliases[] = {
	EV_ALIAS("branches",		"p6-br-inst-retired"),
	EV_ALIAS("branch-mispredicts",	"p6-br-miss-pred-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"p6-dcu-lines-in"),
	EV_ALIAS("ic-misses",		"p6-ifu-fetch-miss"),
	EV_ALIAS("instructions",	"p6-inst-retired"),
	EV_ALIAS("interrupts",		"p6-hw-int-rx"),
	EV_ALIAS("unhalted-cycles",	"p6-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};
1707
/* Keywords accepted in a P6 event specifier string. */
#define	P6_KW_CMASK	"cmask"
#define	P6_KW_EDGE	"edge"
#define	P6_KW_INV	"inv"
#define	P6_KW_OS	"os"
#define	P6_KW_UMASK	"umask"
#define	P6_KW_USR	"usr"

/*
 * Per-event tables of allowed unit mask ("umask=") qualifier bits for
 * P6 events; each is NULLMASK terminated.
 */
static struct pmc_masks p6_mask_mesi[] = {
	PMCMASK(m,	0x01),
	PMCMASK(e,	0x02),
	PMCMASK(s,	0x04),
	PMCMASK(i,	0x08),
	NULLMASK
};

/* MESI bits plus hardware/non-hardware prefetch qualifiers. */
static struct pmc_masks p6_mask_mesihw[] = {
	PMCMASK(m,	0x01),
	PMCMASK(e,	0x02),
	PMCMASK(s,	0x04),
	PMCMASK(i,	0x08),
	PMCMASK(nonhw,	0x00),
	PMCMASK(hw,	0x10),
	PMCMASK(both,	0x30),
	NULLMASK
};

static struct pmc_masks p6_mask_hw[] = {
	PMCMASK(nonhw,	0x00),
	PMCMASK(hw,	0x10),
	PMCMASK(both,	0x30),
	NULLMASK
};

/* Transaction origin: this CPU only, or any agent on the bus. */
static struct pmc_masks p6_mask_any[] = {
	PMCMASK(self,	0x00),
	PMCMASK(any,	0x20),
	NULLMASK
};

static struct pmc_masks p6_mask_ekp[] = {
	PMCMASK(nta,	0x00),
	PMCMASK(t1,	0x01),
	PMCMASK(t2,	0x02),
	PMCMASK(wos,	0x03),
	NULLMASK
};

static struct pmc_masks p6_mask_pps[] = {
	PMCMASK(packed-and-scalar, 0x00),
	PMCMASK(scalar,	0x01),
	NULLMASK
};

static struct pmc_masks p6_mask_mite[] = {
	PMCMASK(packed-multiply,	 0x01),
	PMCMASK(packed-shift,		0x02),
	PMCMASK(pack,			0x04),
	PMCMASK(unpack,			0x08),
	PMCMASK(packed-logical,		0x10),
	PMCMASK(packed-arithmetic,	0x20),
	NULLMASK
};

static struct pmc_masks p6_mask_fmt[] = {
	PMCMASK(mmxtofp,	0x00),
	PMCMASK(fptommx,	0x01),
	NULLMASK
};

/* Segment register selectors. */
static struct pmc_masks p6_mask_sr[] = {
	PMCMASK(es,	0x01),
	PMCMASK(ds,	0x02),
	PMCMASK(fs,	0x04),
	PMCMASK(gs,	0x08),
	NULLMASK
};

static struct pmc_masks p6_mask_eet[] = {
	PMCMASK(all,	0x00),
	PMCMASK(freq,	0x02),
	NULLMASK
};

static struct pmc_masks p6_mask_efur[] = {
	PMCMASK(all,	0x00),
	PMCMASK(loadop,	0x01),
	PMCMASK(stdsta,	0x02),
	NULLMASK
};

static struct pmc_masks p6_mask_essir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-packed-single-scalar-single, 0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};

static struct pmc_masks p6_mask_esscir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-scalar-single,	0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};
1813
/*
 * P6 event parser.
 *
 * Translate event `pe' plus the comma separated modifiers in
 * `ctrspec' (consumed destructively by strsep()) into the EVSEL
 * register value stored in `pmc_config'.  Returns 0 on success and
 * -1 on a parse error or invalid qualifier combination.
 */
static int
p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint32_t evmask;
	int count, n;
	const struct pmc_masks *pm, *pmask;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;

	evmask = 0;

#define	P6MASKSET(M)	pmask = p6_mask_ ## M

	/* Select the table of valid "umask=" bits for this event. */
	switch(pe) {
	case PMC_EV_P6_L2_IFETCH:	P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:
		P6MASKSET(any);	break;
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:
		P6MASKSET(ekp); break;
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
		P6MASKSET(pps);	break;
	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
		P6MASKSET(mite); break;
	case PMC_EV_P6_FP_MMX_TRANS:
		P6MASKSET(fmt);	break;
	case PMC_EV_P6_SEG_RENAME_STALLS:
	case PMC_EV_P6_SEG_REG_RENAMES:
		P6MASKSET(sr);	break;
	case PMC_EV_P6_EMON_EST_TRANS:
		P6MASKSET(eet);	break;
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
		P6MASKSET(efur); break;
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
		P6MASKSET(essir); break;
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
		P6MASKSET(esscir); break;
	default:
		pmask = NULL;	/* event takes no "umask=" qualifier */
		break;
	}

	/* Pentium M PMCs have a few events with different semantics */
	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
		if (pe == PMC_EV_P6_L2_LD ||
		    pe == PMC_EV_P6_L2_LINES_IN ||
		    pe == PMC_EV_P6_L2_LINES_OUT)
			P6MASKSET(mesihw);
		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
			P6MASKSET(hw);
	}

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_ppro.pm_ppro_config |=
			    P6_EVSEL_TO_CMASK(count);
		} else if (KWMATCH(p, P6_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, P6_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, P6_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
			evmask = 0;
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			/* These events accept at most one umask keyword. */
			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
			     pe == PMC_EV_P6_BUS_TRANS_IO ||
			     pe == PMC_EV_P6_BUS_TRANS_P ||
			     pe == PMC_EV_P6_BUS_TRANS_WB ||
			     pe == PMC_EV_P6_EMON_EST_TRANS ||
			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
			     pe == PMC_EV_P6_FP_MMX_TRANS)
			    && (n > 1))	/* Only one mask keyword is allowed. */
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, P6_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* post processing */
	switch (pe) {

		/*
		 * The following events default to an evmask of 0
		 */

		/* default => 'self' */
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:

		/* default => 'nta' */
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:

		/* default => 'packed and scalar' */
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:

		/* default => 'mmx to fp transitions' */
	case PMC_EV_P6_FP_MMX_TRANS:

		/* default => 'SSE Packed Single' */
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:

		/* default => 'all fused micro-ops' */
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:

		/* default => 'all transitions' */
	case PMC_EV_P6_EMON_EST_TRANS:
		break;

	case PMC_EV_P6_MMX_UOPS_EXEC:
		evmask = 0x0F;		/* only value allowed */
		break;

	default:
		/*
		 * For all other events, set the default event mask
		 * to a logical OR of all the allowed event mask bits.
		 */
		if (evmask == 0 && pmask) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}

		break;
	}

	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_ppro.pm_ppro_config |=
		    P6_EVSEL_TO_UMASK(evmask);

	return (0);
}
2010
2011#endif
2012
2013#if	defined(__i386__) || defined(__amd64__)
2014static int
2015tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
2016    struct pmc_op_pmcallocate *pmc_config)
2017{
2018	if (pe != PMC_EV_TSC_TSC)
2019		return (-1);
2020
2021	/* TSC events must be unqualified. */
2022	if (ctrspec && *ctrspec != '\0')
2023		return (-1);
2024
2025	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
2026	pmc_config->pm_caps |= PMC_CAP_READ;
2027
2028	return (0);
2029}
2030#endif
2031
2032#if	defined(__XSCALE__)
2033
/*
 * Aliases mapping generic counter names to canonical XScale event
 * names; NULL terminated.
 */
static struct pmc_event_alias xscale_aliases[] = {
	EV_ALIAS("branches",		"BRANCH_RETIRED"),
	EV_ALIAS("branch-mispredicts",	"BRANCH_MISPRED"),
	EV_ALIAS("dc-misses",		"DC_MISS"),
	EV_ALIAS("ic-misses",		"IC_MISS"),
	EV_ALIAS("instructions",	"INSTR_RETIRED"),
	EV_ALIAS(NULL, NULL)
};
2042static int
2043xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2044    struct pmc_op_pmcallocate *pmc_config __unused)
2045{
2046	switch (pe) {
2047	default:
2048		break;
2049	}
2050
2051	return (0);
2052}
2053#endif
2054
2055#if defined(__mips__)
2056
/*
 * Aliases mapping generic counter names to canonical MIPS 24K event
 * names; NULL terminated.
 */
static struct pmc_event_alias mips24k_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS("branches",		"BRANCH_COMPLETED"),
	EV_ALIAS("branch-mispredicts",	"BRANCH_MISPRED"),
	EV_ALIAS(NULL, NULL)
};

/* Keywords accepted in a MIPS 24K event specifier string. */
#define	MIPS24K_KW_OS		"os"
#define	MIPS24K_KW_USR		"usr"
#define	MIPS24K_KW_ANYTHREAD	"anythread"
2067
2068static int
2069mips24k_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2070		  struct pmc_op_pmcallocate *pmc_config __unused)
2071{
2072	char *p;
2073
2074	(void) pe;
2075
2076	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2077
2078	while ((p = strsep(&ctrspec, ",")) != NULL) {
2079		if (KWMATCH(p, MIPS24K_KW_OS))
2080			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2081		else if (KWMATCH(p, MIPS24K_KW_USR))
2082			pmc_config->pm_caps |= PMC_CAP_USER;
2083		else if (KWMATCH(p, MIPS24K_KW_ANYTHREAD))
2084			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
2085		else
2086			return (-1);
2087	}
2088
2089	return (0);
2090}
2091#endif /* __mips__ */
2092
2093
2094/*
2095 * Match an event name `name' with its canonical form.
2096 *
2097 * Matches are case insensitive and spaces, periods, underscores and
2098 * hyphen characters are considered to match each other.
2099 *
2100 * Returns 1 for a match, 0 otherwise.
2101 */
2102
/*
 * Compare event name `name' against its canonical form
 * `canonicalname'.
 *
 * The comparison is case insensitive and treats the separator
 * characters space, '.', '_' and '-' as equivalent to each other.
 *
 * Returns 1 on a match and 0 otherwise.
 */
static int
pmc_match_event_name(const char *name, const char *canonicalname)
{
	const unsigned char *a, *b;
	int sep_a, sep_b;

	a = (const unsigned char *) name;
	b = (const unsigned char *) canonicalname;

	while (*a != '\0' && *b != '\0') {
		sep_a = (*a == ' ' || *a == '_' || *a == '-' || *a == '.');
		sep_b = (*b == ' ' || *b == '_' || *b == '-' || *b == '.');

		/* Two separators match; otherwise compare case-folded. */
		if (!(sep_a && sep_b) && toupper(*a) != toupper(*b))
			return (0);

		a++;
		b++;
	}

	/* Both strings must be fully consumed. */
	return (*a == '\0' && *b == '\0');
}
2130
2131/*
2132 * Match an event name against all the event named supported by a
2133 * PMC class.
2134 *
2135 * Returns an event descriptor pointer on match or NULL otherwise.
2136 */
2137static const struct pmc_event_descr *
2138pmc_match_event_class(const char *name,
2139    const struct pmc_class_descr *pcd)
2140{
2141	size_t n;
2142	const struct pmc_event_descr *ev;
2143
2144	ev = pcd->pm_evc_event_table;
2145	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
2146		if (pmc_match_event_name(name, ev->pm_ev_name))
2147			return (ev);
2148
2149	return (NULL);
2150}
2151
2152static int
2153pmc_mdep_is_compatible_class(enum pmc_class pc)
2154{
2155	size_t n;
2156
2157	for (n = 0; n < pmc_mdep_class_list_size; n++)
2158		if (pmc_mdep_class_list[n] == pc)
2159			return (1);
2160	return (0);
2161}
2162
2163/*
2164 * API entry points
2165 */
2166
2167int
2168pmc_allocate(const char *ctrspec, enum pmc_mode mode,
2169    uint32_t flags, int cpu, pmc_id_t *pmcid)
2170{
2171	size_t n;
2172	int retval;
2173	char *r, *spec_copy;
2174	const char *ctrname;
2175	const struct pmc_event_descr *ev;
2176	const struct pmc_event_alias *alias;
2177	struct pmc_op_pmcallocate pmc_config;
2178	const struct pmc_class_descr *pcd;
2179
2180	spec_copy = NULL;
2181	retval    = -1;
2182
2183	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
2184	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
2185		errno = EINVAL;
2186		goto out;
2187	}
2188
2189	/* replace an event alias with the canonical event specifier */
2190	if (pmc_mdep_event_aliases)
2191		for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
2192			if (!strcasecmp(ctrspec, alias->pm_alias)) {
2193				spec_copy = strdup(alias->pm_spec);
2194				break;
2195			}
2196
2197	if (spec_copy == NULL)
2198		spec_copy = strdup(ctrspec);
2199
2200	r = spec_copy;
2201	ctrname = strsep(&r, ",");
2202
2203	/*
2204	 * If a explicit class prefix was given by the user, restrict the
2205	 * search for the event to the specified PMC class.
2206	 */
2207	ev = NULL;
2208	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
2209		pcd = pmc_class_table[n];
2210		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
2211		    strncasecmp(ctrname, pcd->pm_evc_name,
2212				pcd->pm_evc_name_size) == 0) {
2213			if ((ev = pmc_match_event_class(ctrname +
2214			    pcd->pm_evc_name_size, pcd)) == NULL) {
2215				errno = EINVAL;
2216				goto out;
2217			}
2218			break;
2219		}
2220	}
2221
2222	/*
2223	 * Otherwise, search for this event in all compatible PMC
2224	 * classes.
2225	 */
2226	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
2227		pcd = pmc_class_table[n];
2228		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class))
2229			ev = pmc_match_event_class(ctrname, pcd);
2230	}
2231
2232	if (ev == NULL) {
2233		errno = EINVAL;
2234		goto out;
2235	}
2236
2237	bzero(&pmc_config, sizeof(pmc_config));
2238	pmc_config.pm_ev    = ev->pm_ev_code;
2239	pmc_config.pm_class = pcd->pm_evc_class;
2240	pmc_config.pm_cpu   = cpu;
2241	pmc_config.pm_mode  = mode;
2242	pmc_config.pm_flags = flags;
2243
2244	if (PMC_IS_SAMPLING_MODE(mode))
2245		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
2246
2247 	if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
2248		errno = EINVAL;
2249		goto out;
2250	}
2251
2252	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
2253		goto out;
2254
2255	*pmcid = pmc_config.pm_pmcid;
2256
2257	retval = 0;
2258
2259 out:
2260	if (spec_copy)
2261		free(spec_copy);
2262
2263	return (retval);
2264}
2265
2266int
2267pmc_attach(pmc_id_t pmc, pid_t pid)
2268{
2269	struct pmc_op_pmcattach pmc_attach_args;
2270
2271	pmc_attach_args.pm_pmc = pmc;
2272	pmc_attach_args.pm_pid = pid;
2273
2274	return (PMC_CALL(PMCATTACH, &pmc_attach_args));
2275}
2276
2277int
2278pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
2279{
2280	unsigned int i;
2281	enum pmc_class cl;
2282
2283	cl = PMC_ID_TO_CLASS(pmcid);
2284	for (i = 0; i < cpu_info.pm_nclass; i++)
2285		if (cpu_info.pm_classes[i].pm_class == cl) {
2286			*caps = cpu_info.pm_classes[i].pm_caps;
2287			return (0);
2288		}
2289	errno = EINVAL;
2290	return (-1);
2291}
2292
2293int
2294pmc_configure_logfile(int fd)
2295{
2296	struct pmc_op_configurelog cla;
2297
2298	cla.pm_logfd = fd;
2299	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
2300		return (-1);
2301	return (0);
2302}
2303
2304int
2305pmc_cpuinfo(const struct pmc_cpuinfo **pci)
2306{
2307	if (pmc_syscall == -1) {
2308		errno = ENXIO;
2309		return (-1);
2310	}
2311
2312	*pci = &cpu_info;
2313	return (0);
2314}
2315
2316int
2317pmc_detach(pmc_id_t pmc, pid_t pid)
2318{
2319	struct pmc_op_pmcattach pmc_detach_args;
2320
2321	pmc_detach_args.pm_pmc = pmc;
2322	pmc_detach_args.pm_pid = pid;
2323	return (PMC_CALL(PMCDETACH, &pmc_detach_args));
2324}
2325
2326int
2327pmc_disable(int cpu, int pmc)
2328{
2329	struct pmc_op_pmcadmin ssa;
2330
2331	ssa.pm_cpu = cpu;
2332	ssa.pm_pmc = pmc;
2333	ssa.pm_state = PMC_STATE_DISABLED;
2334	return (PMC_CALL(PMCADMIN, &ssa));
2335}
2336
2337int
2338pmc_enable(int cpu, int pmc)
2339{
2340	struct pmc_op_pmcadmin ssa;
2341
2342	ssa.pm_cpu = cpu;
2343	ssa.pm_pmc = pmc;
2344	ssa.pm_state = PMC_STATE_FREE;
2345	return (PMC_CALL(PMCADMIN, &ssa));
2346}
2347
2348/*
2349 * Return a list of events known to a given PMC class.  'cl' is the
2350 * PMC class identifier, 'eventnames' is the returned list of 'const
2351 * char *' pointers pointing to the names of the events. 'nevents' is
2352 * the number of event name pointers returned.
2353 *
2354 * The space for 'eventnames' is allocated using malloc(3).  The caller
2355 * is responsible for freeing this space when done.
2356 */
2357int
2358pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
2359    int *nevents)
2360{
2361	int count;
2362	const char **names;
2363	const struct pmc_event_descr *ev;
2364
2365	switch (cl)
2366	{
2367	case PMC_CLASS_IAF:
2368		ev = iaf_event_table;
2369		count = PMC_EVENT_TABLE_SIZE(iaf);
2370		break;
2371	case PMC_CLASS_IAP:
2372		/*
2373		 * Return the most appropriate set of event name
2374		 * spellings for the current CPU.
2375		 */
2376		switch (cpu_info.pm_cputype) {
2377		default:
2378		case PMC_CPU_INTEL_ATOM:
2379			ev = atom_event_table;
2380			count = PMC_EVENT_TABLE_SIZE(atom);
2381			break;
2382		case PMC_CPU_INTEL_CORE:
2383			ev = core_event_table;
2384			count = PMC_EVENT_TABLE_SIZE(core);
2385			break;
2386		case PMC_CPU_INTEL_CORE2:
2387		case PMC_CPU_INTEL_CORE2EXTREME:
2388			ev = core2_event_table;
2389			count = PMC_EVENT_TABLE_SIZE(core2);
2390			break;
2391		case PMC_CPU_INTEL_COREI7:
2392			ev = corei7_event_table;
2393			count = PMC_EVENT_TABLE_SIZE(corei7);
2394			break;
2395		}
2396		break;
2397	case PMC_CLASS_TSC:
2398		ev = tsc_event_table;
2399		count = PMC_EVENT_TABLE_SIZE(tsc);
2400		break;
2401	case PMC_CLASS_K7:
2402		ev = k7_event_table;
2403		count = PMC_EVENT_TABLE_SIZE(k7);
2404		break;
2405	case PMC_CLASS_K8:
2406		ev = k8_event_table;
2407		count = PMC_EVENT_TABLE_SIZE(k8);
2408		break;
2409	case PMC_CLASS_P4:
2410		ev = p4_event_table;
2411		count = PMC_EVENT_TABLE_SIZE(p4);
2412		break;
2413	case PMC_CLASS_P5:
2414		ev = p5_event_table;
2415		count = PMC_EVENT_TABLE_SIZE(p5);
2416		break;
2417	case PMC_CLASS_P6:
2418		ev = p6_event_table;
2419		count = PMC_EVENT_TABLE_SIZE(p6);
2420		break;
2421	case PMC_CLASS_XSCALE:
2422		ev = xscale_event_table;
2423		count = PMC_EVENT_TABLE_SIZE(xscale);
2424		break;
2425	case PMC_CLASS_MIPS24K:
2426		ev = mips24k_event_table;
2427		count = PMC_EVENT_TABLE_SIZE(mips24k);
2428		break;
2429	default:
2430		errno = EINVAL;
2431		return (-1);
2432	}
2433
2434	if ((names = malloc(count * sizeof(const char *))) == NULL)
2435		return (-1);
2436
2437	*eventnames = names;
2438	*nevents = count;
2439
2440	for (;count--; ev++, names++)
2441		*names = ev->pm_ev_name;
2442	return (0);
2443}
2444
2445int
2446pmc_flush_logfile(void)
2447{
2448	return (PMC_CALL(FLUSHLOG,0));
2449}
2450
2451int
2452pmc_get_driver_stats(struct pmc_driverstats *ds)
2453{
2454	struct pmc_op_getdriverstats gms;
2455
2456	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
2457		return (-1);
2458
2459	/* copy out fields in the current userland<->library interface */
2460	ds->pm_intr_ignored    = gms.pm_intr_ignored;
2461	ds->pm_intr_processed  = gms.pm_intr_processed;
2462	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
2463	ds->pm_syscalls        = gms.pm_syscalls;
2464	ds->pm_syscall_errors  = gms.pm_syscall_errors;
2465	ds->pm_buffer_requests = gms.pm_buffer_requests;
2466	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
2467	ds->pm_log_sweeps      = gms.pm_log_sweeps;
2468	return (0);
2469}
2470
2471int
2472pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
2473{
2474	struct pmc_op_getmsr gm;
2475
2476	gm.pm_pmcid = pmc;
2477	if (PMC_CALL(PMCGETMSR, &gm) < 0)
2478		return (-1);
2479	*msr = gm.pm_msr;
2480	return (0);
2481}
2482
/*
 * Initialize the library.
 *
 * Locate the in-kernel PMC module, retrieve its system call number,
 * cross-check the module's ABI version against the one this library
 * was compiled with, fetch CPU information and select the machine
 * dependent tables used by the event name parser.  Returns 0 on
 * success, or -1 with errno set on failure.
 */
int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;
#if defined(__amd64__) || defined(__i386__)
	int cpu_has_iaf_counters;
	unsigned int t;
#endif

	if (pmc_syscall != -1) /* already inited */
		return (0);

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return (-1);

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return (-1);

	pmc_syscall = pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno  = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	/* fetch a description of the installed CPU and its PMC classes */
	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
	for (n = 0; n < cpu_info.pm_nclass; n++)
		cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];

	/* allocate the class descriptor table, initially all NULL */
	pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
	    sizeof(struct pmc_class_descr *));

	if (pmc_class_table == NULL)
		return (-1);

	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
		pmc_class_table[n] = NULL;

	/*
	 * Fill in the class table.
	 */
	n = 0;
#if defined(__amd64__) || defined(__i386__)
	pmc_class_table[n++] = &tsc_class_table_descr;

	/*
	 * Check if this CPU has fixed function counters.
	 */
	cpu_has_iaf_counters = 0;
	for (t = 0; t < cpu_info.pm_nclass; t++)
		if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF)
			cpu_has_iaf_counters = 1;
#endif

	/*
	 * PMC_MDEP_INIT() selects the alias table and the list of
	 * machine dependent PMC classes for CPU family 'C'.
	 */
#define	PMC_MDEP_INIT(C) do {					\
		pmc_mdep_event_aliases    = C##_aliases;	\
		pmc_mdep_class_list  = C##_pmc_classes;		\
		pmc_mdep_class_list_size =			\
		    PMC_TABLE_SIZE(C##_pmc_classes);		\
	} while (0)

	/*
	 * The Intel "V2" variant additionally registers the fixed
	 * function (IAF) counter class when the CPU has one, or else
	 * switches to an alias table without IAF entries.
	 */
#define	PMC_MDEP_INIT_INTEL_V2(C) do {					\
		PMC_MDEP_INIT(C);					\
		if (cpu_has_iaf_counters) 				\
			pmc_class_table[n++] = &iaf_class_table_descr;	\
		else							\
			pmc_mdep_event_aliases =			\
				C##_aliases_without_iaf;		\
		pmc_class_table[n] = &C##_class_table_descr;		\
	} while (0)

	/* Configure the event name parser. */
	switch (cpu_info.pm_cputype) {
#if defined(__i386__)
	case PMC_CPU_AMD_K7:
		PMC_MDEP_INIT(k7);
		pmc_class_table[n] = &k7_class_table_descr;
		break;
	case PMC_CPU_INTEL_P5:
		PMC_MDEP_INIT(p5);
		pmc_class_table[n]  = &p5_class_table_descr;
		break;
	case PMC_CPU_INTEL_P6:		/* P6 ... Pentium M CPUs have */
	case PMC_CPU_INTEL_PII:		/* similar PMCs. */
	case PMC_CPU_INTEL_PIII:
	case PMC_CPU_INTEL_PM:
		PMC_MDEP_INIT(p6);
		pmc_class_table[n] = &p6_class_table_descr;
		break;
#endif
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_AMD_K8:
		PMC_MDEP_INIT(k8);
		pmc_class_table[n] = &k8_class_table_descr;
		break;
	case PMC_CPU_INTEL_ATOM:
		PMC_MDEP_INIT_INTEL_V2(atom);
		break;
	case PMC_CPU_INTEL_CORE:
		PMC_MDEP_INIT(core);
		pmc_class_table[n] = &core_class_table_descr;
		break;
	case PMC_CPU_INTEL_CORE2:
	case PMC_CPU_INTEL_CORE2EXTREME:
		PMC_MDEP_INIT_INTEL_V2(core2);
		break;
	case PMC_CPU_INTEL_COREI7:
		PMC_MDEP_INIT_INTEL_V2(corei7);
		break;
	case PMC_CPU_INTEL_PIV:
		PMC_MDEP_INIT(p4);
		pmc_class_table[n] = &p4_class_table_descr;
		break;
#endif
#if defined(__XSCALE__)
	case PMC_CPU_INTEL_XSCALE:
		PMC_MDEP_INIT(xscale);
		pmc_class_table[n] = &xscale_class_table_descr;
		break;
#endif
#if defined(__mips__)
	case PMC_CPU_MIPS_24K:
		PMC_MDEP_INIT(mips24k);
		pmc_class_table[n] = &mips24k_class_table_descr;
		break;
#endif /* __mips__ */
	default:
		/*
		 * Some kind of CPU this version of the library knows nothing
		 * about.  This shouldn't happen since the abi version check
		 * should have caught this.
		 */
		errno = ENXIO;
		return (pmc_syscall = -1);
	}

	return (0);
}
2639
2640const char *
2641pmc_name_of_capability(enum pmc_caps cap)
2642{
2643	int i;
2644
2645	/*
2646	 * 'cap' should have a single bit set and should be in
2647	 * range.
2648	 */
2649	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
2650	    cap > PMC_CAP_LAST) {
2651		errno = EINVAL;
2652		return (NULL);
2653	}
2654
2655	i = ffs(cap);
2656	return (pmc_capability_names[i - 1]);
2657}
2658
2659const char *
2660pmc_name_of_class(enum pmc_class pc)
2661{
2662	if ((int) pc >= PMC_CLASS_FIRST &&
2663	    pc <= PMC_CLASS_LAST)
2664		return (pmc_class_names[pc]);
2665
2666	errno = EINVAL;
2667	return (NULL);
2668}
2669
2670const char *
2671pmc_name_of_cputype(enum pmc_cputype cp)
2672{
2673	size_t n;
2674
2675	for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
2676		if (cp == pmc_cputype_names[n].pm_cputype)
2677			return (pmc_cputype_names[n].pm_name);
2678
2679	errno = EINVAL;
2680	return (NULL);
2681}
2682
2683const char *
2684pmc_name_of_disposition(enum pmc_disp pd)
2685{
2686	if ((int) pd >= PMC_DISP_FIRST &&
2687	    pd <= PMC_DISP_LAST)
2688		return (pmc_disposition_names[pd]);
2689
2690	errno = EINVAL;
2691	return (NULL);
2692}
2693
2694const char *
2695_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
2696{
2697	const struct pmc_event_descr *ev, *evfence;
2698
2699	ev = evfence = NULL;
2700	if (pe >= PMC_EV_IAF_FIRST && pe <= PMC_EV_IAF_LAST) {
2701		ev = iaf_event_table;
2702		evfence = iaf_event_table + PMC_EVENT_TABLE_SIZE(iaf);
2703	} else if (pe >= PMC_EV_IAP_FIRST && pe <= PMC_EV_IAP_LAST) {
2704		switch (cpu) {
2705		case PMC_CPU_INTEL_ATOM:
2706			ev = atom_event_table;
2707			evfence = atom_event_table + PMC_EVENT_TABLE_SIZE(atom);
2708			break;
2709		case PMC_CPU_INTEL_CORE:
2710			ev = core_event_table;
2711			evfence = core_event_table + PMC_EVENT_TABLE_SIZE(core);
2712			break;
2713		case PMC_CPU_INTEL_CORE2:
2714		case PMC_CPU_INTEL_CORE2EXTREME:
2715			ev = core2_event_table;
2716			evfence = core2_event_table + PMC_EVENT_TABLE_SIZE(core2);
2717			break;
2718		case PMC_CPU_INTEL_COREI7:
2719			ev = corei7_event_table;
2720			evfence = corei7_event_table + PMC_EVENT_TABLE_SIZE(corei7);
2721			break;
2722		default:	/* Unknown CPU type. */
2723			break;
2724		}
2725	} if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
2726		ev = k7_event_table;
2727		evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7);
2728	} else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
2729		ev = k8_event_table;
2730		evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
2731	} else if (pe >= PMC_EV_P4_FIRST && pe <= PMC_EV_P4_LAST) {
2732		ev = p4_event_table;
2733		evfence = p4_event_table + PMC_EVENT_TABLE_SIZE(p4);
2734	} else if (pe >= PMC_EV_P5_FIRST && pe <= PMC_EV_P5_LAST) {
2735		ev = p5_event_table;
2736		evfence = p5_event_table + PMC_EVENT_TABLE_SIZE(p5);
2737	} else if (pe >= PMC_EV_P6_FIRST && pe <= PMC_EV_P6_LAST) {
2738		ev = p6_event_table;
2739		evfence = p6_event_table + PMC_EVENT_TABLE_SIZE(p6);
2740	} else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) {
2741		ev = xscale_event_table;
2742		evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale);
2743	} else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
2744		ev = mips24k_event_table;
2745		evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k
2746);
2747	} else if (pe == PMC_EV_TSC_TSC) {
2748		ev = tsc_event_table;
2749		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
2750	}
2751
2752	for (; ev != evfence; ev++)
2753		if (pe == ev->pm_ev_code)
2754			return (ev->pm_ev_name);
2755
2756	return (NULL);
2757}
2758
2759const char *
2760pmc_name_of_event(enum pmc_event pe)
2761{
2762	const char *n;
2763
2764	if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
2765		return (n);
2766
2767	errno = EINVAL;
2768	return (NULL);
2769}
2770
2771const char *
2772pmc_name_of_mode(enum pmc_mode pm)
2773{
2774	if ((int) pm >= PMC_MODE_FIRST &&
2775	    pm <= PMC_MODE_LAST)
2776		return (pmc_mode_names[pm]);
2777
2778	errno = EINVAL;
2779	return (NULL);
2780}
2781
2782const char *
2783pmc_name_of_state(enum pmc_state ps)
2784{
2785	if ((int) ps >= PMC_STATE_FIRST &&
2786	    ps <= PMC_STATE_LAST)
2787		return (pmc_state_names[ps]);
2788
2789	errno = EINVAL;
2790	return (NULL);
2791}
2792
2793int
2794pmc_ncpu(void)
2795{
2796	if (pmc_syscall == -1) {
2797		errno = ENXIO;
2798		return (-1);
2799	}
2800
2801	return (cpu_info.pm_ncpu);
2802}
2803
2804int
2805pmc_npmc(int cpu)
2806{
2807	if (pmc_syscall == -1) {
2808		errno = ENXIO;
2809		return (-1);
2810	}
2811
2812	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
2813		errno = EINVAL;
2814		return (-1);
2815	}
2816
2817	return (cpu_info.pm_npmc);
2818}
2819
2820int
2821pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
2822{
2823	int nbytes, npmc;
2824	struct pmc_op_getpmcinfo *pmci;
2825
2826	if ((npmc = pmc_npmc(cpu)) < 0)
2827		return (-1);
2828
2829	nbytes = sizeof(struct pmc_op_getpmcinfo) +
2830	    npmc * sizeof(struct pmc_info);
2831
2832	if ((pmci = calloc(1, nbytes)) == NULL)
2833		return (-1);
2834
2835	pmci->pm_cpu  = cpu;
2836
2837	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
2838		free(pmci);
2839		return (-1);
2840	}
2841
2842	/* kernel<->library, library<->userland interfaces are identical */
2843	*ppmci = (struct pmc_pmcinfo *) pmci;
2844	return (0);
2845}
2846
2847int
2848pmc_read(pmc_id_t pmc, pmc_value_t *value)
2849{
2850	struct pmc_op_pmcrw pmc_read_op;
2851
2852	pmc_read_op.pm_pmcid = pmc;
2853	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
2854	pmc_read_op.pm_value = -1;
2855
2856	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
2857		return (-1);
2858
2859	*value = pmc_read_op.pm_value;
2860	return (0);
2861}
2862
2863int
2864pmc_release(pmc_id_t pmc)
2865{
2866	struct pmc_op_simple	pmc_release_args;
2867
2868	pmc_release_args.pm_pmcid = pmc;
2869	return (PMC_CALL(PMCRELEASE, &pmc_release_args));
2870}
2871
2872int
2873pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
2874{
2875	struct pmc_op_pmcrw pmc_rw_op;
2876
2877	pmc_rw_op.pm_pmcid = pmc;
2878	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
2879	pmc_rw_op.pm_value = newvalue;
2880
2881	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
2882		return (-1);
2883
2884	*oldvaluep = pmc_rw_op.pm_value;
2885	return (0);
2886}
2887
2888int
2889pmc_set(pmc_id_t pmc, pmc_value_t value)
2890{
2891	struct pmc_op_pmcsetcount sc;
2892
2893	sc.pm_pmcid = pmc;
2894	sc.pm_count = value;
2895
2896	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
2897		return (-1);
2898	return (0);
2899}
2900
2901int
2902pmc_start(pmc_id_t pmc)
2903{
2904	struct pmc_op_simple	pmc_start_args;
2905
2906	pmc_start_args.pm_pmcid = pmc;
2907	return (PMC_CALL(PMCSTART, &pmc_start_args));
2908}
2909
2910int
2911pmc_stop(pmc_id_t pmc)
2912{
2913	struct pmc_op_simple	pmc_stop_args;
2914
2915	pmc_stop_args.pm_pmcid = pmc;
2916	return (PMC_CALL(PMCSTOP, &pmc_stop_args));
2917}
2918
2919int
2920pmc_width(pmc_id_t pmcid, uint32_t *width)
2921{
2922	unsigned int i;
2923	enum pmc_class cl;
2924
2925	cl = PMC_ID_TO_CLASS(pmcid);
2926	for (i = 0; i < cpu_info.pm_nclass; i++)
2927		if (cpu_info.pm_classes[i].pm_class == cl) {
2928			*width = cpu_info.pm_classes[i].pm_width;
2929			return (0);
2930		}
2931	errno = EINVAL;
2932	return (-1);
2933}
2934
2935int
2936pmc_write(pmc_id_t pmc, pmc_value_t value)
2937{
2938	struct pmc_op_pmcrw pmc_write_op;
2939
2940	pmc_write_op.pm_pmcid = pmc;
2941	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
2942	pmc_write_op.pm_value = value;
2943	return (PMC_CALL(PMCRW, &pmc_write_op));
2944}
2945
2946int
2947pmc_writelog(uint32_t userdata)
2948{
2949	struct pmc_op_writelog wl;
2950
2951	wl.pm_userdata = userdata;
2952	return (PMC_CALL(WRITELOG, &wl));
2953}
2954