xref: /illumos-gate/usr/src/uts/intel/pcbe/core_pcbe.c (revision a18ddb3c6e38b3caec32c0c4fb5fef8f2f4c4c8f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * Performance Counter Back-End for Intel Family 6 Models 15 and 23 and
 * for later processors that support Architectural Performance Monitoring
28  */
29 
30 #include <sys/cpuvar.h>
31 #include <sys/param.h>
32 #include <sys/cpc_impl.h>
33 #include <sys/cpc_pcbe.h>
34 #include <sys/modctl.h>
35 #include <sys/inttypes.h>
36 #include <sys/systm.h>
37 #include <sys/cmn_err.h>
38 #include <sys/x86_archext.h>
39 #include <sys/sdt.h>
40 #include <sys/archsystm.h>
41 #include <sys/privregs.h>
42 #include <sys/ddi.h>
43 #include <sys/sunddi.h>
44 #include <sys/cred.h>
45 #include <sys/policy.h>
46 
47 static int core_pcbe_init(void);
48 static uint_t core_pcbe_ncounters(void);
49 static const char *core_pcbe_impl_name(void);
50 static const char *core_pcbe_cpuref(void);
51 static char *core_pcbe_list_events(uint_t picnum);
52 static char *core_pcbe_list_attrs(void);
53 static uint64_t core_pcbe_event_coverage(char *event);
54 static uint64_t core_pcbe_overflow_bitmap(void);
55 static int core_pcbe_configure(uint_t picnum, char *event, uint64_t preset,
56     uint32_t flags, uint_t nattrs, kcpc_attr_t *attrs, void **data,
57     void *token);
58 static void core_pcbe_program(void *token);
59 static void core_pcbe_allstop(void);
60 static void core_pcbe_sample(void *token);
61 static void core_pcbe_free(void *config);
62 
63 #define	FALSE	0
64 #define	TRUE	1
65 
66 /* Counter Type */
67 #define	CORE_GPC	0	/* General-Purpose Counter (GPC) */
68 #define	CORE_FFC	1	/* Fixed-Function Counter (FFC) */
69 
70 /* MSR Addresses */
71 #define	GPC_BASE_PMC		0x00c1	/* First GPC */
72 #define	GPC_BASE_PES		0x0186	/* First GPC Event Select register */
73 #define	FFC_BASE_PMC		0x0309	/* First FFC */
74 #define	PERF_FIXED_CTR_CTRL	0x038d	/* Used to enable/disable FFCs */
75 #define	PERF_GLOBAL_STATUS	0x038e	/* Overflow status register */
76 #define	PERF_GLOBAL_CTRL	0x038f	/* Used to enable/disable counting */
77 #define	PERF_GLOBAL_OVF_CTRL	0x0390	/* Used to clear overflow status */
78 
79 /*
80  * Processor Event Select register fields
81  */
82 #define	CORE_USR	(1ULL << 16)	/* Count while not in ring 0 */
83 #define	CORE_OS		(1ULL << 17)	/* Count while in ring 0 */
84 #define	CORE_EDGE	(1ULL << 18)	/* Enable edge detection */
85 #define	CORE_PC		(1ULL << 19)	/* Enable pin control */
86 #define	CORE_INT	(1ULL << 20)	/* Enable interrupt on overflow */
87 #define	CORE_EN		(1ULL << 22)	/* Enable counting */
88 #define	CORE_INV	(1ULL << 23)	/* Invert the CMASK */
89 #define	CORE_ANYTHR	(1ULL << 21)	/* Count event for any thread on core */
90 
91 #define	CORE_UMASK_SHIFT	8
92 #define	CORE_UMASK_MASK		0xffu
93 #define	CORE_CMASK_SHIFT	24
94 #define	CORE_CMASK_MASK		0xffu
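/*
 * For illustration: configure_gpc() below builds a GPC event-select value as
 *
 *	event_code | (umask << CORE_UMASK_SHIFT) |
 *	    (cmask << CORE_CMASK_SHIFT) | flag bits
 *
 * so counting event 0xc0 (inst_retired) in both user and kernel mode with no
 * umask or cmask attributes yields 0xc0 | CORE_USR | CORE_OS | CORE_EN,
 * i.e. 0x4300c0.
 */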
95 
96 /*
97  * Fixed-function counter attributes
98  */
99 #define	CORE_FFC_OS_EN	(1ULL << 0)	/* Count while in ring 0 */
100 #define	CORE_FFC_USR_EN	(1ULL << 1)	/* Count while not in ring 0 */
101 #define	CORE_FFC_ANYTHR	(1ULL << 2)	/* Count event for any thread on core */
102 #define	CORE_FFC_PMI	(1ULL << 3)	/* Enable interrupt on overflow */
103 
104 /*
105  * Number of bits for specifying each FFC's attributes in the control register
106  */
107 #define	CORE_FFC_ATTR_SIZE	4
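/*
 * FFC n's enable and attribute bits therefore occupy bits [4n+3:4n] of
 * PERF_FIXED_CTR_CTRL; core_pcbe_program() shifts each FFC's core_ctl into
 * place using this constant.
 */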
108 
109 /*
110  * CondChgd and OvfBuffer fields of global status and overflow control registers
111  */
112 #define	CONDCHGD	(1ULL << 63)
113 #define	OVFBUFFER	(1ULL << 62)
114 #define	MASK_CONDCHGD_OVFBUFFER	(CONDCHGD | OVFBUFFER)
115 
116 #define	ALL_STOPPED	0ULL
117 
118 #define	BITMASK_XBITS(x)	((1ull << (x)) - 1ull)
119 
120 /*
121  * Only the lower 32-bits can be written to in the general-purpose
122  * counters.  The higher bits are extended from bit 31; all ones if
123  * bit 31 is one and all zeros otherwise.
124  *
125  * The fixed-function counters do not have this restriction.
126  */
127 #define	BITS_EXTENDED_FROM_31	(BITMASK_XBITS(width_gpc) & ~BITMASK_XBITS(31))
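/*
 * For example, on a processor with 40-bit GPCs, BITS_EXTENDED_FROM_31 is
 * 0xff80000000: presets such as 0xffffffffff (bits 31-39 all ones) or 0xff
 * (bits 31-39 all zeros) can be written directly, but 0x100000000 cannot,
 * because bits 32-39 would not match bit 31.
 */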
128 
129 #define	WRMSR(msr, value)						\
130 	wrmsr((msr), (value));						\
131 	DTRACE_PROBE2(wrmsr, uint64_t, (msr), uint64_t, (value));
132 
133 #define	RDMSR(msr, value)						\
134 	(value) = rdmsr((msr));						\
135 	DTRACE_PROBE2(rdmsr, uint64_t, (msr), uint64_t, (value));
136 
137 typedef struct core_pcbe_config {
138 	uint64_t	core_rawpic;
139 	uint64_t	core_ctl;	/* Event Select bits */
140 	uint64_t	core_pmc;	/* Counter register address */
141 	uint64_t	core_pes;	/* Event Select register address */
142 	uint_t		core_picno;
143 	uint8_t		core_pictype;	/* CORE_GPC or CORE_FFC */
144 } core_pcbe_config_t;
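/*
 * For a GPC configuration, core_pes is that counter's own event-select MSR
 * (GPC_BASE_PES + picno).  For an FFC configuration, every instance shares
 * PERF_FIXED_CTR_CTRL and core_ctl holds only the 4-bit attribute field for
 * that counter rather than a full event-select value.
 */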
145 
146 pcbe_ops_t core_pcbe_ops = {
147 	PCBE_VER_1,			/* pcbe_ver */
148 	CPC_CAP_OVERFLOW_INTERRUPT | CPC_CAP_OVERFLOW_PRECISE,	/* pcbe_caps */
149 	core_pcbe_ncounters,		/* pcbe_ncounters */
150 	core_pcbe_impl_name,		/* pcbe_impl_name */
151 	core_pcbe_cpuref,		/* pcbe_cpuref */
152 	core_pcbe_list_events,		/* pcbe_list_events */
153 	core_pcbe_list_attrs,		/* pcbe_list_attrs */
154 	core_pcbe_event_coverage,	/* pcbe_event_coverage */
155 	core_pcbe_overflow_bitmap,	/* pcbe_overflow_bitmap */
156 	core_pcbe_configure,		/* pcbe_configure */
157 	core_pcbe_program,		/* pcbe_program */
158 	core_pcbe_allstop,		/* pcbe_allstop */
159 	core_pcbe_sample,		/* pcbe_sample */
160 	core_pcbe_free			/* pcbe_free */
161 };
162 
163 struct nametable_fam6mod15_23 {
164 	const char	*name;
165 	uint64_t	restricted_bits;
166 	uint8_t		event_num;
167 };
168 
169 #define	NT_END	0xFF
170 
171 /*
172  * Counting an event for all cores or all bus agents requires cpc_cpu privileges
173  */
174 #define	ALL_CORES	(1ULL << 15)
175 #define	ALL_AGENTS	(1ULL << 13)
176 
177 /*
178  * The events listed in the following table can be counted on all
179  * general-purpose counters of Family 6 Model 15 and Model 23 processors
180  */
181 static const struct nametable_fam6mod15_23 cmn_gpc_events_f6m15_23[] = {
182 	/* Alphabetical order of event name */
183 
184 	{ "baclears",			0x0,	0xe6 },
185 	{ "bogus_br",			0x0,	0xe4 },
186 	{ "br_bac_missp_exec",		0x0,	0x8a },
187 
188 	{ "br_call_exec",		0x0,	0x92 },
189 	{ "br_call_missp_exec",		0x0,	0x93 },
190 	{ "br_cnd_exec",		0x0,	0x8b },
191 
192 	{ "br_cnd_missp_exec",		0x0,	0x8c },
193 	{ "br_ind_call_exec",		0x0,	0x94 },
194 	{ "br_ind_exec",		0x0,	0x8d },
195 
196 	{ "br_ind_missp_exec",		0x0,	0x8e },
197 	{ "br_inst_decoded",		0x0,	0xe0 },
198 	{ "br_inst_exec",		0x0,	0x88 },
199 
200 	{ "br_inst_retired",		0x0,	0xc4 },
201 	{ "br_inst_retired_mispred",	0x0,	0xc5 },
202 	{ "br_missp_exec",		0x0,	0x89 },
203 
204 	{ "br_ret_bac_missp_exec",	0x0,	0x91 },
205 	{ "br_ret_exec",		0x0,	0x8f },
206 	{ "br_ret_missp_exec",		0x0,	0x90 },
207 
208 	{ "br_tkn_bubble_1",		0x0,	0x97 },
209 	{ "br_tkn_bubble_2",		0x0,	0x98 },
210 	{ "bus_bnr_drv",		ALL_AGENTS,	0x61 },
211 
212 	{ "bus_data_rcv",		ALL_CORES,	0x64 },
213 	{ "bus_drdy_clocks",		ALL_AGENTS,	0x62 },
214 	{ "bus_hit_drv",		ALL_AGENTS,	0x7a },
215 
216 	{ "bus_hitm_drv",		ALL_AGENTS,	0x7b },
217 	{ "bus_io_wait",		ALL_CORES,	0x7f },
218 	{ "bus_lock_clocks",		ALL_CORES | ALL_AGENTS,	0x63 },
219 
220 	{ "bus_request_outstanding",	ALL_CORES | ALL_AGENTS,	0x60 },
221 	{ "bus_trans_any",		ALL_CORES | ALL_AGENTS,	0x70 },
222 	{ "bus_trans_brd",		ALL_CORES | ALL_AGENTS,	0x65 },
223 
224 	{ "bus_trans_burst",		ALL_CORES | ALL_AGENTS,	0x6e },
225 	{ "bus_trans_def",		ALL_CORES | ALL_AGENTS,	0x6d },
226 	{ "bus_trans_ifetch",		ALL_CORES | ALL_AGENTS,	0x68 },
227 
228 	{ "bus_trans_inval",		ALL_CORES | ALL_AGENTS,	0x69 },
229 	{ "bus_trans_io",		ALL_CORES | ALL_AGENTS,	0x6c },
230 	{ "bus_trans_mem",		ALL_CORES | ALL_AGENTS,	0x6f },
231 
232 	{ "bus_trans_p",		ALL_CORES | ALL_AGENTS,	0x6b },
233 	{ "bus_trans_pwr",		ALL_CORES | ALL_AGENTS,	0x6a },
234 	{ "bus_trans_rfo",		ALL_CORES | ALL_AGENTS,	0x66 },
235 
236 	{ "bus_trans_wb",		ALL_CORES | ALL_AGENTS,	0x67 },
237 	{ "busq_empty",			ALL_CORES,	0x7d },
238 	{ "cmp_snoop",			ALL_CORES,	0x78 },
239 
240 	{ "cpu_clk_unhalted",		0x0,	0x3c },
241 	{ "cycles_int",			0x0,	0xc6 },
242 	{ "cycles_l1i_mem_stalled",	0x0,	0x86 },
243 
244 	{ "dtlb_misses",		0x0,	0x08 },
245 	{ "eist_trans",			0x0,	0x3a },
246 	{ "esp",			0x0,	0xab },
247 
248 	{ "ext_snoop",			ALL_AGENTS,	0x77 },
249 	{ "fp_mmx_trans",		0x0,	0xcc },
250 	{ "hw_int_rcv",			0x0,	0xc8 },
251 
252 	{ "ild_stall",			0x0,	0x87 },
253 	{ "inst_queue",			0x0,	0x83 },
254 	{ "inst_retired",		0x0,	0xc0 },
255 
256 	{ "itlb",			0x0,	0x82 },
257 	{ "itlb_miss_retired",		0x0,	0xc9 },
258 	{ "l1d_all_ref",		0x0,	0x43 },
259 
260 	{ "l1d_cache_ld",		0x0,	0x40 },
261 	{ "l1d_cache_lock",		0x0,	0x42 },
262 	{ "l1d_cache_st",		0x0,	0x41 },
263 
264 	{ "l1d_m_evict",		0x0,	0x47 },
265 	{ "l1d_m_repl",			0x0,	0x46 },
266 	{ "l1d_pend_miss",		0x0,	0x48 },
267 
268 	{ "l1d_prefetch",		0x0,	0x4e },
269 	{ "l1d_repl",			0x0,	0x45 },
270 	{ "l1d_split",			0x0,	0x49 },
271 
272 	{ "l1i_misses",			0x0,	0x81 },
273 	{ "l1i_reads",			0x0,	0x80 },
274 	{ "l2_ads",			ALL_CORES,	0x21 },
275 
276 	{ "l2_dbus_busy_rd",		ALL_CORES,	0x23 },
277 	{ "l2_ifetch",			ALL_CORES,	0x28 },
278 	{ "l2_ld",			ALL_CORES,	0x29 },
279 
280 	{ "l2_lines_in",		ALL_CORES,	0x24 },
281 	{ "l2_lines_out",		ALL_CORES,	0x26 },
282 	{ "l2_lock",			ALL_CORES,	0x2b },
283 
284 	{ "l2_m_lines_in",		ALL_CORES,	0x25 },
285 	{ "l2_m_lines_out",		ALL_CORES,	0x27 },
286 	{ "l2_no_req",			ALL_CORES,	0x32 },
287 
288 	{ "l2_reject_busq",		ALL_CORES,	0x30 },
289 	{ "l2_rqsts",			ALL_CORES,	0x2e },
290 	{ "l2_st",			ALL_CORES,	0x2a },
291 
292 	{ "load_block",			0x0,	0x03 },
293 	{ "load_hit_pre",		0x0,	0x4c },
294 	{ "machine_nukes",		0x0,	0xc3 },
295 
296 	{ "macro_insts",		0x0,	0xaa },
297 	{ "memory_disambiguation",	0x0,	0x09 },
298 	{ "misalign_mem_ref",		0x0,	0x05 },
299 	{ "page_walks",			0x0,	0x0c },
300 
301 	{ "pref_rqsts_dn",		0x0,	0xf8 },
302 	{ "pref_rqsts_up",		0x0,	0xf0 },
303 	{ "rat_stalls",			0x0,	0xd2 },
304 
305 	{ "resource_stalls",		0x0,	0xdc },
306 	{ "rs_uops_dispatched",		0x0,	0xa0 },
307 	{ "seg_reg_renames",		0x0,	0xd5 },
308 
309 	{ "seg_rename_stalls",		0x0,	0xd4 },
310 	{ "segment_reg_loads",		0x0,	0x06 },
311 	{ "simd_assist",		0x0,	0xcd },
312 
313 	{ "simd_comp_inst_retired",	0x0,	0xca },
314 	{ "simd_inst_retired",		0x0,	0xc7 },
315 	{ "simd_instr_retired",		0x0,	0xce },
316 
317 	{ "simd_sat_instr_retired",	0x0,	0xcf },
318 	{ "simd_sat_uop_exec",		0x0,	0xb1 },
319 	{ "simd_uop_type_exec",		0x0,	0xb3 },
320 
321 	{ "simd_uops_exec",		0x0,	0xb0 },
322 	{ "snoop_stall_drv",		ALL_CORES | ALL_AGENTS,	0x7e },
323 	{ "sse_pre_exec",		0x0,	0x07 },
324 
325 	{ "sse_pre_miss",		0x0,	0x4b },
326 	{ "store_block",		0x0,	0x04 },
327 	{ "thermal_trip",		0x0,	0x3b },
328 
329 	{ "uops_retired",		0x0,	0xc2 },
330 	{ "x87_ops_retired",		0x0,	0xc1 },
331 	{ "",				0x0,	NT_END }
332 };
333 
334 /*
335  * If any of the pic-specific events require privileges, make sure to add a
336  * check in configure_gpc() to determine whether an event hard-coded as a
337  * number by the user has any privilege requirements.
338  */
339 static const struct nametable_fam6mod15_23 pic0_events[] = {
340 	/* Alphabetical order of event name */
341 
342 	{ "cycles_div_busy",		0x0,	0x14 },
343 	{ "fp_comp_ops_exe",		0x0,	0x10 },
344 	{ "idle_during_div",		0x0,	0x18 },
345 
346 	{ "mem_load_retired",		0x0,	0xcb },
347 	{ "rs_uops_dispatched_port",	0x0,	0xa1 },
348 	{ "",				0x0,	NT_END }
349 };
350 
351 static const struct nametable_fam6mod15_23 pic1_events[] = {
352 	/* Alphabetical order of event name */
353 
354 	{ "delayed_bypass",	0x0,	0x19 },
355 	{ "div",		0x0,	0x13 },
356 	{ "fp_assist",		0x0,	0x11 },
357 
358 	{ "mul",		0x0,	0x12 },
359 	{ "",			0x0,	NT_END }
360 };
361 
362 char *ffc_names[] = {
363 	"inst_retired.any",
364 	"cpu_clk_unhalted.thread",
365 	"cpu_clk_unhalted.ref",
366 	NULL
367 };
368 
369 static char	**gpc_names;
370 static uint32_t	versionid;
371 static uint64_t	num_gpc;
372 static uint64_t	width_gpc;
373 static uint64_t	mask_gpc;
374 static uint64_t	num_ffc;
375 static uint64_t	width_ffc;
376 static uint64_t	mask_ffc;
377 static uint_t	total_pmc;
378 static uint64_t	control_ffc;
379 static uint64_t	control_gpc;
380 static uint64_t	control_mask;
381 static uint32_t	arch_events_vector;
382 
383 #define	IMPL_NAME_LEN 100
384 static char core_impl_name[IMPL_NAME_LEN];
385 
386 static const char *core_cpuref =
387 	"See Appendix A of the \"Intel 64 and IA-32 Architectures Software" \
388 	" Developer's Manual Volume 3B: System Programming Guide, Part 2\"" \
389 	" Order Number: 253669-026US, February 2008";
390 
391 struct events_table_t {
392 	uint8_t		eventselect;
393 	uint8_t		unitmask;
394 	uint64_t	supported_counters;
395 	const char	*name;
396 };
397 
398 /* Used to describe which counters support an event */
399 #define	C(x) (1 << (x))
400 #define	C0 C(0)
401 #define	C1 C(1)
402 #define	C2 C(2)
403 #define	C3 C(3)
404 #define	C_ALL 0xFFFFFFFFFFFFFFFF
405 
406 const struct events_table_t arch_events_table[] = {
407 
408 { 0x3c, 0x00, C_ALL, "cpu_clk_unhalted.thread_p" },
409 { 0xc0, 0x00, C_ALL, "inst_retired.any_p" },
410 { 0x3c, 0x01, C_ALL, "cpu_clk_unhalted.ref_p" },
411 
412 { 0x2e, 0x4f, C_ALL, "longest_lat_cache.reference" },
413 { 0x2e, 0x41, C_ALL, "longest_lat_cache.miss" },
414 { 0xc4, 0x00, C_ALL, "br_inst_retired.all_branches" },
415 
416 { 0xc5, 0x00, C_ALL, "br_misp_retired.all_branches" }
417 };
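/*
 * Availability of each architectural event above is advertised in
 * CPUID.0AH:EBX: a 0 bit there means the corresponding entry of
 * arch_events_table[] is supported.  core_pcbe_init() saves that register in
 * arch_events_vector, which find_gpcevent() consults.
 */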
418 
419 static uint64_t known_arch_events =
420 	    sizeof (arch_events_table)/sizeof (struct events_table_t);
421 
422 #define	EVENTS_FAM6_MOD26						\
423 									\
424 { 0x80, 0x04, C0|C1|C2|C3, "l1i.cycles_stalled" },			\
425 { 0x80, 0x01, C0|C1|C2|C3, "l1i.hits" },				\
426 { 0x80, 0x02, C0|C1|C2|C3, "l1i.misses" },				\
427 									\
428 { 0x80, 0x03, C0|C1|C2|C3, "l1i.reads" },				\
429 { 0x82, 0x01, C0|C1|C2|C3, "large_itlb.hit" },				\
430 { 0x87, 0x0F, C0|C1|C2|C3, "ild_stall.any" },				\
431 									\
432 { 0x87, 0x04, C0|C1|C2|C3, "ild_stall.iq_full" },			\
433 { 0x87, 0x01, C0|C1|C2|C3, "ild_stall.lcp" },				\
434 { 0x87, 0x02, C0|C1|C2|C3, "ild_stall.mru" },				\
435 									\
436 { 0x87, 0x08, C0|C1|C2|C3, "ild_stall.regen" },				\
437 { 0xE6, 0x02, C0|C1|C2|C3, "baclear.bad_target" },			\
438 { 0xE6, 0x01, C0|C1|C2|C3, "baclear.clear" },				\
439 									\
440 { 0xE8, 0x01, C0|C1|C2|C3, "bpu_clears.early" },			\
441 { 0xE8, 0x02, C0|C1|C2|C3, "bpu_clears.late" },				\
442 { 0xE5, 0x01, C0|C1|C2|C3, "bpu_missed_call_ret" },			\
443 									\
444 { 0xE0, 0x01, C0|C1|C2|C3, "br_inst_decoded" },				\
445 { 0x88, 0x7F, C0|C1|C2|C3, "br_inst_exec.any" },			\
446 { 0x88, 0x01, C0|C1|C2|C3, "br_inst_exec.cond" },			\
447 									\
448 { 0x88, 0x02, C0|C1|C2|C3, "br_inst_exec.direct" },			\
449 { 0x88, 0x10, C0|C1|C2|C3, "br_inst_exec.direct_near_call" },		\
450 { 0x88, 0x20, C0|C1|C2|C3, "br_inst_exec.indirect_near_call" },		\
451 									\
452 { 0x88, 0x04, C0|C1|C2|C3, "br_inst_exec.indirect_non_call" },		\
453 { 0x88, 0x30, C0|C1|C2|C3, "br_inst_exec.near_calls" },			\
454 { 0x88, 0x07, C0|C1|C2|C3, "br_inst_exec.non_calls" },			\
455 									\
456 { 0x88, 0x08, C0|C1|C2|C3, "br_inst_exec.return_near" },		\
457 { 0x88, 0x40, C0|C1|C2|C3, "br_inst_exec.taken" },			\
458 { 0x89, 0x7F, C0|C1|C2|C3, "br_misp_exec.any" },			\
459 									\
460 { 0x89, 0x01, C0|C1|C2|C3, "br_misp_exec.cond" },			\
461 { 0x89, 0x02, C0|C1|C2|C3, "br_misp_exec.direct" },			\
462 { 0x89, 0x10, C0|C1|C2|C3, "br_misp_exec.direct_near_call" },		\
463 									\
464 { 0x89, 0x20, C0|C1|C2|C3, "br_misp_exec.indirect_near_call" },		\
465 { 0x89, 0x04, C0|C1|C2|C3, "br_misp_exec.indirect_non_call" },		\
466 { 0x89, 0x30, C0|C1|C2|C3, "br_misp_exec.near_calls" },			\
467 									\
468 { 0x89, 0x07, C0|C1|C2|C3, "br_misp_exec.non_calls" },			\
469 { 0x89, 0x08, C0|C1|C2|C3, "br_misp_exec.return_near" },		\
470 { 0x89, 0x40, C0|C1|C2|C3, "br_misp_exec.taken" },			\
471 									\
472 { 0x17, 0x01, C0|C1|C2|C3, "inst_queue_writes" },			\
473 { 0x1E, 0x01, C0|C1|C2|C3, "inst_queue_write_cycles" },			\
474 { 0xA7, 0x01, C0|C1|C2|C3, "baclear_force_iq" },			\
475 									\
476 { 0xD0, 0x01, C0|C1|C2|C3, "macro_insts.decoded" },			\
477 { 0xA6, 0x01, C0|C1|C2|C3, "macro_insts.fusions_decoded" },		\
478 { 0x19, 0x01, C0|C1|C2|C3, "two_uop_insts_decoded" },			\
479 									\
480 { 0x18, 0x01, C0|C1|C2|C3, "inst_decoded.dec0" },			\
481 { 0xD1, 0x04, C0|C1|C2|C3, "uops_decoded.esp_folding" },		\
482 { 0xD1, 0x08, C0|C1|C2|C3, "uops_decoded.esp_sync" },			\
483 									\
484 { 0xD1, 0x02, C0|C1|C2|C3, "uops_decoded.ms" },				\
485 { 0x20, 0x01, C0|C1|C2|C3, "lsd_overflow" },				\
486 { 0x0E, 0x01, C0|C1|C2|C3, "uops_issued.any" },				\
487 									\
488 { 0x0E, 0x02, C0|C1|C2|C3, "uops_issued.fused" },			\
489 { 0xA2, 0x20, C0|C1|C2|C3, "resource_stalls.fpcw" },			\
490 { 0xA2, 0x02, C0|C1|C2|C3, "resource_stalls.load" },			\
491 									\
492 { 0xA2, 0x40, C0|C1|C2|C3, "resource_stalls.mxcsr" },			\
493 { 0xA2, 0x04, C0|C1|C2|C3, "resource_stalls.rs_full" },			\
494 { 0xA2, 0x08, C0|C1|C2|C3, "resource_stalls.store" },			\
495 									\
496 { 0xA2, 0x01, C0|C1|C2|C3, "resource_stalls.any" },			\
497 { 0xD2, 0x01, C0|C1|C2|C3, "rat_stalls.flags" },			\
498 { 0xD2, 0x02, C0|C1|C2|C3, "rat_stalls.registers" },			\
499 									\
500 { 0xD2, 0x04, C0|C1|C2|C3, "rat_stalls.rob_read_port" },		\
501 { 0xD2, 0x08, C0|C1|C2|C3, "rat_stalls.scoreboard" },			\
502 { 0xD2, 0x0F, C0|C1|C2|C3, "rat_stalls.any" },				\
503 									\
504 { 0xD4, 0x01, C0|C1|C2|C3, "seg_rename_stalls" },			\
505 { 0xD5, 0x01, C0|C1|C2|C3, "es_reg_renames" },				\
506 { 0x10, 0x02, C0|C1|C2|C3, "fp_comp_ops_exe.mmx" },			\
507 									\
508 { 0x10, 0x80, C0|C1|C2|C3, "fp_comp_ops_exe.sse_double_precision" },	\
509 { 0x10, 0x04, C0|C1|C2|C3, "fp_comp_ops_exe.sse_fp" },			\
510 { 0x10, 0x10, C0|C1|C2|C3, "fp_comp_ops_exe.sse_fp_packed" },		\
511 									\
512 { 0x10, 0x20, C0|C1|C2|C3, "fp_comp_ops_exe.sse_fp_scalar" },		\
513 { 0x10, 0x40, C0|C1|C2|C3, "fp_comp_ops_exe.sse_single_precision" },	\
514 { 0x10, 0x08, C0|C1|C2|C3, "fp_comp_ops_exe.sse2_integer" },		\
515 									\
516 { 0x10, 0x01, C0|C1|C2|C3, "fp_comp_ops_exe.x87" },			\
517 { 0x14, 0x01, C0|C1|C2|C3, "arith.cycles_div_busy" },			\
518 { 0x14, 0x02, C0|C1|C2|C3, "arith.mul" },				\
519 									\
520 { 0x12, 0x04, C0|C1|C2|C3, "simd_int_128.pack" },			\
521 { 0x12, 0x20, C0|C1|C2|C3, "simd_int_128.packed_arith" },		\
522 { 0x12, 0x10, C0|C1|C2|C3, "simd_int_128.packed_logical" },		\
523 									\
524 { 0x12, 0x01, C0|C1|C2|C3, "simd_int_128.packed_mpy" },			\
525 { 0x12, 0x02, C0|C1|C2|C3, "simd_int_128.packed_shift" },		\
526 { 0x12, 0x40, C0|C1|C2|C3, "simd_int_128.shuffle_move" },		\
527 									\
528 { 0x12, 0x08, C0|C1|C2|C3, "simd_int_128.unpack" },			\
529 { 0xFD, 0x04, C0|C1|C2|C3, "simd_int_64.pack" },			\
530 { 0xFD, 0x20, C0|C1|C2|C3, "simd_int_64.packed_arith" },		\
531 									\
532 { 0xFD, 0x10, C0|C1|C2|C3, "simd_int_64.packed_logical" },		\
533 { 0xFD, 0x01, C0|C1|C2|C3, "simd_int_64.packed_mpy" },			\
534 { 0xFD, 0x02, C0|C1|C2|C3, "simd_int_64.packed_shift" },		\
535 									\
536 { 0xFD, 0x40, C0|C1|C2|C3, "simd_int_64.shuffle_move" },		\
537 { 0xFD, 0x08, C0|C1|C2|C3, "simd_int_64.unpack" },			\
538 { 0xB1, 0x01, C0|C1|C2|C3, "uops_executed.port0" },			\
539 									\
540 { 0xB1, 0x02, C0|C1|C2|C3, "uops_executed.port1" },			\
541 { 0x40, 0x04, C0|C1, "l1d_cache_ld.e_state" },				\
542 { 0x40, 0x01, C0|C1, "l1d_cache_ld.i_state" },				\
543 									\
544 { 0x40, 0x08, C0|C1, "l1d_cache_ld.m_state" },				\
545 { 0x40, 0x0F, C0|C1, "l1d_cache_ld.mesi" },				\
546 { 0x40, 0x02, C0|C1, "l1d_cache_ld.s_state" },				\
547 									\
548 { 0x41, 0x04, C0|C1, "l1d_cache_st.e_state" },				\
549 { 0x41, 0x08, C0|C1, "l1d_cache_st.m_state" },				\
550 { 0x41, 0x0F, C0|C1, "l1d_cache_st.mesi" },				\
551 									\
552 { 0x41, 0x02, C0|C1, "l1d_cache_st.s_state" },				\
553 { 0x42, 0x04, C0|C1, "l1d_cache_lock.e_state" },			\
554 { 0x42, 0x01, C0|C1, "l1d_cache_lock.hit" },				\
555 									\
556 { 0x42, 0x08, C0|C1, "l1d_cache_lock.m_state" },			\
557 { 0x42, 0x02, C0|C1, "l1d_cache_lock.s_state" },			\
558 { 0x43, 0x01, C0|C1, "l1d_all_ref.any" },				\
559 									\
560 { 0x43, 0x02, C0|C1, "l1d_all_ref.cacheable" },				\
561 { 0x4B, 0x01, C0|C1, "mmx2_mem_exec.nta" },				\
562 { 0x4C, 0x01, C0|C1, "load_hit_pre" },					\
563 									\
564 { 0x4E, 0x02, C0|C1, "l1d_prefetch.miss" },				\
565 { 0x4E, 0x01, C0|C1, "l1d_prefetch.requests" },				\
566 { 0x4E, 0x04, C0|C1, "l1d_prefetch.triggers" },				\
567 									\
568 { 0x51, 0x04, C0|C1, "l1d.m_evict" },					\
569 { 0x51, 0x02, C0|C1, "l1d.m_repl" },					\
570 { 0x51, 0x08, C0|C1, "l1d.m_snoop_evict" },				\
571 									\
572 { 0x51, 0x01, C0|C1, "l1d.repl" },					\
573 { 0x52, 0x01, C0|C1, "l1d_cache_prefetch_lock_fb_hit" },		\
574 { 0x53, 0x01, C0|C1, "l1d_cache_lock_fb_hit" },				\
575 									\
576 { 0x63, 0x02, C0|C1, "cache_lock_cycles.l1d" },				\
577 { 0x63, 0x01, C0|C1, "cache_lock_cycles.l1d_l2" },			\
578 { 0x06, 0x04, C0|C1|C2|C3, "store_blocks.at_ret" },			\
579 									\
580 { 0x06, 0x08, C0|C1|C2|C3, "store_blocks.l1d_block" },			\
581 { 0x06, 0x01, C0|C1|C2|C3, "store_blocks.not_sta" },			\
582 { 0x06, 0x02, C0|C1|C2|C3, "store_blocks.sta" },			\
583 									\
584 { 0x13, 0x07, C0|C1|C2|C3, "load_dispatch.any" },			\
585 { 0x13, 0x04, C0|C1|C2|C3, "load_dispatch.mob" },			\
586 { 0x13, 0x01, C0|C1|C2|C3, "load_dispatch.rs" },			\
587 									\
588 { 0x13, 0x02, C0|C1|C2|C3, "load_dispatch.rs_delayed" },		\
589 { 0x08, 0x01, C0|C1|C2|C3, "dtlb_load_misses.any" },			\
590 { 0x08, 0x20, C0|C1|C2|C3, "dtlb_load_misses.pde_miss" },		\
591 									\
592 { 0x08, 0x02, C0|C1|C2|C3, "dtlb_load_misses.walk_completed" },		\
593 { 0x49, 0x01, C0|C1|C2|C3, "dtlb_misses.any" },				\
594 { 0x49, 0x10, C0|C1|C2|C3, "dtlb_misses.stlb_hit" },			\
595 									\
596 { 0x49, 0x02, C0|C1|C2|C3, "dtlb_misses.walk_completed" },		\
597 { 0x4F, 0x02, C0|C1|C2|C3, "ept.epde_miss" },				\
598 { 0x4F, 0x08, C0|C1|C2|C3, "ept.epdpe_miss" },				\
599 									\
600 { 0x85, 0x01, C0|C1|C2|C3, "itlb_misses.any" },				\
601 { 0x85, 0x02, C0|C1|C2|C3, "itlb_misses.walk_completed" },		\
602 { 0x24, 0xAA, C0|C1|C2|C3, "l2_rqsts.miss" },				\
603 									\
604 { 0x24, 0xFF, C0|C1|C2|C3, "l2_rqsts.references" },			\
605 { 0x24, 0x10, C0|C1|C2|C3, "l2_rqsts.ifetch_hit" },			\
606 { 0x24, 0x20, C0|C1|C2|C3, "l2_rqsts.ifetch_miss" },			\
607 									\
608 { 0x24, 0x30, C0|C1|C2|C3, "l2_rqsts.ifetches" },			\
609 { 0x24, 0x01, C0|C1|C2|C3, "l2_rqsts.ld_hit" },				\
610 { 0x24, 0x02, C0|C1|C2|C3, "l2_rqsts.ld_miss" },			\
611 									\
612 { 0x24, 0x03, C0|C1|C2|C3, "l2_rqsts.loads" },				\
613 { 0x24, 0x40, C0|C1|C2|C3, "l2_rqsts.prefetch_hit" },			\
614 { 0x24, 0x80, C0|C1|C2|C3, "l2_rqsts.prefetch_miss" },			\
615 									\
616 { 0x24, 0xC0, C0|C1|C2|C3, "l2_rqsts.prefetches" },			\
617 { 0x24, 0x04, C0|C1|C2|C3, "l2_rqsts.rfo_hit" },			\
618 { 0x24, 0x08, C0|C1|C2|C3, "l2_rqsts.rfo_miss" },			\
619 									\
620 { 0x24, 0x0C, C0|C1|C2|C3, "l2_rqsts.rfos" },				\
621 { 0x26, 0xFF, C0|C1|C2|C3, "l2_data_rqsts.any" },			\
622 { 0x26, 0x04, C0|C1|C2|C3, "l2_data_rqsts.demand.e_state" },		\
623 									\
624 { 0x26, 0x01, C0|C1|C2|C3, "l2_data_rqsts.demand.i_state" },		\
625 { 0x26, 0x08, C0|C1|C2|C3, "l2_data_rqsts.demand.m_state" },		\
626 { 0x26, 0x0F, C0|C1|C2|C3, "l2_data_rqsts.demand.mesi" },		\
627 									\
628 { 0x26, 0x02, C0|C1|C2|C3, "l2_data_rqsts.demand.s_state" },		\
629 { 0x26, 0x40, C0|C1|C2|C3, "l2_data_rqsts.prefetch.e_state" },		\
630 { 0x26, 0x10, C0|C1|C2|C3, "l2_data_rqsts.prefetch.i_state" },		\
631 									\
632 { 0x26, 0x80, C0|C1|C2|C3, "l2_data_rqsts.prefetch.m_state" },		\
633 { 0x26, 0xF0, C0|C1|C2|C3, "l2_data_rqsts.prefetch.mesi" },		\
634 { 0x26, 0x20, C0|C1|C2|C3, "l2_data_rqsts.prefetch.s_state" },		\
635 									\
636 { 0x27, 0x40, C0|C1|C2|C3, "l2_write.lock.e_state" },			\
637 { 0x27, 0x10, C0|C1|C2|C3, "l2_write.lock.i_state" },			\
638 { 0x27, 0x20, C0|C1|C2|C3, "l2_write.lock.s_state" },			\
639 									\
640 { 0x27, 0x0E, C0|C1|C2|C3, "l2_write.rfo.hit" },			\
641 { 0x27, 0x01, C0|C1|C2|C3, "l2_write.rfo.i_state" },			\
642 { 0x27, 0x08, C0|C1|C2|C3, "l2_write.rfo.m_state" },			\
643 									\
644 { 0x27, 0x0F, C0|C1|C2|C3, "l2_write.rfo.mesi" },			\
645 { 0x27, 0x02, C0|C1|C2|C3, "l2_write.rfo.s_state" },			\
646 { 0x28, 0x04, C0|C1|C2|C3, "l1d_wb_l2.e_state" },			\
647 									\
648 { 0x28, 0x01, C0|C1|C2|C3, "l1d_wb_l2.i_state" },			\
649 { 0x28, 0x08, C0|C1|C2|C3, "l1d_wb_l2.m_state" },			\
650 { 0xF0, 0x80, C0|C1|C2|C3, "l2_transactions.any" },			\
651 									\
652 { 0xF0, 0x20, C0|C1|C2|C3, "l2_transactions.fill" },			\
653 { 0xF0, 0x04, C0|C1|C2|C3, "l2_transactions.ifetch" },			\
654 { 0xF0, 0x10, C0|C1|C2|C3, "l2_transactions.l1d_wb" },			\
655 									\
656 { 0xF0, 0x01, C0|C1|C2|C3, "l2_transactions.load" },			\
657 { 0xF0, 0x08, C0|C1|C2|C3, "l2_transactions.prefetch" },		\
658 { 0xF0, 0x02, C0|C1|C2|C3, "l2_transactions.rfo" },			\
659 									\
660 { 0xF0, 0x40, C0|C1|C2|C3, "l2_transactions.wb" },			\
661 { 0xF1, 0x07, C0|C1|C2|C3, "l2_lines_in.any" },				\
662 { 0xF1, 0x04, C0|C1|C2|C3, "l2_lines_in.e_state" },			\
663 									\
664 { 0xF1, 0x02, C0|C1|C2|C3, "l2_lines_in.s_state" },			\
665 { 0xF2, 0x0F, C0|C1|C2|C3, "l2_lines_out.any" },			\
666 { 0xF2, 0x01, C0|C1|C2|C3, "l2_lines_out.demand_clean" },		\
667 									\
668 { 0xF2, 0x02, C0|C1|C2|C3, "l2_lines_out.demand_dirty" },		\
669 { 0xF2, 0x04, C0|C1|C2|C3, "l2_lines_out.prefetch_clean" },		\
670 { 0x6C, 0x01, C0|C1|C2|C3, "io_transactions" },				\
671 									\
672 { 0xB0, 0x80, C0|C1|C2|C3, "offcore_requests.any" },			\
673 { 0xB0, 0x10, C0|C1|C2|C3, "offcore_requests.any.rfo" },		\
674 { 0xB0, 0x40, C0|C1|C2|C3, "offcore_requests.l1d_writeback" },		\
675 									\
676 { 0xB8, 0x01, C0|C1|C2|C3, "snoop_response.hit" },			\
677 { 0xB8, 0x02, C0|C1|C2|C3, "snoop_response.hite" },			\
678 { 0xB8, 0x04, C0|C1|C2|C3, "snoop_response.hitm" },			\
679 									\
680 { 0xF4, 0x10, C0|C1|C2|C3, "sq_misc.split_lock" },			\
681 { 0x0B, 0x01, C0|C1|C2|C3, "mem_inst_retired.loads" },			\
682 { 0x0B, 0x02, C0|C1|C2|C3, "mem_inst_retired.stores" },			\
683 									\
684 { 0xC0, 0x04, C0|C1|C2|C3, "inst_retired.mmx" },			\
685 { 0xC0, 0x02, C0|C1|C2|C3, "inst_retired.x87" },			\
686 { 0xC7, 0x04, C0|C1|C2|C3, "ssex_uops_retired.packed_double" },		\
687 									\
688 { 0xC7, 0x01, C0|C1|C2|C3, "ssex_uops_retired.packed_single" },		\
689 { 0xC7, 0x08, C0|C1|C2|C3, "ssex_uops_retired.scalar_double" },		\
690 { 0xC7, 0x02, C0|C1|C2|C3, "ssex_uops_retired.scalar_single" },		\
691 									\
692 { 0xC7, 0x10, C0|C1|C2|C3, "ssex_uops_retired.vector_integer" },	\
693 { 0xC2, 0x01, C0|C1|C2|C3, "uops_retired.any" },			\
694 { 0xC2, 0x04, C0|C1|C2|C3, "uops_retired.macro_fused" },		\
695 									\
696 { 0xC8, 0x20, C0|C1|C2|C3, "itlb_miss_retired" },			\
697 { 0xCB, 0x80, C0|C1|C2|C3, "mem_load_retired.dtlb_miss" },		\
698 { 0xCB, 0x40, C0|C1|C2|C3, "mem_load_retired.hit_lfb" },		\
699 									\
700 { 0xCB, 0x01, C0|C1|C2|C3, "mem_load_retired.l1d_hit" },		\
701 { 0xCB, 0x02, C0|C1|C2|C3, "mem_load_retired.l2_hit" },			\
702 { 0xCB, 0x10, C0|C1|C2|C3, "mem_load_retired.llc_miss" },		\
703 									\
704 { 0xCB, 0x04, C0|C1|C2|C3, "mem_load_retired.llc_unshared_hit" },	\
705 { 0xCB, 0x08, C0|C1|C2|C3, "mem_load_retired.other_core_l2_hit_hitm" },	\
706 { 0x0F, 0x02, C0|C1|C2|C3, "mem_uncore_retired.other_core_l2_hitm" },	\
707 									\
708 { 0x0F, 0x08, C0|C1|C2|C3, "mem_uncore_retired.remote_cache_local_home_hit" },\
709 { 0x0F, 0x10, C0|C1|C2|C3, "mem_uncore_retired.remote_dram" },		\
710 { 0x0F, 0x20, C0|C1|C2|C3, "mem_uncore_retired.local_dram" },		\
711 									\
712 { 0x0C, 0x01, C0|C1|C2|C3, "mem_store_retired.dtlb_miss" },		\
713 { 0xC4, 0x01, C0|C1|C2|C3, "br_inst_retired.conditional" },		\
714 { 0xC4, 0x02, C0|C1|C2|C3, "br_inst_retired.near_call" },		\
715 									\
716 { 0xC5, 0x02, C0|C1|C2|C3, "br_misp_retired.near_call" },		\
717 { 0xDB, 0x01, C0|C1|C2|C3, "uop_unfusion" },				\
718 { 0xF7, 0x01, C0|C1|C2|C3, "fp_assist.all" },				\
719 									\
720 { 0xF7, 0x04, C0|C1|C2|C3, "fp_assist.input" },				\
721 { 0xF7, 0x02, C0|C1|C2|C3, "fp_assist.output" },			\
722 { 0xCC, 0x03, C0|C1|C2|C3, "fp_mmx_trans.any" },			\
723 									\
724 { 0xCC, 0x01, C0|C1|C2|C3, "fp_mmx_trans.to_fp" },			\
725 { 0xCC, 0x02, C0|C1|C2|C3, "fp_mmx_trans.to_mmx" },			\
726 { 0xC3, 0x04, C0|C1|C2|C3, "machine_clears.smc" }
727 
728 
729 #define	EVENTS_FAM6_MOD37						\
730 { 0xB0, 0x08, C0|C1|C2|C3, "offcore_requests.any.read" },		\
731 { 0xB0, 0x01, C0|C1|C2|C3, "offcore_requests.demand.read_data" },	\
732 { 0xB0, 0x04, C0|C1|C2|C3, "offcore_requests.demand.rfo" }
733 
734 static const struct events_table_t *events_table = NULL;
735 
736 const struct events_table_t events_fam6_mod26[] = {
737 	EVENTS_FAM6_MOD26,
738 	{ NT_END, 0, 0, "" }
739 };
740 
741 const struct events_table_t events_fam6_mod37[] = {
742 	EVENTS_FAM6_MOD26,
743 	EVENTS_FAM6_MOD37,
744 	{ NT_END, 0, 0, "" }
745 };
746 
747 /*
748  * Initialize string containing list of supported general-purpose counter
749  * events for processors of Family 6 Models 15 and 23
750  */
751 static void
752 pcbe_init_fam6_model15_23()
753 {
754 	const struct nametable_fam6mod15_23	*n;
755 	const struct nametable_fam6mod15_23	*picspecific_events;
756 	size_t			common_size;
757 	size_t			size;
758 	uint64_t		i;
759 
760 	gpc_names = kmem_alloc(num_gpc * sizeof (char *), KM_SLEEP);
761 
762 	/* Calculate space needed to save all the common event names */
763 	common_size = 0;
764 	for (n = cmn_gpc_events_f6m15_23; n->event_num != NT_END; n++) {
765 		common_size += strlen(n->name) + 1;
766 	}
767 
768 	for (i = 0; i < num_gpc; i++) {
769 		size = 0;
770 		switch (i) {
771 			case 0:
772 				picspecific_events = pic0_events;
773 				break;
774 			case 1:
775 				picspecific_events = pic1_events;
776 				break;
777 			default:
778 				picspecific_events = NULL;
779 				break;
780 		}
781 		if (picspecific_events != NULL) {
782 			for (n = picspecific_events;
783 			    n->event_num != NT_END;
784 			    n++) {
785 				size += strlen(n->name) + 1;
786 			}
787 		}
788 
789 		gpc_names[i] =
790 		    kmem_alloc(size + common_size + 1, KM_SLEEP);
791 
792 		gpc_names[i][0] = '\0';
793 		if (picspecific_events != NULL) {
794 			for (n = picspecific_events;
795 			    n->event_num != NT_END;
796 			    n++) {
797 				(void) strcat(gpc_names[i], n->name);
798 				(void) strcat(gpc_names[i], ",");
799 			}
800 		}
801 		for (n = cmn_gpc_events_f6m15_23; n->event_num != NT_END;
802 		    n++) {
803 			(void) strcat(gpc_names[i], n->name);
804 			(void) strcat(gpc_names[i], ",");
805 		}
806 		/*
807 		 * Remove trailing comma.
808 		 */
809 		gpc_names[i][common_size + size - 1] = '\0';
810 	}
811 }
812 
813 static int
814 core_pcbe_init(void)
815 {
816 	struct cpuid_regs	cp;
817 	size_t			size;
818 	uint64_t		i;
819 	uint64_t		j;
820 	uint64_t		arch_events_vector_length;
821 	size_t			arch_events_string_length;
822 
823 	if (cpuid_getvendor(CPU) != X86_VENDOR_Intel)
824 		return (-1);
825 
826 	/* Obtain Basic CPUID information */
827 	cp.cp_eax = 0x0;
828 	(void) __cpuid_insn(&cp);
829 
830 	/* No Architectural Performance Monitoring Leaf returned by CPUID */
831 	if (cp.cp_eax < 0xa) {
832 		return (-1);
833 	}
834 
835 	/* Obtain the Architectural Performance Monitoring Leaf */
836 	cp.cp_eax = 0xa;
837 	(void) __cpuid_insn(&cp);
838 
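	/*
	 * Fields of CPUID leaf 0xA consumed below: EAX[7:0] is the
	 * architectural perfmon version, EAX[15:8] the number of GPCs,
	 * EAX[23:16] the GPC width and EAX[31:24] the length of the
	 * architectural events vector; EBX flags unavailable architectural
	 * events; EDX[4:0] and EDX[12:5] give the number and width of the
	 * FFCs on version 2 and later.
	 */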
839 	versionid = cp.cp_eax & 0xFF;
840 
841 	/*
842 	 * All Family 6 Model 15 and Model 23 processors have fixed-function
843 	 * counters.  These counters were made Architectural with
844 	 * Family 6 Model 15 Stepping 9.
845 	 */
846 	switch (versionid) {
847 
848 		case 0:
849 			return (-1);
850 
851 		case 2:
852 			num_ffc = cp.cp_edx & 0x1F;
853 			width_ffc = (cp.cp_edx >> 5) & 0xFF;
854 
855 			/*
856 			 * Some processors have an erratum (AW34) where
857 			 * versionid is reported as 2 when it is actually 1.
858 			 * In this case, fixed-function counters are
859 			 * model-specific as in Version 1.
860 			 */
861 			if (num_ffc != 0) {
862 				break;
863 			}
864 			/* FALLTHROUGH */
865 		case 1:
866 			num_ffc = 3;
867 			width_ffc = 40;
868 			versionid = 1;
869 			break;
870 
871 		default:
872 			num_ffc = cp.cp_edx & 0x1F;
873 			width_ffc = (cp.cp_edx >> 5) & 0xFF;
874 			break;
875 	}
876 
877 
878 	if (num_ffc >= 64)
879 		return (-1);
880 
881 	if (num_ffc >= sizeof (ffc_names) / sizeof (char *)) {
882 		/*
883 		 * The system seems to have more fixed-function counters than
884 		 * this PCBE is able to handle correctly.  Default to the
885 		 * maximum number of fixed-function counters that this driver
886 		 * is aware of.
887 		 */
888 		num_ffc = sizeof (ffc_names) / sizeof (char *) - 1;
889 	}
890 
891 	mask_ffc = BITMASK_XBITS(width_ffc);
892 
893 	num_gpc = (cp.cp_eax >> 8) & 0xFF;
894 	width_gpc = (cp.cp_eax >> 16) & 0xFF;
895 
896 	if (num_gpc >= 64)
897 		return (-1);
898 
899 	mask_gpc = BITMASK_XBITS(width_gpc);
900 
901 	total_pmc = num_gpc + num_ffc;
902 
903 	control_gpc = BITMASK_XBITS(num_gpc);
904 	control_ffc = BITMASK_XBITS(num_ffc);
905 
906 	control_mask = (control_ffc << 32) | control_gpc;
907 
908 	if (total_pmc > 64) {
909 		/* Too wide for the overflow bitmap */
910 		return (-1);
911 	}
912 
913 	if ((cpuid_getfamily(CPU) == 6) &&
914 	    ((cpuid_getmodel(CPU) == 15) || (cpuid_getmodel(CPU) == 23))) {
915 		(void) snprintf(core_impl_name, IMPL_NAME_LEN,
916 		    "Core Microarchitecture");
917 		pcbe_init_fam6_model15_23();
918 		return (0);
919 	}
920 
921 	(void) snprintf(core_impl_name, IMPL_NAME_LEN,
922 	    "Intel Arch PerfMon v%d on Family %d Model %d",
923 	    versionid, cpuid_getfamily(CPU), cpuid_getmodel(CPU));
924 
925 	arch_events_vector_length = (cp.cp_eax >> 24) & 0xFF;
926 
927 	ASSERT(known_arch_events == arch_events_vector_length);
928 
929 	/*
930 	 * A newer CPU may advertise more architectural events than this PCBE
931 	 * knows about; clamp to the smaller count (ASSERT elided on non-DEBUG).
932 	 */
933 	if (known_arch_events > arch_events_vector_length) {
934 		known_arch_events = arch_events_vector_length;
935 	} else {
936 		arch_events_vector_length = known_arch_events;
937 	}
938 
939 	arch_events_vector = cp.cp_ebx &
940 	    BITMASK_XBITS(arch_events_vector_length);
941 
942 	/* General-purpose Counters (GPC) */
943 	gpc_names = NULL;
944 
945 	if (num_gpc > 0) {
946 
947 		gpc_names = kmem_alloc(num_gpc * sizeof (char *), KM_SLEEP);
948 
949 		/* Calculate space required for the architectural gpc events */
950 		arch_events_string_length = 0;
951 		for (i = 0; i < known_arch_events; i++) {
952 			if (((1U << i) & arch_events_vector) == 0) {
953 				arch_events_string_length +=
954 				    strlen(arch_events_table[i].name) + 1;
955 			}
956 		}
957 
958 		if (cpuid_getmodel(CPU) == 26) {
959 			events_table = events_fam6_mod26;
960 		} else if (cpuid_getmodel(CPU) == 37) {
961 			events_table = events_fam6_mod37;
962 		}
963 
964 		for (i = 0; i < num_gpc; i++) {
965 
966 			/* Determine length of supported event names */
967 			size = arch_events_string_length;
968 			for (j = 0; events_table != NULL &&
969 			    events_table[j].eventselect != NT_END;
970 			    j++) {
971 				if (C(i) & events_table[j].supported_counters) {
972 					size += strlen(events_table[j].name) +
973 					    1;
974 				}
975 			}
976 
977 			/* Allocate memory for this pic's list */
978 			gpc_names[i] = kmem_alloc(size + 1, KM_SLEEP);
979 			gpc_names[i][0] = '\0';
980 			if (size == 0) {
981 				continue;
982 			}
983 
984 			/* Create the list */
985 			for (j = 0; j < known_arch_events; j++) {
986 				if (((1U << j) & arch_events_vector) == 0) {
987 					(void) strcat(gpc_names[i],
988 					    arch_events_table[j].name);
989 					(void) strcat(gpc_names[i], ",");
990 				}
991 			}
992 
993 			for (j = 0; events_table != NULL &&
994 			    events_table[j].eventselect != NT_END;
995 			    j++) {
996 				if (C(i) & events_table[j].supported_counters) {
997 					(void) strcat(gpc_names[i],
998 					    events_table[j].name);
999 					(void) strcat(gpc_names[i], ",");
1000 				}
1001 			}
1002 			/*
1003 			 * Remove trailing comma.
1004 			 */
1005 			gpc_names[i][size - 1] = '\0';
1006 		}
1007 	}
1008 	/*
1009 	 * Fixed-function Counters (FFC) are already listed individually in
1010 	 * ffc_names[]
1011 	 */
1012 	return (0);
1013 }
1014 
1015 static uint_t core_pcbe_ncounters()
1016 {
1017 	return (total_pmc);
1018 }
1019 
1020 static const char *core_pcbe_impl_name(void)
1021 {
1022 	return (core_impl_name);
1023 }
1024 
1025 static const char *core_pcbe_cpuref(void)
1026 {
1027 	return (core_cpuref);
1028 }
1029 
1030 static char *core_pcbe_list_events(uint_t picnum)
1031 {
1032 	ASSERT(picnum < cpc_ncounters);
1033 
1034 	if (picnum < num_gpc) {
1035 		return (gpc_names[picnum]);
1036 	} else {
1037 		return (ffc_names[picnum - num_gpc]);
1038 	}
1039 }
1040 
1041 static char *core_pcbe_list_attrs(void)
1042 {
1043 	if (versionid >= 3) {
1044 		return ("edge,inv,umask,cmask,anythr");
1045 	} else {
1046 		return ("edge,pc,inv,umask,cmask");
1047 	}
1048 }
1049 
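/*
 * Look up an event by name in one of the Family 6 Model 15/23 nametables.
 * The tables are sorted alphabetically, so the scan stops at the first entry
 * that does not compare less than the requested name.
 */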
1050 static const struct nametable_fam6mod15_23 *
1051 find_gpcevent_f6m15_23(char *name,
1052     const struct nametable_fam6mod15_23 *nametable)
1053 {
1054 	const struct nametable_fam6mod15_23 *n;
1055 	int compare_result;
1056 
1057 	compare_result = -1;
1058 	for (n = nametable; n->event_num != NT_END; n++) {
1059 		compare_result = strcmp(name, n->name);
1060 		if (compare_result <= 0) {
1061 			break;
1062 		}
1063 	}
1064 
1065 	if (compare_result == 0) {
1066 		return (n);
1067 	}
1068 
1069 	return (NULL);
1070 }
1071 
1072 static const struct events_table_t *
1073 find_gpcevent(char *name)
1074 {
1075 	int i;
1076 
1077 	for (i = 0; i < known_arch_events; i++) {
1078 		if (strcmp(name, arch_events_table[i].name) == 0) {
1079 			if (((1U << i) & arch_events_vector) == 0) {
1080 				return (&arch_events_table[i]);
1081 			}
1082 		}
1083 	}
1084 
1085 	if (events_table == NULL) {
1086 		return (NULL);
1087 	}
1088 
1089 	for (i = 0; events_table[i].eventselect != NT_END; i++) {
1090 		if (strcmp(name, events_table[i].name) == 0) {
1091 			return (&events_table[i]);
1092 		}
1093 	}
1094 
1095 	return (NULL);
1096 }
1097 static uint64_t
1098 core_pcbe_event_coverage(char *event)
1099 {
1100 	uint64_t bitmap;
1101 	uint64_t bitmask;
1102 	const struct events_table_t *n;
1103 	int i;
1104 
1105 	bitmap = 0;
1106 
1107 	/* Is it an event that a GPC can track? */
1108 	if (versionid >= 3) {
1109 		n = find_gpcevent(event);
1110 		if (n != NULL) {
1111 			bitmap |= (n->supported_counters &
1112 			    BITMASK_XBITS(num_gpc));
1113 		}
1114 	} else {
1115 		if (find_gpcevent_f6m15_23(event, cmn_gpc_events_f6m15_23) !=
1116 		    NULL) {
1117 			bitmap |= BITMASK_XBITS(num_gpc);
1118 		} else if (find_gpcevent_f6m15_23(event, pic0_events) != NULL) {
1119 			bitmap |= 1ULL;
1120 		} else if (find_gpcevent_f6m15_23(event, pic1_events) != NULL) {
1121 			bitmap |= 1ULL << 1;
1122 		}
1123 	}
1124 
1125 	/* Check if the event can be counted in the fixed-function counters */
1126 	if (num_ffc > 0) {
1127 		bitmask = 1ULL << num_gpc;
1128 		for (i = 0; i < num_ffc; i++) {
1129 			if (strcmp(event, ffc_names[i]) == 0) {
1130 				bitmap |= bitmask;
1131 			}
1132 			bitmask = bitmask << 1;
1133 		}
1134 	}
1135 
1136 	return (bitmap);
1137 }
1138 
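/*
 * PERF_GLOBAL_STATUS reports GPC overflows in its low bits and FFC overflows
 * starting at bit 32.  The bitmap returned here repacks them so that the FFC
 * bits immediately follow the GPC bits, matching the picnum ordering used by
 * the rest of this PCBE.
 */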
1139 static uint64_t
1140 core_pcbe_overflow_bitmap(void)
1141 {
1142 	uint64_t interrupt_status;
1143 	uint64_t intrbits_ffc;
1144 	uint64_t intrbits_gpc;
1145 	extern int kcpc_hw_overflow_intr_installed;
1146 	uint64_t overflow_bitmap;
1147 
1148 	RDMSR(PERF_GLOBAL_STATUS, interrupt_status);
1149 	WRMSR(PERF_GLOBAL_OVF_CTRL, interrupt_status);
1150 
1151 	interrupt_status = interrupt_status & control_mask;
1152 	intrbits_ffc = (interrupt_status >> 32) & control_ffc;
1153 	intrbits_gpc = interrupt_status & control_gpc;
1154 	overflow_bitmap = (intrbits_ffc << num_gpc) | intrbits_gpc;
1155 
1156 	ASSERT(kcpc_hw_overflow_intr_installed);
1157 	(*kcpc_hw_enable_cpc_intr)();
1158 
1159 	return (overflow_bitmap);
1160 }
1161 
1162 static int
1163 check_cpc_securitypolicy(core_pcbe_config_t *conf,
1164     const struct nametable_fam6mod15_23 *n)
1165 {
1166 	if (conf->core_ctl & n->restricted_bits) {
1167 		if (secpolicy_cpc_cpu(crgetcred()) != 0) {
1168 			return (CPC_ATTR_REQUIRES_PRIVILEGE);
1169 		}
1170 	}
1171 	return (0);
1172 }
1173 
1174 static int
1175 configure_gpc(uint_t picnum, char *event, uint64_t preset, uint32_t flags,
1176     uint_t nattrs, kcpc_attr_t *attrs, void **data)
1177 {
1178 	core_pcbe_config_t	conf;
1179 	const struct nametable_fam6mod15_23	*n;
1180 	const struct nametable_fam6mod15_23	*m;
1181 	const struct nametable_fam6mod15_23	*picspecific_events;
1182 	struct nametable_fam6mod15_23	nt_raw = { "", 0x0, 0x0 };
1183 	uint_t			i;
1184 	long			event_num;
1185 	const struct events_table_t *eventcode;
1186 	int			umask_known;
1187 
1188 	if (((preset & BITS_EXTENDED_FROM_31) != 0) &&
1189 	    ((preset & BITS_EXTENDED_FROM_31) !=
1190 	    BITS_EXTENDED_FROM_31)) {
1191 
1192 		/*
1193 		 * Bits beyond bit-31 in the general-purpose counters can only
1194 		 * be written to by extension of bit 31.  We cannot preset
1195 		 * these bits to any value other than all 1s or all 0s.
1196 		 */
1197 		return (CPC_ATTRIBUTE_OUT_OF_RANGE);
1198 	}
1199 
1200 	if (versionid >= 3) {
1201 		eventcode = find_gpcevent(event);
1202 		if (eventcode != NULL) {
1203 			if ((C(picnum) & eventcode->supported_counters) == 0) {
1204 				return (CPC_PIC_NOT_CAPABLE);
1205 			}
1206 			conf.core_ctl = eventcode->eventselect;
1207 			conf.core_ctl |= eventcode->unitmask <<
1208 			    CORE_UMASK_SHIFT;
1209 			umask_known = 1;
1210 		} else {
1211 			/* Event specified as raw event code */
1212 			if (ddi_strtol(event, NULL, 0, &event_num) != 0) {
1213 				return (CPC_INVALID_EVENT);
1214 			}
1215 			conf.core_ctl = event_num & 0xFF;
1216 			umask_known = 0;
1217 		}
1218 	} else {
1219 		umask_known = 0;
1220 		n = find_gpcevent_f6m15_23(event, cmn_gpc_events_f6m15_23);
1221 		if (n == NULL) {
1222 			switch (picnum) {
1223 				case 0:
1224 					picspecific_events = pic0_events;
1225 					break;
1226 				case 1:
1227 					picspecific_events = pic1_events;
1228 					break;
1229 				default:
1230 					picspecific_events = NULL;
1231 					break;
1232 			}
1233 			if (picspecific_events != NULL) {
1234 				n = find_gpcevent_f6m15_23(event,
1235 				    picspecific_events);
1236 			}
1237 		}
1238 		if (n == NULL) {
1239 			/*
1240 			 * Check if this is a case where the event was
1241 			 * specified directly by its event number instead of
1242 			 * its name string.
1243 			 */
1244 			if (ddi_strtol(event, NULL, 0, &event_num) != 0) {
1245 				return (CPC_INVALID_EVENT);
1246 			}
1247 
1248 			event_num = event_num & 0xFF;
1249 
1250 			/*
1251 			 * Search the event table to find out if the event
1252 			 * specified has any privilege requirements.  Currently
1253 			 * none of the pic-specific counters have any privilege
1254 			 * requirements.  Hence only the table
1255 			 * cmn_gpc_events_f6m15_23 is searched.
1256 			 */
1257 			for (m = cmn_gpc_events_f6m15_23;
1258 			    m->event_num != NT_END;
1259 			    m++) {
1260 				if (event_num == m->event_num) {
1261 					break;
1262 				}
1263 			}
1264 			if (m->event_num == NT_END) {
1265 				nt_raw.event_num = (uint8_t)event_num;
1266 				n = &nt_raw;
1267 			} else {
1268 				n = m;
1269 			}
1270 		}
1271 		conf.core_ctl = n->event_num; /* Event Select */
1272 	}
1273 
1274 
1275 	conf.core_picno = picnum;
1276 	conf.core_pictype = CORE_GPC;
1277 	conf.core_rawpic = preset & mask_gpc;
1278 
1279 	conf.core_pes = GPC_BASE_PES + picnum;
1280 	conf.core_pmc = GPC_BASE_PMC + picnum;
1281 
1282 	for (i = 0; i < nattrs; i++) {
1283 		if (strncmp(attrs[i].ka_name, "umask", 6) == 0) {
1284 			if (umask_known == 1) {
1285 				return (CPC_ATTRIBUTE_OUT_OF_RANGE);
1286 			}
1287 			if ((attrs[i].ka_val | CORE_UMASK_MASK) !=
1288 			    CORE_UMASK_MASK) {
1289 				return (CPC_ATTRIBUTE_OUT_OF_RANGE);
1290 			}
1291 			conf.core_ctl |= attrs[i].ka_val <<
1292 			    CORE_UMASK_SHIFT;
1293 		} else if (strncmp(attrs[i].ka_name, "edge", 6) == 0) {
1294 			if (attrs[i].ka_val != 0)
1295 				conf.core_ctl |= CORE_EDGE;
1296 		} else if (strncmp(attrs[i].ka_name, "inv", 4) == 0) {
1297 			if (attrs[i].ka_val != 0)
1298 				conf.core_ctl |= CORE_INV;
1299 		} else if (strncmp(attrs[i].ka_name, "cmask", 6) == 0) {
1300 			if ((attrs[i].ka_val | CORE_CMASK_MASK) !=
1301 			    CORE_CMASK_MASK) {
1302 				return (CPC_ATTRIBUTE_OUT_OF_RANGE);
1303 			}
1304 			conf.core_ctl |= attrs[i].ka_val <<
1305 			    CORE_CMASK_SHIFT;
1306 		} else if (strncmp(attrs[i].ka_name, "anythr", 7) ==
1307 		    0) {
1308 			if (versionid < 3)
1309 				return (CPC_INVALID_ATTRIBUTE);
1310 			if (secpolicy_cpc_cpu(crgetcred()) != 0) {
1311 				return (CPC_ATTR_REQUIRES_PRIVILEGE);
1312 			}
1313 			if (attrs[i].ka_val != 0)
1314 				conf.core_ctl |= CORE_ANYTHR;
1315 		} else {
1316 			return (CPC_INVALID_ATTRIBUTE);
1317 		}
1318 	}
1319 
1320 	if (flags & CPC_COUNT_USER)
1321 		conf.core_ctl |= CORE_USR;
1322 	if (flags & CPC_COUNT_SYSTEM)
1323 		conf.core_ctl |= CORE_OS;
1324 	if (flags & CPC_OVF_NOTIFY_EMT)
1325 		conf.core_ctl |= CORE_INT;
1326 	conf.core_ctl |= CORE_EN;
1327 
1328 	if (versionid < 3) {
1329 		if (check_cpc_securitypolicy(&conf, n) != 0) {
1330 			return (CPC_ATTR_REQUIRES_PRIVILEGE);
1331 		}
1332 	}
1333 
1334 	*data = kmem_alloc(sizeof (core_pcbe_config_t), KM_SLEEP);
1335 	*((core_pcbe_config_t *)*data) = conf;
1336 
1337 	return (0);
1338 }
1339 
1340 static int
1341 configure_ffc(uint_t picnum, char *event, uint64_t preset, uint32_t flags,
1342     uint_t nattrs, kcpc_attr_t *attrs, void **data)
1343 {
1344 	core_pcbe_config_t	*conf;
1345 	uint_t			i;
1346 
1347 	if (picnum - num_gpc >= num_ffc) {
1348 		return (CPC_INVALID_PICNUM);
1349 	}
1350 
1351 	if (strcmp(ffc_names[picnum-num_gpc], event) != 0) {
1352 		return (CPC_INVALID_EVENT);
1353 	}
1354 
1355 	if ((versionid < 3) && (nattrs != 0)) {
1356 		return (CPC_INVALID_ATTRIBUTE);
1357 	}
1358 
1359 	conf = kmem_alloc(sizeof (core_pcbe_config_t), KM_SLEEP);
1360 	conf->core_ctl = 0;
1361 
1362 	for (i = 0; i < nattrs; i++) {
1363 		if (strncmp(attrs[i].ka_name, "anythr", 7) == 0) {
1364 			if (secpolicy_cpc_cpu(crgetcred()) != 0) {
				kmem_free(conf, sizeof (core_pcbe_config_t));
1365 				return (CPC_ATTR_REQUIRES_PRIVILEGE);
1366 			}
1367 			if (attrs[i].ka_val != 0) {
1368 				conf->core_ctl |= CORE_FFC_ANYTHR;
1369 			}
1370 		} else {
1371 			kmem_free(conf, sizeof (core_pcbe_config_t));
1372 			return (CPC_INVALID_ATTRIBUTE);
1373 		}
1374 	}
1375 
1376 	conf->core_picno = picnum;
1377 	conf->core_pictype = CORE_FFC;
1378 	conf->core_rawpic = preset & mask_ffc;
1379 	conf->core_pmc = FFC_BASE_PMC + (picnum - num_gpc);
1380 
1381 	/* All fixed-function counters have the same control register */
1382 	conf->core_pes = PERF_FIXED_CTR_CTRL;
1383 
1384 	if (flags & CPC_COUNT_USER)
1385 		conf->core_ctl |= CORE_FFC_USR_EN;
1386 	if (flags & CPC_COUNT_SYSTEM)
1387 		conf->core_ctl |= CORE_FFC_OS_EN;
1388 	if (flags & CPC_OVF_NOTIFY_EMT)
1389 		conf->core_ctl |= CORE_FFC_PMI;
1390 
1391 	*data = conf;
1392 	return (0);
1393 }
1394 
1395 /*ARGSUSED*/
1396 static int
1397 core_pcbe_configure(uint_t picnum, char *event, uint64_t preset,
1398     uint32_t flags, uint_t nattrs, kcpc_attr_t *attrs, void **data,
1399     void *token)
1400 {
1401 	int			ret;
1402 	core_pcbe_config_t	*conf;
1403 
1404 	/*
1405 	 * If we've been handed an existing configuration, we need only preset
1406 	 * the counter value.
1407 	 */
1408 	if (*data != NULL) {
1409 		conf = *data;
1410 		ASSERT(conf->core_pictype == CORE_GPC ||
1411 		    conf->core_pictype == CORE_FFC);
1412 		if (conf->core_pictype == CORE_GPC)
1413 			conf->core_rawpic = preset & mask_gpc;
1414 		else /* CORE_FFC */
1415 			conf->core_rawpic = preset & mask_ffc;
1416 		return (0);
1417 	}
1418 
1419 	if (picnum >= total_pmc) {
1420 		return (CPC_INVALID_PICNUM);
1421 	}
1422 
1423 	if (picnum < num_gpc) {
1424 		ret = configure_gpc(picnum, event, preset, flags,
1425 		    nattrs, attrs, data);
1426 	} else {
1427 		ret = configure_ffc(picnum, event, preset, flags,
1428 		    nattrs, attrs, data);
1429 	}
1430 	return (ret);
1431 }
1432 
1433 static void
1434 core_pcbe_program(void *token)
1435 {
1436 	core_pcbe_config_t	*cfg;
1437 	uint64_t		perf_global_ctrl;
1438 	uint64_t		perf_fixed_ctr_ctrl;
1439 	uint64_t		curcr4;
1440 
1441 	core_pcbe_allstop();
1442 
1443 	curcr4 = getcr4();
1444 	if (kcpc_allow_nonpriv(token))
1445 		/* Allow RDPMC at any ring level */
1446 		setcr4(curcr4 | CR4_PCE);
1447 	else
1448 		/* Allow RDPMC only at ring 0 */
1449 		setcr4(curcr4 & ~CR4_PCE);
1450 
1451 	/* Clear any overflow indicators before programming the counters */
1452 	WRMSR(PERF_GLOBAL_OVF_CTRL, MASK_CONDCHGD_OVFBUFFER | control_mask);
1453 
1454 	cfg = NULL;
1455 	perf_global_ctrl = 0;
1456 	perf_fixed_ctr_ctrl = 0;
1457 	cfg = (core_pcbe_config_t *)kcpc_next_config(token, cfg, NULL);
1458 	while (cfg != NULL) {
1459 		ASSERT(cfg->core_pictype == CORE_GPC ||
1460 		    cfg->core_pictype == CORE_FFC);
1461 
1462 		if (cfg->core_pictype == CORE_GPC) {
1463 			/*
1464 			 * General-purpose counter registers have write
1465 			 * restrictions where only the lower 32-bits can be
1466 			 * written to.  The rest of the relevant bits are
1467 			 * written to by extension from bit 31 (all ZEROS if
1468 			 * bit-31 is ZERO and all ONE if bit-31 is ONE).  This
1469 			 * makes it possible to write to the counter register
1470 			 * only values that have all ONEs or all ZEROs in the
1471 			 * higher bits.
1472 			 */
1473 			if (((cfg->core_rawpic & BITS_EXTENDED_FROM_31) == 0) ||
1474 			    ((cfg->core_rawpic & BITS_EXTENDED_FROM_31) ==
1475 			    BITS_EXTENDED_FROM_31)) {
1476 				/*
1477 				 * Straightforward case where the higher bits
1478 				 * are all ZEROs or all ONEs.
1479 				 */
1480 				WRMSR(cfg->core_pmc,
1481 				    (cfg->core_rawpic & mask_gpc));
1482 			} else {
1483 				/*
1484 				 * The high order bits are not all the same.
1485 				 * We save what is currently in the registers
1486 				 * and do not write to it.  When we want to do
1487 				 * a read from this register later (in
1488 				 * core_pcbe_sample()), we subtract the value
1489 				 * we save here to get the actual event count.
1490 				 *
1491 				 * NOTE: As a result, we will not get overflow
1492 				 * interrupts as expected.
1493 				 */
1494 				RDMSR(cfg->core_pmc, cfg->core_rawpic);
1495 				cfg->core_rawpic = cfg->core_rawpic & mask_gpc;
1496 			}
1497 			WRMSR(cfg->core_pes, cfg->core_ctl);
1498 			perf_global_ctrl |= 1ull << cfg->core_picno;
1499 		} else {
1500 			/*
1501 			 * Unlike the general-purpose counters, all relevant
1502 			 * bits of fixed-function counters can be written to.
1503 			 */
1504 			WRMSR(cfg->core_pmc, cfg->core_rawpic & mask_ffc);
1505 
1506 			/*
1507 			 * Collect the control bits for all the
1508 			 * fixed-function counters and write them in one shot
1509 			 * later in this function.
1510 			 */
1511 			perf_fixed_ctr_ctrl |= cfg->core_ctl <<
1512 			    ((cfg->core_picno - num_gpc) * CORE_FFC_ATTR_SIZE);
1513 			perf_global_ctrl |=
1514 			    1ull << (cfg->core_picno - num_gpc + 32);
1515 		}
1516 
1517 		cfg = (core_pcbe_config_t *)
1518 		    kcpc_next_config(token, cfg, NULL);
1519 	}
1520 
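	/*
	 * PERF_GLOBAL_CTRL enables GPCs through its low bits and FFCs through
	 * bits 32 and up, which is how perf_global_ctrl was accumulated in the
	 * loop above.
	 */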
1521 	/* Enable all the counters */
1522 	WRMSR(PERF_FIXED_CTR_CTRL, perf_fixed_ctr_ctrl);
1523 	WRMSR(PERF_GLOBAL_CTRL, perf_global_ctrl);
1524 }
1525 
1526 static void
1527 core_pcbe_allstop(void)
1528 {
1529 	/* Disable all the counters together */
1530 	WRMSR(PERF_GLOBAL_CTRL, ALL_STOPPED);
1531 
1532 	setcr4(getcr4() & ~CR4_PCE);
1533 }
1534 
1535 static void
1536 core_pcbe_sample(void *token)
1537 {
1538 	uint64_t		*daddr;
1539 	uint64_t		curpic;
1540 	core_pcbe_config_t	*cfg;
1541 	uint64_t		counter_mask;
1542 
1543 	cfg = (core_pcbe_config_t *)kcpc_next_config(token, NULL, &daddr);
1544 	while (cfg != NULL) {
1545 		ASSERT(cfg->core_pictype == CORE_GPC ||
1546 		    cfg->core_pictype == CORE_FFC);
1547 
1548 		curpic = rdmsr(cfg->core_pmc);
1549 
1550 		DTRACE_PROBE4(core__pcbe__sample,
1551 		    uint64_t, cfg->core_pmc,
1552 		    uint64_t, curpic,
1553 		    uint64_t, cfg->core_rawpic,
1554 		    uint64_t, *daddr);
1555 
1556 		if (cfg->core_pictype == CORE_GPC) {
1557 			counter_mask = mask_gpc;
1558 		} else {
1559 			counter_mask = mask_ffc;
1560 		}
1561 		curpic = curpic & counter_mask;
1562 		if (curpic >= cfg->core_rawpic) {
1563 			*daddr += curpic - cfg->core_rawpic;
1564 		} else {
1565 			/* Counter overflowed since our last sample */
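			/*
			 * The counter wraps at counter_mask + 1, so the
			 * delta is (counter_mask + 1 - core_rawpic) + curpic.
			 */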
1566 			*daddr += counter_mask - (cfg->core_rawpic - curpic) +
1567 			    1;
1568 		}
1569 		cfg->core_rawpic = *daddr & counter_mask;
1570 
1571 		cfg =
1572 		    (core_pcbe_config_t *)kcpc_next_config(token, cfg, &daddr);
1573 	}
1574 }
1575 
1576 static void
1577 core_pcbe_free(void *config)
1578 {
1579 	kmem_free(config, sizeof (core_pcbe_config_t));
1580 }
1581 
1582 static struct modlpcbe core_modlpcbe = {
1583 	&mod_pcbeops,
1584 	"Core Performance Counters",
1585 	&core_pcbe_ops
1586 };
1587 
1588 static struct modlinkage core_modl = {
1589 	MODREV_1,
1590 	&core_modlpcbe,
1591 };
1592 
1593 int
1594 _init(void)
1595 {
1596 	if (core_pcbe_init() != 0) {
1597 		return (ENOTSUP);
1598 	}
1599 	return (mod_install(&core_modl));
1600 }
1601 
1602 int
1603 _fini(void)
1604 {
1605 	return (mod_remove(&core_modl));
1606 }
1607 
1608 int
1609 _info(struct modinfo *mi)
1610 {
1611 	return (mod_info(&core_modl, mi));
1612 }
1613