1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2019, Joyent, Inc.
25 */
26 /*
27 * Copyright (c) 2010, Intel Corporation.
28 * All rights reserved.
29 */
30
31 /*
32 * CPU Module Interface - hardware abstraction.
33 */
34
35 #ifdef __xpv
36 #include <sys/xpv_user.h>
37 #endif
38
39 #include <sys/types.h>
40 #include <sys/cpu_module.h>
41 #include <sys/kmem.h>
42 #include <sys/x86_archext.h>
43 #include <sys/cpuvar.h>
44 #include <sys/ksynch.h>
45 #include <sys/x_call.h>
46 #include <sys/pghw.h>
47 #include <sys/pci_cfgacc.h>
48 #include <sys/pci_cfgspace.h>
49 #include <sys/archsystm.h>
50 #include <sys/ontrap.h>
51 #include <sys/controlregs.h>
52 #include <sys/sunddi.h>
53 #include <sys/trap.h>
54 #include <sys/mca_x86.h>
55 #include <sys/processor.h>
56 #include <sys/cmn_err.h>
57 #include <sys/nvpair.h>
58 #include <sys/fm/util.h>
59 #include <sys/fm/protocol.h>
60 #include <sys/fm/smb/fmsmb.h>
61 #include <sys/cpu_module_impl.h>
62
63 /*
64 * Variable which determines whether the SMBIOS supports x86 generic topology,
65 * or if legacy topology enumeration will occur.
66 */
67 extern int x86gentopo_legacy;
68
69 /*
70 * Outside of this file consumers use the opaque cmi_hdl_t. This
71 * definition is duplicated in the generic_cpu mdb module, so keep
72 * them in-sync when making changes.
73 */
74 typedef struct cmi_hdl_impl {
75 enum cmi_hdl_class cmih_class; /* Handle nature */
76 const struct cmi_hdl_ops *cmih_ops; /* Operations vector */
77 uint_t cmih_chipid; /* Chipid of cpu resource */
78 uint_t cmih_procnodeid; /* Nodeid of cpu resource */
79 uint_t cmih_coreid; /* Core within die */
80 uint_t cmih_strandid; /* Thread within core */
81 uint_t cmih_procnodes_per_pkg; /* Nodes in a processor */
82 boolean_t cmih_mstrand; /* cores are multithreaded */
83 volatile uint32_t *cmih_refcntp; /* Reference count pointer */
84 uint64_t cmih_msrsrc; /* MSR data source flags */
85 void *cmih_hdlpriv; /* cmi_hw.c private data */
86 void *cmih_spec; /* cmi_hdl_{set,get}_specific */
87 void *cmih_cmi; /* cpu mod control structure */
88 void *cmih_cmidata; /* cpu mod private data */
89 const struct cmi_mc_ops *cmih_mcops; /* Memory-controller ops */
90 void *cmih_mcdata; /* Memory-controller data */
91 uint64_t cmih_flags; /* See CMIH_F_* below */
92 uint16_t cmih_smbiosid; /* SMBIOS Type 4 struct ID */
93 uint_t cmih_smb_chipid; /* SMBIOS factored chipid */
94 nvlist_t *cmih_smb_bboard; /* SMBIOS bboard nvlist */
95 } cmi_hdl_impl_t;
96
97 #define IMPLHDL(ophdl) ((cmi_hdl_impl_t *)ophdl)
98 #define HDLOPS(hdl) ((hdl)->cmih_ops)
99
100 #define CMIH_F_INJACTV 0x1ULL
101 #define CMIH_F_DEAD 0x2ULL
102
103 /*
104 * Ops structure for handle operations.
105 */
106 struct cmi_hdl_ops {
107 /*
108 * These ops are required in an implementation.
109 */
110 uint_t (*cmio_vendor)(cmi_hdl_impl_t *);
111 const char *(*cmio_vendorstr)(cmi_hdl_impl_t *);
112 uint_t (*cmio_family)(cmi_hdl_impl_t *);
113 uint_t (*cmio_model)(cmi_hdl_impl_t *);
114 uint_t (*cmio_stepping)(cmi_hdl_impl_t *);
115 uint_t (*cmio_chipid)(cmi_hdl_impl_t *);
116 uint_t (*cmio_procnodeid)(cmi_hdl_impl_t *);
117 uint_t (*cmio_coreid)(cmi_hdl_impl_t *);
118 uint_t (*cmio_strandid)(cmi_hdl_impl_t *);
119 uint_t (*cmio_procnodes_per_pkg)(cmi_hdl_impl_t *);
120 uint_t (*cmio_strand_apicid)(cmi_hdl_impl_t *);
121 uint32_t (*cmio_chiprev)(cmi_hdl_impl_t *);
122 const char *(*cmio_chiprevstr)(cmi_hdl_impl_t *);
123 uint32_t (*cmio_getsockettype)(cmi_hdl_impl_t *);
124 const char *(*cmio_getsocketstr)(cmi_hdl_impl_t *);
125 uint_t (*cmio_chipsig)(cmi_hdl_impl_t *);
126
127 id_t (*cmio_logical_id)(cmi_hdl_impl_t *);
128 /*
129 * These ops are optional in an implementation.
130 */
131 ulong_t (*cmio_getcr4)(cmi_hdl_impl_t *);
132 void (*cmio_setcr4)(cmi_hdl_impl_t *, ulong_t);
133 cmi_errno_t (*cmio_rdmsr)(cmi_hdl_impl_t *, uint_t, uint64_t *);
134 cmi_errno_t (*cmio_wrmsr)(cmi_hdl_impl_t *, uint_t, uint64_t);
135 cmi_errno_t (*cmio_msrinterpose)(cmi_hdl_impl_t *, uint_t, uint64_t);
136 void (*cmio_int)(cmi_hdl_impl_t *, int);
137 int (*cmio_online)(cmi_hdl_impl_t *, int, int *);
138 uint16_t (*cmio_smbiosid) (cmi_hdl_impl_t *);
139 uint_t (*cmio_smb_chipid)(cmi_hdl_impl_t *);
140 nvlist_t *(*cmio_smb_bboard)(cmi_hdl_impl_t *);
141 };
142
143 static const struct cmi_hdl_ops cmi_hdl_ops;
144
145 /*
146 * Handles are looked up from contexts such as polling, injection, etc.,
147 * where the context is reasonably well defined (although a poller could
148 * interrupt any old thread holding any old lock). They are also looked
149 * up by machine check handlers, which may strike at inconvenient times
150 * such as during handle initialization or destruction or during handle
151 * lookup (which the #MC handler itself will also have to perform).
152 *
153 * So keeping handles in a linked list makes locking difficult when we
154 * consider #MC handlers. Our solution is to have a look-up table indexed
155 * by that which uniquely identifies a handle - chip/core/strand id -
156 * with each entry a structure including a pointer to a handle
157 * structure for the resource, and a reference count for the handle.
158 * Reference counts are modified atomically. The public cmi_hdl_hold
159 * always succeeds because this can only be used after handle creation
160 * and before the call to destruct, so the hold count is already at least one.
161 * In other functions that look up a handle (cmi_hdl_lookup, cmi_hdl_any)
162 * we must be certain that the count has not already been decremented to zero
163 * before applying our hold.
164 *
165 * The table is an array of CMI_CHIPID_ARR_SZ entries (the maximum number of
166 * chips) indexed by chip id. If the chip is not present, the entry is NULL.
167 * Each entry is a pointer to another array which contains the handle entries
168 * for all strands of that chip. This per-chip table is allocated when we
169 * first want to populate an entry for the chip; its size is
170 * CMI_MAX_STRANDS_PER_CHIP entries and it is populated as its cpus start.
171 *
172 * Ideally we would allocate only for the actual number of chips, cores per
173 * chip and strands per core. The number of chips is not known until all of
174 * them have been enumerated, and the number of cores and strands is only
175 * partially available, so for now we stick with the above approach.
176 */
177 #define CMI_MAX_CHIPID_NBITS 6 /* max chipid of 63 */
178 #define CMI_MAX_CORES_PER_CHIP_NBITS 4 /* 16 cores per chip max */
179 #define CMI_MAX_STRANDS_PER_CORE_NBITS 3 /* 8 strands per core max */
180
181 #define CMI_MAX_CHIPID ((1 << (CMI_MAX_CHIPID_NBITS)) - 1)
182 #define CMI_MAX_CORES_PER_CHIP(cbits) (1 << (cbits))
183 #define CMI_MAX_COREID(cbits) ((1 << (cbits)) - 1)
184 #define CMI_MAX_STRANDS_PER_CORE(sbits) (1 << (sbits))
185 #define CMI_MAX_STRANDID(sbits) ((1 << (sbits)) - 1)
186 #define CMI_MAX_STRANDS_PER_CHIP(cbits, sbits) \
187 (CMI_MAX_CORES_PER_CHIP(cbits) * CMI_MAX_STRANDS_PER_CORE(sbits))
188
189 #define CMI_CHIPID_ARR_SZ (1 << CMI_MAX_CHIPID_NBITS)
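
/*
 * Worked example (using the default values below): with cmi_core_nbits = 4
 * and cmi_strand_nbits = 3, each per-chip table holds
 * CMI_MAX_STRANDS_PER_CHIP(4, 3) = 16 * 8 = 128 cmi_hdl_ent_t entries, and
 * the handle for a given <coreid, strandid> lives at index
 * ((coreid & CMI_MAX_COREID(4)) << 3) | (strandid & CMI_MAX_STRANDID(3))
 * within that table (see cmi_hdl_ent_lookup()).
 */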
190
191 typedef struct cmi_hdl_ent {
192 volatile uint32_t cmae_refcnt;
193 cmi_hdl_impl_t *cmae_hdlp;
194 } cmi_hdl_ent_t;
195
196 static cmi_hdl_ent_t *cmi_chip_tab[CMI_CHIPID_ARR_SZ];
197
198 /*
199 * Default values for the number of core and strand bits.
200 */
201 uint_t cmi_core_nbits = CMI_MAX_CORES_PER_CHIP_NBITS;
202 uint_t cmi_strand_nbits = CMI_MAX_STRANDS_PER_CORE_NBITS;
203 static int cmi_ext_topo_check = 0;
204
205 /*
206 * Controls where we will source PCI config space data.
207 */
208 #define CMI_PCICFG_FLAG_RD_HWOK 0x0001
209 #define CMI_PCICFG_FLAG_RD_INTERPOSEOK 0x0002
210 #define CMI_PCICFG_FLAG_WR_HWOK 0x0004
211 #define CMI_PCICFG_FLAG_WR_INTERPOSEOK 0x0008
212
213 static uint64_t cmi_pcicfg_flags =
214 CMI_PCICFG_FLAG_RD_HWOK | CMI_PCICFG_FLAG_RD_INTERPOSEOK |
215 CMI_PCICFG_FLAG_WR_HWOK | CMI_PCICFG_FLAG_WR_INTERPOSEOK;
216
217 /*
218 * The flags for individual cpus are kept in their per-cpu handle cmih_msrsrc
219 */
220 #define CMI_MSR_FLAG_RD_HWOK 0x0001
221 #define CMI_MSR_FLAG_RD_INTERPOSEOK 0x0002
222 #define CMI_MSR_FLAG_WR_HWOK 0x0004
223 #define CMI_MSR_FLAG_WR_INTERPOSEOK 0x0008
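
/*
 * cmi_hdlconf_rdmsr_nohw() and cmi_hdlconf_wrmsr_nohw() (below) clear the
 * RD/WR HWOK bits for a handle, leaving only interposed MSR values visible.
 */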
224
225 int cmi_call_func_ntv_tries = 3;
226
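/*
 * Run 'func' on the cpu identified by 'cpuid'.  If we are already on that
 * cpu the function is called directly; otherwise we attempt a priority
 * cross call up to cmi_call_func_ntv_tries times, returning CMIERR_DEADLOCK
 * if the target never runs it.
 */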
227 static cmi_errno_t
228 call_func_ntv(int cpuid, xc_func_t func, xc_arg_t arg1, xc_arg_t arg2)
229 {
230 cmi_errno_t rc = -1;
231 int i;
232
233 kpreempt_disable();
234
235 if (CPU->cpu_id == cpuid) {
236 (*func)(arg1, arg2, (xc_arg_t)&rc);
237 } else {
238 /*
239 * This should not happen for a #MC trap or a poll, so
240 * this is likely an error injection or similar.
241 * We will try to cross call with xc_trycall - we
242 * can't guarantee success with xc_call because
243 * the interrupt code in the case of a #MC may
244 * already hold the xc mutex.
245 */
246 for (i = 0; i < cmi_call_func_ntv_tries; i++) {
247 cpuset_t cpus;
248
249 CPUSET_ONLY(cpus, cpuid);
250 xc_priority(arg1, arg2, (xc_arg_t)&rc,
251 CPUSET2BV(cpus), func);
252 if (rc != -1)
253 break;
254
255 DELAY(1);
256 }
257 }
258
259 kpreempt_enable();
260
261 return (rc != -1 ? rc : CMIERR_DEADLOCK);
262 }
263
264 static uint64_t injcnt;
265
266 void
267 cmi_hdl_inj_begin(cmi_hdl_t ophdl)
268 {
269 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
270
271 if (hdl != NULL)
272 hdl->cmih_flags |= CMIH_F_INJACTV;
273 if (injcnt++ == 0) {
274 cmn_err(CE_NOTE, "Hardware error injection/simulation "
275 "activity noted");
276 }
277 }
278
279 void
280 cmi_hdl_inj_end(cmi_hdl_t ophdl)
281 {
282 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
283
284 ASSERT(hdl == NULL || hdl->cmih_flags & CMIH_F_INJACTV);
285 if (hdl != NULL)
286 hdl->cmih_flags &= ~CMIH_F_INJACTV;
287 }
288
289 boolean_t
290 cmi_inj_tainted(void)
291 {
292 return (injcnt != 0 ? B_TRUE : B_FALSE);
293 }
294
295 /*
296 * =======================================================
297 * | MSR Interposition |
298 * | ----------------- |
299 * | |
300 * -------------------------------------------------------
301 */
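
/*
 * In the native case interposed MSR values are added via
 * cmi_hdl_msrinterpose() -> ntv_msrinterpose() -> msri_addent();
 * cmi_hdl_rdmsr() consults msri_lookup() before touching hardware, and
 * cmi_hdl_wrmsr() invalidates any matching entry with msri_rment()
 * before performing the write.
 */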
302
303 #define CMI_MSRI_HASHSZ 16
304 #define CMI_MSRI_HASHIDX(hdl, msr) \
305 ((((uintptr_t)(hdl) >> 3) + (msr)) % (CMI_MSRI_HASHSZ - 1))
306
307 struct cmi_msri_bkt {
308 kmutex_t msrib_lock;
309 struct cmi_msri_hashent *msrib_head;
310 };
311
312 struct cmi_msri_hashent {
313 struct cmi_msri_hashent *msrie_next;
314 struct cmi_msri_hashent *msrie_prev;
315 cmi_hdl_impl_t *msrie_hdl;
316 uint_t msrie_msrnum;
317 uint64_t msrie_msrval;
318 };
319
320 #define CMI_MSRI_MATCH(ent, hdl, req_msr) \
321 ((ent)->msrie_hdl == (hdl) && (ent)->msrie_msrnum == (req_msr))
322
323 static struct cmi_msri_bkt msrihash[CMI_MSRI_HASHSZ];
324
325 static void
326 msri_addent(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
327 {
328 int idx = CMI_MSRI_HASHIDX(hdl, msr);
329 struct cmi_msri_bkt *hbp = &msrihash[idx];
330 struct cmi_msri_hashent *hep;
331
332 mutex_enter(&hbp->msrib_lock);
333
334 for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
335 if (CMI_MSRI_MATCH(hep, hdl, msr))
336 break;
337 }
338
339 if (hep != NULL) {
340 hep->msrie_msrval = val;
341 } else {
342 hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
343 hep->msrie_hdl = hdl;
344 hep->msrie_msrnum = msr;
345 hep->msrie_msrval = val;
346
347 if (hbp->msrib_head != NULL)
348 hbp->msrib_head->msrie_prev = hep;
349 hep->msrie_next = hbp->msrib_head;
350 hep->msrie_prev = NULL;
351 hbp->msrib_head = hep;
352 }
353
354 mutex_exit(&hbp->msrib_lock);
355 }
356
357 /*
358 * Look for a match for the given handle and msr. Return 1 with valp
359 * filled if a match is found, otherwise return 0 with valp untouched.
360 */
361 static int
362 msri_lookup(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
363 {
364 int idx = CMI_MSRI_HASHIDX(hdl, msr);
365 struct cmi_msri_bkt *hbp = &msrihash[idx];
366 struct cmi_msri_hashent *hep;
367
368 /*
369 * This function is called during #MC trap handling, so we should
370 * consider the possibility that the hash mutex is held by the
371 * interrupted thread. This should not happen because interposition
372 * is an artificial injection mechanism and the #MC is requested
373 * after adding entries, but just in case of a real #MC at an
374 * unlucky moment we'll use mutex_tryenter here.
375 */
376 if (!mutex_tryenter(&hbp->msrib_lock))
377 return (0);
378
379 for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
380 if (CMI_MSRI_MATCH(hep, hdl, msr)) {
381 *valp = hep->msrie_msrval;
382 break;
383 }
384 }
385
386 mutex_exit(&hbp->msrib_lock);
387
388 return (hep != NULL);
389 }
390
391 /*
392 * Remove any interposed value that matches.
393 */
394 static void
395 msri_rment(cmi_hdl_impl_t *hdl, uint_t msr)
396 {
397
398 int idx = CMI_MSRI_HASHIDX(hdl, msr);
399 struct cmi_msri_bkt *hbp = &msrihash[idx];
400 struct cmi_msri_hashent *hep;
401
402 if (!mutex_tryenter(&hbp->msrib_lock))
403 return;
404
405 for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
406 if (CMI_MSRI_MATCH(hep, hdl, msr)) {
407 if (hep->msrie_prev != NULL)
408 hep->msrie_prev->msrie_next = hep->msrie_next;
409
410 if (hep->msrie_next != NULL)
411 hep->msrie_next->msrie_prev = hep->msrie_prev;
412
413 if (hbp->msrib_head == hep)
414 hbp->msrib_head = hep->msrie_next;
415
416 kmem_free(hep, sizeof (*hep));
417 break;
418 }
419 }
420
421 mutex_exit(&hbp->msrib_lock);
422 }
423
424 /*
425 * =======================================================
426 * | PCI Config Space Interposition |
427 * | ------------------------------ |
428 * | |
429 * -------------------------------------------------------
430 */
431
432 /*
433 * Hash for interposed PCI config space values. We look up on bus/dev/func/offset
434 * and then record whether the value stashed was made with a byte, word or
435 * doubleword access; we will only return a hit for an access of the
436 * same size. If you access say a 32-bit register using byte accesses
437 * and then attempt to read the full 32-bit value back you will not obtain
438 * any sort of merged result - you get a lookup miss.
439 */
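
/*
 * cmi_pci_interposeb/w/l() add entries via pcii_addent(); the
 * cmi_pci_getb/w/l() routines consult pcii_lookup() before (optionally)
 * reading hardware, and the cmi_pci_putb/w/l() routines invalidate any
 * matching entry with pcii_rment() before writing.
 */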
440
441 #define CMI_PCII_HASHSZ 16
442 #define CMI_PCII_HASHIDX(b, d, f, o) \
443 (((b) + (d) + (f) + (o)) % (CMI_PCII_HASHSZ - 1))
444
445 struct cmi_pcii_bkt {
446 kmutex_t pciib_lock;
447 struct cmi_pcii_hashent *pciib_head;
448 };
449
450 struct cmi_pcii_hashent {
451 struct cmi_pcii_hashent *pcii_next;
452 struct cmi_pcii_hashent *pcii_prev;
453 int pcii_bus;
454 int pcii_dev;
455 int pcii_func;
456 int pcii_reg;
457 int pcii_asize;
458 uint32_t pcii_val;
459 };
460
461 #define CMI_PCII_MATCH(ent, b, d, f, r, asz) \
462 ((ent)->pcii_bus == (b) && (ent)->pcii_dev == (d) && \
463 (ent)->pcii_func == (f) && (ent)->pcii_reg == (r) && \
464 (ent)->pcii_asize == (asz))
465
466 static struct cmi_pcii_bkt pciihash[CMI_PCII_HASHSZ];
467
468
469 /*
470 * Add a new entry to the PCI interpose hash, overwriting any existing
471 * entry that is found.
472 */
473 static void
474 pcii_addent(int bus, int dev, int func, int reg, uint32_t val, int asz)
475 {
476 int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
477 struct cmi_pcii_bkt *hbp = &pciihash[idx];
478 struct cmi_pcii_hashent *hep;
479
480 cmi_hdl_inj_begin(NULL);
481
482 mutex_enter(&hbp->pciib_lock);
483
484 for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
485 if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz))
486 break;
487 }
488
489 if (hep != NULL) {
490 hep->pcii_val = val;
491 } else {
492 hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
493 hep->pcii_bus = bus;
494 hep->pcii_dev = dev;
495 hep->pcii_func = func;
496 hep->pcii_reg = reg;
497 hep->pcii_asize = asz;
498 hep->pcii_val = val;
499
500 if (hbp->pciib_head != NULL)
501 hbp->pciib_head->pcii_prev = hep;
502 hep->pcii_next = hbp->pciib_head;
503 hep->pcii_prev = NULL;
504 hbp->pciib_head = hep;
505 }
506
507 mutex_exit(&hbp->pciib_lock);
508
509 cmi_hdl_inj_end(NULL);
510 }
511
512 /*
513 * Look for a match for the given bus/dev/func/reg; return 1 with valp
514 * filled if a match is found, otherwise return 0 with valp untouched.
515 */
516 static int
517 pcii_lookup(int bus, int dev, int func, int reg, int asz, uint32_t *valp)
518 {
519 int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
520 struct cmi_pcii_bkt *hbp = &pciihash[idx];
521 struct cmi_pcii_hashent *hep;
522
523 if (!mutex_tryenter(&hbp->pciib_lock))
524 return (0);
525
526 for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
527 if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
528 *valp = hep->pcii_val;
529 break;
530 }
531 }
532
533 mutex_exit(&hbp->pciib_lock);
534
535 return (hep != NULL);
536 }
537
538 static void
539 pcii_rment(int bus, int dev, int func, int reg, int asz)
540 {
541 int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
542 struct cmi_pcii_bkt *hbp = &pciihash[idx];
543 struct cmi_pcii_hashent *hep;
544
545 mutex_enter(&hbp->pciib_lock);
546
547 for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
548 if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
549 if (hep->pcii_prev != NULL)
550 hep->pcii_prev->pcii_next = hep->pcii_next;
551
552 if (hep->pcii_next != NULL)
553 hep->pcii_next->pcii_prev = hep->pcii_prev;
554
555 if (hbp->pciib_head == hep)
556 hbp->pciib_head = hep->pcii_next;
557
558 kmem_free(hep, sizeof (*hep));
559 break;
560 }
561 }
562
563 mutex_exit(&hbp->pciib_lock);
564 }
565
566 #ifndef __xpv
567
568 /*
569 * =======================================================
570 * | Native methods |
571 * | -------------- |
572 * | |
573 * | These are used when we are running native on bare- |
574 * | metal, or simply don't know any better. |
575 * ---------------------------------------------------------
576 */
577
578 #define HDLPRIV(hdl) ((cpu_t *)(hdl)->cmih_hdlpriv)
579
580 static uint_t
581 ntv_vendor(cmi_hdl_impl_t *hdl)
582 {
583 return (cpuid_getvendor(HDLPRIV(hdl)));
584 }
585
586 static const char *
587 ntv_vendorstr(cmi_hdl_impl_t *hdl)
588 {
589 return (cpuid_getvendorstr(HDLPRIV(hdl)));
590 }
591
592 static uint_t
593 ntv_family(cmi_hdl_impl_t *hdl)
594 {
595 return (cpuid_getfamily(HDLPRIV(hdl)));
596 }
597
598 static uint_t
599 ntv_model(cmi_hdl_impl_t *hdl)
600 {
601 return (cpuid_getmodel(HDLPRIV(hdl)));
602 }
603
604 static uint_t
605 ntv_stepping(cmi_hdl_impl_t *hdl)
606 {
607 return (cpuid_getstep(HDLPRIV(hdl)));
608 }
609
610 static uint_t
611 ntv_chipid(cmi_hdl_impl_t *hdl)
612 {
613 return (hdl->cmih_chipid);
614
615 }
616
617 static uint_t
618 ntv_procnodeid(cmi_hdl_impl_t *hdl)
619 {
620 return (hdl->cmih_procnodeid);
621 }
622
623 static uint_t
624 ntv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
625 {
626 return (hdl->cmih_procnodes_per_pkg);
627 }
628
629 static uint_t
630 ntv_coreid(cmi_hdl_impl_t *hdl)
631 {
632 return (hdl->cmih_coreid);
633 }
634
635 static uint_t
636 ntv_strandid(cmi_hdl_impl_t *hdl)
637 {
638 return (hdl->cmih_strandid);
639 }
640
641 static uint_t
642 ntv_strand_apicid(cmi_hdl_impl_t *hdl)
643 {
644 return (cpuid_get_apicid(HDLPRIV(hdl)));
645 }
646
647 static uint16_t
648 ntv_smbiosid(cmi_hdl_impl_t *hdl)
649 {
650 return (hdl->cmih_smbiosid);
651 }
652
653 static uint_t
654 ntv_smb_chipid(cmi_hdl_impl_t *hdl)
655 {
656 return (hdl->cmih_smb_chipid);
657 }
658
659 static nvlist_t *
660 ntv_smb_bboard(cmi_hdl_impl_t *hdl)
661 {
662 return (hdl->cmih_smb_bboard);
663 }
664
665 static uint32_t
666 ntv_chiprev(cmi_hdl_impl_t *hdl)
667 {
668 return (cpuid_getchiprev(HDLPRIV(hdl)));
669 }
670
671 static const char *
672 ntv_chiprevstr(cmi_hdl_impl_t *hdl)
673 {
674 return (cpuid_getchiprevstr(HDLPRIV(hdl)));
675 }
676
677 static uint32_t
678 ntv_getsockettype(cmi_hdl_impl_t *hdl)
679 {
680 return (cpuid_getsockettype(HDLPRIV(hdl)));
681 }
682
683 static const char *
684 ntv_getsocketstr(cmi_hdl_impl_t *hdl)
685 {
686 return (cpuid_getsocketstr(HDLPRIV(hdl)));
687 }
688
689 static uint_t
690 ntv_chipsig(cmi_hdl_impl_t *hdl)
691 {
692 return (cpuid_getsig(HDLPRIV(hdl)));
693 }
694
695 static id_t
696 ntv_logical_id(cmi_hdl_impl_t *hdl)
697 {
698 return (HDLPRIV(hdl)->cpu_id);
699 }
700
701 /*ARGSUSED*/
702 static int
703 ntv_getcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
704 {
705 ulong_t *dest = (ulong_t *)arg1;
706 cmi_errno_t *rcp = (cmi_errno_t *)arg3;
707
708 *dest = getcr4();
709 *rcp = CMI_SUCCESS;
710
711 return (0);
712 }
713
714 static ulong_t
715 ntv_getcr4(cmi_hdl_impl_t *hdl)
716 {
717 cpu_t *cp = HDLPRIV(hdl);
718 ulong_t val;
719
720 (void) call_func_ntv(cp->cpu_id, ntv_getcr4_xc, (xc_arg_t)&val, 0);
721
722 return (val);
723 }
724
725 /*ARGSUSED*/
726 static int
727 ntv_setcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
728 {
729 ulong_t val = (ulong_t)arg1;
730 cmi_errno_t *rcp = (cmi_errno_t *)arg3;
731
732 setcr4(val);
733 *rcp = CMI_SUCCESS;
734
735 return (0);
736 }
737
738 static void
739 ntv_setcr4(cmi_hdl_impl_t *hdl, ulong_t val)
740 {
741 cpu_t *cp = HDLPRIV(hdl);
742
743 (void) call_func_ntv(cp->cpu_id, ntv_setcr4_xc, (xc_arg_t)val, 0);
744 }
745
746 volatile uint32_t cmi_trapped_rdmsr;
747
748 /*ARGSUSED*/
749 static int
750 ntv_rdmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
751 {
752 uint_t msr = (uint_t)arg1;
753 uint64_t *valp = (uint64_t *)arg2;
754 cmi_errno_t *rcp = (cmi_errno_t *)arg3;
755
756 on_trap_data_t otd;
757
758 if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
759 if (checked_rdmsr(msr, valp) == 0)
760 *rcp = CMI_SUCCESS;
761 else
762 *rcp = CMIERR_NOTSUP;
763 } else {
764 *rcp = CMIERR_MSRGPF;
765 atomic_inc_32(&cmi_trapped_rdmsr);
766 }
767 no_trap();
768
769 return (0);
770 }
771
772 static cmi_errno_t
773 ntv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
774 {
775 cpu_t *cp = HDLPRIV(hdl);
776
777 if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_HWOK))
778 return (CMIERR_INTERPOSE);
779
780 return (call_func_ntv(cp->cpu_id, ntv_rdmsr_xc,
781 (xc_arg_t)msr, (xc_arg_t)valp));
782 }
783
784 volatile uint32_t cmi_trapped_wrmsr;
785
786 /*ARGSUSED*/
787 static int
788 ntv_wrmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
789 {
790 uint_t msr = (uint_t)arg1;
791 uint64_t val = *((uint64_t *)arg2);
792 cmi_errno_t *rcp = (cmi_errno_t *)arg3;
793 on_trap_data_t otd;
794
795 if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
796 if (checked_wrmsr(msr, val) == 0)
797 *rcp = CMI_SUCCESS;
798 else
799 *rcp = CMIERR_NOTSUP;
800 } else {
801 *rcp = CMIERR_MSRGPF;
802 atomic_inc_32(&cmi_trapped_wrmsr);
803 }
804 no_trap();
805
806 return (0);
807
808 }
809
810 static cmi_errno_t
811 ntv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
812 {
813 cpu_t *cp = HDLPRIV(hdl);
814
815 if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_WR_HWOK))
816 return (CMI_SUCCESS);
817
818 return (call_func_ntv(cp->cpu_id, ntv_wrmsr_xc,
819 (xc_arg_t)msr, (xc_arg_t)&val));
820 }
821
822 static cmi_errno_t
823 ntv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
824 {
825 msri_addent(hdl, msr, val);
826 return (CMI_SUCCESS);
827 }
828
829 /*ARGSUSED*/
830 static int
831 ntv_int_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
832 {
833 cmi_errno_t *rcp = (cmi_errno_t *)arg3;
834 int int_no = (int)arg1;
835
836 if (int_no == T_MCE)
837 int18();
838 else
839 int_cmci();
840 *rcp = CMI_SUCCESS;
841
842 return (0);
843 }
844
845 static void
846 ntv_int(cmi_hdl_impl_t *hdl, int int_no)
847 {
848 cpu_t *cp = HDLPRIV(hdl);
849
850 (void) call_func_ntv(cp->cpu_id, ntv_int_xc, (xc_arg_t)int_no, 0);
851 }
852
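/*
 * Online/offline the indicated logical cpu.  We use mutex_tryenter() in a
 * loop rather than blocking on cpu_lock so that we can notice CMIH_F_DEAD
 * (set by cmi_hdl_destroy()) and return EBUSY if the handle is being
 * torn down.
 */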
853 static int
854 ntv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
855 {
856 int rc;
857 processorid_t cpuid = HDLPRIV(hdl)->cpu_id;
858
859 while (mutex_tryenter(&cpu_lock) == 0) {
860 if (hdl->cmih_flags & CMIH_F_DEAD)
861 return (EBUSY);
862 delay(1);
863 }
864 rc = p_online_internal_locked(cpuid, new_status, old_status);
865 mutex_exit(&cpu_lock);
866
867 return (rc);
868 }
869
870 #else /* __xpv */
871
872 /*
873 * =======================================================
874 * | xVM dom0 methods |
875 * | ---------------- |
876 * | |
877 * | These are used when we are running as dom0 in |
878 * | a Solaris xVM context. |
879 * ---------------------------------------------------------
880 */
881
882 #define HDLPRIV(hdl) ((xen_mc_lcpu_cookie_t)(hdl)->cmih_hdlpriv)
883
884 extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
885
886
887 static uint_t
888 xpv_vendor(cmi_hdl_impl_t *hdl)
889 {
890 return (_cpuid_vendorstr_to_vendorcode((char *)xen_physcpu_vendorstr(
891 HDLPRIV(hdl))));
892 }
893
894 static const char *
895 xpv_vendorstr(cmi_hdl_impl_t *hdl)
896 {
897 return (xen_physcpu_vendorstr(HDLPRIV(hdl)));
898 }
899
900 static uint_t
901 xpv_family(cmi_hdl_impl_t *hdl)
902 {
903 return (xen_physcpu_family(HDLPRIV(hdl)));
904 }
905
906 static uint_t
907 xpv_model(cmi_hdl_impl_t *hdl)
908 {
909 return (xen_physcpu_model(HDLPRIV(hdl)));
910 }
911
912 static uint_t
913 xpv_stepping(cmi_hdl_impl_t *hdl)
914 {
915 return (xen_physcpu_stepping(HDLPRIV(hdl)));
916 }
917
918 static uint_t
919 xpv_chipid(cmi_hdl_impl_t *hdl)
920 {
921 return (hdl->cmih_chipid);
922 }
923
924 static uint_t
925 xpv_procnodeid(cmi_hdl_impl_t *hdl)
926 {
927 return (hdl->cmih_procnodeid);
928 }
929
930 static uint_t
931 xpv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
932 {
933 return (hdl->cmih_procnodes_per_pkg);
934 }
935
936 static uint_t
937 xpv_coreid(cmi_hdl_impl_t *hdl)
938 {
939 return (hdl->cmih_coreid);
940 }
941
942 static uint_t
943 xpv_strandid(cmi_hdl_impl_t *hdl)
944 {
945 return (hdl->cmih_strandid);
946 }
947
948 static uint_t
949 xpv_strand_apicid(cmi_hdl_impl_t *hdl)
950 {
951 return (xen_physcpu_initial_apicid(HDLPRIV(hdl)));
952 }
953
954 static uint16_t
955 xpv_smbiosid(cmi_hdl_impl_t *hdl)
956 {
957 return (hdl->cmih_smbiosid);
958 }
959
960 static uint_t
961 xpv_smb_chipid(cmi_hdl_impl_t *hdl)
962 {
963 return (hdl->cmih_smb_chipid);
964 }
965
966 static nvlist_t *
967 xpv_smb_bboard(cmi_hdl_impl_t *hdl)
968 {
969 return (hdl->cmih_smb_bboard);
970 }
971
972 extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
973
974 static uint32_t
975 xpv_chiprev(cmi_hdl_impl_t *hdl)
976 {
977 return (_cpuid_chiprev(xpv_vendor(hdl), xpv_family(hdl),
978 xpv_model(hdl), xpv_stepping(hdl)));
979 }
980
981 extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
982
983 static const char *
984 xpv_chiprevstr(cmi_hdl_impl_t *hdl)
985 {
986 return (_cpuid_chiprevstr(xpv_vendor(hdl), xpv_family(hdl),
987 xpv_model(hdl), xpv_stepping(hdl)));
988 }
989
990 extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
991
992 static uint32_t
993 xpv_getsockettype(cmi_hdl_impl_t *hdl)
994 {
995 return (_cpuid_skt(xpv_vendor(hdl), xpv_family(hdl),
996 xpv_model(hdl), xpv_stepping(hdl)));
997 }
998
999 extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
1000
1001 static const char *
1002 xpv_getsocketstr(cmi_hdl_impl_t *hdl)
1003 {
1004 return (_cpuid_sktstr(xpv_vendor(hdl), xpv_family(hdl),
1005 xpv_model(hdl), xpv_stepping(hdl)));
1006 }
1007
1008 /* ARGSUSED */
1009 static uint_t
1010 xpv_chipsig(cmi_hdl_impl_t *hdl)
1011 {
1012 return (0);
1013 }
1014
1015 static id_t
1016 xpv_logical_id(cmi_hdl_impl_t *hdl)
1017 {
1018 return (xen_physcpu_logical_id(HDLPRIV(hdl)));
1019 }
1020
1021 static cmi_errno_t
1022 xpv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
1023 {
1024 switch (msr) {
1025 case IA32_MSR_MCG_CAP:
1026 *valp = xen_physcpu_mcg_cap(HDLPRIV(hdl));
1027 break;
1028
1029 default:
1030 return (CMIERR_NOTSUP);
1031 }
1032
1033 return (CMI_SUCCESS);
1034 }
1035
1036 /*
1037 * Request the hypervisor to write an MSR for us. The hypervisor
1038 * will only accept MCA-related MSRs, as this is for MCA error
1039 * simulation purposes alone. We will pre-screen MSRs for injection
1040 * so we don't bother the HV with bogus requests. We will permit
1041 * injection to any MCA bank register, and to MCG_STATUS.
1042 */
1043
1044 #define IS_MCA_INJ_MSR(msr) \
1045 (((msr) >= IA32_MSR_MC(0, CTL) && (msr) <= IA32_MSR_MC(10, MISC)) || \
1046 (msr) == IA32_MSR_MCG_STATUS)
1047
1048 static cmi_errno_t
1049 xpv_wrmsr_cmn(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val, boolean_t intpose)
1050 {
1051 xen_mc_t xmc;
1052 struct xen_mc_msrinject *mci = &xmc.u.mc_msrinject;
1053
1054 if (!(hdl->cmih_flags & CMIH_F_INJACTV))
1055 return (CMIERR_NOTSUP); /* for injection use only! */
1056
1057 if (!IS_MCA_INJ_MSR(msr))
1058 return (CMIERR_API);
1059
1060 if (panicstr)
1061 return (CMIERR_DEADLOCK);
1062
1063 mci->mcinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));
1064 mci->mcinj_flags = intpose ? MC_MSRINJ_F_INTERPOSE : 0;
1065 mci->mcinj_count = 1; /* learn to batch sometime */
1066 mci->mcinj_msr[0].reg = msr;
1067 mci->mcinj_msr[0].value = val;
1068
1069 return (HYPERVISOR_mca(XEN_MC_msrinject, &xmc) ==
1070 0 ? CMI_SUCCESS : CMIERR_NOTSUP);
1071 }
1072
1073 static cmi_errno_t
1074 xpv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
1075 {
1076 return (xpv_wrmsr_cmn(hdl, msr, val, B_FALSE));
1077 }
1078
1079
1080 static cmi_errno_t
1081 xpv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
1082 {
1083 return (xpv_wrmsr_cmn(hdl, msr, val, B_TRUE));
1084 }
1085
1086 static void
1087 xpv_int(cmi_hdl_impl_t *hdl, int int_no)
1088 {
1089 xen_mc_t xmc;
1090 struct xen_mc_mceinject *mce = &xmc.u.mc_mceinject;
1091
1092 if (!(hdl->cmih_flags & CMIH_F_INJACTV))
1093 return;
1094
1095 if (int_no != T_MCE) {
1096 cmn_err(CE_WARN, "xpv_int: int_no %d unimplemented\n",
1097 int_no);
1098 }
1099
1100 mce->mceinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));
1101
1102 (void) HYPERVISOR_mca(XEN_MC_mceinject, &xmc);
1103 }
1104
1105 static int
1106 xpv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
1107 {
1108 xen_sysctl_t xs;
1109 int op, rc, status;
1110
1111 new_status &= ~P_FORCED;
1112
1113 switch (new_status) {
1114 case P_STATUS:
1115 op = XEN_SYSCTL_CPU_HOTPLUG_STATUS;
1116 break;
1117 case P_FAULTED:
1118 case P_OFFLINE:
1119 op = XEN_SYSCTL_CPU_HOTPLUG_OFFLINE;
1120 break;
1121 case P_ONLINE:
1122 op = XEN_SYSCTL_CPU_HOTPLUG_ONLINE;
1123 break;
1124 default:
1125 return (-1);
1126 }
1127
1128 xs.cmd = XEN_SYSCTL_cpu_hotplug;
1129 xs.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
1130 xs.u.cpu_hotplug.cpu = xen_physcpu_logical_id(HDLPRIV(hdl));
1131 xs.u.cpu_hotplug.op = op;
1132
1133 if ((rc = HYPERVISOR_sysctl(&xs)) >= 0) {
1134 status = rc;
1135 rc = 0;
1136 switch (status) {
1137 case XEN_CPU_HOTPLUG_STATUS_NEW:
1138 *old_status = P_OFFLINE;
1139 break;
1140 case XEN_CPU_HOTPLUG_STATUS_OFFLINE:
1141 *old_status = P_FAULTED;
1142 break;
1143 case XEN_CPU_HOTPLUG_STATUS_ONLINE:
1144 *old_status = P_ONLINE;
1145 break;
1146 default:
1147 return (-1);
1148 }
1149 }
1150
1151 return (-rc);
1152 }
1153
1154 #endif
1155
1156 /*ARGSUSED*/
1157 static void *
1158 cpu_search(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
1159 uint_t strandid)
1160 {
1161 #ifdef __xpv
1162 xen_mc_lcpu_cookie_t cpi;
1163
1164 for (cpi = xen_physcpu_next(NULL); cpi != NULL;
1165 cpi = xen_physcpu_next(cpi)) {
1166 if (xen_physcpu_chipid(cpi) == chipid &&
1167 xen_physcpu_coreid(cpi) == coreid &&
1168 xen_physcpu_strandid(cpi) == strandid)
1169 return ((void *)cpi);
1170 }
1171 return (NULL);
1172
1173 #else /* __xpv */
1174
1175 cpu_t *cp, *startcp;
1176
1177 kpreempt_disable();
1178 cp = startcp = CPU;
1179 do {
1180 if (cmi_ntv_hwchipid(cp) == chipid &&
1181 cmi_ntv_hwcoreid(cp) == coreid &&
1182 cmi_ntv_hwstrandid(cp) == strandid) {
1183 kpreempt_enable();
1184 return ((void *)cp);
1185 }
1186
1187 cp = cp->cpu_next;
1188 } while (cp != startcp);
1189 kpreempt_enable();
1190 return (NULL);
1191 #endif /* __ xpv */
1192 }
1193
1194 static boolean_t
1195 cpu_is_cmt(void *priv)
1196 {
1197 #ifdef __xpv
1198 return (xen_physcpu_is_cmt((xen_mc_lcpu_cookie_t)priv));
1199 #else /* __xpv */
1200 cpu_t *cp = (cpu_t *)priv;
1201
1202 int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
1203 cpuid_get_ncore_per_chip(cp);
1204
1205 return (strands_per_core > 1);
1206 #endif /* __xpv */
1207 }
1208
1209 /*
1210 * Find the handle entry of a given cpu identified by a <chip,core,strand>
1211 * tuple.
1212 */
1213 static cmi_hdl_ent_t *
1214 cmi_hdl_ent_lookup(uint_t chipid, uint_t coreid, uint_t strandid)
1215 {
1216 int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
1217 cmi_strand_nbits);
1218
1219 /*
1220 * Allocate the per-chip table which contains the handle entries for
1221 * all strands of the chip.
1222 */
1223 if (cmi_chip_tab[chipid] == NULL) {
1224 size_t sz;
1225 cmi_hdl_ent_t *pg;
1226
1227 sz = max_strands * sizeof (cmi_hdl_ent_t);
1228 pg = kmem_zalloc(sz, KM_SLEEP);
1229
1230 /* test and set the per-chip table if it is not allocated */
1231 if (atomic_cas_ptr(&cmi_chip_tab[chipid], NULL, pg) != NULL)
1232 kmem_free(pg, sz); /* someone beats us */
1233 }
1234
1235 return (cmi_chip_tab[chipid] +
1236 ((((coreid) & CMI_MAX_COREID(cmi_core_nbits)) << cmi_strand_nbits) |
1237 ((strandid) & CMI_MAX_STRANDID(cmi_strand_nbits))));
1238 }
1239
1240 extern void cpuid_get_ext_topo(cpu_t *, uint_t *, uint_t *);
1241
1242 cmi_hdl_t
1243 cmi_hdl_create(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
1244 uint_t strandid)
1245 {
1246 cmi_hdl_impl_t *hdl;
1247 void *priv;
1248 cmi_hdl_ent_t *ent;
1249 uint_t vendor;
1250
1251 #ifdef __xpv
1252 ASSERT(class == CMI_HDL_SOLARIS_xVM_MCA);
1253 #else
1254 ASSERT(class == CMI_HDL_NATIVE);
1255 #endif
1256
1257 if ((priv = cpu_search(class, chipid, coreid, strandid)) == NULL)
1258 return (NULL);
1259
1260 /*
1261 * Assume all chips in the system are the same type.
1262 * For Intel, attempt to check if extended topology is available via
1263 * CPUID.EAX=0xB. If so, get the number of core and strand bits.
1264 */
1265 #ifdef __xpv
1266 vendor = _cpuid_vendorstr_to_vendorcode(
1267 (char *)xen_physcpu_vendorstr((xen_mc_lcpu_cookie_t)priv));
1268 #else
1269 vendor = cpuid_getvendor((cpu_t *)priv);
1270 #endif
1271
1272 switch (vendor) {
1273 case X86_VENDOR_Intel:
1274 case X86_VENDOR_AMD:
1275 case X86_VENDOR_HYGON:
1276 if (cmi_ext_topo_check == 0) {
1277 cpuid_get_ext_topo((cpu_t *)priv, &cmi_core_nbits,
1278 &cmi_strand_nbits);
1279 cmi_ext_topo_check = 1;
1280 }
1281 default:
1282 break;
1283 }
1284
1285 if (chipid > CMI_MAX_CHIPID ||
1286 coreid > CMI_MAX_COREID(cmi_core_nbits) ||
1287 strandid > CMI_MAX_STRANDID(cmi_strand_nbits))
1288 return (NULL);
1289
1290 hdl = kmem_zalloc(sizeof (*hdl), KM_SLEEP);
1291
1292 hdl->cmih_class = class;
1293 HDLOPS(hdl) = &cmi_hdl_ops;
1294 hdl->cmih_chipid = chipid;
1295 hdl->cmih_coreid = coreid;
1296 hdl->cmih_strandid = strandid;
1297 hdl->cmih_mstrand = cpu_is_cmt(priv);
1298 hdl->cmih_hdlpriv = priv;
1299 #ifdef __xpv
1300 hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_INTERPOSEOK |
1301 CMI_MSR_FLAG_WR_INTERPOSEOK;
1302
1303 /*
1304 * XXX: need hypervisor support for procnodeid, for now assume
1305 * single-node processors (procnodeid = chipid)
1306 */
1307 hdl->cmih_procnodeid = xen_physcpu_chipid((xen_mc_lcpu_cookie_t)priv);
1308 hdl->cmih_procnodes_per_pkg = 1;
1309 #else /* __xpv */
1310 hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_HWOK | CMI_MSR_FLAG_RD_INTERPOSEOK |
1311 CMI_MSR_FLAG_WR_HWOK | CMI_MSR_FLAG_WR_INTERPOSEOK;
1312 hdl->cmih_procnodeid = cpuid_get_procnodeid((cpu_t *)priv);
1313 hdl->cmih_procnodes_per_pkg =
1314 cpuid_get_procnodes_per_pkg((cpu_t *)priv);
1315 #endif /* __xpv */
1316
1317 ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);
1318 if (ent->cmae_refcnt != 0 || ent->cmae_hdlp != NULL) {
1319 /*
1320 * Somehow this (chipid, coreid, strandid) id tuple has
1321 * already been assigned! This indicates that the
1322 * caller's logic in determining these values is busted,
1323 * or perhaps undermined by bad BIOS setup. Complain,
1324 * and refuse to initialize this tuple again as bad things
1325 * will happen.
1326 */
1327 cmn_err(CE_NOTE, "cmi_hdl_create: chipid %d coreid %d "
1328 "strandid %d handle already allocated!",
1329 chipid, coreid, strandid);
1330 kmem_free(hdl, sizeof (*hdl));
1331 return (NULL);
1332 }
1333
1334 /*
1335 * Once we store a nonzero reference count others can find this
1336 * handle via cmi_hdl_lookup etc. This initial hold on the handle
1337 * is to be dropped only if some other part of cmi initialization
1338 * fails or, if it succeeds, at later cpu deconfigure. Note that
1339 * the module private data we hold in cmih_cmi and cmih_cmidata
1340 * is still NULL at this point (the caller will fill it with
1341 * cmi_hdl_setcmi if it initializes) so consumers of handles
1342 * should always be ready for that possibility.
1343 */
1344 ent->cmae_hdlp = hdl;
1345 hdl->cmih_refcntp = &ent->cmae_refcnt;
1346 ent->cmae_refcnt = 1;
1347
1348 return ((cmi_hdl_t)hdl);
1349 }
1350
1351 void
1352 cmi_read_smbios(cmi_hdl_t ophdl)
1353 {
1354
1355 uint_t strand_apicid = UINT_MAX;
1356 uint_t chip_inst = UINT_MAX;
1357 uint16_t smb_id = USHRT_MAX;
1358 int rc = 0;
1359
1360 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1361
1362 /* set x86gentopo compatibility */
1363 fm_smb_fmacompat();
1364
1365 #ifndef __xpv
1366 strand_apicid = ntv_strand_apicid(hdl);
1367 #else
1368 strand_apicid = xpv_strand_apicid(hdl);
1369 #endif
1370
1371 if (!x86gentopo_legacy) {
1372 /*
1373 * If fm_smb_chipinst() or fm_smb_bboard() fails,
1374 * topo reverts to legacy mode
1375 */
1376 rc = fm_smb_chipinst(strand_apicid, &chip_inst, &smb_id);
1377 if (rc == 0) {
1378 hdl->cmih_smb_chipid = chip_inst;
1379 hdl->cmih_smbiosid = smb_id;
1380 } else {
1381 #ifdef DEBUG
1382 cmn_err(CE_NOTE, "!cmi reads smbios chip info failed");
1383 #endif /* DEBUG */
1384 return;
1385 }
1386
1387 hdl->cmih_smb_bboard = fm_smb_bboard(strand_apicid);
1388 #ifdef DEBUG
1389 if (hdl->cmih_smb_bboard == NULL)
1390 cmn_err(CE_NOTE,
1391 "!cmi reads smbios base boards info failed");
1392 #endif /* DEBUG */
1393 }
1394 }
1395
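/*
 * Handle reference counting: cmi_hdl_create() publishes the handle with an
 * initial count of 1, cmi_hdl_hold()/cmi_hdl_rele() bump and drop that
 * count, and cmi_hdl_destroy() releases the initial hold and then busy-waits
 * (via cmi_hdl_canref()) until all remaining holders have dropped theirs.
 */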
1396 void
1397 cmi_hdl_hold(cmi_hdl_t ophdl)
1398 {
1399 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1400
1401 ASSERT(*hdl->cmih_refcntp != 0); /* must not be the initial hold */
1402
1403 atomic_inc_32(hdl->cmih_refcntp);
1404 }
1405
1406 static int
1407 cmi_hdl_canref(cmi_hdl_ent_t *ent)
1408 {
1409 volatile uint32_t *refcntp;
1410 uint32_t refcnt;
1411
1412 refcntp = &ent->cmae_refcnt;
1413 refcnt = *refcntp;
1414
1415 if (refcnt == 0) {
1416 /*
1417 * Associated object never existed, is being destroyed,
1418 * or has been destroyed.
1419 */
1420 return (0);
1421 }
1422
1423 /*
1424 * We cannot use atomic increment here because once the reference
1425 * count reaches zero it must never be bumped up again.
1426 */
1427 while (refcnt != 0) {
1428 if (atomic_cas_32(refcntp, refcnt, refcnt + 1) == refcnt)
1429 return (1);
1430 refcnt = *refcntp;
1431 }
1432
1433 /*
1434 * Somebody dropped the reference count to 0 after our initial
1435 * check.
1436 */
1437 return (0);
1438 }
1439
1440
1441 void
1442 cmi_hdl_rele(cmi_hdl_t ophdl)
1443 {
1444 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1445
1446 ASSERT(*hdl->cmih_refcntp > 0);
1447 atomic_dec_32(hdl->cmih_refcntp);
1448 }
1449
1450 void
1451 cmi_hdl_destroy(cmi_hdl_t ophdl)
1452 {
1453 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1454 cmi_hdl_ent_t *ent;
1455
1456 /* Release the reference count held by cmi_hdl_create(). */
1457 ASSERT(*hdl->cmih_refcntp > 0);
1458 atomic_dec_32(hdl->cmih_refcntp);
1459 hdl->cmih_flags |= CMIH_F_DEAD;
1460
1461 ent = cmi_hdl_ent_lookup(hdl->cmih_chipid, hdl->cmih_coreid,
1462 hdl->cmih_strandid);
1463 /*
1464 * Use busy polling instead of a condition variable here because
1465 * cmi_hdl_rele() may be called from the #MC handler.
1466 */
1467 while (cmi_hdl_canref(ent)) {
1468 cmi_hdl_rele(ophdl);
1469 delay(1);
1470 }
1471 ent->cmae_hdlp = NULL;
1472
1473 kmem_free(hdl, sizeof (*hdl));
1474 }
1475
1476 void
1477 cmi_hdl_setspecific(cmi_hdl_t ophdl, void *arg)
1478 {
1479 IMPLHDL(ophdl)->cmih_spec = arg;
1480 }
1481
1482 void *
1483 cmi_hdl_getspecific(cmi_hdl_t ophdl)
1484 {
1485 return (IMPLHDL(ophdl)->cmih_spec);
1486 }
1487
1488 void
1489 cmi_hdl_setmc(cmi_hdl_t ophdl, const struct cmi_mc_ops *mcops, void *mcdata)
1490 {
1491 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1492
1493 ASSERT(hdl->cmih_mcops == NULL && hdl->cmih_mcdata == NULL);
1494 hdl->cmih_mcops = mcops;
1495 hdl->cmih_mcdata = mcdata;
1496 }
1497
1498 const struct cmi_mc_ops *
1499 cmi_hdl_getmcops(cmi_hdl_t ophdl)
1500 {
1501 return (IMPLHDL(ophdl)->cmih_mcops);
1502 }
1503
1504 void *
1505 cmi_hdl_getmcdata(cmi_hdl_t ophdl)
1506 {
1507 return (IMPLHDL(ophdl)->cmih_mcdata);
1508 }
1509
1510 cmi_hdl_t
1511 cmi_hdl_lookup(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
1512 uint_t strandid)
1513 {
1514 cmi_hdl_ent_t *ent;
1515
1516 if (chipid > CMI_MAX_CHIPID ||
1517 coreid > CMI_MAX_COREID(cmi_core_nbits) ||
1518 strandid > CMI_MAX_STRANDID(cmi_strand_nbits))
1519 return (NULL);
1520
1521 ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);
1522
1523 if (class == CMI_HDL_NEUTRAL)
1524 #ifdef __xpv
1525 class = CMI_HDL_SOLARIS_xVM_MCA;
1526 #else
1527 class = CMI_HDL_NATIVE;
1528 #endif
1529
1530 if (!cmi_hdl_canref(ent))
1531 return (NULL);
1532
1533 if (ent->cmae_hdlp->cmih_class != class) {
1534 cmi_hdl_rele((cmi_hdl_t)ent->cmae_hdlp);
1535 return (NULL);
1536 }
1537
1538 return ((cmi_hdl_t)ent->cmae_hdlp);
1539 }
1540
1541 cmi_hdl_t
1542 cmi_hdl_any(void)
1543 {
1544 int i, j;
1545 cmi_hdl_ent_t *ent;
1546 int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
1547 cmi_strand_nbits);
1548
1549 for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
1550 if (cmi_chip_tab[i] == NULL)
1551 continue;
1552 for (j = 0, ent = cmi_chip_tab[i]; j < max_strands;
1553 j++, ent++) {
1554 if (cmi_hdl_canref(ent))
1555 return ((cmi_hdl_t)ent->cmae_hdlp);
1556 }
1557 }
1558
1559 return (NULL);
1560 }
1561
1562 void
1563 cmi_hdl_walk(int (*cbfunc)(cmi_hdl_t, void *, void *, void *),
1564 void *arg1, void *arg2, void *arg3)
1565 {
1566 int i, j;
1567 cmi_hdl_ent_t *ent;
1568 int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
1569 cmi_strand_nbits);
1570
1571 for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
1572 if (cmi_chip_tab[i] == NULL)
1573 continue;
1574 for (j = 0, ent = cmi_chip_tab[i]; j < max_strands;
1575 j++, ent++) {
1576 if (cmi_hdl_canref(ent)) {
1577 cmi_hdl_impl_t *hdl = ent->cmae_hdlp;
1578 if ((*cbfunc)((cmi_hdl_t)hdl, arg1, arg2, arg3)
1579 == CMI_HDL_WALK_DONE) {
1580 cmi_hdl_rele((cmi_hdl_t)hdl);
1581 return;
1582 }
1583 cmi_hdl_rele((cmi_hdl_t)hdl);
1584 }
1585 }
1586 }
1587 }
1588
1589 void
1590 cmi_hdl_setcmi(cmi_hdl_t ophdl, void *cmi, void *cmidata)
1591 {
1592 IMPLHDL(ophdl)->cmih_cmidata = cmidata;
1593 IMPLHDL(ophdl)->cmih_cmi = cmi;
1594 }
1595
1596 void *
1597 cmi_hdl_getcmi(cmi_hdl_t ophdl)
1598 {
1599 return (IMPLHDL(ophdl)->cmih_cmi);
1600 }
1601
1602 void *
1603 cmi_hdl_getcmidata(cmi_hdl_t ophdl)
1604 {
1605 return (IMPLHDL(ophdl)->cmih_cmidata);
1606 }
1607
1608 enum cmi_hdl_class
1609 cmi_hdl_class(cmi_hdl_t ophdl)
1610 {
1611 return (IMPLHDL(ophdl)->cmih_class);
1612 }
1613
1614 #define CMI_HDL_OPFUNC(what, type) \
1615 type \
1616 cmi_hdl_##what(cmi_hdl_t ophdl) \
1617 { \
1618 return (HDLOPS(IMPLHDL(ophdl))-> \
1619 cmio_##what(IMPLHDL(ophdl))); \
1620 }
1621
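/*
 * For example, CMI_HDL_OPFUNC(vendor, uint_t) below expands to:
 *
 *	uint_t
 *	cmi_hdl_vendor(cmi_hdl_t ophdl)
 *	{
 *		return (HDLOPS(IMPLHDL(ophdl))->cmio_vendor(IMPLHDL(ophdl)));
 *	}
 */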
1622 /* BEGIN CSTYLED */
1623 CMI_HDL_OPFUNC(vendor, uint_t)
1624 CMI_HDL_OPFUNC(vendorstr, const char *)
1625 CMI_HDL_OPFUNC(family, uint_t)
1626 CMI_HDL_OPFUNC(model, uint_t)
1627 CMI_HDL_OPFUNC(stepping, uint_t)
1628 CMI_HDL_OPFUNC(chipid, uint_t)
1629 CMI_HDL_OPFUNC(procnodeid, uint_t)
1630 CMI_HDL_OPFUNC(coreid, uint_t)
1631 CMI_HDL_OPFUNC(strandid, uint_t)
1632 CMI_HDL_OPFUNC(procnodes_per_pkg, uint_t)
1633 CMI_HDL_OPFUNC(strand_apicid, uint_t)
1634 CMI_HDL_OPFUNC(chiprev, uint32_t)
1635 CMI_HDL_OPFUNC(chiprevstr, const char *)
1636 CMI_HDL_OPFUNC(getsockettype, uint32_t)
1637 CMI_HDL_OPFUNC(getsocketstr, const char *)
1638 CMI_HDL_OPFUNC(logical_id, id_t)
1639 CMI_HDL_OPFUNC(smbiosid, uint16_t)
1640 CMI_HDL_OPFUNC(smb_chipid, uint_t)
1641 CMI_HDL_OPFUNC(smb_bboard, nvlist_t *)
1642 CMI_HDL_OPFUNC(chipsig, uint_t)
1643 /* END CSTYLED */
1644
1645 boolean_t
1646 cmi_hdl_is_cmt(cmi_hdl_t ophdl)
1647 {
1648 return (IMPLHDL(ophdl)->cmih_mstrand);
1649 }
1650
1651 void
1652 cmi_hdl_int(cmi_hdl_t ophdl, int num)
1653 {
1654 if (HDLOPS(IMPLHDL(ophdl))->cmio_int == NULL)
1655 return;
1656
1657 cmi_hdl_inj_begin(ophdl);
1658 HDLOPS(IMPLHDL(ophdl))->cmio_int(IMPLHDL(ophdl), num);
1659 cmi_hdl_inj_end(NULL);
1660 }
1661
1662 int
1663 cmi_hdl_online(cmi_hdl_t ophdl, int new_status, int *old_status)
1664 {
1665 return (HDLOPS(IMPLHDL(ophdl))->cmio_online(IMPLHDL(ophdl),
1666 new_status, old_status));
1667 }
1668
1669 #ifndef __xpv
1670 /*
1671 * Return hardware chip instance; cpuid_get_chipid provides this directly.
1672 */
1673 uint_t
1674 cmi_ntv_hwchipid(cpu_t *cp)
1675 {
1676 return (cpuid_get_chipid(cp));
1677 }
1678
1679 /*
1680 * Return hardware node instance; cpuid_get_procnodeid provides this directly.
1681 */
1682 uint_t
1683 cmi_ntv_hwprocnodeid(cpu_t *cp)
1684 {
1685 return (cpuid_get_procnodeid(cp));
1686 }
1687
1688 /*
1689 * Return core instance within a single chip.
1690 */
1691 uint_t
1692 cmi_ntv_hwcoreid(cpu_t *cp)
1693 {
1694 return (cpuid_get_pkgcoreid(cp));
1695 }
1696
1697 /*
1698 * Return strand number within a single core. cpuid_get_clogid numbers
1699 * all execution units (strands, or cores in unstranded models) sequentially
1700 * within a single chip.
1701 */
1702 uint_t
1703 cmi_ntv_hwstrandid(cpu_t *cp)
1704 {
1705 int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
1706 cpuid_get_ncore_per_chip(cp);
1707
1708 return (cpuid_get_clogid(cp) % strands_per_core);
1709 }
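
/*
 * Illustration (hypothetical chip with 2 strands per core): with the modulo
 * above, cpuid_get_clogid() values 0, 1, 2, 3 map to strandids 0, 1, 0, 1.
 */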
1710
1711 static void
1712 cmi_ntv_hwdisable_mce_xc(void)
1713 {
1714 ulong_t cr4;
1715
1716 cr4 = getcr4();
1717 cr4 = cr4 & (~CR4_MCE);
1718 setcr4(cr4);
1719 }
1720
1721 void
1722 cmi_ntv_hwdisable_mce(cmi_hdl_t hdl)
1723 {
1724 cpuset_t set;
1725 cmi_hdl_impl_t *thdl = IMPLHDL(hdl);
1726 cpu_t *cp = HDLPRIV(thdl);
1727
1728 if (CPU->cpu_id == cp->cpu_id) {
1729 cmi_ntv_hwdisable_mce_xc();
1730 } else {
1731 CPUSET_ONLY(set, cp->cpu_id);
1732 xc_call(0, 0, 0, CPUSET2BV(set),
1733 (xc_func_t)cmi_ntv_hwdisable_mce_xc);
1734 }
1735 }
1736
1737 #endif /* __xpv */
1738
1739 void
1740 cmi_hdlconf_rdmsr_nohw(cmi_hdl_t ophdl)
1741 {
1742 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1743
1744 hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_RD_HWOK;
1745 }
1746
1747 void
1748 cmi_hdlconf_wrmsr_nohw(cmi_hdl_t ophdl)
1749 {
1750 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1751
1752 hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_WR_HWOK;
1753 }
1754
1755 cmi_errno_t
1756 cmi_hdl_rdmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t *valp)
1757 {
1758 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1759
1760 /*
1761 * Regardless of the handle class, we first check for an
1762 * interposed value. In the xVM case you probably want to
1763 * place interposed values within the hypervisor itself, but
1764 * we still allow interposing them in dom0 for test and bringup
1765 * purposes.
1766 */
1767 if ((hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_INTERPOSEOK) &&
1768 msri_lookup(hdl, msr, valp))
1769 return (CMI_SUCCESS);
1770
1771 if (HDLOPS(hdl)->cmio_rdmsr == NULL)
1772 return (CMIERR_NOTSUP);
1773
1774 return (HDLOPS(hdl)->cmio_rdmsr(hdl, msr, valp));
1775 }
1776
1777 cmi_errno_t
1778 cmi_hdl_wrmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t val)
1779 {
1780 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1781
1782 /* Invalidate any interposed value */
1783 msri_rment(hdl, msr);
1784
1785 if (HDLOPS(hdl)->cmio_wrmsr == NULL)
1786 return (CMI_SUCCESS); /* pretend all is ok */
1787
1788 return (HDLOPS(hdl)->cmio_wrmsr(hdl, msr, val));
1789 }
1790
1791 void
1792 cmi_hdl_enable_mce(cmi_hdl_t ophdl)
1793 {
1794 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1795 ulong_t cr4;
1796
1797 if (HDLOPS(hdl)->cmio_getcr4 == NULL ||
1798 HDLOPS(hdl)->cmio_setcr4 == NULL)
1799 return;
1800
1801 cr4 = HDLOPS(hdl)->cmio_getcr4(hdl);
1802
1803 HDLOPS(hdl)->cmio_setcr4(hdl, cr4 | CR4_MCE);
1804 }
1805
1806 void
1807 cmi_hdl_msrinterpose(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
1808 {
1809 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1810 int i;
1811
1812 if (HDLOPS(hdl)->cmio_msrinterpose == NULL)
1813 return;
1814
1815 cmi_hdl_inj_begin(ophdl);
1816
1817 for (i = 0; i < nregs; i++, regs++)
1818 HDLOPS(hdl)->cmio_msrinterpose(hdl, regs->cmr_msrnum,
1819 regs->cmr_msrval);
1820
1821 cmi_hdl_inj_end(ophdl);
1822 }
1823
1824 /*ARGSUSED*/
1825 void
1826 cmi_hdl_msrforward(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
1827 {
1828 #ifdef __xpv
1829 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1830 int i;
1831
1832 for (i = 0; i < nregs; i++, regs++)
1833 msri_addent(hdl, regs->cmr_msrnum, regs->cmr_msrval);
1834 #endif
1835 }
1836
1837
1838 void
1839 cmi_pcird_nohw(void)
1840 {
1841 cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_RD_HWOK;
1842 }
1843
1844 void
1845 cmi_pciwr_nohw(void)
1846 {
1847 cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_WR_HWOK;
1848 }
1849
1850 static uint32_t
1851 cmi_pci_get_cmn(int bus, int dev, int func, int reg, int asz,
1852 int *interpose, ddi_acc_handle_t hdl)
1853 {
1854 uint32_t val;
1855
1856 if (cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_INTERPOSEOK &&
1857 pcii_lookup(bus, dev, func, reg, asz, &val)) {
1858 if (interpose)
1859 *interpose = 1;
1860 return (val);
1861 }
1862 if (interpose)
1863 *interpose = 0;
1864
1865 if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_HWOK))
1866 return (0);
1867
1868 switch (asz) {
1869 case 1:
1870 if (hdl)
1871 val = pci_config_get8(hdl, (off_t)reg);
1872 else
1873 val = pci_cfgacc_get8(NULL, PCI_GETBDF(bus, dev, func),
1874 reg);
1875 break;
1876 case 2:
1877 if (hdl)
1878 val = pci_config_get16(hdl, (off_t)reg);
1879 else
1880 val = pci_cfgacc_get16(NULL, PCI_GETBDF(bus, dev, func),
1881 reg);
1882 break;
1883 case 4:
1884 if (hdl)
1885 val = pci_config_get32(hdl, (off_t)reg);
1886 else
1887 val = pci_cfgacc_get32(NULL, PCI_GETBDF(bus, dev, func),
1888 reg);
1889 break;
1890 default:
1891 val = 0;
1892 }
1893 return (val);
1894 }
1895
1896 uint8_t
1897 cmi_pci_getb(int bus, int dev, int func, int reg, int *interpose,
1898 ddi_acc_handle_t hdl)
1899 {
1900 return ((uint8_t)cmi_pci_get_cmn(bus, dev, func, reg, 1, interpose,
1901 hdl));
1902 }
1903
1904 uint16_t
1905 cmi_pci_getw(int bus, int dev, int func, int reg, int *interpose,
1906 ddi_acc_handle_t hdl)
1907 {
1908 return ((uint16_t)cmi_pci_get_cmn(bus, dev, func, reg, 2, interpose,
1909 hdl));
1910 }
1911
1912 uint32_t
1913 cmi_pci_getl(int bus, int dev, int func, int reg, int *interpose,
1914 ddi_acc_handle_t hdl)
1915 {
1916 return (cmi_pci_get_cmn(bus, dev, func, reg, 4, interpose, hdl));
1917 }
1918
1919 void
1920 cmi_pci_interposeb(int bus, int dev, int func, int reg, uint8_t val)
1921 {
1922 pcii_addent(bus, dev, func, reg, val, 1);
1923 }
1924
1925 void
1926 cmi_pci_interposew(int bus, int dev, int func, int reg, uint16_t val)
1927 {
1928 pcii_addent(bus, dev, func, reg, val, 2);
1929 }
1930
1931 void
1932 cmi_pci_interposel(int bus, int dev, int func, int reg, uint32_t val)
1933 {
1934 pcii_addent(bus, dev, func, reg, val, 4);
1935 }
1936
1937 static void
1938 cmi_pci_put_cmn(int bus, int dev, int func, int reg, int asz,
1939 ddi_acc_handle_t hdl, uint32_t val)
1940 {
1941 /*
1942 * If there is an interposed value for this register invalidate it.
1943 */
1944 pcii_rment(bus, dev, func, reg, asz);
1945
1946 if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_WR_HWOK))
1947 return;
1948
1949 switch (asz) {
1950 case 1:
1951 if (hdl)
1952 pci_config_put8(hdl, (off_t)reg, (uint8_t)val);
1953 else
1954 pci_cfgacc_put8(NULL, PCI_GETBDF(bus, dev, func), reg,
1955 (uint8_t)val);
1956 break;
1957
1958 case 2:
1959 if (hdl)
1960 pci_config_put16(hdl, (off_t)reg, (uint16_t)val);
1961 else
1962 pci_cfgacc_put16(NULL, PCI_GETBDF(bus, dev, func), reg,
1963 (uint16_t)val);
1964 break;
1965
1966 case 4:
1967 if (hdl)
1968 pci_config_put32(hdl, (off_t)reg, val);
1969 else
1970 pci_cfgacc_put32(NULL, PCI_GETBDF(bus, dev, func), reg,
1971 val);
1972 break;
1973
1974 default:
1975 break;
1976 }
1977 }
1978
1979 void
1980 cmi_pci_putb(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
1981 uint8_t val)
1982 {
1983 cmi_pci_put_cmn(bus, dev, func, reg, 1, hdl, val);
1984 }
1985
1986 void
1987 cmi_pci_putw(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
1988 uint16_t val)
1989 {
1990 cmi_pci_put_cmn(bus, dev, func, reg, 2, hdl, val);
1991 }
1992
1993 void
1994 cmi_pci_putl(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
1995 uint32_t val)
1996 {
1997 cmi_pci_put_cmn(bus, dev, func, reg, 4, hdl, val);
1998 }
1999
2000 static const struct cmi_hdl_ops cmi_hdl_ops = {
2001 #ifdef __xpv
2002 /*
2003 * CMI_HDL_SOLARIS_xVM_MCA - ops when we are an xVM dom0
2004 */
2005 xpv_vendor, /* cmio_vendor */
2006 xpv_vendorstr, /* cmio_vendorstr */
2007 xpv_family, /* cmio_family */
2008 xpv_model, /* cmio_model */
2009 xpv_stepping, /* cmio_stepping */
2010 xpv_chipid, /* cmio_chipid */
2011 xpv_procnodeid, /* cmio_procnodeid */
2012 xpv_coreid, /* cmio_coreid */
2013 xpv_strandid, /* cmio_strandid */
2014 xpv_procnodes_per_pkg, /* cmio_procnodes_per_pkg */
2015 xpv_strand_apicid, /* cmio_strand_apicid */
2016 xpv_chiprev, /* cmio_chiprev */
2017 xpv_chiprevstr, /* cmio_chiprevstr */
2018 xpv_getsockettype, /* cmio_getsockettype */
2019 xpv_getsocketstr, /* cmio_getsocketstr */
2020 xpv_chipsig, /* cmio_chipsig */
2021 xpv_logical_id, /* cmio_logical_id */
2022 NULL, /* cmio_getcr4 */
2023 NULL, /* cmio_setcr4 */
2024 xpv_rdmsr, /* cmio_rdmsr */
2025 xpv_wrmsr, /* cmio_wrmsr */
2026 xpv_msrinterpose, /* cmio_msrinterpose */
2027 xpv_int, /* cmio_int */
2028 xpv_online, /* cmio_online */
2029 xpv_smbiosid, /* cmio_smbiosid */
2030 xpv_smb_chipid, /* cmio_smb_chipid */
2031 xpv_smb_bboard /* cmio_smb_bboard */
2032
2033 #else /* __xpv */
2034
2035 /*
2036 * CMI_HDL_NATIVE - ops when apparently running on bare-metal
2037 */
2038 ntv_vendor, /* cmio_vendor */
2039 ntv_vendorstr, /* cmio_vendorstr */
2040 ntv_family, /* cmio_family */
2041 ntv_model, /* cmio_model */
2042 ntv_stepping, /* cmio_stepping */
2043 ntv_chipid, /* cmio_chipid */
2044 ntv_procnodeid, /* cmio_procnodeid */
2045 ntv_coreid, /* cmio_coreid */
2046 ntv_strandid, /* cmio_strandid */
2047 ntv_procnodes_per_pkg, /* cmio_procnodes_per_pkg */
2048 ntv_strand_apicid, /* cmio_strand_apicid */
2049 ntv_chiprev, /* cmio_chiprev */
2050 ntv_chiprevstr, /* cmio_chiprevstr */
2051 ntv_getsockettype, /* cmio_getsockettype */
2052 ntv_getsocketstr, /* cmio_getsocketstr */
2053 ntv_chipsig, /* cmio_chipsig */
2054 ntv_logical_id, /* cmio_logical_id */
2055 ntv_getcr4, /* cmio_getcr4 */
2056 ntv_setcr4, /* cmio_setcr4 */
2057 ntv_rdmsr, /* cmio_rdmsr */
2058 ntv_wrmsr, /* cmio_wrmsr */
2059 ntv_msrinterpose, /* cmio_msrinterpose */
2060 ntv_int, /* cmio_int */
2061 ntv_online, /* cmio_online */
2062 ntv_smbiosid, /* cmio_smbiosid */
2063 ntv_smb_chipid, /* cmio_smb_chipid */
2064 ntv_smb_bboard /* cmio_smb_bboard */
2065 #endif
2066 };
2067