17aec1d6eScindi /* 27aec1d6eScindi * CDDL HEADER START 37aec1d6eScindi * 47aec1d6eScindi * The contents of this file are subject to the terms of the 53ad553a7Sgavinm * Common Development and Distribution License (the "License"). 63ad553a7Sgavinm * You may not use this file except in compliance with the License. 77aec1d6eScindi * 87aec1d6eScindi * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97aec1d6eScindi * or http://www.opensolaris.org/os/licensing. 107aec1d6eScindi * See the License for the specific language governing permissions 117aec1d6eScindi * and limitations under the License. 127aec1d6eScindi * 137aec1d6eScindi * When distributing Covered Code, include this CDDL HEADER in each 147aec1d6eScindi * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157aec1d6eScindi * If applicable, add the following below this CDDL HEADER, with the 167aec1d6eScindi * fields enclosed by brackets "[]" replaced with your own identifying 177aec1d6eScindi * information: Portions Copyright [yyyy] [name of copyright owner] 187aec1d6eScindi * 197aec1d6eScindi * CDDL HEADER END 207aec1d6eScindi */ 217aec1d6eScindi 227aec1d6eScindi /* 23e3d60c9bSAdrian Frost * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 247aec1d6eScindi * Use is subject to license terms. 25918e0d92SRobert Mustacchi * Copyright (c) 2018, Joyent, Inc. 267aec1d6eScindi */ 27a3114836SGerry Liu /* 28a3114836SGerry Liu * Copyright (c) 2010, Intel Corporation. 29a3114836SGerry Liu * All rights reserved. 30a3114836SGerry Liu */ 317aec1d6eScindi 3215c07adcSJohn Levon /* 3315c07adcSJohn Levon * Copyright (c) 2018, Joyent, Inc. 34*b445c7c6SAndrew Stormont * Copyright 2020 RackTop Systems, Inc. 3515c07adcSJohn Levon */ 3615c07adcSJohn Levon 377aec1d6eScindi /* 387aec1d6eScindi * Generic x86 CPU Module 397aec1d6eScindi * 407aec1d6eScindi * This CPU module is used for generic x86 CPUs when Solaris has no other 417aec1d6eScindi * CPU-specific support module available. 
 * Code in this module should be the
 * absolute bare-bones support and must be cognizant of both Intel and AMD etc.
 */

#include <sys/types.h>
#include <sys/cpu_module_impl.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/pghw.h>
#include <sys/x86_archext.h>

#include "gcpu.h"

/*
 * Prevent generic cpu support from loading.
 */
int gcpu_disable = 0;

/* Maximum number of chips (sockets) we track shared per-chip state for. */
#define	GCPU_MAX_CHIPID		32
/* Per-chip shared state, indexed by chipid; entries published lazily. */
static struct gcpu_chipshared *gcpu_shared[GCPU_MAX_CHIPID];
#ifdef DEBUG
/* Debug knobs: disable ID generation, or override the per-chip ID string. */
int gcpu_id_disable = 0;
static const char *gcpu_id_override[GCPU_MAX_CHIPID] = { NULL };
#endif

#ifndef __xpv

/*
 * The purpose of this is to construct a unique identifier for a given processor
 * that can be used by things like FMA to determine when a FRU has been
 * replaced. It is supported on Intel Xeon Platforms since Ivy Bridge and AMD
 * 17h processors since Rome. See cpuid_pass1_ppin() for how we determine if a
 * CPU is supported.
 *
 * The protected processor inventory number (PPIN) can be used to create a
 * unique identifier when combined with the processor's cpuid signature.
 * We
 * create a versioned, synthetic ID using the following scheme for the
 * identifier: iv0-<vendor>-<signature>-<PPIN>. The iv0 is the illumos version
 * zero of the ID. If we have a new scheme for a new generation of processors,
 * then that should rev the version field, otherwise for a given processor, this
 * synthetic ID should not change.
 *
 * We use the string "INTC" for Intel and "AMD" for AMD. None of these or the
 * formatting of the values can change without changing the version string.
 *
 * Returns an allocated identifier string (caller owns it), or NULL if the
 * vendor is unsupported or the PPIN could not be enabled and read.
 */
static char *
gcpu_init_ident_ppin(cmi_hdl_t hdl)
{
	uint_t ppin_ctl_msr, ppin_msr;
	uint64_t value;
	const char *vendor;

	/*
	 * This list should be extended as new Intel Xeon family processors come
	 * out.
	 */
	switch (cmi_hdl_vendor(hdl)) {
	case X86_VENDOR_Intel:
		ppin_ctl_msr = MSR_PPIN_CTL_INTC;
		ppin_msr = MSR_PPIN_INTC;
		vendor = "INTC";
		break;
	case X86_VENDOR_AMD:
		ppin_ctl_msr = MSR_PPIN_CTL_AMD;
		ppin_msr = MSR_PPIN_AMD;
		vendor = "AMD";
		break;
	default:
		return (NULL);
	}

	if (cmi_hdl_rdmsr(hdl, ppin_ctl_msr, &value) != CMI_SUCCESS) {
		return (NULL);
	}

	/*
	 * If the PPIN is not enabled and not locked, attempt to enable it.
	 * Note: in some environments such as Amazon EC2 the PPIN appears
	 * to be disabled and unlocked but our attempts to enable it don't
	 * stick, and when we attempt to read the PPIN we get an uncaught
	 * #GP. To avoid that happening we read the MSR back and verify it
	 * has taken the new value.
	 */
	if ((value & MSR_PPIN_CTL_ENABLED) == 0) {
		if ((value & MSR_PPIN_CTL_LOCKED) != 0) {
			return (NULL);
		}

		if (cmi_hdl_wrmsr(hdl, ppin_ctl_msr, MSR_PPIN_CTL_ENABLED) !=
		    CMI_SUCCESS) {
			return (NULL);
		}

		if (cmi_hdl_rdmsr(hdl, ppin_ctl_msr, &value) != CMI_SUCCESS) {
			return (NULL);
		}

		if ((value & MSR_PPIN_CTL_ENABLED) == 0) {
			return (NULL);
		}
	}

	if (cmi_hdl_rdmsr(hdl, ppin_msr, &value) != CMI_SUCCESS) {
		return (NULL);
	}

	/*
	 * Now that we've read data, lock the PPIN. Don't worry about success or
	 * failure of this part, as we will have gotten everything that we need.
	 * It is possible that it locked open, for example.
	 */
	(void) cmi_hdl_wrmsr(hdl, ppin_ctl_msr, MSR_PPIN_CTL_LOCKED);

	return (kmem_asprintf("iv0-%s-%x-%llx", vendor, cmi_hdl_chipsig(hdl),
	    value));
}
#endif	/* __xpv */

/*
 * Derive and stash the chip identity string in sp->gcpus_ident, if one can
 * be determined for this chip. Called once per chip by whichever sibling
 * wins the race to publish the chipshared structure in gcpu_init().
 */
static void
gcpu_init_ident(cmi_hdl_t hdl, struct gcpu_chipshared *sp)
{
#ifdef DEBUG
	uint_t chipid;

	/*
	 * On debug, allow a developer to override the string to more
	 * easily test CPU autoreplace without needing to physically
	 * replace a CPU.
	 */
	if (gcpu_id_disable != 0) {
		return;
	}

	chipid = cmi_hdl_chipid(hdl);
	if (gcpu_id_override[chipid] != NULL) {
		sp->gcpus_ident = strdup(gcpu_id_override[chipid]);
		return;
	}
#endif

#ifndef __xpv
	/* Only attempt the PPIN scheme when the CPU advertises support. */
	if (is_x86_feature(x86_featureset, X86FSET_PPIN)) {
		sp->gcpus_ident = gcpu_init_ident_ppin(hdl);
	}
#endif	/* __xpv */
}

/*
 * Our cmi_init entry point, called during startup of each cpu instance.
 * Returns 0 on success, or ENOTSUP when generic support is disabled or the
 * chipid is out of the range we track. On success *datap is set to this
 * cpu's gcpu_data_t and a hold is taken on hdl (released in gcpu_fini()).
 */
int
gcpu_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	struct gcpu_chipshared *sp, *osp;
	gcpu_data_t *gcpu;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return (ENOTSUP);

	/*
	 * Allocate the state structure for this cpu. We will only
	 * allocate the bank logout areas in gcpu_mca_init once we
	 * know how many banks there are.
	 */
	gcpu = *datap = kmem_zalloc(sizeof (gcpu_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in gcpu_fini */
	gcpu->gcpu_hdl = hdl;

	/*
	 * Allocate a chipshared structure if no sibling cpu has already
	 * allocated it, but allow for the fact that a sibling core may
	 * be starting up in parallel.
	 */
	if ((sp = gcpu_shared[chipid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct gcpu_chipshared), KM_SLEEP);
		mutex_init(&sp->gcpus_poll_lock, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sp->gcpus_cfglock, NULL, MUTEX_DRIVER, NULL);
		/*
		 * Publish our structure atomically; if a sibling beat us to
		 * it, tear ours down and adopt theirs. Only the winner
		 * computes the chip identity.
		 */
		osp = atomic_cas_ptr(&gcpu_shared[chipid], NULL, sp);
		if (osp != NULL) {
			mutex_destroy(&sp->gcpus_cfglock);
			mutex_destroy(&sp->gcpus_poll_lock);
			kmem_free(sp, sizeof (struct gcpu_chipshared));
			sp = osp;
		} else {
			gcpu_init_ident(hdl, sp);
		}
	}

	/* Count this cpu as an active user of the chipshared structure. */
	atomic_inc_32(&sp->gcpus_actv_cnt);
	gcpu->gcpu_shared = sp;

	return (0);
}

/*
 * deconfigure gcpu_init()
 */
void
gcpu_fini(cmi_hdl_t hdl)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
	struct gcpu_chipshared *sp;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return;

	gcpu_mca_fini(hdl);

	/*
	 * Keep shared data in cache for reuse.
	 * The chipshared structure itself is intentionally never freed;
	 * only the active-user count is dropped.
	 */
	sp = gcpu_shared[chipid];
	ASSERT(sp != NULL);
	atomic_dec_32(&sp->gcpus_actv_cnt);

	if (gcpu != NULL)
		kmem_free(gcpu, sizeof (gcpu_data_t));

	/* Release reference count held in gcpu_init(). */
	cmi_hdl_rele(hdl);
}

/*
 * cmi_post_startup entry point, called once boot-time cpu startup has
 * progressed far enough for model-specific post-startup work and, depending
 * on platform, for polling / CMCI enablement.
 */
void
gcpu_post_startup(cmi_hdl_t hdl)
{
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);

	if (gcpu_disable)
		return;

	if (gcpu != NULL)
		cms_post_startup(hdl);
#ifdef __xpv
	/*
	 * All cpu handles are initialized so we can begin polling now.
	 * Furthermore, our virq mechanism requires that everything
	 * be run on cpu 0 so we can assure that by starting from here.
	 */
	gcpu_mca_poll_start(hdl);
#else
	/*
	 * The boot CPU has a bit of a chicken and egg problem for CMCI. Its MCA
	 * initialization is run before we have initialized the PSM module that
	 * we would use for enabling CMCI. Therefore, we use this as a chance to
	 * enable CMCI for the boot CPU. For all other CPUs, this chicken and
	 * egg problem will have already been solved.
290918e0d92SRobert Mustacchi */ 291918e0d92SRobert Mustacchi gcpu_mca_cmci_enable(hdl); 292e4b86885SCheng Sean Ye #endif 29320c794b3Sgavinm } 2947aec1d6eScindi 29520c794b3Sgavinm void 29620c794b3Sgavinm gcpu_post_mpstartup(cmi_hdl_t hdl) 297e4b86885SCheng Sean Ye { 298e4b86885SCheng Sean Ye if (gcpu_disable) 299e4b86885SCheng Sean Ye return; 300e4b86885SCheng Sean Ye 301e4b86885SCheng Sean Ye cms_post_mpstartup(hdl); 302e4b86885SCheng Sean Ye 303e4b86885SCheng Sean Ye #ifndef __xpv 30415c07adcSJohn Levon /* 30515c07adcSJohn Levon * All cpu handles are initialized only once all cpus are started, so we 30615c07adcSJohn Levon * can begin polling post mp startup. 30715c07adcSJohn Levon */ 30815c07adcSJohn Levon gcpu_mca_poll_start(hdl); 309e4b86885SCheng Sean Ye #endif 3107aec1d6eScindi } 3117aec1d6eScindi 3122a613b59SRobert Mustacchi const char * 3132a613b59SRobert Mustacchi gcpu_ident(cmi_hdl_t hdl) 3142a613b59SRobert Mustacchi { 3152a613b59SRobert Mustacchi uint_t chipid; 3162a613b59SRobert Mustacchi struct gcpu_chipshared *sp; 3172a613b59SRobert Mustacchi 3182a613b59SRobert Mustacchi if (gcpu_disable) 3192a613b59SRobert Mustacchi return (NULL); 3202a613b59SRobert Mustacchi 3212a613b59SRobert Mustacchi chipid = cmi_hdl_chipid(hdl); 3222a613b59SRobert Mustacchi if (chipid >= GCPU_MAX_CHIPID) 3232a613b59SRobert Mustacchi return (NULL); 3242a613b59SRobert Mustacchi 3252a613b59SRobert Mustacchi if (cmi_hdl_getcmidata(hdl) == NULL) 3262a613b59SRobert Mustacchi return (NULL); 3272a613b59SRobert Mustacchi 3282a613b59SRobert Mustacchi sp = gcpu_shared[cmi_hdl_chipid(hdl)]; 3292a613b59SRobert Mustacchi return (sp->gcpus_ident); 3302a613b59SRobert Mustacchi } 3312a613b59SRobert Mustacchi 332e4b86885SCheng Sean Ye #ifdef __xpv 333e4b86885SCheng Sean Ye #define GCPU_OP(ntvop, xpvop) xpvop 334e4b86885SCheng Sean Ye #else 335e4b86885SCheng Sean Ye #define GCPU_OP(ntvop, xpvop) ntvop 336e4b86885SCheng Sean Ye #endif 337e4b86885SCheng Sean Ye 338e4b86885SCheng Sean Ye cmi_api_ver_t 
_cmi_api_version = CMI_API_VERSION_3;

/* Operations vector through which the CMI framework drives this module. */
const cmi_ops_t _cmi_ops = {
	gcpu_init,			/* cmi_init */
	gcpu_post_startup,		/* cmi_post_startup */
	gcpu_post_mpstartup,		/* cmi_post_mpstartup */
	gcpu_faulted_enter,		/* cmi_faulted_enter */
	gcpu_faulted_exit,		/* cmi_faulted_exit */
	gcpu_mca_init,			/* cmi_mca_init */
	GCPU_OP(gcpu_mca_trap, NULL),	/* cmi_mca_trap */
	GCPU_OP(gcpu_cmci_trap, NULL),	/* cmi_cmci_trap */
	gcpu_msrinject,			/* cmi_msrinject */
	GCPU_OP(gcpu_hdl_poke, NULL),	/* cmi_hdl_poke */
	gcpu_fini,			/* cmi_fini */
	GCPU_OP(NULL, gcpu_xpv_panic_callback),	/* cmi_panic_callback */
	gcpu_ident			/* cmi_ident */
};

static struct modlcpu modlcpu = {
	&mod_cpuops,
	"Generic x86 CPU Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcpu,
	NULL
};

/* Loadable module entry point: register with the module framework. */
int
_init(void)
{
	return (mod_install(&modlinkage));
}

/* Loadable module entry point: report module information. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/* Loadable module entry point: unregister from the module framework. */
int
_fini(void)
{
	return (mod_remove(&modlinkage));
}