xref: /illumos-gate/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_main.c (revision 15c07adc1c7b828006b5e3c4d528b92229d6bd23)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * Copyright (c) 2018, Joyent, Inc.
 */

/*
 * Generic x86 CPU Module
 *
 * This CPU module is used for generic x86 CPUs when Solaris has no other
 * CPU-specific support module available.  Code in this module should be
 * the absolute bare-bones support and must be cognizant of Intel, AMD,
 * and other x86 vendors alike.
 */

#include <sys/types.h>
#include <sys/cpu_module_impl.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/pghw.h>
#include <sys/x86_archext.h>

#include "gcpu.h"

/*
 * Prevent generic cpu support from loading.
 */
int gcpu_disable = 0;

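/*
 * Per-chip (per-socket) shared state, indexed by chipid.  An entry is
 * allocated lazily by the first sibling cpu to come up in gcpu_init()
 * and is shared by all cpus on that chip.
 */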
#define	GCPU_MAX_CHIPID		32
static struct gcpu_chipshared *gcpu_shared[GCPU_MAX_CHIPID];
#ifdef	DEBUG
int gcpu_id_disable = 0;
static const char *gcpu_id_override[GCPU_MAX_CHIPID] = { NULL };
#endif

#ifndef	__xpv
/*
 * This should probably be delegated to a CPU-specific module.  However, as
 * those haven't been developed as actively for recent CPUs, we should
 * revisit this and move it out of gcpu once such a module exists.
 *
 * This method is only supported on Intel Xeon platforms.  It relies on a
 * combination of the PPIN and the cpuid signature; both are required to
 * form the synthetic ID.  The ID is prefixed with iv0-INTC to indicate that
 * it is an Intel synthetic ID, where iv0 is illumos version zero of the ID
 * scheme for Intel.  A new scheme for a new generation of processors should
 * rev the version field; otherwise, for a given processor, this synthetic
 * ID should not change.  For more information on the PPIN and these MSRs,
 * see the relevant processor external design specification.
 */
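/*
 * For illustration (hypothetical values): a Skylake Xeon with cpuid
 * signature 0x50654 and PPIN 0x1234567890abcdef would yield the ID string
 * "iv0-INTC-50654-1234567890abcdef" (see the kmem_asprintf() call below).
 */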
static char *
gcpu_init_ident_intc(cmi_hdl_t hdl)
{
	uint64_t msr;

	/*
	 * This list should be extended as new Intel Xeon family processors
	 * come out.
	 */
	switch (cmi_hdl_model(hdl)) {
	case INTC_MODEL_IVYBRIDGE_XEON:
	case INTC_MODEL_HASWELL_XEON:
	case INTC_MODEL_BROADWELL_XEON:
	case INTC_MODEL_BROADWELL_XEON_D:
	case INTC_MODEL_SKYLAKE_XEON:
		break;
	default:
		return (NULL);
	}

	if (cmi_hdl_rdmsr(hdl, MSR_PLATFORM_INFO, &msr) != CMI_SUCCESS) {
		return (NULL);
	}

	if ((msr & MSR_PLATFORM_INFO_PPIN) == 0) {
		return (NULL);
	}

	if (cmi_hdl_rdmsr(hdl, MSR_PPIN_CTL, &msr) != CMI_SUCCESS) {
		return (NULL);
	}

	if ((msr & MSR_PPIN_CTL_ENABLED) == 0) {
		if ((msr & MSR_PPIN_CTL_LOCKED) != 0) {
			return (NULL);
		}

		if (cmi_hdl_wrmsr(hdl, MSR_PPIN_CTL, MSR_PPIN_CTL_ENABLED) !=
		    CMI_SUCCESS) {
			return (NULL);
		}
	}

	if (cmi_hdl_rdmsr(hdl, MSR_PPIN, &msr) != CMI_SUCCESS) {
		return (NULL);
	}

	/*
	 * Now that we've read the data, lock the PPIN.  Don't worry about
	 * success or failure of this part, as we will already have gotten
	 * everything that we need.  It is possible that it was locked open,
	 * for example.
	 */
	(void) cmi_hdl_wrmsr(hdl, MSR_PPIN_CTL, MSR_PPIN_CTL_LOCKED);

	return (kmem_asprintf("iv0-INTC-%x-%llx", cmi_hdl_chipsig(hdl), msr));
}
#endif	/* __xpv */

static void
gcpu_init_ident(cmi_hdl_t hdl, struct gcpu_chipshared *sp)
{
#ifdef	DEBUG
	uint_t chipid;

	/*
	 * On debug, allow a developer to override the string to more
	 * easily test CPU autoreplace without needing to physically
	 * replace a CPU.
	 */
	if (gcpu_id_disable != 0) {
		return;
	}

	chipid = cmi_hdl_chipid(hdl);
	if (gcpu_id_override[chipid] != NULL) {
		sp->gcpus_ident = strdup(gcpu_id_override[chipid]);
		return;
	}
#endif

#ifndef __xpv
	switch (cmi_hdl_vendor(hdl)) {
	case X86_VENDOR_Intel:
		sp->gcpus_ident = gcpu_init_ident_intc(hdl);
		break;
	default:
		break;
	}
#endif	/* __xpv */
}

/*
 * Our cmi_init entry point, called during startup of each cpu instance.
 */
int
gcpu_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	struct gcpu_chipshared *sp, *osp;
	gcpu_data_t *gcpu;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return (ENOTSUP);

	/*
	 * Allocate the state structure for this cpu.  We will only
	 * allocate the bank logout areas in gcpu_mca_init once we
	 * know how many banks there are.
	 */
	gcpu = *datap = kmem_zalloc(sizeof (gcpu_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in gcpu_fini */
	gcpu->gcpu_hdl = hdl;

	/*
	 * Allocate a chipshared structure if no sibling cpu has already
	 * allocated it, but allow for the fact that a sibling core may
	 * be starting up in parallel.
	 */
	if ((sp = gcpu_shared[chipid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct gcpu_chipshared), KM_SLEEP);
		mutex_init(&sp->gcpus_poll_lock, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sp->gcpus_cfglock, NULL, MUTEX_DRIVER, NULL);
		osp = atomic_cas_ptr(&gcpu_shared[chipid], NULL, sp);
		if (osp != NULL) {
			mutex_destroy(&sp->gcpus_cfglock);
			mutex_destroy(&sp->gcpus_poll_lock);
			kmem_free(sp, sizeof (struct gcpu_chipshared));
			sp = osp;
		} else {
			gcpu_init_ident(hdl, sp);
		}
	}

	atomic_inc_32(&sp->gcpus_actv_cnt);
	gcpu->gcpu_shared = sp;

	return (0);
}

/*
 * Undo the configuration performed by gcpu_init().
 */
void
gcpu_fini(cmi_hdl_t hdl)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
	struct gcpu_chipshared *sp;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return;

	gcpu_mca_fini(hdl);

	/*
	 * Keep shared data in cache for reuse.
	 */
	sp = gcpu_shared[chipid];
	ASSERT(sp != NULL);
	atomic_dec_32(&sp->gcpus_actv_cnt);

	if (gcpu != NULL)
		kmem_free(gcpu, sizeof (gcpu_data_t));

	/* Release reference count held in gcpu_init(). */
	cmi_hdl_rele(hdl);
}

void
gcpu_post_startup(cmi_hdl_t hdl)
{
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);

	if (gcpu_disable)
		return;

	if (gcpu != NULL)
		cms_post_startup(hdl);
#ifdef __xpv
	/*
	 * All cpu handles are initialized so we can begin polling now.
	 * Furthermore, our virq mechanism requires that everything be run
	 * on cpu 0, which we can ensure by starting polling from here.
	 */
	gcpu_mca_poll_start(hdl);
#endif
}

void
gcpu_post_mpstartup(cmi_hdl_t hdl)
{
	if (gcpu_disable)
		return;

	cms_post_mpstartup(hdl);

#ifndef __xpv
	/*
	 * All cpu handles are initialized only once all cpus are started, so
	 * we can begin polling post mp startup.
	 */
	gcpu_mca_poll_start(hdl);
#endif
}

const char *
gcpu_ident(cmi_hdl_t hdl)
{
	uint_t chipid;
	struct gcpu_chipshared *sp;

	if (gcpu_disable)
		return (NULL);

	chipid = cmi_hdl_chipid(hdl);
	if (chipid >= GCPU_MAX_CHIPID)
		return (NULL);

	if (cmi_hdl_getcmidata(hdl) == NULL)
		return (NULL);

	sp = gcpu_shared[chipid];
	return (sp->gcpus_ident);
}

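/*
 * GCPU_OP() selects, at compile time, between the native entry point and
 * its Xen (__xpv) counterpart; operations with no counterpart in a given
 * environment are stubbed out with NULL in the ops vector below.
 */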
#ifdef __xpv
#define	GCPU_OP(ntvop, xpvop)	xpvop
#else
#define	GCPU_OP(ntvop, xpvop)	ntvop
#endif

cmi_api_ver_t _cmi_api_version = CMI_API_VERSION_3;

const cmi_ops_t _cmi_ops = {
	gcpu_init,				/* cmi_init */
	gcpu_post_startup,			/* cmi_post_startup */
	gcpu_post_mpstartup,			/* cmi_post_mpstartup */
	gcpu_faulted_enter,			/* cmi_faulted_enter */
	gcpu_faulted_exit,			/* cmi_faulted_exit */
	gcpu_mca_init,				/* cmi_mca_init */
	GCPU_OP(gcpu_mca_trap, NULL),		/* cmi_mca_trap */
	GCPU_OP(gcpu_cmci_trap, NULL),		/* cmi_cmci_trap */
	gcpu_msrinject,				/* cmi_msrinject */
	GCPU_OP(gcpu_hdl_poke, NULL),		/* cmi_hdl_poke */
	gcpu_fini,				/* cmi_fini */
	GCPU_OP(NULL, gcpu_xpv_panic_callback),	/* cmi_panic_callback */
	gcpu_ident				/* cmi_ident */
};

static struct modlcpu modlcpu = {
	&mod_cpuops,
	"Generic x86 CPU Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcpu,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}