/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * Generic x86 CPU Module
 *
 * This CPU module is used for generic x86 CPUs when Solaris has no other
 * CPU-specific support module available.  Code in this module should be the
 * absolute bare-bones support and must be cognizant of Intel, AMD and any
 * other x86 vendor alike.
 */

#include <sys/types.h>
#include <sys/cpu_module_impl.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/pghw.h>

#include "gcpu.h"

/*
 * Set to nonzero to prevent generic cpu support from loading.
 */
int gcpu_disable = 0;

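/*
 * Per-chip shared state, indexed by chip id.  The first core of a chip
 * to pass through gcpu_init() allocates the entry; sibling cores then
 * share it.
 */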
#define	GCPU_MAX_CHIPID		32
static struct gcpu_chipshared *gcpu_shared[GCPU_MAX_CHIPID];

/*
 * Our cmi_init entry point, called during startup of each cpu instance.
 */
int
gcpu_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	struct gcpu_chipshared *sp, *osp;
	gcpu_data_t *gcpu;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return (ENOTSUP);

	/*
	 * Allocate the state structure for this cpu.  We will only
	 * allocate the bank logout areas in gcpu_mca_init once we
	 * know how many banks there are.
	 */
	gcpu = *datap = kmem_zalloc(sizeof (gcpu_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in gcpu_fini */
	gcpu->gcpu_hdl = hdl;

	/*
	 * Allocate a chipshared structure if no sibling cpu has already
	 * allocated it, but allow for the fact that a sibling core may
	 * be starting up in parallel.
	 */
	if ((sp = gcpu_shared[chipid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct gcpu_chipshared), KM_SLEEP);
		mutex_init(&sp->gcpus_poll_lock, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sp->gcpus_cfglock, NULL, MUTEX_DRIVER, NULL);
		osp = atomic_cas_ptr(&gcpu_shared[chipid], NULL, sp);
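		/* A sibling core won the race; use its copy and discard ours. */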
		if (osp != NULL) {
			mutex_destroy(&sp->gcpus_cfglock);
			mutex_destroy(&sp->gcpus_poll_lock);
			kmem_free(sp, sizeof (struct gcpu_chipshared));
			sp = osp;
		}
	}

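	/* Record this core as an active user of the chip-shared state. */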
	atomic_inc_32(&sp->gcpus_actv_cnt);
	gcpu->gcpu_shared = sp;

	return (0);
}

/*
 * Our cmi_fini entry point: undo the per-cpu setup done in gcpu_init().
 */
void
gcpu_fini(cmi_hdl_t hdl)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
	struct gcpu_chipshared *sp;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return;

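	/* Tear down the MCA state set up in gcpu_mca_init(). */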
	gcpu_mca_fini(hdl);

	/*
	 * The chip-shared data is deliberately not freed here; keep it
	 * around for reuse by cpus on this chip.
	 */
	sp = gcpu_shared[chipid];
	ASSERT(sp != NULL);
	atomic_dec_32(&sp->gcpus_actv_cnt);

	if (gcpu != NULL)
		kmem_free(gcpu, sizeof (gcpu_data_t));

	/* Release reference count held in gcpu_init(). */
	cmi_hdl_rele(hdl);
}

void
gcpu_post_startup(cmi_hdl_t hdl)
{
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);

	if (gcpu_disable)
		return;

	if (gcpu != NULL)
		cms_post_startup(hdl);
#ifdef __xpv
	/*
	 * All cpu handles are initialized so we can begin polling now.
	 * Furthermore, our virq mechanism requires that everything be
	 * run on cpu 0, so we ensure that by starting the poller from
	 * here.
	 */
	gcpu_mca_poll_start(hdl);
#endif
}

void
gcpu_post_mpstartup(cmi_hdl_t hdl)
{
	if (gcpu_disable)
		return;

	cms_post_mpstartup(hdl);

#ifndef __xpv
	/*
	 * All cpu handles are initialized only once all cpus are
	 * started, so we can begin polling post mp startup.
	 */
	gcpu_mca_poll_start(hdl);
#endif
}

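/*
 * GCPU_OP selects the native or paravirtualized (__xpv) implementation
 * of an operation for the cmi ops table below.
 */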
#ifdef __xpv
#define	GCPU_OP(ntvop, xpvop)	xpvop
#else
#define	GCPU_OP(ntvop, xpvop)	ntvop
#endif

cmi_api_ver_t _cmi_api_version = CMI_API_VERSION_3;

const cmi_ops_t _cmi_ops = {
	gcpu_init,				/* cmi_init */
	gcpu_post_startup,			/* cmi_post_startup */
	gcpu_post_mpstartup,			/* cmi_post_mpstartup */
	gcpu_faulted_enter,			/* cmi_faulted_enter */
	gcpu_faulted_exit,			/* cmi_faulted_exit */
	gcpu_mca_init,				/* cmi_mca_init */
	GCPU_OP(gcpu_mca_trap, NULL),		/* cmi_mca_trap */
	GCPU_OP(gcpu_cmci_trap, NULL),		/* cmi_cmci_trap */
	gcpu_msrinject,				/* cmi_msrinject */
	GCPU_OP(gcpu_hdl_poke, NULL),		/* cmi_hdl_poke */
	gcpu_fini,				/* cmi_fini */
	GCPU_OP(NULL, gcpu_xpv_panic_callback),	/* cmi_panic_callback */
};

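/*
 * Standard kernel module linkage for a cpu module.
 */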
static struct modlcpu modlcpu = {
	&mod_cpuops,
	"Generic x86 CPU Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcpu,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}