/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2018, Joyent, Inc.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright 2020 RackTop Systems, Inc.
 */

/*
 * Generic x86 CPU Module
 *
 * This CPU module is used for generic x86 CPUs when Solaris has no other
 * CPU-specific support module available.  Code in this module should be the
 * absolute bare-bones support and must be cognizant of Intel, AMD, and any
 * other x86 vendor.
 */

#include <sys/types.h>
#include <sys/cpu_module_impl.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/pghw.h>
#include <sys/x86_archext.h>

#include "gcpu.h"

/*
 * When set non-zero, prevents generic cpu support from configuring itself;
 * gcpu_init() will fail with ENOTSUP.
 */
int gcpu_disable = 0;

#define	GCPU_MAX_CHIPID		32
static struct gcpu_chipshared *gcpu_shared[GCPU_MAX_CHIPID];
#ifdef	DEBUG
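/*
 * On DEBUG kernels these can be patched (e.g. with a kernel debugger) to
 * suppress or override the synthetic chip identity; see gcpu_init_ident().
 */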
int gcpu_id_disable = 0;
static const char *gcpu_id_override[GCPU_MAX_CHIPID] = { NULL };
#endif

#ifndef	__xpv

/*
 * The purpose of this is to construct a unique identifier for a given
 * processor that can be used by things like FMA to determine when a FRU has
 * been replaced. It is supported on Intel Xeon platforms since Ivy Bridge and
 * on AMD family 17h processors since Rome. See cpuid_pass1_ppin() for how we
 * determine whether a CPU is supported.
 *
 * The protected processor inventory number (PPIN) can be used to create a
 * unique identifier when combined with the processor's cpuid signature. We
 * create a versioned, synthetic ID using the following scheme for the
 * identifier: iv0-<vendor>-<signature>-<PPIN>. The iv0 is the illumos version
 * zero of the ID. If we have a new scheme for a new generation of processors,
 * then that should rev the version field; otherwise, for a given processor,
 * this synthetic ID should not change.
 *
 * We use the string "INTC" for Intel and "AMD" for AMD. Neither these strings
 * nor the formatting of the values may change without revving the version.
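 *
 * For illustration (hypothetical values): an Intel processor with cpuid
 * signature 0x50654 and a PPIN of 0x1234abcd5678ef90 would be identified as
 * "iv0-INTC-50654-1234abcd5678ef90".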
 */
static char *
gcpu_init_ident_ppin(cmi_hdl_t hdl)
{
	uint_t ppin_ctl_msr, ppin_msr;
	uint64_t value;
	const char *vendor;

	/*
	 * Determine the vendor-specific PPIN control and data MSRs. Extend
	 * this switch as additional vendors gain PPIN support.
	 */
	switch (cmi_hdl_vendor(hdl)) {
	case X86_VENDOR_Intel:
		ppin_ctl_msr = MSR_PPIN_CTL_INTC;
		ppin_msr = MSR_PPIN_INTC;
		vendor = "INTC";
		break;
	case X86_VENDOR_AMD:
		ppin_ctl_msr = MSR_PPIN_CTL_AMD;
		ppin_msr = MSR_PPIN_AMD;
		vendor = "AMD";
		break;
	default:
		return (NULL);
	}

	if (cmi_hdl_rdmsr(hdl, ppin_ctl_msr, &value) != CMI_SUCCESS) {
		return (NULL);
	}

	/*
	 * If the PPIN is not enabled and not locked, attempt to enable it.
	 * Note: in some environments such as Amazon EC2 the PPIN appears
	 * to be disabled and unlocked but our attempts to enable it don't
	 * stick, and when we attempt to read the PPIN we get an uncaught
	 * #GP. To avoid that happening we read the MSR back and verify it
	 * has taken the new value.
	 */
	if ((value & MSR_PPIN_CTL_ENABLED) == 0) {
		if ((value & MSR_PPIN_CTL_LOCKED) != 0) {
			return (NULL);
		}

		if (cmi_hdl_wrmsr(hdl, ppin_ctl_msr, MSR_PPIN_CTL_ENABLED) !=
		    CMI_SUCCESS) {
			return (NULL);
		}

		if (cmi_hdl_rdmsr(hdl, ppin_ctl_msr, &value) != CMI_SUCCESS) {
			return (NULL);
		}

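		/*
		 * If the enable bit still reads back clear, the write did not
		 * take effect (see the note above); give up rather than risk
		 * a #GP when reading the PPIN itself.
		 */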
		if ((value & MSR_PPIN_CTL_ENABLED) == 0) {
			return (NULL);
		}
	}

	if (cmi_hdl_rdmsr(hdl, ppin_msr, &value) != CMI_SUCCESS) {
		return (NULL);
	}

	/*
	 * Now that we've read data, lock the PPIN. Don't worry about success
	 * or failure of this part, as we will have gotten everything that we
	 * need. It is possible that it locked open, for example.
	 */
	if (cmi_hdl_wrmsr(hdl, ppin_ctl_msr, MSR_PPIN_CTL_DISABLED) ==
	    CMI_SUCCESS) {
		(void) cmi_hdl_wrmsr(hdl, ppin_ctl_msr, MSR_PPIN_CTL_LOCKED);
	}

	return (kmem_asprintf("iv0-%s-%x-%llx", vendor, cmi_hdl_chipsig(hdl),
	    value));
}
#endif	/* __xpv */

static void
gcpu_init_ident(cmi_hdl_t hdl, struct gcpu_chipshared *sp)
{
#ifdef	DEBUG
	uint_t chipid;

	/*
	 * On debug, allow a developer to override the string to more
	 * easily test CPU autoreplace without needing to physically
	 * replace a CPU.
	 */
	if (gcpu_id_disable != 0) {
		return;
	}

	chipid = cmi_hdl_chipid(hdl);
	if (gcpu_id_override[chipid] != NULL) {
		sp->gcpus_ident = strdup(gcpu_id_override[chipid]);
		return;
	}
#endif

#ifndef __xpv
	if (is_x86_feature(x86_featureset, X86FSET_PPIN)) {
		sp->gcpus_ident = gcpu_init_ident_ppin(hdl);
	}
#endif	/* __xpv */
}

/*
 * Our cmi_init entry point, called during startup of each cpu instance.
 */
int
gcpu_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	struct gcpu_chipshared *sp, *osp;
	gcpu_data_t *gcpu;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return (ENOTSUP);

	/*
	 * Allocate the state structure for this cpu.  We will only
	 * allocate the bank logout areas in gcpu_mca_init once we
	 * know how many banks there are.
	 */
	gcpu = *datap = kmem_zalloc(sizeof (gcpu_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in gcpu_fini */
	gcpu->gcpu_hdl = hdl;

	/*
	 * Allocate a chipshared structure if no sibling cpu has already
	 * allocated it, but allow for the fact that a sibling core may
	 * be starting up in parallel.
	 */
	if ((sp = gcpu_shared[chipid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct gcpu_chipshared), KM_SLEEP);
		mutex_init(&sp->gcpus_poll_lock, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sp->gcpus_cfglock, NULL, MUTEX_DRIVER, NULL);
		osp = atomic_cas_ptr(&gcpu_shared[chipid], NULL, sp);
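		/*
		 * If another sibling core installed its structure first,
		 * discard ours and share theirs; otherwise ours is now the
		 * shared structure and we construct its identity.
		 */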
		if (osp != NULL) {
			mutex_destroy(&sp->gcpus_cfglock);
			mutex_destroy(&sp->gcpus_poll_lock);
			kmem_free(sp, sizeof (struct gcpu_chipshared));
			sp = osp;
		} else {
			gcpu_init_ident(hdl, sp);
		}
	}

	atomic_inc_32(&sp->gcpus_actv_cnt);
	gcpu->gcpu_shared = sp;

	return (0);
}

/*
 * Our cmi_fini entry point: undo the configuration performed by gcpu_init().
 */
void
gcpu_fini(cmi_hdl_t hdl)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
	struct gcpu_chipshared *sp;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return;

	gcpu_mca_fini(hdl);

	/*
	 * Keep the chipshared structure cached in gcpu_shared[] for reuse if
	 * a CPU on this chip is configured again.
	 */
	sp = gcpu_shared[chipid];
	ASSERT(sp != NULL);
	atomic_dec_32(&sp->gcpus_actv_cnt);

	if (gcpu != NULL)
		kmem_free(gcpu, sizeof (gcpu_data_t));

	/* Release reference count held in gcpu_init(). */
	cmi_hdl_rele(hdl);
}

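/*
 * Our cmi_post_startup entry point (see the _cmi_ops table below).
 */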
void
gcpu_post_startup(cmi_hdl_t hdl)
{
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);

	if (gcpu_disable)
		return;

	if (gcpu != NULL)
		cms_post_startup(hdl);
#ifdef __xpv
	/*
	 * All cpu handles are initialized so we can begin polling now.
	 * Furthermore, our virq mechanism requires that everything be run
	 * on cpu 0, so we can ensure that by starting from here.
	 */
	gcpu_mca_poll_start(hdl);
#else
	/*
	 * The boot CPU has a bit of a chicken and egg problem for CMCI. Its MCA
	 * initialization is run before we have initialized the PSM module that
	 * we would use for enabling CMCI. Therefore, we use this as a chance to
	 * enable CMCI for the boot CPU. For all other CPUs, this chicken and
	 * egg problem will have already been solved.
	 */
	gcpu_mca_cmci_enable(hdl);
#endif
}

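/*
 * Our cmi_post_mpstartup entry point (see the _cmi_ops table below).
 */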
void
gcpu_post_mpstartup(cmi_hdl_t hdl)
{
	if (gcpu_disable)
		return;

	cms_post_mpstartup(hdl);

#ifndef __xpv
	/*
	 * All cpu handles are initialized only once all cpus are started, so we
	 * can begin polling post mp startup.
	 */
	gcpu_mca_poll_start(hdl);
#endif
}

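/*
 * Our cmi_ident entry point: return the synthetic identity string, if any,
 * constructed for the chip backing this handle.
 */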
const char *
gcpu_ident(cmi_hdl_t hdl)
{
	uint_t chipid;
	struct gcpu_chipshared *sp;

	if (gcpu_disable)
		return (NULL);

	chipid = cmi_hdl_chipid(hdl);
	if (chipid >= GCPU_MAX_CHIPID)
		return (NULL);

	if (cmi_hdl_getcmidata(hdl) == NULL)
		return (NULL);

	sp = gcpu_shared[cmi_hdl_chipid(hdl)];
	return (sp->gcpus_ident);
}

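/*
 * GCPU_OP selects, at compile time, either the native or the Xen (__xpv)
 * implementation of an entry point for the _cmi_ops table below.
 */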
#ifdef __xpv
#define	GCPU_OP(ntvop, xpvop)	xpvop
#else
#define	GCPU_OP(ntvop, xpvop)	ntvop
#endif

cmi_api_ver_t _cmi_api_version = CMI_API_VERSION_3;

const cmi_ops_t _cmi_ops = {
	gcpu_init,				/* cmi_init */
	gcpu_post_startup,			/* cmi_post_startup */
	gcpu_post_mpstartup,			/* cmi_post_mpstartup */
	gcpu_faulted_enter,			/* cmi_faulted_enter */
	gcpu_faulted_exit,			/* cmi_faulted_exit */
	gcpu_mca_init,				/* cmi_mca_init */
	GCPU_OP(gcpu_mca_trap, NULL),		/* cmi_mca_trap */
	GCPU_OP(gcpu_cmci_trap, NULL),		/* cmi_cmci_trap */
	gcpu_msrinject,				/* cmi_msrinject */
	GCPU_OP(gcpu_hdl_poke, NULL),		/* cmi_hdl_poke */
	gcpu_fini,				/* cmi_fini */
	GCPU_OP(NULL, gcpu_xpv_panic_callback),	/* cmi_panic_callback */
	gcpu_ident				/* cmi_ident */
};

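/*
 * Standard loadable module linkage for a CPU module.
 */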
static struct modlcpu modlcpu = {
	&mod_cpuops,
	"Generic x86 CPU Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcpu,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}