/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2018, Joyent, Inc.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * Native MCA polling.  We establish an omnipresent cyclic to fire on all
 * online cpus to check their MCA state and log any valid errors for
 * diagnosis.
 */

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/cyclic.h>
#include <sys/x86_archext.h>
#include <sys/mca_x86.h>

#include "gcpu.h"

hrtime_t gcpu_mca_poll_interval = NANOSEC * 10ULL;	/* tuneable */
static cyclic_id_t gcpu_mca_poll_cycid;
static volatile uint_t gcpu_mca_poll_inits;
extern int gcpu_poll_trace_always;
extern uint_t gcpu_poll_trace_nent;

/*
 * Return nonzero if the given handle should poll the MCH.  We stick with
 * the same handle as before unless the timestamp has not been updated
 * for a while.  There is no need to keep a hold on the mch_poll_owner
 * handle.
 */

static kmutex_t mch_poll_lock;
static hrtime_t mch_poll_timestamp;
static cmi_hdl_t mch_poll_owner;

static int
mch_pollowner(cmi_hdl_t hdl)
{
	hrtime_t now = gethrtime_waitfree();
	int dopoll = 0;

	mutex_enter(&mch_poll_lock);
	if (now - mch_poll_timestamp > 2 * gcpu_mca_poll_interval ||
	    mch_poll_timestamp == 0) {
		mch_poll_owner = hdl;
		dopoll = 1;
	} else if (mch_poll_owner == hdl) {
		dopoll = 1;
	}

	if (dopoll)
		mch_poll_timestamp = now;

	mutex_exit(&mch_poll_lock);
	return (dopoll);
}


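/*
 * Poll the MCA banks of the cpu identified by the given handle and log
 * out any valid errors.  Callers must hold the shared gcpus_poll_lock.
 * On the first poll we enable CMCI on any banks that advertise the
 * capability; on the first cyclic poll after unfaulting a cpu we simply
 * clear the bank status registers and return.
 */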
static void
gcpu_ntv_mca_poll(cmi_hdl_t hdl, int what)
{
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
	gcpu_mca_t *mca = &gcpu->gcpu_mca;
	gcpu_mce_status_t mce;
	int willpanic;
	uint64_t bankmask;

	ASSERT(MUTEX_HELD(&gcpu->gcpu_shared->gcpus_poll_lock));

	/* Enable CMCI on the first poll if it is supported */
	if ((mca->gcpu_mca_flags & GCPU_MCA_F_CMCI_ENABLE) != 0 &&
	    (!mca->gcpu_mca_first_poll_cmci_enabled)) {
		int i;
		uint64_t ctl2;

		for (i = 0; i < mca->gcpu_mca_nbanks; i++) {
			if (mca->gcpu_bank_cmci[i].cmci_cap) {
				(void) cmi_hdl_rdmsr(hdl, IA32_MSR_MC_CTL2(i),
				    &ctl2);
				ctl2 |= MSR_MC_CTL2_EN;
				(void) cmi_hdl_wrmsr(hdl, IA32_MSR_MC_CTL2(i),
				    ctl2);
				mca->gcpu_bank_cmci[i].cmci_enabled = 1;
			}
		}
		mca->gcpu_mca_first_poll_cmci_enabled = 1;
	}

	if (mca->gcpu_mca_flags & GCPU_MCA_F_UNFAULTING) {
		int i;

		mca->gcpu_mca_flags &= ~GCPU_MCA_F_UNFAULTING;
		gcpu_poll_trace(&gcpu->gcpu_mca.gcpu_polltrace,
		    GCPU_MPT_WHAT_UNFAULTING, 0);

		/*
		 * On the first cyclic poll after unfaulting a CPU we
		 * clear the status registers; see gcpu_faulted_exit
		 * for details.  We don't do this if the poll was
		 * initiated manually (presumably from some injection
		 * activity).
		 */
		if (what == GCPU_MPT_WHAT_CYC_ERR) {
			for (i = 0; i < mca->gcpu_mca_nbanks; i++) {
				(void) cmi_hdl_wrmsr(hdl,
				    IA32_MSR_MC(i, STATUS), 0ULL);
			}
			return;
		}
	}

	/*
	 * Log out errors from the MCA banks of this cpu.
	 */
	if (what == GCPU_MPT_WHAT_CMCI_ERR) {
		/*
		 * For CMCI, all banks should be scanned for logout.
		 */
		bankmask = -1ULL;
	} else {
		bankmask = cms_poll_ownermask(hdl, gcpu_mca_poll_interval);
	}
	gcpu_mca_logout(hdl, NULL, bankmask, &mce, B_TRUE, what);

	if (mce.mce_nerr != 0)
		gcpu_poll_trace(&gcpu->gcpu_mca.gcpu_polltrace, what,
		    mce.mce_nerr);

	mca->gcpu_mca_lastpoll = gethrtime_waitfree();

	willpanic = mce.mce_disp & CMI_ERRDISP_FORCEFATAL && cmi_panic_on_ue();

	if (what != GCPU_MPT_WHAT_CMCI_ERR) {
		/*
		 * Call into the memory-controller driver, which may report
		 * some errors not visible under the MCA (for an off-chip NB).
		 * Since there is typically a single MCH we arrange that just
		 * one cpu performs this task at each cyclic fire.
		 */
		if (mch_pollowner(hdl))
			cmi_mc_logout(hdl, 0, willpanic);
	}

	/*
	 * In the common case any polled error is considered non-fatal,
	 * even if it indicates PCC or UC etc.  The only condition on which
	 * we will panic for a polled error is if model-specific support
	 * forces the error to be terminal regardless of how it is
	 * encountered.
	 */
	if (willpanic) {
#ifdef DEBUG
		cmn_err(CE_WARN, "MCA Poll: %u errors, disp=0x%llx, "
		    "%u PCC (%u ok), "
		    "%u UC (%u ok, %u poisoned), "
		    "%u forcefatal, %u ignored",
		    mce.mce_nerr, (u_longlong_t)mce.mce_disp,
		    mce.mce_npcc, mce.mce_npcc_ok,
		    mce.mce_nuc, mce.mce_nuc_ok, mce.mce_nuc_poisoned,
		    mce.mce_forcefatal, mce.mce_ignored);

#endif
		fm_panic("Unrecoverable Machine-Check Exception (Polled)");
	}
}

/*
 * See gcpu_mca_trap for an explanation of why preemption is disabled here.
 * Note that we disable preemption and then contend for an adaptive mutex -
 * we could block during the mutex operation, but once we return with the
 * mutex held we must perform no operation that can block and we cannot
 * be preempted so we will stay on cpu for the duration.  The disabling
 * of preemption also means we cannot migrate cpus once we have returned
 * with the mutex held - cyclic invocations can't migrate, anyway, but
 * others could if they have failed to bind before this point.
 */
static void
gcpu_ntv_mca_poll_wrapper(cmi_hdl_t hdl, int what)
{
	gcpu_data_t *gcpu;

	if (hdl == NULL || (gcpu = cmi_hdl_getcmidata(hdl)) == NULL ||
	    gcpu->gcpu_mca.gcpu_mca_lgsz == 0)
		return;

	kpreempt_disable();
	mutex_enter(&gcpu->gcpu_shared->gcpus_poll_lock);
	gcpu_ntv_mca_poll(hdl, what);
	mutex_exit(&gcpu->gcpu_shared->gcpus_poll_lock);
	kpreempt_enable();
}

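/*
 * Omni cyclic handler.  The cyclic argument is the handle looked up in
 * the online handler below; it may be NULL, in which case the wrapper
 * does nothing.
 */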
static void
gcpu_ntv_mca_poll_cyclic(void *arg)
{
	gcpu_ntv_mca_poll_wrapper((cmi_hdl_t)arg, GCPU_MPT_WHAT_CYC_ERR);
}

/*ARGSUSED*/
static void
gcpu_ntv_mca_poll_online(void *arg, cpu_t *cp, cyc_handler_t *cyh,
    cyc_time_t *cyt)
{
	cmi_hdl_t hdl;

	/*
	 * Look up and hold a handle for this cpu (any hold released in
	 * our offline function).  If we chose not to initialize a handle
	 * for this cpu back at cmi_init time then this lookup will return
	 * NULL, so the cyh_func we appoint must be prepared for that.
	 */
	hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(cp),
	    cmi_ntv_hwcoreid(cp), cmi_ntv_hwstrandid(cp));

	cyt->cyt_when = 0;
	cyt->cyt_interval = gcpu_mca_poll_interval;
	cyh->cyh_func = gcpu_ntv_mca_poll_cyclic;
	cyh->cyh_arg = (void *)hdl;
	cyh->cyh_level = CY_LOW_LEVEL;
}

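/*
 * Omni cyclic offline handler - release the handle hold taken in the
 * online handler, if any.
 */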
/*ARGSUSED*/
static void
gcpu_ntv_mca_poll_offline(void *arg, cpu_t *cpu, void *cyh_arg)
{
	cmi_hdl_t hdl = (cmi_hdl_t)cyh_arg;

	if (hdl != NULL)
		cmi_hdl_rele(hdl);
}

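/*
 * Establish the omnipresent polling cyclic.  We do nothing if polling
 * has been disabled by zeroing gcpu_mca_poll_interval or if no cpu has
 * completed gcpu_mca_poll_init.
 */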
static void
gcpu_ntv_mca_poll_start(void)
{
	cyc_omni_handler_t cyo;

	if (gcpu_mca_poll_interval == 0 || gcpu_mca_poll_inits == 0)
		return;

	cyo.cyo_online = gcpu_ntv_mca_poll_online;
	cyo.cyo_offline = gcpu_ntv_mca_poll_offline;
	cyo.cyo_arg = NULL;

	mutex_enter(&cpu_lock);
	gcpu_mca_poll_cycid = cyclic_add_omni(&cyo);
	mutex_exit(&cpu_lock);
}

/*
 * gcpu_mca_poll_init is called from gcpu_mca_init for each cpu handle
 * that we initialize for.  It should prepare for polling by allocating
 * control structures and the like, but must not kick polling off yet.
 */

void
gcpu_mca_poll_init(cmi_hdl_t hdl)
{
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
	gcpu_poll_trace_ctl_t *ptc = &gcpu->gcpu_mca.gcpu_polltrace;

	ASSERT(cmi_hdl_class(hdl) == CMI_HDL_NATIVE);

	gcpu_poll_trace_init(ptc);

	atomic_inc_uint(&gcpu_mca_poll_inits);
}

/* Undo the initialization performed by gcpu_mca_poll_init() */
void
gcpu_mca_poll_fini(cmi_hdl_t hdl)
{
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
	gcpu_poll_trace_ctl_t *ptc = &gcpu->gcpu_mca.gcpu_polltrace;

	ASSERT(cmi_hdl_class(hdl) == CMI_HDL_NATIVE);

	if (gcpu_poll_trace_always && (ptc->mptc_tbufs != NULL)) {
		kmem_free(ptc->mptc_tbufs, sizeof (gcpu_poll_trace_t) *
		    gcpu_poll_trace_nent);
	}

	atomic_dec_uint(&gcpu_mca_poll_inits);
}

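/*
 * Kick polling off for a native handle by establishing the polling
 * cyclic.
 */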
void
gcpu_mca_poll_start(cmi_hdl_t hdl)
{
	ASSERT(cmi_hdl_class(hdl) == CMI_HDL_NATIVE);
	gcpu_ntv_mca_poll_start();
}

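/*
 * Perform an immediate manual poll of the given handle, for example in
 * response to error injection activity.
 */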
void
gcpu_hdl_poke(cmi_hdl_t hdl)
{
	ASSERT(cmi_hdl_class(hdl) == CMI_HDL_NATIVE);
	gcpu_ntv_mca_poll_wrapper(hdl, GCPU_MPT_WHAT_POKE_ERR);
}

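/*
 * Entry point for CMCI handling - poll the cpu identified by the given
 * handle; a CMCI-initiated poll scans all banks.
 */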
void
gcpu_cmci_trap(cmi_hdl_t hdl)
{
	gcpu_ntv_mca_poll_wrapper(hdl, GCPU_MPT_WHAT_CMCI_ERR);
}