/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Ereport-handling routines for CPU errors
 */

#include <cmd_cpu.h>
#include <cmd.h>

#include <strings.h>
#include <string.h>
#include <errno.h>
#include <fm/fmd_api.h>
#include <sys/fm/protocol.h>
#include <sys/async.h>
#ifdef sun4u
#include <sys/fm/cpu/UltraSPARC-III.h>
#include <cmd_Lxcache.h>
#include <cmd_opl.h>
#endif

/*
 * We follow the same algorithm for handling all L1$, TLB, and L2/L3 cache
 * tag events, so we have one common routine into which each handler calls.
 * The two tests of (strcmp(serdnm, "") != 0) eliminate the need for a
 * separate macro for UEs, which override the SERD-engine counting of CEs
 * leading to the same fault.
 */
/*ARGSUSED9*/
static cmd_evdisp_t
cmd_cpuerr_common(fmd_hdl_t *hdl, fmd_event_t *ep, cmd_cpu_t *cpu,
    cmd_case_t *cc, cmd_ptrsubtype_t pstype, const char *serdnm,
    const char *serdn, const char *serdt, const char *fltnm,
    cmd_errcl_t clcode)
{
	const char *uuid;

	if (cc->cc_cp != NULL && fmd_case_solved(hdl, cc->cc_cp))
		return (CMD_EVD_REDUND);

	if (cc->cc_cp == NULL) {
		cc->cc_cp = cmd_case_create(hdl, &cpu->cpu_header, pstype,
		    &uuid);
		if (strcmp(serdnm, "") != 0) {
			cc->cc_serdnm = cmd_cpu_serdnm_create(hdl, cpu,
			    serdnm);
			fmd_serd_create(hdl, cc->cc_serdnm,
			    fmd_prop_get_int32(hdl, serdn),
			    fmd_prop_get_int64(hdl, serdt));
		}
	}

	if (strcmp(serdnm, "") != 0) {
		fmd_hdl_debug(hdl, "adding event to %s\n", cc->cc_serdnm);
		if (fmd_serd_record(hdl, cc->cc_serdnm, ep) == FMD_B_FALSE)
			return (CMD_EVD_OK); /* serd engine hasn't fired yet */

		fmd_case_add_serd(hdl, cc->cc_cp, cc->cc_serdnm);
	} else {
		if (cc->cc_serdnm != NULL) {
			fmd_hdl_debug(hdl,
			    "destroying existing %s state for class %x\n",
			    cc->cc_serdnm, clcode);
			fmd_serd_destroy(hdl, cc->cc_serdnm);
			fmd_hdl_strfree(hdl, cc->cc_serdnm);
			cc->cc_serdnm = NULL;
		}
		fmd_case_reset(hdl, cc->cc_cp);
		fmd_case_add_ereport(hdl, cc->cc_cp, ep);
	}

	cmd_cpu_create_faultlist(hdl, cc->cc_cp, cpu, fltnm, NULL, 100);

	fmd_case_solve(hdl, cc->cc_cp);

	return (CMD_EVD_OK);
}
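
/*
 * A note on the serdn/serdt arguments above: the handler macros below
 * pass ntname "_n" and ntname "_t" (for example "l2tag_n" and
 * "l2tag_t"), so these name fmd properties whose values seed the SERD
 * engine via fmd_prop_get_int32()/fmd_prop_get_int64().  As an
 * illustrative sketch only -- the real defaults come from the module's
 * property table, and the values below are made up -- an override in
 * the module's .conf file might look like:
 *
 *	setprop l2tag_n 3
 *	setprop l2tag_t 12h
 */
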
#ifdef sun4u

#define	CMD_CPU_TAGHANDLER(name, casenm, ptr, ntname, fltname)	\
cmd_evdisp_t								\
cmd_##name(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,		\
    const char *class, cmd_errcl_t clcode)				\
{									\
	uint8_t level = clcode & CMD_ERRCL_LEVEL_EXTRACT;		\
	cmd_cpu_t *cpu;							\
									\
	clcode &= CMD_ERRCL_LEVEL_MASK;					\
	if ((cpu = cmd_cpu_lookup_from_detector(hdl, nvl, class,	\
	    level)) == NULL || cpu->cpu_faulting)			\
		return (CMD_EVD_UNUSED);				\
									\
	if ((strstr(class, "ultraSPARC-IVplus.l3-thce") != 0) ||	\
		(strstr(class, "ultraSPARC-IVplus.thce") != 0)) {	\
		return (cmd_us4plus_tag_err(hdl, ep, nvl, cpu,	\
		    ptr, ntname "_n", ntname "_t", fltname, clcode));	\
	}								\
	return (cmd_cpuerr_common(hdl, ep, cpu, &cpu->cpu_##casenm,	\
	    ptr, ntname, ntname "_n", ntname "_t", fltname, clcode));	\
}
#endif

#define	CMD_CPU_SIMPLEHANDLER(name, casenm, ptr, ntname, fltname)	\
cmd_evdisp_t								\
cmd_##name(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,		\
    const char *class, cmd_errcl_t clcode)				\
{									\
	uint8_t level = clcode & CMD_ERRCL_LEVEL_EXTRACT;		\
	cmd_cpu_t *cpu;							\
									\
	clcode &= CMD_ERRCL_LEVEL_MASK;					\
	if ((cpu = cmd_cpu_lookup_from_detector(hdl, nvl, class,	\
	    level)) == NULL || cpu->cpu_faulting)			\
		return (CMD_EVD_UNUSED);				\
									\
	return (cmd_cpuerr_common(hdl, ep, cpu, &cpu->cpu_##casenm,	\
	    ptr, ntname, ntname "_n", ntname "_t", fltname, clcode));	\
}
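
/*
 * For illustration only, here is roughly what one of the
 * CMD_CPU_SIMPLEHANDLER instantiations below expands to; this is a
 * sketch derived from the macro above, not separate code:
 *
 *	cmd_evdisp_t
 *	cmd_icache(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
 *	    const char *class, cmd_errcl_t clcode)
 *	{
 *		uint8_t level = clcode & CMD_ERRCL_LEVEL_EXTRACT;
 *		cmd_cpu_t *cpu;
 *
 *		clcode &= CMD_ERRCL_LEVEL_MASK;
 *		if ((cpu = cmd_cpu_lookup_from_detector(hdl, nvl, class,
 *		    level)) == NULL || cpu->cpu_faulting)
 *			return (CMD_EVD_UNUSED);
 *
 *		return (cmd_cpuerr_common(hdl, ep, cpu, &cpu->cpu_icache,
 *		    CMD_PTR_CPU_ICACHE, "icache", "icache_n", "icache_t",
 *		    "icache", clcode));
 *	}
 */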

#ifdef sun4u
CMD_CPU_TAGHANDLER(txce, l2tag, CMD_PTR_CPU_L2TAG, "l2tag", "l2cachetag")
CMD_CPU_TAGHANDLER(l3_thce, l3tag, CMD_PTR_CPU_L3TAG, "l3tag", "l3cachetag")
#else
CMD_CPU_SIMPLEHANDLER(txce, l2tag, CMD_PTR_CPU_L2TAG, "l2tag", "l2cachetag")
CMD_CPU_SIMPLEHANDLER(l3_thce, l3tag, CMD_PTR_CPU_L3TAG, "l3tag", "l3cachetag")
#endif
CMD_CPU_SIMPLEHANDLER(icache, icache, CMD_PTR_CPU_ICACHE, "icache", "icache")
CMD_CPU_SIMPLEHANDLER(dcache, dcache, CMD_PTR_CPU_DCACHE, "dcache", "dcache")
CMD_CPU_SIMPLEHANDLER(pcache, pcache, CMD_PTR_CPU_PCACHE, "pcache", "pcache")
CMD_CPU_SIMPLEHANDLER(itlb, itlb, CMD_PTR_CPU_ITLB, "itlb", "itlb")
CMD_CPU_SIMPLEHANDLER(dtlb, dtlb, CMD_PTR_CPU_DTLB, "dtlb", "dtlb")
CMD_CPU_SIMPLEHANDLER(irc, ireg, CMD_PTR_CPU_IREG, "ireg", "ireg")
CMD_CPU_SIMPLEHANDLER(frc, freg, CMD_PTR_CPU_FREG, "freg", "freg")
CMD_CPU_SIMPLEHANDLER(mau, mau, CMD_PTR_CPU_MAU, "mau", "mau")
CMD_CPU_SIMPLEHANDLER(miscregs_ce, misc_regs, CMD_PTR_CPU_MISC_REGS,
	"misc_regs", "misc_reg")
CMD_CPU_SIMPLEHANDLER(l2c, l2data, CMD_PTR_CPU_L2DATA, "l2data", "l2data-c")

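/*
 * The handlers below pass an empty serdnm.  In cmd_cpuerr_common() that
 * selects the non-SERD branch: any existing engine state is destroyed,
 * the case is reset, and the fault is created and solved on the first
 * event -- the UE/override behavior described at the top of this file.
 */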
CMD_CPU_SIMPLEHANDLER(fpu, fpu, CMD_PTR_CPU_FPU, "", "fpu")
CMD_CPU_SIMPLEHANDLER(l2ctl, l2ctl, CMD_PTR_CPU_L2CTL, "", "l2cachectl")
CMD_CPU_SIMPLEHANDLER(iru, ireg, CMD_PTR_CPU_IREG, "", "ireg")
CMD_CPU_SIMPLEHANDLER(fru, freg, CMD_PTR_CPU_FREG, "", "freg")
CMD_CPU_SIMPLEHANDLER(miscregs_ue, misc_regs, CMD_PTR_CPU_MISC_REGS,
	"", "misc_reg")
CMD_CPU_SIMPLEHANDLER(l2u, l2data, CMD_PTR_CPU_L2DATA, "", "l2data-u")
CMD_CPU_SIMPLEHANDLER(lfu_ue, lfu, CMD_PTR_CPU_LFU, "", "lfu-u")
CMD_CPU_SIMPLEHANDLER(lfu_ce, lfu, CMD_PTR_CPU_LFU, "", "lfu-f")
CMD_CPU_SIMPLEHANDLER(lfu_pe, lfu, CMD_PTR_CPU_LFU, "", "lfu-p")


#ifdef sun4u
/*
 * The following macro handles UEs and other CPU errors, covering the
 * cases both with and without a "resource" member in the ereport.
 *
 * If the "fltname" to be generated is "core", the sibling CPUs
 * within the core will be added to the suspect list.
 * If the "fltname" to be generated is "chip", the sibling CPUs
 * within the chip will be added to the suspect list.
 * If the "fltname" to be generated is "strand", only the strand
 * itself will be in the suspect list.
 */
#define	CMD_OPL_UEHANDLER(name, casenm, ptr, fltname, has_rsrc)		\
cmd_evdisp_t								\
cmd_##name(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,		\
    const char *class, cmd_errcl_t clcode)				\
{									\
	cmd_cpu_t *cpu;							\
	cmd_case_t *cc;							\
	cmd_evdisp_t rc;						\
	nvlist_t  *rsrc = NULL;						\
	uint8_t cpumask, version = 1;					\
	uint8_t lookup_rsrc = has_rsrc;					\
									\
	fmd_hdl_debug(hdl,						\
	    "Enter cmd_opl_ue_cpu for class %x\n", clcode);		\
									\
	if (lookup_rsrc) {						\
		if (nvlist_lookup_nvlist(nvl,				\
		    FM_EREPORT_PAYLOAD_NAME_RESOURCE, &rsrc) != 0)	\
			return (CMD_EVD_BAD);				\
									\
		if ((cpu = cmd_cpu_lookup(hdl, rsrc, class,		\
		    CMD_CPU_LEVEL_THREAD)) == NULL ||			\
		    cpu->cpu_faulting)					\
			return (CMD_EVD_UNUSED);			\
	} else {							\
		if ((cpu = cmd_cpu_lookup_from_detector(hdl, nvl, class,\
		    CMD_CPU_LEVEL_THREAD)) == NULL || cpu->cpu_faulting)\
			return (CMD_EVD_UNUSED);			\
									\
		(void) nvlist_lookup_nvlist(nvl,			\
		    FM_EREPORT_DETECTOR, &rsrc);			\
	}								\
									\
	if (nvlist_lookup_uint8(rsrc, FM_VERSION, &version) != 0 ||	\
	    version > FM_CPU_SCHEME_VERSION ||				\
	    nvlist_lookup_uint8(rsrc, FM_FMRI_CPU_MASK, &cpumask) != 0)	\
		return (CMD_EVD_BAD);					\
									\
	cc = &cpu->cpu_##casenm;					\
	rc = cmd_opl_ue_cpu(hdl, ep, class, fltname,			\
	    ptr, cpu, cc, cpumask);					\
	return (rc);							\
}
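
/*
 * For illustration only, a sketch of the macro's behavior rather than
 * separate code: CMD_OPL_UEHANDLER(oplmtlb, opl_mtlb, CMD_PTR_CPU_MTLB,
 * "core", 1) below generates cmd_oplmtlb(), which, because has_rsrc is
 * 1, pulls the "resource" FMRI from the ereport payload, resolves the
 * CPU from it at thread level, extracts the cpu-scheme version and
 * mask, and hands off to cmd_opl_ue_cpu() with the "core" fault name so
 * the core's sibling CPUs land in the suspect list.  Handlers declared
 * with has_rsrc set to 0 use the ereport detector FMRI instead.
 */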

/*
 * CPU errors without resource
 */
CMD_OPL_UEHANDLER(oplinv_urg, opl_inv_urg, CMD_PTR_CPU_UGESR_INV_URG, "core", 0)
CMD_OPL_UEHANDLER(oplcre, opl_cre, CMD_PTR_CPU_UGESR_CRE, "core", 0)
CMD_OPL_UEHANDLER(opltsb_ctx, opl_tsb_ctx, CMD_PTR_CPU_UGESR_TSB_CTX, "core", 0)
CMD_OPL_UEHANDLER(opltsbp, opl_tsbp, CMD_PTR_CPU_UGESR_TSBP, "core", 0)
CMD_OPL_UEHANDLER(oplpstate, opl_pstate, CMD_PTR_CPU_UGESR_PSTATE, "core", 0)
CMD_OPL_UEHANDLER(opltstate, opl_tstate, CMD_PTR_CPU_UGESR_TSTATE, "core", 0)
CMD_OPL_UEHANDLER(opliug_f, opl_iug_f, CMD_PTR_CPU_UGESR_IUG_F, "core", 0)
CMD_OPL_UEHANDLER(opliug_r, opl_iug_r, CMD_PTR_CPU_UGESR_IUG_R, "core", 0)
CMD_OPL_UEHANDLER(oplsdc, opl_sdc, CMD_PTR_CPU_UGESR_SDC, "chip", 0)
CMD_OPL_UEHANDLER(oplwdt, opl_wdt, CMD_PTR_CPU_UGESR_WDT, "core", 0)
CMD_OPL_UEHANDLER(opldtlb, opl_dtlb, CMD_PTR_CPU_UGESR_DTLB, "core", 0)
CMD_OPL_UEHANDLER(oplitlb, opl_itlb, CMD_PTR_CPU_UGESR_ITLB, "core", 0)
CMD_OPL_UEHANDLER(oplcore_err, opl_core_err, CMD_PTR_CPU_UGESR_CORE_ERR,
"core", 0)
CMD_OPL_UEHANDLER(opldae, opl_dae, CMD_PTR_CPU_UGESR_DAE, "core", 0)
CMD_OPL_UEHANDLER(opliae, opl_iae, CMD_PTR_CPU_UGESR_IAE, "core", 0)
CMD_OPL_UEHANDLER(opluge, opl_uge, CMD_PTR_CPU_UGESR_UGE, "core", 0)

/*
 * UEs with resource
 */
CMD_OPL_UEHANDLER(oplinv_sfsr, opl_invsfsr, CMD_PTR_CPU_INV_SFSR, "strand", 1)
CMD_OPL_UEHANDLER(opluecpu_detcpu, oplue_detcpu, CMD_PTR_CPU_UE_DET_CPU,
"core", 1)
CMD_OPL_UEHANDLER(opluecpu_detio, oplue_detio, CMD_PTR_CPU_UE_DET_IO, "core", 1)
CMD_OPL_UEHANDLER(oplmtlb, opl_mtlb, CMD_PTR_CPU_MTLB, "core", 1)
CMD_OPL_UEHANDLER(opltlbp, opl_tlbp, CMD_PTR_CPU_TLBP, "core", 1)
#endif	/* sun4u */

/*ARGSUSED*/
static void
cmd_nop_hdlr(fmd_hdl_t *hdl, cmd_xr_t *xr, fmd_event_t *ep)
{
	fmd_hdl_debug(hdl, "nop train resolved for clcode %llx\n",
	    xr->xr_clcode);
}

/*ARGSUSED*/
static void
cmd_xxu_hdlr(fmd_hdl_t *hdl, cmd_xr_t *xr, fmd_event_t *ep)
{
	const errdata_t *ed;
	cmd_cpu_t *cpu = xr->xr_cpu;
	cmd_case_t *cc;
	const char *uuid;
	nvlist_t *rsrc = NULL;

	cmd_fill_errdata(xr->xr_clcode, cpu, &cc, &ed);

	if (cpu->cpu_faulting) {
		CMD_STAT_BUMP(xxu_retr_flt);
		return;
	}

	if (cmd_afar_status_check(xr->xr_afar_status, xr->xr_clcode) < 0) {
		fmd_hdl_debug(hdl, "xxU dropped, afar not VALID\n");
		return;
	}

	if (cmd_cpu_synd_check(xr->xr_synd, xr->xr_clcode) < 0) {
		fmd_hdl_debug(hdl, "xxU/LDxU dropped due to syndrome\n");
		return;
	}

#ifdef sun4u
	/*
	 * The UE cache is needed on sun4u only, because sun4u doesn't
	 * poison uncorrectable data loaded into the L2/L3 cache.
	 */
	if (cmd_cpu_uec_match(xr->xr_cpu, xr->xr_afar)) {
		fmd_hdl_debug(hdl, "ue matched in UE cache\n");
		CMD_STAT_BUMP(xxu_ue_match);
		return;
	}
#endif /* sun4u */

	/*
	 * We didn't match in the UE cache.  We don't need to sleep for UE
	 * arrival, as we've already slept once for the train match.
	 */

	if (cc->cc_cp == NULL) {
		cc->cc_cp = cmd_case_create(hdl, &cpu->cpu_header, ed->ed_pst,
		    &uuid);
	} else if (cc->cc_serdnm != NULL) {
		fmd_hdl_debug(hdl, "destroying existing %s state\n",
		    cc->cc_serdnm);

		fmd_serd_destroy(hdl, cc->cc_serdnm);
		fmd_hdl_strfree(hdl, cc->cc_serdnm);
		cc->cc_serdnm = NULL;

		fmd_case_reset(hdl, cc->cc_cp);
	}

	if (xr->xr_rsrc_nvl != NULL && nvlist_dup(xr->xr_rsrc_nvl,
	    &rsrc, 0) != 0) {
		fmd_hdl_abort(hdl, "failed to duplicate resource FMRI for "
		    "%s fault", ed->ed_fltnm);
	}

	fmd_case_add_ereport(hdl, cc->cc_cp, ep);

	cmd_cpu_create_faultlist(hdl, cc->cc_cp, cpu, ed->ed_fltnm, rsrc, 100);
	nvlist_free(rsrc);
	fmd_case_solve(hdl, cc->cc_cp);
}

static void
cmd_xxc_hdlr(fmd_hdl_t *hdl, cmd_xr_t *xr, fmd_event_t *ep)
{
	const errdata_t *ed;
	cmd_cpu_t *cpu = xr->xr_cpu;
	cmd_case_t *cc;
	const char *uuid;
	nvlist_t *rsrc = NULL;

#ifdef	sun4u
	if (cmd_cache_ce_panther(hdl, ep, xr) == 0) {
		return;
	}
#endif
	cmd_fill_errdata(xr->xr_clcode, cpu, &cc, &ed);

	if (cpu->cpu_faulting || (cc->cc_cp != NULL &&
	    fmd_case_solved(hdl, cc->cc_cp)))
		return;

	if (cc->cc_cp == NULL) {
		cc->cc_cp = cmd_case_create(hdl, &cpu->cpu_header, ed->ed_pst,
		    &uuid);
		cc->cc_serdnm = cmd_cpu_serdnm_create(hdl, cpu,
		    ed->ed_serd->cs_name);

		fmd_serd_create(hdl, cc->cc_serdnm, ed->ed_serd->cs_n,
		    ed->ed_serd->cs_t);
	}

	fmd_hdl_debug(hdl, "adding event to %s\n", cc->cc_serdnm);

	if (fmd_serd_record(hdl, cc->cc_serdnm, ep) == FMD_B_FALSE)
		return; /* serd engine hasn't fired yet */

	if (xr->xr_rsrc_nvl != NULL && nvlist_dup(xr->xr_rsrc_nvl,
	    &rsrc, 0) != 0) {
		fmd_hdl_abort(hdl, "failed to duplicate resource FMRI for "
		    "%s fault", ed->ed_fltnm);
	}

	fmd_case_add_serd(hdl, cc->cc_cp, cc->cc_serdnm);
	cmd_cpu_create_faultlist(hdl, cc->cc_cp, cpu, ed->ed_fltnm, rsrc, 100);
	nvlist_free(rsrc);
	fmd_case_solve(hdl, cc->cc_cp);
}

/*
 * We're back from the timeout.  Check whether this event was part of a
 * train.  If it was, process only the cause of the train; otherwise,
 * process the event directly.
 */
static void
cmd_xxcu_resolve(fmd_hdl_t *hdl, cmd_xr_t *xr, fmd_event_t *ep,
    cmd_xr_hdlr_f *hdlr)
{
	cmd_xxcu_trw_t *trw;
	cmd_errcl_t cause;
	uint64_t afar;

	afar = 0;

	if (xr->xr_afar_status == AFLT_STAT_VALID)
		afar = xr->xr_afar;

	if ((trw = cmd_trw_lookup(xr->xr_ena,
	    xr->xr_afar_status, afar)) == NULL) {
		fmd_hdl_debug(hdl, "cmd_trw_lookup: Not found\n");
		return;
	}

	fmd_hdl_debug(hdl, "found waiter with mask 0x%08llx\n", trw->trw_mask);

	trw->trw_flags |= CMD_TRW_F_DELETING;

	/*
	 * In sun4v, the train-matching rule has changed: it matches only
	 * a portion of the train mask, so we can't discard the remaining
	 * errors in the train mask.
	 */
#ifdef sun4u
	if (trw->trw_flags & CMD_TRW_F_CAUSESEEN) {
		fmd_hdl_debug(hdl, "cause already seen -- discarding\n");
		goto done;
	}
#endif

	if ((cause = cmd_train_match(trw->trw_mask, xr->xr_clcode)) == 0) {
		/*
		 * We didn't match in a train, so we're going to process each
		 * event individually.
		 */
		fmd_hdl_debug(hdl, "didn't match in a train\n");
		hdlr(hdl, xr, ep);
		goto done;
	}

	fmd_hdl_debug(hdl, "found a match for train.  cause is %llx, "
	    "this is %llx\n", cause, xr->xr_clcode);

	/*
	 * We've got a train match.  If this event is the cause of the train,
	 * process it.
	 */
	if (cause == xr->xr_clcode) {
		trw->trw_flags |= CMD_TRW_F_CAUSESEEN;
		hdlr(hdl, xr, ep);
	}

done:
	cmd_trw_deref(hdl, trw);
}
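
/*
 * An illustrative timeline, using hypothetical error classes that are
 * assumed to form a defined train: two ereports sharing an ENA arrive
 * close together.  Each passes through cmd_xxcu_initial() below, joins
 * the same train waiter (trw), and is rescheduled for redelivery.  As
 * each redelivered event resolves here, cmd_train_match() checks
 * whether that event's class is the cause of the train; only the cause
 * event's handler runs, while the others are dropped (on sun4u via
 * CMD_TRW_F_CAUSESEEN once the cause has been processed).
 */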

void
cmd_xxc_resolve(fmd_hdl_t *hdl, cmd_xr_t *xr, fmd_event_t *ep)
{
	cmd_xxcu_resolve(hdl, xr, ep, cmd_xxc_hdlr);
}

void
cmd_xxu_resolve(fmd_hdl_t *hdl, cmd_xr_t *xr, fmd_event_t *ep)
{
	cmd_xxcu_resolve(hdl, xr, ep, cmd_xxu_hdlr);
}

void
cmd_nop_resolve(fmd_hdl_t *hdl, cmd_xr_t *xr, fmd_event_t *ep)
{
	cmd_xxcu_resolve(hdl, xr, ep, cmd_nop_hdlr);
}

cmd_evdisp_t
cmd_xxcu_initial(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
    const char *class, cmd_errcl_t clcode, uint_t hdlrid)
{
	cmd_xxcu_trw_t *trw;
	cmd_case_t *cc;
	cmd_cpu_t *cpu;
	cmd_xr_t *xr;
	uint64_t ena;
	uint64_t afar;
	uint8_t level = clcode & CMD_ERRCL_LEVEL_EXTRACT;
	uint8_t	afar_status;
	const errdata_t *ed = NULL;
	int ref_incremented = 0;

	clcode &= CMD_ERRCL_LEVEL_MASK; /* keep level bits out of train masks */

	if ((cpu = cmd_cpu_lookup_from_detector(hdl, nvl, class,
	    level)) == NULL || cpu->cpu_faulting)
		return (CMD_EVD_UNUSED);

	cmd_fill_errdata(clcode, cpu, &cc, &ed);

	if (cc->cc_cp != NULL && fmd_case_solved(hdl, cc->cc_cp))
		return (CMD_EVD_REDUND);

	(void) nvlist_lookup_uint64(nvl, FM_EREPORT_ENA, &ena);

	if (cmd_afar_valid(hdl, nvl, clcode, &afar) != 0) {
		afar_status = AFLT_STAT_INVALID;
		afar = 0;
	} else {
		afar_status = AFLT_STAT_VALID;
	}

	fmd_hdl_debug(hdl, "scheduling %s (%llx) for redelivery\n",
	    class, clcode);
	fmd_hdl_debug(hdl, "looking up ena %llx, afar %llx with\n", ena, afar);

	fmd_hdl_debug(hdl, "afar status of %02x\n", afar_status);

	if ((trw = cmd_trw_lookup(ena, afar_status, afar)) == NULL) {
		if ((trw = cmd_trw_alloc(ena, afar)) == NULL) {
			fmd_hdl_debug(hdl, "failed to get new trw\n");
			goto redeliver;
		}
	}

	if (trw->trw_flags & CMD_TRW_F_DELETING)
		goto redeliver;

	if (trw->trw_mask & clcode) {
		fmd_hdl_debug(hdl, "clcode %llx is already in trw "
		    "(mask %llx)\n", clcode, trw->trw_mask);
		return (CMD_EVD_UNUSED);
	}

	cmd_trw_ref(hdl, trw, clcode);
	ref_incremented++;

	fmd_hdl_debug(hdl, "trw rescheduled for train delivery\n");

redeliver:
	if ((xr = cmd_xr_create(hdl, ep, nvl, cpu, clcode)) == NULL) {
		fmd_hdl_debug(hdl, "cmd_xr_create failed");
		if (ref_incremented)
			cmd_trw_deref(hdl, trw);
		return (CMD_EVD_BAD);
	}

	return (cmd_xr_reschedule(hdl, xr, hdlrid));
}


cmd_evdisp_t
cmd_xxu(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class,
    cmd_errcl_t clcode)
{
	return (cmd_xxcu_initial(hdl, ep, nvl, class, clcode, CMD_XR_HDLR_XXU));
}

cmd_evdisp_t
cmd_xxc(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class,
    cmd_errcl_t clcode)
{
	return (cmd_xxcu_initial(hdl, ep, nvl, class, clcode, CMD_XR_HDLR_XXC));
}

cmd_evdisp_t
cmd_nop_train(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
    const char *class, cmd_errcl_t clcode)
{
	return (cmd_xxcu_initial(hdl, ep, nvl, class, clcode, CMD_XR_HDLR_NOP));
}

cmd_evdisp_t
cmd_miscregs_train(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
    const char *class, cmd_errcl_t clcode)
{
	return (cmd_xxcu_initial(hdl, ep, nvl, class, clcode,
	    CMD_XR_HDLR_XXC));
}

void
cmd_cpuerr_close(fmd_hdl_t *hdl, void *arg)
{
	cmd_cpu_destroy(hdl, arg);
}