/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/log.h>
#include <sys/systm.h>
#include <sys/modctl.h>
#include <sys/errorq.h>
#include <sys/controlregs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/sysevent.h>
#include <sys/pghw.h>
#include <sys/cyclic.h>
#include <sys/pci_cfgspace.h>
#include <sys/mc_intel.h>
#include <sys/smbios.h>
#include "nb5000.h"
#include "nb_log.h"
#include "dimm_phys.h"

static uint32_t uerrcnt[2];
static uint32_t cerrcnta[2][2];
static uint32_t cerrcntb[2][2];
static uint32_t cerrcntc[2][2];
static uint32_t cerrcntd[2][2];
static nb_logout_t nb_log;

struct mch_error_code {
	int intel_error_list;	/* error number in Chipset Error List */
	uint32_t emask;		/* mask for machine check */
	uint32_t error_bit;	/* error bit in fault register */
};

static struct mch_error_code fat_fbd_error_code[] = {
	{ 23, EMASK_FBD_M23, ERR_FAT_FBD_M23 },
	{ 3, EMASK_FBD_M3, ERR_FAT_FBD_M3 },
	{ 2, EMASK_FBD_M2, ERR_FAT_FBD_M2 },
	{ 1, EMASK_FBD_M1, ERR_FAT_FBD_M1 }
};

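/*
 * Map a fatal FBD error bitmask onto its Intel "Chipset Error List" (Mxx)
 * entry.  The accumulated EMASK bits for every matched error are handed to
 * nb_fbd_mask_mc().  Returns -1 when no error, or more than one error, is
 * present.
 */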
static int
intel_fat_fbd_err(uint32_t fat_fbd)
{
	int rt = -1;
	int nerr = 0;
	uint32_t emask_fbd = 0;
	int i;
	int sz;

	sz = sizeof (fat_fbd_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (fat_fbd & fat_fbd_error_code[i].error_bit) {
			rt = fat_fbd_error_code[i].intel_error_list;
			emask_fbd |= fat_fbd_error_code[i].emask;
			nerr++;
		}
	}

	if (emask_fbd)
		nb_fbd_mask_mc(emask_fbd);
	if (nerr > 1)
		rt = -1;
	return (rt);
}

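/*
 * Decode the fatal FBD first-error register into an ereport class suffix
 * and fill in the memory scratchpad (branch, channel, rank, dimm, bank,
 * ras, cas, physical address and offset) used to build the ereport payload.
 */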
static char *
fat_memory_error(const nb_regs_t *rp, void *data)
{
	int channel;
	uint32_t ferr_fat_fbd, nrecmemb;
	uint32_t nrecmema;
	char *intr = "nb.unknown";
	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;

	ferr_fat_fbd = rp->nb.fat_fbd_regs.ferr_fat_fbd;
	if ((ferr_fat_fbd & ERR_FAT_FBD_MASK) == 0) {
		sp->intel_error_list =
		    intel_fat_fbd_err(rp->nb.fat_fbd_regs.nerr_fat_fbd);
		sp->branch = -1;
		sp->channel = -1;
		sp->rank = -1;
		sp->dimm = -1;
		sp->bank = -1;
		sp->cas = -1;
		sp->ras = -1;
		sp->pa = -1LL;
		sp->offset = -1;
		return (intr);
	}
	sp->intel_error_list = intel_fat_fbd_err(ferr_fat_fbd);
	channel = (ferr_fat_fbd >> 28) & 3;
	sp->branch = channel >> 1;
	sp->channel = channel;
	if ((ferr_fat_fbd & (ERR_FAT_FBD_M2|ERR_FAT_FBD_M1)) != 0) {
		if ((ferr_fat_fbd & ERR_FAT_FBD_M1) != 0)
			intr = "nb.fbd.alert";	/* Alert on FB-DIMM M1 */
		else
			intr = "nb.fbd.crc";	/* CRC error FB-DIMM M2 */
		nrecmema = rp->nb.fat_fbd_regs.nrecmema;
		nrecmemb = rp->nb.fat_fbd_regs.nrecmemb;
		sp->rank = (nrecmema >> 8) & RANK_MASK;
		sp->dimm = sp->rank >> 1;
		sp->bank = (nrecmema >> 12) & BANK_MASK;
		sp->cas = (nrecmemb >> 16) & CAS_MASK;
		sp->ras = nrecmemb & RAS_MASK;
		sp->pa = dimm_getphys(sp->branch, sp->rank, sp->bank, sp->ras,
		    sp->cas);
		sp->offset = dimm_getoffset(sp->branch, sp->rank, sp->bank,
		    sp->ras, sp->cas);
	} else {
		if ((ferr_fat_fbd & ERR_FAT_FBD_M3) != 0)
			intr = "nb.fbd.otf";	/* thermal temp > Tmid M3 */
		else if ((ferr_fat_fbd & ERR_FAT_FBD_M23) != 0) {
			intr = "nb.fbd.reset_timeout";
			sp->channel = -1;
		}
		sp->rank = -1;
		sp->dimm = -1;
		sp->bank = -1;
		sp->cas = -1;
		sp->ras = -1;
		sp->pa = -1LL;
		sp->offset = -1;
	}
	return (intr);
}


static struct mch_error_code nf_fbd_error_code[] = {
	{ 29, EMASK_FBD_M29, ERR_NF_FBD_M29 },
	{ 28, EMASK_FBD_M28, ERR_NF_FBD_M28 },
	{ 27, EMASK_FBD_M27, ERR_NF_FBD_M27 },
	{ 26, EMASK_FBD_M26, ERR_NF_FBD_M26 },
	{ 25, EMASK_FBD_M25, ERR_NF_FBD_M25 },
	{ 24, EMASK_FBD_M24, ERR_NF_FBD_M24 },
	{ 22, EMASK_FBD_M22, ERR_NF_FBD_M22 },
	{ 21, EMASK_FBD_M21, ERR_NF_FBD_M21 },
	{ 20, EMASK_FBD_M20, ERR_NF_FBD_M20 },
	{ 19, EMASK_FBD_M19, ERR_NF_FBD_M19 },
	{ 18, EMASK_FBD_M18, ERR_NF_FBD_M18 },
	{ 17, EMASK_FBD_M17, ERR_NF_FBD_M17 },
	{ 16, EMASK_FBD_M16, ERR_NF_FBD_M16 },
	{ 15, EMASK_FBD_M15, ERR_NF_FBD_M15 },
	{ 14, EMASK_FBD_M14, ERR_NF_FBD_M14 },
	{ 13, EMASK_FBD_M13, ERR_NF_FBD_M13 },
	{ 12, EMASK_FBD_M12, ERR_NF_FBD_M12 },
	{ 11, EMASK_FBD_M11, ERR_NF_FBD_M11 },
	{ 10, EMASK_FBD_M10, ERR_NF_FBD_M10 },
	{ 9, EMASK_FBD_M9, ERR_NF_FBD_M9 },
	{ 8, EMASK_FBD_M8, ERR_NF_FBD_M8 },
	{ 7, EMASK_FBD_M7, ERR_NF_FBD_M7 },
	{ 6, EMASK_FBD_M6, ERR_NF_FBD_M6 },
	{ 5, EMASK_FBD_M5, ERR_NF_FBD_M5 },
	{ 4, EMASK_FBD_M4, ERR_NF_FBD_M4 }
};

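/*
 * Map a non-fatal FBD error bitmask onto its Intel error-list (Mxx) entry
 * and hand the matched EMASK bits to nb_fbd_mask_mc().  Returns -1 when no
 * error, or more than one error, is present.
 */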
static int
intel_nf_fbd_err(uint32_t nf_fbd)
{
	int rt = -1;
	int nerr = 0;
	uint32_t emask_fbd = 0;
	int i;
	int sz;

	sz = sizeof (nf_fbd_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (nf_fbd & nf_fbd_error_code[i].error_bit) {
			rt = nf_fbd_error_code[i].intel_error_list;
			emask_fbd |= nf_fbd_error_code[i].emask;
			nerr++;
		}
	}
	if (emask_fbd)
		nb_fbd_mask_mc(emask_fbd);
	if (nerr > 1)
		rt = -1;
	return (rt);
}

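/*
 * Decode the non-fatal FBD first-error register.  Correctable ECC errors
 * are narrowed to a single channel using the ECC locator bits in REDMEMB;
 * uncorrectable ECC and write errors can only be isolated to a DIMM pair,
 * so the channel is left unspecified for those cases.
 */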
static char *
nf_memory_error(const nb_regs_t *rp, void *data)
{
	uint32_t ferr_nf_fbd, recmemb, redmemb;
	uint32_t recmema;
	int branch, channel, ecc_locator;
	char *intr = "nb.unknown";
	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;

	sp->rank = -1;
	sp->dimm = -1;
	sp->bank = -1;
	sp->cas = -1;
	sp->ras = -1LL;
	sp->pa = -1LL;
	sp->offset = -1;
	ferr_nf_fbd = rp->nb.nf_fbd_regs.ferr_nf_fbd;
	if ((ferr_nf_fbd & ERR_NF_FBD_MASK) == 0) {
		sp->branch = -1;
		sp->channel = -1;
		sp->intel_error_list =
		    intel_nf_fbd_err(rp->nb.nf_fbd_regs.nerr_nf_fbd);
		return (intr);
	}
	sp->intel_error_list = intel_nf_fbd_err(ferr_nf_fbd);
	channel = (ferr_nf_fbd >> ERR_FBD_CH_SHIFT) & 3;
	branch = channel >> 1;
	sp->branch = branch;
	sp->channel = channel;
	if (ferr_nf_fbd & ERR_NF_FBD_MASK) {
		if (ferr_nf_fbd & ERR_NF_FBD_ECC_UE) {
			/*
			 * uncorrectable ECC M4 - M12
			 * we can only isolate to a pair of dimms;
			 * for a single dimm configuration let eversholt
			 * sort it out without needing a special rule
			 */
			sp->channel = -1;
			recmema = rp->nb.nf_fbd_regs.recmema;
			recmemb = rp->nb.nf_fbd_regs.recmemb;
			sp->rank = (recmema >> 8) & RANK_MASK;
			sp->bank = (recmema >> 12) & BANK_MASK;
			sp->cas = (recmemb >> 16) & CAS_MASK;
			sp->ras = recmemb & RAS_MASK;
			intr = "nb.mem_ue";
		} else if (ferr_nf_fbd & ERR_NF_FBD_M13) {
			/*
			 * write error M13
			 * we can only isolate to a pair of dimms
			 */
			sp->channel = -1;
			if (nb_mode != NB_MEMORY_MIRROR) {
				recmema = rp->nb.nf_fbd_regs.recmema;
				sp->rank = (recmema >> 8) & RANK_MASK;
				sp->bank = (recmema >> 12) & BANK_MASK;
				sp->cas = (recmemb >> 16) & CAS_MASK;
				sp->ras = recmemb & RAS_MASK;
			}
			intr = "nb.fbd.ma"; /* memory alert */
		} else if (ferr_nf_fbd & ERR_NF_FBD_MA) { /* M14, M15 and M21 */
			intr = "nb.fbd.ch"; /* FBD on channel */
		} else if ((ferr_nf_fbd & ERR_NF_FBD_ECC_CE) != 0) {
			/* correctable ECC M17-M20 */
			recmema = rp->nb.nf_fbd_regs.recmema;
			recmemb = rp->nb.nf_fbd_regs.recmemb;
			sp->rank = (recmema >> 8) & RANK_MASK;
			redmemb = rp->nb.nf_fbd_regs.redmemb;
			ecc_locator = redmemb & 0x3ffff;
			if (ecc_locator & 0x1ff)
				sp->channel = branch << 1;
			else if (ecc_locator & 0x3fe00)
				sp->channel = (branch << 1) + 1;
			sp->dimm = sp->rank >> 1;
			sp->bank = (recmema >> 12) & BANK_MASK;
			sp->cas = (recmemb >> 16) & CAS_MASK;
			sp->ras = recmemb & RAS_MASK;
			intr = "nb.mem_ce";
		} else if ((ferr_nf_fbd & ERR_NF_FBD_SPARE) != 0) {
			/* spare dimm M27, M28 */
			intr = "nb.mem_ds";
			sp->channel = -1;
			if (rp->nb.nf_fbd_regs.spcps & SPCPS_SPARE_DEPLOYED) {
				sp->rank =
				    SPCPS_FAILED_RANK(rp->nb.nf_fbd_regs.spcps);
				nb_used_spare_rank(sp->branch, sp->rank);
				nb_config_gen++;
			}
		} else if ((ferr_nf_fbd & ERR_NF_FBD_M22) != 0) {
			intr = "nb.spd";	/* SPD protocol */
		}
	}
	if (sp->ras != -1) {
		sp->pa = dimm_getphys(sp->branch, sp->rank, sp->bank, sp->ras,
		    sp->cas);
		sp->offset = dimm_getoffset(sp->branch, sp->rank, sp->bank,
		    sp->ras, sp->cas);
	}
	return (intr);
}

static struct mch_error_code fat_int_error_code[] = {
	{ 14, EMASK_INT_B14, ERR_FAT_INT_B14 },
	{ 12, EMASK_INT_B12, ERR_FAT_INT_B12 },
	{ 25, EMASK_INT_B25, ERR_FAT_INT_B25 },
	{ 23, EMASK_INT_B23, ERR_FAT_INT_B23 },
	{ 21, EMASK_INT_B21, ERR_FAT_INT_B21 },
	{ 7, EMASK_INT_B7, ERR_FAT_INT_B7 },
	{ 4, EMASK_INT_B4, ERR_FAT_INT_B4 },
	{ 3, EMASK_INT_B3, ERR_FAT_INT_B3 },
	{ 2, EMASK_INT_B2, ERR_FAT_INT_B2 },
	{ 1, EMASK_INT_B1, ERR_FAT_INT_B1 }
};

static struct mch_error_code nf_int_error_code[] = {
	{ 27, 0, ERR_NF_INT_B27 },
	{ 24, 0, ERR_NF_INT_B24 },
	{ 22, EMASK_INT_B22, ERR_NF_INT_B22 },
	{ 20, EMASK_INT_B20, ERR_NF_INT_B20 },
	{ 19, EMASK_INT_B19, ERR_NF_INT_B19 },
	{ 18, 0, ERR_NF_INT_B18 },
	{ 17, 0, ERR_NF_INT_B17 },
	{ 16, 0, ERR_NF_INT_B16 },
	{ 11, EMASK_INT_B11, ERR_NF_INT_B11 },
	{ 10, EMASK_INT_B10, ERR_NF_INT_B10 },
	{ 9, EMASK_INT_B9, ERR_NF_INT_B9 },
	{ 8, EMASK_INT_B8, ERR_NF_INT_B8 },
	{ 6, EMASK_INT_B6, ERR_NF_INT_B6 },
	{ 5, EMASK_INT_B5, ERR_NF_INT_B5 }
};

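/*
 * Map fatal and non-fatal internal (B-list) error bits onto an Intel
 * error-list entry and hand the matched EMASK bits to nb_int_mask_mc().
 * Returns -1 when no error, or more than one error, is identified.
 */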
static int
intel_int_err(uint16_t err_fat_int, uint16_t err_nf_int)
{
	int rt = -1;
	int nerr = 0;
	uint32_t emask_int = 0;
	int i;
	int sz;

	sz = sizeof (fat_int_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (err_fat_int & fat_int_error_code[i].error_bit) {
			rt = fat_int_error_code[i].intel_error_list;
			emask_int |= fat_int_error_code[i].emask;
			nerr++;
		}
	}

	if (nb_chipset == INTEL_NB_5400 &&
	    (err_nf_int & NERR_NF_5400_INT_B26) != 0) {
		err_nf_int &= ~NERR_NF_5400_INT_B26;
		rt = 26;
		nerr++;
	}

	if (rt)
		err_nf_int &= ~ERR_NF_INT_B18;

	sz = sizeof (nf_int_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (err_nf_int & nf_int_error_code[i].error_bit) {
			rt = nf_int_error_code[i].intel_error_list;
			emask_int |= nf_int_error_code[i].emask;
			nerr++;
		}
	}

	if (emask_int)
		nb_int_mask_mc(emask_int);
	if (nerr > 1)
		rt = -1;
	return (rt);
}

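/*
 * Capture the internal error registers into the logout area and, unless
 * the system is about to panic, clear the hardware state.  Returns 1 when
 * the only indication present is the B18 bit, which the callers treat as
 * spurious and drop.
 */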
static int
log_int_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int t = 0;
	int rt = 0;

	rp->flag = NB_REG_LOG_INT;
	rp->nb.int_regs.ferr_fat_int = FERR_FAT_INT_RD(interpose);
	rp->nb.int_regs.ferr_nf_int = FERR_NF_INT_RD(&t);
	*interpose |= t;
	rp->nb.int_regs.nerr_fat_int = NERR_FAT_INT_RD(&t);
	*interpose |= t;
	rp->nb.int_regs.nerr_nf_int = NERR_NF_INT_RD(&t);
	*interpose |= t;
	rp->nb.int_regs.nrecint = NRECINT_RD();
	rp->nb.int_regs.recint = RECINT_RD();
	rp->nb.int_regs.nrecsf = NRECSF_RD();
	rp->nb.int_regs.recsf = RECSF_RD();

	if (!willpanic) {
		if (rp->nb.int_regs.ferr_fat_int || *interpose)
			FERR_FAT_INT_WR(rp->nb.int_regs.ferr_fat_int);
		if (rp->nb.int_regs.ferr_nf_int || *interpose)
			FERR_NF_INT_WR(rp->nb.int_regs.ferr_nf_int);
		if (rp->nb.int_regs.nerr_fat_int)
			NERR_FAT_INT_WR(rp->nb.int_regs.nerr_fat_int);
		if (rp->nb.int_regs.nerr_nf_int)
			NERR_NF_INT_WR(rp->nb.int_regs.nerr_nf_int);
		/*
		 * if interposing, write the read-only registers to clear
		 * them from the pcii cache
		 */
		if (*interpose) {
			NRECINT_WR();
			RECINT_WR();
			NRECSF_WR();
			RECSF_WR();
		}
	}
	if (rp->nb.int_regs.ferr_fat_int == 0 &&
	    rp->nb.int_regs.nerr_fat_int == 0 &&
	    (rp->nb.int_regs.ferr_nf_int == ERR_NF_INT_B18 ||
	    (rp->nb.int_regs.ferr_nf_int == 0 &&
	    rp->nb.int_regs.nerr_nf_int == ERR_NF_INT_B18))) {
		rt = 1;
	}
	return (rt);
}

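/*
 * Capture the thermal error registers (used on the 5400 chipset) and clear
 * them unless a panic is imminent.
 */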
static void
log_thermal_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int t = 0;

	rp->flag = NB_REG_LOG_THR;
	rp->nb.thr_regs.ferr_fat_thr = FERR_FAT_THR_RD(interpose);
	rp->nb.thr_regs.nerr_fat_thr = NERR_FAT_THR_RD(&t);
	*interpose |= t;
	rp->nb.thr_regs.ferr_nf_thr = FERR_NF_THR_RD(&t);
	*interpose |= t;
	rp->nb.thr_regs.nerr_nf_thr = NERR_NF_THR_RD(&t);
	*interpose |= t;
	rp->nb.thr_regs.ctsts = CTSTS_RD();
	rp->nb.thr_regs.thrtsts = THRTSTS_RD();

	if (!willpanic) {
		if (rp->nb.thr_regs.ferr_fat_thr || *interpose)
			FERR_FAT_THR_WR(rp->nb.thr_regs.ferr_fat_thr);
		if (rp->nb.thr_regs.nerr_fat_thr || *interpose)
			NERR_FAT_THR_WR(rp->nb.thr_regs.nerr_fat_thr);
		if (rp->nb.thr_regs.ferr_nf_thr || *interpose)
			FERR_NF_THR_WR(rp->nb.thr_regs.ferr_nf_thr);
		if (rp->nb.thr_regs.nerr_nf_thr || *interpose)
			NERR_NF_THR_WR(rp->nb.thr_regs.nerr_nf_thr);

		if (*interpose) {
			CTSTS_WR(rp->nb.thr_regs.ctsts);
			THRTSTS_WR(rp->nb.thr_regs.thrtsts);
		}
	}
}

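/*
 * Capture the DMA engine PCI status registers; nothing is cleared here.
 */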
static void
log_dma_err(nb_regs_t *rp, int *interpose)
{
	rp->flag = NB_REG_LOG_DMA;

	rp->nb.dma_regs.pcists = PCISTS_RD(interpose);
	rp->nb.dma_regs.pexdevsts = PCIDEVSTS_RD();
}

static struct mch_error_code fat_fsb_error_code[] = {
	{ 9, EMASK_FSB_F9, ERR_FAT_FSB_F9 },
	{ 2, EMASK_FSB_F2, ERR_FAT_FSB_F2 },
	{ 1, EMASK_FSB_F1, ERR_FAT_FSB_F1 }
};

static struct mch_error_code nf_fsb_error_code[] = {
	{ 8, EMASK_FSB_F8, ERR_NF_FSB_F8 },
	{ 7, EMASK_FSB_F7, ERR_NF_FSB_F7 },
	{ 6, EMASK_FSB_F6, ERR_NF_FSB_F6 }
};

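/*
 * Map fatal and non-fatal FSB (F-list) error bits onto an Intel error-list
 * entry and hand the matched EMASK bits for the given FSB to
 * nb_fsb_mask_mc().  Returns -1 when no error, or more than one error, is
 * identified.
 */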
static int
intel_fsb_err(int fsb, uint8_t err_fat_fsb, uint8_t err_nf_fsb)
{
	int rt = -1;
	int nerr = 0;
	uint16_t emask_fsb = 0;
	int i;
	int sz;

	sz = sizeof (fat_fsb_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (err_fat_fsb & fat_fsb_error_code[i].error_bit) {
			rt = fat_fsb_error_code[i].intel_error_list;
			emask_fsb |= fat_fsb_error_code[i].emask;
			nerr++;
		}
	}

	sz = sizeof (nf_fsb_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (err_nf_fsb & nf_fsb_error_code[i].error_bit) {
			rt = nf_fsb_error_code[i].intel_error_list;
			emask_fsb |= nf_fsb_error_code[i].emask;
			nerr++;
		}
	}

	if (emask_fsb)
		nb_fsb_mask_mc(fsb, emask_fsb);
	if (nerr > 1)
		rt = -1;
	return (rt);
}

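/*
 * Capture the error registers for the FSB identified by the global FERR
 * value and clear them unless a panic is imminent.
 */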
static void
log_fsb_err(uint64_t ferr, nb_regs_t *rp, int willpanic, int *interpose)
{
	uint8_t fsb;
	int t = 0;

	fsb = GE_FERR_FSB(ferr);
	rp->flag = NB_REG_LOG_FSB;

	rp->nb.fsb_regs.fsb = fsb;
	rp->nb.fsb_regs.ferr_fat_fsb = FERR_FAT_FSB_RD(fsb, interpose);
	rp->nb.fsb_regs.ferr_nf_fsb = FERR_NF_FSB_RD(fsb, &t);
	*interpose |= t;
	rp->nb.fsb_regs.nerr_fat_fsb = NERR_FAT_FSB_RD(fsb, &t);
	*interpose |= t;
	rp->nb.fsb_regs.nerr_nf_fsb = NERR_NF_FSB_RD(fsb, &t);
	*interpose |= t;
	rp->nb.fsb_regs.nrecfsb = NRECFSB_RD(fsb);
	rp->nb.fsb_regs.nrecfsb_addr = NRECADDR_RD(fsb);
	rp->nb.fsb_regs.recfsb = RECFSB_RD(fsb);
	if (!willpanic) {
		if (rp->nb.fsb_regs.ferr_fat_fsb || *interpose)
			FERR_FAT_FSB_WR(fsb, rp->nb.fsb_regs.ferr_fat_fsb);
		if (rp->nb.fsb_regs.ferr_nf_fsb || *interpose)
			FERR_NF_FSB_WR(fsb, rp->nb.fsb_regs.ferr_nf_fsb);
		/*
		 * if interposing, write the read-only registers to clear
		 * them from the pcii cache
		 */
		if (*interpose) {
			NRECFSB_WR(fsb);
			NRECADDR_WR(fsb);
			RECFSB_WR(fsb);
		}
	}
}

static struct mch_error_code fat_pex_error_code[] = {
	{ 19, EMASK_UNCOR_PEX_IO19, PEX_FAT_IO19 },
	{ 18, EMASK_UNCOR_PEX_IO18, PEX_FAT_IO18 },
	{ 10, EMASK_UNCOR_PEX_IO10, PEX_FAT_IO10 },
	{ 9, EMASK_UNCOR_PEX_IO9, PEX_FAT_IO9 },
	{ 8, EMASK_UNCOR_PEX_IO8, PEX_FAT_IO8 },
	{ 7, EMASK_UNCOR_PEX_IO7, PEX_FAT_IO7 },
	{ 6, EMASK_UNCOR_PEX_IO6, PEX_FAT_IO6 },
	{ 5, EMASK_UNCOR_PEX_IO5, PEX_FAT_IO5 },
	{ 4, EMASK_UNCOR_PEX_IO4, PEX_FAT_IO4 },
	{ 3, EMASK_UNCOR_PEX_IO3, PEX_FAT_IO3 },
	{ 2, EMASK_UNCOR_PEX_IO2, PEX_FAT_IO2 },
	{ 0, EMASK_UNCOR_PEX_IO0, PEX_FAT_IO0 }
};

static struct mch_error_code fat_unit_pex_5400_error_code[] = {
	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_FAT_IO32 },
	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_FAT_IO31 },
	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_FAT_IO30 },
	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_FAT_IO29 },
	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_FAT_IO27 },
	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_FAT_IO26 },
	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_FAT_IO25 },
	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_FAT_IO24 },
	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_FAT_IO23 },
	{ 22, EMASK_UNIT_PEX_IO22, PEX_5400_FAT_IO22 },
};

static struct mch_error_code fat_pex_5400_error_code[] = {
	{ 19, EMASK_UNCOR_PEX_IO19, PEX_5400_FAT_IO19 },
	{ 18, EMASK_UNCOR_PEX_IO18, PEX_5400_FAT_IO18 },
	{ 10, EMASK_UNCOR_PEX_IO10, PEX_5400_FAT_IO10 },
	{ 9, EMASK_UNCOR_PEX_IO9, PEX_5400_FAT_IO9 },
	{ 8, EMASK_UNCOR_PEX_IO8, PEX_5400_FAT_IO8 },
	{ 7, EMASK_UNCOR_PEX_IO7, PEX_5400_FAT_IO7 },
	{ 6, EMASK_UNCOR_PEX_IO6, PEX_5400_FAT_IO6 },
	{ 5, EMASK_UNCOR_PEX_IO5, PEX_5400_FAT_IO5 },
	{ 4, EMASK_UNCOR_PEX_IO4, PEX_5400_FAT_IO4 },
	{ 2, EMASK_UNCOR_PEX_IO2, PEX_5400_FAT_IO2 },
	{ 0, EMASK_UNCOR_PEX_IO0, PEX_5400_FAT_IO0 }
};

static struct mch_error_code fat_rp_5400_error_code[] = {
	{ 1, EMASK_RP_PEX_IO1, PEX_5400_FAT_IO1 }
};

static struct mch_error_code fat_rp_error_code[] = {
	{ 1, EMASK_RP_PEX_IO1, PEX_FAT_IO1 }
};

static struct mch_error_code uncor_pex_error_code[] = {
	{ 19, EMASK_UNCOR_PEX_IO19, PEX_NF_IO19 },
	{ 9, EMASK_UNCOR_PEX_IO9, PEX_NF_IO9 },
	{ 8, EMASK_UNCOR_PEX_IO8, PEX_NF_IO8 },
	{ 7, EMASK_UNCOR_PEX_IO7, PEX_NF_IO7 },
	{ 6, EMASK_UNCOR_PEX_IO6, PEX_NF_IO6 },
	{ 5, EMASK_UNCOR_PEX_IO5, PEX_NF_IO5 },
	{ 4, EMASK_UNCOR_PEX_IO4, PEX_NF_IO4 },
	{ 3, EMASK_UNCOR_PEX_IO3, PEX_NF_IO3 },
	{ 0, EMASK_UNCOR_PEX_IO0, PEX_NF_IO0 }
};

static struct mch_error_code uncor_pex_5400_error_code[] = {
	{ 33, EMASK_UNIT_PEX_IO33, PEX_5400_NF_IO33 },
	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_NF_IO32 },
	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_NF_IO31 },
	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_NF_IO30 },
	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_NF_IO29 },
	{ 28, EMASK_UNIT_PEX_IO28, PEX_5400_NF_IO28 },
	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_NF_IO27 },
	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_NF_IO26 },
	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_NF_IO25 },
	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_NF_IO24 },
	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_NF_IO23 },
};

static struct mch_error_code cor_pex_error_code[] = {
	{ 20, EMASK_COR_PEX_IO20, PEX_5400_NF_IO20 },
	{ 16, EMASK_COR_PEX_IO16, PEX_NF_IO16 },
	{ 15, EMASK_COR_PEX_IO15, PEX_NF_IO15 },
	{ 14, EMASK_COR_PEX_IO14, PEX_NF_IO14 },
	{ 13, EMASK_COR_PEX_IO13, PEX_NF_IO13 },
	{ 12, EMASK_COR_PEX_IO12, PEX_NF_IO12 },
	{ 10, 0, PEX_NF_IO10 },
	{ 2, 0, PEX_NF_IO2 }
};

static struct mch_error_code rp_pex_5400_error_code[] = {
	{ 17, EMASK_RP_PEX_IO17, PEX_5400_NF_IO17 },
	{ 11, EMASK_RP_PEX_IO11, PEX_5400_NF_IO11 }
};

static struct mch_error_code cor_pex_5400_error_code1[] = {
	{ 19, EMASK_UNCOR_PEX_IO19, PEX_5400_NF_IO19 },
	{ 10, EMASK_UNCOR_PEX_IO10, PEX_5400_NF_IO10 },
	{ 9, EMASK_UNCOR_PEX_IO9, PEX_5400_NF_IO9 },
	{ 8, EMASK_UNCOR_PEX_IO8, PEX_5400_NF_IO8 },
	{ 7, EMASK_UNCOR_PEX_IO7, PEX_5400_NF_IO7 },
	{ 6, EMASK_UNCOR_PEX_IO6, PEX_5400_NF_IO6 },
	{ 5, EMASK_UNCOR_PEX_IO5, PEX_5400_NF_IO5 },
	{ 4, EMASK_UNCOR_PEX_IO4, PEX_5400_NF_IO4 },
	{ 2, EMASK_UNCOR_PEX_IO2, PEX_5400_NF_IO2 },
	{ 0, EMASK_UNCOR_PEX_IO0, PEX_5400_NF_IO0 }
};

static struct mch_error_code cor_pex_5400_error_code2[] = {
	{ 20, EMASK_COR_PEX_IO20, PEX_5400_NF_IO20 },
	{ 16, EMASK_COR_PEX_IO16, PEX_5400_NF_IO16 },
	{ 15, EMASK_COR_PEX_IO15, PEX_5400_NF_IO15 },
	{ 14, EMASK_COR_PEX_IO14, PEX_5400_NF_IO14 },
	{ 13, EMASK_COR_PEX_IO13, PEX_5400_NF_IO13 },
	{ 12, EMASK_COR_PEX_IO12, PEX_5400_NF_IO12 }
};

static struct mch_error_code cor_pex_5400_error_code3[] = {
	{ 33, EMASK_UNIT_PEX_IO33, PEX_5400_NF_IO33 },
	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_NF_IO32 },
	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_NF_IO31 },
	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_NF_IO30 },
	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_NF_IO29 },
	{ 28, EMASK_UNIT_PEX_IO28, PEX_5400_NF_IO28 },
	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_NF_IO27 },
	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_NF_IO26 },
	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_NF_IO25 },
	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_NF_IO24 },
	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_NF_IO23 }
};

static struct mch_error_code rp_pex_error_code[] = {
	{ 17, EMASK_RP_PEX_IO17, PEX_NF_IO17 },
	{ 11, EMASK_RP_PEX_IO11, PEX_NF_IO11 },
};

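/*
 * Map PCI Express fatal, uncorrectable and correctable error bits onto an
 * Intel error-list (IOxx) entry.  Unlike the other decoders, nothing is
 * masked here.  Returns -1 when no error, or more than one error, is found.
 */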
static int
intel_pex_err(uint32_t pex_fat, uint32_t pex_nf_cor)
{
	int rt = -1;
	int nerr = 0;
	int i;
	int sz;

	sz = sizeof (fat_pex_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_fat & fat_pex_error_code[i].error_bit) {
			rt = fat_pex_error_code[i].intel_error_list;
			nerr++;
		}
	}
	sz = sizeof (fat_rp_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_fat & fat_rp_error_code[i].error_bit) {
			rt = fat_rp_error_code[i].intel_error_list;
			nerr++;
		}
	}
	sz = sizeof (uncor_pex_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_nf_cor & uncor_pex_error_code[i].error_bit) {
			rt = uncor_pex_error_code[i].intel_error_list;
			nerr++;
		}
	}

	sz = sizeof (cor_pex_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_nf_cor & cor_pex_error_code[i].error_bit) {
			rt = cor_pex_error_code[i].intel_error_list;
			nerr++;
		}
	}
	sz = sizeof (rp_pex_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_nf_cor & rp_pex_error_code[i].error_bit) {
			rt = rp_pex_error_code[i].intel_error_list;
			nerr++;
		}
	}

	if (nerr > 1)
		rt = -1;
	return (rt);
}

static struct mch_error_code fat_thr_error_code[] = {
	{ 2, EMASK_THR_F2, ERR_FAT_THR_F2 },
	{ 1, EMASK_THR_F1, ERR_FAT_THR_F1 }
};

static struct mch_error_code nf_thr_error_code[] = {
	{ 5, EMASK_THR_F5, ERR_NF_THR_F5 },
	{ 4, EMASK_THR_F4, ERR_NF_THR_F4 },
	{ 3, EMASK_THR_F3, ERR_NF_THR_F3 }
};

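/*
 * Map fatal and non-fatal thermal error bits onto an Intel error-list
 * entry and hand the matched EMASK bits to nb_thr_mask_mc().  Returns -1
 * when no error, or more than one error, is identified.
 */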
static int
intel_thr_err(uint8_t err_fat_thr, uint8_t err_nf_thr)
{
	int rt = -1;
	int nerr = 0;
	uint16_t emask_thr = 0;
	int i;
	int sz;

	sz = sizeof (fat_thr_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (err_fat_thr & fat_thr_error_code[i].error_bit) {
			rt = fat_thr_error_code[i].intel_error_list;
			emask_thr |= fat_thr_error_code[i].emask;
			nerr++;
		}
	}

	sz = sizeof (nf_thr_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (err_nf_thr & nf_thr_error_code[i].error_bit) {
			rt = nf_thr_error_code[i].intel_error_list;
			emask_thr |= nf_thr_error_code[i].emask;
			nerr++;
		}
	}

	if (emask_thr)
		nb_thr_mask_mc(emask_thr);
	if (nerr > 1)
		rt = -1;
	return (rt);
}

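/*
 * 5400-specific variant of intel_pex_err() that also covers the per-unit
 * PCI Express error bits.  Returns -1 when no error, or more than one
 * error, is found.
 */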
static int
intel_pex_5400_err(uint32_t pex_fat, uint32_t pex_nf_cor)
{
	int rt = -1;
	int nerr = 0;
	int i;
	int sz;

	sz = sizeof (fat_pex_5400_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_fat & fat_pex_5400_error_code[i].error_bit) {
			rt = fat_pex_5400_error_code[i].intel_error_list;
			nerr++;
		}
	}
	sz = sizeof (fat_rp_5400_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_fat & fat_rp_5400_error_code[i].error_bit) {
			rt = fat_rp_5400_error_code[i].intel_error_list;
			nerr++;
		}
	}
	sz = sizeof (fat_unit_pex_5400_error_code) /
	    sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_fat &
		    fat_unit_pex_5400_error_code[i].error_bit) {
			rt = fat_unit_pex_5400_error_code[i].intel_error_list;
			nerr++;
		}
	}
	sz = sizeof (uncor_pex_5400_error_code) /
	    sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_fat & uncor_pex_5400_error_code[i].error_bit) {
			rt = uncor_pex_5400_error_code[i].intel_error_list;
			nerr++;
		}
	}

	sz = sizeof (rp_pex_5400_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_nf_cor & rp_pex_5400_error_code[i].error_bit) {
			rt = rp_pex_5400_error_code[i].intel_error_list;
			nerr++;
		}
	}

	sz = sizeof (cor_pex_5400_error_code1) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_nf_cor & cor_pex_5400_error_code1[i].error_bit) {
			rt = cor_pex_5400_error_code1[i].intel_error_list;
			nerr++;
		}
	}

	sz = sizeof (cor_pex_5400_error_code2) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_nf_cor & cor_pex_5400_error_code2[i].error_bit) {
			rt = cor_pex_5400_error_code2[i].intel_error_list;
			nerr++;
		}
	}

	sz = sizeof (cor_pex_5400_error_code3) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_nf_cor & cor_pex_5400_error_code3[i].error_bit) {
			rt = cor_pex_5400_error_code3[i].intel_error_list;
			nerr++;
		}
	}

	if (nerr > 1)
		rt = -1;
	return (rt);
}

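/*
 * Capture the PCI Express error registers for the express port identified
 * by the global FERR value and clear them unless a panic is imminent.
 */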
static void
log_pex_err(uint64_t ferr, nb_regs_t *rp, int willpanic, int *interpose)
{
	uint8_t pex = (uint8_t)-1;
	int t = 0;

	rp->flag = NB_REG_LOG_PEX;
	pex = GE_ERR_PEX(ferr);

	rp->nb.pex_regs.pex = pex;
	rp->nb.pex_regs.pex_fat_ferr =  PEX_FAT_FERR_RD(pex, interpose);
	rp->nb.pex_regs.pex_fat_nerr = PEX_FAT_NERR_RD(pex, &t);
	*interpose |= t;
	rp->nb.pex_regs.pex_nf_corr_ferr = PEX_NF_FERR_RD(pex, &t);
	*interpose |= t;
	rp->nb.pex_regs.pex_nf_corr_nerr = PEX_NF_NERR_RD(pex, &t);
	*interpose |= t;
	rp->nb.pex_regs.uncerrsev = UNCERRSEV_RD(pex);
	rp->nb.pex_regs.rperrsts = RPERRSTS_RD(pex);
	rp->nb.pex_regs.rperrsid = RPERRSID_RD(pex);
	if (pex != (uint8_t)-1)
		rp->nb.pex_regs.uncerrsts = UNCERRSTS_RD(pex);
	else
		rp->nb.pex_regs.uncerrsts = 0;
	rp->nb.pex_regs.aerrcapctrl = AERRCAPCTRL_RD(pex);
	rp->nb.pex_regs.corerrsts = CORERRSTS_RD(pex);
	rp->nb.pex_regs.pexdevsts = PEXDEVSTS_RD(pex);

	if (!willpanic) {
		if (rp->nb.pex_regs.pex_fat_ferr || *interpose)
			PEX_FAT_FERR_WR(pex, rp->nb.pex_regs.pex_fat_ferr);
		if (rp->nb.pex_regs.pex_fat_nerr)
			PEX_FAT_NERR_WR(pex, rp->nb.pex_regs.pex_fat_nerr);
		if (rp->nb.pex_regs.pex_nf_corr_ferr || *interpose)
			PEX_NF_FERR_WR(pex, rp->nb.pex_regs.pex_nf_corr_ferr);
		if (rp->nb.pex_regs.pex_nf_corr_nerr)
			PEX_NF_NERR_WR(pex, rp->nb.pex_regs.pex_nf_corr_nerr);
		if (*interpose)
			UNCERRSTS_WR(pex, rp->nb.pex_regs.uncerrsts);
		if (*interpose)
			RPERRSTS_WR(pex, rp->nb.pex_regs.rperrsts);
		if (*interpose)
			PEXDEVSTS_WR(pex, 0);
	}
}

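/*
 * Capture the fatal FBD error registers for the faulting branch, update
 * the running uncorrectable-error count snapshot and clear the hardware
 * state unless a panic is imminent.
 */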
static void
log_fat_fbd_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int channel, branch;
	int t = 0;

	rp->flag = NB_REG_LOG_FAT_FBD;
	rp->nb.fat_fbd_regs.ferr_fat_fbd = FERR_FAT_FBD_RD(interpose);
	channel = (rp->nb.fat_fbd_regs.ferr_fat_fbd >> 28) & 3;
	branch = channel >> 1;
	rp->nb.fat_fbd_regs.nerr_fat_fbd = NERR_FAT_FBD_RD(&t);
	*interpose |= t;
	rp->nb.fat_fbd_regs.nrecmema = NRECMEMA_RD(branch);
	rp->nb.fat_fbd_regs.nrecmemb = NRECMEMB_RD(branch);
	rp->nb.fat_fbd_regs.nrecfglog = NRECFGLOG_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbda = NRECFBDA_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbdb = NRECFBDB_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbdc = NRECFBDC_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbdd = NRECFBDD_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbde = NRECFBDE_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbdf = NRECFBDF_RD(branch);
	rp->nb.fat_fbd_regs.spcps = SPCPS_RD(branch);
	rp->nb.fat_fbd_regs.spcpc = SPCPC_RD(branch);
	rp->nb.fat_fbd_regs.uerrcnt = UERRCNT_RD(branch);
	rp->nb.fat_fbd_regs.uerrcnt_last = uerrcnt[branch];
	uerrcnt[branch] = rp->nb.fat_fbd_regs.uerrcnt;
	rp->nb.fat_fbd_regs.badrama = BADRAMA_RD(branch);
	rp->nb.fat_fbd_regs.badramb = BADRAMB_RD(branch);
	rp->nb.fat_fbd_regs.badcnt = BADCNT_RD(branch);
	if (!willpanic) {
		if (rp->nb.fat_fbd_regs.ferr_fat_fbd || *interpose)
			FERR_FAT_FBD_WR(rp->nb.fat_fbd_regs.ferr_fat_fbd);
		if (rp->nb.fat_fbd_regs.nerr_fat_fbd)
			NERR_FAT_FBD_WR(rp->nb.fat_fbd_regs.nerr_fat_fbd);
		/*
		 * if interposing, write the read-only registers to clear
		 * them from the pcii cache
		 */
		if (*interpose) {
			NRECMEMA_WR(branch);
			NRECMEMB_WR(branch);
			NRECFGLOG_WR(branch);
			NRECFBDA_WR(branch);
			NRECFBDB_WR(branch);
			NRECFBDC_WR(branch);
			NRECFBDD_WR(branch);
			NRECFBDE_WR(branch);
			NRECFBDF_WR(branch);
		}
	}
}

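/*
 * Capture the non-fatal FBD error registers for the faulting branch and
 * channel, maintain the per-channel correctable-error count snapshots
 * (the 7300 and 5400 keep four counters per channel, other chipsets a
 * single counter per branch) and clear the hardware state unless a panic
 * is imminent.
 */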
static void
log_nf_fbd_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int channel, branch;
	int t = 0;

	rp->flag = NB_REG_LOG_NF_FBD;
	rp->nb.nf_fbd_regs.ferr_nf_fbd = FERR_NF_FBD_RD(interpose);
	channel = (rp->nb.nf_fbd_regs.ferr_nf_fbd >> 28) & 3;
	branch = channel >> 1;
	rp->nb.nf_fbd_regs.nerr_nf_fbd = NERR_NF_FBD_RD(&t);
	*interpose |= t;
	rp->nb.nf_fbd_regs.redmemb = REDMEMB_RD();
	rp->nb.nf_fbd_regs.recmema = RECMEMA_RD(branch);
	rp->nb.nf_fbd_regs.recmemb = RECMEMB_RD(branch);
	rp->nb.nf_fbd_regs.recfglog = RECFGLOG_RD(branch);
	rp->nb.nf_fbd_regs.recfbda = RECFBDA_RD(branch);
	rp->nb.nf_fbd_regs.recfbdb = RECFBDB_RD(branch);
	rp->nb.nf_fbd_regs.recfbdc = RECFBDC_RD(branch);
	rp->nb.nf_fbd_regs.recfbdd = RECFBDD_RD(branch);
	rp->nb.nf_fbd_regs.recfbde = RECFBDE_RD(branch);
	rp->nb.nf_fbd_regs.recfbdf = RECFBDF_RD(branch);
	rp->nb.nf_fbd_regs.spcps = SPCPS_RD(branch);
	rp->nb.nf_fbd_regs.spcpc = SPCPC_RD(branch);
	if (nb_chipset == INTEL_NB_7300 || nb_chipset == INTEL_NB_5400) {
		rp->nb.nf_fbd_regs.cerrcnta = CERRCNTA_RD(branch, channel);
		rp->nb.nf_fbd_regs.cerrcntb = CERRCNTB_RD(branch, channel);
		rp->nb.nf_fbd_regs.cerrcntc = CERRCNTC_RD(branch, channel);
		rp->nb.nf_fbd_regs.cerrcntd = CERRCNTD_RD(branch, channel);
	} else {
		rp->nb.nf_fbd_regs.cerrcnta = CERRCNT_RD(branch);
		rp->nb.nf_fbd_regs.cerrcntb = 0;
		rp->nb.nf_fbd_regs.cerrcntc = 0;
		rp->nb.nf_fbd_regs.cerrcntd = 0;
	}
	rp->nb.nf_fbd_regs.cerrcnta_last = cerrcnta[branch][channel & 1];
	rp->nb.nf_fbd_regs.cerrcntb_last = cerrcntb[branch][channel & 1];
	rp->nb.nf_fbd_regs.cerrcntc_last = cerrcntc[branch][channel & 1];
	rp->nb.nf_fbd_regs.cerrcntd_last = cerrcntd[branch][channel & 1];
	cerrcnta[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcnta;
	cerrcntb[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntb;
	cerrcntc[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntc;
	cerrcntd[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntd;
	rp->nb.nf_fbd_regs.badrama = BADRAMA_RD(branch);
	rp->nb.nf_fbd_regs.badramb = BADRAMB_RD(branch);
	rp->nb.nf_fbd_regs.badcnt = BADCNT_RD(branch);
	if (!willpanic) {
		if (rp->nb.nf_fbd_regs.ferr_nf_fbd || *interpose)
			FERR_NF_FBD_WR(rp->nb.nf_fbd_regs.ferr_nf_fbd);
		if (rp->nb.nf_fbd_regs.nerr_nf_fbd)
			NERR_NF_FBD_WR(rp->nb.nf_fbd_regs.nerr_nf_fbd);
		/*
		 * if interposing, write the read-only registers to clear
		 * them from the pcii cache
		 */
		if (*interpose) {
			RECMEMA_WR(branch);
			RECMEMB_WR(branch);
			RECFGLOG_WR(branch);
			RECFBDA_WR(branch);
			RECFBDB_WR(branch);
			RECFBDC_WR(branch);
			RECFBDD_WR(branch);
			RECFBDE_WR(branch);
			RECFBDF_WR(branch);
			SPCPS_WR(branch);
		}
	}
}

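/*
 * Log the first error indicated by the global FERR register: dispatch to
 * the per-unit capture routine, clear the handled bits from the caller's
 * NERR copy, tag the logout as "inject" when an interposed value was read,
 * and queue an ereport unless the error turned out to be spurious.
 */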
static void
log_ferr(uint64_t ferr, uint32_t *nerrp, nb_logout_t *log, int willpanic)
{
	nb_regs_t *rp = &log->nb_regs;
	uint32_t nerr = *nerrp;
	int interpose = 0;
	int spurious = 0;

	log->acl_timestamp = gethrtime_waitfree();
	if ((ferr & (GE_PCIEX_FATAL | GE_PCIEX_NF)) != 0) {
		log_pex_err(ferr, rp, willpanic, &interpose);
		*nerrp = nerr & ~(GE_PCIEX_FATAL | GE_PCIEX_NF);
	} else if ((ferr & GE_FBD_FATAL) != 0) {
		log_fat_fbd_err(rp, willpanic, &interpose);
		*nerrp = nerr & ~GE_NERR_FBD_FATAL;
	} else if ((ferr & GE_FBD_NF) != 0) {
		log_nf_fbd_err(rp, willpanic, &interpose);
		*nerrp = nerr & ~GE_NERR_FBD_NF;
	} else if ((ferr & (GE_FERR_FSB_FATAL | GE_FERR_FSB_NF)) != 0) {
		log_fsb_err(ferr, rp, willpanic, &interpose);
		*nerrp = nerr & ~(GE_NERR_FSB_FATAL | GE_NERR_FSB_NF);
	} else if ((ferr & (GE_DMA_FATAL | GE_DMA_NF)) != 0) {
		log_dma_err(rp, &interpose);
		*nerrp = nerr & ~(GE_DMA_FATAL | GE_DMA_NF);
	} else if ((ferr & (GE_INT_FATAL | GE_INT_NF)) != 0) {
		spurious = log_int_err(rp, willpanic, &interpose);
		*nerrp = nerr & ~(GE_INT_FATAL | GE_INT_NF);
	} else if (nb_chipset == INTEL_NB_5400 &&
	    (ferr & (GE_FERR_THERMAL_FATAL | GE_FERR_THERMAL_NF)) != 0) {
		log_thermal_err(rp, willpanic, &interpose);
		*nerrp = nerr & ~(GE_FERR_THERMAL_FATAL | GE_FERR_THERMAL_NF);
	}
	if (interpose)
		log->type = "inject";
	else
		log->type = "error";
	if (!spurious) {
		errorq_dispatch(nb_queue, log, sizeof (nb_logout_t),
		    willpanic ? ERRORQ_SYNC : ERRORQ_ASYNC);
	}
}

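/*
 * Log an error recorded only in the global NERR register (a second or
 * subsequent error), clearing the handled bits from the caller's copy and
 * queueing an ereport unless the error was spurious.
 */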
static void
log_nerr(uint32_t *errp, nb_logout_t *log, int willpanic)
{
	uint32_t err;
	nb_regs_t *rp = &log->nb_regs;
	int interpose = 0;
	int spurious = 0;

	err = *errp;
	log->acl_timestamp = gethrtime_waitfree();
	if ((err & (GE_PCIEX_FATAL | GE_PCIEX_NF)) != 0) {
		log_pex_err(err, rp, willpanic, &interpose);
		*errp = err & ~(GE_PCIEX_FATAL | GE_PCIEX_NF);
	} else if ((err & GE_NERR_FBD_FATAL) != 0) {
		log_fat_fbd_err(rp, willpanic, &interpose);
		*errp = err & ~GE_NERR_FBD_FATAL;
	} else if ((err & GE_NERR_FBD_NF) != 0) {
		log_nf_fbd_err(rp, willpanic, &interpose);
		*errp = err & ~GE_NERR_FBD_NF;
	} else if ((err & (GE_NERR_FSB_FATAL | GE_NERR_FSB_NF)) != 0) {
		log_fsb_err(GE_NERR_TO_FERR_FSB(err), rp, willpanic,
		    &interpose);
		*errp = err & ~(GE_NERR_FSB_FATAL | GE_NERR_FSB_NF);
	} else if ((err & (GE_DMA_FATAL | GE_DMA_NF)) != 0) {
		log_dma_err(rp, &interpose);
		*errp = err & ~(GE_DMA_FATAL | GE_DMA_NF);
	} else if ((err & (GE_INT_FATAL | GE_INT_NF)) != 0) {
		spurious = log_int_err(rp, willpanic, &interpose);
		*errp = err & ~(GE_INT_FATAL | GE_INT_NF);
	}
	if (interpose)
		log->type = "inject";
	else
		log->type = "error";
	if (!spurious) {
		errorq_dispatch(nb_queue, log, sizeof (nb_logout_t),
		    willpanic ? ERRORQ_SYNC : ERRORQ_ASYNC);
	}
}

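/*
 * Main error entry point: walk the global FERR/NERR registers, logging and
 * clearing each reported error.  Bails out if nb_mutex is already held,
 * and calls nb_mask_mc_reset() when no errors were found and machine-check
 * masks had previously been set.
 */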
/*ARGSUSED*/
void
nb_error_trap(cmi_hdl_t hdl, boolean_t ismc, boolean_t willpanic)
{
	uint64_t ferr;
	uint32_t nerr, err;
	int nmc = 0;
	int i;

	if (mutex_tryenter(&nb_mutex) == 0)
		return;

	nerr = NERR_GLOBAL_RD();
	err = nerr;
	for (i = 0; i < NB_MAX_ERRORS; i++) {
		ferr = FERR_GLOBAL_RD();
		nb_log.nb_regs.chipset = nb_chipset;
		nb_log.nb_regs.ferr = ferr;
		nb_log.nb_regs.nerr = nerr;
		if (ferr) {
			log_ferr(ferr, &err, &nb_log, willpanic);
			FERR_GLOBAL_WR(ferr);
			nmc++;
		} else if (err) {
			log_nerr(&err, &nb_log, willpanic);
			nmc++;
		}
	}
	if (nerr) {
		NERR_GLOBAL_WR(nerr);
	}
	if (nmc == 0 && nb_mask_mc_set)
		nb_mask_mc_reset();
	mutex_exit(&nb_mutex);
}

static void
nb_fsb_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	int intel_error_list;
	char buf[32];

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FSB,
	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.fsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_FSB,
	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.ferr_fat_fsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_FSB,
	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.nerr_fat_fsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_FSB,
	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.ferr_nf_fsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_FSB,
	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.nerr_nf_fsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFSB,
	    DATA_TYPE_UINT32, nb_regs->nb.fsb_regs.nrecfsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFSB_ADDR,
	    DATA_TYPE_UINT64, nb_regs->nb.fsb_regs.nrecfsb_addr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFSB,
	    DATA_TYPE_UINT32, nb_regs->nb.fsb_regs.recfsb, NULL);
	intel_error_list = data->intel_error_list;
	if (intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "F%d", intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}

static void
nb_pex_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	int intel_error_list;
	char buf[32];

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX,
	    DATA_TYPE_UINT8, nb_regs->nb.pex_regs.pex, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_FAT_FERR,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_fat_ferr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_FAT_NERR,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_fat_nerr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_NF_CORR_FERR,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_nf_corr_ferr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_NF_CORR_NERR,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_nf_corr_nerr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UNCERRSEV,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.uncerrsev, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RPERRSTS,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.rperrsts, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RPERRSID,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.rperrsid, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UNCERRSTS,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.uncerrsts, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_AERRCAPCTRL,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.aerrcapctrl, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CORERRSTS,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.corerrsts, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEXDEVSTS,
	    DATA_TYPE_UINT16, nb_regs->nb.pex_regs.pexdevsts, NULL);
	intel_error_list = data->intel_error_list;
	if (intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "IO%d", intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}

static void
nb_int_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	int intel_error_list;
	char buf[32];

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_INT,
	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.ferr_fat_int, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_INT,
	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.ferr_nf_int, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_INT,
	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.nerr_fat_int, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_INT,
	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.nerr_nf_int, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECINT,
	    DATA_TYPE_UINT32, nb_regs->nb.int_regs.nrecint, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECINT,
	    DATA_TYPE_UINT32, nb_regs->nb.int_regs.recint, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECSF,
	    DATA_TYPE_UINT64, nb_regs->nb.int_regs.nrecsf, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECSF,
	    DATA_TYPE_UINT64, nb_regs->nb.int_regs.recsf, NULL);
	intel_error_list = data->intel_error_list;
	if (intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "B%d", intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}

static void
nb_fat_fbd_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	nb_mem_scatchpad_t *sp;
	char buf[32];

	sp = &((nb_scatchpad_t *)data)->ms;

	if (sp->ras != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
		    DATA_TYPE_INT32, sp->bank, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
		    DATA_TYPE_INT32, sp->cas, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
		    DATA_TYPE_INT32, sp->ras, NULL);
		if (sp->offset != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
			    DATA_TYPE_UINT64, sp->offset, NULL);
		}
		if (sp->pa != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
			    DATA_TYPE_UINT64, sp->pa, NULL);
		}
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.ferr_fat_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nerr_fat_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFGLOG,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfglog, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDA,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbda, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDB,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDC,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdc, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDD,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDE,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbde, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDF,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdf, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
	    DATA_TYPE_UINT8, nb_regs->nb.fat_fbd_regs.spcps, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.spcpc, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UERRCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.uerrcnt, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UERRCNT_LAST,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.uerrcnt_last, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMA,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.badrama, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMB,
	    DATA_TYPE_UINT16, nb_regs->nb.fat_fbd_regs.badramb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.badcnt, NULL);

	if (sp->intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}

static void
nb_nf_fbd_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	nb_mem_scatchpad_t *sp;
	char buf[32];

	sp = &((nb_scatchpad_t *)data)->ms;

	if (sp->dimm == -1 && sp->rank != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RANK,
		    DATA_TYPE_INT32, sp->rank, NULL);
	}
	if (sp->ras != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
		    DATA_TYPE_INT32, sp->bank, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
		    DATA_TYPE_INT32, sp->cas, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
		    DATA_TYPE_INT32, sp->ras, NULL);
		if (sp->offset != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
			    DATA_TYPE_UINT64, sp->offset, NULL);
		}
		if (sp->pa != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
			    DATA_TYPE_UINT64, sp->pa, NULL);
		}
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.ferr_nf_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.nerr_nf_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFGLOG,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfglog, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbda, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDC,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdc, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDD,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDE,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbde, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDF,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdf, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
	    DATA_TYPE_UINT8, nb_regs->nb.nf_fbd_regs.spcps, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.spcpc, NULL);
	if (nb_chipset == INTEL_NB_7300 || nb_chipset == INTEL_NB_5400) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTA,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTB,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntb, NULL);
		if (nb_chipset == INTEL_NB_7300) {
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTC,
			    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntc,
			    NULL);
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTD,
			    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntd,
			    NULL);
		}
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTA_LAST,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta_last,
		    NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTB_LAST,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntb_last,
		    NULL);
		if (nb_chipset == INTEL_NB_7300) {
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTC_LAST,
			    DATA_TYPE_UINT32,
			    nb_regs->nb.nf_fbd_regs.cerrcntc_last, NULL);
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTD_LAST,
			    DATA_TYPE_UINT32,
			    nb_regs->nb.nf_fbd_regs.cerrcntd_last, NULL);
		}
	} else {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_LAST,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta_last,
		    NULL);
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.badrama, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMB,
	    DATA_TYPE_UINT16, nb_regs->nb.nf_fbd_regs.badramb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.badcnt, NULL);

	if (sp->intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}

static void
nb_dma_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload)
{
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PCISTS,
	    DATA_TYPE_UINT16, nb_regs->nb.dma_regs.pcists, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEXDEVSTS,
	    DATA_TYPE_UINT16, nb_regs->nb.dma_regs.pexdevsts, NULL);
}

static void
nb_thr_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	char buf[32];

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_THR,
	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ferr_fat_thr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_THR,
	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.nerr_fat_thr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_THR,
	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ferr_nf_thr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_THR,
	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.nerr_nf_thr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CTSTS,
	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ctsts, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_THRTSTS,
	    DATA_TYPE_UINT16, nb_regs->nb.thr_regs.thrtsts, NULL);
	if (data->intel_error_list >= 0) {
		(void) snprintf(buf, sizeof (buf), "TH%d",
		    data->intel_error_list);
	} else {
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}

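/*
 * Add the captured register set to the ereport payload, selecting the
 * formatter that matches the bank of registers that was logged.
 */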
static void
nb_ereport_add_logout(nvlist_t *payload, const nb_logout_t *acl,
    nb_scatchpad_t *data)
{
	const nb_regs_t *nb_regs = &acl->nb_regs;

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_MC_TYPE,
	    DATA_TYPE_STRING, acl->type, NULL);
	switch (nb_regs->flag) {
	case NB_REG_LOG_FSB:
		nb_fsb_err_payload(nb_regs, payload, data);
		break;
	case NB_REG_LOG_PEX:
		nb_pex_err_payload(nb_regs, payload, data);
		break;
	case NB_REG_LOG_INT:
		nb_int_err_payload(nb_regs, payload, data);
		break;
	case NB_REG_LOG_FAT_FBD:
		nb_fat_fbd_err_payload(nb_regs, payload, data);
		break;
	case NB_REG_LOG_NF_FBD:
		nb_nf_fbd_err_payload(nb_regs, payload, data);
		break;
	case NB_REG_LOG_DMA:
		nb_dma_err_payload(nb_regs, payload);
		break;
	case NB_REG_LOG_THR:
		nb_thr_err_payload(nb_regs, payload, data);
		break;
	default:
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_GLOBAL,
		    DATA_TYPE_UINT64, nb_regs->ferr, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_GLOBAL,
		    DATA_TYPE_UINT32, nb_regs->nerr, NULL);
		break;
	}
}

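/*
 * Build the detector FMRI and ereport class for an FSB error and record
 * the Intel error-list entry (decoded from FERR, or from NERR when FERR is
 * clear) in the scratchpad.
 */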
1525 void
1526 nb_fsb_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1527     nb_scatchpad_t *data)
1528 {
1529 	int chip;
1530 
1531 	if (nb_chipset == INTEL_NB_7300)
1532 		chip = nb_regs->nb.fsb_regs.fsb * 2;
1533 	else
1534 		chip = nb_regs->nb.fsb_regs.fsb;
1535 	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
1536 	    "motherboard", 0, "chip", chip);
1537 
1538 	if (nb_regs->nb.fsb_regs.ferr_fat_fsb == 0 &&
1539 	    nb_regs->nb.fsb_regs.ferr_nf_fsb == 0) {
1540 		data->intel_error_list = intel_fsb_err(nb_regs->nb.fsb_regs.fsb,
1541 		    nb_regs->nb.fsb_regs.nerr_fat_fsb,
1542 		    nb_regs->nb.fsb_regs.nerr_nf_fsb);
1543 	} else {
1544 		data->intel_error_list = intel_fsb_err(nb_regs->nb.fsb_regs.fsb,
1545 		    nb_regs->nb.fsb_regs.ferr_fat_fsb,
1546 		    nb_regs->nb.fsb_regs.ferr_nf_fsb);
1547 	}
1548 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1549 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "fsb");
1550 }
1551 
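/*
 * PCI Express (or ESI) error: pex 0 is the ESI port and is blamed on
 * the motherboard alone; other ports get a motherboard/hostbridge
 * detector.  The 5400 chipset has its own error-list decoder.  As with
 * the FSB case, fall back to the NERR registers when no first error is
 * latched.
 */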
1552 void
1553 nb_pex_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1554     nb_scatchpad_t *data)
1555 {
1556 	int hostbridge;
1557 
1558 	if (nb_regs->nb.pex_regs.pex == 0) {
1559 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1560 		    "motherboard", 0);
1561 	} else {
1562 		hostbridge = nb_regs->nb.pex_regs.pex - 1;
1563 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
1564 		    "motherboard", 0,
1565 		    "hostbridge", hostbridge);
1566 	}
1567 
1568 	if (nb_regs->nb.pex_regs.pex_fat_ferr == 0 &&
1569 	    nb_regs->nb.pex_regs.pex_nf_corr_ferr == 0) {
1570 		if (nb_chipset == INTEL_NB_5400) {
1571 			data->intel_error_list =
1572 			    intel_pex_5400_err(
1573 			    nb_regs->nb.pex_regs.pex_fat_nerr,
1574 			    nb_regs->nb.pex_regs.pex_nf_corr_nerr);
1575 		} else {
1576 			data->intel_error_list =
1577 			    intel_pex_err(nb_regs->nb.pex_regs.pex_fat_nerr,
1578 			    nb_regs->nb.pex_regs.pex_nf_corr_nerr);
1579 		}
1580 	} else {
1581 		if (nb_chipset == INTEL_NB_5400) {
1582 			data->intel_error_list =
1583 			    intel_pex_5400_err(
1584 			    nb_regs->nb.pex_regs.pex_fat_ferr,
1585 			    nb_regs->nb.pex_regs.pex_nf_corr_ferr);
1586 		} else {
1587 			data->intel_error_list =
1588 			    intel_pex_err(nb_regs->nb.pex_regs.pex_fat_ferr,
1589 			    nb_regs->nb.pex_regs.pex_nf_corr_ferr);
1590 		}
1591 	}
1592 
1593 	if (nb_regs->nb.pex_regs.pex == 0) {
1594 		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1595 		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "esi");
1596 	} else {
1597 		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1598 		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "pex");
1599 	}
1600 }
1601 
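/*
 * Internal northbridge error: blamed on the motherboard, with the
 * error-list entry decoded from the internal FERR (or NERR) registers.
 */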
1602 void
1603 nb_int_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1604     void *data)
1605 {
1606 	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1607 	    "motherboard", 0);
1608 
1609 	if (nb_regs->nb.int_regs.ferr_fat_int == 0 &&
1610 	    nb_regs->nb.int_regs.ferr_nf_int == 0) {
1611 		((nb_scatchpad_t *)data)->intel_error_list =
1612 		    intel_int_err(nb_regs->nb.int_regs.nerr_fat_int,
1613 		    nb_regs->nb.int_regs.nerr_nf_int);
1614 	} else {
1615 		((nb_scatchpad_t *)data)->intel_error_list =
1616 		    intel_int_err(nb_regs->nb.int_regs.ferr_fat_int,
1617 		    nb_regs->nb.int_regs.ferr_nf_int);
1618 	}
1619 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1620 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "ie");
1621 }
1622 
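/*
 * Fatal FB-DIMM memory error: fat_memory_error() fills in the memory
 * scratchpad and returns the ereport class suffix.  The detector is
 * built at the finest resolution that was decoded: dimm/rank, then
 * dram-channel, then memory-controller (branch), then just the
 * motherboard.
 */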
1623 void
1624 nb_fat_fbd_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1625     void *data)
1626 {
1627 	char *intr;
1628 	nb_mem_scatchpad_t *sp;
1629 
1630 	intr = fat_memory_error(nb_regs, data);
1631 	sp = &((nb_scatchpad_t *)data)->ms;
1632 
1633 	if (sp->dimm != -1) {
1634 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
1635 		    "motherboard", 0,
1636 		    "memory-controller", sp->branch,
1637 		    "dram-channel", sp->channel,
1638 		    "dimm", sp->dimm,
1639 		    "rank", sp->rank);
1640 	} else if (sp->channel != -1) {
1641 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
1642 		    "motherboard", 0,
1643 		    "memory-controller", sp->branch,
1644 		    "dram-channel", sp->channel);
1645 	} else if (sp->branch != -1) {
1646 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
1647 		    "motherboard", 0,
1648 		    "memory-controller", sp->branch);
1649 	} else {
1650 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1651 		    "motherboard", 0);
1652 	}
1653 
1654 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
1655 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
1656 }
1657 
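/*
 * Non-fatal FB-DIMM memory error: as for the fatal case, but the
 * registers are decoded by nf_memory_error().
 */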
1658 void
1659 nb_nf_fbd_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1660     void *data)
1661 {
1662 	char *intr;
1663 	nb_mem_scatchpad_t *sp;
1664 
1665 	intr = nf_memory_error(nb_regs, data);
1666 	sp = &((nb_scatchpad_t *)data)->ms;
1667 
1668 	if (sp->dimm != -1) {
1669 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
1670 		    "motherboard", 0,
1671 		    "memory-controller", sp->branch,
1672 		    "dram-channel", sp->channel,
1673 		    "dimm", sp->dimm,
1674 		    "rank", sp->rank);
1675 	} else if (sp->channel != -1) {
1676 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
1677 		    "motherboard", 0,
1678 		    "memory-controller", sp->branch,
1679 		    "dram-channel", sp->channel);
1680 	} else if (sp->branch != -1) {
1681 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
1682 		    "motherboard", 0,
1683 		    "memory-controller", sp->branch);
1684 	} else {
1685 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1686 		    "motherboard", 0);
1687 	}
1688 
1689 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
1690 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
1691 }
1692 
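/*
 * DMA engine error: blamed on the motherboard.
 */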
1693 void
1694 nb_dma_report(char *class, nvlist_t *detector)
1695 {
1696 	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1697 	    "motherboard", 0);
1698 
1699 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1700 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "dma");
1701 }
1702 
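/*
 * Thermal error: decode the error-list entry from the thermal FERR
 * registers and blame the motherboard.
 */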
1703 void
1704 nb_thr_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1705     void *data)
1706 {
1707 	((nb_scatchpad_t *)data)->intel_error_list =
1708 	    intel_thr_err(nb_regs->nb.thr_regs.ferr_fat_thr,
1709 	    nb_regs->nb.thr_regs.ferr_nf_thr);
1710 	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1711 	    "motherboard", 0);
1712 
1713 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1714 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "otf");
1715 }
1716 
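/*
 * Build the detector FMRI and ereport class for a logged northbridge
 * error, dispatching on the type of error in the logout area.
 */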
1718 nvlist_t *
1719 nb_report(const nb_regs_t *nb_regs, char *class, nv_alloc_t *nva, void *scratch)
1720 {
1721 	nvlist_t *detector = fm_nvlist_create(nva);
1722 
1723 	switch (nb_regs->flag) {
1724 	case NB_REG_LOG_FSB:
1725 		nb_fsb_report(nb_regs, class, detector, scratch);
1726 		break;
1727 	case NB_REG_LOG_PEX:
1728 		nb_pex_report(nb_regs, class, detector, scratch);
1729 		break;
1730 	case NB_REG_LOG_INT:
1731 		nb_int_report(nb_regs, class, detector, scratch);
1732 		break;
1733 	case NB_REG_LOG_FAT_FBD:
1734 		nb_fat_fbd_report(nb_regs, class, detector, scratch);
1735 		break;
1736 	case NB_REG_LOG_NF_FBD:
1737 		nb_nf_fbd_report(nb_regs, class, detector, scratch);
1738 		break;
1739 	case NB_REG_LOG_DMA:
1740 		nb_dma_report(class, detector);
1741 		break;
1742 	case NB_REG_LOG_THR:
1743 		nb_thr_report(nb_regs, class, detector, scratch);
1744 		break;
1745 	default:
1746 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1747 		    "motherboard", 0);
1748 
1749 		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1750 		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "unknown");
1751 	}
1752 	return (detector);
1753 }
1754 
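/*
 * Errorq drain callback: turn a logged northbridge error into an
 * ereport and post it.  In panic context the ereport is built inside
 * pre-reserved errorq elements (with a second element reserved for
 * nvlist scratch space when possible) and committed synchronously;
 * otherwise it is allocated normally and posted to the event channel.
 */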
1755 /*ARGSUSED*/
1756 void
1757 nb_drain(void *ignored, const void *data, const errorq_elem_t *eqe)
1758 {
1759 	nb_logout_t *acl = (nb_logout_t *)data;
1760 	errorq_elem_t *eqep, *scr_eqep;
1761 	nvlist_t *ereport, *detector;
1762 	nv_alloc_t *nva = NULL;
1763 	char buf[FM_MAX_CLASS];
1764 	nb_scatchpad_t nb_scatchpad;
1765 
1766 	if (panicstr) {
1767 		if ((eqep = errorq_reserve(ereport_errorq)) == NULL)
1768 			return;
1769 		ereport = errorq_elem_nvl(ereport_errorq, eqep);
		/*
		 * Now try to allocate another element for scratch space and
		 * use that for further scratch space (e.g., for constructing
		 * nvlists to add to the main ereport).  If we can't reserve
		 * a scratch element, just fall back to working within the
		 * element we already have, and hope for the best.  All this
		 * is necessary because the fixed-buffer nv allocator does
		 * not reclaim freed space and nvlist construction is
		 * expensive.
		 */
1780 		if ((scr_eqep = errorq_reserve(ereport_errorq)) != NULL)
1781 			nva = errorq_elem_nva(ereport_errorq, scr_eqep);
1782 		else
1783 			nva = errorq_elem_nva(ereport_errorq, eqep);
1784 	} else {
1785 		ereport = fm_nvlist_create(NULL);
1786 	}
1787 	detector = nb_report(&acl->nb_regs, buf, nva, &nb_scatchpad);
1788 	if (detector == NULL)
1789 		return;
1790 	fm_ereport_set(ereport, FM_EREPORT_VERSION, buf,
1791 	    fm_ena_generate(acl->acl_timestamp, FM_ENA_FMT1), detector, NULL);
1792 	/*
1793 	 * We're done with 'detector' so reclaim the scratch space.
1794 	 */
1795 	if (panicstr) {
1796 		fm_nvlist_destroy(detector, FM_NVA_RETAIN);
1797 		nv_alloc_reset(nva);
1798 	} else {
1799 		fm_nvlist_destroy(detector, FM_NVA_FREE);
1800 	}
1801 
1802 	/*
1803 	 * Encode the error-specific data that was saved in the logout area.
1804 	 */
1805 	nb_ereport_add_logout(ereport, acl, &nb_scatchpad);
1806 
1807 	if (panicstr) {
1808 		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
1809 		if (scr_eqep)
1810 			errorq_cancel(ereport_errorq, scr_eqep);
1811 	} else {
1812 		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
1813 		fm_nvlist_destroy(ereport, FM_NVA_FREE);
1814 	}
1815 }
1816