1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/cmn_err.h>
29 #include <sys/errno.h>
30 #include <sys/log.h>
31 #include <sys/systm.h>
32 #include <sys/modctl.h>
33 #include <sys/errorq.h>
34 #include <sys/controlregs.h>
35 #include <sys/fm/util.h>
36 #include <sys/fm/protocol.h>
37 #include <sys/sysevent.h>
38 #include <sys/pghw.h>
39 #include <sys/cyclic.h>
40 #include <sys/pci_cfgspace.h>
41 #include <sys/mc_intel.h>
42 #include <sys/smbios.h>
43 #include "nb5000.h"
44 #include "nb_log.h"
45 #include "dimm_phys.h"
46 
47 int nb_check_validlog = 1;
48 
49 static uint32_t uerrcnt[2];
50 static uint32_t cerrcnta[2][2];
51 static uint32_t cerrcntb[2][2];
52 static uint32_t cerrcntc[2][2];
53 static uint32_t cerrcntd[2][2];
54 static nb_logout_t nb_log;
55 
56 struct mch_error_code {
57 	int intel_error_list;	/* error number in Chipset Error List */
58 	uint32_t emask;		/* mask for machine check */
59 	uint32_t error_bit;	/* error bit in fault register */
60 };
61 
62 static struct mch_error_code fat_fbd_error_code[] = {
63 	{ 23, EMASK_FBD_M23, ERR_FAT_FBD_M23 },
64 	{ 3, EMASK_FBD_M3, ERR_FAT_FBD_M3 },
65 	{ 2, EMASK_FBD_M2, ERR_FAT_FBD_M2 },
66 	{ 1, EMASK_FBD_M1, ERR_FAT_FBD_M1 }
67 };
68 
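/*
 * Map the asserted fatal FB-DIMM error bits to their Intel error-list
 * number and mask further machine checks for those errors.  Returns -1
 * if no bit, or more than one bit, is recognized.
 */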
69 static int
70 intel_fat_fbd_err(uint32_t fat_fbd)
71 {
72 	int rt = -1;
73 	int nerr = 0;
74 	uint32_t emask_fbd = 0;
75 	int i;
76 	int sz;
77 
78 	sz = sizeof (fat_fbd_error_code) / sizeof (struct mch_error_code);
79 
80 	for (i = 0; i < sz; i++) {
81 		if (fat_fbd & fat_fbd_error_code[i].error_bit) {
82 			rt = fat_fbd_error_code[i].intel_error_list;
83 			emask_fbd |= fat_fbd_error_code[i].emask;
84 			nerr++;
85 		}
86 	}
87 
88 	if (emask_fbd)
89 		nb_fbd_mask_mc(emask_fbd);
90 	if (nerr > 1)
91 		rt = -1;
92 	return (rt);
93 }
94 
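/*
 * Decode the fatal FB-DIMM error registers into the memory scratchpad
 * (branch, channel, rank, dimm, bank, ras/cas, physical address) and
 * return the ereport class suffix for the detected error.
 */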
95 static char *
96 fat_memory_error(const nb_regs_t *rp, void *data)
97 {
98 	int channel;
99 	uint32_t ferr_fat_fbd, nrecmemb;
100 	uint32_t nrecmema;
101 	char *intr = "nb.unknown";
102 	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;
103 
104 	ferr_fat_fbd = rp->nb.fat_fbd_regs.ferr_fat_fbd;
105 	if ((ferr_fat_fbd & ERR_FAT_FBD_MASK) == 0) {
106 		sp->intel_error_list =
107 		    intel_fat_fbd_err(rp->nb.fat_fbd_regs.nerr_fat_fbd);
108 		sp->branch = -1;
109 		sp->channel = -1;
110 		sp->rank = -1;
111 		sp->dimm = -1;
112 		sp->bank = -1;
113 		sp->cas = -1;
114 		sp->ras = -1;
115 		sp->pa = -1LL;
116 		sp->offset = -1;
117 		return (intr);
118 	}
119 	sp->intel_error_list = intel_fat_fbd_err(ferr_fat_fbd);
120 	channel = (ferr_fat_fbd >> 28) & 3;
121 	sp->branch = channel >> 1;
122 	sp->channel = channel;
123 	if ((ferr_fat_fbd & (ERR_FAT_FBD_M2|ERR_FAT_FBD_M1)) != 0) {
124 		if ((ferr_fat_fbd & ERR_FAT_FBD_M1) != 0)
125 			intr = "nb.fbd.alert";	/* Alert on FB-DIMM M1 */
126 		else
127 			intr = "nb.fbd.crc";	/* CRC error FB_DIMM M2 */
128 		nrecmema = rp->nb.fat_fbd_regs.nrecmema;
129 		nrecmemb = rp->nb.fat_fbd_regs.nrecmemb;
130 		sp->rank = (nrecmema >> 8) & RANK_MASK;
131 		sp->dimm = sp->rank >> 1;
132 		sp->bank = (nrecmema >> 12) & BANK_MASK;
133 		sp->cas = (nrecmemb >> 16) & CAS_MASK;
134 		sp->ras = nrecmemb & RAS_MASK;
135 		sp->pa = dimm_getphys(sp->branch, sp->rank, sp->bank, sp->ras,
136 		    sp->cas);
137 		sp->offset = dimm_getoffset(sp->branch, sp->rank, sp->bank,
138 		    sp->ras, sp->cas);
139 	} else {
140 		if ((ferr_fat_fbd & ERR_FAT_FBD_M3) != 0)
141 			intr = "nb.fbd.otf";	/* thermal temp > Tmid M3 */
142 		else if ((ferr_fat_fbd & ERR_FAT_FBD_M23) != 0) {
143 			intr = "nb.fbd.reset_timeout";
144 			sp->channel = -1;
145 		}
146 		sp->rank = -1;
147 		sp->dimm = -1;
148 		sp->bank = -1;
149 		sp->cas = -1;
150 		sp->ras = -1;
151 		sp->pa = -1LL;
152 		sp->offset = -1;
153 	}
154 	return (intr);
155 }
156 
157 
158 static struct mch_error_code nf_fbd_error_code[] = {
159 	{ 29, EMASK_FBD_M29, ERR_NF_FBD_M29 },
160 	{ 28, EMASK_FBD_M28, ERR_NF_FBD_M28 },
161 	{ 27, EMASK_FBD_M27, ERR_NF_FBD_M27 },
162 	{ 26, EMASK_FBD_M26, ERR_NF_FBD_M26 },
163 	{ 25, EMASK_FBD_M25, ERR_NF_FBD_M25 },
164 	{ 24, EMASK_FBD_M24, ERR_NF_FBD_M24 },
165 	{ 22, EMASK_FBD_M22, ERR_NF_FBD_M22 },
166 	{ 21, EMASK_FBD_M21, ERR_NF_FBD_M21 },
167 	{ 20, EMASK_FBD_M20, ERR_NF_FBD_M20 },
168 	{ 19, EMASK_FBD_M19, ERR_NF_FBD_M19 },
169 	{ 18, EMASK_FBD_M18, ERR_NF_FBD_M18 },
170 	{ 17, EMASK_FBD_M17, ERR_NF_FBD_M17 },
171 	{ 16, EMASK_FBD_M16, ERR_NF_FBD_M16 },
172 	{ 15, EMASK_FBD_M15, ERR_NF_FBD_M15 },
173 	{ 14, EMASK_FBD_M14, ERR_NF_FBD_M14 },
174 	{ 13, EMASK_FBD_M13, ERR_NF_FBD_M13 },
175 	{ 12, EMASK_FBD_M12, ERR_NF_FBD_M12 },
176 	{ 11, EMASK_FBD_M11, ERR_NF_FBD_M11 },
177 	{ 10, EMASK_FBD_M10, ERR_NF_FBD_M10 },
178 	{ 9, EMASK_FBD_M9, ERR_NF_FBD_M9 },
179 	{ 8, EMASK_FBD_M8, ERR_NF_FBD_M8 },
180 	{ 7, EMASK_FBD_M7, ERR_NF_FBD_M7 },
181 	{ 6, EMASK_FBD_M6, ERR_NF_FBD_M6 },
182 	{ 5, EMASK_FBD_M5, ERR_NF_FBD_M5 },
183 	{ 4, EMASK_FBD_M4, ERR_NF_FBD_M4 }
184 };
185 
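/*
 * Map the asserted non-fatal FB-DIMM error bits to their Intel error-list
 * number and mask further machine checks for those errors.
 */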
186 static int
187 intel_nf_fbd_err(uint32_t nf_fbd)
188 {
189 	int rt = -1;
190 	int nerr = 0;
191 	uint32_t emask_fbd = 0;
192 	int i;
193 	int sz;
194 
195 	sz = sizeof (nf_fbd_error_code) / sizeof (struct mch_error_code);
196 
197 	for (i = 0; i < sz; i++) {
198 		if (nf_fbd & nf_fbd_error_code[i].error_bit) {
199 			rt = nf_fbd_error_code[i].intel_error_list;
200 			emask_fbd |= nf_fbd_error_code[i].emask;
201 			nerr++;
202 		}
203 	}
204 	if (emask_fbd)
205 		nb_fbd_mask_mc(emask_fbd);
206 	if (nerr > 1)
207 		rt = -1;
208 	return (rt);
209 }
210 
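/*
 * Decode the non-fatal FB-DIMM error registers into the memory scratchpad
 * and return the ereport class suffix (correctable/uncorrectable ECC,
 * memory alert, spare-copy, SPD protocol, ...).
 */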
211 static char *
212 nf_memory_error(const nb_regs_t *rp, void *data)
213 {
214 	uint32_t ferr_nf_fbd, recmemb, redmemb;
215 	uint32_t recmema;
216 	int branch, channel, ecc_locator;
217 	char *intr = "nb.unknown";
218 	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;
219 
220 	sp->rank = -1;
221 	sp->dimm = -1;
222 	sp->bank = -1;
223 	sp->cas = -1;
224 	sp->ras = -1LL;
225 	sp->pa = -1LL;
226 	sp->offset = -1;
227 	ferr_nf_fbd = rp->nb.nf_fbd_regs.ferr_nf_fbd;
228 	if ((ferr_nf_fbd & ERR_NF_FBD_MASK) == 0) {
		/* post an unknown ereport when no recognizable error is found */
230 		sp->branch = -1;
231 		sp->channel = -1;
232 		sp->intel_error_list = -1;
233 		return (intr);
234 	}
235 	sp->intel_error_list = intel_nf_fbd_err(ferr_nf_fbd);
236 	channel = (ferr_nf_fbd >> ERR_FBD_CH_SHIFT) & 3;
237 	branch = channel >> 1;
238 	sp->branch = branch;
239 	sp->channel = channel;
240 	if (ferr_nf_fbd & ERR_NF_FBD_MASK) {
241 		if (ferr_nf_fbd & ERR_NF_FBD_ECC_UE) {
242 			/*
243 			 * uncorrectable ECC M4 - M12
244 			 * we can only isolate to pair of dimms
245 			 * for single dimm configuration let eversholt
246 			 * sort it out with out needing a special rule
247 			 */
248 			sp->channel = -1;
249 			recmema = rp->nb.nf_fbd_regs.recmema;
250 			recmemb = rp->nb.nf_fbd_regs.recmemb;
251 			sp->rank = (recmema >> 8) & RANK_MASK;
252 			sp->bank = (recmema >> 12) & BANK_MASK;
253 			sp->cas = (recmemb >> 16) & CAS_MASK;
254 			sp->ras = recmemb & RAS_MASK;
255 			intr = "nb.mem_ue";
256 		} else if (ferr_nf_fbd & ERR_NF_FBD_M13) {
257 			/*
258 			 * write error M13
259 			 * we can only isolate to pair of dimms
260 			 */
261 			sp->channel = -1;
262 			if (nb_mode != NB_MEMORY_MIRROR) {
				recmema = rp->nb.nf_fbd_regs.recmema;
				recmemb = rp->nb.nf_fbd_regs.recmemb;
264 				sp->rank = (recmema >> 8) & RANK_MASK;
265 				sp->bank = (recmema >> 12) & BANK_MASK;
266 				sp->cas = (recmemb >> 16) & CAS_MASK;
267 				sp->ras = recmemb & RAS_MASK;
268 			}
269 			intr = "nb.fbd.ma"; /* memory alert */
270 		} else if (ferr_nf_fbd & ERR_NF_FBD_MA) { /* M14, M15 and M21 */
271 			intr = "nb.fbd.ch"; /* FBD on channel */
272 		} else if ((ferr_nf_fbd & ERR_NF_FBD_ECC_CE) != 0) {
273 			/* correctable ECC M17-M20 */
274 			recmema = rp->nb.nf_fbd_regs.recmema;
275 			recmemb = rp->nb.nf_fbd_regs.recmemb;
276 			sp->rank = (recmema >> 8) & RANK_MASK;
277 			redmemb = rp->nb.nf_fbd_regs.redmemb;
278 			ecc_locator = redmemb & 0x3ffff;
279 			if (ecc_locator & 0x1ff)
280 				sp->channel = branch << 1;
281 			else if (ecc_locator & 0x3fe00)
282 				sp->channel = (branch << 1) + 1;
283 			sp->dimm = sp->rank >> 1;
284 			sp->bank = (recmema >> 12) & BANK_MASK;
285 			sp->cas = (recmemb >> 16) & CAS_MASK;
286 			sp->ras = recmemb & RAS_MASK;
287 			intr = "nb.mem_ce";
288 		} else if ((ferr_nf_fbd & ERR_NF_FBD_SPARE) != 0) {
289 			/* spare dimm M27, M28 */
290 			intr = "nb.mem_ds";
291 			sp->channel = -1;
292 			if (rp->nb.nf_fbd_regs.spcps & SPCPS_SPARE_DEPLOYED) {
293 				sp->rank =
294 				    SPCPS_FAILED_RANK(rp->nb.nf_fbd_regs.spcps);
295 				nb_used_spare_rank(sp->branch, sp->rank);
296 				nb_config_gen++;
297 			}
298 		} else if ((ferr_nf_fbd & ERR_NF_FBD_M22) != 0) {
299 			intr = "nb.spd";	/* SPD protocol */
300 		}
301 	}
302 	if (sp->ras != -1) {
303 		sp->pa = dimm_getphys(sp->branch, sp->rank, sp->bank, sp->ras,
304 		    sp->cas);
305 		sp->offset = dimm_getoffset(sp->branch, sp->rank, sp->bank,
306 		    sp->ras, sp->cas);
307 	}
308 	return (intr);
309 }
310 
311 static struct mch_error_code nf_mem_error_code[] = {
312 	{ 21, EMASK_MEM_M21, ERR_NF_MEM_M21 },
313 	{ 20, EMASK_MEM_M20, ERR_NF_MEM_M20 },
314 	{ 18, EMASK_MEM_M18, ERR_NF_MEM_M18 },
315 	{ 16, EMASK_MEM_M16, ERR_NF_MEM_M16 },
316 	{ 15, EMASK_MEM_M15, ERR_NF_MEM_M15 },
317 	{ 14, EMASK_MEM_M14, ERR_NF_MEM_M14 },
318 	{ 12, EMASK_MEM_M12, ERR_NF_MEM_M12 },
319 	{ 11, EMASK_MEM_M11, ERR_NF_MEM_M11 },
320 	{ 10, EMASK_MEM_M10, ERR_NF_MEM_M10 },
321 	{ 6, EMASK_MEM_M6, ERR_NF_MEM_M6 },
322 	{ 5, EMASK_MEM_M5, ERR_NF_MEM_M5 },
323 	{ 4, EMASK_MEM_M4, ERR_NF_MEM_M4 },
324 	{ 1, EMASK_MEM_M1, ERR_NF_MEM_M1 }
325 };
326 
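/*
 * Map the asserted non-fatal DDR2 memory error bits to their Intel
 * error-list number and mask further machine checks for those errors.
 */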
327 static int
328 intel_nf_mem_err(uint32_t nf_mem)
329 {
330 	int rt = -1;
331 	int nerr = 0;
332 	uint32_t emask_mem = 0;
333 	int i;
334 	int sz;
335 
336 	sz = sizeof (nf_mem_error_code) / sizeof (struct mch_error_code);
337 
338 	for (i = 0; i < sz; i++) {
339 		if (nf_mem & nf_mem_error_code[i].error_bit) {
340 			rt = nf_mem_error_code[i].intel_error_list;
341 			emask_mem |= nf_mem_error_code[i].emask;
342 			nerr++;
343 		}
344 	}
345 	if (emask_mem)
346 		nb_mem_mask_mc(emask_mem);
347 	if (nerr > 1)
348 		rt = -1;
349 	return (rt);
350 }
351 
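/*
 * Decode the non-fatal DDR2 memory error registers into the memory
 * scratchpad and return the ereport class suffix.
 */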
352 static char *
353 nf_mem_error(const nb_regs_t *rp, void *data)
354 {
355 	uint32_t ferr_nf_mem, recmema, recmemb;
356 	uint32_t nrecmema, nrecmemb, validlog;
357 	int channel;
358 	char *intr = "nb.unknown";
359 	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;
360 
361 	sp->rank = -1;
362 	sp->dimm = -1;
363 	sp->bank = -1;
364 	sp->cas = -1;
365 	sp->ras = -1LL;
366 	sp->pa = -1LL;
367 	sp->offset = -1;
368 	ferr_nf_mem = rp->nb.nf_mem_regs.ferr_nf_mem;
369 	if ((ferr_nf_mem & ERR_NF_MEM_MASK) == 0) {
370 		/* no first error found */
371 		sp->branch = -1;
372 		sp->channel = -1;
373 		sp->intel_error_list =
374 		    intel_nf_mem_err(rp->nb.nf_mem_regs.nerr_nf_mem);
375 		return (intr);
376 	}
377 	sp->intel_error_list = intel_nf_mem_err(ferr_nf_mem);
378 
379 	channel = (ferr_nf_mem >> ERR_MEM_CH_SHIFT) & 0x1;
380 	sp->branch = channel;
381 	sp->channel = -1;
382 	if (ferr_nf_mem & ERR_NF_MEM_MASK) {
383 		if (ferr_nf_mem & ERR_NF_MEM_ECC_UE) {
384 			/*
385 			 * uncorrectable ECC M1,M4-M6,M10-M12
386 			 * There is only channel per branch
387 			 * Invalidate the channel number so the mem ereport
388 			 * has the same detector with existing 5000 ereports.
389 			 * so we can leverage the existing Everhsolt rule.
390 			 */
391 			validlog = rp->nb.nf_mem_regs.validlog;
392 			if (ferr_nf_mem & ERR_NF_MEM_M1) {
393 				nrecmema = rp->nb.nf_mem_regs.nrecmema;
394 				nrecmemb = rp->nb.nf_mem_regs.nrecmemb;
395 				/* check if the nrecmem log is valid */
396 				if (validlog & 0x1 || nb_check_validlog == 0) {
397 					sp->rank = (nrecmema >> 8) & RANK_MASK;
398 					sp->bank = (nrecmema >> 12) & BANK_MASK;
399 					sp->cas = (nrecmemb >> 16) & CAS_MASK;
400 					sp->ras = nrecmemb & RAS_MASK;
401 				}
402 			} else {
403 				recmema = rp->nb.nf_mem_regs.recmema;
404 				recmemb = rp->nb.nf_mem_regs.recmemb;
405 				/* check if the recmem log is valid */
406 				if (validlog & 0x2 || nb_check_validlog == 0) {
407 					sp->rank = (recmema >> 8) & RANK_MASK;
408 					sp->bank = (recmema >> 12) & BANK_MASK;
409 					sp->cas = (recmemb >> 16) & CAS_MASK;
410 					sp->ras = recmemb & RAS_MASK;
411 				}
412 			}
413 			intr = "nb.ddr2_mem_ue";
414 		} else if ((ferr_nf_mem & ERR_NF_MEM_ECC_CE) != 0) {
415 			/* correctable ECC M14-M16 */
416 			recmema = rp->nb.nf_mem_regs.recmema;
417 			recmemb = rp->nb.nf_mem_regs.recmemb;
418 			validlog = rp->nb.nf_mem_regs.validlog;
419 			/* check if the recmem log is valid */
420 			if (validlog & 0x2 || nb_check_validlog == 0) {
421 				sp->channel = channel;
422 				sp->rank = (recmema >> 8) & RANK_MASK;
423 				sp->dimm = nb_rank2dimm(sp->channel, sp->rank);
424 				sp->bank = (recmema >> 12) & BANK_MASK;
425 				sp->cas = (recmemb >> 16) & CAS_MASK;
426 				sp->ras = recmemb & RAS_MASK;
427 			}
428 			intr = "nb.ddr2_mem_ce";
429 		} else if ((ferr_nf_mem & ERR_NF_MEM_SPARE) != 0) {
430 			/* spare dimm M20, M21 */
431 			intr = "nb.ddr2_mem_ds";
432 
433 			/*
434 			 * The channel can be valid here.
435 			 * However, there is only one channel per branch and
436 			 * to leverage the eversolt rules of other chipsets,
437 			 * the channel is ignored and let the rule find it out
438 			 * from the topology.
439 			 */
440 			if (rp->nb.nf_mem_regs.spcps & SPCPS_SPARE_DEPLOYED) {
441 				sp->rank =
442 				    SPCPS_FAILED_RANK(rp->nb.nf_mem_regs.spcps);
443 				nb_used_spare_rank(sp->branch, sp->rank);
444 				nb_config_gen++;
445 			}
446 		} else if ((ferr_nf_mem & ERR_NF_MEM_M18) != 0) {
447 			sp->channel = channel;
448 			intr = "nb.ddr2_spd";	/* SPD protocol */
449 
450 		}
451 	}
452 	if (sp->ras != -1) {
453 		sp->pa = dimm_getphys(sp->branch, sp->rank, sp->bank, sp->ras,
454 		    sp->cas);
455 		sp->offset = dimm_getoffset(sp->branch, sp->rank, sp->bank,
456 		    sp->ras, sp->cas);
457 	}
458 	return (intr);
459 }
460 
461 static struct mch_error_code fat_int_error_code[] = {
462 	{ 14, EMASK_INT_B14, ERR_FAT_INT_B14 },
463 	{ 12, EMASK_INT_B12, ERR_FAT_INT_B12 },
464 	{ 25, EMASK_INT_B25, ERR_FAT_INT_B25 },
465 	{ 23, EMASK_INT_B23, ERR_FAT_INT_B23 },
466 	{ 21, EMASK_INT_B21, ERR_FAT_INT_B21 },
467 	{ 7, EMASK_INT_B7, ERR_FAT_INT_B7 },
468 	{ 4, EMASK_INT_B4, ERR_FAT_INT_B4 },
469 	{ 3, EMASK_INT_B3, ERR_FAT_INT_B3 },
470 	{ 2, EMASK_INT_B2, ERR_FAT_INT_B2 },
471 	{ 1, EMASK_INT_B1, ERR_FAT_INT_B1 }
472 };
473 
474 static struct mch_error_code nf_int_error_code[] = {
475 	{ 27, 0, ERR_NF_INT_B27 },
476 	{ 24, 0, ERR_NF_INT_B24 },
477 	{ 22, EMASK_INT_B22, ERR_NF_INT_B22 },
478 	{ 20, EMASK_INT_B20, ERR_NF_INT_B20 },
479 	{ 19, EMASK_INT_B19, ERR_NF_INT_B19 },
480 	{ 18, 0, ERR_NF_INT_B18 },
481 	{ 17, 0, ERR_NF_INT_B17 },
482 	{ 16, 0, ERR_NF_INT_B16 },
483 	{ 11, EMASK_INT_B11, ERR_NF_INT_B11 },
484 	{ 10, EMASK_INT_B10, ERR_NF_INT_B10 },
485 	{ 9, EMASK_INT_B9, ERR_NF_INT_B9 },
486 	{ 8, EMASK_INT_B8, ERR_NF_INT_B8 },
487 	{ 6, EMASK_INT_B6, ERR_NF_INT_B6 },
488 	{ 5, EMASK_INT_B5, ERR_NF_INT_B5 }
489 };
490 
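/*
 * Map the fatal and non-fatal internal (B) error bits to their Intel
 * error-list number and mask further machine checks for those errors.
 */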
491 static int
492 intel_int_err(uint16_t err_fat_int, uint16_t err_nf_int)
493 {
494 	int rt = -1;
495 	int nerr = 0;
496 	uint32_t emask_int = 0;
497 	int i;
498 	int sz;
499 
500 	sz = sizeof (fat_int_error_code) / sizeof (struct mch_error_code);
501 
502 	for (i = 0; i < sz; i++) {
503 		if (err_fat_int & fat_int_error_code[i].error_bit) {
504 			rt = fat_int_error_code[i].intel_error_list;
505 			emask_int |= fat_int_error_code[i].emask;
506 			nerr++;
507 		}
508 	}
509 
510 	if (nb_chipset == INTEL_NB_5400 &&
511 	    (err_nf_int & NERR_NF_5400_INT_B26) != 0) {
512 		err_nf_int &= ~NERR_NF_5400_INT_B26;
513 		rt = 26;
514 		nerr++;
515 	}
516 
517 	if (rt)
518 		err_nf_int &= ~ERR_NF_INT_B18;
519 
520 	sz = sizeof (nf_int_error_code) / sizeof (struct mch_error_code);
521 
522 	for (i = 0; i < sz; i++) {
523 		if (err_nf_int & nf_int_error_code[i].error_bit) {
524 			rt = nf_int_error_code[i].intel_error_list;
525 			emask_int |= nf_int_error_code[i].emask;
526 			nerr++;
527 		}
528 	}
529 
530 	if (emask_int)
531 		nb_int_mask_mc(emask_int);
532 	if (nerr > 1)
533 		rt = -1;
534 	return (rt);
535 }
536 
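/*
 * Capture the internal error registers, clearing them if we are not
 * about to panic.  Returns 1 when the only error recorded is the
 * non-fatal B18, which the callers treat as spurious and do not dispatch.
 */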
537 static int
538 log_int_err(nb_regs_t *rp, int willpanic, int *interpose)
539 {
540 	int t = 0;
541 	int rt = 0;
542 
543 	rp->flag = NB_REG_LOG_INT;
544 	rp->nb.int_regs.ferr_fat_int = FERR_FAT_INT_RD(interpose);
545 	rp->nb.int_regs.ferr_nf_int = FERR_NF_INT_RD(&t);
546 	*interpose |= t;
547 	rp->nb.int_regs.nerr_fat_int = NERR_FAT_INT_RD(&t);
548 	*interpose |= t;
549 	rp->nb.int_regs.nerr_nf_int = NERR_NF_INT_RD(&t);
550 	*interpose |= t;
551 	rp->nb.int_regs.nrecint = NRECINT_RD();
552 	rp->nb.int_regs.recint = RECINT_RD();
553 	rp->nb.int_regs.nrecsf = NRECSF_RD();
554 	rp->nb.int_regs.recsf = RECSF_RD();
555 
556 	if (!willpanic) {
557 		if (rp->nb.int_regs.ferr_fat_int || *interpose)
558 			FERR_FAT_INT_WR(rp->nb.int_regs.ferr_fat_int);
559 		if (rp->nb.int_regs.ferr_nf_int || *interpose)
560 			FERR_NF_INT_WR(rp->nb.int_regs.ferr_nf_int);
561 		if (rp->nb.int_regs.nerr_fat_int)
562 			NERR_FAT_INT_WR(rp->nb.int_regs.nerr_fat_int);
563 		if (rp->nb.int_regs.nerr_nf_int)
564 			NERR_NF_INT_WR(rp->nb.int_regs.nerr_nf_int);
565 		/*
566 		 * if interpose write read-only registers to clear from pcii
567 		 * cache
568 		 */
569 		if (*interpose) {
570 			NRECINT_WR();
571 			RECINT_WR();
572 			NRECSF_WR();
573 			RECSF_WR();
574 		}
575 	}
576 	if (rp->nb.int_regs.ferr_fat_int == 0 &&
577 	    rp->nb.int_regs.nerr_fat_int == 0 &&
578 	    (rp->nb.int_regs.ferr_nf_int == ERR_NF_INT_B18 ||
579 	    (rp->nb.int_regs.ferr_nf_int == 0 &&
580 	    rp->nb.int_regs.nerr_nf_int == ERR_NF_INT_B18))) {
581 		rt = 1;
582 	}
583 	return (rt);
584 }
585 
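/*
 * Capture the thermal error registers, clearing them if we are not about
 * to panic.
 */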
586 static void
587 log_thermal_err(nb_regs_t *rp, int willpanic, int *interpose)
588 {
589 	int t = 0;
590 
591 	rp->flag = NB_REG_LOG_THR;
592 	rp->nb.thr_regs.ferr_fat_thr = FERR_FAT_THR_RD(interpose);
593 	rp->nb.thr_regs.nerr_fat_thr = NERR_FAT_THR_RD(&t);
594 	*interpose |= t;
595 	rp->nb.thr_regs.ferr_nf_thr = FERR_NF_THR_RD(&t);
596 	*interpose |= t;
597 	rp->nb.thr_regs.nerr_nf_thr = NERR_NF_THR_RD(&t);
598 	*interpose |= t;
599 	rp->nb.thr_regs.ctsts = CTSTS_RD();
600 	rp->nb.thr_regs.thrtsts = THRTSTS_RD();
601 
602 	if (!willpanic) {
603 		if (rp->nb.thr_regs.ferr_fat_thr || *interpose)
604 			FERR_FAT_THR_WR(rp->nb.thr_regs.ferr_fat_thr);
605 		if (rp->nb.thr_regs.nerr_fat_thr || *interpose)
606 			NERR_FAT_THR_WR(rp->nb.thr_regs.nerr_fat_thr);
607 		if (rp->nb.thr_regs.ferr_nf_thr || *interpose)
608 			FERR_NF_THR_WR(rp->nb.thr_regs.ferr_nf_thr);
609 		if (rp->nb.thr_regs.nerr_nf_thr || *interpose)
610 			NERR_NF_THR_WR(rp->nb.thr_regs.nerr_nf_thr);
611 
612 		if (*interpose) {
613 			CTSTS_WR(rp->nb.thr_regs.ctsts);
614 			THRTSTS_WR(rp->nb.thr_regs.thrtsts);
615 		}
616 	}
617 }
618 
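/*
 * Capture the DMA engine PCI status registers.
 */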
619 static void
620 log_dma_err(nb_regs_t *rp, int *interpose)
621 {
622 	rp->flag = NB_REG_LOG_DMA;
623 
624 	rp->nb.dma_regs.pcists = PCISTS_RD(interpose);
625 	rp->nb.dma_regs.pexdevsts = PCIDEVSTS_RD();
626 }
627 
628 static struct mch_error_code fat_fsb_error_code[] = {
629 	{ 9, EMASK_FSB_F9, ERR_FAT_FSB_F9 },
630 	{ 2, EMASK_FSB_F2, ERR_FAT_FSB_F2 },
631 	{ 1, EMASK_FSB_F1, ERR_FAT_FSB_F1 }
632 };
633 
634 static struct mch_error_code nf_fsb_error_code[] = {
635 	{ 8, EMASK_FSB_F8, ERR_NF_FSB_F8 },
636 	{ 7, EMASK_FSB_F7, ERR_NF_FSB_F7 },
637 	{ 6, EMASK_FSB_F6, ERR_NF_FSB_F6 }
638 };
639 
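/*
 * Map the fatal and non-fatal error bits of the given front-side bus to
 * their Intel error-list number and mask further machine checks for
 * those errors.
 */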
640 static int
641 intel_fsb_err(int fsb, uint8_t err_fat_fsb, uint8_t err_nf_fsb)
642 {
643 	int rt = -1;
644 	int nerr = 0;
645 	uint16_t emask_fsb = 0;
646 	int i;
647 	int sz;
648 
649 	sz = sizeof (fat_fsb_error_code) / sizeof (struct mch_error_code);
650 
651 	for (i = 0; i < sz; i++) {
652 		if (err_fat_fsb & fat_fsb_error_code[i].error_bit) {
653 			rt = fat_fsb_error_code[i].intel_error_list;
654 			emask_fsb |= fat_fsb_error_code[i].emask;
655 			nerr++;
656 		}
657 	}
658 
659 	sz = sizeof (nf_fsb_error_code) / sizeof (struct mch_error_code);
660 
661 	for (i = 0; i < sz; i++) {
662 		if (err_nf_fsb & nf_fsb_error_code[i].error_bit) {
663 			rt = nf_fsb_error_code[i].intel_error_list;
664 			emask_fsb |= nf_fsb_error_code[i].emask;
665 			nerr++;
666 		}
667 	}
668 
669 	if (emask_fsb)
670 		nb_fsb_mask_mc(fsb, emask_fsb);
671 	if (nerr > 1)
672 		rt = -1;
673 	return (rt);
674 }
675 
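/*
 * Capture the error registers of the front-side bus identified by the
 * global FERR value, clearing them if we are not about to panic.
 */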
676 static void
677 log_fsb_err(uint64_t ferr, nb_regs_t *rp, int willpanic, int *interpose)
678 {
679 	uint8_t fsb;
680 	int t = 0;
681 
682 	fsb = GE_FERR_FSB(ferr);
683 	rp->flag = NB_REG_LOG_FSB;
684 
685 	rp->nb.fsb_regs.fsb = fsb;
686 	rp->nb.fsb_regs.ferr_fat_fsb = FERR_FAT_FSB_RD(fsb, interpose);
687 	rp->nb.fsb_regs.ferr_nf_fsb = FERR_NF_FSB_RD(fsb, &t);
688 	*interpose |= t;
689 	rp->nb.fsb_regs.nerr_fat_fsb = NERR_FAT_FSB_RD(fsb, &t);
690 	*interpose |= t;
691 	rp->nb.fsb_regs.nerr_nf_fsb = NERR_NF_FSB_RD(fsb, &t);
692 	*interpose |= t;
693 	rp->nb.fsb_regs.nrecfsb = NRECFSB_RD(fsb);
694 	rp->nb.fsb_regs.nrecfsb_addr = NRECADDR_RD(fsb);
695 	rp->nb.fsb_regs.recfsb = RECFSB_RD(fsb);
696 	if (!willpanic) {
697 		/* Clear the fatal/non-fatal first/next FSB errors */
698 		if (rp->nb.fsb_regs.ferr_fat_fsb || *interpose)
699 			FERR_FAT_FSB_WR(fsb, rp->nb.fsb_regs.ferr_fat_fsb);
700 		if (rp->nb.fsb_regs.ferr_nf_fsb || *interpose)
701 			FERR_NF_FSB_WR(fsb, rp->nb.fsb_regs.ferr_nf_fsb);
702 		if (rp->nb.fsb_regs.nerr_fat_fsb || *interpose)
703 			NERR_FAT_FSB_WR(fsb, rp->nb.fsb_regs.nerr_fat_fsb);
704 		if (rp->nb.fsb_regs.nerr_nf_fsb || *interpose)
705 			NERR_NF_FSB_WR(fsb, rp->nb.fsb_regs.nerr_nf_fsb);
706 
707 		/*
708 		 * if interpose write read-only registers to clear from pcii
709 		 * cache
710 		 */
711 		if (*interpose) {
712 			NRECFSB_WR(fsb);
713 			NRECADDR_WR(fsb);
714 			RECFSB_WR(fsb);
715 		}
716 	}
717 }
718 
719 static struct mch_error_code fat_pex_error_code[] = {
720 	{ 19, EMASK_UNCOR_PEX_IO19, PEX_FAT_IO19 },
721 	{ 18, EMASK_UNCOR_PEX_IO18, PEX_FAT_IO18 },
722 	{ 10, EMASK_UNCOR_PEX_IO10, PEX_FAT_IO10 },
723 	{ 9, EMASK_UNCOR_PEX_IO9, PEX_FAT_IO9 },
724 	{ 8, EMASK_UNCOR_PEX_IO8, PEX_FAT_IO8 },
725 	{ 7, EMASK_UNCOR_PEX_IO7, PEX_FAT_IO7 },
726 	{ 6, EMASK_UNCOR_PEX_IO6, PEX_FAT_IO6 },
727 	{ 5, EMASK_UNCOR_PEX_IO5, PEX_FAT_IO5 },
728 	{ 4, EMASK_UNCOR_PEX_IO4, PEX_FAT_IO4 },
729 	{ 3, EMASK_UNCOR_PEX_IO3, PEX_FAT_IO3 },
730 	{ 2, EMASK_UNCOR_PEX_IO2, PEX_FAT_IO2 },
731 	{ 0, EMASK_UNCOR_PEX_IO0, PEX_FAT_IO0 }
732 };
733 
734 static struct mch_error_code fat_unit_pex_5400_error_code[] = {
735 	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_FAT_IO32 },
736 	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_FAT_IO31 },
737 	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_FAT_IO30 },
738 	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_FAT_IO29 },
739 	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_FAT_IO27 },
740 	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_FAT_IO26 },
741 	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_FAT_IO25 },
742 	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_FAT_IO24 },
743 	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_FAT_IO23 },
744 	{ 22, EMASK_UNIT_PEX_IO22, PEX_5400_FAT_IO22 },
745 };
746 
747 static struct mch_error_code fat_pex_5400_error_code[] = {
748 	{ 19, EMASK_UNCOR_PEX_IO19, PEX_5400_FAT_IO19 },
749 	{ 18, EMASK_UNCOR_PEX_IO18, PEX_5400_FAT_IO18 },
750 	{ 10, EMASK_UNCOR_PEX_IO10, PEX_5400_FAT_IO10 },
751 	{ 9, EMASK_UNCOR_PEX_IO9, PEX_5400_FAT_IO9 },
752 	{ 8, EMASK_UNCOR_PEX_IO8, PEX_5400_FAT_IO8 },
753 	{ 7, EMASK_UNCOR_PEX_IO7, PEX_5400_FAT_IO7 },
754 	{ 6, EMASK_UNCOR_PEX_IO6, PEX_5400_FAT_IO6 },
755 	{ 5, EMASK_UNCOR_PEX_IO5, PEX_5400_FAT_IO5 },
756 	{ 4, EMASK_UNCOR_PEX_IO4, PEX_5400_FAT_IO4 },
757 	{ 2, EMASK_UNCOR_PEX_IO2, PEX_5400_FAT_IO2 },
758 	{ 0, EMASK_UNCOR_PEX_IO0, PEX_5400_FAT_IO0 }
759 };
760 
761 static struct mch_error_code fat_rp_5400_error_code[] = {
762 	{ 1, EMASK_RP_PEX_IO1, PEX_5400_FAT_IO1 }
763 };
764 
765 static struct mch_error_code fat_rp_error_code[] = {
766 	{ 1, EMASK_RP_PEX_IO1, PEX_FAT_IO1 }
767 };
768 
769 static struct mch_error_code uncor_pex_error_code[] = {
770 	{ 19, EMASK_UNCOR_PEX_IO19, PEX_NF_IO19 },
771 	{ 9, EMASK_UNCOR_PEX_IO9, PEX_NF_IO9 },
772 	{ 8, EMASK_UNCOR_PEX_IO8, PEX_NF_IO8 },
773 	{ 7, EMASK_UNCOR_PEX_IO7, PEX_NF_IO7 },
774 	{ 6, EMASK_UNCOR_PEX_IO6, PEX_NF_IO6 },
775 	{ 5, EMASK_UNCOR_PEX_IO5, PEX_NF_IO5 },
776 	{ 4, EMASK_UNCOR_PEX_IO4, PEX_NF_IO4 },
777 	{ 3, EMASK_UNCOR_PEX_IO3, PEX_NF_IO3 },
778 	{ 0, EMASK_UNCOR_PEX_IO0, PEX_NF_IO0 }
779 };
780 
781 static struct mch_error_code uncor_pex_5400_error_code[] = {
782 	{ 33, EMASK_UNIT_PEX_IO33, PEX_5400_NF_IO33 },
783 	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_NF_IO32 },
784 	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_NF_IO31 },
785 	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_NF_IO30 },
786 	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_NF_IO29 },
787 	{ 28, EMASK_UNIT_PEX_IO28, PEX_5400_NF_IO28 },
788 	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_NF_IO27 },
789 	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_NF_IO26 },
790 	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_NF_IO25 },
791 	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_NF_IO24 },
792 	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_NF_IO23 },
793 };
794 
795 static struct mch_error_code cor_pex_error_code[] = {
796 	{ 20, EMASK_COR_PEX_IO20, PEX_5400_NF_IO20 },
797 	{ 16, EMASK_COR_PEX_IO16, PEX_NF_IO16 },
798 	{ 15, EMASK_COR_PEX_IO15, PEX_NF_IO15 },
799 	{ 14, EMASK_COR_PEX_IO14, PEX_NF_IO14 },
800 	{ 13, EMASK_COR_PEX_IO13, PEX_NF_IO13 },
801 	{ 12, EMASK_COR_PEX_IO12, PEX_NF_IO12 },
802 	{ 10, 0, PEX_NF_IO10 },
803 	{ 2, 0, PEX_NF_IO2 }
804 };
805 
806 static struct mch_error_code rp_pex_5400_error_code[] = {
807 	{ 17, EMASK_RP_PEX_IO17, PEX_5400_NF_IO17 },
808 	{ 11, EMASK_RP_PEX_IO11, PEX_5400_NF_IO11 }
809 };
810 
811 static struct mch_error_code cor_pex_5400_error_code1[] = {
812 	{ 19, EMASK_UNCOR_PEX_IO19, PEX_5400_NF_IO19 },
813 	{ 10, EMASK_UNCOR_PEX_IO10, PEX_5400_NF_IO10 },
814 	{ 9, EMASK_UNCOR_PEX_IO9, PEX_5400_NF_IO9 },
815 	{ 8, EMASK_UNCOR_PEX_IO8, PEX_5400_NF_IO8 },
816 	{ 7, EMASK_UNCOR_PEX_IO7, PEX_5400_NF_IO7 },
817 	{ 6, EMASK_UNCOR_PEX_IO6, PEX_5400_NF_IO6 },
818 	{ 5, EMASK_UNCOR_PEX_IO5, PEX_5400_NF_IO5 },
819 	{ 4, EMASK_UNCOR_PEX_IO4, PEX_5400_NF_IO4 },
820 	{ 2, EMASK_UNCOR_PEX_IO2, PEX_5400_NF_IO2 },
821 	{ 0, EMASK_UNCOR_PEX_IO0, PEX_5400_NF_IO0 }
822 };
823 
824 static struct mch_error_code cor_pex_5400_error_code2[] = {
825 	{ 20, EMASK_COR_PEX_IO20, PEX_5400_NF_IO20 },
826 	{ 16, EMASK_COR_PEX_IO16, PEX_5400_NF_IO16 },
827 	{ 15, EMASK_COR_PEX_IO15, PEX_5400_NF_IO15 },
828 	{ 14, EMASK_COR_PEX_IO14, PEX_5400_NF_IO14 },
829 	{ 13, EMASK_COR_PEX_IO13, PEX_5400_NF_IO13 },
830 	{ 12, EMASK_COR_PEX_IO12, PEX_5400_NF_IO12 }
831 };
832 
833 static struct mch_error_code cor_pex_5400_error_code3[] = {
834 	{ 33, EMASK_UNIT_PEX_IO33, PEX_5400_NF_IO33 },
835 	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_NF_IO32 },
836 	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_NF_IO31 },
837 	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_NF_IO30 },
838 	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_NF_IO29 },
839 	{ 28, EMASK_UNIT_PEX_IO28, PEX_5400_NF_IO28 },
840 	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_NF_IO27 },
841 	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_NF_IO26 },
842 	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_NF_IO25 },
843 	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_NF_IO24 },
844 	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_NF_IO23 }
845 };
846 
847 static struct mch_error_code rp_pex_error_code[] = {
848 	{ 17, EMASK_RP_PEX_IO17, PEX_NF_IO17 },
849 	{ 11, EMASK_RP_PEX_IO11, PEX_NF_IO11 },
850 };
851 
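/*
 * Map the fatal and non-fatal/correctable PCI Express (IO) error bits to
 * their Intel error-list number.
 */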
852 static int
853 intel_pex_err(uint32_t pex_fat, uint32_t pex_nf_cor)
854 {
855 	int rt = -1;
856 	int nerr = 0;
857 	int i;
858 	int sz;
859 
860 	sz = sizeof (fat_pex_error_code) / sizeof (struct mch_error_code);
861 
862 	for (i = 0; i < sz; i++) {
863 		if (pex_fat & fat_pex_error_code[i].error_bit) {
864 			rt = fat_pex_error_code[i].intel_error_list;
865 			nerr++;
866 		}
867 	}
868 	sz = sizeof (fat_rp_error_code) / sizeof (struct mch_error_code);
869 
870 	for (i = 0; i < sz; i++) {
871 		if (pex_fat & fat_rp_error_code[i].error_bit) {
872 			rt = fat_rp_error_code[i].intel_error_list;
873 			nerr++;
874 		}
875 	}
876 	sz = sizeof (uncor_pex_error_code) / sizeof (struct mch_error_code);
877 
878 	for (i = 0; i < sz; i++) {
879 		if (pex_nf_cor & uncor_pex_error_code[i].error_bit) {
880 			rt = uncor_pex_error_code[i].intel_error_list;
881 			nerr++;
882 		}
883 	}
884 
885 	sz = sizeof (cor_pex_error_code) / sizeof (struct mch_error_code);
886 
887 	for (i = 0; i < sz; i++) {
888 		if (pex_nf_cor & cor_pex_error_code[i].error_bit) {
889 			rt = cor_pex_error_code[i].intel_error_list;
890 			nerr++;
891 		}
892 	}
893 	sz = sizeof (rp_pex_error_code) / sizeof (struct mch_error_code);
894 
895 	for (i = 0; i < sz; i++) {
896 		if (pex_nf_cor & rp_pex_error_code[i].error_bit) {
897 			rt = rp_pex_error_code[i].intel_error_list;
898 			nerr++;
899 		}
900 	}
901 
902 	if (nerr > 1)
903 		rt = -1;
904 	return (rt);
905 }
906 
907 static struct mch_error_code fat_thr_error_code[] = {
908 	{ 2, EMASK_THR_F2, ERR_FAT_THR_F2 },
909 	{ 1, EMASK_THR_F1, ERR_FAT_THR_F1 }
910 };
911 
912 static struct mch_error_code nf_thr_error_code[] = {
913 	{ 5, EMASK_THR_F5, ERR_NF_THR_F5 },
914 	{ 4, EMASK_THR_F4, ERR_NF_THR_F4 },
915 	{ 3, EMASK_THR_F3, ERR_NF_THR_F3 }
916 };
917 
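/*
 * Map the fatal and non-fatal thermal error bits to their Intel
 * error-list number and mask further machine checks for those errors.
 */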
918 static int
919 intel_thr_err(uint8_t err_fat_thr, uint8_t err_nf_thr)
920 {
921 	int rt = -1;
922 	int nerr = 0;
923 	uint16_t emask_thr = 0;
924 	int i;
925 	int sz;
926 
927 	sz = sizeof (fat_thr_error_code) / sizeof (struct mch_error_code);
928 
929 	for (i = 0; i < sz; i++) {
930 		if (err_fat_thr & fat_thr_error_code[i].error_bit) {
931 			rt = fat_thr_error_code[i].intel_error_list;
932 			emask_thr |= fat_thr_error_code[i].emask;
933 			nerr++;
934 		}
935 	}
936 
937 	sz = sizeof (nf_thr_error_code) / sizeof (struct mch_error_code);
938 
939 	for (i = 0; i < sz; i++) {
940 		if (err_nf_thr & nf_thr_error_code[i].error_bit) {
941 			rt = nf_thr_error_code[i].intel_error_list;
942 			emask_thr |= nf_thr_error_code[i].emask;
943 			nerr++;
944 		}
945 	}
946 
947 	if (emask_thr)
948 		nb_thr_mask_mc(emask_thr);
949 	if (nerr > 1)
950 		rt = -1;
951 	return (rt);
952 }
953 
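/*
 * 5400 chipset variant of intel_pex_err(); the 5400 adds unit
 * (IO22-IO33) and additional root-port error bits to decode.
 */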
954 static int
955 intel_pex_5400_err(uint32_t pex_fat, uint32_t pex_nf_cor)
956 {
957 	int rt = -1;
958 	int nerr = 0;
959 	int i;
960 	int sz;
961 
962 	sz = sizeof (fat_pex_5400_error_code) / sizeof (struct mch_error_code);
963 
964 	for (i = 0; i < sz; i++) {
965 		if (pex_fat & fat_pex_5400_error_code[i].error_bit) {
966 			rt = fat_pex_5400_error_code[i].intel_error_list;
967 			nerr++;
968 		}
969 	}
970 	sz = sizeof (fat_rp_5400_error_code) / sizeof (struct mch_error_code);
971 
972 	for (i = 0; i < sz; i++) {
973 		if (pex_fat & fat_rp_5400_error_code[i].error_bit) {
974 			rt = fat_rp_5400_error_code[i].intel_error_list;
975 			nerr++;
976 		}
977 	}
978 	sz = sizeof (fat_unit_pex_5400_error_code) /
979 	    sizeof (struct mch_error_code);
980 
981 	for (i = 0; i < sz; i++) {
982 		if (pex_fat &
983 		    fat_unit_pex_5400_error_code[i].error_bit) {
984 			rt = fat_unit_pex_5400_error_code[i].intel_error_list;
985 			nerr++;
986 		}
987 	}
988 	sz = sizeof (uncor_pex_5400_error_code) /
989 	    sizeof (struct mch_error_code);
990 
991 	for (i = 0; i < sz; i++) {
992 		if (pex_fat & uncor_pex_5400_error_code[i].error_bit) {
993 			rt = uncor_pex_5400_error_code[i].intel_error_list;
994 			nerr++;
995 		}
996 	}
997 
998 	sz = sizeof (rp_pex_5400_error_code) / sizeof (struct mch_error_code);
999 
1000 	for (i = 0; i < sz; i++) {
1001 		if (pex_nf_cor & rp_pex_5400_error_code[i].error_bit) {
1002 			rt = rp_pex_5400_error_code[i].intel_error_list;
1003 			nerr++;
1004 		}
1005 	}
1006 
1007 	sz = sizeof (cor_pex_5400_error_code1) / sizeof (struct mch_error_code);
1008 
1009 	for (i = 0; i < sz; i++) {
1010 		if (pex_nf_cor & cor_pex_5400_error_code1[i].error_bit) {
1011 			rt = cor_pex_5400_error_code1[i].intel_error_list;
1012 			nerr++;
1013 		}
1014 	}
1015 
1016 	sz = sizeof (cor_pex_5400_error_code2) / sizeof (struct mch_error_code);
1017 
1018 	for (i = 0; i < sz; i++) {
1019 		if (pex_nf_cor & cor_pex_5400_error_code2[i].error_bit) {
1020 			rt = cor_pex_5400_error_code2[i].intel_error_list;
1021 			nerr++;
1022 		}
1023 	}
1024 
1025 	sz = sizeof (cor_pex_5400_error_code3) / sizeof (struct mch_error_code);
1026 
1027 	for (i = 0; i < sz; i++) {
1028 		if (pex_nf_cor & cor_pex_5400_error_code3[i].error_bit) {
1029 			rt = cor_pex_5400_error_code3[i].intel_error_list;
1030 			nerr++;
1031 		}
1032 	}
1033 
1034 	if (nerr > 1)
1035 		rt = -1;
1036 	return (rt);
1037 }
1038 
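/*
 * Capture the PCI Express error registers of the port identified by the
 * global FERR value, clearing them if we are not about to panic.
 */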
1039 static void
1040 log_pex_err(uint64_t ferr, nb_regs_t *rp, int willpanic, int *interpose)
1041 {
1042 	uint8_t pex = (uint8_t)-1;
1043 	int t = 0;
1044 
1045 	rp->flag = NB_REG_LOG_PEX;
1046 	pex = GE_ERR_PEX(ferr);
1047 
1048 	rp->nb.pex_regs.pex = pex;
1049 	rp->nb.pex_regs.pex_fat_ferr =  PEX_FAT_FERR_RD(pex, interpose);
1050 	rp->nb.pex_regs.pex_fat_nerr = PEX_FAT_NERR_RD(pex, &t);
1051 	*interpose |= t;
1052 	rp->nb.pex_regs.pex_nf_corr_ferr = PEX_NF_FERR_RD(pex, &t);
1053 	*interpose |= t;
1054 	rp->nb.pex_regs.pex_nf_corr_nerr = PEX_NF_NERR_RD(pex, &t);
1055 	*interpose |= t;
1056 	rp->nb.pex_regs.uncerrsev = UNCERRSEV_RD(pex);
1057 	rp->nb.pex_regs.rperrsts = RPERRSTS_RD(pex);
1058 	rp->nb.pex_regs.rperrsid = RPERRSID_RD(pex);
1059 	if (pex != (uint8_t)-1)
1060 		rp->nb.pex_regs.uncerrsts = UNCERRSTS_RD(pex);
1061 	else
1062 		rp->nb.pex_regs.uncerrsts = 0;
1063 	rp->nb.pex_regs.aerrcapctrl = AERRCAPCTRL_RD(pex);
1064 	rp->nb.pex_regs.corerrsts = CORERRSTS_RD(pex);
1065 	rp->nb.pex_regs.pexdevsts = PEXDEVSTS_RD(pex);
1066 
1067 	if (!willpanic) {
1068 		if (rp->nb.pex_regs.pex_fat_ferr || *interpose)
1069 			PEX_FAT_FERR_WR(pex, rp->nb.pex_regs.pex_fat_ferr);
1070 		if (rp->nb.pex_regs.pex_fat_nerr)
1071 			PEX_FAT_NERR_WR(pex, rp->nb.pex_regs.pex_fat_nerr);
1072 		if (rp->nb.pex_regs.pex_nf_corr_ferr || *interpose)
1073 			PEX_NF_FERR_WR(pex, rp->nb.pex_regs.pex_nf_corr_ferr);
1074 		if (rp->nb.pex_regs.pex_nf_corr_nerr)
1075 			PEX_NF_NERR_WR(pex, rp->nb.pex_regs.pex_nf_corr_nerr);
1076 		if (*interpose)
1077 			UNCERRSTS_WR(pex, rp->nb.pex_regs.uncerrsts);
1078 		if (*interpose)
1079 			RPERRSTS_WR(pex, rp->nb.pex_regs.rperrsts);
1080 		if (*interpose)
1081 			PEXDEVSTS_WR(pex, 0);
1082 	}
1083 }
1084 
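/*
 * Capture the fatal FB-DIMM error registers of the faulting branch,
 * including the running uncorrectable error count, clearing the error
 * registers if we are not about to panic.
 */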
1085 static void
1086 log_fat_fbd_err(nb_regs_t *rp, int willpanic, int *interpose)
1087 {
1088 	int channel, branch;
1089 	int t = 0;
1090 
1091 	rp->flag = NB_REG_LOG_FAT_FBD;
1092 	rp->nb.fat_fbd_regs.ferr_fat_fbd = FERR_FAT_FBD_RD(interpose);
1093 	channel = (rp->nb.fat_fbd_regs.ferr_fat_fbd >> 28) & 3;
1094 	branch = channel >> 1;
1095 	rp->nb.fat_fbd_regs.nerr_fat_fbd = NERR_FAT_FBD_RD(&t);
1096 	*interpose |= t;
1097 	rp->nb.fat_fbd_regs.nrecmema = NRECMEMA_RD(branch);
1098 	rp->nb.fat_fbd_regs.nrecmemb = NRECMEMB_RD(branch);
1099 	rp->nb.fat_fbd_regs.nrecfglog = NRECFGLOG_RD(branch);
1100 	rp->nb.fat_fbd_regs.nrecfbda = NRECFBDA_RD(branch);
1101 	rp->nb.fat_fbd_regs.nrecfbdb = NRECFBDB_RD(branch);
1102 	rp->nb.fat_fbd_regs.nrecfbdc = NRECFBDC_RD(branch);
1103 	rp->nb.fat_fbd_regs.nrecfbdd = NRECFBDD_RD(branch);
1104 	rp->nb.fat_fbd_regs.nrecfbde = NRECFBDE_RD(branch);
1105 	rp->nb.fat_fbd_regs.nrecfbdf = NRECFBDF_RD(branch);
1106 	rp->nb.fat_fbd_regs.spcps = SPCPS_RD(branch);
1107 	rp->nb.fat_fbd_regs.spcpc = SPCPC_RD(branch);
1108 	rp->nb.fat_fbd_regs.uerrcnt = UERRCNT_RD(branch);
1109 	rp->nb.fat_fbd_regs.uerrcnt_last = uerrcnt[branch];
1110 	uerrcnt[branch] = rp->nb.fat_fbd_regs.uerrcnt;
1111 	rp->nb.fat_fbd_regs.badrama = BADRAMA_RD(branch);
1112 	rp->nb.fat_fbd_regs.badramb = BADRAMB_RD(branch);
1113 	rp->nb.fat_fbd_regs.badcnt = BADCNT_RD(branch);
1114 	if (!willpanic) {
1115 		if (rp->nb.fat_fbd_regs.ferr_fat_fbd || *interpose)
1116 			FERR_FAT_FBD_WR(rp->nb.fat_fbd_regs.ferr_fat_fbd);
1117 		if (rp->nb.fat_fbd_regs.nerr_fat_fbd)
1118 			NERR_FAT_FBD_WR(rp->nb.fat_fbd_regs.nerr_fat_fbd);
1119 		/*
1120 		 * if interpose write read-only registers to clear from pcii
1121 		 * cache
1122 		 */
1123 		if (*interpose) {
1124 			NRECMEMA_WR(branch);
1125 			NRECMEMB_WR(branch);
1126 			NRECFGLOG_WR(branch);
1127 			NRECFBDA_WR(branch);
1128 			NRECFBDB_WR(branch);
1129 			NRECFBDC_WR(branch);
1130 			NRECFBDD_WR(branch);
1131 			NRECFBDE_WR(branch);
1132 			NRECFBDF_WR(branch);
1133 		}
1134 	}
1135 }
1136 
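/*
 * Capture the non-fatal FB-DIMM error registers of the faulting branch
 * and channel, including the correctable error counts, clearing the
 * error registers if we are not about to panic.
 */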
1137 static void
1138 log_nf_fbd_err(nb_regs_t *rp, int willpanic, int *interpose)
1139 {
1140 	int channel, branch;
1141 	int t = 0;
1142 
1143 	rp->flag = NB_REG_LOG_NF_FBD;
1144 	rp->nb.nf_fbd_regs.ferr_nf_fbd = FERR_NF_FBD_RD(interpose);
1145 	channel = (rp->nb.nf_fbd_regs.ferr_nf_fbd >> 28) & 3;
1146 	branch = channel >> 1;
1147 	rp->nb.nf_fbd_regs.nerr_nf_fbd = NERR_NF_FBD_RD(&t);
1148 	*interpose |= t;
1149 	rp->nb.nf_fbd_regs.redmemb = REDMEMB_RD();
1150 	rp->nb.nf_fbd_regs.recmema = RECMEMA_RD(branch);
1151 	rp->nb.nf_fbd_regs.recmemb = RECMEMB_RD(branch);
1152 	rp->nb.nf_fbd_regs.recfglog = RECFGLOG_RD(branch);
1153 	rp->nb.nf_fbd_regs.recfbda = RECFBDA_RD(branch);
1154 	rp->nb.nf_fbd_regs.recfbdb = RECFBDB_RD(branch);
1155 	rp->nb.nf_fbd_regs.recfbdc = RECFBDC_RD(branch);
1156 	rp->nb.nf_fbd_regs.recfbdd = RECFBDD_RD(branch);
1157 	rp->nb.nf_fbd_regs.recfbde = RECFBDE_RD(branch);
1158 	rp->nb.nf_fbd_regs.recfbdf = RECFBDF_RD(branch);
1159 	rp->nb.nf_fbd_regs.spcps = SPCPS_RD(branch);
1160 	rp->nb.nf_fbd_regs.spcpc = SPCPC_RD(branch);
1161 	if (nb_chipset == INTEL_NB_7300 || nb_chipset == INTEL_NB_5400) {
1162 		rp->nb.nf_fbd_regs.cerrcnta = CERRCNTA_RD(branch, channel);
1163 		rp->nb.nf_fbd_regs.cerrcntb = CERRCNTB_RD(branch, channel);
1164 		rp->nb.nf_fbd_regs.cerrcntc = CERRCNTC_RD(branch, channel);
1165 		rp->nb.nf_fbd_regs.cerrcntd = CERRCNTD_RD(branch, channel);
1166 	} else {
1167 		rp->nb.nf_fbd_regs.cerrcnta = CERRCNT_RD(branch);
1168 		rp->nb.nf_fbd_regs.cerrcntb = 0;
1169 		rp->nb.nf_fbd_regs.cerrcntc = 0;
1170 		rp->nb.nf_fbd_regs.cerrcntd = 0;
1171 	}
1172 	rp->nb.nf_fbd_regs.cerrcnta_last = cerrcnta[branch][channel & 1];
1173 	rp->nb.nf_fbd_regs.cerrcntb_last = cerrcntb[branch][channel & 1];
1174 	rp->nb.nf_fbd_regs.cerrcntc_last = cerrcntc[branch][channel & 1];
1175 	rp->nb.nf_fbd_regs.cerrcntd_last = cerrcntd[branch][channel & 1];
1176 	cerrcnta[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcnta;
1177 	cerrcntb[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntb;
1178 	cerrcntc[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntc;
1179 	cerrcntd[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntd;
1180 	rp->nb.nf_fbd_regs.badrama = BADRAMA_RD(branch);
1181 	rp->nb.nf_fbd_regs.badramb = BADRAMB_RD(branch);
1182 	rp->nb.nf_fbd_regs.badcnt = BADCNT_RD(branch);
1183 	if (!willpanic) {
1184 		if (rp->nb.nf_fbd_regs.ferr_nf_fbd || *interpose)
1185 			FERR_NF_FBD_WR(rp->nb.nf_fbd_regs.ferr_nf_fbd);
1186 		if (rp->nb.nf_fbd_regs.nerr_nf_fbd)
1187 			NERR_NF_FBD_WR(rp->nb.nf_fbd_regs.nerr_nf_fbd);
1188 		/*
1189 		 * if interpose write read-only registers to clear from pcii
1190 		 * cache
1191 		 */
1192 		if (*interpose) {
1193 			RECMEMA_WR(branch);
1194 			RECMEMB_WR(branch);
1195 			RECFGLOG_WR(branch);
1196 			RECFBDA_WR(branch);
1197 			RECFBDB_WR(branch);
1198 			RECFBDC_WR(branch);
1199 			RECFBDD_WR(branch);
1200 			RECFBDE_WR(branch);
1201 			RECFBDF_WR(branch);
1202 			SPCPS_WR(branch);
1203 		}
1204 	}
1205 }
1206 
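/*
 * Capture the non-fatal DDR2 memory error registers of the faulting
 * branch, including the correctable error counts, clearing the error
 * registers if we are not about to panic.
 */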
1207 static void
1208 log_nf_mem_err(nb_regs_t *rp, int willpanic, int *interpose)
1209 {
1210 	int channel, branch;
1211 	int t = 0;
1212 
1213 	rp->flag = NB_REG_LOG_NF_MEM;
1214 
	/* Memory error registers */
1216 	rp->nb.nf_mem_regs.ferr_nf_mem = FERR_NF_MEM_RD(interpose);
1217 	channel = (rp->nb.nf_mem_regs.ferr_nf_mem >> 28) & 0x1;
1218 	branch = channel;
1219 	rp->nb.nf_mem_regs.nerr_nf_mem = NERR_NF_MEM_RD(&t);
1220 	*interpose |= t;
1221 	rp->nb.nf_mem_regs.redmema = MEM_REDMEMA_RD(branch);
1222 	rp->nb.nf_mem_regs.redmemb = MEM_REDMEMB_RD(branch);
1223 	rp->nb.nf_mem_regs.recmema = MEM_RECMEMA_RD(branch);
1224 	rp->nb.nf_mem_regs.recmemb = MEM_RECMEMB_RD(branch);
1225 	rp->nb.nf_mem_regs.nrecmema = MEM_NRECMEMA_RD(branch);
1226 	rp->nb.nf_mem_regs.nrecmemb = MEM_NRECMEMB_RD(branch);
1227 
1228 	/* spare rank */
1229 	rp->nb.nf_mem_regs.spcps = SPCPS_RD(branch);
1230 	rp->nb.nf_mem_regs.spcpc = SPCPC_RD(branch);
1231 
1232 	/* RAS registers */
1233 	rp->nb.nf_mem_regs.cerrcnt = MEM_CERRCNT_RD(branch);
1234 	rp->nb.nf_mem_regs.cerrcnt_ext = (uint32_t)MEM_CERRCNT_EXT_RD(branch);
1235 	rp->nb.nf_mem_regs.cerrcnt_last = cerrcnta[branch][channel & 1];
1236 	rp->nb.nf_mem_regs.cerrcnt_ext_last = cerrcntb[branch][channel & 1];
1237 	cerrcnta[branch][channel & 1] = rp->nb.nf_mem_regs.cerrcnt;
1238 	cerrcntb[branch][channel & 1] = rp->nb.nf_mem_regs.cerrcnt_ext;
1239 	rp->nb.nf_mem_regs.badram = BADRAMA_RD(branch);
1240 	rp->nb.nf_mem_regs.badcnt = BADCNT_RD(branch);
1241 	rp->nb.nf_mem_regs.validlog = VALIDLOG_RD(branch);
1242 
1243 	if (!willpanic) {
1244 		if (rp->nb.nf_mem_regs.ferr_nf_mem || *interpose)
1245 			FERR_NF_MEM_WR(rp->nb.nf_mem_regs.ferr_nf_mem);
1246 		if (rp->nb.nf_mem_regs.nerr_nf_mem)
1247 			NERR_NF_MEM_WR(rp->nb.nf_mem_regs.nerr_nf_mem);
1248 		/*
1249 		 * if interpose, write read-only registers to clear from pci
1250 		 * cache
1251 		 */
1252 		if (*interpose) {
1253 			MEM_NRECMEMA_WR(branch);
1254 			MEM_NRECMEMB_WR(branch);
1255 			MEM_REDMEMA_WR(branch);
1256 			MEM_REDMEMB_WR(branch);
1257 			MEM_RECMEMA_WR(branch);
1258 			MEM_RECMEMB_WR(branch);
1259 			SPCPS_WR(branch);
1260 		}
1261 	}
1262 }
1263 
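/*
 * Log the first global error: dispatch to the per-unit logging routine
 * selected by the FERR bits, clear the corresponding NERR bits and queue
 * the captured registers for ereport generation unless the error was
 * spurious.
 */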
1264 static void
1265 log_ferr(uint64_t ferr, uint32_t *nerrp, nb_logout_t *log, int willpanic)
1266 {
1267 	nb_regs_t *rp = &log->nb_regs;
1268 	uint32_t nerr = *nerrp;
1269 	int interpose = 0;
1270 	int spurious = 0;
1271 
1272 	log->acl_timestamp = gethrtime_waitfree();
1273 	if ((ferr & (GE_PCIEX_FATAL | GE_PCIEX_NF)) != 0) {
1274 		log_pex_err(ferr, rp, willpanic, &interpose);
1275 		*nerrp = nerr & ~(GE_PCIEX_FATAL | GE_PCIEX_NF);
1276 	} else if ((ferr & GE_FBD_FATAL) != 0) {
1277 		log_fat_fbd_err(rp, willpanic, &interpose);
1278 		*nerrp = nerr & ~GE_NERR_FBD_FATAL;
1279 	} else if ((ferr & GE_FBD_NF) != 0) {
1280 		log_nf_fbd_err(rp, willpanic, &interpose);
1281 		*nerrp = nerr & ~GE_NERR_FBD_NF;
1282 	} else if ((ferr & GE_MEM_NF) != 0) {
1283 		log_nf_mem_err(rp, willpanic, &interpose);
1284 		*nerrp = nerr & ~GE_NERR_MEM_NF;
1285 	} else if ((ferr & (GE_FERR_FSB_FATAL | GE_FERR_FSB_NF)) != 0) {
1286 		log_fsb_err(ferr, rp, willpanic, &interpose);
1287 		*nerrp = nerr & ~(GE_NERR_FSB_FATAL | GE_NERR_FSB_NF);
1288 	} else if ((ferr & (GE_DMA_FATAL | GE_DMA_NF)) != 0) {
1289 		log_dma_err(rp, &interpose);
1290 		*nerrp = nerr & ~(GE_DMA_FATAL | GE_DMA_NF);
1291 	} else if ((ferr & (GE_INT_FATAL | GE_INT_NF)) != 0) {
1292 		spurious = log_int_err(rp, willpanic, &interpose);
1293 		*nerrp = nerr & ~(GE_INT_FATAL | GE_INT_NF);
1294 	} else if (nb_chipset == INTEL_NB_5400 &&
1295 	    (ferr & (GE_FERR_THERMAL_FATAL | GE_FERR_THERMAL_NF)) != 0) {
1296 		log_thermal_err(rp, willpanic, &interpose);
1297 		*nerrp = nerr & ~(GE_FERR_THERMAL_FATAL | GE_FERR_THERMAL_NF);
1298 	}
1299 	if (interpose)
1300 		log->type = "inject";
1301 	else
1302 		log->type = "error";
1303 	if (!spurious) {
1304 		errorq_dispatch(nb_queue, log, sizeof (nb_logout_t),
1305 		    willpanic ? ERRORQ_SYNC : ERRORQ_ASYNC);
1306 	}
1307 }
1308 
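/*
 * Log a next (second) global error recorded in NERR_GLOBAL, following
 * the same pattern as log_ferr() but keyed off the NERR bit layout.
 */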
1309 static void
1310 log_nerr(uint32_t *errp, nb_logout_t *log, int willpanic)
1311 {
1312 	uint32_t err;
1313 	nb_regs_t *rp = &log->nb_regs;
1314 	int interpose = 0;
1315 	int spurious = 0;
1316 
1317 	err = *errp;
1318 	log->acl_timestamp = gethrtime_waitfree();
1319 	if ((err & (GE_PCIEX_FATAL | GE_PCIEX_NF)) != 0) {
1320 		log_pex_err(err, rp, willpanic, &interpose);
1321 		*errp = err & ~(GE_PCIEX_FATAL | GE_PCIEX_NF);
1322 	} else if ((err & GE_NERR_FBD_FATAL) != 0) {
1323 		log_fat_fbd_err(rp, willpanic, &interpose);
1324 		*errp = err & ~GE_NERR_FBD_FATAL;
1325 	} else if ((err & GE_NERR_FBD_NF) != 0) {
1326 		log_nf_fbd_err(rp, willpanic, &interpose);
1327 		*errp = err & ~GE_NERR_FBD_NF;
1328 	} else if ((err & GE_NERR_MEM_NF) != 0) {
1329 		log_nf_mem_err(rp, willpanic, &interpose);
1330 		*errp = err & ~GE_NERR_MEM_NF;
1331 	} else if ((err & (GE_NERR_FSB_FATAL | GE_NERR_FSB_NF)) != 0) {
1332 		log_fsb_err(GE_NERR_TO_FERR_FSB(err), rp, willpanic,
1333 		    &interpose);
1334 		*errp = err & ~(GE_NERR_FSB_FATAL | GE_NERR_FSB_NF);
1335 	} else if ((err & (GE_DMA_FATAL | GE_DMA_NF)) != 0) {
1336 		log_dma_err(rp, &interpose);
1337 		*errp = err & ~(GE_DMA_FATAL | GE_DMA_NF);
1338 	} else if ((err & (GE_INT_FATAL | GE_INT_NF)) != 0) {
1339 		spurious = log_int_err(rp, willpanic, &interpose);
1340 		*errp = err & ~(GE_INT_FATAL | GE_INT_NF);
1341 	}
1342 	if (interpose)
1343 		log->type = "inject";
1344 	else
1345 		log->type = "error";
1346 	if (!spurious) {
1347 		errorq_dispatch(nb_queue, log, sizeof (nb_logout_t),
1348 		    willpanic ? ERRORQ_SYNC : ERRORQ_ASYNC);
1349 	}
1350 }
1351 
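/*
 * Common northbridge error handler: read the global FERR/NERR registers,
 * log and clear each recorded error, and restore any machine check error
 * masks once a pass finds no further errors.
 */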
1352 /*ARGSUSED*/
1353 void
1354 nb_error_trap(cmi_hdl_t hdl, boolean_t ismc, boolean_t willpanic)
1355 {
1356 	uint64_t ferr;
1357 	uint32_t nerr, err;
1358 	int nmc = 0;
1359 	int i;
1360 
1361 	if (mutex_tryenter(&nb_mutex) == 0)
1362 		return;
1363 
1364 	nerr = NERR_GLOBAL_RD();
1365 	err = nerr;
1366 	for (i = 0; i < NB_MAX_ERRORS; i++) {
1367 		ferr = FERR_GLOBAL_RD();
1368 		nb_log.nb_regs.chipset = nb_chipset;
1369 		nb_log.nb_regs.ferr = ferr;
1370 		nb_log.nb_regs.nerr = nerr;
1371 		if (ferr) {
1372 			log_ferr(ferr, &err, &nb_log, willpanic);
1373 			FERR_GLOBAL_WR(ferr);
1374 			nmc++;
1375 		} else if (err) {
1376 			log_nerr(&err, &nb_log, willpanic);
1377 			nmc++;
1378 		}
1379 	}
1380 	if (nerr) {
1381 		NERR_GLOBAL_WR(nerr);
1382 	}
1383 	if (nmc == 0 && nb_mask_mc_set)
1384 		nb_mask_mc_reset();
1385 	mutex_exit(&nb_mutex);
1386 }
1387 
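/*
 * The nb_*_err_payload() routines below translate the captured error
 * registers and decoded scratchpad data into ereport payload members.
 */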
1388 static void
1389 nb_fsb_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1390     nb_scatchpad_t *data)
1391 {
1392 	int intel_error_list;
1393 	char buf[32];
1394 
1395 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FSB,
1396 	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.fsb, NULL);
1397 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_FSB,
1398 	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.ferr_fat_fsb, NULL);
1399 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_FSB,
1400 	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.nerr_fat_fsb, NULL);
1401 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_FSB,
1402 	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.ferr_nf_fsb, NULL);
1403 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_FSB,
1404 	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.nerr_nf_fsb, NULL);
1405 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFSB,
1406 	    DATA_TYPE_UINT32, nb_regs->nb.fsb_regs.nrecfsb, NULL);
1407 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFSB_ADDR,
1408 	    DATA_TYPE_UINT64, nb_regs->nb.fsb_regs.nrecfsb_addr, NULL);
1409 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFSB,
1410 	    DATA_TYPE_UINT32, nb_regs->nb.fsb_regs.recfsb, NULL);
1411 	intel_error_list = data->intel_error_list;
1412 	if (intel_error_list >= 0)
1413 		(void) snprintf(buf, sizeof (buf), "F%d", intel_error_list);
1414 	else
1415 		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1416 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1417 	    DATA_TYPE_STRING, buf, NULL);
1418 }
1419 
1420 static void
1421 nb_pex_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1422     nb_scatchpad_t *data)
1423 {
1424 	int intel_error_list;
1425 	char buf[32];
1426 
1427 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX,
1428 	    DATA_TYPE_UINT8, nb_regs->nb.pex_regs.pex, NULL);
1429 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_FAT_FERR,
1430 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_fat_ferr, NULL);
1431 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_FAT_NERR,
1432 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_fat_nerr, NULL);
1433 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_NF_CORR_FERR,
1434 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_nf_corr_ferr, NULL);
1435 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_NF_CORR_NERR,
1436 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_nf_corr_nerr, NULL);
1437 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UNCERRSEV,
1438 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.uncerrsev, NULL);
1439 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RPERRSTS,
1440 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.rperrsts, NULL);
1441 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RPERRSID,
1442 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.rperrsid, NULL);
1443 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UNCERRSTS,
1444 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.uncerrsts, NULL);
1445 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_AERRCAPCTRL,
1446 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.aerrcapctrl, NULL);
1447 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CORERRSTS,
1448 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.corerrsts, NULL);
1449 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEXDEVSTS,
1450 	    DATA_TYPE_UINT16, nb_regs->nb.pex_regs.pexdevsts, NULL);
1451 	intel_error_list = data->intel_error_list;
1452 	if (intel_error_list >= 0)
1453 		(void) snprintf(buf, sizeof (buf), "IO%d", intel_error_list);
1454 	else
1455 		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1456 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1457 	    DATA_TYPE_STRING, buf, NULL);
1458 }
1459 
1460 static void
1461 nb_int_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1462     nb_scatchpad_t *data)
1463 {
1464 	int intel_error_list;
1465 	char buf[32];
1466 
1467 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_INT,
1468 	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.ferr_fat_int, NULL);
1469 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_INT,
1470 	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.ferr_nf_int, NULL);
1471 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_INT,
1472 	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.nerr_fat_int, NULL);
1473 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_INT,
1474 	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.nerr_nf_int, NULL);
1475 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECINT,
1476 	    DATA_TYPE_UINT32, nb_regs->nb.int_regs.nrecint, NULL);
1477 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECINT,
1478 	    DATA_TYPE_UINT32, nb_regs->nb.int_regs.recint, NULL);
1479 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECSF,
1480 	    DATA_TYPE_UINT64, nb_regs->nb.int_regs.nrecsf, NULL);
1481 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECSF,
1482 	    DATA_TYPE_UINT64, nb_regs->nb.int_regs.recsf, NULL);
1483 	intel_error_list = data->intel_error_list;
1484 	if (intel_error_list >= 0)
1485 		(void) snprintf(buf, sizeof (buf), "B%d", intel_error_list);
1486 	else
1487 		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1488 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1489 	    DATA_TYPE_STRING, buf, NULL);
1490 }
1491 
1492 static void
1493 nb_fat_fbd_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1494     nb_scatchpad_t *data)
1495 {
1496 	nb_mem_scatchpad_t *sp;
1497 	char buf[32];
1498 
1499 	sp = &((nb_scatchpad_t *)data)->ms;
1500 
1501 	if (sp->ras != -1) {
1502 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
1503 		    DATA_TYPE_INT32, sp->bank, NULL);
1504 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
1505 		    DATA_TYPE_INT32, sp->cas, NULL);
1506 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
1507 		    DATA_TYPE_INT32, sp->ras, NULL);
1508 		if (sp->offset != -1LL) {
1509 			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
1510 			    DATA_TYPE_UINT64, sp->offset, NULL);
1511 		}
1512 		if (sp->pa != -1LL) {
1513 			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
1514 			    DATA_TYPE_UINT64, sp->pa, NULL);
1515 		}
1516 	}
1517 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_FBD,
1518 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.ferr_fat_fbd, NULL);
1519 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_FBD,
1520 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nerr_fat_fbd, NULL);
1521 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMA,
1522 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecmema, NULL);
1523 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMB,
1524 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecmemb, NULL);
1525 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFGLOG,
1526 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfglog, NULL);
1527 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDA,
1528 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbda, NULL);
1529 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDB,
1530 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdb, NULL);
1531 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDC,
1532 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdc, NULL);
1533 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDD,
1534 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdd, NULL);
1535 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDE,
1536 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbde, NULL);
1537 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDF,
1538 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdf, NULL);
1539 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
1540 	    DATA_TYPE_UINT8, nb_regs->nb.fat_fbd_regs.spcps, NULL);
1541 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
1542 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.spcpc, NULL);
1543 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UERRCNT,
1544 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.uerrcnt, NULL);
1545 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UERRCNT_LAST,
1546 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.uerrcnt_last, NULL);
1547 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMA,
1548 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.badrama, NULL);
1549 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMB,
1550 	    DATA_TYPE_UINT16, nb_regs->nb.fat_fbd_regs.badramb, NULL);
1551 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
1552 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.badcnt, NULL);
1553 
1554 	if (sp->intel_error_list >= 0)
1555 		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
1556 	else
1557 		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1558 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1559 	    DATA_TYPE_STRING, buf, NULL);
1560 }
1561 
static void
nb_nf_fbd_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	nb_mem_scatchpad_t *sp;
	char buf[32];

	sp = &data->ms;

	if (sp->dimm == -1 && sp->rank != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RANK,
		    DATA_TYPE_INT32, sp->rank, NULL);
	}
	if (sp->ras != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
		    DATA_TYPE_INT32, sp->bank, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
		    DATA_TYPE_INT32, sp->cas, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
		    DATA_TYPE_INT32, sp->ras, NULL);
		if (sp->offset != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
			    DATA_TYPE_UINT64, sp->offset, NULL);
		}
		if (sp->pa != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
			    DATA_TYPE_UINT64, sp->pa, NULL);
		}
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.ferr_nf_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.nerr_nf_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFGLOG,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfglog, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbda, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDC,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdc, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDD,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDE,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbde, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDF,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdf, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
	    DATA_TYPE_UINT8, nb_regs->nb.nf_fbd_regs.spcps, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.spcpc, NULL);
	if (nb_chipset == INTEL_NB_7300 || nb_chipset == INTEL_NB_5400) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTA,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTB,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntb, NULL);
		if (nb_chipset == INTEL_NB_7300) {
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTC,
			    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntc,
			    NULL);
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTD,
			    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntd,
			    NULL);
		}
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTA_LAST,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta_last,
		    NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTB_LAST,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntb_last,
		    NULL);
		if (nb_chipset == INTEL_NB_7300) {
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTC_LAST,
			    DATA_TYPE_UINT32,
			    nb_regs->nb.nf_fbd_regs.cerrcntc_last, NULL);
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTD_LAST,
			    DATA_TYPE_UINT32,
			    nb_regs->nb.nf_fbd_regs.cerrcntd_last, NULL);
		}
	} else {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_LAST,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta_last,
		    NULL);
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.badrama, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMB,
	    DATA_TYPE_UINT16, nb_regs->nb.nf_fbd_regs.badramb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.badcnt, NULL);

	if (sp->intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}

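/*
 * Add the payload for a non-fatal memory ereport: decoded address
 * information from the scratchpad, the non-fatal memory error
 * registers and correctable-error counters, and the Intel error-list
 * entry.
 */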
static void
nb_nf_mem_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	nb_mem_scatchpad_t *sp;
	char buf[32];

	sp = &data->ms;

	if (sp->dimm == -1 && sp->rank != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RANK,
		    DATA_TYPE_INT32, sp->rank, NULL);
	}
	if (sp->ras != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
		    DATA_TYPE_INT32, sp->bank, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
		    DATA_TYPE_INT32, sp->cas, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
		    DATA_TYPE_INT32, sp->ras, NULL);
		if (sp->offset != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
			    DATA_TYPE_UINT64, sp->offset, NULL);
		}
		if (sp->pa != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
			    DATA_TYPE_UINT64, sp->pa, NULL);
		}
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_MEM,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.ferr_nf_mem, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_MEM,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.nerr_nf_mem, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.recmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.recmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_REDMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.redmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_REDMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.redmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.nrecmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.nrecmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
	    DATA_TYPE_UINT8, nb_regs->nb.nf_mem_regs.spcps, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.spcpc, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.cerrcnt, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_LAST,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.cerrcnt_last, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_EXT,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.cerrcnt_ext, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_EXT_LAST,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.cerrcnt_ext_last, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAM,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.badram, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.badcnt, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_VALIDLOG,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.validlog, NULL);

	if (sp->intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}

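/*
 * Add the payload for a DMA engine ereport: the PCI status and PCI
 * Express device status registers.
 */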
static void
nb_dma_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload)
{
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PCISTS,
	    DATA_TYPE_UINT16, nb_regs->nb.dma_regs.pcists, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEXDEVSTS,
	    DATA_TYPE_UINT16, nb_regs->nb.dma_regs.pexdevsts, NULL);
}

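/*
 * Add the payload for a thermal ereport: the fatal and non-fatal
 * thermal error registers, the CTSTS and THRTSTS status registers and
 * the Intel error-list entry ("THn").
 */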
static void
nb_thr_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	char buf[32];

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_THR,
	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ferr_fat_thr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_THR,
	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.nerr_fat_thr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_THR,
	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ferr_nf_thr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_THR,
	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.nerr_nf_thr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CTSTS,
	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ctsts, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_THRTSTS,
	    DATA_TYPE_UINT16, nb_regs->nb.thr_regs.thrtsts, NULL);
	if (data->intel_error_list >= 0) {
		(void) snprintf(buf, sizeof (buf), "TH%d",
		    data->intel_error_list);
	} else {
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}

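/*
 * Add the error-specific register data saved in the logout area to the
 * ereport, dispatching on the type of error that was logged.  Unknown
 * logout types just get the global FERR/NERR registers.
 */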
static void
nb_ereport_add_logout(nvlist_t *payload, const nb_logout_t *acl,
    nb_scatchpad_t *data)
{
	const nb_regs_t *nb_regs = &acl->nb_regs;

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_MC_TYPE,
	    DATA_TYPE_STRING, acl->type, NULL);
	switch (nb_regs->flag) {
	case NB_REG_LOG_FSB:
		nb_fsb_err_payload(nb_regs, payload, data);
		break;
	case NB_REG_LOG_PEX:
		nb_pex_err_payload(nb_regs, payload, data);
		break;
	case NB_REG_LOG_INT:
		nb_int_err_payload(nb_regs, payload, data);
		break;
	case NB_REG_LOG_FAT_FBD:
		nb_fat_fbd_err_payload(nb_regs, payload, data);
		break;
	case NB_REG_LOG_NF_FBD:
		nb_nf_fbd_err_payload(nb_regs, payload, data);
		break;
	case NB_REG_LOG_DMA:
		nb_dma_err_payload(nb_regs, payload);
		break;
	case NB_REG_LOG_THR:
		nb_thr_err_payload(nb_regs, payload, data);
		break;
	case NB_REG_LOG_NF_MEM:
		nb_nf_mem_err_payload(nb_regs, payload, data);
		break;
	default:
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_GLOBAL,
		    DATA_TYPE_UINT64, nb_regs->ferr, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_GLOBAL,
		    DATA_TYPE_UINT32, nb_regs->nerr, NULL);
		break;
	}
}

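/*
 * Front side bus error: the detector is the chip behind the faulting
 * FSB (on the 7300 the FSB number is scaled by two to get the chip
 * instance), the error-list entry is decoded from the first-error
 * registers, or from the next-error registers if no first error was
 * captured, and the class is set to nb.fsb.
 */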
void
nb_fsb_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    nb_scatchpad_t *data)
{
	int chip;

	if (nb_chipset == INTEL_NB_7300)
		chip = nb_regs->nb.fsb_regs.fsb * 2;
	else
		chip = nb_regs->nb.fsb_regs.fsb;
	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
	    "motherboard", 0, "chip", chip);

	if (nb_regs->nb.fsb_regs.ferr_fat_fsb == 0 &&
	    nb_regs->nb.fsb_regs.ferr_nf_fsb == 0) {
		data->intel_error_list = intel_fsb_err(nb_regs->nb.fsb_regs.fsb,
		    nb_regs->nb.fsb_regs.nerr_fat_fsb,
		    nb_regs->nb.fsb_regs.nerr_nf_fsb);
	} else {
		data->intel_error_list = intel_fsb_err(nb_regs->nb.fsb_regs.fsb,
		    nb_regs->nb.fsb_regs.ferr_fat_fsb,
		    nb_regs->nb.fsb_regs.ferr_nf_fsb);
	}
	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "fsb");
}

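/*
 * PCI Express/ESI error: the detector is the motherboard for port 0
 * (class nb.esi) or the corresponding hostbridge otherwise (class
 * nb.pex).  The error-list entry is decoded with the 5400-specific
 * lookup where appropriate, using the next-error registers when no
 * first error was captured.
 */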
void
nb_pex_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    nb_scatchpad_t *data)
{
	int hostbridge;

	if (nb_regs->nb.pex_regs.pex == 0) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
		    "motherboard", 0);
	} else {
		hostbridge = nb_regs->nb.pex_regs.pex - 1;
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
		    "motherboard", 0,
		    "hostbridge", hostbridge);
	}

	if (nb_regs->nb.pex_regs.pex_fat_ferr == 0 &&
	    nb_regs->nb.pex_regs.pex_nf_corr_ferr == 0) {
		if (nb_chipset == INTEL_NB_5400) {
			data->intel_error_list =
			    intel_pex_5400_err(
			    nb_regs->nb.pex_regs.pex_fat_nerr,
			    nb_regs->nb.pex_regs.pex_nf_corr_nerr);
		} else {
			data->intel_error_list =
			    intel_pex_err(nb_regs->nb.pex_regs.pex_fat_nerr,
			    nb_regs->nb.pex_regs.pex_nf_corr_nerr);
		}
	} else {
		if (nb_chipset == INTEL_NB_5400) {
			data->intel_error_list =
			    intel_pex_5400_err(
			    nb_regs->nb.pex_regs.pex_fat_ferr,
			    nb_regs->nb.pex_regs.pex_nf_corr_ferr);
		} else {
			data->intel_error_list =
			    intel_pex_err(nb_regs->nb.pex_regs.pex_fat_ferr,
			    nb_regs->nb.pex_regs.pex_nf_corr_ferr);
		}
	}

	if (nb_regs->nb.pex_regs.pex == 0) {
		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "esi");
	} else {
		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "pex");
	}
}

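/*
 * Internal northbridge error: the detector is the motherboard, the
 * error-list entry is decoded from the fatal/non-fatal internal error
 * registers (next-error registers if no first error was captured) and
 * the class is set to nb.ie.
 */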
void
nb_int_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    void *data)
{
	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
	    "motherboard", 0);

	if (nb_regs->nb.int_regs.ferr_fat_int == 0 &&
	    nb_regs->nb.int_regs.ferr_nf_int == 0) {
		((nb_scatchpad_t *)data)->intel_error_list =
		    intel_int_err(nb_regs->nb.int_regs.nerr_fat_int,
		    nb_regs->nb.int_regs.nerr_nf_int);
	} else {
		((nb_scatchpad_t *)data)->intel_error_list =
		    intel_int_err(nb_regs->nb.int_regs.ferr_fat_int,
		    nb_regs->nb.int_regs.ferr_nf_int);
	}
	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "ie");
}

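/*
 * Fatal FB-DIMM error: decode the error and the affected location into
 * the scratchpad, construct a detector FMRI as specific as the decode
 * allows (dimm/rank, dram-channel, memory-controller or just the
 * motherboard) and derive the ereport class from the decoded error
 * name.
 */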
void
nb_fat_fbd_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    void *data)
{
	char *intr;
	nb_mem_scatchpad_t *sp;

	intr = fat_memory_error(nb_regs, data);
	sp = &((nb_scatchpad_t *)data)->ms;

	if (sp->dimm != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
		    "motherboard", 0,
		    "memory-controller", sp->branch,
		    "dram-channel", sp->channel,
		    "dimm", sp->dimm,
		    "rank", sp->rank);
	} else if (sp->channel != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
		    "motherboard", 0,
		    "memory-controller", sp->branch,
		    "dram-channel", sp->channel);
	} else if (sp->branch != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
		    "motherboard", 0,
		    "memory-controller", sp->branch);
	} else {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
		    "motherboard", 0);
	}

	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
}

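/*
 * Non-fatal FB-DIMM error: as for the fatal case, but decoded from the
 * non-fatal FBD error registers.
 */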
void
nb_nf_fbd_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    void *data)
{
	char *intr;
	nb_mem_scatchpad_t *sp;

	intr = nf_memory_error(nb_regs, data);
	sp = &((nb_scatchpad_t *)data)->ms;

	if (sp->dimm != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
		    "motherboard", 0,
		    "memory-controller", sp->branch,
		    "dram-channel", sp->channel,
		    "dimm", sp->dimm,
		    "rank", sp->rank);
	} else if (sp->channel != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
		    "motherboard", 0,
		    "memory-controller", sp->branch,
		    "dram-channel", sp->channel);
	} else if (sp->branch != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
		    "motherboard", 0,
		    "memory-controller", sp->branch);
	} else {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
		    "motherboard", 0);
	}

	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
}

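/*
 * DMA engine error: the detector is the motherboard and the class is
 * nb.dma.
 */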
void
nb_dma_report(char *class, nvlist_t *detector)
{
	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
	    "motherboard", 0);

	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "dma");
}

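/*
 * Thermal error: decode the error-list entry from the fatal/non-fatal
 * thermal error registers; the detector is the motherboard and the
 * class is nb.otf.
 */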
void
nb_thr_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    void *data)
{
	((nb_scatchpad_t *)data)->intel_error_list =
	    intel_thr_err(nb_regs->nb.thr_regs.ferr_fat_thr,
	    nb_regs->nb.thr_regs.ferr_nf_thr);
	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
	    "motherboard", 0);

	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "otf");
}

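/*
 * Non-fatal memory error: as for the FB-DIMM cases, but decoded from
 * the non-fatal memory error registers.
 */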
void
nb_nf_mem_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    void *data)
{
	char *intr;
	nb_mem_scatchpad_t *sp;

	intr = nf_mem_error(nb_regs, data);
	sp = &((nb_scatchpad_t *)data)->ms;

	if (sp->dimm != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
		    "motherboard", 0,
		    "memory-controller", sp->branch,
		    "dram-channel", sp->channel,
		    "dimm", sp->dimm,
		    "rank", sp->rank);
	} else if (sp->channel != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
		    "motherboard", 0,
		    "memory-controller", sp->branch,
		    "dram-channel", sp->channel);
	} else if (sp->branch != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
		    "motherboard", 0,
		    "memory-controller", sp->branch);
	} else {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
		    "motherboard", 0);
	}

	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
}

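/*
 * Construct the detector FMRI and ereport class for the error captured
 * in the logout area.  The caller supplies the class buffer, an
 * optional nv allocator for the detector nvlist and scratch space for
 * the decoded error information.
 */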
nvlist_t *
nb_report(const nb_regs_t *nb_regs, char *class, nv_alloc_t *nva, void *scratch)
{
	nvlist_t *detector = fm_nvlist_create(nva);

	switch (nb_regs->flag) {
	case NB_REG_LOG_FSB:
		nb_fsb_report(nb_regs, class, detector, scratch);
		break;
	case NB_REG_LOG_PEX:
		nb_pex_report(nb_regs, class, detector, scratch);
		break;
	case NB_REG_LOG_INT:
		nb_int_report(nb_regs, class, detector, scratch);
		break;
	case NB_REG_LOG_FAT_FBD:
		nb_fat_fbd_report(nb_regs, class, detector, scratch);
		break;
	case NB_REG_LOG_NF_FBD:
		nb_nf_fbd_report(nb_regs, class, detector, scratch);
		break;
	case NB_REG_LOG_DMA:
		nb_dma_report(class, detector);
		break;
	case NB_REG_LOG_THR:
		nb_thr_report(nb_regs, class, detector, scratch);
		break;
	case NB_REG_LOG_NF_MEM:
		nb_nf_mem_report(nb_regs, class, detector, scratch);
		break;
	default:
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
		    "motherboard", 0);

		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "unknown");
	}
	return (detector);
}

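/*
 * Errorq drain handler: turn a logout entry into an ereport with a
 * detector and error-specific payload and post it.  At panic time the
 * ereport is built in pre-reserved errorq elements and committed
 * synchronously instead of being allocated and posted through the
 * normal path.
 */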
/*ARGSUSED*/
void
nb_drain(void *ignored, const void *data, const errorq_elem_t *eqe)
{
	nb_logout_t *acl = (nb_logout_t *)data;
	errorq_elem_t *eqep, *scr_eqep;
	nvlist_t *ereport, *detector;
	nv_alloc_t *nva = NULL;
	char buf[FM_MAX_CLASS];
	nb_scatchpad_t nb_scatchpad;

	if (panicstr) {
		if ((eqep = errorq_reserve(ereport_errorq)) == NULL)
			return;
		ereport = errorq_elem_nvl(ereport_errorq, eqep);
		/*
		 * Now try to reserve another element for scratch space and
		 * use that for further scratch space (e.g. for constructing
		 * nvlists to add to the main ereport).  If we can't reserve
		 * a scratch element, just fall back to working within the
		 * element we already have, and hope for the best.  All this
		 * is necessary because the fixed-buffer nv allocator does
		 * not reclaim freed space and nvlist construction is
		 * expensive.
		 */
		if ((scr_eqep = errorq_reserve(ereport_errorq)) != NULL)
			nva = errorq_elem_nva(ereport_errorq, scr_eqep);
		else
			nva = errorq_elem_nva(ereport_errorq, eqep);
	} else {
		ereport = fm_nvlist_create(NULL);
	}
	detector = nb_report(&acl->nb_regs, buf, nva, &nb_scatchpad);
	if (detector == NULL)
		return;
	fm_ereport_set(ereport, FM_EREPORT_VERSION, buf,
	    fm_ena_generate(acl->acl_timestamp, FM_ENA_FMT1), detector, NULL);
	/*
	 * We're done with 'detector', so reclaim the scratch space.
	 */
	if (panicstr) {
		fm_nvlist_destroy(detector, FM_NVA_RETAIN);
		nv_alloc_reset(nva);
	} else {
		fm_nvlist_destroy(detector, FM_NVA_FREE);
	}

	/*
	 * Encode the error-specific data that was saved in the logout area.
	 */
	nb_ereport_add_logout(ereport, acl, &nb_scatchpad);

	if (panicstr) {
		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
		if (scr_eqep)
			errorq_cancel(ereport_errorq, scr_eqep);
	} else {
		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
		fm_nvlist_destroy(ereport, FM_NVA_FREE);
	}
}